
Commit 392959b

chore: improve code comments clarity (#2943)
## Overview

improve code comments clarity

Signed-off-by: stringscut <[email protected]>
1 parent afcd6bd commit 392959b

5 files changed (+5, -5 lines)


block/internal/cache/pending_headers.go

Lines changed: 1 addition & 1 deletion
@@ -17,7 +17,7 @@ import (
 // - DA submission of multiple headers is atomic - it's impossible to submit only part of a batch
 //
 // lastSubmittedHeaderHeight is updated only after receiving confirmation from DA.
-// Worst case scenario is when headers was successfully submitted to DA, but confirmation was not received (e.g. node was
+// Worst case scenario is when headers were successfully submitted to DA, but confirmation was not received (e.g. node was
 // restarted, networking issue occurred). In this case headers are re-submitted to DA (it's extra cost).
 // evolve is able to skip duplicate headers so this shouldn't affect full nodes.
 // TODO(tzdybal): we shouldn't try to push all pending headers at once; this should depend on max blob size
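For readers unfamiliar with the pending-headers cache, here is a minimal, hypothetical Go sketch of the behaviour this comment describes; the type and method names are invented for illustration and are not the real pending_headers.go API. The marker only advances once DA confirms the batch, so a crash before confirmation merely causes a (deduplicated) re-submission.

package main

import "fmt"

// pendingHeaders is a stand-in for the real cache type.
type pendingHeaders struct {
	lastSubmittedHeight uint64   // advanced only after DA confirmation
	pending             []uint64 // header heights awaiting confirmation
}

// markSubmitted is called only once a DA confirmation arrives. If the node
// restarts before that (the "worst case" in the comment), the marker is
// unchanged and the same headers are re-submitted; duplicates are assumed
// to be skipped by full nodes, so the only cost is the extra DA submission.
func (p *pendingHeaders) markSubmitted(confirmedUpTo uint64) {
	if confirmedUpTo > p.lastSubmittedHeight {
		p.lastSubmittedHeight = confirmedUpTo
	}
	remaining := p.pending[:0]
	for _, h := range p.pending {
		if h > confirmedUpTo {
			remaining = append(remaining, h)
		}
	}
	p.pending = remaining
}

func main() {
	p := &pendingHeaders{pending: []uint64{18, 19, 20}}
	p.markSubmitted(19)                           // confirmation received for heights up to 19
	fmt.Println(p.lastSubmittedHeight, p.pending) // 19 [20]
}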

block/internal/syncing/da_retriever.go

Lines changed: 1 addition & 1 deletion
@@ -345,7 +345,7 @@ func createEmptyDataForHeader(ctx context.Context, header *types.SignedHeader) *
 			ChainID: header.ChainID(),
 			Height:  header.Height(),
 			Time:    header.BaseHeader.Time,
-			LastDataHash: nil, // LastDataHash must be filled in the syncer, as it is not available here, block n-1 has not been processed yet.
+			LastDataHash: nil, // LastDataHash must be filled in the syncer, as it is not available here since block n-1 has not been processed yet.
 		},
 	}
 }
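The "filled in the syncer" remark describes a fill-later pattern: the retriever leaves LastDataHash nil because block n-1 has not been processed yet, and the syncer sets it afterwards. A tiny hypothetical sketch of that pattern (types and helper invented here, not the actual syncer code):

package main

import "fmt"

// data mimics only the fields relevant to the comment.
type data struct {
	Height       uint64
	LastDataHash []byte // left nil by the retriever; set once block n-1 is known
}

// fillLastDataHash is what a syncer-side step could conceptually do
// once the previous block's data hash becomes available.
func fillLastDataHash(d *data, prevHash []byte) {
	if d.LastDataHash == nil {
		d.LastDataHash = prevHash
	}
}

func main() {
	d := &data{Height: 346}
	fillLastDataHash(d, []byte{0xaa, 0xbb}) // hash of block n-1, known only to the syncer
	fmt.Printf("height %d, last data hash %x\n", d.Height, d.LastDataHash)
}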

block/internal/syncing/da_retriever_test.go

Lines changed: 1 addition & 1 deletion
@@ -182,7 +182,7 @@ func TestDARetriever_ProcessBlobs_HeaderOnly_EmptyDataExpected(t *testing.T) {
 	assert.True(t, ok)
 	assert.Equal(t, uint64(88), hHeight)
 
-	// empty data is not marked as data included (the submitter components does handle the empty data case)
+	// empty data is not marked as data included (the submitter component does handle the empty data case)
 	_, ok = r.cache.GetDataDAIncluded(events[0].Data.DACommitment().String())
 	assert.False(t, ok)
 }

pkg/genesis/genesis.go

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@ type Genesis struct {
 	// DAStartHeight corresponds to the height at which the first DA header/data has been published.
 	// This value is meant to be updated after genesis and shared to all syncing nodes for speeding up syncing via DA.
 	DAStartHeight uint64 `json:"da_start_height"`
-	// DaEpochForcedInclusion corresponds to the amount of DA blocks are considered an epochs
+	// DaEpochForcedInclusion corresponds to the amount of DA blocks are considered an epoch
 	// When forced inclusion is enabled, the epoch size determines at what frequency the forced included transactions are executed by the application.
 	DAEpochForcedInclusion uint64 `json:"da_epoch_forced_inclusion"`
 }
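To make the epoch wording concrete, here is a hedged arithmetic sketch, assuming epochs are counted from DAStartHeight in groups of DAEpochForcedInclusion DA blocks; the helper name is made up for illustration and is not part of the genesis package.

package main

import "fmt"

// epochIndex maps a DA height to an epoch index, under the assumption that
// epochs of size epochSize start at daStartHeight.
func epochIndex(daHeight, daStartHeight, epochSize uint64) uint64 {
	if epochSize == 0 || daHeight < daStartHeight {
		return 0
	}
	return (daHeight - daStartHeight) / epochSize
}

func main() {
	// With DAStartHeight = 100 and DAEpochForcedInclusion = 10,
	// DA heights 100..109 fall in epoch 0 and 110..119 in epoch 1,
	// so forced-included transactions would be picked up once per 10 DA blocks.
	fmt.Println(epochIndex(105, 100, 10)) // 0
	fmt.Println(epochIndex(110, 100, 10)) // 1
}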

sequencers/based/sequencer.go

Lines changed: 1 addition & 1 deletion
@@ -134,7 +134,7 @@ func (s *BasedSequencer) GetNextBatch(ctx context.Context, req coresequencer.Get
 	}
 
 	// Calculate timestamp based on remaining transactions after this batch
-	// timestamp correspond to the last block time of a DA epoch, based on the remaining transactions to be executed
+	// timestamp corresponds to the last block time of a DA epoch, based on the remaining transactions to be executed
 	// this is done in order to handle the case where a DA epoch must fit in multiple blocks
 	remainingTxs := uint64(len(s.currentBatchTxs)) - s.checkpoint.TxIndex
 	timestamp := s.currentDAEndTime.Add(-time.Duration(remainingTxs) * time.Millisecond)
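The timestamp line shown above is simple arithmetic: each remaining transaction shifts the block timestamp one millisecond before the DA epoch's end time, which keeps blocks ordered when one epoch is split across several blocks. A standalone worked example with mocked-up values (not the sequencer's real state):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Stand-ins for s.currentDAEndTime, len(s.currentBatchTxs) and s.checkpoint.TxIndex.
	daEndTime := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC)
	totalTxs, txIndex := uint64(100), uint64(40)

	remainingTxs := totalTxs - txIndex // 60 transactions still to execute
	timestamp := daEndTime.Add(-time.Duration(remainingTxs) * time.Millisecond)

	fmt.Println(timestamp) // 2024-01-01 11:59:59.94 +0000 UTC, 60ms before the epoch end
}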
