eth/downloader: minor typo fixes in comments (#21035)

# Conflicts:
#	trie/sync.go
Authored by Guillaume Ballet on 2020-05-06 15:35:04 +02:00, committed by Igor Mandrigin
parent f0ffa8a8b4
commit 61d8a49ece
3 changed files with 5 additions and 6 deletions


@@ -372,7 +372,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
 log.Info("Block synchronisation started")
 }
 // If we are already full syncing, but have a fast-sync bloom filter laying
-// around, make sure it does't use memory any more. This is a special case
+// around, make sure it doesn't use memory any more. This is a special case
 // when the user attempts to fast sync a new empty network.
 if mode == FullSync && d.stateBloom != nil {
 d.stateBloom.Close()
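
For context on the comment fixed above: once the user ends up doing a full sync, a leftover fast-sync bloom filter only ties up memory, so it is closed and forgotten. Below is a minimal, self-contained sketch of that pattern; SyncMode, stateBloom and releaseBloomIfFullSync are illustrative stand-ins, not the downloader's real types.

package main

import "fmt"

// SyncMode stands in for the downloader's sync-mode type; the names are
// illustrative placeholders, not the real package constants.
type SyncMode int

const (
	FullSync SyncMode = iota
	FastSync
)

// stateBloom is a stand-in for the fast-sync bloom filter: once the node is
// doing a full sync anyway, the filter only ties up memory.
type stateBloom struct{ closed bool }

// Close releases the filter's resources (here just a flag for illustration).
func (b *stateBloom) Close() { b.closed = true }

// releaseBloomIfFullSync mirrors the special case in the hunk above: when the
// chosen mode is a full sync, any leftover fast-sync bloom filter is closed
// and the reference dropped so its memory can be reclaimed.
func releaseBloomIfFullSync(mode SyncMode, bloom *stateBloom) *stateBloom {
	if mode == FullSync && bloom != nil {
		bloom.Close()
		return nil
	}
	return bloom
}

func main() {
	bloom := releaseBloomIfFullSync(FullSync, &stateBloom{})
	fmt.Println("bloom released:", bloom == nil)
}
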
@@ -682,7 +682,7 @@ func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, ui
 requestHead = 0
 }
 // requestBottom is the lowest block we want included in the query
-// Ideally, we want to include just below own head
+// Ideally, we want to include the one just below our own head
 requestBottom := int(localHeight - 1)
 if requestBottom < 0 {
 requestBottom = 0
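
The corrected comment describes how the lower bound of the header query is picked: ideally the block just below our own head, floored at zero for very young chains. A rough standalone sketch of that clamping follows; requestBounds is an illustrative helper, not the real calculateRequestSpan.

package main

import "fmt"

// requestBounds sketches how the two ends of a header query might be clamped:
// the head sits just below the remote peer's height and the bottom just below
// our own head, with both floored at zero.
func requestBounds(remoteHeight, localHeight uint64) (head, bottom int) {
	// requestHead is the highest block we will ask the peer for.
	head = int(remoteHeight) - 1
	if head < 0 {
		head = 0
	}
	// requestBottom is the lowest block we want included in the query;
	// ideally the one just below our own head.
	bottom = int(localHeight - 1)
	if bottom < 0 {
		bottom = 0
	}
	return head, bottom
}

func main() {
	fmt.Println(requestBounds(100, 42)) // 99 41
	fmt.Println(requestBounds(1, 0))    // 0 0 (clamped)
}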


@@ -1165,7 +1165,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
 chain := testChainBase.shorten(1)
 
 for i, tt := range tests {
-// Register a new peer and ensure it's presence
+// Register a new peer and ensure its presence
 id := fmt.Sprintf("test %d", i)
 if err := tester.newPeer(id, protocol, chain); err != nil {
 t.Fatalf("test %d: failed to register new peer: %v", i, err)


@@ -234,8 +234,7 @@ func (q *queue) ShouldThrottleReceipts() bool {
 }
 
 // resultSlots calculates the number of results slots available for requests
-// whilst adhering to both the item and the memory limit too of the results
-// cache.
+// whilst adhering to both the item and the memory limits of the result cache.
 func (q *queue) resultSlots(pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}) int {
 // Calculate the maximum length capped by the memory limit
 limit := len(q.resultCache)
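
The reworded doc comment says the slot count honours both the item limit (the length of the result cache) and a memory limit. A hedged sketch of that double cap is below; resultSlotsSketch and its parameters (itemSize, memoryBudget) are made-up names standing in for the queue's real fields.

package main

import "fmt"

// resultSlotsSketch shows the idea behind the comment above: the number of
// slots handed out is bounded both by the size of the result cache (item
// limit) and by an approximate memory budget.
func resultSlotsSketch(cacheLen, pendingItems int, itemSize, memoryBudget uint64) int {
	// Start from the item limit: every cache entry is one potential slot.
	limit := cacheLen
	// Tighten the limit if the memory budget allows fewer items than that.
	if itemSize > 0 {
		if byMemory := int(memoryBudget / itemSize); byMemory < limit {
			limit = byMemory
		}
	}
	// Slots already consumed by in-flight or completed work are unavailable.
	slots := limit - pendingItems
	if slots < 0 {
		slots = 0
	}
	return slots
}

func main() {
	// 2048 cache entries, 100 already pending, ~50 KiB per item, 64 MiB budget.
	fmt.Println(resultSlotsSketch(2048, 100, 50*1024, 64*1024*1024))
}
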
@@ -371,7 +370,7 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
 }
 
 // Results retrieves and permanently removes a batch of fetch results from
-// the cache. the result slice will be empty if the queue has been closed.
+// the cache. The result slice will be empty if the queue has been closed.
 func (q *queue) Results(block bool) []*fetchResult {
 q.lock.Lock()
 defer q.lock.Unlock()
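
The fixed sentence documents the Results contract: a batch is handed out and permanently removed, and a closed queue yields an empty slice. The toy queue below illustrates that contract with a sync.Cond; resultQueue and its methods are assumptions for illustration, not the real queue's internals.

package main

import (
	"fmt"
	"sync"
)

// resultQueue is a toy stand-in for the downloader queue: Results(block)
// hands out and removes whatever is buffered, optionally waiting for items,
// and returns an empty slice once the queue has been closed.
type resultQueue struct {
	lock    sync.Mutex
	cond    *sync.Cond
	results []string
	closed  bool
}

func newResultQueue() *resultQueue {
	q := &resultQueue{}
	q.cond = sync.NewCond(&q.lock)
	return q
}

// Deliver buffers one result and wakes a blocked Results call.
func (q *resultQueue) Deliver(item string) {
	q.lock.Lock()
	defer q.lock.Unlock()
	q.results = append(q.results, item)
	q.cond.Signal()
}

// Close marks the queue as closed and releases any blocked callers.
func (q *resultQueue) Close() {
	q.lock.Lock()
	defer q.lock.Unlock()
	q.closed = true
	q.cond.Broadcast()
}

// Results retrieves and permanently removes the buffered batch. If block is
// true it waits until something arrives or the queue is closed.
func (q *resultQueue) Results(block bool) []string {
	q.lock.Lock()
	defer q.lock.Unlock()
	for block && len(q.results) == 0 && !q.closed {
		q.cond.Wait()
	}
	batch := q.results
	q.results = nil
	return batch
}

func main() {
	q := newResultQueue()
	q.Deliver("block #1")
	fmt.Println(q.Results(false)) // [block #1]
	q.Close()
	fmt.Println(q.Results(true)) // [] — a closed queue yields an empty batch
}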