@@ -23,7 +23,6 @@ import (
 	"fmt"
 	"math"
 	"math/big"
-	"strings"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -248,9 +247,10 @@ func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHea
 	getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
 	getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error {
 
-	log.Trace(fmt.Sprint("Registering peer", id))
-	if err := d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
-		log.Error(fmt.Sprint("Register failed:", err))
+	logger := log.New("peer", id)
+	logger.Trace("Registering sync peer")
+	if err := d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData, logger)); err != nil {
+		logger.Error("Failed to register sync peer", "err", err)
 		return err
 	}
 	d.qosReduceConfidence()
@@ -263,9 +263,10 @@ func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHea
 // the queue.
 func (d *Downloader) UnregisterPeer(id string) error {
 	// Unregister the peer from the active peer set and revoke any fetch tasks
-	log.Trace(fmt.Sprint("Unregistering peer", id))
+	logger := log.New("peer", id)
+	logger.Trace("Unregistering sync peer")
 	if err := d.peers.Unregister(id); err != nil {
-		log.Error(fmt.Sprint("Unregister failed:", err))
+		logger.Error("Failed to unregister sync peer", "err", err)
 		return err
 	}
 	d.queue.Revoke(id)
@@ -284,24 +285,19 @@ func (d *Downloader) UnregisterPeer(id string) error {
 // Synchronise tries to sync up our local block chain with a remote peer, both
 // adding various sanity checks as well as wrapping it with various log entries.
 func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
-	log.Trace(fmt.Sprintf("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td))
-
 	err := d.synchronise(id, head, td, mode)
 	switch err {
 	case nil:
-		log.Trace(fmt.Sprintf("Synchronisation completed"))
-
 	case errBusy:
-		log.Trace(fmt.Sprintf("Synchronisation already in progress"))
 
 	case errTimeout, errBadPeer, errStallingPeer,
 		errEmptyHeaderSet, errPeersUnavailable, errTooOld,
 		errInvalidAncestor, errInvalidChain:
-		log.Debug(fmt.Sprintf("Removing peer %v: %v", id, err))
+		log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
 		d.dropPeer(id)
 
 	default:
-		log.Warn(fmt.Sprintf("Synchronisation failed: %v", err))
+		log.Warn("Synchronisation failed, retrying", "err", err)
 	}
 	return err
 }
@@ -322,7 +318,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
 
 	// Post a user notification of the sync (only once per session)
 	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
-		log.Info(fmt.Sprint("Block synchronisation started"))
+		log.Info("Block synchronisation started")
 	}
 	// Reset the queue, peer set and wake channels to clean any internal leftover state
 	d.queue.Reset()
@@ -387,9 +383,9 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
 		return errTooOld
 	}
 
-	log.Debug(fmt.Sprintf("Synchronising with the network using: %s [eth/%d]", p.id, p.version))
+	log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash.Hex()[2:10], "td", td, "mode", d.mode)
 	defer func(start time.Time) {
-		log.Debug(fmt.Sprintf("Synchronisation terminated after %v", time.Since(start)))
+		log.Debug("Synchronisation terminated", "elapsed", time.Since(start))
 	}(time.Now())
 
 	// Look up the sync boundaries: the common ancestor and the target block
@@ -437,7 +433,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
 				origin = 0
 			}
 		}
-		log.Debug(fmt.Sprintf("Fast syncing until pivot block #%d", pivot))
+		log.Debug("Fast syncing until pivot block", "pivot", pivot)
 	}
 	d.queue.Prepare(origin+1, d.mode, pivot, latest)
 	if d.syncInitHook != nil {
@@ -522,13 +518,14 @@ func (d *Downloader) Terminate() {
 // fetchHeight retrieves the head header of the remote peer to aid in estimating
 // the total time a pending synchronisation would take.
 func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
-	log.Debug(fmt.Sprintf("%v: retrieving remote chain height", p))
+	p.logger.Debug("Retrieving remote chain height")
 
 	// Request the advertised remote head block and wait for the response
 	head, _ := p.currentHead()
 	go p.getRelHeaders(head, 1, 0, false)
 
-	timeout := time.After(d.requestTTL())
+	ttl := d.requestTTL()
+	timeout := time.After(ttl)
 	for {
 		select {
 		case <-d.cancelCh:
@@ -537,19 +534,21 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
 		case packet := <-d.headerCh:
 			// Discard anything not from the origin peer
 			if packet.PeerId() != p.id {
-				log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packet.PeerId()))
+				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
 				break
 			}
 			// Make sure the peer actually gave something valid
 			headers := packet.(*headerPack).headers
 			if len(headers) != 1 {
-				log.Debug(fmt.Sprintf("%v: invalid number of head headers: %d != 1", p, len(headers)))
+				p.logger.Debug("Multiple headers for single request", "headers", len(headers))
 				return nil, errBadPeer
 			}
-			return headers[0], nil
+			head := headers[0]
+			p.logger.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash().Hex()[2:10])
+			return head, nil
 
 		case <-timeout:
-			log.Debug(fmt.Sprintf("%v: head header timeout", p))
+			p.logger.Debug("Waiting for head header timed out", "elapsed", ttl)
 			return nil, errTimeout
 
 		case <-d.bodyCh:
@@ -566,10 +565,10 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
 // In the rare scenario when we ended up on a long reorganisation (i.e. none of
 // the head links match), we do a binary search to find the common ancestor.
 func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
-	log.Debug(fmt.Sprintf("%v: looking for common ancestor (remote height %d)", p, height))
-
 	// Figure out the valid ancestor range to prevent rewrite attacks
 	floor, ceil := int64(-1), d.headHeader().Number.Uint64()
+
+	p.logger.Debug("Looking for common ancestor", "local", ceil, "remote", height)
 	if d.mode == FullSync {
 		ceil = d.headBlock().NumberU64()
 	} else if d.mode == FastSync {
@@ -597,7 +596,9 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 
 	// Wait for the remote response to the head fetch
 	number, hash := uint64(0), common.Hash{}
-	timeout := time.After(d.requestTTL())
+
+	ttl := d.requestTTL()
+	timeout := time.After(ttl)
 
 	for finished := false; !finished; {
 		select {
@@ -607,19 +608,19 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 		case packet := <-d.headerCh:
 			// Discard anything not from the origin peer
 			if packet.PeerId() != p.id {
-				log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packet.PeerId()))
+				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
 				break
 			}
 			// Make sure the peer actually gave something valid
 			headers := packet.(*headerPack).headers
 			if len(headers) == 0 {
-				log.Warn(fmt.Sprintf("%v: empty head header set", p))
+				p.logger.Warn("Empty head header set")
 				return 0, errEmptyHeaderSet
 			}
 			// Make sure the peer's reply conforms to the request
 			for i := 0; i < len(headers); i++ {
 				if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
-					log.Warn(fmt.Sprintf("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i)*16, number))
+					p.logger.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number)
 					return 0, errInvalidChain
 				}
 			}
@@ -636,7 +637,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 
 					// If every header is known, even future ones, the peer straight out lied about its head
 					if number > height && i == limit-1 {
-						log.Warn(fmt.Sprintf("%v: lied about chain head: reported %d, found above %d", p, height, number))
+						p.logger.Warn("Lied about chain head", "reported", height, "found", number)
 						return 0, errStallingPeer
 					}
 					break
@@ -644,7 +645,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 			}
 
 		case <-timeout:
-			log.Debug(fmt.Sprintf("%v: head header timeout", p))
+			p.logger.Debug("Waiting for head header timed out", "elapsed", ttl)
 			return 0, errTimeout
 
 		case <-d.bodyCh:
@@ -656,10 +657,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 	// If the head fetch already found an ancestor, return
 	if !common.EmptyHash(hash) {
 		if int64(number) <= floor {
-			log.Warn(fmt.Sprintf("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor))
+			p.logger.Warn("Ancestor below allowance", "number", number, "hash", hash.Hex()[2:10], "allowance", floor)
 			return 0, errInvalidAncestor
 		}
-		log.Debug(fmt.Sprintf("%v: common ancestor: #%d [%x…]", p, number, hash[:4]))
+		p.logger.Debug("Found common ancestor", "number", number, "hash", hash.Hex()[2:10])
 		return number, nil
 	}
 	// Ancestor not found, we need to binary search over our chain
@@ -671,7 +672,9 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 		// Split our chain interval in two, and request the hash to cross check
 		check := (start + end) / 2
 
-		timeout := time.After(d.requestTTL())
+		ttl := d.requestTTL()
+		timeout := time.After(ttl)
+
 		go p.getAbsHeaders(uint64(check), 1, 0, false)
 
 		// Wait until a reply arrives to this request
@@ -683,13 +686,13 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 			case packer := <-d.headerCh:
 				// Discard anything not from the origin peer
 				if packer.PeerId() != p.id {
-					log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packer.PeerId()))
+					log.Debug("Received headers from incorrect peer", "peer", packer.PeerId())
 					break
 				}
 				// Make sure the peer actually gave something valid
 				headers := packer.(*headerPack).headers
 				if len(headers) != 1 {
-					log.Debug(fmt.Sprintf("%v: invalid search header set (%d)", p, len(headers)))
+					p.logger.Debug("Multiple headers for single request", "headers", len(headers))
 					return 0, errBadPeer
 				}
 				arrived = true
@@ -701,13 +704,13 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 				}
 				header := d.getHeader(headers[0].Hash()) // Independent of sync mode, header surely exists
 				if header.Number.Uint64() != check {
-					log.Debug(fmt.Sprintf("%v: non requested header #%d [%x…], instead of #%d", p, header.Number, header.Hash().Bytes()[:4], check))
+					p.logger.Debug("Received non requested header", "number", header.Number, "hash", header.Hash().Hex()[2:10], "request", check)
 					return 0, errBadPeer
 				}
 				start = check
 
 			case <-timeout:
-				log.Debug(fmt.Sprintf("%v: search header timeout", p))
+				p.logger.Debug("Waiting for search header timed out", "elapsed", ttl)
 				return 0, errTimeout
 
 			case <-d.bodyCh:
@@ -719,10 +722,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 	}
 	// Ensure valid ancestry and return
 	if int64(start) <= floor {
-		log.Warn(fmt.Sprintf("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor))
+		p.logger.Warn("Ancestor below allowance", "number", start, "hash", hash.Hex()[2:10], "allowance", floor)
 		return 0, errInvalidAncestor
 	}
-	log.Debug(fmt.Sprintf("%v: common ancestor: #%d [%x…]", p, start, hash[:4]))
+	p.logger.Debug("Found common ancestor", "number", start, "hash", hash.Hex()[2:10])
 	return start, nil
 }
@@ -735,8 +738,8 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
 // can fill in the skeleton - not even the origin peer - it's assumed invalid and
 // the origin is dropped.
 func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
-	log.Debug(fmt.Sprintf("%v: directing header downloads from #%d", p, from))
-	defer log.Debug(fmt.Sprintf("%v: header download terminated", p))
+	p.logger.Debug("Directing header downloads", "origin", from)
+	defer p.logger.Debug("Header download terminated")
 
 	// Create a timeout timer, and the associated header fetcher
 	skeleton := true // Skeleton assembly phase or finishing up
@@ -745,15 +748,18 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 	<-timeout.C // timeout channel should be initially empty
 	defer timeout.Stop()
 
+	var ttl time.Duration
 	getHeaders := func(from uint64) {
 		request = time.Now()
-		timeout.Reset(d.requestTTL())
 
+		ttl = d.requestTTL()
+		timeout.Reset(ttl)
+
 		if skeleton {
-			log.Trace(fmt.Sprintf("%v: fetching %d skeleton headers from #%d", p, MaxHeaderFetch, from))
+			p.logger.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
 			go p.getAbsHeaders(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
 		} else {
-			log.Trace(fmt.Sprintf("%v: fetching %d full headers from #%d", p, MaxHeaderFetch, from))
+			p.logger.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
 			go p.getAbsHeaders(from, MaxHeaderFetch, 0, false)
 		}
 	}
@@ -768,7 +774,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 		case packet := <-d.headerCh:
 			// Make sure the active peer is giving us the skeleton headers
 			if packet.PeerId() != p.id {
-				log.Debug(fmt.Sprintf("Received skeleton headers from incorrect peer (%s)", packet.PeerId()))
+				log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
 				break
 			}
 			headerReqTimer.UpdateSince(request)
@@ -782,7 +788,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 			}
 			// If no more headers are inbound, notify the content fetchers and return
 			if packet.Items() == 0 {
-				log.Debug(fmt.Sprintf("%v: no available headers", p))
+				p.logger.Debug("No more headers available")
 				select {
 				case d.headerProcCh <- nil:
 					return nil
@@ -796,7 +802,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 			if skeleton {
 				filled, proced, err := d.fillHeaderSkeleton(from, headers)
 				if err != nil {
-					log.Debug(fmt.Sprintf("%v: skeleton chain invalid: %v", p, err))
+					p.logger.Debug("Skeleton chain invalid", "err", err)
 					return errInvalidChain
 				}
 				headers = filled[proced:]
@@ -804,7 +810,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 			}
 			// Insert all the new headers and fetch the next batch
 			if len(headers) > 0 {
-				log.Trace(fmt.Sprintf("%v: schedule %d headers from #%d", p, len(headers), from))
+				p.logger.Trace("Scheduling new headers", "count", len(headers), "from", from)
 				select {
 				case d.headerProcCh <- headers:
 				case <-d.cancelCh:
@@ -816,7 +822,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 
 		case <-timeout.C:
 			// Header retrieval timed out, consider the peer bad and drop
-			log.Debug(fmt.Sprintf("%v: header request timed out", p))
+			p.logger.Debug("Header request timed out", "elapsed", ttl)
 			headerTimeoutMeter.Mark(1)
 			d.dropPeer(p.id)
 
@@ -846,7 +852,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
 // The method returs the entire filled skeleton and also the number of headers
 // already forwarded for processing.
 func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
-	log.Debug(fmt.Sprintf("Filling up skeleton from #%d", from))
+	log.Debug("Filling up skeleton", "from", from)
 	d.queue.ScheduleSkeleton(from, skeleton)
 
 	var (
@@ -865,9 +871,9 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
 	)
 	err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire,
 		d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
-		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "Header")
+		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")
 
-	log.Debug(fmt.Sprintf("Skeleton fill terminated: %v", err))
+	log.Debug("Skeleton fill terminated", "err", err)
 
 	filled, proced := d.queue.RetrieveHeaders()
 	return filled, proced, err
@@ -877,7 +883,7 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
 // available peers, reserving a chunk of blocks for each, waiting for delivery
 // and also periodically checking for timeouts.
 func (d *Downloader) fetchBodies(from uint64) error {
-	log.Debug(fmt.Sprintf("Downloading block bodies from #%d", from))
+	log.Debug("Downloading block bodies", "origin", from)
 
 	var (
 		deliver = func(packet dataPack) (int, error) {
@@ -891,9 +897,9 @@ func (d *Downloader) fetchBodies(from uint64) error {
 	)
 	err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire,
 		d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
-		d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "Body")
+		d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")
 
-	log.Debug(fmt.Sprintf("Block body download terminated: %v", err))
+	log.Debug("Block body download terminated", "err", err)
 	return err
 }
 
@@ -901,7 +907,7 @@ func (d *Downloader) fetchBodies(from uint64) error {
 // available peers, reserving a chunk of receipts for each, waiting for delivery
 // and also periodically checking for timeouts.
 func (d *Downloader) fetchReceipts(from uint64) error {
-	log.Debug(fmt.Sprintf("Downloading receipts from #%d", from))
+	log.Debug("Downloading transaction receipts", "origin", from)
 
 	var (
 		deliver = func(packet dataPack) (int, error) {
@@ -915,9 +921,9 @@ func (d *Downloader) fetchReceipts(from uint64) error {
 	)
 	err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire,
 		d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
-		d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt")
+		d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")
 
-	log.Debug(fmt.Sprintf("Receipt download terminated: %v", err))
+	log.Debug("Transaction receipt download terminated", "err", err)
 	return err
 }
 
@@ -925,7 +931,7 @@ func (d *Downloader) fetchReceipts(from uint64) error {
 // available peers, reserving a chunk of nodes for each, waiting for delivery and
 // also periodically checking for timeouts.
 func (d *Downloader) fetchNodeData() error {
-	log.Debug(fmt.Sprintf("Downloading node state data"))
+	log.Debug("Downloading node state data")
 
 	var (
 		deliver = func(packet dataPack) (int, error) {
@@ -933,12 +939,12 @@ func (d *Downloader) fetchNodeData() error {
 			return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(delivered int, progressed bool, err error) {
 				// If the peer returned old-requested data, forgive
 				if err == trie.ErrNotRequested {
-					log.Debug(fmt.Sprintf("peer %s: replied to stale state request, forgiving", packet.PeerId()))
+					log.Debug("Forgiving reply to stale state request", "peer", packet.PeerId())
 					return
 				}
 				if err != nil {
 					// If the node data processing failed, the root hash is very wrong, abort
-					log.Error(fmt.Sprintf("peer %s: state processing failed: %v", packet.PeerId(), err))
+					log.Error("State processing failed", "peer", packet.PeerId(), "err", err)
 					d.cancel()
 					return
 				}
@@ -957,12 +963,12 @@ func (d *Downloader) fetchNodeData() error {
 
 				// If real database progress was made, reset any fast-sync pivot failure
 				if progressed && atomic.LoadUint32(&d.fsPivotFails) > 1 {
-					log.Debug(fmt.Sprintf("fast-sync progressed, resetting fail counter from %d", atomic.LoadUint32(&d.fsPivotFails)))
+					log.Debug("Fast-sync progressed, resetting fail counter", "previous", atomic.LoadUint32(&d.fsPivotFails))
 					atomic.StoreUint32(&d.fsPivotFails, 1) // Don't ever reset to 0, as that will unlock the pivot block
 				}
 				// Log a message to the user and return
 				if delivered > 0 {
-					log.Info(fmt.Sprintf("imported %3d state entries in %9v: processed %d, pending at least %d", delivered, common.PrettyDuration(time.Since(start)), syncStatsStateDone, pending))
+					log.Info("Imported new state entries", "count", delivered, "elapsed", common.PrettyDuration(time.Since(start)), "processed", syncStatsStateDone, "pending", pending)
 				}
 			})
 		}
@@ -977,9 +983,9 @@ func (d *Downloader) fetchNodeData() error {
 	)
 	err := d.fetchParts(errCancelStateFetch, d.stateCh, deliver, d.stateWakeCh, expire,
 		d.queue.PendingNodeData, d.queue.InFlightNodeData, throttle, reserve, nil, fetch,
-		d.queue.CancelNodeData, capacity, d.peers.NodeDataIdlePeers, setIdle, "State")
+		d.queue.CancelNodeData, capacity, d.peers.NodeDataIdlePeers, setIdle, "states")
 
-	log.Debug(fmt.Sprintf("Node state data download terminated: %v", err))
+	log.Debug("Node state data download terminated", "err", err)
 	return err
 }
 
@@ -1044,11 +1050,11 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 				// Issue a log to the user to see what's going on
 				switch {
 				case err == nil && packet.Items() == 0:
-					log.Trace(fmt.Sprintf("%s: no %s delivered", peer, strings.ToLower(kind)))
+					peer.logger.Trace("Requested data not delivered", "type", kind)
 				case err == nil:
-					log.Trace(fmt.Sprintf("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind)))
+					peer.logger.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats())
 				default:
-					log.Trace(fmt.Sprintf("%s: %s delivery failed: %v", peer, strings.ToLower(kind), err))
+					peer.logger.Trace("Failed to deliver retrieved data", "type", kind, "err", err)
 				}
 			}
 			// Blocks assembled, try to update the progress
@@ -1091,10 +1097,10 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 				// and latency of a peer separately, which requires pushing the measures capacity a bit and seeing
 				// how response times reacts, to it always requests one more than the minimum (i.e. min 2).
 				if fails > 2 {
-					log.Trace(fmt.Sprintf("%s: %s delivery timeout", peer, strings.ToLower(kind)))
+					peer.logger.Trace("Data delivery timed out", "type", kind)
 					setIdle(peer, 0)
 				} else {
-					log.Debug(fmt.Sprintf("%s: stalling %s delivery, dropping", peer, strings.ToLower(kind)))
+					peer.logger.Debug("Stalling delivery, dropping", "type", kind)
 					d.dropPeer(pid)
 				}
 			}
@@ -1102,7 +1108,7 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 			// If there's nothing more to fetch, wait or terminate
 			if pending() == 0 {
 				if !inFlight() && finished {
-					log.Debug(fmt.Sprintf("%s fetching completed", kind))
+					log.Debug("Data fetching completed", "type", kind)
 					return nil
 				}
 				break
@@ -1130,15 +1136,13 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 			if request == nil {
 				continue
 			}
-			log.Trace("", "msg", log.Lazy{Fn: func() string {
 			if request.From > 0 {
-				return fmt.Sprintf("%s: requesting %s(s) from #%d", peer, strings.ToLower(kind), request.From)
+				peer.logger.Trace("Requesting new batch of data", "type", kind, "from", request.From)
 			} else if len(request.Headers) > 0 {
-				return fmt.Sprintf("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number)
+				peer.logger.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number)
 			} else {
-				return fmt.Sprintf("%s: requesting %d %s(s)", peer, len(request.Hashes), strings.ToLower(kind))
+				peer.logger.Trace("Requesting new batch of data", "type", kind, "count", len(request.Hashes))
 			}
-			}})
 			// Fetch the chunk and make sure any errors return the hashes to the queue
 			if fetchHook != nil {
 				fetchHook(request.Headers)
@@ -1149,7 +1153,7 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
 				// case, the internal state of the downloader and the queue is very wrong so
 				// better hard crash and note the error instead of silently accumulating into
 				// a much bigger issue.
-				panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, strings.ToLower(kind)))
+				panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind))
 			}
 			running = true
 		}
@@ -1193,8 +1197,10 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 			if d.headBlock != nil {
 				curBlock = d.headBlock().Number()
 			}
-			log.Warn(fmt.Sprintf("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)",
-				len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, curFastBlock, lastBlock, curBlock))
+			log.Warn("Rolled back headers", "count", len(hashes),
+				"header", fmt.Sprintf("%d->%d", lastHeader, d.headHeader().Number),
+				"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
+				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock))
 
 			// If we're already past the pivot point, this could be an attack, thread carefully
 			if rollback[len(rollback)-1].Number.Uint64() > pivot {
@@ -1202,7 +1208,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 				if atomic.LoadUint32(&d.fsPivotFails) == 0 {
 					for _, header := range rollback {
 						if header.Number.Uint64() == pivot {
-							log.Warn(fmt.Sprintf("Fast-sync critical section failure, locked pivot to header #%d [%x…]", pivot, header.Hash().Bytes()[:4]))
+							log.Warn("Fast-sync critical section failure, locked pivot to header", "number", pivot, "hash", header.Hash().Hex()[2:10])
 							d.fsPivotLock = header
 						}
 					}
@@ -1298,7 +1304,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 					if n > 0 {
 						rollback = append(rollback, chunk[:n]...)
 					}
-					log.Debug(fmt.Sprintf("invalid header #%d [%x…]: %v", chunk[n].Number, chunk[n].Hash().Bytes()[:4], err))
+					log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash().Hex()[2:10], "err", err)
 					return errInvalidChain
 				}
 				// All verifications passed, store newly found uncertain headers
@@ -1310,7 +1316,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 				// If we're fast syncing and just pulled in the pivot, make sure it's the one locked in
 				if d.mode == FastSync && d.fsPivotLock != nil && chunk[0].Number.Uint64() <= pivot && chunk[len(chunk)-1].Number.Uint64() >= pivot {
 					if pivot := chunk[int(pivot-chunk[0].Number.Uint64())]; pivot.Hash() != d.fsPivotLock.Hash() {
-						log.Warn(fmt.Sprintf("Pivot doesn't match locked in version: have #%v [%x…], want #%v [%x…]", pivot.Number, pivot.Hash().Bytes()[:4], d.fsPivotLock.Number, d.fsPivotLock.Hash().Bytes()[:4]))
+						log.Warn("Pivot doesn't match locked in one", "remoteNumber", pivot.Number, "remoteHash", pivot.Hash().Hex()[2:10], "localNumber", d.fsPivotLock.Number, "localHash", d.fsPivotLock.Hash().Hex()[2:10])
 						return errInvalidChain
 					}
 				}
@@ -1327,7 +1333,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 				// Otherwise insert the headers for content retrieval
 				inserts := d.queue.Schedule(chunk, origin)
 				if len(inserts) != len(chunk) {
-					log.Debug(fmt.Sprintf("stale headers"))
+					log.Debug("Stale headers")
 					return errBadPeer
 				}
 			}
@@ -1358,10 +1364,15 @@ func (d *Downloader) processContent() error {
 			d.chainInsertHook(results)
 		}
 		// Actually import the blocks
-		log.Debug("", "msg", log.Lazy{Fn: func() string {
 		first, last := results[0].Header, results[len(results)-1].Header
-			return fmt.Sprintf("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4])
-		}})
+		log.Debug("Inserting downloaded chain", "items", len(results),
+			"from", log.Lazy{Fn: func() string {
+				return fmt.Sprintf("#%d [%x…]", first.Number, first.Hash().Bytes()[:4])
+			}},
+			"till", log.Lazy{Fn: func() string {
+				return fmt.Sprintf("#%d [%x…]", last.Number, last.Hash().Bytes()[:4])
+			}})
+
 		for len(results) != 0 {
 			// Check for any termination requests
 			select {
@@ -1395,14 +1406,14 @@ func (d *Downloader) processContent() error {
 			case len(receipts) > 0:
 				index, err = d.insertReceipts(blocks, receipts)
 				if err == nil && blocks[len(blocks)-1].NumberU64() == pivot {
-					log.Debug(fmt.Sprintf("Committing block #%d [%x…] as the new head", blocks[len(blocks)-1].Number(), blocks[len(blocks)-1].Hash().Bytes()[:4]))
+					log.Debug("Committing block as new head", "number", blocks[len(blocks)-1].Number(), "hash", blocks[len(blocks)-1].Hash().Hex()[2:10])
 					index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash())
 				}
 			default:
 				index, err = d.insertBlocks(blocks)
 			}
 			if err != nil {
-				log.Debug(fmt.Sprintf("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err))
+				log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash().Hex()[2:10], "err", err)
 				return errInvalidChain
 			}
 			// Shift the results to the next batch
@@ -1470,7 +1481,7 @@ func (d *Downloader) qosTuner() {
 		atomic.StoreUint64(&d.rttConfidence, conf)
 
 		// Log the new QoS values and sleep until the next RTT
-		log.Debug(fmt.Sprintf("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()))
+		log.Debug("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
 		select {
 		case <-d.quitCh:
 			return
@@ -1500,7 +1511,7 @@ func (d *Downloader) qosReduceConfidence() {
 	atomic.StoreUint64(&d.rttConfidence, conf)
 
 	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
-	log.Debug(fmt.Sprintf("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()))
+	log.Debug("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
 }
 
 // requestRTT returns the current target round trip time for a download request