2020-09-11 06:35:51 +00:00
|
|
|
package download
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2020-09-20 09:47:24 +00:00
|
|
|
"errors"
|
2020-09-11 06:35:51 +00:00
|
|
|
"fmt"
|
2020-09-20 09:47:24 +00:00
|
|
|
"io"
|
2020-09-11 06:35:51 +00:00
|
|
|
"math/big"
|
2020-09-20 09:47:24 +00:00
|
|
|
"os"
|
2020-09-11 06:35:51 +00:00
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/ledgerwatch/turbo-geth/common"
|
|
|
|
"github.com/ledgerwatch/turbo-geth/consensus/ethash"
|
|
|
|
"github.com/ledgerwatch/turbo-geth/core/types"
|
|
|
|
"github.com/ledgerwatch/turbo-geth/log"
|
|
|
|
"github.com/ledgerwatch/turbo-geth/params"
|
|
|
|
"github.com/ledgerwatch/turbo-geth/turbo/stages/headerdownload"
|
|
|
|
)
|
|
|
|
|
|
|
|
type chainReader struct {
|
|
|
|
config *params.ChainConfig
|
|
|
|
}
|
|
|
|
|
|
|
|
func (cr chainReader) Config() *params.ChainConfig { return cr.config }
|
|
|
|
func (cr chainReader) CurrentHeader() *types.Header { panic("") }
|
|
|
|
func (cr chainReader) GetHeader(hash common.Hash, number uint64) *types.Header { panic("") }
|
|
|
|
func (cr chainReader) GetHeaderByNumber(number uint64) *types.Header { panic("") }
|
|
|
|
func (cr chainReader) GetHeaderByHash(hash common.Hash) *types.Header { panic("") }
|
|
|
|
|
|
|
|
// processSegment integrates one chain segment (a contiguous run of headers)
// into the header download state. It locates anchors (attachment points below
// the segment) and tips (attachment points above), verifies PoW seals, and
// then applies one of four topological operations: Connect, ExtendDown,
// ExtendUp, or NewAnchor. On success the segment is added to the buffer.
// Errors are logged and the segment is dropped; nothing is returned.
func processSegment(hd *headerdownload.HeaderDownload, segment *headerdownload.ChainSegment) {
	log.Info(hd.AnchorState())
	log.Info("processSegment", "from", segment.Headers[0].Number.Uint64(), "to", segment.Headers[len(segment.Headers)-1].Number.Uint64())
	// Find existing anchors that this segment attaches to from above;
	// anchors found invalid are removed from the tree before proceeding.
	foundAnchor, start, anchorParent, invalidAnchors := hd.FindAnchors(segment)
	if len(invalidAnchors) > 0 {
		if _, err1 := hd.InvalidateAnchors(anchorParent, invalidAnchors); err1 != nil {
			log.Error("Invalidation of anchor failed", "error", err1)
		}
		log.Warn(fmt.Sprintf("Invalidated anchors %v for %x", invalidAnchors, anchorParent))
	}
	foundTip, end, penalty := hd.FindTip(segment, start) // penalty is checked immediately below; segment is rejected on any penalty
	if penalty != headerdownload.NoPenalty {
		log.Error(fmt.Sprintf("FindTip penalty %d", penalty))
		return
	}
	currentTime := uint64(time.Now().Unix())
	// powDepth is the depth down to which PoW seals still need verification;
	// it is propagated when extending the tree downwards.
	var powDepth int
	if powDepth1, err1 := hd.VerifySeals(segment, foundAnchor, foundTip, start, end, currentTime); err1 == nil {
		powDepth = powDepth1
	} else {
		log.Error("VerifySeals", "error", err1)
		return
	}
	// Persist whatever is currently buffered before mutating the tree;
	// if flushing fails, the new data is discarded to keep state consistent.
	if err1 := hd.FlushBuffer(); err1 != nil {
		log.Error("Could not flush the buffer, will discard the data", "error", err1)
		return
	}
	// There are 4 cases, depending on whether the segment attaches to an
	// existing anchor (below), an existing tip (above), both, or neither.
	if foundAnchor {
		if foundTip {
			// Connect: segment bridges an existing tip to an existing anchor.
			if err1 := hd.Connect(segment, start, end, currentTime); err1 != nil {
				log.Error("Connect failed", "error", err1)
			} else {
				hd.AddSegmentToBuffer(segment, start, end)
				log.Info("Connected", "start", start, "end", end)
			}
		} else {
			// ExtendDown: segment attaches to an anchor only, growing the tree downwards.
			if err1 := hd.ExtendDown(segment, start, end, powDepth, currentTime); err1 != nil {
				log.Error("ExtendDown failed", "error", err1)
			} else {
				hd.AddSegmentToBuffer(segment, start, end)
				log.Info("Extended Down", "start", start, "end", end)
			}
		}
	} else if foundTip {
		if end == 0 {
			// The entire segment is already covered by existing tips.
			log.Info("No action needed, tip already exists")
		} else {
			// ExtendUp: segment attaches to a tip only, growing the tree upwards.
			if err1 := hd.ExtendUp(segment, start, end, currentTime); err1 != nil {
				log.Error("ExtendUp failed", "error", err1)
			} else {
				hd.AddSegmentToBuffer(segment, start, end)
				log.Info("Extended Up", "start", start, "end", end)
			}
		}
	} else {
		// NewAnchor: segment attaches to nothing; start a new anchor.
		if err1 := hd.NewAnchor(segment, start, end, currentTime); err1 != nil {
			log.Error("NewAnchor failed", "error", err1)
		} else {
			hd.AddSegmentToBuffer(segment, start, end)
			log.Info("NewAnchor", "start", start, "end", end)
		}
	}
	// If the segment reached down to its full extent or added new headers at
	// the top, check whether the chain can now be initiated from genesis.
	if start == 0 || end > 0 {
		hd.CheckInitiation(segment, params.MainnetGenesisHash)
	}
}
|
|
|
|
|
|
|
|
// Downloader needs to be run from a go-routine, and it is in the sole control of the HeaderDownloader object
|
2020-09-20 09:47:24 +00:00
|
|
|
func Downloader(
|
|
|
|
ctx context.Context,
|
|
|
|
filesDir string,
|
2020-09-27 20:32:05 +00:00
|
|
|
bufferLimit int,
|
2020-09-20 09:47:24 +00:00
|
|
|
newBlockCh chan NewBlockFromSentry,
|
|
|
|
newBlockHashCh chan NewBlockHashFromSentry,
|
|
|
|
headersCh chan BlockHeadersFromSentry,
|
|
|
|
penaltyCh chan PenaltyMsg,
|
|
|
|
reqHeadersCh chan headerdownload.HeaderRequest,
|
|
|
|
) {
|
2020-09-11 06:35:51 +00:00
|
|
|
//config := eth.DefaultConfig.Ethash
|
|
|
|
engine := ethash.New(ethash.Config{
|
|
|
|
CachesInMem: 1,
|
|
|
|
CachesLockMmap: false,
|
|
|
|
DatasetDir: "ethash",
|
|
|
|
DatasetsInMem: 1,
|
|
|
|
DatasetsOnDisk: 0,
|
|
|
|
DatasetsLockMmap: false,
|
|
|
|
}, nil, false)
|
|
|
|
cr := chainReader{config: params.MainnetChainConfig}
|
|
|
|
calcDiffFunc := func(childTimestamp uint64, parentTime uint64, parentDifficulty, parentNumber *big.Int, parentHash, parentUncleHash common.Hash) *big.Int {
|
|
|
|
return engine.CalcDifficulty(cr, childTimestamp, parentTime, parentDifficulty, parentNumber, parentHash, parentUncleHash)
|
|
|
|
}
|
|
|
|
verifySealFunc := func(header *types.Header) error {
|
|
|
|
return engine.VerifySeal(cr, header)
|
|
|
|
}
|
|
|
|
hd := headerdownload.NewHeaderDownload(
|
|
|
|
filesDir,
|
2020-09-27 20:32:05 +00:00
|
|
|
bufferLimit, /* bufferLimit */
|
|
|
|
16*1024, /* tipLimit */
|
|
|
|
1024, /* initPowDepth */
|
2020-09-11 06:35:51 +00:00
|
|
|
calcDiffFunc,
|
|
|
|
verifySealFunc,
|
|
|
|
3600, /* newAnchor future limit */
|
|
|
|
3600, /* newAnchor past limit */
|
|
|
|
)
|
2020-09-27 20:32:05 +00:00
|
|
|
hd.InitHardCodedTips("hard-coded-headers.dat")
|
|
|
|
if recovered, err := hd.RecoverFromFiles(uint64(time.Now().Unix())); err != nil || !recovered {
|
|
|
|
if err != nil {
|
|
|
|
log.Error("Recovery from file failed, will start from scratch", "error", err)
|
|
|
|
}
|
|
|
|
// Insert hard-coded headers if present
|
|
|
|
if _, err := os.Stat("hard-coded-headers.dat"); err == nil {
|
|
|
|
if f, err1 := os.Open("hard-coded-headers.dat"); err1 == nil {
|
|
|
|
var hBuffer [headerdownload.HeaderSerLength]byte
|
|
|
|
i := 0
|
|
|
|
for {
|
|
|
|
var h types.Header
|
|
|
|
if _, err2 := io.ReadFull(f, hBuffer[:]); err2 == nil {
|
|
|
|
headerdownload.DeserialiseHeader(&h, hBuffer[:])
|
|
|
|
} else if errors.Is(err2, io.EOF) {
|
|
|
|
break
|
|
|
|
} else {
|
|
|
|
log.Error("Failed to read hard coded header", "i", i, "error", err2)
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if err2 := hd.HardCodedHeader(&h, uint64(time.Now().Unix())); err2 != nil {
|
|
|
|
log.Error("Failed to insert hard coded header", "i", i, "block", h.Number.Uint64(), "error", err2)
|
|
|
|
} else {
|
|
|
|
hd.AddHeaderToBuffer(&h)
|
|
|
|
}
|
|
|
|
i++
|
2020-09-20 09:47:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-09-11 06:35:51 +00:00
|
|
|
}
|
2020-09-27 20:32:05 +00:00
|
|
|
log.Info(hd.AnchorState())
|
2020-09-11 06:35:51 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case newBlockReq := <-newBlockCh:
|
2020-09-27 20:32:05 +00:00
|
|
|
if segments, penalty, err := hd.SingleHeaderAsSegment(newBlockReq.Block.Header()); err == nil {
|
2020-09-11 06:35:51 +00:00
|
|
|
if penalty == headerdownload.NoPenalty {
|
|
|
|
processSegment(hd, segments[0]) // There is only one segment in this case
|
|
|
|
} else {
|
|
|
|
// Send penalty back to the sentry
|
|
|
|
penaltyCh <- PenaltyMsg{SentryMsg: newBlockReq.SentryMsg, penalty: penalty}
|
|
|
|
}
|
|
|
|
} else {
|
2020-09-27 20:32:05 +00:00
|
|
|
log.Error("SingleHeaderAsSegment failed", "error", err)
|
2020-09-11 06:35:51 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
log.Info(fmt.Sprintf("NewBlockMsg{blockNumber: %d}", newBlockReq.Block.NumberU64()))
|
2020-09-20 09:47:24 +00:00
|
|
|
case newBlockHashReq := <-newBlockHashCh:
|
|
|
|
for _, announce := range newBlockHashReq.NewBlockHashesData {
|
|
|
|
if !hd.HasTip(announce.Hash) {
|
|
|
|
log.Info(fmt.Sprintf("Sending header request {hash: %x, height: %d, length: %d}", announce.Hash, announce.Number, 1))
|
|
|
|
reqHeadersCh <- headerdownload.HeaderRequest{
|
|
|
|
Hash: announce.Hash,
|
|
|
|
Number: announce.Number,
|
|
|
|
Length: 1,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
case headersReq := <-headersCh:
|
2020-09-27 20:32:05 +00:00
|
|
|
if segments, penalty, err := hd.SplitIntoSegments(headersReq.headers); err == nil {
|
2020-09-20 09:47:24 +00:00
|
|
|
if penalty == headerdownload.NoPenalty {
|
|
|
|
for _, segment := range segments {
|
|
|
|
processSegment(hd, segment)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
penaltyCh <- PenaltyMsg{SentryMsg: headersReq.SentryMsg, penalty: penalty}
|
|
|
|
}
|
|
|
|
} else {
|
2020-09-27 20:32:05 +00:00
|
|
|
log.Error("SingleHeaderAsSegment failed", "error", err)
|
2020-09-20 09:47:24 +00:00
|
|
|
}
|
|
|
|
log.Info("HeadersMsg processed")
|
2020-09-11 06:35:51 +00:00
|
|
|
case <-hd.RequestQueueTimer.C:
|
|
|
|
fmt.Printf("RequestQueueTimer ticked\n")
|
|
|
|
case <-ctx.Done():
|
|
|
|
return
|
|
|
|
}
|
|
|
|
reqs := hd.RequestMoreHeaders(uint64(time.Now().Unix()), 5 /*timeout */)
|
|
|
|
for _, req := range reqs {
|
2020-09-20 09:47:24 +00:00
|
|
|
//log.Info(fmt.Sprintf("Sending header request {hash: %x, height: %d, length: %d}", req.Hash, req.Number, req.Length))
|
2020-09-11 06:35:51 +00:00
|
|
|
reqHeadersCh <- *req
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|