// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"context"
	"fmt"
	"time"

	"github.com/ledgerwatch/turbo-geth/common"
	"github.com/ledgerwatch/turbo-geth/common/mclock"
	"github.com/ledgerwatch/turbo-geth/consensus"
	"github.com/ledgerwatch/turbo-geth/core/rawdb"
	"github.com/ledgerwatch/turbo-geth/core/types"
	"github.com/ledgerwatch/turbo-geth/ethdb"
	"github.com/ledgerwatch/turbo-geth/log"
)

// InsertStats tracks and reports on block insertion.
type InsertStats struct {
	queued, lastIndex, ignored int
	UsedGas                    uint64
	Processed                  int
	StartTime                  mclock.AbsTime
}

// statsReportLimit is the time limit during import and export after which we
// always print out progress. This avoids the user wondering what's going on.
//const statsReportLimit = 8 * time.Second

// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
/*
func (st *InsertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
	// Fetch the timings for the batch
	var (
		now     = mclock.Now()
		elapsed = now.Sub(st.startTime)
	)
	// If we're at the last block of the batch or report period reached, log
	if index == len(chain)-1 || elapsed >= statsReportLimit {
		// Count the number of transactions in this segment
		var txs int
		for _, block := range chain[st.lastIndex : index+1] {
			txs += len(block.Transactions())
		}
		end := chain[index]

		// Assemble the log context and send it to the logger
		context := []interface{}{
			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
			"number", end.Number(), "hash", end.Hash(),
		}
		if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute {
			context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
		}
		context = append(context, []interface{}{"dirty", dirty}...)

		if st.queued > 0 {
			context = append(context, []interface{}{"queued", st.queued}...)
		}
		if st.ignored > 0 {
			context = append(context, []interface{}{"ignored", st.ignored}...)
		}
		log.Info("Imported new chain segment", context...)

		// Bump the stats reported to the next section
		*st = InsertStats{startTime: now, lastIndex: index + 1}
	}
}
*/

// insertIterator is a helper to assist during chain import.
type insertIterator struct {
	chain types.Blocks // Chain of blocks being iterated over

	results <-chan error // Verification result sink from the consensus engine
	errors  []error      // Header verification errors for the blocks

	index     int       // Current offset of the iterator
	validator Validator // Validator to run if verification succeeds
}

// newInsertIterator creates a new iterator based on the given blocks, which are
// assumed to be a contiguous chain.
func newInsertIterator(chain types.Blocks, results <-chan error, validator Validator) *insertIterator {
	return &insertIterator{
		chain:     chain,
		results:   results,
		errors:    make([]error, 0, len(chain)),
		index:     -1,
		validator: validator,
	}
}
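
// Usage (hypothetical sketch, not part of the original file): a caller would
// typically feed newInsertIterator the error channel produced by the consensus
// engine's header verification for the same blocks, then drain the iterator
// with next(). The blocks, results, validator and ctx names below are assumed
// to exist in the caller's scope.
//
//	it := newInsertIterator(blocks, results, validator)
//	for block, err := it.next(ctx); block != nil; block, err = it.next(ctx) {
//		if err != nil {
//			break // header or body validation failed for this block
//		}
//		// import the validated block here
//	}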

// next returns the next block in the iterator, along with any potential validation
// error for that block. When the end is reached, it will return (nil, nil).
func (it *insertIterator) next(ctx context.Context) (*types.Block, error) {
	if it.index+1 >= len(it.chain) {
		it.index = len(it.chain)
		return nil, nil
	}
	// Advance the iterator and wait for verification result if not yet done
	it.index++
	if len(it.errors) <= it.index {
		it.errors = append(it.errors, <-it.results)
	}
	if it.errors[it.index] != nil {
		return it.chain[it.index], it.errors[it.index]
	}
	return it.chain[it.index], it.validator.ValidateBody(ctx, it.chain[it.index])
}

// peek returns the next block in the iterator, along with any potential validation
// error for that block, but does **not** advance the iterator.
//
// Both header and body validation errors (nil too) are cached into the iterator
// to avoid duplicating work on the following next() call.
func (it *insertIterator) peek() (*types.Block, error) {
	// If we reached the end of the chain, abort
	if it.index+1 >= len(it.chain) {
		return nil, nil
	}
	// Wait for verification result if not yet done
	if len(it.errors) <= it.index+1 {
		it.errors = append(it.errors, <-it.results)
	}
	if it.errors[it.index+1] != nil {
		return it.chain[it.index+1], it.errors[it.index+1]
	}
	// Block header valid, ignore body validation since we don't have a parent anyway
	return it.chain[it.index+1], nil
}
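
// Usage (hypothetical sketch): peek lets a caller inspect the upcoming block
// without consuming it, for example to decide whether to keep importing before
// committing to a next() call. The it name below is illustrative only.
//
//	if block, err := it.peek(); block != nil && err == nil {
//		// the next block's header verified fine; safe to continue the batch
//	}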

// previous returns the previous header that was being processed, or nil.
func (it *insertIterator) previous() *types.Header {
	if it.index < 1 {
		return nil
	}
	return it.chain[it.index-1].Header()
}

// first returns the first block in the iterator.
func (it *insertIterator) first() *types.Block {
	return it.chain[0]
}

// remaining returns the number of remaining blocks.
func (it *insertIterator) remaining() int {
	return len(it.chain) - it.index
}

// processed returns the number of processed blocks.
func (it *insertIterator) processed() int {
	return it.index + 1
}

// InsertBodyChain attempts to insert the given batch of blocks into the
// canonical chain, without executing those blocks.
func InsertBodyChain(logPrefix string, ctx context.Context, db ethdb.Database, chain types.Blocks, newCanonical bool) (bool, error) {
	// Sanity check that we have something meaningful to import
	if len(chain) == 0 {
		return true, nil
	}
	var (
		block, prev *types.Block
	)
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		block = chain[i]
		prev = chain[i-1]
		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(),
				"parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash())

			return true, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
		}
	}

	return InsertBodies(
		logPrefix,
		ctx,
		chain,
		db,
		newCanonical,
	)
}
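
// Usage (hypothetical sketch): a staged-sync body download step could hand a
// contiguous batch of downloaded blocks to InsertBodyChain. The ctx, db and
// blocks values, and the "Bodies" log prefix, are assumptions of the caller.
//
//	if _, err := InsertBodyChain("Bodies", ctx, db, blocks, true /* newCanonical */); err != nil {
//		return fmt.Errorf("inserting bodies: %w", err)
//	}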

// InsertBodies is insertChain with execute=false and omission of the blockchain object.
func InsertBodies(
	logPrefix string,
	ctx context.Context,
	chain types.Blocks,
	db ethdb.Database,
	newCanonical bool,
) (bool, error) {
	batch := db.NewBatch()
	defer batch.Rollback()
	stats := InsertStats{StartTime: mclock.Now()}

	var parentNumber = chain[0].NumberU64() - 1
	parentHash := chain[0].ParentHash()

	if parent := rawdb.ReadStorageBodyRLP(batch, parentHash, parentNumber); parent == nil {
		log.Error("chain segment could not be inserted, missing parent", "hash", parentHash)
		return true, fmt.Errorf("chain segment could not be inserted, missing parent %x", parentHash)
	}

	// Iterate over the blocks and insert when the verifier permits
	for _, block := range chain {
		start := time.Now()

		// Read the parent's total difficulty to make sure the ancestor is known
		ptd, err := rawdb.ReadTd(batch, block.ParentHash(), block.NumberU64()-1)
		if err != nil {
			return true, err
		}

		if ptd == nil {
			return true, consensus.ErrUnknownAncestor
		}

		// Regardless of the canonical status, write the block itself to the database
		if common.IsCanceled(ctx) {
			return true, ctx.Err()
		}

		err = rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
		if err != nil {
			return true, err
		}

		log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
			"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
			"elapsed", common.PrettyDuration(time.Since(start)),
			"root", block.Root())
	}
	stats.Processed = len(chain)
	stats.Report(logPrefix, chain, len(chain)-1, true)
	if newCanonical {
		rawdb.WriteHeadBlockHash(batch, chain[len(chain)-1].Hash())
	}
	if _, err := batch.Commit(); err != nil {
		return true, fmt.Errorf("commit inserting bodies: %w", err)
	}
	return false, nil
}