// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"context"
	"fmt"
	"time"

	"github.com/ledgerwatch/turbo-geth/common"
	"github.com/ledgerwatch/turbo-geth/common/mclock"
	"github.com/ledgerwatch/turbo-geth/consensus"
	"github.com/ledgerwatch/turbo-geth/core/rawdb"
	"github.com/ledgerwatch/turbo-geth/core/types"
	"github.com/ledgerwatch/turbo-geth/ethdb"
	"github.com/ledgerwatch/turbo-geth/log"
)

// InsertStats tracks and reports on block insertion.
type InsertStats struct {
	queued, lastIndex, ignored int
	UsedGas                    uint64
	Processed                  int
	StartTime                  mclock.AbsTime
}

// statsReportLimit is the time limit during import and export after which we
// always print out progress. This avoids the user wondering what's going on.
//const statsReportLimit = 8 * time.Second

// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
/*
func (st *InsertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
	// Fetch the timings for the batch
	var (
		now     = mclock.Now()
		elapsed = now.Sub(st.startTime)
	)
	// If we're at the last block of the batch or report period reached, log
	if index == len(chain)-1 || elapsed >= statsReportLimit {
		// Count the number of transactions in this segment
		var txs int
		for _, block := range chain[st.lastIndex : index+1] {
			txs += len(block.Transactions())
		}
		end := chain[index]
		// Assemble the log context and send it to the logger
		context := []interface{}{
			"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
			"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
			"number", end.Number(), "hash", end.Hash(),
		}
		if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute {
			context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
		}
		context = append(context, []interface{}{"dirty", dirty}...)
		if st.queued > 0 {
			context = append(context, []interface{}{"queued", st.queued}...)
		}
		if st.ignored > 0 {
			context = append(context, []interface{}{"ignored", st.ignored}...)
		}
		log.Info("Imported new chain segment", context...)
		// Bump the stats reported to the next section
		*st = InsertStats{startTime: now, lastIndex: index + 1}
	}
}
*/

// insertIterator is a helper to assist during chain import.
type insertIterator struct {
	chain types.Blocks // Chain of blocks being iterated over

	results <-chan error // Verification result sink from the consensus engine
	errors  []error      // Header verification errors for the blocks

	index     int       // Current offset of the iterator
	validator Validator // Validator to run if verification succeeds
}
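
// drainIterator is an illustrative sketch, not part of the original import flow.
// It shows how an insertIterator is typically driven: call next until it reports
// (nil, nil) for end-of-chain, and stop on the first verification or validation
// error. The function name is an assumption for demonstration only.
func drainIterator(ctx context.Context, it *insertIterator) (int, error) {
	for {
		block, err := it.next(ctx)
		if block == nil && err == nil {
			// End of the batch: every block was consumed without error.
			return it.processed(), nil
		}
		if err != nil {
			// Header verification or body validation failed for this block.
			return it.processed(), err
		}
	}
}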

// newInsertIterator creates a new iterator based on the given blocks, which are
// assumed to be a contiguous chain.
func newInsertIterator(chain types.Blocks, results <-chan error, validator Validator) *insertIterator {
	return &insertIterator{
		chain:     chain,
		results:   results,
		errors:    make([]error, 0, len(chain)),
		index:     -1,
		validator: validator,
	}
}

// next returns the next block in the iterator, along with any potential validation
// error for that block. When the end is reached, it will return (nil, nil).
func (it *insertIterator) next(ctx context.Context) (*types.Block, error) {
	if it.index+1 >= len(it.chain) {
		it.index = len(it.chain)
		return nil, nil
	}
	// Advance the iterator and wait for verification result if not yet done
	it.index++
	if len(it.errors) <= it.index {
		it.errors = append(it.errors, <-it.results)
	}
	if it.errors[it.index] != nil {
		return it.chain[it.index], it.errors[it.index]
	}
	return it.chain[it.index], it.validator.ValidateBody(ctx, it.chain[it.index])
}

// peek returns the next block in the iterator, along with any potential validation
// error for that block, but does **not** advance the iterator.
//
// Both header and body validation errors (nil too) are cached into the iterator
// to avoid duplicating work on the following next() call.
func (it *insertIterator) peek() (*types.Block, error) {
	// If we reached the end of the chain, abort
	if it.index+1 >= len(it.chain) {
		return nil, nil
	}
	// Wait for verification result if not yet done
	if len(it.errors) <= it.index+1 {
		it.errors = append(it.errors, <-it.results)
	}
	if it.errors[it.index+1] != nil {
		return it.chain[it.index+1], it.errors[it.index+1]
	}
	// Block header valid, ignore body validation since we don't have a parent anyway
	return it.chain[it.index+1], nil
}

// previous returns the previous header that was being processed, or nil.
func (it *insertIterator) previous() *types.Header {
	if it.index < 1 {
		return nil
	}
	return it.chain[it.index-1].Header()
}

// first returns the first block in the iterator.
func (it *insertIterator) first() *types.Block {
	return it.chain[0]
}

// remaining returns the number of remaining blocks.
func (it *insertIterator) remaining() int {
	return len(it.chain) - it.index
}
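
// iteratorFromResults is an illustrative sketch, not part of the original code.
// It demonstrates the contract of the results channel the iterator consumes:
// one error value (nil on success) per block, in chain order, normally streamed
// from the consensus engine's header verification. Here the results are fed from
// a pre-computed slice purely to show the wiring; the function name and the
// headerErrs parameter are assumptions for demonstration only.
func iteratorFromResults(chain types.Blocks, headerErrs []error, validator Validator) *insertIterator {
	results := make(chan error, len(headerErrs))
	for _, err := range headerErrs {
		results <- err
	}
	close(results)
	return newInsertIterator(chain, results, validator)
}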

// processed returns the number of processed blocks.
func (it *insertIterator) processed() int {
	return it.index + 1
}

// InsertBodyChain attempts to insert the given batch of blocks into the
// canonical chain, without executing those blocks.
func InsertBodyChain(logPrefix string, ctx context.Context, db ethdb.Database, chain types.Blocks, newCanonical bool) (bool, error) {
	// Sanity check that we have something meaningful to import
	if len(chain) == 0 {
		return true, nil
	}
	// Remove already known canon-blocks
	var (
		block, prev *types.Block
	)
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		block = chain[i]
		prev = chain[i-1]
		if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous block insert", "number", block.Number(), "hash", block.Hash(),
				"parent", block.ParentHash(), "prevnumber", prev.Number(), "prevhash", prev.Hash())

			return true, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, prev.NumberU64(),
				prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4])
		}
	}
	return InsertBodies(
		logPrefix,
		ctx,
		chain,
		db,
		newCanonical,
	)
}

// InsertBodies is insertChain with execute=false and omission of the blockchain object
func InsertBodies(
	logPrefix string,
	ctx context.Context,
	chain types.Blocks,
	db ethdb.Database,
	newCanonical bool,
) (bool, error) {
	tx, err := db.Begin(ctx, ethdb.RW)
	if err != nil {
		return false, err
	}
	defer tx.Rollback()

	batch := tx.NewBatch()
	defer batch.Rollback()

	stats := InsertStats{StartTime: mclock.Now()}

	var parentNumber = chain[0].NumberU64() - 1
	parentHash := chain[0].ParentHash()
	if parent := rawdb.ReadStorageBodyRLP(batch, parentHash, parentNumber); parent == nil {
		log.Error("chain segment could not be inserted, missing parent", "hash", parentHash)
		return true, fmt.Errorf("chain segment could not be inserted, missing parent %x", parentHash)
	}

	// Iterate over the blocks and insert when the verifier permits
	for _, block := range chain {
		start := time.Now()

		// Calculate the total difficulty of the block
		ptd, err := rawdb.ReadTd(batch, block.ParentHash(), block.NumberU64()-1)
		if err != nil {
			return true, err
		}
		if ptd == nil {
			return true, consensus.ErrUnknownAncestor
		}

		// Regardless of the canonical status, write the block itself to the database
		if common.IsCanceled(ctx) {
			return true, ctx.Err()
		}
		err = rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
		if err != nil {
			return true, err
		}

		log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
			"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(),
			"elapsed", common.PrettyDuration(time.Since(start)),
			"root", block.Root())
	}
	stats.Processed = len(chain)
	stats.Report(logPrefix, chain, len(chain)-1, true)

	if newCanonical {
		rawdb.WriteHeadBlockHash(batch, chain[len(chain)-1].Hash())
	}
	if _, err := batch.Commit(); err != nil {
		return true, fmt.Errorf("commit inserting bodies: %w", err)
	}
	if _, err := tx.Commit(); err != nil {
		return true, fmt.Errorf("commit inserting bodies: %w", err)
	}
	return false, nil
}
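
// insertBodiesExample is an illustrative sketch, not part of the original code,
// showing a typical call into InsertBodyChain: the caller provides a contiguous
// batch of blocks and sets newCanonical so the head block hash is advanced to
// the last block of the batch. The "Bodies" log prefix and the function name
// are assumptions for demonstration only.
func insertBodiesExample(ctx context.Context, db ethdb.Database, blocks types.Blocks) error {
	_, err := InsertBodyChain("Bodies", ctx, db, blocks, true /* newCanonical */)
	return err
}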