[E3] Some fixes for the in-memory database when working with Caplin (testing on Sepolia) (#9164, originally #9151)

---------

Co-authored-by: Alex Sharp <alexsharp@Alexs-MacBook-Pro-2.local>
This commit is contained in:
ledgerwatch 2024-01-09 01:26:26 +00:00 committed by GitHub
parent 22f761593f
commit 459ccf8de4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
26 changed files with 723 additions and 472 deletions

View File

@ -38,6 +38,8 @@ import (
"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
libstate "github.com/ledgerwatch/erigon-lib/state"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon/cmd/hack/tool/fromdb"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/core"
@ -965,10 +967,11 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error {
}
defer tx.Rollback()
}
txc := wrap.TxContainer{Tx: tx}
if unwind > 0 {
u := sync.NewUnwindState(stages.Execution, s.BlockNumber-unwind, s.BlockNumber)
err := stagedsync.UnwindExecutionStage(u, s, tx, ctx, cfg, true, logger)
err := stagedsync.UnwindExecutionStage(u, s, txc, ctx, cfg, true, logger)
if err != nil {
return err
}
@ -987,7 +990,7 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error {
return nil
}
err := stagedsync.SpawnExecuteBlocksStage(s, sync, tx, block, ctx, cfg, true /* initialCycle */, logger)
err := stagedsync.SpawnExecuteBlocksStage(s, sync, txc, block, ctx, cfg, true /* initialCycle */, logger)
if err != nil {
return err
}

View File

@ -11,6 +11,7 @@ import (
"time"
"github.com/ledgerwatch/erigon-lib/kv/dbutils"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/c2h5oh/datasize"
chain2 "github.com/ledgerwatch/erigon-lib/chain"
@ -228,9 +229,9 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context.
execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, changesAcc, false, false, historyV3, dirs,
br, nil, genesis, syncCfg, agg, nil)
execUntilFunc := func(execToBlock uint64) func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx, logger log.Logger) error {
return func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx, logger log.Logger) error {
if err := stagedsync.SpawnExecuteBlocksStage(s, unwinder, tx, execToBlock, ctx, execCfg, firstCycle, logger); err != nil {
execUntilFunc := func(execToBlock uint64) func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, unwinder stagedsync.Unwinder, txc wrap.TxContainer, logger log.Logger) error {
if err := stagedsync.SpawnExecuteBlocksStage(s, unwinder, txc, execToBlock, ctx, execCfg, firstCycle, logger); err != nil {
return fmt.Errorf("spawnExecuteBlocksStage: %w", err)
}
return nil
@ -317,7 +318,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context.
stateStages.MockExecFunc(stages.Execution, execUntilFunc(execToBlock))
_ = stateStages.SetCurrentStage(stages.Execution)
if _, err := stateStages.Run(db, tx, false /* firstCycle */); err != nil {
if _, err := stateStages.Run(db, wrap.TxContainer{Tx: tx}, false /* firstCycle */); err != nil {
return err
}
@ -352,8 +353,8 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context.
if miner.MiningConfig.Enabled && nextBlock != nil && nextBlock.Coinbase() != (common2.Address{}) {
miner.MiningConfig.Etherbase = nextBlock.Coinbase()
miner.MiningConfig.ExtraData = nextBlock.Extra()
miningStages.MockExecFunc(stages.MiningCreateBlock, func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx, logger log.Logger) error {
err = stagedsync.SpawnMiningCreateBlockStage(s, tx,
miningStages.MockExecFunc(stages.MiningCreateBlock, func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, u stagedsync.Unwinder, txc wrap.TxContainer, logger log.Logger) error {
err = stagedsync.SpawnMiningCreateBlockStage(s, txc.Tx,
stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, dirs.Tmp, br),
quit, logger)
if err != nil {
@ -375,7 +376,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context.
//})
_ = miningStages.SetCurrentStage(stages.MiningCreateBlock)
if _, err := miningStages.Run(db, tx, false /* firstCycle */); err != nil {
if _, err := miningStages.Run(db, wrap.TxContainer{Tx: tx}, false /* firstCycle */); err != nil {
return err
}
tx.Rollback()
@ -468,7 +469,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e
}
defer tx.Rollback()
sync.DisableStages(stages.Snapshots, stages.Headers, stages.BlockHashes, stages.Bodies, stages.Senders, stages.Execution, stages.AccountHistoryIndex, stages.StorageHistoryIndex, stages.TxLookup, stages.Finish)
if _, err = sync.Run(db, tx, false /* firstCycle */); err != nil {
if _, err = sync.Run(db, wrap.TxContainer{Tx: tx}, false /* firstCycle */); err != nil {
return err
}
execStage := stage(sync, tx, nil, stages.HashState)
@ -492,7 +493,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e
sync.DisableStages(stages.IntermediateHashes)
_ = sync.SetCurrentStage(stages.HashState)
if _, err = sync.Run(db, tx, false /* firstCycle */); err != nil {
if _, err = sync.Run(db, wrap.TxContainer{Tx: tx}, false /* firstCycle */); err != nil {
return err
}
must(tx.Commit())
@ -512,7 +513,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e
_ = sync.SetCurrentStage(stages.IntermediateHashes)
t := time.Now()
if _, err = sync.Run(db, tx, false /* firstCycle */); err != nil {
if _, err = sync.Run(db, wrap.TxContainer{Tx: tx}, false /* firstCycle */); err != nil {
return err
}
logger.Warn("loop", "time", time.Since(t).String())
@ -567,8 +568,8 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger)
/*badBlockHalt=*/ false, historyV3, dirs, br, nil, genesis, syncCfg, agg, nil)
// set block limit of execute stage
sync.MockExecFunc(stages.Execution, func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx, logger log.Logger) error {
if err = stagedsync.SpawnExecuteBlocksStage(stageState, sync, tx, to, ctx, cfg, initialCycle, logger); err != nil {
sync.MockExecFunc(stages.Execution, func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, txc wrap.TxContainer, logger log.Logger) error {
if err = stagedsync.SpawnExecuteBlocksStage(stageState, sync, txc, to, ctx, cfg, initialCycle, logger); err != nil {
return fmt.Errorf("spawnExecuteBlocksStage: %w", err)
}
return nil
@ -583,7 +584,7 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger)
_ = sync.SetCurrentStage(stages.Execution)
t := time.Now()
if _, err = sync.Run(db, tx, initialCycle); err != nil {
if _, err = sync.Run(db, wrap.TxContainer{Tx: tx}, initialCycle); err != nil {
return err
}
logger.Info("[Integration] ", "loop time", time.Since(t))

View File

@ -521,7 +521,7 @@ func recoverCodeHashPlain(acc *accounts.Account, db kv.Tx, key []byte) {
}
}
func (rs *StateV3) Unwind(ctx context.Context, tx kv.RwTx, txUnwindTo uint64, agg *libstate.AggregatorV3, accumulator *shards.Accumulator) error {
func (rs *StateV3) Unwind(ctx context.Context, tx kv.RwTx, blockUnwindTo, txUnwindTo uint64, agg *libstate.AggregatorV3, accumulator *shards.Accumulator) error {
agg.SetTx(tx)
var currentInc uint64
handle := func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error {

View File

@ -32,6 +32,7 @@ type MemoryMutation struct {
memTx kv.RwTx
memDb kv.RwDB
deletedEntries map[string]map[string]struct{}
deletedDups map[string]map[string]map[string]struct{}
clearedTables map[string]struct{}
db kv.Tx
statelessCursors map[string]kv.RwCursor
@ -60,6 +61,7 @@ func NewMemoryBatch(tx kv.Tx, tmpDir string, logger log.Logger) *MemoryMutation
memDb: tmpDB,
memTx: memTx,
deletedEntries: make(map[string]map[string]struct{}),
deletedDups: map[string]map[string]map[string]struct{}{},
clearedTables: make(map[string]struct{}),
}
}
@ -70,6 +72,7 @@ func NewMemoryBatchWithCustomDB(tx kv.Tx, db kv.RwDB, uTx kv.RwTx, tmpDir string
memDb: db,
memTx: uTx,
deletedEntries: make(map[string]map[string]struct{}),
deletedDups: map[string]map[string]map[string]struct{}{},
clearedTables: make(map[string]struct{}),
}
}
@ -93,6 +96,19 @@ func (m *MemoryMutation) isEntryDeleted(table string, key []byte) bool {
return ok
}
// isDupDeleted reports whether the (key, value) pair in the given dup-sorted
// table has been tombstoned in this mutation via deleteDup.
func (m *MemoryMutation) isDupDeleted(table string, key []byte, val []byte) bool {
	keys, ok := m.deletedDups[table]
	if !ok {
		return false
	}
	vals, ok := keys[string(key)]
	if !ok {
		return false
	}
	_, ok = vals[string(val)]
	return ok
}
func (m *MemoryMutation) DBSize() (uint64, error) {
panic("not implemented")
}
@ -243,10 +259,141 @@ func (m *MemoryMutation) RangeAscend(table string, fromPrefix, toPrefix []byte,
panic("please implement me")
}
// RangeDescend merges the descending ranges from the underlying DB and the
// in-memory overlay into a single iterator. The leftover "please implement me"
// panic (a stale line from before this was implemented) made the merged
// iterator unreachable; it is removed here.
func (m *MemoryMutation) RangeDescend(table string, fromPrefix, toPrefix []byte, limit int) (iter.KV, error) {
	s := &rangeIter{orderAscend: false, limit: int64(limit)}
	var err error
	if s.iterDb, err = m.db.RangeDescend(table, fromPrefix, toPrefix, limit); err != nil {
		return s, err
	}
	if s.iterMem, err = m.memTx.RangeDescend(table, fromPrefix, toPrefix, limit); err != nil {
		return s, err
	}
	// init caches the first pair from each side so Next can merge by comparison.
	return s.init()
}
// rangeIter merges two key/value iterators — one over the committed DB and
// one over the in-memory overlay — into a single ordered stream.
type rangeIter struct {
	iterDb, iterMem                      iter.KV // source iterators: committed db vs. in-memory overlay
	hasNextDb, hasNextMem                bool    // whether each side has a cached look-ahead pair
	nextKdb, nextVdb, nextKmem, nextVmem []byte  // cached look-ahead key/value for each side
	orderAscend                          bool    // iteration direction used when comparing keys
	limit                                int64   // remaining pairs to emit; reaching 0 stops iteration
}
// init primes the merge by recording whether each side has data and caching
// its first key/value pair, then returns the iterator ready for Next.
func (s *rangeIter) init() (*rangeIter, error) {
	var err error
	s.hasNextDb = s.iterDb.HasNext()
	s.hasNextMem = s.iterMem.HasNext()
	if s.hasNextDb {
		s.nextKdb, s.nextVdb, err = s.iterDb.Next()
		if err != nil {
			return s, err
		}
	}
	if s.hasNextMem {
		s.nextKmem, s.nextVmem, err = s.iterMem.Next()
		if err != nil {
			return s, err
		}
	}
	return s, nil
}
// HasNext reports whether another pair can be emitted: the limit must not be
// exhausted and at least one side must still have a cached pair.
func (s *rangeIter) HasNext() bool {
	return s.limit != 0 && (s.hasNextDb || s.hasNextMem)
}
// Next emits the next merged (k, v) pair. The side whose cached key comes
// first in the requested order is consumed; on equal keys (c == 0) both sides
// advance and the in-memory pair overwrites k/v, so the overlay shadows the DB.
func (s *rangeIter) Next() (k, v []byte, err error) {
	s.limit--
	// c compares the cached keys; NOTE(review): when one side is exhausted its
	// cached key may be stale/nil, but the hasNext guards below gate its use.
	c := bytes.Compare(s.nextKdb, s.nextKmem)
	// Consume the DB side when mem is exhausted, the DB key sorts first for
	// this direction, or the keys are equal (dedup: both sides advance).
	if !s.hasNextMem || c == -1 && s.orderAscend || c == 1 && !s.orderAscend || c == 0 {
		if s.hasNextDb {
			k = s.nextKdb
			v = s.nextVdb
			s.hasNextDb = s.iterDb.HasNext()
			// NOTE(review): iterDb.Next() is called even when hasNextDb just
			// became false — assumes exhausted iterators return harmlessly; confirm.
			if s.nextKdb, s.nextVdb, err = s.iterDb.Next(); err != nil {
				return nil, nil, err
			}
		}
	}
	// Mirrored condition for the mem side; on c == 0 this overwrites the DB
	// pair taken above, implementing overlay-wins semantics.
	// NOTE(review): this guard reads hasNextDb as just updated by the branch
	// above — if the DB side was consumed and became exhausted, the mem side
	// can also advance within the same call; confirm this is intended.
	if !s.hasNextDb || c == 1 && s.orderAscend || c == -1 && !s.orderAscend || c == 0 {
		if s.hasNextMem {
			k = s.nextKmem
			v = s.nextVmem
			s.hasNextMem = s.iterMem.HasNext()
			if s.nextKmem, s.nextVmem, err = s.iterMem.Next(); err != nil {
				return nil, nil, err
			}
		}
	}
	return
}
// RangeDupSort merges the dup-sorted value ranges for key from the underlying
// DB and the in-memory overlay into a single iterator. The leftover "please
// implement me" panic (a stale line from before this was implemented) made the
// merged iterator unreachable; it is removed here.
func (m *MemoryMutation) RangeDupSort(table string, key []byte, fromPrefix, toPrefix []byte, asc order.By, limit int) (iter.KV, error) {
	s := &rangeDupSortIter{key: key, orderAscend: bool(asc), limit: int64(limit)}
	var err error
	if s.iterDb, err = m.db.RangeDupSort(table, key, fromPrefix, toPrefix, asc, limit); err != nil {
		return s, err
	}
	if s.iterMem, err = m.memTx.RangeDupSort(table, key, fromPrefix, toPrefix, asc, limit); err != nil {
		return s, err
	}
	// init caches the first value from each side so Next can merge by comparison.
	return s.init()
}
// rangeDupSortIter merges the dup-sorted values stored under one fixed key
// from the committed DB and the in-memory overlay into a single ordered stream.
type rangeDupSortIter struct {
	iterDb, iterMem       iter.KV // source iterators: committed db vs. in-memory overlay
	hasNextDb, hasNextMem bool    // whether each side has a cached look-ahead value
	key                   []byte  // the fixed key whose duplicate values are iterated
	nextVdb, nextVmem     []byte  // cached look-ahead value for each side
	orderAscend           bool    // iteration direction used when comparing values
	limit                 int64   // remaining values to emit; reaching 0 stops iteration
}
// init primes the merge by recording whether each side has data and caching
// its first duplicate value (keys are ignored — they all equal s.key).
func (s *rangeDupSortIter) init() (*rangeDupSortIter, error) {
	var err error
	s.hasNextDb = s.iterDb.HasNext()
	s.hasNextMem = s.iterMem.HasNext()
	if s.hasNextDb {
		_, s.nextVdb, err = s.iterDb.Next()
		if err != nil {
			return s, err
		}
	}
	if s.hasNextMem {
		_, s.nextVmem, err = s.iterMem.Next()
		if err != nil {
			return s, err
		}
	}
	return s, nil
}
// HasNext reports whether another value can be emitted: the limit must not be
// exhausted and at least one side must still have a cached value.
func (s *rangeDupSortIter) HasNext() bool {
	return s.limit != 0 && (s.hasNextDb || s.hasNextMem)
}
// Next emits the next merged duplicate value under the fixed key. The side
// whose cached value comes first in the requested order is consumed; on equal
// values (c == 0) both sides advance and the in-memory value overwrites v,
// so the overlay shadows the DB.
func (s *rangeDupSortIter) Next() (k, v []byte, err error) {
	s.limit--
	k = s.key
	// c compares the cached values; NOTE(review): an exhausted side's cached
	// value may be stale/nil, but the hasNext guards below gate its use.
	c := bytes.Compare(s.nextVdb, s.nextVmem)
	// Consume the DB side when mem is exhausted, the DB value sorts first for
	// this direction, or the values are equal (dedup: both sides advance).
	if !s.hasNextMem || c == -1 && s.orderAscend || c == 1 && !s.orderAscend || c == 0 {
		if s.hasNextDb {
			v = s.nextVdb
			s.hasNextDb = s.iterDb.HasNext()
			// NOTE(review): iterDb.Next() is called even when hasNextDb just
			// became false — assumes exhausted iterators return harmlessly; confirm.
			if _, s.nextVdb, err = s.iterDb.Next(); err != nil {
				return nil, nil, err
			}
		}
	}
	// Mirrored condition for the mem side; on c == 0 this overwrites the DB
	// value taken above, implementing overlay-wins semantics.
	// NOTE(review): this guard reads hasNextDb as just updated above — if the
	// DB side was consumed and became exhausted, the mem side can also advance
	// within the same call; confirm this is intended.
	if !s.hasNextDb || c == 1 && s.orderAscend || c == -1 && !s.orderAscend || c == 0 {
		if s.hasNextMem {
			v = s.nextVmem
			s.hasNextMem = s.iterMem.HasNext()
			if _, s.nextVmem, err = s.iterMem.Next(); err != nil {
				return nil, nil, err
			}
		}
	}
	return
}
func (m *MemoryMutation) ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error {
@ -271,13 +418,29 @@ func (m *MemoryMutation) ForPrefix(bucket string, prefix []byte, walker func(k,
}
func (m *MemoryMutation) Delete(table string, k []byte) error {
if _, ok := m.deletedEntries[table]; !ok {
m.deletedEntries[table] = make(map[string]struct{})
t, ok := m.deletedEntries[table]
if !ok {
t = make(map[string]struct{})
m.deletedEntries[table] = t
}
m.deletedEntries[table][string(k)] = struct{}{}
t[string(k)] = struct{}{}
return m.memTx.Delete(table, k)
}
// deleteDup records that the (k, v) pair in the given dup-sorted table is
// deleted within this mutation, creating the nested table/key maps on demand.
// The underlying DB is not touched.
func (m *MemoryMutation) deleteDup(table string, k, v []byte) {
	keys, ok := m.deletedDups[table]
	if !ok {
		keys = make(map[string]map[string]struct{})
		m.deletedDups[table] = keys
	}
	vals, ok := keys[string(k)]
	if !ok {
		vals = make(map[string]struct{})
		keys[string(k)] = vals
	}
	vals[string(v)] = struct{}{}
}
func (m *MemoryMutation) Commit() error {
m.statelessCursors = nil
return nil
@ -467,7 +630,7 @@ func (m *MemoryMutation) MemTx() kv.RwTx {
// Cursor creates a new cursor (the real fun begins here)
func (m *MemoryMutation) makeCursor(bucket string) (kv.RwCursorDupSort, error) {
c := &memoryMutationCursor{}
c := &memoryMutationCursor{pureDupSort: isTablePurelyDupsort(bucket)}
// We can filter duplicates in dup sorted table
c.table = bucket

View File

@ -47,6 +47,7 @@ type memoryMutationCursor struct {
currentDbEntry cursorEntry
currentMemEntry cursorEntry
isPrevFromDb bool
pureDupSort bool
}
func (m *memoryMutationCursor) isTableCleared() bool {
@ -337,8 +338,13 @@ func (m *memoryMutationCursor) Delete(k []byte) error {
}
// DeleteCurrent marks the cursor's current pair as deleted in the mutation.
// For plain tables the whole key is tombstoned via Delete; for purely
// dup-sorted tables only the exact (key, value) pair is tombstoned. The
// leftover "DeleteCurrent Not implemented" panic (a stale line from before
// this was implemented) made the body unreachable; it is removed here.
func (m *memoryMutationCursor) DeleteCurrent() error {
	if !m.pureDupSort {
		return m.mutation.Delete(m.table, m.currentPair.key)
	}
	m.mutation.deleteDup(m.table, m.currentPair.key, m.currentPair.value)
	return nil
}
// DeleteExact is not supported by the in-memory mutation cursor and panics
// unconditionally.
func (m *memoryMutationCursor) DeleteExact(_, _ []byte) error {
	panic("DeleteExact Not implemented")
}
@ -502,5 +508,34 @@ func (m *memoryMutationCursor) CountDuplicates() (uint64, error) {
}
// SeekBothExact looks up the exact (key, value) pair, consulting the
// in-memory overlay first and falling back to the underlying DB, skipping DB
// pairs tombstoned via deleteDup. The leftover "SeekBothExact Not implemented"
// panic (a stale line from before this was implemented) made the body
// unreachable; it is removed here.
func (m *memoryMutationCursor) SeekBothExact(key, value []byte) ([]byte, []byte, error) {
	memKey, memValue, err := m.memCursor.SeekBothExact(key, value)
	// When the table was cleared, only the overlay result is authoritative.
	if err != nil || m.isTableCleared() {
		return memKey, memValue, err
	}
	if memKey != nil {
		// Overlay hit: remember both entries so subsequent Next/Prev calls can
		// resume the merge from a consistent position.
		m.currentMemEntry.key = memKey
		m.currentMemEntry.value = memValue
		m.currentDbEntry.key = key
		m.currentDbEntry.value, err = m.cursor.SeekBothRange(key, value)
		m.isPrevFromDb = false
		m.currentPair = cursorEntry{memKey, memValue}
		return memKey, memValue, err
	}
	dbKey, dbValue, err := m.cursor.SeekBothExact(key, value)
	if err != nil {
		return nil, nil, err
	}
	// A DB hit only counts if the pair has not been tombstoned in the mutation.
	if dbKey != nil && !m.mutation.isDupDeleted(m.table, key, value) {
		m.currentDbEntry.key = dbKey
		m.currentDbEntry.value = dbValue
		m.currentMemEntry.key = key
		m.currentMemEntry.value, err = m.memCursor.SeekBothRange(key, value)
		m.isPrevFromDb = true
		m.currentPair = cursorEntry{dbKey, dbValue}
		return dbKey, dbValue, err
	}
	// Not found on either side (or deleted): nil result, no error.
	return nil, nil, nil
}

View File

@ -0,0 +1,10 @@
package wrap
import (
"github.com/ledgerwatch/erigon-lib/kv"
)
// TxContainer bundles the transaction flavors a staged-sync step may need,
// letting stage functions accept both through a single parameter.
type TxContainer struct {
	Tx  kv.RwTx       // plain read-write transaction
	Ttx kv.TemporalTx // temporal view; NOTE(review): call sites visible here populate only Tx — confirm when Ttx is set
}

View File

@ -38,6 +38,7 @@ import (
"github.com/ledgerwatch/erigon-lib/diagnostics"
"github.com/ledgerwatch/erigon-lib/downloader/downloadergrpc"
"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon/cl/clparams"
"github.com/ledgerwatch/erigon/cl/cltypes"
"github.com/ledgerwatch/erigon/cl/fork"
@ -529,20 +530,20 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
backend.engine = ethconsensusconfig.CreateConsensusEngine(ctx, stack.Config(), chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, heimdallClient, config.WithoutHeimdall, blockReader, false /* readonly */, logger)
inMemoryExecution := func(batch kv.RwTx, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody,
inMemoryExecution := func(txc wrap.TxContainer, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody,
notifications *shards.Notifications) error {
terseLogger := log.New()
terseLogger.SetHandler(log.LvlFilterHandler(log.LvlWarn, log.StderrHandler))
// Needs its own notifications to not update RPC daemon and txpool about pending blocks
stateSync := stages2.NewInMemoryExecution(backend.sentryCtx, backend.chainDB, config, backend.sentriesClient,
dirs, notifications, blockReader, blockWriter, backend.agg, backend.silkworm, terseLogger)
chainReader := stagedsync.NewChainReaderImpl(chainConfig, batch, blockReader, logger)
chainReader := stagedsync.NewChainReaderImpl(chainConfig, txc.Tx, blockReader, logger)
// We start the mining step
if err := stages2.StateStep(ctx, chainReader, backend.engine, batch, backend.blockWriter, stateSync, backend.sentriesClient.Bd, header, body, unwindPoint, headersChain, bodiesChain, config.HistoryV3); err != nil {
if err := stages2.StateStep(ctx, chainReader, backend.engine, txc, backend.blockWriter, stateSync, backend.sentriesClient.Bd, header, body, unwindPoint, headersChain, bodiesChain, config.HistoryV3); err != nil {
logger.Warn("Could not validate block", "err", err)
return err
}
progress, err := stages.GetStageProgress(batch, stages.IntermediateHashes)
progress, err := stages.GetStageProgress(txc.Tx, stages.IntermediateHashes)
if err != nil {
return err
}

View File

@ -5,6 +5,7 @@ import (
"github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
"github.com/ledgerwatch/log/v3"
@ -30,13 +31,13 @@ func DefaultStages(ctx context.Context,
{
ID: stages.Snapshots,
Description: "Download snapshots",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
if badBlockUnwind {
return nil
}
return SpawnStageSnapshots(s, ctx, tx, snapshots, firstCycle, logger)
return SpawnStageSnapshots(s, ctx, txc.Tx, snapshots, firstCycle, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return nil
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
@ -46,14 +47,14 @@ func DefaultStages(ctx context.Context,
{
ID: stages.Headers,
Description: "Download headers",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
if badBlockUnwind {
return nil
}
return SpawnStageHeaders(s, u, ctx, tx, headers, firstCycle, test, logger)
return SpawnStageHeaders(s, u, ctx, txc.Tx, headers, firstCycle, test, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return HeadersUnwind(u, s, tx, headers, test)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return HeadersUnwind(u, s, txc.Tx, headers, test)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return nil
@ -62,14 +63,14 @@ func DefaultStages(ctx context.Context,
{
ID: stages.BorHeimdall,
Description: "Download Bor-specific data from Heimdall",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
if badBlockUnwind {
return nil
}
return BorHeimdallForward(s, u, ctx, tx, borHeimdallCfg, false, logger)
return BorHeimdallForward(s, u, ctx, txc.Tx, borHeimdallCfg, false, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return BorHeimdallUnwind(u, ctx, s, tx, borHeimdallCfg)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return BorHeimdallUnwind(u, ctx, s, txc.Tx, borHeimdallCfg)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return BorHeimdallPrune(p, ctx, tx, borHeimdallCfg)
@ -78,11 +79,11 @@ func DefaultStages(ctx context.Context,
{
ID: stages.BlockHashes,
Description: "Write block hashes",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnBlockHashStage(s, tx, blockHashCfg, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnBlockHashStage(s, txc.Tx, blockHashCfg, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindBlockHashStage(u, tx, blockHashCfg, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindBlockHashStage(u, txc.Tx, blockHashCfg, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneBlockHashStage(p, tx, blockHashCfg, ctx)
@ -91,11 +92,11 @@ func DefaultStages(ctx context.Context,
{
ID: stages.Bodies,
Description: "Download block bodies",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return BodiesForward(s, u, ctx, tx, bodies, test, firstCycle, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return BodiesForward(s, u, ctx, txc.Tx, bodies, test, firstCycle, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindBodiesStage(u, tx, bodies, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindBodiesStage(u, txc.Tx, bodies, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return nil
@ -104,11 +105,11 @@ func DefaultStages(ctx context.Context,
{
ID: stages.Senders,
Description: "Recover senders from tx signatures",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnRecoverSendersStage(senders, s, u, tx, 0, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnRecoverSendersStage(senders, s, u, txc.Tx, 0, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindSendersStage(u, tx, senders, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindSendersStage(u, txc.Tx, senders, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneSendersStage(p, tx, senders, ctx)
@ -118,11 +119,11 @@ func DefaultStages(ctx context.Context,
ID: stages.Execution,
Description: "Execute blocks w/o hash checks",
Disabled: dbg.StagesOnlyBlocks,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnExecuteBlocksStage(s, u, txc, 0, ctx, exec, firstCycle, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindExecutionStage(u, s, tx, ctx, exec, firstCycle, logger)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindExecutionStage(u, s, txc, ctx, exec, firstCycle, logger)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneExecutionStage(p, tx, exec, ctx, firstCycle)
@ -132,11 +133,11 @@ func DefaultStages(ctx context.Context,
ID: stages.HashState,
Description: "Hash the key in the state",
Disabled: bodies.historyV3 || ethconfig.EnableHistoryV4InTest || dbg.StagesOnlyBlocks,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnHashStateStage(s, tx, hashState, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindHashStateStage(u, s, tx, hashState, ctx, logger)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneHashStateStage(p, tx, hashState, ctx)
@ -146,19 +147,19 @@ func DefaultStages(ctx context.Context,
ID: stages.IntermediateHashes,
Description: "Generate intermediate hashes and computing state root",
Disabled: bodies.historyV3 || ethconfig.EnableHistoryV4InTest || dbg.StagesOnlyBlocks,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
if exec.chainConfig.IsPrague(0) {
_, err := SpawnVerkleTrie(s, u, tx, trieCfg, ctx, logger)
_, err := SpawnVerkleTrie(s, u, txc.Tx, trieCfg, ctx, logger)
return err
}
_, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx, logger)
_, err := SpawnIntermediateHashesStage(s, u, txc.Tx, trieCfg, ctx, logger)
return err
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
if exec.chainConfig.IsPrague(0) {
return UnwindVerkleTrie(u, s, tx, trieCfg, ctx, logger)
return UnwindVerkleTrie(u, s, txc.Tx, trieCfg, ctx, logger)
}
return UnwindIntermediateHashesStage(u, s, tx, trieCfg, ctx, logger)
return UnwindIntermediateHashesStage(u, s, txc.Tx, trieCfg, ctx, logger)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneIntermediateHashesStage(p, tx, trieCfg, ctx)
@ -169,11 +170,11 @@ func DefaultStages(ctx context.Context,
Description: "Generate call traces index",
DisabledDescription: "Work In Progress",
Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnCallTraces(s, tx, callTraces, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnCallTraces(s, txc.Tx, callTraces, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindCallTraces(u, s, tx, callTraces, ctx, logger)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindCallTraces(u, s, txc.Tx, callTraces, ctx, logger)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneCallTraces(p, tx, callTraces, ctx, logger)
@ -183,11 +184,11 @@ func DefaultStages(ctx context.Context,
ID: stages.AccountHistoryIndex,
Description: "Generate account history index",
Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnAccountHistoryIndex(s, tx, history, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnAccountHistoryIndex(s, txc.Tx, history, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindAccountHistoryIndex(u, s, tx, history, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindAccountHistoryIndex(u, s, txc.Tx, history, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneAccountHistoryIndex(p, tx, history, ctx, logger)
@ -197,11 +198,11 @@ func DefaultStages(ctx context.Context,
ID: stages.StorageHistoryIndex,
Description: "Generate storage history index",
Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnStorageHistoryIndex(s, tx, history, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnStorageHistoryIndex(s, txc.Tx, history, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindStorageHistoryIndex(u, s, tx, history, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindStorageHistoryIndex(u, s, txc.Tx, history, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneStorageHistoryIndex(p, tx, history, ctx, logger)
@ -211,11 +212,11 @@ func DefaultStages(ctx context.Context,
ID: stages.LogIndex,
Description: "Generate receipt logs index",
Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnLogIndex(s, tx, logIndex, ctx, 0, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnLogIndex(s, txc.Tx, logIndex, ctx, 0, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindLogIndex(u, s, tx, logIndex, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindLogIndex(u, s, txc.Tx, logIndex, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneLogIndex(p, tx, logIndex, ctx, logger)
@ -225,11 +226,11 @@ func DefaultStages(ctx context.Context,
ID: stages.TxLookup,
Description: "Generate tx lookup index",
Disabled: dbg.StagesOnlyBlocks,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnTxLookup(s, tx, 0 /* toBlock */, txLookup, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnTxLookup(s, txc.Tx, 0 /* toBlock */, txLookup, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindTxLookup(u, s, tx, txLookup, ctx, logger)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindTxLookup(u, s, txc.Tx, txLookup, ctx, logger)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneTxLookup(p, tx, txLookup, ctx, firstCycle, logger)
@ -238,11 +239,11 @@ func DefaultStages(ctx context.Context,
{
ID: stages.Finish,
Description: "Final: update current block for the RPC API",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, _ Unwinder, tx kv.RwTx, logger log.Logger) error {
return FinishForward(s, tx, finish, firstCycle)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, _ Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return FinishForward(s, txc.Tx, finish, firstCycle)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindFinish(u, tx, finish, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindFinish(u, txc.Tx, finish, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneFinish(p, tx, finish, ctx)
@ -256,13 +257,13 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl
{
ID: stages.Snapshots,
Description: "Download snapshots",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
if badBlockUnwind {
return nil
}
return SpawnStageSnapshots(s, ctx, tx, snapshots, firstCycle, logger)
return SpawnStageSnapshots(s, ctx, txc.Tx, snapshots, firstCycle, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return nil
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
@ -272,11 +273,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl
{
ID: stages.BlockHashes,
Description: "Write block hashes",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnBlockHashStage(s, tx, blockHashCfg, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnBlockHashStage(s, txc.Tx, blockHashCfg, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindBlockHashStage(u, tx, blockHashCfg, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindBlockHashStage(u, txc.Tx, blockHashCfg, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneBlockHashStage(p, tx, blockHashCfg, ctx)
@ -285,11 +286,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl
{
ID: stages.Senders,
Description: "Recover senders from tx signatures",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnRecoverSendersStage(senders, s, u, tx, 0, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnRecoverSendersStage(senders, s, u, txc.Tx, 0, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindSendersStage(u, tx, senders, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindSendersStage(u, txc.Tx, senders, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneSendersStage(p, tx, senders, ctx)
@ -298,11 +299,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl
{
ID: stages.Execution,
Description: "Execute blocks w/o hash checks",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnExecuteBlocksStage(s, u, txc, 0, ctx, exec, firstCycle, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindExecutionStage(u, s, tx, ctx, exec, firstCycle, logger)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindExecutionStage(u, s, txc, ctx, exec, firstCycle, logger)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneExecutionStage(p, tx, exec, ctx, firstCycle)
@ -312,11 +313,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl
ID: stages.HashState,
Description: "Hash the key in the state",
Disabled: exec.historyV3 && ethconfig.EnableHistoryV4InTest,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnHashStateStage(s, tx, hashState, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindHashStateStage(u, s, tx, hashState, ctx, logger)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneHashStateStage(p, tx, hashState, ctx)
@ -326,19 +327,19 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl
ID: stages.IntermediateHashes,
Description: "Generate intermediate hashes and computing state root",
Disabled: exec.historyV3 && ethconfig.EnableHistoryV4InTest,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
if exec.chainConfig.IsPrague(0) {
_, err := SpawnVerkleTrie(s, u, tx, trieCfg, ctx, logger)
_, err := SpawnVerkleTrie(s, u, txc.Tx, trieCfg, ctx, logger)
return err
}
_, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx, logger)
_, err := SpawnIntermediateHashesStage(s, u, txc.Tx, trieCfg, ctx, logger)
return err
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
if exec.chainConfig.IsPrague(0) {
return UnwindVerkleTrie(u, s, tx, trieCfg, ctx, logger)
return UnwindVerkleTrie(u, s, txc.Tx, trieCfg, ctx, logger)
}
return UnwindIntermediateHashesStage(u, s, tx, trieCfg, ctx, logger)
return UnwindIntermediateHashesStage(u, s, txc.Tx, trieCfg, ctx, logger)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneIntermediateHashesStage(p, tx, trieCfg, ctx)
@ -349,11 +350,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl
Description: "Generate call traces index",
DisabledDescription: "Work In Progress",
Disabled: exec.historyV3,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnCallTraces(s, tx, callTraces, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnCallTraces(s, txc.Tx, callTraces, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindCallTraces(u, s, tx, callTraces, ctx, logger)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindCallTraces(u, s, txc.Tx, callTraces, ctx, logger)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneCallTraces(p, tx, callTraces, ctx, logger)
@ -363,11 +364,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl
ID: stages.AccountHistoryIndex,
Description: "Generate account history index",
Disabled: exec.historyV3,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnAccountHistoryIndex(s, tx, history, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnAccountHistoryIndex(s, txc.Tx, history, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindAccountHistoryIndex(u, s, tx, history, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindAccountHistoryIndex(u, s, txc.Tx, history, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneAccountHistoryIndex(p, tx, history, ctx, logger)
@ -377,11 +378,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl
ID: stages.StorageHistoryIndex,
Description: "Generate storage history index",
Disabled: exec.historyV3,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnStorageHistoryIndex(s, tx, history, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnStorageHistoryIndex(s, txc.Tx, history, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindStorageHistoryIndex(u, s, tx, history, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindStorageHistoryIndex(u, s, txc.Tx, history, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneStorageHistoryIndex(p, tx, history, ctx, logger)
@ -391,11 +392,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl
ID: stages.LogIndex,
Description: "Generate receipt logs index",
Disabled: exec.historyV3,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnLogIndex(s, tx, logIndex, ctx, 0, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnLogIndex(s, txc.Tx, logIndex, ctx, 0, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindLogIndex(u, s, tx, logIndex, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindLogIndex(u, s, txc.Tx, logIndex, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneLogIndex(p, tx, logIndex, ctx, logger)
@ -404,11 +405,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl
{
ID: stages.TxLookup,
Description: "Generate tx lookup index",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnTxLookup(s, tx, 0 /* toBlock */, txLookup, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnTxLookup(s, txc.Tx, 0 /* toBlock */, txLookup, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindTxLookup(u, s, tx, txLookup, ctx, logger)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindTxLookup(u, s, txc.Tx, txLookup, ctx, logger)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneTxLookup(p, tx, txLookup, ctx, firstCycle, logger)
@ -417,11 +418,11 @@ func PipelineStages(ctx context.Context, snapshots SnapshotsCfg, blockHashCfg Bl
{
ID: stages.Finish,
Description: "Final: update current block for the RPC API",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, _ Unwinder, tx kv.RwTx, logger log.Logger) error {
return FinishForward(s, tx, finish, firstCycle)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, _ Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return FinishForward(s, txc.Tx, finish, firstCycle)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindFinish(u, tx, finish, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindFinish(u, txc.Tx, finish, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneFinish(p, tx, finish, ctx)
@ -436,13 +437,13 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
{
ID: stages.Snapshots,
Description: "Download snapshots",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
if badBlockUnwind {
return nil
}
return SpawnStageSnapshots(s, ctx, tx, snapshots, firstCycle, logger)
return SpawnStageSnapshots(s, ctx, txc.Tx, snapshots, firstCycle, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return nil
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
@ -452,14 +453,14 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
{
ID: stages.Headers,
Description: "Download headers",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
if badBlockUnwind {
return nil
}
return SpawnStageHeaders(s, u, ctx, tx, headers, firstCycle, test, logger)
return SpawnStageHeaders(s, u, ctx, txc.Tx, headers, firstCycle, test, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return HeadersUnwind(u, s, tx, headers, test)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return HeadersUnwind(u, s, txc.Tx, headers, test)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return nil
@ -468,11 +469,11 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
{
ID: stages.BlockHashes,
Description: "Write block hashes",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnBlockHashStage(s, tx, blockHashCfg, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnBlockHashStage(s, txc.Tx, blockHashCfg, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindBlockHashStage(u, tx, blockHashCfg, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindBlockHashStage(u, txc.Tx, blockHashCfg, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneBlockHashStage(p, tx, blockHashCfg, ctx)
@ -481,11 +482,11 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
{
ID: stages.Bodies,
Description: "Download block bodies",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return BodiesForward(s, u, ctx, tx, bodies, test, firstCycle, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return BodiesForward(s, u, ctx, txc.Tx, bodies, test, firstCycle, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindBodiesStage(u, tx, bodies, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindBodiesStage(u, txc.Tx, bodies, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return nil
@ -494,11 +495,11 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
{
ID: stages.Senders,
Description: "Recover senders from tx signatures",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnRecoverSendersStage(senders, s, u, tx, 0, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnRecoverSendersStage(senders, s, u, txc.Tx, 0, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindSendersStage(u, tx, senders, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindSendersStage(u, txc.Tx, senders, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneSendersStage(p, tx, senders, ctx)
@ -507,11 +508,11 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
{
ID: stages.Execution,
Description: "Execute blocks w/o hash checks",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnExecuteBlocksStage(s, u, txc, 0, ctx, exec, firstCycle, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindExecutionStage(u, s, tx, ctx, exec, firstCycle, logger)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindExecutionStage(u, s, txc, ctx, exec, firstCycle, logger)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneExecutionStage(p, tx, exec, ctx, firstCycle)
@ -521,11 +522,11 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
ID: stages.HashState,
Description: "Hash the key in the state",
Disabled: exec.historyV3 && ethconfig.EnableHistoryV4InTest,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnHashStateStage(s, tx, hashState, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindHashStateStage(u, s, tx, hashState, ctx, logger)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneHashStateStage(p, tx, hashState, ctx)
@ -535,19 +536,19 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
ID: stages.IntermediateHashes,
Description: "Generate intermediate hashes and computing state root",
Disabled: exec.historyV3 && ethconfig.EnableHistoryV4InTest,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
if exec.chainConfig.IsPrague(0) {
_, err := SpawnVerkleTrie(s, u, tx, trieCfg, ctx, logger)
_, err := SpawnVerkleTrie(s, u, txc.Tx, trieCfg, ctx, logger)
return err
}
_, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx, logger)
_, err := SpawnIntermediateHashesStage(s, u, txc.Tx, trieCfg, ctx, logger)
return err
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
if exec.chainConfig.IsPrague(0) {
return UnwindVerkleTrie(u, s, tx, trieCfg, ctx, logger)
return UnwindVerkleTrie(u, s, txc.Tx, trieCfg, ctx, logger)
}
return UnwindIntermediateHashesStage(u, s, tx, trieCfg, ctx, logger)
return UnwindIntermediateHashesStage(u, s, txc.Tx, trieCfg, ctx, logger)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneIntermediateHashesStage(p, tx, trieCfg, ctx)
@ -558,11 +559,11 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
Description: "Generate call traces index",
DisabledDescription: "Work In Progress",
Disabled: exec.historyV3,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnCallTraces(s, tx, callTraces, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnCallTraces(s, txc.Tx, callTraces, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindCallTraces(u, s, tx, callTraces, ctx, logger)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindCallTraces(u, s, txc.Tx, callTraces, ctx, logger)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneCallTraces(p, tx, callTraces, ctx, logger)
@ -572,11 +573,11 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
ID: stages.AccountHistoryIndex,
Description: "Generate account history index",
Disabled: exec.historyV3,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnAccountHistoryIndex(s, tx, history, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnAccountHistoryIndex(s, txc.Tx, history, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindAccountHistoryIndex(u, s, tx, history, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindAccountHistoryIndex(u, s, txc.Tx, history, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneAccountHistoryIndex(p, tx, history, ctx, logger)
@ -586,11 +587,11 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
ID: stages.StorageHistoryIndex,
Description: "Generate storage history index",
Disabled: exec.historyV3,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnStorageHistoryIndex(s, tx, history, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnStorageHistoryIndex(s, txc.Tx, history, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindStorageHistoryIndex(u, s, tx, history, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindStorageHistoryIndex(u, s, txc.Tx, history, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneStorageHistoryIndex(p, tx, history, ctx, logger)
@ -600,11 +601,11 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
ID: stages.LogIndex,
Description: "Generate receipt logs index",
Disabled: exec.historyV3,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnLogIndex(s, tx, logIndex, ctx, 0, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnLogIndex(s, txc.Tx, logIndex, ctx, 0, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindLogIndex(u, s, tx, logIndex, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindLogIndex(u, s, txc.Tx, logIndex, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneLogIndex(p, tx, logIndex, ctx, logger)
@ -613,11 +614,11 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
{
ID: stages.TxLookup,
Description: "Generate tx lookup index",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnTxLookup(s, tx, 0 /* toBlock */, txLookup, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnTxLookup(s, txc.Tx, 0 /* toBlock */, txLookup, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindTxLookup(u, s, tx, txLookup, ctx, logger)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindTxLookup(u, s, txc.Tx, txLookup, ctx, logger)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneTxLookup(p, tx, txLookup, ctx, firstCycle, logger)
@ -626,11 +627,11 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers
{
ID: stages.Finish,
Description: "Final: update current block for the RPC API",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, _ Unwinder, tx kv.RwTx, logger log.Logger) error {
return FinishForward(s, tx, finish, firstCycle)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, _ Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return FinishForward(s, txc.Tx, finish, firstCycle)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindFinish(u, tx, finish, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindFinish(u, txc.Tx, finish, ctx)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return PruneFinish(p, tx, finish, ctx)
@ -645,72 +646,72 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc
{
ID: stages.Headers,
Description: "Download headers",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return HeadersUnwind(u, s, tx, headers, false)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return HeadersUnwind(u, s, txc.Tx, headers, false)
},
},
{
ID: stages.Bodies,
Description: "Download block bodies",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindBodiesStage(u, tx, bodies, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindBodiesStage(u, txc.Tx, bodies, ctx)
},
},
{
ID: stages.BlockHashes,
Description: "Write block hashes",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnBlockHashStage(s, tx, blockHashCfg, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnBlockHashStage(s, txc.Tx, blockHashCfg, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindBlockHashStage(u, tx, blockHashCfg, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindBlockHashStage(u, txc.Tx, blockHashCfg, ctx)
},
},
{
ID: stages.Senders,
Description: "Recover senders from tx signatures",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnRecoverSendersStage(senders, s, u, tx, 0, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnRecoverSendersStage(senders, s, u, txc.Tx, 0, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindSendersStage(u, tx, senders, ctx)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindSendersStage(u, txc.Tx, senders, ctx)
},
},
{
ID: stages.Execution,
Description: "Execute blocks w/o hash checks",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnExecuteBlocksStage(s, u, txc, 0, ctx, exec, firstCycle, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindExecutionStage(u, s, tx, ctx, exec, firstCycle, logger)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindExecutionStage(u, s, txc, ctx, exec, firstCycle, logger)
},
},
{
ID: stages.HashState,
Description: "Hash the key in the state",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnHashStateStage(s, tx, hashState, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindHashStateStage(u, s, tx, hashState, ctx, logger)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindHashStateStage(u, s, txc.Tx, hashState, ctx, logger)
},
},
{
ID: stages.IntermediateHashes,
Description: "Generate intermediate hashes and computing state root",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
_, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
_, err := SpawnIntermediateHashesStage(s, u, txc.Tx, trieCfg, ctx, logger)
return err
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return UnwindIntermediateHashesStage(u, s, tx, trieCfg, ctx, logger)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return UnwindIntermediateHashesStage(u, s, txc.Tx, trieCfg, ctx, logger)
},
},
}

View File

@ -29,6 +29,7 @@ import (
"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
"github.com/ledgerwatch/erigon-lib/metrics"
libstate "github.com/ledgerwatch/erigon-lib/state"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon/cmd/state/exec22"
"github.com/ledgerwatch/erigon/cmd/state/exec3"
"github.com/ledgerwatch/erigon/common/math"
@ -144,7 +145,7 @@ rwloop does:
When rwLoop has nothing to do - it does Prune, or flush of WAL to RwTx (agg.rotate+agg.Flush)
*/
func ExecV3(ctx context.Context,
execStage *StageState, u Unwinder, workerCount int, cfg ExecuteBlockCfg, applyTx kv.RwTx,
execStage *StageState, u Unwinder, workerCount int, cfg ExecuteBlockCfg, txc wrap.TxContainer,
parallel bool, logPrefix string,
maxBlockNum uint64,
logger log.Logger,
@ -156,6 +157,7 @@ func ExecV3(ctx context.Context,
agg, engine := cfg.agg, cfg.engine
chainConfig, genesis := cfg.chainConfig, cfg.genesis
applyTx := txc.Tx
useExternalTx := applyTx != nil
if !useExternalTx && !parallel {
var err error

View File

@ -5,18 +5,19 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
)
// ExecFunc is the execution function for the stage to move forward.
// * state - is the current state of the stage and contains stage data.
// * unwinder - if the stage needs to cause unwinding, `unwinder` methods can be used.
type ExecFunc func(firstCycle bool, badBlockUnwind bool, s *StageState, unwinder Unwinder, tx kv.RwTx, logger log.Logger) error
type ExecFunc func(firstCycle bool, badBlockUnwind bool, s *StageState, unwinder Unwinder, txc wrap.TxContainer, logger log.Logger) error
// UnwindFunc is the unwinding logic of the stage.
// * unwindState - contains information about the unwind itself.
// * stageState - represents the state of this stage at the beginning of unwind.
type UnwindFunc func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error
type UnwindFunc func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error
// PruneFunc is the execution function for the stage to prune old data.
// * state - is the current state of the stage and contains stage data.

View File

@ -28,6 +28,7 @@ import (
"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
"github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2"
libstate "github.com/ledgerwatch/erigon-lib/state"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon/common/changeset"
"github.com/ledgerwatch/erigon/common/math"
"github.com/ledgerwatch/erigon/consensus"
@ -239,7 +240,7 @@ func newStateReaderWriter(
// ================ Erigon3 ================
func ExecBlockV3(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) (err error) {
func ExecBlockV3(s *StageState, u Unwinder, txc wrap.TxContainer, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) (err error) {
workersCount := cfg.syncCfg.ExecWorkerCount
//workersCount := 2
if !initialCycle {
@ -248,7 +249,7 @@ func ExecBlockV3(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx cont
cfg.agg.SetWorkers(estimate.CompressSnapshot.WorkersQuarter())
if initialCycle {
reconstituteToBlock, found, err := reconstituteBlock(cfg.agg, cfg.db, tx)
reconstituteToBlock, found, err := reconstituteBlock(cfg.agg, cfg.db, txc.Tx)
if err != nil {
return err
}
@ -264,7 +265,7 @@ func ExecBlockV3(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx cont
}
}
prevStageProgress, err := senderStageProgress(tx, cfg.db)
prevStageProgress, err := senderStageProgress(txc.Tx, cfg.db)
if err != nil {
return err
}
@ -280,8 +281,8 @@ func ExecBlockV3(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx cont
if to > s.BlockNumber+16 {
logger.Info(fmt.Sprintf("[%s] Blocks execution", logPrefix), "from", s.BlockNumber, "to", to)
}
parallel := tx == nil
if err := ExecV3(ctx, s, u, workersCount, cfg, tx, parallel, logPrefix,
parallel := txc.Tx == nil
if err := ExecV3(ctx, s, u, workersCount, cfg, txc, parallel, logPrefix,
to, logger, initialCycle); err != nil {
return fmt.Errorf("ExecV3: %w", err)
}
@ -308,28 +309,28 @@ func reconstituteBlock(agg *libstate.AggregatorV3, db kv.RoDB, tx kv.Tx) (n uint
return
}
func unwindExec3(u *UnwindState, s *StageState, tx kv.RwTx, ctx context.Context, cfg ExecuteBlockCfg, accumulator *shards.Accumulator, logger log.Logger) (err error) {
func unwindExec3(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx context.Context, cfg ExecuteBlockCfg, accumulator *shards.Accumulator, logger log.Logger) (err error) {
cfg.agg.SetLogPrefix(s.LogPrefix())
rs := state.NewStateV3(cfg.dirs.Tmp, logger)
// unwind all txs of u.UnwindPoint block. 1 txn in begin/end of block - system txs
txNum, err := rawdbv3.TxNums.Min(tx, u.UnwindPoint+1)
txNum, err := rawdbv3.TxNums.Min(txc.Tx, u.UnwindPoint+1)
if err != nil {
return err
}
if err := rs.Unwind(ctx, tx, txNum, cfg.agg, accumulator); err != nil {
if err := rs.Unwind(ctx, txc.Tx, u.UnwindPoint, txNum, cfg.agg, accumulator); err != nil {
return fmt.Errorf("StateV3.Unwind: %w", err)
}
if err := rs.Flush(ctx, tx, s.LogPrefix(), time.NewTicker(30*time.Second)); err != nil {
if err := rs.Flush(ctx, txc.Tx, s.LogPrefix(), time.NewTicker(30*time.Second)); err != nil {
return fmt.Errorf("StateV3.Flush: %w", err)
}
if err := rawdb.TruncateReceipts(tx, u.UnwindPoint+1); err != nil {
if err := rawdb.TruncateReceipts(txc.Tx, u.UnwindPoint+1); err != nil {
return fmt.Errorf("truncate receipts: %w", err)
}
if err := rawdb.TruncateBorReceipts(tx, u.UnwindPoint+1); err != nil {
if err := rawdb.TruncateBorReceipts(txc.Tx, u.UnwindPoint+1); err != nil {
return fmt.Errorf("truncate bor receipts: %w", err)
}
if err := rawdb.DeleteNewerEpochs(tx, u.UnwindPoint+1); err != nil {
if err := rawdb.DeleteNewerEpochs(txc.Tx, u.UnwindPoint+1); err != nil {
return fmt.Errorf("delete newer epochs: %w", err)
}
@ -358,29 +359,29 @@ func senderStageProgress(tx kv.Tx, db kv.RoDB) (prevStageProgress uint64, err er
// ================ Erigon3 End ================
func SpawnExecuteBlocksStage(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) (err error) {
func SpawnExecuteBlocksStage(s *StageState, u Unwinder, txc wrap.TxContainer, toBlock uint64, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) (err error) {
if cfg.historyV3 {
if err = ExecBlockV3(s, u, tx, toBlock, ctx, cfg, initialCycle, logger); err != nil {
if err = ExecBlockV3(s, u, txc, toBlock, ctx, cfg, initialCycle, logger); err != nil {
return err
}
return nil
}
quit := ctx.Done()
useExternalTx := tx != nil
useExternalTx := txc.Tx != nil
if !useExternalTx {
tx, err = cfg.db.BeginRw(context.Background())
txc.Tx, err = cfg.db.BeginRw(context.Background())
if err != nil {
return err
}
defer tx.Rollback()
defer txc.Tx.Rollback()
}
prevStageProgress, errStart := stages.GetStageProgress(tx, stages.Senders)
prevStageProgress, errStart := stages.GetStageProgress(txc.Tx, stages.Senders)
if errStart != nil {
return errStart
}
nextStageProgress, err := stages.GetStageProgress(tx, stages.HashState)
nextStageProgress, err := stages.GetStageProgress(txc.Tx, stages.HashState)
if err != nil {
return err
}
@ -415,7 +416,7 @@ func SpawnExecuteBlocksStage(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint
var batch kv.PendingMutations
// state is stored through ethdb batches
batch = membatch.NewHashBatch(tx, quit, cfg.dirs.Tmp, logger)
batch = membatch.NewHashBatch(txc.Tx, quit, cfg.dirs.Tmp, logger)
// avoids stacking defers within the loop
defer func() {
batch.Close()
@ -443,11 +444,11 @@ Loop:
}
}
blockHash, err := cfg.blockReader.CanonicalHash(ctx, tx, blockNum)
blockHash, err := cfg.blockReader.CanonicalHash(ctx, txc.Tx, blockNum)
if err != nil {
return err
}
block, _, err := cfg.blockReader.BlockWithSenders(ctx, tx, blockHash, blockNum)
block, _, err := cfg.blockReader.BlockWithSenders(ctx, txc.Tx, blockHash, blockNum)
if err != nil {
return err
}
@ -463,11 +464,11 @@ Loop:
writeReceipts := nextStagesExpectData || blockNum > cfg.prune.Receipts.PruneTo(to)
writeCallTraces := nextStagesExpectData || blockNum > cfg.prune.CallTraces.PruneTo(to)
_, isMemoryMutation := tx.(*membatchwithdb.MemoryMutation)
_, isMemoryMutation := txc.Tx.(*membatchwithdb.MemoryMutation)
if cfg.silkworm != nil && !isMemoryMutation {
blockNum, err = silkworm.ExecuteBlocks(cfg.silkworm, tx, cfg.chainConfig.ChainID, blockNum, to, uint64(cfg.batchSize), writeChangeSets, writeReceipts, writeCallTraces)
blockNum, err = silkworm.ExecuteBlocks(cfg.silkworm, txc.Tx, cfg.chainConfig.ChainID, blockNum, to, uint64(cfg.batchSize), writeChangeSets, writeReceipts, writeCallTraces)
} else {
err = executeBlock(block, tx, batch, cfg, *cfg.vmConfig, writeChangeSets, writeReceipts, writeCallTraces, initialCycle, stateStream, logger)
err = executeBlock(block, txc.Tx, batch, cfg, *cfg.vmConfig, writeChangeSets, writeReceipts, writeCallTraces, initialCycle, stateStream, logger)
}
if err != nil {
@ -507,24 +508,24 @@ Loop:
if shouldUpdateProgress {
logger.Info("Committed State", "gas reached", currentStateGas, "gasTarget", gasState)
currentStateGas = 0
if err = batch.Flush(ctx, tx); err != nil {
if err = batch.Flush(ctx, txc.Tx); err != nil {
return err
}
if err = s.Update(tx, stageProgress); err != nil {
if err = s.Update(txc.Tx, stageProgress); err != nil {
return err
}
if !useExternalTx {
if err = tx.Commit(); err != nil {
if err = txc.Tx.Commit(); err != nil {
return err
}
tx, err = cfg.db.BeginRw(context.Background())
txc.Tx, err = cfg.db.BeginRw(context.Background())
if err != nil {
return err
}
// TODO: This creates stacked up deferrals
defer tx.Rollback()
defer txc.Tx.Rollback()
}
batch = membatch.NewHashBatch(tx, quit, cfg.dirs.Tmp, logger)
batch = membatch.NewHashBatch(txc.Tx, quit, cfg.dirs.Tmp, logger)
}
gas = gas + block.GasUsed()
@ -534,25 +535,25 @@ Loop:
case <-logEvery.C:
logBlock, logTx, logTime = logProgress(logPrefix, logBlock, logTime, blockNum, logTx, lastLogTx, gas, float64(currentStateGas)/float64(gasState), batch, logger)
gas = 0
tx.CollectMetrics()
txc.Tx.CollectMetrics()
syncMetrics[stages.Execution].SetUint64(blockNum)
}
}
if err = s.Update(batch, stageProgress); err != nil {
if err = s.Update(txc.Tx, stageProgress); err != nil {
return err
}
if err = batch.Flush(ctx, tx); err != nil {
if err = batch.Flush(ctx, txc.Tx); err != nil {
return fmt.Errorf("batch commit: %w", err)
}
_, err = rawdb.IncrementStateVersion(tx)
_, err = rawdb.IncrementStateVersion(txc.Tx)
if err != nil {
return fmt.Errorf("writing plain state version: %w", err)
}
if !useExternalTx {
if err = tx.Commit(); err != nil {
if err = txc.Tx.Commit(); err != nil {
return err
}
}
@ -673,37 +674,37 @@ func logProgress(logPrefix string, prevBlock uint64, prevTime time.Time, current
return currentBlock, currentTx, currentTime
}
func UnwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) (err error) {
func UnwindExecutionStage(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) (err error) {
if u.UnwindPoint >= s.BlockNumber {
return nil
}
useExternalTx := tx != nil
useExternalTx := txc.Tx != nil
if !useExternalTx {
tx, err = cfg.db.BeginRw(context.Background())
txc.Tx, err = cfg.db.BeginRw(context.Background())
if err != nil {
return err
}
defer tx.Rollback()
defer txc.Tx.Rollback()
}
logPrefix := u.LogPrefix()
logger.Info(fmt.Sprintf("[%s] Unwind Execution", logPrefix), "from", s.BlockNumber, "to", u.UnwindPoint)
if err = unwindExecutionStage(u, s, tx, ctx, cfg, initialCycle, logger); err != nil {
if err = unwindExecutionStage(u, s, txc, ctx, cfg, initialCycle, logger); err != nil {
return err
}
if err = u.Done(tx); err != nil {
if err = u.Done(txc.Tx); err != nil {
return err
}
if !useExternalTx {
if err = tx.Commit(); err != nil {
if err = txc.Tx.Commit(); err != nil {
return err
}
}
return nil
}
func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) error {
func unwindExecutionStage(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) error {
logPrefix := s.LogPrefix()
stateBucket := kv.PlainState
storageKeyLength := length.Addr + length.Incarnation + length.Hash
@ -712,11 +713,11 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context
if !initialCycle && cfg.stateStream && s.BlockNumber-u.UnwindPoint < stateStreamLimit {
accumulator = cfg.accumulator
hash, err := cfg.blockReader.CanonicalHash(ctx, tx, u.UnwindPoint)
hash, err := cfg.blockReader.CanonicalHash(ctx, txc.Tx, u.UnwindPoint)
if err != nil {
return fmt.Errorf("read canonical hash of unwind point: %w", err)
}
txs, err := cfg.blockReader.RawTransactions(ctx, tx, u.UnwindPoint, s.BlockNumber)
txs, err := cfg.blockReader.RawTransactions(ctx, txc.Tx, u.UnwindPoint, s.BlockNumber)
if err != nil {
return err
}
@ -724,17 +725,17 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context
}
if cfg.historyV3 {
return unwindExec3(u, s, tx, ctx, cfg, accumulator, logger)
return unwindExec3(u, s, txc, ctx, cfg, accumulator, logger)
}
changes := etl.NewCollector(logPrefix, cfg.dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger)
defer changes.Close()
errRewind := changeset.RewindData(tx, s.BlockNumber, u.UnwindPoint, changes, ctx.Done())
errRewind := changeset.RewindData(txc.Tx, s.BlockNumber, u.UnwindPoint, changes, ctx.Done())
if errRewind != nil {
return fmt.Errorf("getting rewind data: %w", errRewind)
}
if err := changes.Load(tx, stateBucket, func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error {
if err := changes.Load(txc.Tx, stateBucket, func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error {
if len(k) == 20 {
if len(v) > 0 {
var acc accounts.Account
@ -743,19 +744,19 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context
}
// Fetch the code hash
recoverCodeHashPlain(&acc, tx, k)
recoverCodeHashPlain(&acc, txc.Tx, k)
var address common.Address
copy(address[:], k)
// cleanup contract code bucket
original, err := state.NewPlainStateReader(tx).ReadAccountData(address)
original, err := state.NewPlainStateReader(txc.Tx).ReadAccountData(address)
if err != nil {
return fmt.Errorf("read account for %x: %w", address, err)
}
if original != nil {
// clean up all the code incarnations original incarnation and the new one
for incarnation := original.Incarnation; incarnation > acc.Incarnation && incarnation > 0; incarnation-- {
err = tx.Delete(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], incarnation))
err = txc.Tx.Delete(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], incarnation))
if err != nil {
return fmt.Errorf("writeAccountPlain for %x: %w", address, err)
}
@ -807,23 +808,23 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context
return err
}
if err := historyv2.Truncate(tx, u.UnwindPoint+1); err != nil {
if err := historyv2.Truncate(txc.Tx, u.UnwindPoint+1); err != nil {
return err
}
if err := rawdb.TruncateReceipts(tx, u.UnwindPoint+1); err != nil {
if err := rawdb.TruncateReceipts(txc.Tx, u.UnwindPoint+1); err != nil {
return fmt.Errorf("truncate receipts: %w", err)
}
if err := rawdb.TruncateBorReceipts(tx, u.UnwindPoint+1); err != nil {
if err := rawdb.TruncateBorReceipts(txc.Tx, u.UnwindPoint+1); err != nil {
return fmt.Errorf("truncate bor receipts: %w", err)
}
if err := rawdb.DeleteNewerEpochs(tx, u.UnwindPoint+1); err != nil {
if err := rawdb.DeleteNewerEpochs(txc.Tx, u.UnwindPoint+1); err != nil {
return fmt.Errorf("delete newer epochs: %w", err)
}
// Truncate CallTraceSet
keyStart := hexutility.EncodeTs(u.UnwindPoint + 1)
c, err := tx.RwCursorDupSort(kv.CallTraceSet)
c, err := txc.Tx.RwCursorDupSort(kv.CallTraceSet)
if err != nil {
return err
}
@ -832,7 +833,7 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, ctx context
if err != nil {
return err
}
if err = tx.Delete(kv.CallTraceSet, k); err != nil {
if err = txc.Tx.Delete(kv.CallTraceSet, k); err != nil {
return err
}
}

View File

@ -11,6 +11,8 @@ import (
"github.com/ledgerwatch/log/v3"
"github.com/stretchr/testify/require"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/memdb"
"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
@ -42,7 +44,7 @@ func TestExec(t *testing.T) {
u := &UnwindState{ID: stages.Execution, UnwindPoint: 25}
s := &StageState{ID: stages.Execution, BlockNumber: 50}
err = UnwindExecutionStage(u, s, tx2, ctx, cfg, false, logger)
err = UnwindExecutionStage(u, s, wrap.TxContainer{Tx: tx2}, ctx, cfg, false, logger)
require.NoError(err)
compareCurrentState(t, newAgg(t, logger), tx1, tx2, kv.PlainState, kv.PlainContractCode, kv.ContractTEVMCode)
@ -58,7 +60,7 @@ func TestExec(t *testing.T) {
u := &UnwindState{ID: stages.Execution, UnwindPoint: 25}
s := &StageState{ID: stages.Execution, BlockNumber: 50}
err = UnwindExecutionStage(u, s, tx2, ctx, cfg, false, logger)
err = UnwindExecutionStage(u, s, wrap.TxContainer{Tx: tx2}, ctx, cfg, false, logger)
require.NoError(err)
compareCurrentState(t, newAgg(t, logger), tx1, tx2, kv.PlainState, kv.PlainContractCode)
@ -76,7 +78,7 @@ func TestExec(t *testing.T) {
}
u := &UnwindState{ID: stages.Execution, UnwindPoint: 25}
s := &StageState{ID: stages.Execution, BlockNumber: 50}
err = UnwindExecutionStage(u, s, tx2, ctx, cfg, false, logger)
err = UnwindExecutionStage(u, s, wrap.TxContainer{Tx: tx2}, ctx, cfg, false, logger)
require.NoError(err)
compareCurrentState(t, newAgg(t, logger), tx1, tx2, kv.PlainState, kv.PlainContractCode)
@ -204,7 +206,7 @@ func TestExec22(t *testing.T) {
u := &UnwindState{ID: stages.Execution, UnwindPoint: 25}
s := &StageState{ID: stages.Execution, BlockNumber: 50}
err = UnwindExecutionStage(u, s, tx2, ctx, cfg, false, logger)
err = UnwindExecutionStage(u, s, wrap.TxContainer{Tx: tx2}, ctx, cfg, false, logger)
require.NoError(err)
compareCurrentState(t, agg, tx1, tx2, kv.PlainState, kv.PlainContractCode)
@ -228,7 +230,7 @@ func TestExec22(t *testing.T) {
u := &UnwindState{ID: stages.Execution, UnwindPoint: 25}
s := &StageState{ID: stages.Execution, BlockNumber: 50}
err = UnwindExecutionStage(u, s, tx2, ctx, cfg, false, logger)
err = UnwindExecutionStage(u, s, wrap.TxContainer{Tx: tx2}, ctx, cfg, false, logger)
require.NoError(err)
tx1.ForEach(kv.PlainState, nil, func(k, v []byte) error {

View File

@ -5,6 +5,7 @@ import (
"github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
"github.com/ledgerwatch/log/v3"
@ -30,23 +31,25 @@ func MiningStages(
{
ID: stages.MiningCreateBlock,
Description: "Mining: construct new block from tx pool",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnMiningCreateBlockStage(s, tx, createBlockCfg, ctx.Done(), logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnMiningCreateBlockStage(s, txc.Tx, createBlockCfg, ctx.Done(), logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { return nil },
Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return nil
},
Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
},
{
ID: stages.BorHeimdall,
Description: "Download Bor-specific data from Heimdall",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
if badBlockUnwind {
return nil
}
return BorHeimdallForward(s, u, ctx, tx, borHeimdallCfg, true, logger)
return BorHeimdallForward(s, u, ctx, txc.Tx, borHeimdallCfg, true, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
return BorHeimdallUnwind(u, ctx, s, tx, borHeimdallCfg)
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return BorHeimdallUnwind(u, ctx, s, txc.Tx, borHeimdallCfg)
},
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
return BorHeimdallPrune(p, ctx, tx, borHeimdallCfg)
@ -55,45 +58,53 @@ func MiningStages(
{
ID: stages.MiningExecution,
Description: "Mining: execute new block from tx pool",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
//fmt.Println("SpawnMiningExecStage")
//defer fmt.Println("SpawnMiningExecStage", "DONE")
return SpawnMiningExecStage(s, tx, execCfg, ctx.Done(), logger)
return SpawnMiningExecStage(s, txc.Tx, execCfg, ctx.Done(), logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { return nil },
Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return nil
},
Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
},
{
ID: stages.HashState,
Description: "Hash the key in the state",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnHashStateStage(s, tx, hashStateCfg, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnHashStateStage(s, txc.Tx, hashStateCfg, ctx, logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { return nil },
Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return nil
},
Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
},
{
ID: stages.IntermediateHashes,
Description: "Generate intermediate hashes and computing state root",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
stateRoot, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx, logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
stateRoot, err := SpawnIntermediateHashesStage(s, u, txc.Tx, trieCfg, ctx, logger)
if err != nil {
return err
}
createBlockCfg.miner.MiningBlock.Header.Root = stateRoot
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { return nil },
Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return nil
},
Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
},
{
ID: stages.MiningFinish,
Description: "Mining: create and propagate valid block",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
return SpawnMiningFinishStage(s, tx, finish, ctx.Done(), logger)
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
return SpawnMiningFinishStage(s, txc.Tx, finish, ctx.Done(), logger)
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error { return nil },
Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
return nil
},
Prune: func(firstCycle bool, u *PruneState, tx kv.RwTx, logger log.Logger) error { return nil },
},
}
}

View File

@ -21,6 +21,7 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/memdb"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/consensus/bor"
"github.com/ledgerwatch/erigon/consensus/bor/clerk"
@ -216,7 +217,7 @@ func (h *Harness) RunStageForwardWithReturnError(t *testing.T, id stages.SyncSta
stageState, err := h.stateSync.StageState(id, nil, h.chainDataDB)
require.NoError(t, err)
return stage.Forward(true, false, stageState, h.stateSync, nil, h.logger)
return stage.Forward(true, false, stageState, h.stateSync, wrap.TxContainer{}, h.logger)
}
func (h *Harness) ReadSpansFromDB(ctx context.Context) (spans []*span.HeimdallSpan, err error) {

View File

@ -11,6 +11,8 @@ import (
"github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon-lib/diagnostics"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
)
@ -236,7 +238,7 @@ func (s *Sync) StageState(stage stages.SyncStage, tx kv.Tx, db kv.RoDB) (*StageS
return &StageState{s, stage, blockNum}, nil
}
func (s *Sync) RunUnwind(db kv.RwDB, tx kv.RwTx) error {
func (s *Sync) RunUnwind(db kv.RwDB, txc wrap.TxContainer) error {
if s.unwindPoint == nil {
return nil
}
@ -244,7 +246,7 @@ func (s *Sync) RunUnwind(db kv.RwDB, tx kv.RwTx) error {
if s.unwindOrder[j] == nil || s.unwindOrder[j].Disabled || s.unwindOrder[j].Unwind == nil {
continue
}
if err := s.unwindStage(false, s.unwindOrder[j], db, tx); err != nil {
if err := s.unwindStage(false, s.unwindOrder[j], db, txc); err != nil {
return err
}
}
@ -257,7 +259,7 @@ func (s *Sync) RunUnwind(db kv.RwDB, tx kv.RwTx) error {
return nil
}
func (s *Sync) RunNoInterrupt(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
func (s *Sync) RunNoInterrupt(db kv.RwDB, txc wrap.TxContainer, firstCycle bool) error {
s.prevUnwindPoint = nil
s.timings = s.timings[:0]
@ -268,7 +270,7 @@ func (s *Sync) RunNoInterrupt(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
if s.unwindOrder[j] == nil || s.unwindOrder[j].Disabled || s.unwindOrder[j].Unwind == nil {
continue
}
if err := s.unwindStage(firstCycle, s.unwindOrder[j], db, tx); err != nil {
if err := s.unwindStage(firstCycle, s.unwindOrder[j], db, txc); err != nil {
return err
}
}
@ -300,7 +302,7 @@ func (s *Sync) RunNoInterrupt(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
continue
}
if err := s.runStage(stage, db, tx, firstCycle, badBlockUnwind); err != nil {
if err := s.runStage(stage, db, txc, firstCycle, badBlockUnwind); err != nil {
return err
}
@ -325,7 +327,7 @@ func (s *Sync) RunNoInterrupt(db kv.RwDB, tx kv.RwTx, firstCycle bool) error {
return nil
}
func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) (bool, error) {
func (s *Sync) Run(db kv.RwDB, txc wrap.TxContainer, firstCycle bool) (bool, error) {
s.prevUnwindPoint = nil
s.timings = s.timings[:0]
@ -338,7 +340,7 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) (bool, error) {
if s.unwindOrder[j] == nil || s.unwindOrder[j].Disabled || s.unwindOrder[j].Unwind == nil {
continue
}
if err := s.unwindStage(firstCycle, s.unwindOrder[j], db, tx); err != nil {
if err := s.unwindStage(firstCycle, s.unwindOrder[j], db, txc); err != nil {
return false, err
}
}
@ -375,7 +377,7 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) (bool, error) {
continue
}
if err := s.runStage(stage, db, tx, firstCycle, badBlockUnwind); err != nil {
if err := s.runStage(stage, db, txc, firstCycle, badBlockUnwind); err != nil {
return false, err
}
@ -387,7 +389,7 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) (bool, error) {
if string(stage.ID) == s.cfg.BreakAfterStage { // break process loop
s.logger.Warn("--sync.loop.break.after caused stage break")
if s.posTransition != nil {
ptx := tx
ptx := txc.Tx
if ptx == nil {
if tx, err := db.BeginRw(context.Background()); err == nil {
@ -490,14 +492,14 @@ func PrintTables(db kv.RoDB, tx kv.RwTx) []interface{} {
return bucketSizes
}
func (s *Sync) runStage(stage *Stage, db kv.RwDB, tx kv.RwTx, firstCycle bool, badBlockUnwind bool) (err error) {
func (s *Sync) runStage(stage *Stage, db kv.RwDB, txc wrap.TxContainer, firstCycle bool, badBlockUnwind bool) (err error) {
start := time.Now()
stageState, err := s.StageState(stage.ID, tx, db)
stageState, err := s.StageState(stage.ID, txc.Tx, db)
if err != nil {
return err
}
if err = stage.Forward(firstCycle, badBlockUnwind, stageState, s, tx, s.logger); err != nil {
if err = stage.Forward(firstCycle, badBlockUnwind, stageState, s, txc, s.logger); err != nil {
wrappedError := fmt.Errorf("[%s] %w", s.LogPrefix(), err)
s.logger.Debug("Error while executing stage", "err", wrappedError)
return wrappedError
@ -514,10 +516,10 @@ func (s *Sync) runStage(stage *Stage, db kv.RwDB, tx kv.RwTx, firstCycle bool, b
return nil
}
func (s *Sync) unwindStage(firstCycle bool, stage *Stage, db kv.RwDB, tx kv.RwTx) error {
func (s *Sync) unwindStage(firstCycle bool, stage *Stage, db kv.RwDB, txc wrap.TxContainer) error {
start := time.Now()
s.logger.Trace("Unwind...", "stage", stage.ID)
stageState, err := s.StageState(stage.ID, tx, db)
stageState, err := s.StageState(stage.ID, txc.Tx, db)
if err != nil {
return err
}
@ -533,7 +535,7 @@ func (s *Sync) unwindStage(firstCycle bool, stage *Stage, db kv.RwDB, tx kv.RwTx
return err
}
err = stage.Unwind(firstCycle, unwind, stageState, tx, s.logger)
err = stage.Unwind(firstCycle, unwind, stageState, txc, s.logger)
if err != nil {
return fmt.Errorf("[%s] %w", s.LogPrefix(), err)
}

View File

@ -5,8 +5,8 @@ import (
"fmt"
"testing"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/memdb"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/log/v3"
"github.com/stretchr/testify/assert"
@ -20,7 +20,7 @@ func TestStagesSuccess(t *testing.T) {
{
ID: stages.Headers,
Description: "Downloading headers",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Headers)
return nil
},
@ -28,7 +28,7 @@ func TestStagesSuccess(t *testing.T) {
{
ID: stages.Bodies,
Description: "Downloading block bodiess",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Bodies)
return nil
},
@ -36,7 +36,7 @@ func TestStagesSuccess(t *testing.T) {
{
ID: stages.Senders,
Description: "Recovering senders from tx signatures",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Senders)
return nil
},
@ -44,7 +44,7 @@ func TestStagesSuccess(t *testing.T) {
}
state := New(ethconfig.Defaults.Sync, s, nil, nil, log.New())
db, tx := memdb.NewTestTx(t)
_, err := state.Run(db, tx, true /* initialCycle */)
_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
assert.NoError(t, err)
expectedFlow := []stages.SyncStage{
@ -59,7 +59,7 @@ func TestDisabledStages(t *testing.T) {
{
ID: stages.Headers,
Description: "Downloading headers",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Headers)
return nil
},
@ -67,7 +67,7 @@ func TestDisabledStages(t *testing.T) {
{
ID: stages.Bodies,
Description: "Downloading block bodiess",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Bodies)
return nil
},
@ -76,7 +76,7 @@ func TestDisabledStages(t *testing.T) {
{
ID: stages.Senders,
Description: "Recovering senders from tx signatures",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Senders)
return nil
},
@ -84,7 +84,7 @@ func TestDisabledStages(t *testing.T) {
}
state := New(ethconfig.Defaults.Sync, s, nil, nil, log.New())
db, tx := memdb.NewTestTx(t)
_, err := state.Run(db, tx, true /* initialCycle */)
_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
assert.NoError(t, err)
expectedFlow := []stages.SyncStage{
@ -100,7 +100,7 @@ func TestErroredStage(t *testing.T) {
{
ID: stages.Headers,
Description: "Downloading headers",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Headers)
return nil
},
@ -108,7 +108,7 @@ func TestErroredStage(t *testing.T) {
{
ID: stages.Bodies,
Description: "Downloading block bodiess",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Bodies)
return expectedErr
},
@ -116,7 +116,7 @@ func TestErroredStage(t *testing.T) {
{
ID: stages.Senders,
Description: "Recovering senders from tx signatures",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Senders)
return nil
},
@ -124,7 +124,7 @@ func TestErroredStage(t *testing.T) {
}
state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
db, tx := memdb.NewTestTx(t)
_, err := state.Run(db, tx, true /* initialCycle */)
_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
assert.Equal(t, fmt.Errorf("[2/3 Bodies] %w", expectedErr), err)
expectedFlow := []stages.SyncStage{
@ -140,39 +140,39 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) {
{
ID: stages.Headers,
Description: "Downloading headers",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Headers)
if s.BlockNumber == 0 {
return s.Update(tx, 2000)
return s.Update(txc.Tx, 2000)
}
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, unwindOf(stages.Headers))
return u.Done(tx)
return u.Done(txc.Tx)
},
},
{
ID: stages.Bodies,
Description: "Downloading block bodiess",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Bodies)
if s.BlockNumber == 0 {
return s.Update(tx, 1000)
return s.Update(txc.Tx, 1000)
}
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, unwindOf(stages.Bodies))
return u.Done(tx)
return u.Done(txc.Tx)
},
},
{
ID: stages.Senders,
Description: "Recovering senders from tx signatures",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
if s.BlockNumber == 0 {
if err := s.Update(tx, 1700); err != nil {
if err := s.Update(txc.Tx, 1700); err != nil {
return err
}
}
@ -184,30 +184,30 @@ func TestUnwindSomeStagesBehindUnwindPoint(t *testing.T) {
}
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, unwindOf(stages.Senders))
return u.Done(tx)
return u.Done(txc.Tx)
},
},
{
ID: stages.IntermediateHashes,
Disabled: true,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.IntermediateHashes)
if s.BlockNumber == 0 {
return s.Update(tx, 2000)
return s.Update(txc.Tx, 2000)
}
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, unwindOf(stages.IntermediateHashes))
return u.Done(tx)
return u.Done(txc.Tx)
},
},
}
state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
db, tx := memdb.NewTestTx(t)
_, err := state.Run(db, tx, true /* initialCycle */)
_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
assert.NoError(t, err)
expectedFlow := []stages.SyncStage{
@ -238,69 +238,69 @@ func TestUnwind(t *testing.T) {
{
ID: stages.Headers,
Description: "Downloading headers",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Headers)
if s.BlockNumber == 0 {
return s.Update(tx, 2000)
return s.Update(txc.Tx, 2000)
}
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, unwindOf(stages.Headers))
return u.Done(tx)
return u.Done(txc.Tx)
},
},
{
ID: stages.Bodies,
Description: "Downloading block bodiess",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Bodies)
if s.BlockNumber == 0 {
return s.Update(tx, 2000)
return s.Update(txc.Tx, 2000)
}
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, unwindOf(stages.Bodies))
return u.Done(tx)
return u.Done(txc.Tx)
},
},
{
ID: stages.Senders,
Description: "Recovering senders from tx signatures",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Senders)
if !unwound {
unwound = true
u.UnwindTo(500, UnwindReason{})
return s.Update(tx, 3000)
return s.Update(txc.Tx, 3000)
}
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, unwindOf(stages.Senders))
return u.Done(tx)
return u.Done(txc.Tx)
},
},
{
ID: stages.IntermediateHashes,
Disabled: true,
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.IntermediateHashes)
if s.BlockNumber == 0 {
return s.Update(tx, 2000)
return s.Update(txc.Tx, 2000)
}
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, unwindOf(stages.IntermediateHashes))
return u.Done(tx)
return u.Done(txc.Tx)
},
},
}
state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[3].ID, s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
db, tx := memdb.NewTestTx(t)
_, err := state.Run(db, tx, true /* initialCycle */)
_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
assert.NoError(t, err)
expectedFlow := []stages.SyncStage{
@ -327,7 +327,7 @@ func TestUnwind(t *testing.T) {
flow = flow[:0]
state.unwindOrder = []*Stage{s[3], s[2], s[1], s[0]}
state.UnwindTo(100, UnwindReason{})
_, err = state.Run(db, tx, true /* initialCycle */)
_, err = state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
assert.NoError(t, err)
expectedFlow = []stages.SyncStage{
@ -346,25 +346,25 @@ func TestUnwindEmptyUnwinder(t *testing.T) {
{
ID: stages.Headers,
Description: "Downloading headers",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Headers)
if s.BlockNumber == 0 {
return s.Update(tx, 2000)
return s.Update(txc.Tx, 2000)
}
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, unwindOf(stages.Headers))
return u.Done(tx)
return u.Done(txc.Tx)
},
},
{
ID: stages.Bodies,
Description: "Downloading block bodiess",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Bodies)
if s.BlockNumber == 0 {
return s.Update(tx, 2000)
return s.Update(txc.Tx, 2000)
}
return nil
},
@ -372,24 +372,24 @@ func TestUnwindEmptyUnwinder(t *testing.T) {
{
ID: stages.Senders,
Description: "Recovering senders from tx signatures",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Senders)
if !unwound {
unwound = true
u.UnwindTo(500, UnwindReason{})
return s.Update(tx, 3000)
return s.Update(txc.Tx, 3000)
}
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, unwindOf(stages.Senders))
return u.Done(tx)
return u.Done(txc.Tx)
},
},
}
state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
db, tx := memdb.NewTestTx(t)
_, err := state.Run(db, tx, true /* initialCycle */)
_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
assert.NoError(t, err)
expectedFlow := []stages.SyncStage{
@ -420,36 +420,36 @@ func TestSyncDoTwice(t *testing.T) {
{
ID: stages.Headers,
Description: "Downloading headers",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Headers)
return s.Update(tx, s.BlockNumber+100)
return s.Update(txc.Tx, s.BlockNumber+100)
},
},
{
ID: stages.Bodies,
Description: "Downloading block bodiess",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Bodies)
return s.Update(tx, s.BlockNumber+200)
return s.Update(txc.Tx, s.BlockNumber+200)
},
},
{
ID: stages.Senders,
Description: "Recovering senders from tx signatures",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Senders)
return s.Update(tx, s.BlockNumber+300)
return s.Update(txc.Tx, s.BlockNumber+300)
},
},
}
state := New(ethconfig.Defaults.Sync, s, nil, nil, log.New())
db, tx := memdb.NewTestTx(t)
_, err := state.Run(db, tx, true /* initialCycle */)
_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
assert.NoError(t, err)
state = New(ethconfig.Defaults.Sync, s, nil, nil, log.New())
_, err = state.Run(db, tx, true /* initialCycle */)
_, err = state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
assert.NoError(t, err)
expectedFlow := []stages.SyncStage{
@ -478,7 +478,7 @@ func TestStateSyncInterruptRestart(t *testing.T) {
{
ID: stages.Headers,
Description: "Downloading headers",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Headers)
return nil
},
@ -486,7 +486,7 @@ func TestStateSyncInterruptRestart(t *testing.T) {
{
ID: stages.Bodies,
Description: "Downloading block bodiess",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Bodies)
return expectedErr
},
@ -494,7 +494,7 @@ func TestStateSyncInterruptRestart(t *testing.T) {
{
ID: stages.Senders,
Description: "Recovering senders from tx signatures",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Senders)
return nil
},
@ -503,13 +503,13 @@ func TestStateSyncInterruptRestart(t *testing.T) {
state := New(ethconfig.Defaults.Sync, s, nil, nil, log.New())
db, tx := memdb.NewTestTx(t)
_, err := state.Run(db, tx, true /* initialCycle */)
_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
assert.Equal(t, fmt.Errorf("[2/3 Bodies] %w", expectedErr), err)
expectedErr = nil
state = New(ethconfig.Defaults.Sync, s, nil, nil, log.New())
_, err = state.Run(db, tx, true /* initialCycle */)
_, err = state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
assert.NoError(t, err)
expectedFlow := []stages.SyncStage{
@ -530,59 +530,59 @@ func TestSyncInterruptLongUnwind(t *testing.T) {
{
ID: stages.Headers,
Description: "Downloading headers",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Headers)
if s.BlockNumber == 0 {
return s.Update(tx, 2000)
return s.Update(txc.Tx, 2000)
}
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, unwindOf(stages.Headers))
return u.Done(tx)
return u.Done(txc.Tx)
},
},
{
ID: stages.Bodies,
Description: "Downloading block bodiess",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Bodies)
if s.BlockNumber == 0 {
return s.Update(tx, 2000)
return s.Update(txc.Tx, 2000)
}
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, unwindOf(stages.Bodies))
return u.Done(tx)
return u.Done(txc.Tx)
},
},
{
ID: stages.Senders,
Description: "Recovering senders from tx signatures",
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx, logger log.Logger) error {
Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, stages.Senders)
if !unwound {
unwound = true
u.UnwindTo(500, UnwindReason{})
return s.Update(tx, 3000)
return s.Update(txc.Tx, 3000)
}
return nil
},
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx, logger log.Logger) error {
Unwind: func(firstCycle bool, u *UnwindState, s *StageState, txc wrap.TxContainer, logger log.Logger) error {
flow = append(flow, unwindOf(stages.Senders))
if !interrupted {
interrupted = true
return errInterrupted
}
assert.Equal(t, 500, int(u.UnwindPoint))
return u.Done(tx)
return u.Done(txc.Tx)
},
},
}
state := New(ethconfig.Defaults.Sync, s, []stages.SyncStage{s[2].ID, s[1].ID, s[0].ID}, nil, log.New())
db, tx := memdb.NewTestTx(t)
_, err := state.Run(db, tx, true /* initialCycle */)
_, err := state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
assert.Error(t, errInterrupted, err)
//state = NewState(s)
@ -590,7 +590,7 @@ func TestSyncInterruptLongUnwind(t *testing.T) {
//err = state.LoadUnwindInfo(tx)
//assert.NoError(t, err)
//state.UnwindTo(500, libcommon.Hash{})
_, err = state.Run(db, tx, true /* initialCycle */)
_, err = state.Run(db, wrap.TxContainer{Tx: tx}, true /* initialCycle */)
assert.NoError(t, err)
expectedFlow := []stages.SyncStage{

View File

@ -12,6 +12,7 @@ import (
"syscall"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon/turbo/services"
"github.com/ledgerwatch/log/v3"
"github.com/urfave/cli/v2"
@ -221,7 +222,7 @@ func InsertChain(ethereum *eth.Ethereum, chain *core.ChainPack, logger log.Logge
blockReader, _ := ethereum.BlockIO()
hook := stages.NewHook(ethereum.SentryCtx(), ethereum.ChainDB(), ethereum.Notifications(), ethereum.StagedSync(), blockReader, ethereum.ChainConfig(), logger, sentryControlServer.UpdateHead)
err := stages.StageLoopIteration(ethereum.SentryCtx(), ethereum.ChainDB(), nil, ethereum.StagedSync(), initialCycle, logger, blockReader, hook, false)
err := stages.StageLoopIteration(ethereum.SentryCtx(), ethereum.ChainDB(), wrap.TxContainer{}, ethereum.StagedSync(), initialCycle, logger, blockReader, hook, false)
if err != nil {
return err
}

View File

@ -24,13 +24,14 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/membatchwithdb"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon/common/math"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
"github.com/ledgerwatch/erigon/turbo/engineapi/engine_types"
"github.com/ledgerwatch/erigon/turbo/services"
"github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon/common/math"
"github.com/ledgerwatch/erigon/core/rawdb"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/turbo/shards"
@ -39,7 +40,7 @@ import (
// the maximum point from the current head, past which side forks are not validated anymore.
const maxForkDepth = 32 // 32 slots is the duration of an epoch thus there cannot be side forks in PoS deeper than 32 blocks from head.
type validatePayloadFunc func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody, *shards.Notifications) error
type validatePayloadFunc func(wrap.TxContainer, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody, *shards.Notifications) error
type ForkValidator struct {
// current memory batch containing chain head that extend canonical fork.
@ -152,6 +153,8 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t
if extendCanonical {
extendingFork := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir, logger)
defer extendingFork.Close()
var txc wrap.TxContainer
txc.Tx = extendingFork
fv.extendingForkNotifications = &shards.Notifications{
Events: shards.NewEvents(),
@ -160,7 +163,7 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t
// Update fork head hash.
fv.extendingForkHeadHash = header.Hash()
fv.extendingForkNumber = header.Number.Uint64()
status, latestValidHash, validationError, criticalError = fv.validateAndStorePayload(extendingFork, header, body, 0, nil, nil, fv.extendingForkNotifications)
status, latestValidHash, validationError, criticalError = fv.validateAndStorePayload(txc, header, body, 0, nil, nil, fv.extendingForkNotifications)
if criticalError != nil {
return
}
@ -231,11 +234,13 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t
}
batch := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir, logger)
defer batch.Rollback()
var txc wrap.TxContainer
txc.Tx = batch
notifications := &shards.Notifications{
Events: shards.NewEvents(),
Accumulator: shards.NewAccumulator(),
}
return fv.validateAndStorePayload(batch, header, body, unwindPoint, headersChain, bodiesChain, notifications)
return fv.validateAndStorePayload(txc, header, body, unwindPoint, headersChain, bodiesChain, notifications)
}
// Clear wipes out current extending fork data, this method is called after fcu is called,
@ -255,9 +260,9 @@ func (fv *ForkValidator) ClearWithUnwind(accumulator *shards.Accumulator, c shar
}
// validateAndStorePayload validate and store a payload fork chain if such chain results valid.
func (fv *ForkValidator) validateAndStorePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody,
func (fv *ForkValidator) validateAndStorePayload(txc wrap.TxContainer, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody,
notifications *shards.Notifications) (status engine_types.EngineStatus, latestValidHash libcommon.Hash, validationError error, criticalError error) {
if err := fv.validatePayload(tx, header, body, unwindPoint, headersChain, bodiesChain, notifications); err != nil {
if err := fv.validatePayload(txc, header, body, unwindPoint, headersChain, bodiesChain, notifications); err != nil {
if errors.Is(err, consensus.ErrInvalidBlock) {
validationError = err
} else {
@ -269,11 +274,11 @@ func (fv *ForkValidator) validateAndStorePayload(tx kv.RwTx, header *types.Heade
latestValidHash = header.Hash()
if validationError != nil {
var latestValidNumber uint64
latestValidNumber, criticalError = stages.GetStageProgress(tx, stages.IntermediateHashes)
latestValidNumber, criticalError = stages.GetStageProgress(txc.Tx, stages.IntermediateHashes)
if criticalError != nil {
return
}
latestValidHash, criticalError = rawdb.ReadCanonicalHash(tx, latestValidNumber)
latestValidHash, criticalError = rawdb.ReadCanonicalHash(txc.Tx, latestValidNumber)
if criticalError != nil {
return
}
@ -287,7 +292,7 @@ func (fv *ForkValidator) validateAndStorePayload(tx kv.RwTx, header *types.Heade
// If we do not have the body we can recover it from the batch.
if body != nil {
if _, criticalError = rawdb.WriteRawBodyIfNotExists(tx, header.Hash(), header.Number.Uint64(), body); criticalError != nil {
if _, criticalError = rawdb.WriteRawBodyIfNotExists(txc.Tx, header.Hash(), header.Number.Uint64(), body); criticalError != nil {
return
}
}

View File

@ -9,6 +9,7 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/gointerfaces"
"github.com/ledgerwatch/erigon-lib/gointerfaces/execution"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/log/v3"
"golang.org/x/sync/semaphore"
"google.golang.org/protobuf/types/known/emptypb"
@ -235,7 +236,7 @@ func (e *EthereumExecutionModule) Start(ctx context.Context) {
for more {
var err error
if more, err = e.executionPipeline.Run(e.db, nil, true); err != nil {
if more, err = e.executionPipeline.Run(e.db, wrap.TxContainer{}, true); err != nil {
if !errors.Is(err, context.Canceled) {
e.logger.Error("Could not start execution service", "err", err)
}

View File

@ -10,6 +10,7 @@ import (
"github.com/ledgerwatch/erigon-lib/gointerfaces/execution"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon/core/rawdb"
"github.com/ledgerwatch/erigon/eth/stagedsync"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
@ -234,7 +235,7 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, blockHas
}
// Run the unwind
if err := e.executionPipeline.RunUnwind(e.db, tx); err != nil {
if err := e.executionPipeline.RunUnwind(e.db, wrap.TxContainer{Tx: tx}); err != nil {
err = fmt.Errorf("updateForkChoice: %w", err)
sendForkchoiceErrorWithoutWaiting(outcomeCh, err)
return
@ -305,7 +306,7 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, blockHas
}
}
// Run the forkchoice
if _, err := e.executionPipeline.Run(e.db, tx, false); err != nil {
if _, err := e.executionPipeline.Run(e.db, wrap.TxContainer{Tx: tx}, false); err != nil {
err = fmt.Errorf("updateForkChoice: %w", err)
sendForkchoiceErrorWithoutWaiting(outcomeCh, err)
return

View File

@ -29,8 +29,9 @@ func (e *EthereumExecutionModule) InsertBlocks(ctx context.Context, req *executi
return nil, fmt.Errorf("ethereumExecutionModule.InsertBlocks: cannot convert headers: %s", err)
}
body := eth1_utils.ConvertRawBlockBodyFromRpc(block.Body)
height := header.Number.Uint64()
// Parent's total difficulty
parentTd, err := rawdb.ReadTd(tx, header.ParentHash, header.Number.Uint64()-1)
parentTd, err := rawdb.ReadTd(tx, header.ParentHash, height-1)
if err != nil || parentTd == nil {
return nil, fmt.Errorf("parent's total difficulty not found with hash %x and height %d: %v", header.ParentHash, header.Number.Uint64()-1, err)
}
@ -38,13 +39,13 @@ func (e *EthereumExecutionModule) InsertBlocks(ctx context.Context, req *executi
// Sum TDs.
td := parentTd.Add(parentTd, header.Difficulty)
if err := rawdb.WriteHeader(tx, header); err != nil {
return nil, fmt.Errorf("ethereumExecutionModule.InsertHeaders: could not insert: %s", err)
return nil, fmt.Errorf("ethereumExecutionModule.InsertHeaders: writeHeader: %s", err)
}
if err := rawdb.WriteTd(tx, header.Hash(), header.Number.Uint64(), td); err != nil {
return nil, fmt.Errorf("ethereumExecutionModule.InsertHeaders: could not insert: %s", err)
if err := rawdb.WriteTd(tx, header.Hash(), height, td); err != nil {
return nil, fmt.Errorf("ethereumExecutionModule.InsertHeaders: writeTd: %s", err)
}
if _, err := rawdb.WriteRawBodyIfNotExists(tx, header.Hash(), header.Number.Uint64(), body); err != nil {
return nil, fmt.Errorf("ethereumExecutionModule.InsertBlocks: could not insert: %s", err)
if _, err := rawdb.WriteRawBodyIfNotExists(tx, header.Hash(), height, body); err != nil {
return nil, fmt.Errorf("ethereumExecutionModule.InsertBlocks: writeBody: %s", err)
}
}
if err := tx.Commit(); err != nil {

View File

@ -8,6 +8,7 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/direct"
"github.com/ledgerwatch/erigon-lib/gointerfaces/sentry"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/stretchr/testify/require"
"github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices"
@ -55,7 +56,7 @@ func TestEthSubscribe(t *testing.T) {
highestSeenHeader := chain.TopBlock.NumberU64()
hook := stages.NewHook(m.Ctx, m.DB, m.Notifications, m.Sync, m.BlockReader, m.ChainConfig, m.Log, m.UpdateHead)
if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, logger, m.BlockReader, hook, false); err != nil {
if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, logger, m.BlockReader, hook, false); err != nil {
t.Fatal(err)
}

View File

@ -10,6 +10,7 @@ import (
"github.com/holiman/uint256"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon-lib/gointerfaces/sentry"
"github.com/ledgerwatch/erigon-lib/gointerfaces/txpool"
@ -70,7 +71,7 @@ func oneBlockStep(mockSentry *mock.MockSentry, require *require.Assertions, t *t
mockSentry.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed
initialCycle := mock.MockInsertAsInitialCycle
if err := stages.StageLoopIteration(mockSentry.Ctx, mockSentry.DB, nil, mockSentry.Sync, initialCycle, log.New(), mockSentry.BlockReader, nil, false); err != nil {
if err := stages.StageLoopIteration(mockSentry.Ctx, mockSentry.DB, wrap.TxContainer{}, mockSentry.Sync, initialCycle, log.New(), mockSentry.BlockReader, nil, false); err != nil {
t.Fatal(err)
}
}

View File

@ -33,6 +33,7 @@ import (
"github.com/ledgerwatch/erigon-lib/txpool"
"github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg"
types2 "github.com/ledgerwatch/erigon-lib/types"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/consensus/bor"
@ -343,20 +344,20 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
}
latestBlockBuiltStore := builder.NewLatestBlockBuiltStore()
inMemoryExecution := func(batch kv.RwTx, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody,
inMemoryExecution := func(txc wrap.TxContainer, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody,
notifications *shards.Notifications) error {
terseLogger := log.New()
terseLogger.SetHandler(log.LvlFilterHandler(log.LvlWarn, log.StderrHandler))
// Needs its own notifications to not update RPC daemon and txpool about pending blocks
stateSync := stages2.NewInMemoryExecution(mock.Ctx, mock.DB, &cfg, mock.sentriesClient,
dirs, notifications, mock.BlockReader, blockWriter, mock.agg, nil, terseLogger)
chainReader := stagedsync.NewChainReaderImpl(mock.ChainConfig, batch, mock.BlockReader, logger)
chainReader := stagedsync.NewChainReaderImpl(mock.ChainConfig, txc.Tx, mock.BlockReader, logger)
// We start the mining step
if err := stages2.StateStep(ctx, chainReader, mock.Engine, batch, blockWriter, stateSync, mock.sentriesClient.Bd, header, body, unwindPoint, headersChain, bodiesChain, histV3); err != nil {
if err := stages2.StateStep(ctx, chainReader, mock.Engine, txc, blockWriter, stateSync, mock.sentriesClient.Bd, header, body, unwindPoint, headersChain, bodiesChain, histV3); err != nil {
logger.Warn("Could not validate block", "err", err)
return err
}
progress, err := stages.GetStageProgress(batch, stages.IntermediateHashes)
progress, err := stages.GetStageProgress(txc.Tx, stages.IntermediateHashes)
if err != nil {
return err
}
@ -664,7 +665,7 @@ func (ms *MockSentry) insertPoWBlocks(chain *core.ChainPack) error {
initialCycle := MockInsertAsInitialCycle
hook := stages2.NewHook(ms.Ctx, ms.DB, ms.Notifications, ms.Sync, ms.BlockReader, ms.ChainConfig, ms.Log, ms.UpdateHead)
if err = stages2.StageLoopIteration(ms.Ctx, ms.DB, nil, ms.Sync, initialCycle, ms.Log, ms.BlockReader, hook, false); err != nil {
if err = stages2.StageLoopIteration(ms.Ctx, ms.DB, wrap.TxContainer{}, ms.Sync, initialCycle, ms.Log, ms.BlockReader, hook, false); err != nil {
return err
}
if ms.TxPool != nil {

View File

@ -7,6 +7,7 @@ import (
"github.com/holiman/uint256"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/gointerfaces/sentry"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/log/v3"
"github.com/stretchr/testify/require"
@ -58,7 +59,7 @@ func TestHeaderStep(t *testing.T) {
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed
initialCycle := mock.MockInsertAsInitialCycle
if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil {
if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil {
t.Fatal(err)
}
}
@ -97,7 +98,7 @@ func TestMineBlockWith1Tx(t *testing.T) {
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed
initialCycle := mock.MockInsertAsInitialCycle
if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, log.New(), m.BlockReader, nil, false); err != nil {
if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, log.New(), m.BlockReader, nil, false); err != nil {
t.Fatal(err)
}
}
@ -166,7 +167,7 @@ func TestReorg(t *testing.T) {
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed
initialCycle := mock.MockInsertAsInitialCycle
if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil {
if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil {
t.Fatal(err)
}
@ -219,7 +220,7 @@ func TestReorg(t *testing.T) {
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed
initialCycle = false
if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil {
if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil {
t.Fatal(err)
}
@ -262,7 +263,7 @@ func TestReorg(t *testing.T) {
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed
// This is unwind step
if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil {
if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil {
t.Fatal(err)
}
@ -299,7 +300,7 @@ func TestReorg(t *testing.T) {
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed
initialCycle = mock.MockInsertAsInitialCycle
if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil {
if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil {
t.Fatal(err)
}
}
@ -396,7 +397,7 @@ func TestAnchorReplace(t *testing.T) {
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed
initialCycle := mock.MockInsertAsInitialCycle
if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil {
if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, m.Log, m.BlockReader, nil, false); err != nil {
t.Fatal(err)
}
}
@ -502,7 +503,7 @@ func TestAnchorReplace2(t *testing.T) {
initialCycle := mock.MockInsertAsInitialCycle
hook := stages.NewHook(m.Ctx, m.DB, m.Notifications, m.Sync, m.BlockReader, m.ChainConfig, m.Log, m.UpdateHead)
if err := stages.StageLoopIteration(m.Ctx, m.DB, nil, m.Sync, initialCycle, m.Log, m.BlockReader, hook, false); err != nil {
if err := stages.StageLoopIteration(m.Ctx, m.DB, wrap.TxContainer{}, m.Sync, initialCycle, m.Log, m.BlockReader, hook, false); err != nil {
t.Fatal(err)
}
}

View File

@ -19,6 +19,7 @@ import (
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/membatchwithdb"
"github.com/ledgerwatch/erigon-lib/state"
"github.com/ledgerwatch/erigon-lib/wrap"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/consensus/bor"
@ -68,7 +69,7 @@ func StageLoop(ctx context.Context,
}
// Estimate the current top height seen from the peer
err := StageLoopIteration(ctx, db, nil, sync, initialCycle, logger, blockReader, hook, forcePartialCommit)
err := StageLoopIteration(ctx, db, wrap.TxContainer{}, sync, initialCycle, logger, blockReader, hook, forcePartialCommit)
if err != nil {
if errors.Is(err, libcommon.ErrStopped) || errors.Is(err, context.Canceled) {
@ -99,15 +100,15 @@ func StageLoop(ctx context.Context,
}
}
func StageLoopIteration(ctx context.Context, db kv.RwDB, tx kv.RwTx, sync *stagedsync.Sync, initialCycle bool, logger log.Logger, blockReader services.FullBlockReader, hook *Hook, forcePartialCommit bool) (err error) {
func StageLoopIteration(ctx context.Context, db kv.RwDB, txc wrap.TxContainer, sync *stagedsync.Sync, initialCycle bool, logger log.Logger, blockReader services.FullBlockReader, hook *Hook, forcePartialCommit bool) (err error) {
defer func() {
if rec := recover(); rec != nil {
err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack())
}
}() // avoid crash because Erigon's core does many things
externalTx := tx != nil
finishProgressBefore, borProgressBefore, headersProgressBefore, err := stagesHeadersAndFinish(db, tx)
externalTx := txc.Tx != nil
finishProgressBefore, borProgressBefore, headersProgressBefore, err := stagesHeadersAndFinish(db, txc.Tx)
if err != nil {
return err
}
@ -134,19 +135,19 @@ func StageLoopIteration(ctx context.Context, db kv.RwDB, tx kv.RwTx, sync *stage
// - Prune(limited time)+Commit(sync). Write to disk happening here.
if canRunCycleInOneTransaction && !externalTx {
tx, err = db.BeginRwNosync(ctx)
txc.Tx, err = db.BeginRwNosync(ctx)
if err != nil {
return err
}
defer tx.Rollback()
defer txc.Tx.Rollback()
}
if hook != nil {
if err = hook.BeforeRun(tx, isSynced); err != nil {
if err = hook.BeforeRun(txc.Tx, isSynced); err != nil {
return err
}
}
_, err = sync.Run(db, tx, initialCycle)
_, err = sync.Run(db, txc, initialCycle)
if err != nil {
return err
}
@ -154,10 +155,10 @@ func StageLoopIteration(ctx context.Context, db kv.RwDB, tx kv.RwTx, sync *stage
var tableSizes []interface{}
var commitTime time.Duration
if canRunCycleInOneTransaction && !externalTx {
tableSizes = stagedsync.PrintTables(db, tx) // Need to do this before commit to access tx
tableSizes = stagedsync.PrintTables(db, txc.Tx) // Need to do this before commit to access tx
commitStart := time.Now()
errTx := tx.Commit()
tx = nil
errTx := txc.Tx.Commit()
txc.Tx = nil
if errTx != nil {
return errTx
}
@ -166,7 +167,7 @@ func StageLoopIteration(ctx context.Context, db kv.RwDB, tx kv.RwTx, sync *stage
// -- send notifications START
if hook != nil {
if err = hook.AfterRun(tx, finishProgressBefore); err != nil {
if err = hook.AfterRun(txc.Tx, finishProgressBefore); err != nil {
return err
}
}
@ -182,7 +183,7 @@ func StageLoopIteration(ctx context.Context, db kv.RwDB, tx kv.RwTx, sync *stage
// -- send notifications END
// -- Prune+commit(sync)
if err := stageLoopStepPrune(ctx, db, tx, sync, initialCycle); err != nil {
if err := stageLoopStepPrune(ctx, db, txc.Tx, sync, initialCycle); err != nil {
return err
}
@ -348,8 +349,9 @@ func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync, tmpDir
miningBatch := membatchwithdb.NewMemoryBatch(tx, tmpDir, logger)
defer miningBatch.Rollback()
txc := wrap.TxContainer{Tx: miningBatch}
if _, err = mining.Run(nil, miningBatch, false /* firstCycle */); err != nil {
if _, err = mining.Run(nil, txc, false /* firstCycle */); err != nil {
return err
}
tx.Rollback()
@ -373,18 +375,20 @@ func addAndVerifyBlockStep(batch kv.RwTx, engine consensus.Engine, chainReader c
if err := rawdb.WriteHeader(batch, currentHeader); err != nil {
return err
}
prevHash, err := rawdb.ReadCanonicalHash(batch, currentHeight)
if err != nil {
return err
}
if err := rawdb.WriteCanonicalHash(batch, currentHash, currentHeight); err != nil {
return err
}
if err := rawdb.WriteHeadHeaderHash(batch, currentHash); err != nil {
return err
}
var ok bool
var err error
if ok, err = rawdb.WriteRawBodyIfNotExists(batch, currentHash, currentHeight, currentBody); err != nil {
if _, err := rawdb.WriteRawBodyIfNotExists(batch, currentHash, currentHeight, currentBody); err != nil {
return err
}
if histV3 && ok {
if histV3 && prevHash != currentHash {
if err := rawdb.AppendCanonicalTxNums(batch, currentHeight); err != nil {
return err
}
@ -398,7 +402,7 @@ func addAndVerifyBlockStep(batch kv.RwTx, engine consensus.Engine, chainReader c
return nil
}
func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine consensus.Engine, batch kv.RwTx, blockWriter *blockio.BlockWriter, stateSync *stagedsync.Sync, Bd *bodydownload.BodyDownload, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, histV3 bool) (err error) {
func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine consensus.Engine, txc wrap.TxContainer, blockWriter *blockio.BlockWriter, stateSync *stagedsync.Sync, Bd *bodydownload.BodyDownload, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, histV3 bool) (err error) {
defer func() {
if rec := recover(); rec != nil {
err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack())
@ -409,11 +413,11 @@ func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine co
if unwindPoint > 0 {
// Run it through the unwind
stateSync.UnwindTo(unwindPoint, stagedsync.StagedUnwind)
if err = stateSync.RunUnwind(nil, batch); err != nil {
if err = stateSync.RunUnwind(nil, txc); err != nil {
return err
}
}
if err := rawdb.TruncateCanonicalChain(ctx, batch, header.Number.Uint64()+1); err != nil {
if err := rawdb.TruncateCanonicalChain(ctx, txc.Tx, header.Number.Uint64()+1); err != nil {
return err
}
// Once we unwound we can start constructing the chain (assumption: len(headersChain) == len(bodiesChain))
@ -421,11 +425,11 @@ func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine co
currentHeader := headersChain[i]
currentBody := bodiesChain[i]
if err := addAndVerifyBlockStep(batch, engine, chainReader, currentHeader, currentBody, histV3); err != nil {
if err := addAndVerifyBlockStep(txc.Tx, engine, chainReader, currentHeader, currentBody, histV3); err != nil {
return err
}
// Run state sync
if err = stateSync.RunNoInterrupt(nil, batch, false /* firstCycle */); err != nil {
if err = stateSync.RunNoInterrupt(nil, txc, false /* firstCycle */); err != nil {
return err
}
}
@ -435,11 +439,11 @@ func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine co
return nil
}
// Prepare memory state for block execution
if err := addAndVerifyBlockStep(batch, engine, chainReader, header, body, histV3); err != nil {
if err := addAndVerifyBlockStep(txc.Tx, engine, chainReader, header, body, histV3); err != nil {
return err
}
// Run state sync
if err = stateSync.RunNoInterrupt(nil, batch, false /* firstCycle */); err != nil {
if err = stateSync.RunNoInterrupt(nil, txc, false /* firstCycle */); err != nil {
return err
}
return nil