diff --git a/cmd/state/e3types/txtask.go b/cmd/state/exec22/txtask.go
similarity index 99%
rename from cmd/state/e3types/txtask.go
rename to cmd/state/exec22/txtask.go
index b19242b2b..6103f446b 100644
--- a/cmd/state/e3types/txtask.go
+++ b/cmd/state/exec22/txtask.go
@@ -1,4 +1,4 @@
-package e3types
+package exec22
 
 import (
 	"container/heap"
diff --git a/cmd/state/exec3/state.go b/cmd/state/exec3/state.go
index 9edf098a8..036e87778 100644
--- a/cmd/state/exec3/state.go
+++ b/cmd/state/exec3/state.go
@@ -11,7 +11,7 @@ import (
 	"github.com/ledgerwatch/log/v3"
 	"golang.org/x/sync/errgroup"
 
-	"github.com/ledgerwatch/erigon/cmd/state/e3types"
+	"github.com/ledgerwatch/erigon/cmd/state/exec22"
 	"github.com/ledgerwatch/erigon/consensus"
 	"github.com/ledgerwatch/erigon/consensus/misc"
 	"github.com/ledgerwatch/erigon/core"
@@ -30,7 +30,7 @@ type Worker struct {
 	chainTx kv.Tx
 	background bool // if true - worker does manage RoTx (begin/rollback) in .ResetTx()
 	blockReader services.FullBlockReader
-	in *e3types.QueueWithRetry
+	in *exec22.QueueWithRetry
 	rs *state.StateV3
 	stateWriter *state.StateWriterV3
 	stateReader *state.StateReaderV3
@@ -40,7 +40,7 @@ type Worker struct {
 	ctx context.Context
 	engine consensus.Engine
 	genesis *types.Genesis
-	resultCh *e3types.ResultsQueue
+	resultCh *exec22.ResultsQueue
 	chain ChainReader
 	isPoSA bool
 	posa consensus.PoSA
@@ -52,7 +52,7 @@ type Worker struct {
 	ibs *state.IntraBlockState
 }
 
-func NewWorker(lock sync.Locker, ctx context.Context, background bool, chainDb kv.RoDB, rs *state.StateV3, in *e3types.QueueWithRetry, blockReader services.FullBlockReader, chainConfig *chain.Config, genesis *types.Genesis, results *e3types.ResultsQueue, engine consensus.Engine) *Worker {
+func NewWorker(lock sync.Locker, ctx context.Context, background bool, chainDb kv.RoDB, rs *state.StateV3, in *exec22.QueueWithRetry, blockReader services.FullBlockReader, chainConfig *chain.Config, genesis *types.Genesis, results *exec22.ResultsQueue, engine consensus.Engine) *Worker {
 	w := &Worker{
 		lock: lock,
 		chainDb: chainDb,
@@ -111,13 +111,13 @@ func (rw *Worker) Run() error {
 	return nil
 }
 
-func (rw *Worker) RunTxTask(txTask *e3types.TxTask) {
+func (rw *Worker) RunTxTask(txTask *exec22.TxTask) {
 	rw.lock.Lock()
 	defer rw.lock.Unlock()
 	rw.RunTxTaskNoLock(txTask)
 }
 
-func (rw *Worker) RunTxTaskNoLock(txTask *e3types.TxTask) {
+func (rw *Worker) RunTxTaskNoLock(txTask *exec22.TxTask) {
 	if rw.background && rw.chainTx == nil {
 		var err error
 		if rw.chainTx, err = rw.chainDb.BeginRo(rw.ctx); err != nil {
@@ -282,11 +282,11 @@ func (cr ChainReader) GetTd(hash libcommon.Hash, number uint64) *big.Int {
 	return td
 }
 
-func NewWorkersPool(lock sync.Locker, ctx context.Context, background bool, chainDb kv.RoDB, rs *state.StateV3, in *e3types.QueueWithRetry, blockReader services.FullBlockReader, chainConfig *chain.Config, genesis *types.Genesis, engine consensus.Engine, workerCount int) (reconWorkers []*Worker, applyWorker *Worker, rws *e3types.ResultsQueue, clear func(), wait func()) {
+func NewWorkersPool(lock sync.Locker, ctx context.Context, background bool, chainDb kv.RoDB, rs *state.StateV3, in *exec22.QueueWithRetry, blockReader services.FullBlockReader, chainConfig *chain.Config, genesis *types.Genesis, engine consensus.Engine, workerCount int) (reconWorkers []*Worker, applyWorker *Worker, rws *exec22.ResultsQueue, clear func(), wait func()) {
 	reconWorkers = make([]*Worker, workerCount)
 
 	resultChSize := workerCount * 8
-	rws = e3types.NewResultsQueue(resultChSize, workerCount) // workerCount * 4
+	rws = exec22.NewResultsQueue(resultChSize, workerCount) // workerCount * 4
 	{
 		// we all errors in background workers (except ctx.Cancele), because applyLoop will detect this error anyway.
 		// and in applyLoop all errors are critical
diff --git a/cmd/state/exec3/state_recon.go b/cmd/state/exec3/state_recon.go
index c249bfc51..3ca57a7a6 100644
--- a/cmd/state/exec3/state_recon.go
+++ b/cmd/state/exec3/state_recon.go
@@ -15,7 +15,7 @@ import (
 	libstate "github.com/ledgerwatch/erigon-lib/state"
 	"github.com/ledgerwatch/log/v3"
 
-	"github.com/ledgerwatch/erigon/cmd/state/e3types"
+	"github.com/ledgerwatch/erigon/cmd/state/exec22"
 	"github.com/ledgerwatch/erigon/common"
 	"github.com/ledgerwatch/erigon/consensus"
 	"github.com/ledgerwatch/erigon/consensus/misc"
@@ -286,7 +286,7 @@ func (rw *ReconWorker) Run() error {
 
 var noop = state.NewNoopWriter()
 
-func (rw *ReconWorker) runTxTask(txTask *e3types.TxTask) error {
+func (rw *ReconWorker) runTxTask(txTask *exec22.TxTask) error {
 	rw.lock.Lock()
 	defer rw.lock.Unlock()
 	rw.stateReader.SetTxNum(txTask.TxNum)
diff --git a/core/state/recon_state.go b/core/state/recon_state.go
index 90eef465a..1bc8fa8a1 100644
--- a/core/state/recon_state.go
+++ b/core/state/recon_state.go
@@ -12,7 +12,7 @@ import (
 	"github.com/RoaringBitmap/roaring/roaring64"
 	"github.com/google/btree"
 	"github.com/ledgerwatch/erigon-lib/kv"
-	"github.com/ledgerwatch/erigon/cmd/state/e3types"
+	"github.com/ledgerwatch/erigon/cmd/state/exec22"
 	btree2 "github.com/tidwall/btree"
 )
 
@@ -39,9 +39,9 @@ func ReconnLess(i, thanItem reconPair) bool {
 type ReconnWork struct {
 	lock sync.RWMutex
 	doneBitmap roaring64.Bitmap
-	triggers map[uint64][]*e3types.TxTask
-	workCh chan *e3types.TxTask
-	queue e3types.TxTaskQueue
+	triggers map[uint64][]*exec22.TxTask
+	workCh chan *exec22.TxTask
+	queue exec22.TxTaskQueue
 	rollbackCount uint64
 	maxTxNum uint64
 }
@@ -56,11 +56,11 @@ type ReconState struct {
 	sizeEstimate int
 }
 
-func NewReconState(workCh chan *e3types.TxTask) *ReconState {
+func NewReconState(workCh chan *exec22.TxTask) *ReconState {
 	rs := &ReconState{
 		ReconnWork: &ReconnWork{
 			workCh: workCh,
-			triggers: map[uint64][]*e3types.TxTask{},
+			triggers: map[uint64][]*exec22.TxTask{},
 		},
 		changes: map[string]*btree2.BTreeG[reconPair]{},
 		hints: map[string]*btree2.PathHint{},
@@ -68,11 +68,11 @@ func NewReconState(workCh chan *e3types.TxTask) *ReconState {
 	return rs
 }
 
-func (rs *ReconState) Reset(workCh chan *e3types.TxTask) {
+func (rs *ReconState) Reset(workCh chan *exec22.TxTask) {
 	rs.lock.Lock()
 	defer rs.lock.Unlock()
 	rs.workCh = workCh
-	rs.triggers = map[uint64][]*e3types.TxTask{}
+	rs.triggers = map[uint64][]*exec22.TxTask{}
 	rs.rollbackCount = 0
 	rs.queue = rs.queue[:cap(rs.queue)]
 	for i := 0; i < len(rs.queue); i++ {
@@ -186,7 +186,7 @@ func (rs *ReconState) Flush(rwTx kv.RwTx) error {
 	return nil
 }
 
-func (rs *ReconnWork) Schedule(ctx context.Context) (*e3types.TxTask, bool, error) {
+func (rs *ReconnWork) Schedule(ctx context.Context) (*exec22.TxTask, bool, error) {
 	rs.lock.Lock()
 	defer rs.lock.Unlock()
 Loop:
@@ -203,7 +203,7 @@ Loop:
 		}
 	}
 	if rs.queue.Len() > 0 {
-		return heap.Pop(&rs.queue).(*e3types.TxTask), true, nil
+		return heap.Pop(&rs.queue).(*exec22.TxTask), true, nil
 	}
 	return nil, false, nil
 }
@@ -223,7 +223,7 @@ func (rs *ReconnWork) CommitTxNum(txNum uint64) {
 	}
 }
 
-func (rs *ReconnWork) RollbackTx(txTask *e3types.TxTask, dependency uint64) {
+func (rs *ReconnWork) RollbackTx(txTask *exec22.TxTask, dependency uint64) {
 	rs.lock.Lock()
 	defer rs.lock.Unlock()
 	if rs.doneBitmap.Contains(dependency) {
diff --git a/core/state/rw_v3.go b/core/state/rw_v3.go
index f70972c4f..b89fa3bba 100644
--- a/core/state/rw_v3.go
+++ b/core/state/rw_v3.go
@@ -18,7 +18,7 @@ import (
 	"github.com/ledgerwatch/erigon-lib/etl"
 	"github.com/ledgerwatch/erigon-lib/kv"
 	libstate "github.com/ledgerwatch/erigon-lib/state"
-	"github.com/ledgerwatch/erigon/cmd/state/e3types"
+	"github.com/ledgerwatch/erigon/cmd/state/exec22"
 	"github.com/ledgerwatch/erigon/common/dbutils"
 	"github.com/ledgerwatch/erigon/core/types/accounts"
 	"github.com/ledgerwatch/erigon/turbo/shards"
@@ -40,7 +40,7 @@ type StateV3 struct {
 	chIncs map[string][]byte
 	chContractCode map[string][]byte
 
-	triggers map[uint64]*e3types.TxTask
+	triggers map[uint64]*exec22.TxTask
 	senderTxNums map[common.Address]uint64
 	triggerLock sync.Mutex
 
@@ -52,7 +52,7 @@ type StateV3 struct {
 func NewStateV3(tmpdir string) *StateV3 {
 	rs := &StateV3{
 		tmpdir: tmpdir,
-		triggers: map[uint64]*e3types.TxTask{},
+		triggers: map[uint64]*exec22.TxTask{},
 		senderTxNums: map[common.Address]uint64{},
 		chCode: map[string][]byte{},
 		chAccs: map[string][]byte{},
@@ -219,15 +219,15 @@ func (rs *StateV3) Flush(ctx context.Context, rwTx kv.RwTx, logPrefix string, lo
 	return nil
 }
 
-func (rs *StateV3) ReTry(txTask *e3types.TxTask, in *e3types.QueueWithRetry) {
+func (rs *StateV3) ReTry(txTask *exec22.TxTask, in *exec22.QueueWithRetry) {
 	rs.resetTxTask(txTask)
 	in.ReTry(txTask)
 }
-func (rs *StateV3) AddWork(ctx context.Context, txTask *e3types.TxTask, in *e3types.QueueWithRetry) {
+func (rs *StateV3) AddWork(ctx context.Context, txTask *exec22.TxTask, in *exec22.QueueWithRetry) {
 	rs.resetTxTask(txTask)
 	in.Add(ctx, txTask)
 }
-func (rs *StateV3) resetTxTask(txTask *e3types.TxTask) {
+func (rs *StateV3) resetTxTask(txTask *exec22.TxTask) {
 	txTask.BalanceIncreaseSet = nil
 	returnReadList(txTask.ReadLists)
 	txTask.ReadLists = nil
@@ -247,7 +247,7 @@ func (rs *StateV3) resetTxTask(txTask *e3types.TxTask) {
 	*/
 }
 
-func (rs *StateV3) RegisterSender(txTask *e3types.TxTask) bool {
+func (rs *StateV3) RegisterSender(txTask *exec22.TxTask) bool {
 	//TODO: it deadlocks on panic, fix it
 	defer func() {
 		rec := recover()
@@ -269,7 +269,7 @@ func (rs *StateV3) RegisterSender(txTask *e3types.TxTask) bool {
 	return !deferral
 }
 
-func (rs *StateV3) CommitTxNum(sender *common.Address, txNum uint64, in *e3types.QueueWithRetry) (count int) {
+func (rs *StateV3) CommitTxNum(sender *common.Address, txNum uint64, in *exec22.QueueWithRetry) (count int) {
 	ExecTxsDone.Inc()
 
 	rs.triggerLock.Lock()
@@ -288,7 +288,7 @@ func (rs *StateV3) CommitTxNum(sender *common.Address, txNum uint64, in *e3types
 	return count
 }
 
-func (rs *StateV3) writeStateHistory(roTx kv.Tx, txTask *e3types.TxTask, agg *libstate.AggregatorV3) error {
+func (rs *StateV3) writeStateHistory(roTx kv.Tx, txTask *exec22.TxTask, agg *libstate.AggregatorV3) error {
 	rs.lock.RLock()
 	defer rs.lock.RUnlock()
 
@@ -396,7 +396,7 @@ func (rs *StateV3) writeStateHistory(roTx kv.Tx, txTask *e3types.TxTask, agg *li
 	return nil
 }
 
-func (rs *StateV3) applyState(roTx kv.Tx, txTask *e3types.TxTask, agg *libstate.AggregatorV3) error {
+func (rs *StateV3) applyState(roTx kv.Tx, txTask *exec22.TxTask, agg *libstate.AggregatorV3) error {
 	emptyRemoval := txTask.Rules.IsSpuriousDragon
 	rs.lock.Lock()
 	defer rs.lock.Unlock()
@@ -444,7 +444,7 @@ func (rs *StateV3) applyState(roTx kv.Tx, txTask *e3types.TxTask, agg *libstate.
 	return nil
 }
 
-func (rs *StateV3) ApplyState(roTx kv.Tx, txTask *e3types.TxTask, agg *libstate.AggregatorV3) error {
+func (rs *StateV3) ApplyState(roTx kv.Tx, txTask *exec22.TxTask, agg *libstate.AggregatorV3) error {
 	defer agg.BatchHistoryWriteStart().BatchHistoryWriteEnd()
 
 	agg.SetTxNum(txTask.TxNum)
@@ -462,7 +462,7 @@ func (rs *StateV3) ApplyState(roTx kv.Tx, txTask *e3types.TxTask, agg *libstate.
 	return nil
 }
 
-func (rs *StateV3) ApplyHistory(txTask *e3types.TxTask, agg *libstate.AggregatorV3) error {
+func (rs *StateV3) ApplyHistory(txTask *exec22.TxTask, agg *libstate.AggregatorV3) error {
 	if dbg.DiscardHistory() {
 		return nil
 	}
@@ -610,7 +610,7 @@ func (rs *StateV3) SizeEstimate() (r uint64) {
 	return r * 2 // multiply 2 here, to cover data-structures overhead. more precise accounting - expensive.
 }
 
-func (rs *StateV3) ReadsValid(readLists map[string]*e3types.KvList) bool {
+func (rs *StateV3) ReadsValid(readLists map[string]*exec22.KvList) bool {
 	rs.lock.RLock()
 	defer rs.lock.RUnlock()
 	for table, list := range readLists {
@@ -640,7 +640,7 @@ func (rs *StateV3) ReadsValid(readLists map[string]*e3types.KvList) bool {
 	return true
 }
 
-func (rs *StateV3) readsValidMap(table string, list *e3types.KvList, m map[string][]byte) bool {
+func (rs *StateV3) readsValidMap(table string, list *exec22.KvList, m map[string][]byte) bool {
 	switch table {
 	case CodeSizeTable:
 		for i, key := range list.Keys {
@@ -662,7 +662,7 @@ func (rs *StateV3) readsValidMap(table string, list *e3types.KvList, m map[strin
 	return true
 }
 
-func (rs *StateV3) readsValidBtree(table string, list *e3types.KvList, m *btree2.Map[string, []byte]) bool {
+func (rs *StateV3) readsValidBtree(table string, list *exec22.KvList, m *btree2.Map[string, []byte]) bool {
 	for i, key := range list.Keys {
 		if val, ok := m.Get(key); ok {
 			if !bytes.Equal(list.Vals[i], val) {
@@ -676,7 +676,7 @@ func (rs *StateV3) readsValidBtree(table string, list *e3types.KvList, m *btree2
 type StateWriterV3 struct {
 	rs *StateV3
 	txNum uint64
-	writeLists map[string]*e3types.KvList
+	writeLists map[string]*exec22.KvList
 	accountPrevs map[string][]byte
 	accountDels map[string]*accounts.Account
 	storagePrevs map[string][]byte
@@ -702,7 +702,7 @@ func (w *StateWriterV3) ResetWriteSet() {
 	w.codePrevs = nil
 }
 
-func (w *StateWriterV3) WriteSet() map[string]*e3types.KvList {
+func (w *StateWriterV3) WriteSet() map[string]*exec22.KvList {
 	return w.writeLists
 }
 
@@ -792,7 +792,7 @@ type StateReaderV3 struct {
 	composite []byte
 
 	discardReadList bool
-	readLists map[string]*e3types.KvList
+	readLists map[string]*exec22.KvList
 }
 
 func NewStateReaderV3(rs *StateV3) *StateReaderV3 {
@@ -802,12 +802,12 @@ func NewStateReaderV3(rs *StateV3) *StateReaderV3 {
 	}
 }
 
-func (r *StateReaderV3) DiscardReadList() { r.discardReadList = true }
-func (r *StateReaderV3) SetTxNum(txNum uint64) { r.txNum = txNum }
-func (r *StateReaderV3) SetTx(tx kv.Tx) { r.tx = tx }
-func (r *StateReaderV3) ReadSet() map[string]*e3types.KvList { return r.readLists }
-func (r *StateReaderV3) SetTrace(trace bool) { r.trace = trace }
-func (r *StateReaderV3) ResetReadSet() { r.readLists = newReadList() }
+func (r *StateReaderV3) DiscardReadList() { r.discardReadList = true }
+func (r *StateReaderV3) SetTxNum(txNum uint64) { r.txNum = txNum }
+func (r *StateReaderV3) SetTx(tx kv.Tx) { r.tx = tx }
+func (r *StateReaderV3) ReadSet() map[string]*exec22.KvList { return r.readLists }
+func (r *StateReaderV3) SetTrace(trace bool) { r.trace = trace }
+func (r *StateReaderV3) ResetReadSet() { r.readLists = newReadList() }
 
 func (r *StateReaderV3) ReadAccountData(address common.Address) (*accounts.Account, error) {
 	addr := address.Bytes()
@@ -929,7 +929,7 @@ func (r *StateReaderV3) ReadAccountIncarnation(address common.Address) (uint64,
 
 var writeListPool = sync.Pool{
 	New: func() any {
-		return map[string]*e3types.KvList{
+		return map[string]*exec22.KvList{
 			kv.PlainState: {},
 			StorageTable: {},
 			kv.Code: {},
@@ -939,14 +939,14 @@ var writeListPool = sync.Pool{
 	},
 }
 
-func newWriteList() map[string]*e3types.KvList {
-	v := writeListPool.Get().(map[string]*e3types.KvList)
+func newWriteList() map[string]*exec22.KvList {
+	v := writeListPool.Get().(map[string]*exec22.KvList)
 	for _, tbl := range v {
 		tbl.Keys, tbl.Vals = tbl.Keys[:0], tbl.Vals[:0]
 	}
 	return v
 }
-func returnWriteList(v map[string]*e3types.KvList) {
+func returnWriteList(v map[string]*exec22.KvList) {
 	if v == nil {
 		return
 	}
@@ -955,7 +955,7 @@ func returnWriteList(v map[string]*e3types.KvList) {
 
 var readListPool = sync.Pool{
 	New: func() any {
-		return map[string]*e3types.KvList{
+		return map[string]*exec22.KvList{
 			kv.PlainState: {},
 			kv.Code: {},
 			CodeSizeTable: {},
@@ -965,14 +965,14 @@ var readListPool = sync.Pool{
 	},
 }
 
-func newReadList() map[string]*e3types.KvList {
-	v := readListPool.Get().(map[string]*e3types.KvList)
+func newReadList() map[string]*exec22.KvList {
+	v := readListPool.Get().(map[string]*exec22.KvList)
 	for _, tbl := range v {
 		tbl.Keys, tbl.Vals = tbl.Keys[:0], tbl.Vals[:0]
 	}
 	return v
 }
-func returnReadList(v map[string]*e3types.KvList) {
+func returnReadList(v map[string]*exec22.KvList) {
 	if v == nil {
 		return
 	}
diff --git a/eth/stagedsync/exec3.go b/eth/stagedsync/exec3.go
index eab209ef0..7a1a1e957 100644
--- a/eth/stagedsync/exec3.go
+++ b/eth/stagedsync/exec3.go
@@ -31,7 +31,7 @@ import (
 	"github.com/torquem-ch/mdbx-go/mdbx"
 	"golang.org/x/sync/errgroup"
 
-	"github.com/ledgerwatch/erigon/cmd/state/e3types"
+	"github.com/ledgerwatch/erigon/cmd/state/exec22"
 	"github.com/ledgerwatch/erigon/cmd/state/exec3"
 	"github.com/ledgerwatch/erigon/consensus"
 	"github.com/ledgerwatch/erigon/core"
@@ -64,7 +64,7 @@ type Progress struct {
 	logPrefix string
 }
 
-func (p *Progress) Log(rs *state.StateV3, in *e3types.QueueWithRetry, rws *e3types.ResultsQueue, doneCount, inputBlockNum, outputBlockNum, outTxNum, repeatCount uint64, idxStepsAmountInDB float64) {
+func (p *Progress) Log(rs *state.StateV3, in *exec22.QueueWithRetry, rws *exec22.ResultsQueue, doneCount, inputBlockNum, outputBlockNum, outTxNum, repeatCount uint64, idxStepsAmountInDB float64) {
 	ExecStepsInDB.Set(uint64(idxStepsAmountInDB * 100))
 	var m runtime.MemStats
 	dbg.ReadMemStats(&m)
@@ -197,7 +197,7 @@ func ExecV3(ctx context.Context,
 	// Maybe need split channels? Maybe don't exit from ApplyLoop? Maybe current way is also ok?
 
 	// input queue
-	in := e3types.NewQueueWithRetry(100_000)
+	in := exec22.NewQueueWithRetry(100_000)
 	defer in.Close()
 
 	rwsConsumed := make(chan struct{}, 1)
@@ -353,7 +353,7 @@ func ExecV3(ctx context.Context,
 			}
 
 			// Drain results channel because read sets do not carry over
-			rws.DropResults(func(txTask *e3types.TxTask) {
+			rws.DropResults(func(txTask *exec22.TxTask) {
 				rs.ReTry(txTask, in)
 			})
 
@@ -533,7 +533,7 @@ Loop:
 
 		for txIndex := -1; txIndex <= len(txs); txIndex++ {
 			// Do not oversend, wait for the result heap to go under certain size
-			txTask := &e3types.TxTask{
+			txTask := &exec22.TxTask{
 				BlockNum: blockNum,
 				Header: header,
 				Coinbase: b.Coinbase(),
@@ -721,7 +721,7 @@ func blockWithSenders(db kv.RoDB, tx kv.Tx, blockReader services.BlockReader, bl
 	return b, nil
 }
 
-func processResultQueue(in *e3types.QueueWithRetry, rws *e3types.ResultsQueueIter, outputTxNumIn uint64, rs *state.StateV3, agg *state2.AggregatorV3, applyTx kv.Tx, backPressure chan struct{}, applyWorker *exec3.Worker, canRetry, forceStopAtBlockEnd bool) (outputTxNum uint64, conflicts, triggers int, processedBlockNum uint64, stopedAtBlockEnd bool, err error) {
+func processResultQueue(in *exec22.QueueWithRetry, rws *exec22.ResultsQueueIter, outputTxNumIn uint64, rs *state.StateV3, agg *state2.AggregatorV3, applyTx kv.Tx, backPressure chan struct{}, applyWorker *exec3.Worker, canRetry, forceStopAtBlockEnd bool) (outputTxNum uint64, conflicts, triggers int, processedBlockNum uint64, stopedAtBlockEnd bool, err error) {
 	var i int
 	outputTxNum = outputTxNumIn
 	for rws.HasNext(outputTxNum) {
@@ -866,7 +866,7 @@ func reconstituteStep(last bool,
 	}
 	g, reconstWorkersCtx := errgroup.WithContext(ctx)
 	defer g.Wait()
-	workCh := make(chan *e3types.TxTask, workerCount*4)
+	workCh := make(chan *exec22.TxTask, workerCount*4)
 	defer func() {
 		fmt.Printf("close1\n")
 		safeCloseTxTaskCh(workCh)
@@ -1022,7 +1022,7 @@ func reconstituteStep(last bool,
 		for txIndex := -1; txIndex <= len(txs); txIndex++ {
 			if bitmap.Contains(inputTxNum) {
 				binary.BigEndian.PutUint64(txKey[:], inputTxNum)
-				txTask := &e3types.TxTask{
+				txTask := &exec22.TxTask{
 					BlockNum: bn,
 					Header: header,
 					Coinbase: b.Coinbase(),
@@ -1278,7 +1278,7 @@ func reconstituteStep(last bool,
 	return nil
 }
 
-func safeCloseTxTaskCh(ch chan *e3types.TxTask) {
+func safeCloseTxTaskCh(ch chan *exec22.TxTask) {
 	if ch == nil {
 		return
 	}
diff --git a/eth/stagedsync/stage_execute_test.go b/eth/stagedsync/stage_execute_test.go
index 6b36621dc..c22b89a4a 100644
--- a/eth/stagedsync/stage_execute_test.go
+++ b/eth/stagedsync/stage_execute_test.go
@@ -12,7 +12,7 @@ import (
 	"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
 	"github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2"
 	libstate "github.com/ledgerwatch/erigon-lib/state"
-	"github.com/ledgerwatch/erigon/cmd/state/e3types"
+	"github.com/ledgerwatch/erigon/cmd/state/exec22"
 	"github.com/ledgerwatch/erigon/core/state"
 	"github.com/ledgerwatch/erigon/eth/ethconfig"
 	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
@@ -135,7 +135,7 @@ func apply(tx kv.RwTx, agg *libstate.AggregatorV3) (beforeBlock, afterBlock test
 		stateWriter.SetTxNum(n)
 		stateWriter.ResetWriteSet()
 	}, func(n, from, numberOfBlocks uint64) {
-		txTask := &e3types.TxTask{
+		txTask := &exec22.TxTask{
 			BlockNum: n,
 			Rules: params.TestRules,
 			TxNum: n,