WIP: cmd, turbo, core, eth: add TransactionsV3 flag and persist transactions in a new table (#6754)

nanevardanyan 2023-02-24 22:49:25 +04:00 committed by GitHub
parent 4525db6a9c
commit ab6239b30f
44 changed files with 274 additions and 347 deletions

View File

@ -134,7 +134,7 @@ func NewTestSimulatedBackendWithConfig(t *testing.T, alloc core.GenesisAlloc, co
func (b *SimulatedBackend) DB() kv.RwDB { return b.m.DB }
func (b *SimulatedBackend) Agg() *state2.AggregatorV3 { return b.m.HistoryV3Components() }
func (b *SimulatedBackend) BlockReader() *snapshotsync.BlockReaderWithSnapshots {
return snapshotsync.NewBlockReaderWithSnapshots(b.m.BlockSnapshots)
return snapshotsync.NewBlockReaderWithSnapshots(b.m.BlockSnapshots, b.m.TransactionsV3)
}
func (b *SimulatedBackend) HistoryV3() bool { return b.m.HistoryV3 }
func (b *SimulatedBackend) Engine() consensus.Engine { return b.m.Engine }

View File

@ -231,6 +231,11 @@ func NewBackend(stack *node.Node, config *ethconfig.Config, logger log.Logger) (
return err
}
config.TransactionsV3, err = kvcfg.TransactionsV3.WriteOnce(tx, config.TransactionsV3)
if err != nil {
return err
}
// if we are in the incorrect syncmode then we change it to the appropriate one
if !isCorrectSync {
log.Warn("Incorrect snapshot enablement", "got", config.Sync.UseSnapshots, "change_to", useSnapshots)
@ -268,7 +273,7 @@ func NewBackend(stack *node.Node, config *ethconfig.Config, logger log.Logger) (
allSnapshots *snapshotsync.RoSnapshots
agg *libstate.AggregatorV3
)
backend.blockReader, allSnapshots, agg, err = backend.setUpBlockReader(ctx, config.Dirs, config.Snapshot, config.Downloader)
backend.blockReader, allSnapshots, agg, err = backend.setUpBlockReader(ctx, config.Dirs, config.Snapshot, config.Downloader, config.TransactionsV3)
if err != nil {
return nil, err
}
@ -477,7 +482,7 @@ func NewBackend(stack *node.Node, config *ethconfig.Config, logger log.Logger) (
mining := stagedsync.New(
stagedsync.MiningStages(backend.sentryCtx,
stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miner, *backend.chainConfig, backend.engine, backend.txPool2, backend.txPool2DB, nil, tmpdir),
stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, nil, 0, backend.txPool2, backend.txPool2DB),
stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, nil, 0, backend.txPool2, backend.txPool2DB, allSnapshots, config.TransactionsV3),
stagedsync.StageHashStateCfg(backend.chainDB, dirs, config.HistoryV3, backend.agg),
stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, backend.blockReader, nil, config.HistoryV3, backend.agg),
stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miner, backend.miningSealingQuit),
@ -495,7 +500,7 @@ func NewBackend(stack *node.Node, config *ethconfig.Config, logger log.Logger) (
proposingSync := stagedsync.New(
stagedsync.MiningStages(backend.sentryCtx,
stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miningStatePos, *backend.chainConfig, backend.engine, backend.txPool2, backend.txPool2DB, param, tmpdir),
stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt, param.PayloadId, backend.txPool2, backend.txPool2DB),
stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt, param.PayloadId, backend.txPool2, backend.txPool2DB, allSnapshots, config.TransactionsV3),
stagedsync.StageHashStateCfg(backend.chainDB, dirs, config.HistoryV3, backend.agg),
stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, backend.blockReader, nil, config.HistoryV3, backend.agg),
stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miningStatePos, backend.miningSealingQuit),
@ -612,7 +617,7 @@ func NewBackend(stack *node.Node, config *ethconfig.Config, logger log.Logger) (
return nil, err
}
backend.stagedSync, err = stages3.NewStagedSync(backend.sentryCtx, backend.chainDB, stack.Config().P2P, config, backend.sentriesClient, backend.notifications, backend.downloaderClient, allSnapshots, backend.agg, backend.forkValidator, backend.engine)
backend.stagedSync, err = stages3.NewStagedSync(backend.sentryCtx, backend.chainDB, stack.Config().P2P, config, backend.sentriesClient, backend.notifications, backend.downloaderClient, allSnapshots, backend.agg, backend.forkValidator, backend.engine, config.TransactionsV3)
if err != nil {
return nil, err
}
@ -857,18 +862,13 @@ func (s *Ethereum) NodesInfo(limit int) (*remote.NodesInfoReply, error) {
}
// sets up blockReader and client downloader
func (s *Ethereum) setUpBlockReader(ctx context.Context, dirs datadir.Dirs, snConfig ethconfig.Snapshot, downloaderCfg *downloadercfg.Cfg) (services.FullBlockReader, *snapshotsync.RoSnapshots, *libstate.AggregatorV3, error) {
if !snConfig.Enabled {
blockReader := snapshotsync.NewBlockReader()
return blockReader, nil, nil, nil
}
func (s *Ethereum) setUpBlockReader(ctx context.Context, dirs datadir.Dirs, snConfig ethconfig.Snapshot, downloaderCfg *downloadercfg.Cfg, transactionsV3 bool) (services.FullBlockReader, *snapshotsync.RoSnapshots, *libstate.AggregatorV3, error) {
allSnapshots := snapshotsync.NewRoSnapshots(snConfig, dirs.Snap)
var err error
if !snConfig.NoDownloader {
allSnapshots.OptimisticalyReopenWithDB(s.chainDB)
}
blockReader := snapshotsync.NewBlockReaderWithSnapshots(allSnapshots)
blockReader := snapshotsync.NewBlockReaderWithSnapshots(allSnapshots, transactionsV3)
if !snConfig.NoDownloader {
if snConfig.DownloaderAddr != "" {
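The `kvcfg.TransactionsV3.WriteOnce(tx, config.TransactionsV3)` call earlier in this hunk persists the flag in the chain database on first start and returns the effective value, so a different CLI value on a later restart is ignored. A minimal sketch of write-once semantics over an erigon-lib KV transaction (the helper name, key, and use of the `kv.DatabaseInfo` bucket are illustrative assumptions, not the actual `kvcfg` internals):

```go
package sketch

import "github.com/ledgerwatch/erigon-lib/kv"

// writeOnceBool sketches WriteOnce semantics: if the key is already present
// the stored value wins, otherwise v is persisted and returned. That is why
// the TransactionsV3 setting cannot change after the node's first start.
// Bucket (kv.DatabaseInfo) and key are assumptions for illustration.
func writeOnceBool(tx kv.RwTx, key []byte, v bool) (bool, error) {
	stored, err := tx.GetOne(kv.DatabaseInfo, key)
	if err != nil {
		return false, err
	}
	if len(stored) > 0 {
		return stored[0] == 1, nil // first write wins; later CLI values are ignored
	}
	val := []byte{0}
	if v {
		val[0] = 1
	}
	return v, tx.Put(kv.DatabaseInfo, key, val)
}
```

Because the real call assigns its result back to `config.TransactionsV3`, the rest of `NewBackend` always sees the value the database actually holds.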

View File

@ -15,7 +15,6 @@ import (
"github.com/ledgerwatch/erigon/ethdb/prune"
"github.com/ledgerwatch/erigon/p2p"
"github.com/ledgerwatch/erigon/turbo/engineapi"
"github.com/ledgerwatch/erigon/turbo/services"
"github.com/ledgerwatch/erigon/turbo/shards"
"github.com/ledgerwatch/erigon/turbo/snapshotsync"
)
@ -42,14 +41,10 @@ func NewStagedSync(ctx context.Context,
agg *state.AggregatorV3,
forkValidator *engineapi.ForkValidator,
engine consensus.Engine,
transactionsV3 bool,
) (*stagedsync.Sync, error) {
dirs := cfg.Dirs
var blockReader services.FullBlockReader
if cfg.Snapshot.Enabled {
blockReader = snapshotsync.NewBlockReaderWithSnapshots(snapshots)
} else {
blockReader = snapshotsync.NewBlockReader()
}
blockReader := snapshotsync.NewBlockReaderWithSnapshots(snapshots, transactionsV3)
blockRetire := snapshotsync.NewBlockRetire(1, dirs.Tmp, snapshots, db, snapDownloader, notifications.Events)
// During Import we don't want other services like header requests, body requests etc. to be running.
@ -100,6 +95,7 @@ func NewStagedSync(ctx context.Context,
snapshots,
blockReader,
cfg.HistoryV3,
cfg.TransactionsV3,
),
stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockRetire, controlServer.Hd),
stagedsync.StageExecuteBlocksCfg(

View File

@ -585,7 +585,7 @@ func stageBodies(db kv.RwDB, ctx context.Context) error {
sn, agg := allSnapshots(ctx, db)
defer sn.Close()
defer agg.Close()
chainConfig, historyV3 := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db)
chainConfig, historyV3, transactionsV3 := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db), kvcfg.TransactionsV3.FromDB(db)
_, _, sync, _, _ := newSync(ctx, db, nil)
if err := db.Update(ctx, func(tx kv.RwTx) error {
@ -597,7 +597,7 @@ func stageBodies(db kv.RwDB, ctx context.Context) error {
}
u := sync.NewUnwindState(stages.Bodies, s.BlockNumber-unwind, s.BlockNumber)
if err := stagedsync.UnwindBodiesStage(u, tx, stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, 0, *chainConfig, sn, getBlockReader(db), historyV3), ctx); err != nil {
if err := stagedsync.UnwindBodiesStage(u, tx, stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, 0, *chainConfig, sn, getBlockReader(db), historyV3, transactionsV3), ctx); err != nil {
return err
}
@ -1216,11 +1216,9 @@ var _blockReaderSingleton services.FullBlockReader
func getBlockReader(db kv.RoDB) (blockReader services.FullBlockReader) {
openBlockReaderOnce.Do(func() {
_blockReaderSingleton = snapshotsync.NewBlockReader()
if sn, _ := allSnapshots(context.Background(), db); sn.Cfg().Enabled {
x := snapshotsync.NewBlockReaderWithSnapshots(sn)
_blockReaderSingleton = x
}
sn, _ := allSnapshots(context.Background(), db)
transactionsV3 := kvcfg.TransactionsV3.FromDB(db)
_blockReaderSingleton = snapshotsync.NewBlockReaderWithSnapshots(sn, transactionsV3)
})
return _blockReaderSingleton
}
@ -1292,7 +1290,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig)
miningSync := stagedsync.New(
stagedsync.MiningStages(ctx,
stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, nil, dirs.Tmp),
stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, dirs.Tmp, nil, 0, nil, nil),
stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, dirs.Tmp, nil, 0, nil, nil, allSn, cfg.TransactionsV3),
stagedsync.StageHashStateCfg(db, dirs, historyV3, agg),
stagedsync.StageTrieCfg(db, false, true, false, dirs.Tmp, br, nil, historyV3, agg),
stagedsync.StageMiningFinishCfg(db, *chainConfig, engine, miner, miningCancel),

View File

@ -344,7 +344,6 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger,
}
// Configure snapshots
if cfg.Snap.Enabled {
allSnapshots = snapshotsync.NewRoSnapshots(cfg.Snap, cfg.Dirs.Snap)
// To provide good UX - snapshots can be read immediately after RPCDaemon start, even if Erigon is down
// Erigon stores the list of snapshots in the DB: RPCDaemon can read this list now, and re-read it via `remoteKvClient.Snapshots` after the gRPC connection is established
@ -392,7 +391,7 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger,
}()
}
onNewSnapshot()
blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots)
blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots, ethconfig.Defaults.TransactionsV3)
var histV3Enabled bool
_ = db.View(ctx, func(tx kv.Tx) error {
@ -407,10 +406,6 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger,
}
}
stateCache = kvcache.NewDummy()
} else {
blockReader = snapshotsync.NewBlockReader()
stateCache = kvcache.NewDummy()
}
}
// If DB can't be configured - use PrivateApiAddr as remote DB
if db == nil {
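Note that the daemon constructs its reader with `ethconfig.Defaults.TransactionsV3` rather than the value the node persisted via `WriteOnce`. When a local chaindata handle is available, the stored flag can be read back the same way other commands in this commit do it; a hedged sketch (function name and wiring are assumptions, not the daemon's actual code):

```go
package sketch

import (
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
	"github.com/ledgerwatch/erigon/turbo/services"
	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
)

// newBlockReaderFromDB shows how the persisted flag could be honoured when a
// local chaindata handle exists, mirroring the kvcfg.TransactionsV3.FromDB
// calls made elsewhere in this commit.
func newBlockReaderFromDB(db kv.RoDB, allSnapshots *snapshotsync.RoSnapshots) services.FullBlockReader {
	transactionsV3 := kvcfg.TransactionsV3.FromDB(db)
	return snapshotsync.NewBlockReaderWithSnapshots(allSnapshots, transactionsV3)
}
```

Reading the persisted value would keep the daemon's `kv.EthTxV3` key handling consistent with what the node actually wrote.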

View File

@ -54,7 +54,7 @@ func TestCallTraceOneByOne(t *testing.T) {
}
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
api := NewTraceAPI(
NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine),
m.DB, &httpcfg.HttpCfg{})
@ -103,7 +103,7 @@ func TestCallTraceUnwind(t *testing.T) {
}
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, &httpcfg.HttpCfg{})
if err = m.InsertChain(chainA); err != nil {
t.Fatalf("inserting chainA: %v", err)
@ -165,7 +165,7 @@ func TestFilterNoAddresses(t *testing.T) {
t.Fatalf("generate chain: %v", err)
}
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, &httpcfg.HttpCfg{})
// Insert blocks 1 by 1, to trigger possible "off by one" errors
for i := 0; i < chain.Length(); i++ {
@ -191,7 +191,7 @@ func TestFilterNoAddresses(t *testing.T) {
func TestFilterAddressIntersection(t *testing.T) {
m := stages.Mock(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, &httpcfg.HttpCfg{})
toAddress1, toAddress2, other := common.Address{1}, common.Address{2}, common.Address{3}

View File

@ -20,7 +20,7 @@ func TestNotFoundMustReturnNil(t *testing.T) {
require := require.New(t)
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(
NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine),

View File

@ -53,7 +53,7 @@ var debugTraceTransactionNoRefundTests = []struct {
func TestTraceBlockByNumber(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
baseApi := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine)
ethApi := NewEthAPI(baseApi, m.DB, nil, nil, nil, 5000000, 100_000)
@ -102,7 +102,7 @@ func TestTraceBlockByNumber(t *testing.T) {
func TestTraceBlockByHash(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
baseApi := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine)
ethApi := NewEthAPI(baseApi, m.DB, nil, nil, nil, 5000000, 100_000)
@ -138,7 +138,7 @@ func TestTraceBlockByHash(t *testing.T) {
func TestTraceTransaction(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
base := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine)
api := NewPrivateDebugAPI(base, m.DB, 0)
@ -170,7 +170,7 @@ func TestTraceTransaction(t *testing.T) {
func TestTraceTransactionNoRefund(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
agg := m.HistoryV3Components()
api := NewPrivateDebugAPI(
NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine),
@ -204,7 +204,7 @@ func TestTraceTransactionNoRefund(t *testing.T) {
func TestStorageRangeAt(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
agg := m.HistoryV3Components()
api := NewPrivateDebugAPI(
NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine),
@ -301,7 +301,7 @@ func TestStorageRangeAt(t *testing.T) {
func TestAccountRange(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
agg := m.HistoryV3Components()
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
base := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine)
@ -364,7 +364,7 @@ func TestAccountRange(t *testing.T) {
func TestGetModifiedAccountsByNumber(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
agg := m.HistoryV3Components()
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
base := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine)
@ -468,7 +468,7 @@ func TestMapTxNum2BlockNum(t *testing.T) {
func TestAccountAt(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
base := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine)
api := NewPrivateDebugAPI(base, m.DB, 0)

View File

@ -30,7 +30,7 @@ import (
func TestGetLogs(t *testing.T) {
assert := assert.New(t)
m, _, _ := rpcdaemontest.CreateTestSentry(t)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
agg := m.HistoryV3Components()
baseApi := NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine)
{
@ -63,7 +63,7 @@ func TestGetLogs(t *testing.T) {
func TestErigonGetLatestLogs(t *testing.T) {
assert := assert.New(t)
m, _, _ := rpcdaemontest.CreateTestSentry(t)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
db := m.DB
agg := m.HistoryV3Components()
@ -98,7 +98,7 @@ func TestErigonGetLatestLogs(t *testing.T) {
func TestErigonGetLatestLogsIgnoreTopics(t *testing.T) {
assert := assert.New(t)
m, _, _ := rpcdaemontest.CreateTestSentry(t)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
db := m.DB
agg := m.HistoryV3Components()
@ -190,7 +190,7 @@ func TestGetBlockReceiptsByBlockHash(t *testing.T) {
// Assemble the test environment
m := mockWithGenerator(t, 4, generator)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewErigonAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil)

View File

@ -25,7 +25,7 @@ func TestGetBalanceChangesInBlock(t *testing.T) {
assert := assert.New(t)
myBlockNum := rpc.BlockNumberOrHashWithNumber(0)
m, _, _ := rpcdaemontest.CreateTestSentry(t)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
db := m.DB
agg := m.HistoryV3Components()
@ -50,7 +50,7 @@ func TestGetTransactionReceipt(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
db := m.DB
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), db, nil, nil, nil, 5000000, 100_000)
// Call GetTransactionReceipt for transaction which is not in the database
@ -62,7 +62,7 @@ func TestGetTransactionReceipt(t *testing.T) {
func TestGetTransactionReceiptUnprotected(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)
// Call GetTransactionReceipt for un-protected transaction
@ -77,7 +77,7 @@ func TestGetStorageAt_ByBlockNumber_WithRequireCanonicalDefault(t *testing.T) {
assert := assert.New(t)
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
@ -94,7 +94,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault(t *testing.T) {
assert := assert.New(t)
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
@ -111,7 +111,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue(t *testing.T) {
assert := assert.New(t)
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
@ -127,7 +127,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue(t *testing.T) {
func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_BlockNotFoundError(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
@ -151,7 +151,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_BlockNotFoundError
func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_BlockNotFoundError(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
@ -176,7 +176,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(
assert := assert.New(t)
m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
@ -198,7 +198,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(
func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t *testing.T) {
m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
@ -217,7 +217,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t *
func TestCall_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(t *testing.T) {
m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)
from := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
@ -243,7 +243,7 @@ func TestCall_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(t *testi
func TestCall_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t *testing.T) {
m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)
from := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")

View File

@ -26,7 +26,7 @@ import (
func TestGetBlockByNumberWithLatestTag(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)
b, err := api.GetBlockByNumber(context.Background(), rpc.LatestBlockNumber, false)
@ -40,7 +40,7 @@ func TestGetBlockByNumberWithLatestTag(t *testing.T) {
func TestGetBlockByNumberWithLatestTag_WithHeadHashInDb(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
ctx := context.Background()
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
tx, err := m.DB.BeginRw(ctx)
@ -73,7 +73,7 @@ func TestGetBlockByNumberWithLatestTag_WithHeadHashInDb(t *testing.T) {
func TestGetBlockByNumberWithPendingTag(t *testing.T) {
m := stages.MockWithTxPool(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m)
@ -104,7 +104,7 @@ func TestGetBlockByNumberWithPendingTag(t *testing.T) {
func TestGetBlockByNumber_WithFinalizedTag_NoFinalizedBlockInDb(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
ctx := context.Background()
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)
@ -116,7 +116,7 @@ func TestGetBlockByNumber_WithFinalizedTag_NoFinalizedBlockInDb(t *testing.T) {
func TestGetBlockByNumber_WithFinalizedTag_WithFinalizedBlockInDb(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
ctx := context.Background()
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
tx, err := m.DB.BeginRw(ctx)
@ -149,7 +149,7 @@ func TestGetBlockByNumber_WithFinalizedTag_WithFinalizedBlockInDb(t *testing.T)
func TestGetBlockByNumber_WithSafeTag_NoSafeBlockInDb(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
ctx := context.Background()
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)
@ -161,7 +161,7 @@ func TestGetBlockByNumber_WithSafeTag_NoSafeBlockInDb(t *testing.T) {
func TestGetBlockByNumber_WithSafeTag_WithSafeBlockInDb(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
ctx := context.Background()
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
tx, err := m.DB.BeginRw(ctx)
@ -194,7 +194,7 @@ func TestGetBlockByNumber_WithSafeTag_WithSafeBlockInDb(t *testing.T) {
func TestGetBlockTransactionCountByHash(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
ctx := context.Background()
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
@ -230,7 +230,7 @@ func TestGetBlockTransactionCountByHash(t *testing.T) {
func TestGetBlockTransactionCountByHash_ZeroTx(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
ctx := context.Background()
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
@ -266,7 +266,7 @@ func TestGetBlockTransactionCountByHash_ZeroTx(t *testing.T) {
func TestGetBlockTransactionCountByNumber(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
ctx := context.Background()
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)
@ -301,7 +301,7 @@ func TestGetBlockTransactionCountByNumber(t *testing.T) {
func TestGetBlockTransactionCountByNumber_ZeroTx(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
ctx := context.Background()
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)

View File

@ -37,7 +37,7 @@ import (
func TestEstimateGas(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t))
mining := txpool.NewMiningClient(conn)
@ -56,7 +56,7 @@ func TestEstimateGas(t *testing.T) {
func TestEthCallNonCanonical(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, nil, nil, 5000000, 100_000)
var from = libcommon.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
@ -76,7 +76,7 @@ func TestEthCallToPrunedBlock(t *testing.T) {
ethCallBlockNumber := rpc.BlockNumber(2)
m, bankAddress, contractAddress := chainWithDeployedContract(t)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
prune(t, m.DB, pruneTo)
@ -101,7 +101,7 @@ func TestGetBlockByTimestampLatestTime(t *testing.T) {
ctx := context.Background()
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
tx, err := m.DB.BeginRo(ctx)
if err != nil {
t.Errorf("fail at beginning tx")
@ -139,7 +139,7 @@ func TestGetBlockByTimestampOldestTime(t *testing.T) {
ctx := context.Background()
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
tx, err := m.DB.BeginRo(ctx)
if err != nil {
t.Errorf("failed at beginning tx")
@ -181,7 +181,7 @@ func TestGetBlockByTimeHigherThanLatestBlock(t *testing.T) {
ctx := context.Background()
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
tx, err := m.DB.BeginRo(ctx)
if err != nil {
t.Errorf("fail at beginning tx")
@ -220,7 +220,7 @@ func TestGetBlockByTimeMiddle(t *testing.T) {
ctx := context.Background()
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
tx, err := m.DB.BeginRo(ctx)
if err != nil {
t.Errorf("fail at beginning tx")
@ -272,7 +272,7 @@ func TestGetBlockByTimestamp(t *testing.T) {
ctx := context.Background()
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
tx, err := m.DB.BeginRo(ctx)
if err != nil {
t.Errorf("fail at beginning tx")

View File

@ -26,7 +26,7 @@ func TestNewFilters(t *testing.T) {
assert := assert.New(t)
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t))
mining := txpool.NewMiningClient(conn)

View File

@ -20,12 +20,13 @@ import (
)
func TestPendingBlock(t *testing.T) {
m := stages.Mock(t)
ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t))
mining := txpool.NewMiningClient(conn)
ff := rpchelper.New(ctx, nil, nil, mining, func() {})
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
engine := ethash.NewFaker()
api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), nil, false, rpccfg.DefaultEvmCallTimeout, engine), nil, nil, nil, mining, 5000000, 100_000)
api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3), nil, false, rpccfg.DefaultEvmCallTimeout, engine), nil, nil, nil, mining, 5000000, 100_000)
expect := uint64(12345)
b, err := rlp.EncodeToBytes(types.NewBlockWithHeader(&types.Header{Number: big.NewInt(int64(expect))}))
require.NoError(t, err)

View File

@ -25,7 +25,7 @@ func TestEthSubscribe(t *testing.T) {
if m.HistoryV3 {
t.Skip()
}
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 7, func(i int, b *core.BlockGen) {
b.SetCoinbase(libcommon.Address{1})
}, false /* intermediateHashes */)

View File

@ -44,7 +44,7 @@ func TestGasPrice(t *testing.T) {
m := createGasPriceTestKV(t, testCase.chainSize)
defer m.DB.Close()
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
base := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, false, rpccfg.DefaultEvmCallTimeout, m.Engine)
base := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3), nil, false, rpccfg.DefaultEvmCallTimeout, m.Engine)
eth := NewEthAPI(base, m.DB, nil, nil, nil, 5000000, 100_000)
ctx := context.Background()

View File

@ -30,7 +30,7 @@ Testing tracing RPC API by generating patterns of contracts invoking one another
func TestGeneratedDebugApi(t *testing.T) {
m := rpcdaemontest.CreateTestSentryForTraces(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
baseApi := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine)
api := NewPrivateDebugAPI(baseApi, m.DB, 0)
@ -118,7 +118,7 @@ func TestGeneratedDebugApi(t *testing.T) {
func TestGeneratedTraceApi(t *testing.T) {
m := rpcdaemontest.CreateTestSentryForTraces(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
baseApi := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine)
api := NewTraceAPI(baseApi, m.DB, &httpcfg.HttpCfg{})
@ -277,7 +277,7 @@ func TestGeneratedTraceApi(t *testing.T) {
func TestGeneratedTraceApiCollision(t *testing.T) {
m := rpcdaemontest.CreateTestSentryForTracesCollision(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
baseApi := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine)
api := NewTraceAPI(baseApi, m.DB, &httpcfg.HttpCfg{})

View File

@ -13,7 +13,7 @@ import (
func TestGetContractCreator(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
api := NewOtterscanAPI(NewBaseApi(nil, nil, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB)
addr := libcommon.HexToAddress("0x537e697c7ab75a26f9ecf0ce810e3154dfcaaf44")

View File

@ -151,7 +151,7 @@ func TestBackwardBlockProviderWithMultipleChunksBlockNotFound(t *testing.T) {
func TestSearchTransactionsBefore(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
api := NewOtterscanAPI(NewBaseApi(nil, nil, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB)
addr := libcommon.HexToAddress("0x537e697c7ab75a26f9ecf0ce810e3154dfcaaf44")

View File

@ -13,7 +13,7 @@ import (
func TestGetTransactionBySenderAndNonce(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
api := NewOtterscanAPI(NewBaseApi(nil, nil, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB)
addr := common.HexToAddress("0x537e697c7ab75a26f9ecf0ce810e3154dfcaaf44")

View File

@ -73,7 +73,7 @@ func TestSendRawTransaction(t *testing.T) {
txPool := txpool.NewTxpoolClient(conn)
ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {})
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
api := commands.NewEthAPI(commands.NewBaseApi(ff, stateCache, br, nil, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, nil, txPool, nil, 5000000, 100_000)
buf := bytes.NewBuffer(nil)

View File

@ -23,7 +23,7 @@ func TestEmptyQuery(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
api := NewTraceAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, &httpcfg.HttpCfg{})
// Call GetTransactionReceipt for transaction which is not in the database
@ -43,7 +43,7 @@ func TestCoinbaseBalance(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
api := NewTraceAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, &httpcfg.HttpCfg{})
// Call GetTransactionReceipt for transaction which is not in the database
@ -73,7 +73,7 @@ func TestReplayTransaction(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
api := NewTraceAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, &httpcfg.HttpCfg{})
var txnHash libcommon.Hash
@ -103,7 +103,7 @@ func TestReplayTransaction(t *testing.T) {
func TestReplayBlockTransactions(t *testing.T) {
m, _, _ := rpcdaemontest.CreateTestSentry(t)
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
api := NewTraceAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, &httpcfg.HttpCfg{})

View File

@ -39,7 +39,7 @@ func TestTxPoolContent(t *testing.T) {
txPool := txpool.NewTxpoolClient(conn)
ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {})
agg := m.HistoryV3Components()
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
api := NewTxPoolAPI(NewBaseApi(ff, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine), m.DB, txPool)
expectValue := uint64(1234)

View File

@ -294,7 +294,7 @@ func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *g
ethashApi := apis[1].Service.(*ethash.API)
server := grpc.NewServer()
remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, false))
remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3), nil, nil, nil, false))
txpool.RegisterTxpoolServer(server, m.TxPoolGrpcServer)
txpool.RegisterMiningServer(server, privateapi.NewMiningServer(ctx, &IsMiningMock{}, ethashApi))
listener := bufconn.Listen(1024 * 1024)

View File

@ -26,13 +26,13 @@ import (
"github.com/ledgerwatch/erigon/core/vm"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
"github.com/ledgerwatch/erigon/turbo/services"
"github.com/ledgerwatch/erigon/turbo/snapshotsync"
)
var (
historyfile string
nocheck bool
transactionsV3 bool
)
func init() {
@ -41,6 +41,7 @@ func init() {
withSnapshotBlocks(checkChangeSetsCmd)
checkChangeSetsCmd.Flags().StringVar(&historyfile, "historyfile", "", "path to the file where the changesets and history are expected to be. If omitted, the same as <datadir>/erigon/chaindata")
checkChangeSetsCmd.Flags().BoolVar(&nocheck, "nocheck", false, "set to turn off the changeset checking and only execute transactions (for performance testing)")
checkChangeSetsCmd.Flags().BoolVar(&transactionsV3, "experimental.transactions.v3", false, "(this flag is in testing stage) Not recommended yet: Can't change this flag after node creation. New DB table for transactions allows keeping multiple branches of block bodies in the DB simultaneously")
rootCmd.AddCommand(checkChangeSetsCmd)
}
@ -49,13 +50,13 @@ var checkChangeSetsCmd = &cobra.Command{
Short: "Re-executes historical transactions in read-only mode and checks that their outputs match the database ChangeSets",
RunE: func(cmd *cobra.Command, args []string) error {
logger := log.New()
return CheckChangeSets(genesis, logger, block, chaindata, historyfile, nocheck)
return CheckChangeSets(genesis, logger, block, chaindata, historyfile, nocheck, transactionsV3)
},
}
// CheckChangeSets re-executes historical transactions in read-only mode
// and checks that their outputs match the database ChangeSets.
func CheckChangeSets(genesis *core.Genesis, logger log.Logger, blockNum uint64, chaindata string, historyfile string, nocheck bool) error {
func CheckChangeSets(genesis *core.Genesis, logger log.Logger, blockNum uint64, chaindata string, historyfile string, nocheck bool, transactionV3 bool) error {
if len(historyfile) == 0 {
historyfile = chaindata
}
@ -74,19 +75,13 @@ func CheckChangeSets(genesis *core.Genesis, logger log.Logger, blockNum uint64,
if err != nil {
return err
}
var blockReader services.FullBlockReader
var allSnapshots *snapshotsync.RoSnapshots
useSnapshots := ethconfig.UseSnapshotsByChainName(chainConfig.ChainName) && snapshotsCli
if useSnapshots {
allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadirCli, "snapshots"))
allSnapshots := snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadirCli, "snapshots"))
defer allSnapshots.Close()
if err := allSnapshots.ReopenFolder(); err != nil {
return fmt.Errorf("reopen snapshot segments: %w", err)
}
blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots)
} else {
blockReader = snapshotsync.NewBlockReader()
}
blockReader := snapshotsync.NewBlockReaderWithSnapshots(allSnapshots, transactionV3)
chainDb := db
defer chainDb.Close()
historyDb := chainDb

View File

@ -22,6 +22,7 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
"github.com/ledgerwatch/erigon-lib/kv/mdbx"
kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx"
"github.com/ledgerwatch/log/v3"
@ -29,7 +30,6 @@ import (
"github.com/ledgerwatch/erigon/eth/ethconsensusconfig"
"github.com/ledgerwatch/erigon/turbo/logging"
"github.com/ledgerwatch/erigon/turbo/services"
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/consensus/misc"
@ -225,19 +225,13 @@ func Erigon2(genesis *core.Genesis, chainConfig *chain2.Config, logger log.Logge
}
}()
var blockReader services.FullBlockReader
var allSnapshots *snapshotsync.RoSnapshots
useSnapshots := ethconfig.UseSnapshotsByChainName(chainConfig.ChainName) && snapshotsCli
if useSnapshots {
allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadirCli, "snapshots"))
allSnapshots := snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadirCli, "snapshots"))
defer allSnapshots.Close()
if err := allSnapshots.ReopenWithDB(db); err != nil {
return fmt.Errorf("reopen snapshot segments: %w", err)
}
blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots)
} else {
blockReader = snapshotsync.NewBlockReader()
}
transactionsV3 := kvcfg.TransactionsV3.FromDB(db)
blockReader := snapshotsync.NewBlockReaderWithSnapshots(allSnapshots, transactionsV3)
engine := initConsensusEngine(chainConfig, allSnapshots)
for !interrupt {

View File

@ -23,6 +23,7 @@ import (
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx"
libstate "github.com/ledgerwatch/erigon-lib/state"
@ -199,7 +200,8 @@ func Erigon4(genesis *core.Genesis, chainConfig *chain2.Config, logger log.Logge
if err := allSnapshots.ReopenFolder(); err != nil {
return fmt.Errorf("reopen snapshot segments: %w", err)
}
blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots)
transactionsV3 := kvcfg.TransactionsV3.FromDB(db)
blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots, transactionsV3)
engine := initConsensusEngine(chainConfig, allSnapshots)
getHeader := func(hash libcommon.Hash, number uint64) *types.Header {

View File

@ -141,7 +141,7 @@ func History22(genesis *core.Genesis, logger log.Logger) error {
if err := allSnapshots.ReopenWithDB(db); err != nil {
return fmt.Errorf("reopen snapshot segments: %w", err)
}
blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots)
blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots, ethconfig.Defaults.TransactionsV3)
readWrapper := state.NewHistoryReader23(h.MakeContext(), ri)
for !interrupt {

View File

@ -610,6 +610,10 @@ var (
Name: "experimental.history.v3",
Usage: "(also known as Erigon3) Not recommended yet: Can't change this flag after node creation. New DB and Snapshots format of history allows: parallel blocks execution, get state as of given transaction without executing whole block.",
}
TransactionV3Flag = cli.BoolFlag{
Name: "experimental.transactions.v3",
Usage: "(this flag is in testing stage) Not recommended yet: Can't change this flag after node creation. New DB table for transactions allows keeping multiple branches of block bodies in the DB simultaneously",
}
CliqueSnapshotCheckpointIntervalFlag = cli.UintFlag{
Name: "clique.checkpoint",
@ -1497,6 +1501,7 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C
cfg.Ethstats = ctx.String(EthStatsURLFlag.Name)
cfg.P2PEnabled = len(nodeConfig.P2P.SentryAddr) == 0
cfg.HistoryV3 = ctx.Bool(HistoryV3Flag.Name)
cfg.TransactionsV3 = ctx.Bool(TransactionV3Flag.Name)
if ctx.IsSet(NetworkIdFlag.Name) {
cfg.NetworkID = ctx.Uint64(NetworkIdFlag.Name)
}
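For reference, this is roughly how a urfave/cli boolean flag in the style of `TransactionV3Flag` is registered and read before `SetEthConfig` copies it into `cfg.TransactionsV3`; a standalone sketch with invented app wiring, not Erigon's actual command setup:

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	// The flag definition mirrors the style used above; the app and action
	// are invented purely for illustration.
	txV3 := &cli.BoolFlag{
		Name:  "experimental.transactions.v3",
		Usage: "store transactions in the per-block-hash table",
	}
	app := &cli.App{
		Flags: []cli.Flag{txV3},
		Action: func(ctx *cli.Context) error {
			fmt.Println("transactions v3 requested:", ctx.Bool(txV3.Name))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

Because the value is later pinned with `WriteOnce`, flipping the flag on an existing datadir has no effect on subsequent runs.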

View File

@ -22,6 +22,7 @@ import (
"encoding/binary"
"encoding/json"
"fmt"
"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
"math"
"math/big"
"time"
@ -385,10 +386,17 @@ func ReadStorageBody(db kv.Getter, hash libcommon.Hash, number uint64) (types.Bo
return *bodyForStorage, nil
}
func CanonicalTxnByID(db kv.Getter, id uint64) (types.Transaction, error) {
func CanonicalTxnByID(db kv.Getter, id uint64, blockHash libcommon.Hash, transactionsV3 bool) (types.Transaction, error) {
txIdKey := make([]byte, 8)
binary.BigEndian.PutUint64(txIdKey, id)
v, err := db.GetOne(kv.EthTx, txIdKey)
var v []byte
var err error
if transactionsV3 {
key := append(txIdKey, blockHash.Bytes()...)
v, err = db.GetOne(kv.EthTxV3, key)
} else {
v, err = db.GetOne(kv.EthTx, txIdKey)
}
if err != nil {
return nil, err
}
@ -456,7 +464,7 @@ func NonCanonicalTransactions(db kv.Getter, baseTxId uint64, amount uint32) ([]t
return txs, nil
}
func WriteTransactions(db kv.RwTx, txs []types.Transaction, baseTxId uint64) error {
func WriteTransactions(db kv.RwTx, txs []types.Transaction, baseTxId uint64, blockHash *libcommon.Hash) error {
txId := baseTxId
buf := bytes.NewBuffer(nil)
for _, tx := range txs {
@ -470,22 +478,36 @@ func WriteTransactions(db kv.RwTx, txs []types.Transaction, baseTxId uint64) err
}
// If next Append returns KeyExists error - it means you need to open transaction in App code before calling this func. Batch is also fine.
if blockHash != nil {
key := append(txIdKey, blockHash.Bytes()...)
if err := db.Append(kv.EthTxV3, key, common.CopyBytes(buf.Bytes())); err != nil {
return err
}
} else {
if err := db.Append(kv.EthTx, txIdKey, common.CopyBytes(buf.Bytes())); err != nil {
return err
}
}
}
return nil
}
func WriteRawTransactions(tx kv.RwTx, txs [][]byte, baseTxId uint64) error {
func WriteRawTransactions(tx kv.RwTx, txs [][]byte, baseTxId uint64, blockHash *common2.Hash) error {
txId := baseTxId
for _, txn := range txs {
txIdKey := make([]byte, 8)
binary.BigEndian.PutUint64(txIdKey, txId)
// If next Append returns KeyExists error - it means you need to open transaction in App code before calling this func. Batch is also fine.
if blockHash != nil {
key := append(txIdKey, blockHash.Bytes()...)
if err := tx.Append(kv.EthTxV3, key, txn); err != nil {
return fmt.Errorf("txId=%d, baseTxId=%d, %w", txId, baseTxId, err)
}
} else {
if err := tx.Append(kv.EthTx, txIdKey, txn); err != nil {
return fmt.Errorf("txId=%d, baseTxId=%d, %w", txId, baseTxId, err)
}
}
txId++
}
return nil
@ -676,7 +698,7 @@ func WriteRawBody(db kv.RwTx, hash libcommon.Hash, number uint64, body *types.Ra
}
lastTxnID = baseTxnID + uint64(data.TxAmount) - 1
firstNonSystemTxnID := baseTxnID + 1
if err = WriteRawTransactions(db, body.Transactions, firstNonSystemTxnID); err != nil {
if err = WriteRawTransactions(db, body.Transactions, firstNonSystemTxnID, &hash); err != nil {
return false, 0, fmt.Errorf("WriteRawTransactions: %w", err)
}
return true, lastTxnID, nil
@ -698,7 +720,12 @@ func WriteBody(db kv.RwTx, hash libcommon.Hash, number uint64, body *types.Body)
if err := WriteBodyForStorage(db, hash, number, &data); err != nil {
return fmt.Errorf("failed to write body: %w", err)
}
err = WriteTransactions(db, body.Transactions, baseTxId+1)
transactionV3, _ := kvcfg.TransactionsV3.Enabled(db.(kv.Tx))
if transactionV3 {
err = WriteTransactions(db, body.Transactions, baseTxId+1, &hash)
} else {
err = WriteTransactions(db, body.Transactions, baseTxId+1, nil)
}
if err != nil {
return fmt.Errorf("failed to WriteTransactions: %w", err)
}
@ -724,7 +751,7 @@ func deleteBody(db kv.Deleter, hash libcommon.Hash, number uint64) {
}
// MakeBodiesCanonical - move all txs of non-canonical blocks from NonCanonicalTxs table to EthTx table
func MakeBodiesCanonical(tx kv.RwTx, from uint64, ctx context.Context, logPrefix string, logEvery *time.Ticker, cb func(blockNum, lastTxnNum uint64) error) error {
func MakeBodiesCanonical(tx kv.RwTx, from uint64, ctx context.Context, logPrefix string, logEvery *time.Ticker, transactionsV3 bool, cb func(blockNum, lastTxnNum uint64) error) error {
for blockNum := from; ; blockNum++ {
h, err := ReadCanonicalHash(tx, blockNum)
if err != nil {
@ -751,9 +778,16 @@ func MakeBodiesCanonical(tx kv.RwTx, from uint64, ctx context.Context, logPrefix
i := uint64(0)
if err := tx.ForAmount(kv.NonCanonicalTxs, hexutility.EncodeTs(bodyForStorage.BaseTxId+1), bodyForStorage.TxAmount-2, func(k, v []byte) error {
id := newBaseId + 1 + i
if transactionsV3 {
key := append(hexutility.EncodeTs(id), h.Bytes()...)
if err := tx.Put(kv.EthTxV3, key, v); err != nil {
return err
}
} else {
if err := tx.Put(kv.EthTx, hexutility.EncodeTs(id), v); err != nil {
return err
}
}
if err := tx.Delete(kv.NonCanonicalTxs, k); err != nil {
return err
}
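Callers of MakeBodiesCanonical now have to know the flag. A hedged sketch (the wrapper name is hypothetical) of deriving it from the DB, the same way WriteBody does above, before invoking the function:
func makeBodiesCanonicalAuto(tx kv.RwTx, from uint64, ctx context.Context, logPrefix string,
	logEvery *time.Ticker, cb func(blockNum, lastTxnNum uint64) error) error {
	// read the persisted TransactionsV3 flag instead of threading it through by hand
	txsV3, err := kvcfg.TransactionsV3.Enabled(tx)
	if err != nil {
		return err
	}
	return MakeBodiesCanonical(tx, from, ctx, logPrefix, logEvery, txsV3, cb)
}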

View File

@@ -263,6 +263,11 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
return err
}
config.TransactionsV3, err = kvcfg.TransactionsV3.WriteOnce(tx, config.TransactionsV3)
if err != nil {
return err
}
// if we are in the incorrect syncmode then we change it to the appropriate one
if !isCorrectSync {
log.Warn("Incorrect snapshot enablement", "got", config.Sync.UseSnapshots, "change_to", useSnapshots)
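The kvcfg.TransactionsV3.WriteOnce call above pins the table layout to whatever the node was first started with, so the flag cannot flip between runs and leave transactions split across EthTx and EthTxV3. A rough sketch of the assumed write-once semantics (not the actual kvcfg implementation; the table and byte encoding are assumptions):
// writeOnceBool: return the stored value if the flag was persisted before,
// otherwise persist the requested value and return it.
func writeOnceBool(tx kv.RwTx, key []byte, requested bool) (bool, error) {
	stored, err := tx.GetOne(kv.DatabaseInfo, key)
	if err != nil {
		return false, err
	}
	if len(stored) > 0 { // already decided on a previous run: the stored value wins
		return stored[0] == 1, nil
	}
	v := []byte{0}
	if requested {
		v[0] = 1
	}
	return requested, tx.Put(kv.DatabaseInfo, key, v)
}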
@@ -297,7 +302,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
Accumulator: shards.NewAccumulator(),
},
}
blockReader, allSnapshots, agg, err := backend.setUpBlockReader(ctx, config.Dirs, config.Snapshot, config.Downloader, backend.notifications.Events)
blockReader, allSnapshots, agg, err := backend.setUpBlockReader(ctx, config.Dirs, config.Snapshot, config.Downloader, backend.notifications.Events, config.TransactionsV3)
if err != nil {
return nil, err
}
@@ -502,7 +507,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
mining := stagedsync.New(
stagedsync.MiningStages(backend.sentryCtx,
stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miner, *backend.chainConfig, backend.engine, backend.txPool2, backend.txPool2DB, nil, tmpdir),
stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, nil, 0, backend.txPool2, backend.txPool2DB),
stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, nil, 0, backend.txPool2, backend.txPool2DB, allSnapshots, config.TransactionsV3),
stagedsync.StageHashStateCfg(backend.chainDB, dirs, config.HistoryV3, backend.agg),
stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil, config.HistoryV3, backend.agg),
stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miner, backend.miningSealingQuit),
@@ -520,7 +525,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
proposingSync := stagedsync.New(
stagedsync.MiningStages(backend.sentryCtx,
stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miningStatePos, *backend.chainConfig, backend.engine, backend.txPool2, backend.txPool2DB, param, tmpdir),
stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt, param.PayloadId, backend.txPool2, backend.txPool2DB),
stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt, param.PayloadId, backend.txPool2, backend.txPool2DB, allSnapshots, config.TransactionsV3),
stagedsync.StageHashStateCfg(backend.chainDB, dirs, config.HistoryV3, backend.agg),
stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil, config.HistoryV3, backend.agg),
stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miningStatePos, backend.miningSealingQuit),
@@ -969,18 +974,13 @@ func (s *Ethereum) NodesInfo(limit int) (*remote.NodesInfoReply, error) {
}
// sets up blockReader and client downloader
func (s *Ethereum) setUpBlockReader(ctx context.Context, dirs datadir.Dirs, snConfig ethconfig.Snapshot, downloaderCfg *downloadercfg.Cfg, notifications *shards.Events) (services.FullBlockReader, *snapshotsync.RoSnapshots, *libstate.AggregatorV3, error) {
if !snConfig.Enabled {
blockReader := snapshotsync.NewBlockReader()
return blockReader, nil, nil, nil
}
func (s *Ethereum) setUpBlockReader(ctx context.Context, dirs datadir.Dirs, snConfig ethconfig.Snapshot, downloaderCfg *downloadercfg.Cfg, notifications *shards.Events, transactionsV3 bool) (services.FullBlockReader, *snapshotsync.RoSnapshots, *libstate.AggregatorV3, error) {
allSnapshots := snapshotsync.NewRoSnapshots(snConfig, dirs.Snap)
var err error
if !snConfig.NoDownloader {
allSnapshots.OptimisticalyReopenWithDB(s.chainDB)
}
blockReader := snapshotsync.NewBlockReaderWithSnapshots(allSnapshots)
blockReader := snapshotsync.NewBlockReaderWithSnapshots(allSnapshots, transactionsV3)
if !snConfig.NoDownloader {
if snConfig.DownloaderAddr != "" {

View File

@@ -230,6 +230,9 @@ type Config struct {
// gRPC Address to connect to Heimdall node
HeimdallgRPCAddress string
// TransactionsV3 enables a new DB table for storing transactions, which allows keeping multiple branches of block bodies in the DB simultaneously
TransactionsV3 bool
// URL to connect to Heimdall node
HeimdallURL string

View File

@@ -3,3 +3,4 @@
package ethconfig
const EnableHistoryV3InTest = false
const EnableTransactionsV3InTest = false
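These compile-time toggles mirror EnableHistoryV3InTest. A sketch of how a test harness is presumably expected to pick them up (the Config fields come from the ethconfig change above; the wiring itself is an assumption):
cfg := ethconfig.Defaults
cfg.HistoryV3 = ethconfig.EnableHistoryV3InTest
cfg.TransactionsV3 = ethconfig.EnableTransactionsV3InTest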

View File

@@ -35,10 +35,11 @@ type BodiesCfg struct {
snapshots *snapshotsync.RoSnapshots
blockReader services.FullBlockReader
historyV3 bool
transactionsV3 bool
}
func StageBodiesCfg(db kv.RwDB, bd *bodydownload.BodyDownload, bodyReqSend func(context.Context, *bodydownload.BodyRequest) ([64]byte, bool), penalise func(context.Context, []headerdownload.PenaltyItem), blockPropagator adapter.BlockPropagator, timeout int, chanConfig chain.Config, snapshots *snapshotsync.RoSnapshots, blockReader services.FullBlockReader, historyV3 bool) BodiesCfg {
return BodiesCfg{db: db, bd: bd, bodyReqSend: bodyReqSend, penalise: penalise, blockPropagator: blockPropagator, timeout: timeout, chanConfig: chanConfig, snapshots: snapshots, blockReader: blockReader, historyV3: historyV3}
func StageBodiesCfg(db kv.RwDB, bd *bodydownload.BodyDownload, bodyReqSend func(context.Context, *bodydownload.BodyRequest) ([64]byte, bool), penalise func(context.Context, []headerdownload.PenaltyItem), blockPropagator adapter.BlockPropagator, timeout int, chanConfig chain.Config, snapshots *snapshotsync.RoSnapshots, blockReader services.FullBlockReader, historyV3 bool, transactionsV3 bool) BodiesCfg {
return BodiesCfg{db: db, bd: bd, bodyReqSend: bodyReqSend, penalise: penalise, blockPropagator: blockPropagator, timeout: timeout, chanConfig: chanConfig, snapshots: snapshots, blockReader: blockReader, historyV3: historyV3, transactionsV3: transactionsV3}
}
// BodiesForward progresses Bodies stage in the forward direction
@@ -105,7 +106,7 @@ func BodiesForward(
// Property of blockchain: blocks at the same height on different forks have different hashes.
// This means we can mark all canonical blocks as non-canonical on unwind, and
// do the opposite here, without storing any meta-info.
if err := rawdb.MakeBodiesCanonical(tx, s.BlockNumber+1, ctx, logPrefix, logEvery, func(blockNum, lastTxnNum uint64) error {
if err := rawdb.MakeBodiesCanonical(tx, s.BlockNumber+1, ctx, logPrefix, logEvery, cfg.transactionsV3, func(blockNum, lastTxnNum uint64) error {
if cfg.historyV3 {
if err := rawdbv3.TxNums.Append(tx, blockNum, lastTxnNum); err != nil {
return err

View File

@@ -44,7 +44,7 @@ func TestBodiesUnwind(t *testing.T) {
require.Equal(5*(3+2), int(n)) // 5 blocks from block 0, each with 3 txns plus 2 system txns
}
{
err = rawdb.MakeBodiesCanonical(tx, 5+1, ctx, "test", logEvery, nil) // block 5 already canonical, start from next one
err = rawdb.MakeBodiesCanonical(tx, 5+1, ctx, "test", logEvery, false, nil) // block 5 already canonical, start from next one
require.NoError(err)
n, err := tx.ReadSequence(kv.EthTx)
require.NoError(err)
@@ -69,7 +69,7 @@ func TestBodiesUnwind(t *testing.T) {
require.NoError(err)
require.Equal(5*(3+2), int(n)) // 5 blocks from block 0, each with 3 txns plus 2 system txns
err = rawdb.MakeBodiesCanonical(tx, 5+1, ctx, "test", logEvery, nil) // block 5 already canonical, start from next one
err = rawdb.MakeBodiesCanonical(tx, 5+1, ctx, "test", logEvery, false, nil) // block 5 already canonical, start from next one
require.NoError(err)
n, err = tx.ReadSequence(kv.EthTx)
require.NoError(err)

View File

@@ -77,7 +77,7 @@ func TestAccountAndStorageTrie(t *testing.T) {
// ----------------------------------------------------------------
historyV3 := false
blockReader := snapshotsync.NewBlockReader()
blockReader := snapshotsync.NewBlockReaderWithSnapshots(nil, false)
cfg := StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil)
_, err := RegenerateIntermediateHashes("IH", tx, cfg, libcommon.Hash{} /* expectedRootHash */, ctx)
assert.Nil(t, err)
@@ -199,7 +199,7 @@ func TestAccountTrieAroundExtensionNode(t *testing.T) {
hash6 := libcommon.HexToHash("0x3100000000000000000000000000000000000000000000000000000000000000")
assert.Nil(t, tx.Put(kv.HashedAccounts, hash6[:], encoded))
blockReader := snapshotsync.NewBlockReader()
blockReader := snapshotsync.NewBlockReaderWithSnapshots(nil, false)
_, err := RegenerateIntermediateHashes("IH", tx, StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil), libcommon.Hash{} /* expectedRootHash */, ctx)
assert.Nil(t, err)
@@ -262,7 +262,7 @@ func TestStorageDeletion(t *testing.T) {
// Populate account & storage trie DB tables
// ----------------------------------------------------------------
historyV3 := false
blockReader := snapshotsync.NewBlockReader()
blockReader := snapshotsync.NewBlockReaderWithSnapshots(nil, false)
cfg := StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil)
_, err = RegenerateIntermediateHashes("IH", tx, cfg, libcommon.Hash{} /* expectedRootHash */, ctx)
assert.Nil(t, err)
@@ -381,7 +381,7 @@ func TestHiveTrieRoot(t *testing.T) {
common.FromHex("02081bc16d674ec80000")))
historyV3 := false
blockReader := snapshotsync.NewBlockReader()
blockReader := snapshotsync.NewBlockReaderWithSnapshots(nil, false)
cfg := StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil)
_, err := RegenerateIntermediateHashes("IH", tx, cfg, libcommon.Hash{} /* expectedRootHash */, ctx)
require.Nil(t, err)

View File

@@ -66,6 +66,8 @@ func StageMiningExecCfg(
payloadId uint64,
txPool2 *txpool.TxPool,
txPool2DB kv.RoDB,
snapshots *snapshotsync.RoSnapshots,
transactionsV3 bool,
) MiningExecCfg {
return MiningExecCfg{
db: db,
@@ -73,7 +75,7 @@
notifier: notifier,
chainConfig: chainConfig,
engine: engine,
blockReader: snapshotsync.NewBlockReader(),
blockReader: snapshotsync.NewBlockReaderWithSnapshots(snapshots, transactionsV3),
vmConfig: vmConfig,
tmpdir: tmpdir,
interrupt: interrupt,

View File

@@ -239,7 +239,7 @@ func writeRawBodyDeprecated(db kv.RwTx, hash common2.Hash, number uint64, body *
if err = rawdb.WriteBodyForStorage(db, hash, number, &data); err != nil {
return fmt.Errorf("failed to write body: %w", err)
}
if err = rawdb.WriteRawTransactions(db, body.Transactions, baseTxId); err != nil {
if err = rawdb.WriteRawTransactions(db, body.Transactions, baseTxId, &hash); err != nil {
return fmt.Errorf("failed to WriteRawTransactions: %w, blockNum=%d", err, number)
}
return nil

View File

@@ -118,6 +118,7 @@ var DefaultFlags = []cli.Flag{
&utils.MetricsHTTPFlag,
&utils.MetricsPortFlag,
&utils.HistoryV3Flag,
&utils.TransactionV3Flag,
&utils.IdentityFlag,
&utils.CliqueSnapshotCheckpointIntervalFlag,
&utils.CliqueSnapshotInmemorySnapshotsFlag,

View File

@@ -18,104 +18,6 @@ import (
"github.com/ledgerwatch/erigon/rlp"
)
// BlockReader can read blocks from db and snapshots
type BlockReader struct {
}
func NewBlockReader() *BlockReader {
return &BlockReader{}
}
func (back *BlockReader) CanonicalHash(ctx context.Context, tx kv.Getter, blockHeight uint64) (libcommon.Hash, error) {
return rawdb.ReadCanonicalHash(tx, blockHeight)
}
func (back *BlockReader) Snapshots() *RoSnapshots { return nil }
func (back *BlockReader) Header(ctx context.Context, tx kv.Getter, hash libcommon.Hash, blockHeight uint64) (*types.Header, error) {
h := rawdb.ReadHeader(tx, hash, blockHeight)
return h, nil
}
func (back *BlockReader) Body(ctx context.Context, tx kv.Getter, hash libcommon.Hash, blockHeight uint64) (body *types.Body, txAmount uint32, err error) {
body, _, txAmount = rawdb.ReadBody(tx, hash, blockHeight)
return body, txAmount, nil
}
func (back *BlockReader) BodyWithTransactions(ctx context.Context, tx kv.Getter, hash libcommon.Hash, blockHeight uint64) (body *types.Body, err error) {
return rawdb.ReadBodyWithTransactions(tx, hash, blockHeight)
}
func (back *BlockReader) BodyRlp(ctx context.Context, tx kv.Getter, hash libcommon.Hash, blockHeight uint64) (bodyRlp rlp.RawValue, err error) {
body, _, err := back.Body(ctx, tx, hash, blockHeight)
if err != nil {
return nil, err
}
bodyRlp, err = rlp.EncodeToBytes(body)
if err != nil {
return nil, err
}
return bodyRlp, nil
}
func (back *BlockReader) HeaderByNumber(ctx context.Context, tx kv.Getter, blockHeight uint64) (*types.Header, error) {
h := rawdb.ReadHeaderByNumber(tx, blockHeight)
return h, nil
}
func (back *BlockReader) HeaderByHash(ctx context.Context, tx kv.Getter, hash libcommon.Hash) (*types.Header, error) {
return rawdb.ReadHeaderByHash(tx, hash)
}
func (back *BlockReader) BlockWithSenders(ctx context.Context, tx kv.Getter, hash libcommon.Hash, blockHeight uint64) (block *types.Block, senders []libcommon.Address, err error) {
canonicalHash, err := rawdb.ReadCanonicalHash(tx, blockHeight)
if err != nil {
return nil, nil, fmt.Errorf("requested non-canonical hash %x. canonical=%x", hash, canonicalHash)
}
if canonicalHash == hash {
block, senders, err = rawdb.ReadBlockWithSenders(tx, hash, blockHeight)
if err != nil {
return nil, nil, err
}
return block, senders, nil
}
return rawdb.NonCanonicalBlockWithSenders(tx, hash, blockHeight)
}
func (back *BlockReader) TxnLookup(ctx context.Context, tx kv.Getter, txnHash libcommon.Hash) (uint64, bool, error) {
n, err := rawdb.ReadTxLookupEntry(tx, txnHash)
if err != nil {
return 0, false, err
}
if n == nil {
return 0, false, nil
}
return *n, true, nil
}
func (back *BlockReader) TxnByIdxInBlock(ctx context.Context, tx kv.Getter, blockNum uint64, i int) (txn types.Transaction, err error) {
canonicalHash, err := rawdb.ReadCanonicalHash(tx, blockNum)
if err != nil {
return nil, err
}
var k [8 + 32]byte
binary.BigEndian.PutUint64(k[:], blockNum)
copy(k[8:], canonicalHash[:])
b, err := rawdb.ReadBodyForStorageByKey(tx, k[:])
if err != nil {
return nil, err
}
if b == nil {
return nil, nil
}
txn, err = rawdb.CanonicalTxnByID(tx, b.BaseTxId+1+uint64(i))
if err != nil {
return nil, err
}
return txn, nil
}
type RemoteBlockReader struct {
client remote.ETHBACKENDClient
}
@@ -253,10 +155,11 @@ func (back *RemoteBlockReader) BodyRlp(ctx context.Context, tx kv.Getter, hash l
// BlockReaderWithSnapshots can read blocks from db and snapshots
type BlockReaderWithSnapshots struct {
sn *RoSnapshots
TransactionsV3 bool
}
func NewBlockReaderWithSnapshots(snapshots *RoSnapshots) *BlockReaderWithSnapshots {
return &BlockReaderWithSnapshots{sn: snapshots}
func NewBlockReaderWithSnapshots(snapshots *RoSnapshots, transactionsV3 bool) *BlockReaderWithSnapshots {
return &BlockReaderWithSnapshots{sn: snapshots, TransactionsV3: transactionsV3}
}
func (back *BlockReaderWithSnapshots) Snapshots() *RoSnapshots { return back.sn }
@@ -763,7 +666,7 @@ func (back *BlockReaderWithSnapshots) TxnByIdxInBlock(ctx context.Context, tx kv
return nil, nil
}
txn, err = rawdb.CanonicalTxnByID(tx, b.BaseTxId+1+uint64(i))
txn, err = rawdb.CanonicalTxnByID(tx, b.BaseTxId+1+uint64(i), canonicalHash, back.TransactionsV3)
if err != nil {
return nil, err
}
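rawdb.CanonicalTxnByID itself is outside this hunk; a hedged sketch of what the flag-aware lookup presumably does, based on the two key layouts used elsewhere in this commit (the helper name and body are assumptions, only the table and key selection are grounded in the diff):
func canonicalTxnRLPByID(tx kv.Getter, id uint64, blockHash libcommon.Hash, transactionsV3 bool) ([]byte, error) {
	txIdKey := make([]byte, 8)
	binary.BigEndian.PutUint64(txIdKey, id)
	if transactionsV3 {
		// EthTxV3 keys carry the canonical block hash alongside the txn id
		return tx.GetOne(kv.EthTxV3, append(txIdKey, blockHash.Bytes()...))
	}
	return tx.GetOne(kv.EthTx, txIdKey)
}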

View File

@@ -490,7 +490,7 @@ func TestChainTxReorgs(t *testing.T) {
t.Errorf("drop %d: receipt %v found while shouldn't have been", i, rcpt)
}
}
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
// added tx
txs = types.Transactions{pastAdd, freshAdd, futureAdd}
@@ -793,7 +793,7 @@ func doModesTest(t *testing.T, pm prune.Mode) error {
require.Equal(uint64(0), found.Minimum())
}
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
if pm.TxIndex.Enabled() {
b, err := rawdb.ReadBlockByNumber(tx, 1)

View File

@@ -1,4 +1,4 @@
package headerdownload
package headerdownload_test
import (
"context"
@@ -13,9 +13,12 @@ import (
"github.com/ledgerwatch/erigon/params"
"github.com/ledgerwatch/erigon/rlp"
"github.com/ledgerwatch/erigon/turbo/snapshotsync"
"github.com/ledgerwatch/erigon/turbo/stages"
"github.com/ledgerwatch/erigon/turbo/stages/headerdownload"
)
func TestInserter1(t *testing.T) {
m := stages.Mock(t)
funds := big.NewInt(1000000000)
key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
address := crypto.PubkeyToAddress(key.PublicKey)
@@ -37,7 +40,7 @@ func TestInserter1(t *testing.T) {
t.Fatal(err)
}
defer tx.Rollback()
hi := NewHeaderInserter("headers", big.NewInt(0), 0, snapshotsync.NewBlockReader())
hi := headerdownload.NewHeaderInserter("headers", big.NewInt(0), 0, snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3))
h1 := types.Header{
Number: big.NewInt(1),
Difficulty: big.NewInt(10),
@@ -51,11 +54,11 @@
}
h2Hash := h2.Hash()
data1, _ := rlp.EncodeToBytes(&h1)
if _, err = hi.FeedHeaderPoW(tx, snapshotsync.NewBlockReader(), &h1, data1, h1Hash, 1); err != nil {
if _, err = hi.FeedHeaderPoW(tx, snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3), &h1, data1, h1Hash, 1); err != nil {
t.Errorf("feed empty header 1: %v", err)
}
data2, _ := rlp.EncodeToBytes(&h2)
if _, err = hi.FeedHeaderPoW(tx, snapshotsync.NewBlockReader(), &h2, data2, h2Hash, 2); err != nil {
if _, err = hi.FeedHeaderPoW(tx, snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3), &h2, data2, h2Hash, 2); err != nil {
t.Errorf("feed empty header 2: %v", err)
}
}

View File

@@ -97,6 +97,7 @@ type MockSentry struct {
txPoolDB kv.RwDB
HistoryV3 bool
TransactionsV3 bool
agg *libstate.AggregatorV3
BlockSnapshots *snapshotsync.RoSnapshots
}
@@ -281,11 +282,12 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey
PeerId: gointerfaces.ConvertHashToH512([64]byte{0x12, 0x34, 0x50}), // "12345"
BlockSnapshots: snapshotsync.NewRoSnapshots(ethconfig.Defaults.Snapshot, dirs.Snap),
HistoryV3: cfg.HistoryV3,
TransactionsV3: cfg.TransactionsV3,
}
if t != nil {
t.Cleanup(mock.Close)
}
blockReader := snapshotsync.NewBlockReaderWithSnapshots(mock.BlockSnapshots)
blockReader := snapshotsync.NewBlockReaderWithSnapshots(mock.BlockSnapshots, mock.TransactionsV3)
mock.Address = crypto.PubkeyToAddress(mock.Key.PublicKey)
@@ -410,6 +412,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey
mock.BlockSnapshots,
blockReader,
cfg.HistoryV3,
cfg.TransactionsV3,
),
stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, false, dirs.Tmp, prune, blockRetire, nil),
stagedsync.StageExecuteBlocksCfg(
@@ -462,7 +465,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey
mock.MiningSync = stagedsync.New(
stagedsync.MiningStages(mock.Ctx,
stagedsync.StageMiningCreateBlockCfg(mock.DB, miner, *mock.ChainConfig, mock.Engine, mock.TxPool, nil, nil, dirs.Tmp),
stagedsync.StageMiningExecCfg(mock.DB, miner, nil, *mock.ChainConfig, mock.Engine, &vm.Config{}, dirs.Tmp, nil, 0, mock.TxPool, nil),
stagedsync.StageMiningExecCfg(mock.DB, miner, nil, *mock.ChainConfig, mock.Engine, &vm.Config{}, dirs.Tmp, nil, 0, mock.TxPool, nil, mock.BlockSnapshots, cfg.TransactionsV3),
stagedsync.StageHashStateCfg(mock.DB, dirs, cfg.HistoryV3, mock.agg),
stagedsync.StageTrieCfg(mock.DB, false, true, false, dirs.Tmp, blockReader, nil, cfg.HistoryV3, mock.agg),
stagedsync.StageMiningFinishCfg(mock.DB, *mock.ChainConfig, mock.Engine, miner, miningCancel),

View File

@@ -31,7 +31,6 @@ import (
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
"github.com/ledgerwatch/erigon/p2p"
"github.com/ledgerwatch/erigon/turbo/engineapi"
"github.com/ledgerwatch/erigon/turbo/services"
"github.com/ledgerwatch/erigon/turbo/shards"
"github.com/ledgerwatch/erigon/turbo/snapshotsync"
"github.com/ledgerwatch/erigon/turbo/stages/bodydownload"
@@ -358,12 +357,7 @@ func NewDefaultStages(ctx context.Context,
engine consensus.Engine,
) []*stagedsync.Stage {
dirs := cfg.Dirs
var blockReader services.FullBlockReader
if cfg.Snapshot.Enabled {
blockReader = snapshotsync.NewBlockReaderWithSnapshots(snapshots)
} else {
blockReader = snapshotsync.NewBlockReader()
}
blockReader := snapshotsync.NewBlockReaderWithSnapshots(snapshots, cfg.TransactionsV3)
blockRetire := snapshotsync.NewBlockRetire(1, dirs.Tmp, snapshots, db, snapDownloader, notifications.Events)
// During Import we don't want other services like header requests, body requests etc. to be running.
@@ -411,6 +405,7 @@
snapshots,
blockReader,
cfg.HistoryV3,
cfg.TransactionsV3,
),
stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockRetire, controlServer.Hd),
stagedsync.StageExecuteBlocksCfg(
@@ -443,12 +438,7 @@
}
func NewInMemoryExecution(ctx context.Context, db kv.RwDB, cfg *ethconfig.Config, controlServer *sentry.MultiClient, dirs datadir.Dirs, notifications *shards.Notifications, snapshots *snapshotsync.RoSnapshots, agg *state.AggregatorV3) (*stagedsync.Sync, error) {
var blockReader services.FullBlockReader
if cfg.Snapshot.Enabled {
blockReader = snapshotsync.NewBlockReaderWithSnapshots(snapshots)
} else {
blockReader = snapshotsync.NewBlockReader()
}
blockReader := snapshotsync.NewBlockReaderWithSnapshots(snapshots, cfg.TransactionsV3)
return stagedsync.New(
stagedsync.StateStages(ctx,
@@ -467,7 +457,7 @@ func NewInMemoryExecution(ctx context.Context, db kv.RwDB, cfg *ethconfig.Config
dirs.Tmp,
nil, nil,
),
stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, snapshots, blockReader, cfg.HistoryV3),
stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, snapshots, blockReader, cfg.HistoryV3, cfg.TransactionsV3),
stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig),
stagedsync.StageSendersCfg(db, controlServer.ChainConfig, true, dirs.Tmp, cfg.Prune, nil, controlServer.Hd),
stagedsync.StageExecuteBlocksCfg(