Mirror of https://gitlab.com/pulsechaincom/erigon-pulse.git (synced 2024-12-22 03:30:37 +00:00)
move e2 snapshots management closer to e3: step 1 (#7543)
- always RLock all snapshots - to guarantee consistency
- introduce class View (analog of RoTx and MakeContext)
- move read methods to View object
- View object will be managed by temporal_tx

Co-authored-by: Alex Sharp <alexsharp@Alexs-MacBook-Pro-2.local>
Parent: cb04c203d8
Commit: 2865b85888
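The commit message only sketches the View idea, so here is a minimal, hypothetical Go sketch of the pattern it describes — not the actual erigon types (the real RoSnapshots, View, and segment types live in turbo/snapshotsync and keep one segment list per file type). The point illustrated: View() takes a read lock for the lifetime of the view so every read sees a consistent set of snapshot files, read methods hang off the View, and Close releases the lock, mirroring how MakeContext / RoTx work.

```go
package snapshots

import "sync"

// Segment stands in for one snapshot file covering blocks [from, to).
type Segment struct{ from, to uint64 }

// RoSnapshots stands in for the shared, re-openable snapshot set.
type RoSnapshots struct {
	lock     sync.RWMutex
	segments []*Segment
}

// View holds the read lock for its whole lifetime, so all reads performed
// through it see one consistent set of segment files.
type View struct {
	s      *RoSnapshots
	closed bool
}

func (s *RoSnapshots) View() *View {
	s.lock.RLock()
	return &View{s: s}
}

func (v *View) Close() {
	if v.closed {
		return
	}
	v.closed = true
	v.s.lock.RUnlock()
}

// Read methods move onto the View, e.g. locating the segment for a block.
func (v *View) Segment(blockNum uint64) (*Segment, bool) {
	for _, seg := range v.s.segments {
		if seg.from <= blockNum && blockNum < seg.to {
			return seg, true
		}
	}
	return nil, false
}
```

Callers then follow the `view := sn.View(); defer view.Close()` shape that appears in the BlockReader changes later in this diff (for example in HeaderByNumber and HeaderByHash).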
@@ -33,6 +33,7 @@ import (
state2 "github.com/ledgerwatch/erigon-lib/state"
types2 "github.com/ledgerwatch/erigon-lib/types"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/turbo/services"
"github.com/ledgerwatch/log/v3"

ethereum "github.com/ledgerwatch/erigon"
@@ -123,8 +124,8 @@ func NewTestSimulatedBackendWithConfig(t *testing.T, alloc types.GenesisAlloc, c
}
func (b *SimulatedBackend) DB() kv.RwDB { return b.m.DB }
func (b *SimulatedBackend) Agg() *state2.AggregatorV3 { return b.m.HistoryV3Components() }
func (b *SimulatedBackend) BlockReader() *snapshotsync.BlockReaderWithSnapshots {
return snapshotsync.NewBlockReaderWithSnapshots(b.m.BlockSnapshots, b.m.TransactionsV3)
func (b *SimulatedBackend) BlockReader() services.FullBlockReader {
return snapshotsync.NewBlockReader(b.m.BlockSnapshots, b.m.TransactionsV3)
}
func (b *SimulatedBackend) HistoryV3() bool { return b.m.HistoryV3 }
func (b *SimulatedBackend) Engine() consensus.Engine { return b.m.Engine }
@@ -846,7 +846,7 @@ func (s *Ethereum) setUpBlockReader(ctx context.Context, dirs datadir.Dirs, snCo
if !snConfig.NoDownloader {
allSnapshots.OptimisticalyReopenWithDB(s.chainDB)
}
blockReader := snapshotsync.NewBlockReaderWithSnapshots(allSnapshots, transactionsV3)
blockReader := snapshotsync.NewBlockReader(allSnapshots, transactionsV3)

if !snConfig.NoDownloader {
if snConfig.DownloaderAddr != "" {
@@ -46,7 +46,7 @@ func NewStagedSync(ctx context.Context,
logger log.Logger,
) (*stagedsync.Sync, error) {
dirs := cfg.Dirs
blockReader := snapshotsync.NewBlockReaderWithSnapshots(snapshots, transactionsV3)
blockReader := snapshotsync.NewBlockReader(snapshots, transactionsV3)
blockRetire := snapshotsync.NewBlockRetire(1, dirs.Tmp, snapshots, db, snapDownloader, notifications.Events, logger)

// During Import we don't want other services like header requests, body requests etc. to be running.
@@ -530,6 +530,8 @@ func extractBodies(datadir string) error {
Produce: false,
}, filepath.Join(datadir, "snapshots"), log.New())
snaps.ReopenFolder()

/* method Iterate was removed, need re-implement
snaps.Bodies.View(func(sns []*snapshotsync.BodySegment) error {
for _, sn := range sns {
var firstBlockNum, firstBaseTxNum, firstAmount uint64
@@ -562,13 +564,14 @@ func extractBodies(datadir string) error {
}
return nil
})
if _, err := snaps.ViewTxs(snaps.BlocksAvailable(), func(sn *snapshotsync.TxnSegment) error {
lastTxnID := sn.IdxTxnHash.BaseDataID() + uint64(sn.Seg.Count())
fmt.Printf("txTxnID = %d\n", lastTxnID)
return nil
}); err != nil {
*/
br := snapshotsync.NewBlockReader(snaps, false)
lastTxnID, _, err := br.LastTxNumInSnapshot(snaps.BlocksAvailable())
if err != nil {
return err
}
fmt.Printf("txTxnID = %d\n", lastTxnID)

db := mdbx.MustOpen(filepath.Join(datadir, "chaindata"))
defer db.Close()
tx, err := db.BeginRo(context.Background())
@@ -1400,7 +1400,7 @@ func getBlockReader(db kv.RoDB, logger log.Logger) (blockReader services.FullBlo
openBlockReaderOnce.Do(func() {
sn, _ := allSnapshots(context.Background(), db, logger)
transactionsV3 := kvcfg.TransactionsV3.FromDB(db)
_blockReaderSingleton = snapshotsync.NewBlockReaderWithSnapshots(sn, transactionsV3)
_blockReaderSingleton = snapshotsync.NewBlockReader(sn, transactionsV3)
})
return _blockReaderSingleton
}
@@ -389,7 +389,7 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger,
}()
}
onNewSnapshot()
blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots, ethconfig.Defaults.TransactionsV3)
blockReader = snapshotsync.NewBlockReader(allSnapshots, ethconfig.Defaults.TransactionsV3)

var histV3Enabled bool
_ = db.View(ctx, func(tx kv.Tx) error {
@ -54,7 +54,7 @@ func TestCallTraceOneByOne(t *testing.T) {
|
||||
}
|
||||
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
api := NewTraceAPI(
|
||||
NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs),
|
||||
m.DB, &httpcfg.HttpCfg{})
|
||||
@ -103,7 +103,7 @@ func TestCallTraceUnwind(t *testing.T) {
|
||||
}
|
||||
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, &httpcfg.HttpCfg{})
|
||||
|
||||
if err = m.InsertChain(chainA); err != nil {
|
||||
@ -166,7 +166,7 @@ func TestFilterNoAddresses(t *testing.T) {
|
||||
t.Fatalf("generate chain: %v", err)
|
||||
}
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, &httpcfg.HttpCfg{})
|
||||
// Insert blocks 1 by 1, to tirgget possible "off by one" errors
|
||||
for i := 0; i < chain.Length(); i++ {
|
||||
@ -192,7 +192,7 @@ func TestFilterNoAddresses(t *testing.T) {
|
||||
func TestFilterAddressIntersection(t *testing.T) {
|
||||
m := stages.Mock(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, &httpcfg.HttpCfg{})
|
||||
|
||||
toAddress1, toAddress2, other := common.Address{1}, common.Address{2}, common.Address{3}
|
||||
|
@ -21,7 +21,7 @@ func TestNotFoundMustReturnNil(t *testing.T) {
|
||||
require := require.New(t)
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(
|
||||
NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs),
|
||||
|
@ -54,7 +54,7 @@ var debugTraceTransactionNoRefundTests = []struct {
|
||||
func TestTraceBlockByNumber(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
baseApi := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs)
|
||||
ethApi := NewEthAPI(baseApi, m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
@ -103,7 +103,7 @@ func TestTraceBlockByNumber(t *testing.T) {
|
||||
func TestTraceBlockByHash(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
baseApi := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs)
|
||||
ethApi := NewEthAPI(baseApi, m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
@ -139,7 +139,7 @@ func TestTraceBlockByHash(t *testing.T) {
|
||||
func TestTraceTransaction(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
base := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs)
|
||||
api := NewPrivateDebugAPI(base, m.DB, 0)
|
||||
@ -171,7 +171,7 @@ func TestTraceTransaction(t *testing.T) {
|
||||
|
||||
func TestTraceTransactionNoRefund(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
agg := m.HistoryV3Components()
|
||||
api := NewPrivateDebugAPI(
|
||||
NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs),
|
||||
@ -205,7 +205,7 @@ func TestTraceTransactionNoRefund(t *testing.T) {
|
||||
|
||||
func TestStorageRangeAt(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
agg := m.HistoryV3Components()
|
||||
api := NewPrivateDebugAPI(
|
||||
NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs),
|
||||
@ -302,7 +302,7 @@ func TestStorageRangeAt(t *testing.T) {
|
||||
|
||||
func TestAccountRange(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
agg := m.HistoryV3Components()
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
base := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs)
|
||||
@ -365,7 +365,7 @@ func TestAccountRange(t *testing.T) {
|
||||
|
||||
func TestGetModifiedAccountsByNumber(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
agg := m.HistoryV3Components()
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
base := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs)
|
||||
@ -472,7 +472,7 @@ func TestMapTxNum2BlockNum(t *testing.T) {
|
||||
func TestAccountAt(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
base := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs)
|
||||
api := NewPrivateDebugAPI(base, m.DB, 0)
|
||||
|
@ -31,7 +31,7 @@ import (
|
||||
func TestGetLogs(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
agg := m.HistoryV3Components()
|
||||
baseApi := NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs)
|
||||
{
|
||||
@ -64,7 +64,7 @@ func TestGetLogs(t *testing.T) {
|
||||
func TestErigonGetLatestLogs(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
db := m.DB
|
||||
agg := m.HistoryV3Components()
|
||||
@ -99,7 +99,7 @@ func TestErigonGetLatestLogs(t *testing.T) {
|
||||
func TestErigonGetLatestLogsIgnoreTopics(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
db := m.DB
|
||||
agg := m.HistoryV3Components()
|
||||
@ -191,7 +191,7 @@ func TestGetBlockReceiptsByBlockHash(t *testing.T) {
|
||||
// Assemble the test environment
|
||||
m := mockWithGenerator(t, 4, generator)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewErigonAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil)
|
||||
|
||||
|
@ -26,7 +26,7 @@ func TestGetBalanceChangesInBlock(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
myBlockNum := rpc.BlockNumberOrHashWithNumber(0)
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
db := m.DB
|
||||
agg := m.HistoryV3Components()
|
||||
@ -51,7 +51,7 @@ func TestGetTransactionReceipt(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
db := m.DB
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), db, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
// Call GetTransactionReceipt for transaction which is not in the database
|
||||
@ -63,7 +63,7 @@ func TestGetTransactionReceipt(t *testing.T) {
|
||||
func TestGetTransactionReceiptUnprotected(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
// Call GetTransactionReceipt for un-protected transaction
|
||||
@ -78,7 +78,7 @@ func TestGetStorageAt_ByBlockNumber_WithRequireCanonicalDefault(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
|
||||
@ -95,7 +95,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
|
||||
@ -112,7 +112,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
|
||||
@ -128,7 +128,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue(t *testing.T) {
|
||||
func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_BlockNotFoundError(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
|
||||
@ -152,7 +152,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_BlockNotFoundError
|
||||
func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_BlockNotFoundError(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
|
||||
@ -177,7 +177,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(
|
||||
assert := assert.New(t)
|
||||
m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
|
||||
@ -199,7 +199,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(
|
||||
func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t *testing.T) {
|
||||
m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
|
||||
@ -218,7 +218,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t *
|
||||
func TestCall_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(t *testing.T) {
|
||||
m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
from := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
|
||||
@ -244,7 +244,7 @@ func TestCall_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(t *testi
|
||||
func TestCall_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t *testing.T) {
|
||||
m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
from := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
|
||||
|
@ -27,7 +27,7 @@ import (
|
||||
func TestGetBlockByNumberWithLatestTag(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
b, err := api.GetBlockByNumber(context.Background(), rpc.LatestBlockNumber, false)
|
||||
@ -41,7 +41,7 @@ func TestGetBlockByNumberWithLatestTag(t *testing.T) {
|
||||
func TestGetBlockByNumberWithLatestTag_WithHeadHashInDb(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
ctx := context.Background()
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
tx, err := m.DB.BeginRw(ctx)
|
||||
@ -74,7 +74,7 @@ func TestGetBlockByNumberWithLatestTag_WithHeadHashInDb(t *testing.T) {
|
||||
func TestGetBlockByNumberWithPendingTag(t *testing.T) {
|
||||
m := stages.MockWithTxPool(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
|
||||
ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m)
|
||||
@ -105,7 +105,7 @@ func TestGetBlockByNumberWithPendingTag(t *testing.T) {
|
||||
func TestGetBlockByNumber_WithFinalizedTag_NoFinalizedBlockInDb(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
ctx := context.Background()
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
@ -117,7 +117,7 @@ func TestGetBlockByNumber_WithFinalizedTag_NoFinalizedBlockInDb(t *testing.T) {
|
||||
func TestGetBlockByNumber_WithFinalizedTag_WithFinalizedBlockInDb(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
ctx := context.Background()
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
tx, err := m.DB.BeginRw(ctx)
|
||||
@ -150,7 +150,7 @@ func TestGetBlockByNumber_WithFinalizedTag_WithFinalizedBlockInDb(t *testing.T)
|
||||
func TestGetBlockByNumber_WithSafeTag_NoSafeBlockInDb(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
ctx := context.Background()
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
@ -162,7 +162,7 @@ func TestGetBlockByNumber_WithSafeTag_NoSafeBlockInDb(t *testing.T) {
|
||||
func TestGetBlockByNumber_WithSafeTag_WithSafeBlockInDb(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
ctx := context.Background()
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
tx, err := m.DB.BeginRw(ctx)
|
||||
@ -195,7 +195,7 @@ func TestGetBlockByNumber_WithSafeTag_WithSafeBlockInDb(t *testing.T) {
|
||||
func TestGetBlockTransactionCountByHash(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
ctx := context.Background()
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
|
||||
@ -231,7 +231,7 @@ func TestGetBlockTransactionCountByHash(t *testing.T) {
|
||||
func TestGetBlockTransactionCountByHash_ZeroTx(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
ctx := context.Background()
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
|
||||
@ -267,7 +267,7 @@ func TestGetBlockTransactionCountByHash_ZeroTx(t *testing.T) {
|
||||
func TestGetBlockTransactionCountByNumber(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
ctx := context.Background()
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
@ -302,7 +302,7 @@ func TestGetBlockTransactionCountByNumber(t *testing.T) {
|
||||
func TestGetBlockTransactionCountByNumber_ZeroTx(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
ctx := context.Background()
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
|
@ -39,7 +39,7 @@ import (
|
||||
func TestEstimateGas(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t))
|
||||
mining := txpool.NewMiningClient(conn)
|
||||
@ -58,7 +58,7 @@ func TestEstimateGas(t *testing.T) {
|
||||
func TestEthCallNonCanonical(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewEthAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
var from = libcommon.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
|
||||
@ -78,7 +78,7 @@ func TestEthCallToPrunedBlock(t *testing.T) {
|
||||
ethCallBlockNumber := rpc.BlockNumber(2)
|
||||
|
||||
m, bankAddress, contractAddress := chainWithDeployedContract(t)
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
|
||||
doPrune(t, m.DB, pruneTo)
|
||||
|
||||
@ -103,7 +103,7 @@ func TestGetProof(t *testing.T) {
|
||||
maxGetProofRewindBlockCount = 1 // Note, this is unsafe for parallel tests, but, this test is the only consumer for now
|
||||
|
||||
m, bankAddr, contractAddr := chainWithDeployedContract(t)
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
|
||||
if m.HistoryV3 {
|
||||
t.Skip("not supported by Erigon3")
|
||||
@ -233,7 +233,7 @@ func TestGetBlockByTimestampLatestTime(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
tx, err := m.DB.BeginRo(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("fail at beginning tx")
|
||||
@ -271,7 +271,7 @@ func TestGetBlockByTimestampOldestTime(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
tx, err := m.DB.BeginRo(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("failed at beginning tx")
|
||||
@ -313,7 +313,7 @@ func TestGetBlockByTimeHigherThanLatestBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
tx, err := m.DB.BeginRo(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("fail at beginning tx")
|
||||
@ -352,7 +352,7 @@ func TestGetBlockByTimeMiddle(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
tx, err := m.DB.BeginRo(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("fail at beginning tx")
|
||||
@ -403,7 +403,7 @@ func TestGetBlockByTimestamp(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
tx, err := m.DB.BeginRo(ctx)
|
||||
if err != nil {
|
||||
t.Errorf("fail at beginning tx")
|
||||
|
@ -27,7 +27,7 @@ func TestNewFilters(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t))
|
||||
mining := txpool.NewMiningClient(conn)
|
||||
|
@ -27,7 +27,7 @@ func TestPendingBlock(t *testing.T) {
|
||||
ff := rpchelper.New(ctx, nil, nil, mining, func() {}, m.Log)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
engine := ethash.NewFaker()
|
||||
api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3), nil, false, rpccfg.DefaultEvmCallTimeout, engine,
|
||||
api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3), nil, false, rpccfg.DefaultEvmCallTimeout, engine,
|
||||
m.Dirs), nil, nil, nil, mining, 5000000, 100_000, log.New())
|
||||
expect := uint64(12345)
|
||||
b, err := rlp.EncodeToBytes(types.NewBlockWithHeader(&types.Header{Number: big.NewInt(int64(expect))}))
|
||||
|
@ -23,7 +23,7 @@ import (
|
||||
|
||||
func TestEthSubscribe(t *testing.T) {
|
||||
m, require := stages.Mock(t), require.New(t)
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 7, func(i int, b *core.BlockGen) {
|
||||
b.SetCoinbase(libcommon.Address{1})
|
||||
}, false /* intermediateHashes */)
|
||||
|
@ -44,7 +44,7 @@ func TestGasPrice(t *testing.T) {
|
||||
m := createGasPriceTestKV(t, testCase.chainSize)
|
||||
defer m.DB.Close()
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
base := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3), nil, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs)
|
||||
base := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3), nil, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs)
|
||||
eth := NewEthAPI(base, m.DB, nil, nil, nil, 5000000, 100_000, log.New())
|
||||
|
||||
ctx := context.Background()
|
||||
|
@ -30,7 +30,7 @@ Testing tracing RPC API by generating patters of contracts invoking one another
|
||||
func TestGeneratedDebugApi(t *testing.T) {
|
||||
m := rpcdaemontest.CreateTestSentryForTraces(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
baseApi := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs)
|
||||
api := NewPrivateDebugAPI(baseApi, m.DB, 0)
|
||||
@ -118,7 +118,7 @@ func TestGeneratedDebugApi(t *testing.T) {
|
||||
func TestGeneratedTraceApi(t *testing.T) {
|
||||
m := rpcdaemontest.CreateTestSentryForTraces(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
baseApi := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs)
|
||||
api := NewTraceAPI(baseApi, m.DB, &httpcfg.HttpCfg{})
|
||||
@ -277,7 +277,7 @@ func TestGeneratedTraceApi(t *testing.T) {
|
||||
func TestGeneratedTraceApiCollision(t *testing.T) {
|
||||
m := rpcdaemontest.CreateTestSentryForTracesCollision(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
baseApi := NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs)
|
||||
api := NewTraceAPI(baseApi, m.DB, &httpcfg.HttpCfg{})
|
||||
|
@ -13,7 +13,7 @@ import (
|
||||
func TestGetContractCreator(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
api := NewOtterscanAPI(NewBaseApi(nil, nil, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB)
|
||||
|
||||
addr := libcommon.HexToAddress("0x537e697c7ab75a26f9ecf0ce810e3154dfcaaf44")
|
||||
|
@ -151,7 +151,7 @@ func TestBackwardBlockProviderWithMultipleChunksBlockNotFound(t *testing.T) {
|
||||
func TestSearchTransactionsBefore(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
api := NewOtterscanAPI(NewBaseApi(nil, nil, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB)
|
||||
|
||||
addr := libcommon.HexToAddress("0x537e697c7ab75a26f9ecf0ce810e3154dfcaaf44")
|
||||
|
@ -13,7 +13,7 @@ import (
|
||||
func TestGetTransactionBySenderAndNonce(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
api := NewOtterscanAPI(NewBaseApi(nil, nil, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB)
|
||||
|
||||
addr := common.HexToAddress("0x537e697c7ab75a26f9ecf0ce810e3154dfcaaf44")
|
||||
|
@ -75,7 +75,7 @@ func TestSendRawTransaction(t *testing.T) {
|
||||
txPool := txpool.NewTxpoolClient(conn)
|
||||
ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}, m.Log)
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
api := commands.NewEthAPI(commands.NewBaseApi(ff, stateCache, br, nil, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, nil, txPool, nil, 5000000, 100_000, logger)
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
|
@ -23,7 +23,7 @@ func TestEmptyQuery(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
|
||||
api := NewTraceAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, &httpcfg.HttpCfg{})
|
||||
// Call GetTransactionReceipt for transaction which is not in the database
|
||||
@ -43,7 +43,7 @@ func TestCoinbaseBalance(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
|
||||
api := NewTraceAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, &httpcfg.HttpCfg{})
|
||||
// Call GetTransactionReceipt for transaction which is not in the database
|
||||
@ -73,7 +73,7 @@ func TestReplayTransaction(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
|
||||
api := NewTraceAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, &httpcfg.HttpCfg{})
|
||||
var txnHash libcommon.Hash
|
||||
@ -103,7 +103,7 @@ func TestReplayTransaction(t *testing.T) {
|
||||
func TestReplayBlockTransactions(t *testing.T) {
|
||||
m, _, _ := rpcdaemontest.CreateTestSentry(t)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
|
||||
stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
|
||||
api := NewTraceAPI(NewBaseApi(nil, stateCache, br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, &httpcfg.HttpCfg{})
|
||||
|
@ -36,7 +36,7 @@ func TestTxPoolContent(t *testing.T) {
|
||||
txPool := txpool.NewTxpoolClient(conn)
|
||||
ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}, m.Log)
|
||||
agg := m.HistoryV3Components()
|
||||
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
|
||||
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)
|
||||
api := NewTxPoolAPI(NewBaseApi(ff, kvcache.New(kvcache.DefaultCoherentConfig), br, agg, false, rpccfg.DefaultEvmCallTimeout, m.Engine, m.Dirs), m.DB, txPool)
|
||||
|
||||
expectValue := uint64(1234)
|
||||
|
@ -296,7 +296,7 @@ func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *g
|
||||
server := grpc.NewServer()
|
||||
|
||||
remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events,
|
||||
snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3), nil, nil, nil, false, log.New()))
|
||||
snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3), nil, nil, nil, false, log.New()))
|
||||
txpool.RegisterTxpoolServer(server, m.TxPoolGrpcServer)
|
||||
txpool.RegisterMiningServer(server, privateapi.NewMiningServer(ctx, &IsMiningMock{}, ethashApi, m.Log))
|
||||
listener := bufconn.Listen(1024 * 1024)
|
||||
|
@ -85,7 +85,7 @@ func CheckChangeSets(genesis *types.Genesis, logger log.Logger, blockNum uint64,
|
||||
if err := allSnapshots.ReopenFolder(); err != nil {
|
||||
return fmt.Errorf("reopen snapshot segments: %w", err)
|
||||
}
|
||||
blockReader := snapshotsync.NewBlockReaderWithSnapshots(allSnapshots, transactionV3)
|
||||
blockReader := snapshotsync.NewBlockReader(allSnapshots, transactionV3)
|
||||
|
||||
chainDb := db
|
||||
defer chainDb.Close()
|
||||
|
@ -243,7 +243,7 @@ func Erigon4(genesis *types.Genesis, chainConfig *chain2.Config, logger log.Logg
|
||||
}
|
||||
//transactionsV3 := kvcfg.TransactionsV3.FromDB(db)
|
||||
transactionsV3 := false
|
||||
blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots, transactionsV3)
|
||||
blockReader = snapshotsync.NewBlockReader(allSnapshots, transactionsV3)
|
||||
engine := initConsensusEngine(chainConfig, allSnapshots, logger)
|
||||
|
||||
getHeader := func(hash libcommon.Hash, number uint64) *types.Header {
|
||||
|
@ -974,7 +974,7 @@ func (s *Ethereum) setUpBlockReader(ctx context.Context, dirs datadir.Dirs, snCo
|
||||
if !snConfig.NoDownloader {
|
||||
allSnapshots.OptimisticalyReopenWithDB(s.chainDB)
|
||||
}
|
||||
blockReader := snapshotsync.NewBlockReaderWithSnapshots(allSnapshots, transactionsV3)
|
||||
blockReader := snapshotsync.NewBlockReader(allSnapshots, transactionsV3)
|
||||
|
||||
if !snConfig.NoDownloader {
|
||||
if snConfig.DownloaderAddr != "" {
|
||||
|
@ -78,7 +78,7 @@ func TestAccountAndStorageTrie(t *testing.T) {
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
historyV3 := false
|
||||
blockReader := snapshotsync.NewBlockReaderWithSnapshots(nil, false)
|
||||
blockReader := snapshotsync.NewBlockReader(nil, false)
|
||||
cfg := StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil)
|
||||
_, err := RegenerateIntermediateHashes("IH", tx, cfg, libcommon.Hash{} /* expectedRootHash */, ctx, log.New())
|
||||
assert.Nil(t, err)
|
||||
@ -200,7 +200,7 @@ func TestAccountTrieAroundExtensionNode(t *testing.T) {
|
||||
hash6 := libcommon.HexToHash("0x3100000000000000000000000000000000000000000000000000000000000000")
|
||||
assert.Nil(t, tx.Put(kv.HashedAccounts, hash6[:], encoded))
|
||||
|
||||
blockReader := snapshotsync.NewBlockReaderWithSnapshots(nil, false)
|
||||
blockReader := snapshotsync.NewBlockReader(nil, false)
|
||||
_, err := RegenerateIntermediateHashes("IH", tx, StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil), libcommon.Hash{} /* expectedRootHash */, ctx, log.New())
|
||||
assert.Nil(t, err)
|
||||
|
||||
@ -263,7 +263,7 @@ func TestStorageDeletion(t *testing.T) {
|
||||
// Populate account & storage trie DB tables
|
||||
// ----------------------------------------------------------------
|
||||
historyV3 := false
|
||||
blockReader := snapshotsync.NewBlockReaderWithSnapshots(nil, false)
|
||||
blockReader := snapshotsync.NewBlockReader(nil, false)
|
||||
cfg := StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil)
|
||||
_, err = RegenerateIntermediateHashes("IH", tx, cfg, libcommon.Hash{} /* expectedRootHash */, ctx, log.New())
|
||||
assert.Nil(t, err)
|
||||
@ -382,7 +382,7 @@ func TestHiveTrieRoot(t *testing.T) {
|
||||
common.FromHex("02081bc16d674ec80000")))
|
||||
|
||||
historyV3 := false
|
||||
blockReader := snapshotsync.NewBlockReaderWithSnapshots(nil, false)
|
||||
blockReader := snapshotsync.NewBlockReader(nil, false)
|
||||
cfg := StageTrieCfg(db, false, true, false, t.TempDir(), blockReader, nil, historyV3, nil)
|
||||
logger := log.New()
|
||||
_, err := RegenerateIntermediateHashes("IH", tx, cfg, libcommon.Hash{} /* expectedRootHash */, ctx, logger)
|
||||
|
@ -73,7 +73,7 @@ func StageMiningExecCfg(
|
||||
notifier: notifier,
|
||||
chainConfig: chainConfig,
|
||||
engine: engine,
|
||||
blockReader: snapshotsync.NewBlockReaderWithSnapshots(snapshots, transactionsV3),
|
||||
blockReader: snapshotsync.NewBlockReader(snapshots, transactionsV3),
|
||||
vmConfig: vmConfig,
|
||||
tmpdir: tmpdir,
|
||||
interrupt: interrupt,
|
||||
|
@@ -258,14 +258,17 @@ func FillDBFromSnapshots(logPrefix string, ctx context.Context, tx kv.RwTx, dirs
}

case stages.Bodies:
type LastTxNumProvider interface {
LastTxNumInSnapshot(blockNum uint64) (uint64, bool, error)
}
lastTxnID, ok, err := blockReader.(LastTxNumProvider).LastTxNumInSnapshot(blocksAvailable)
if err != nil {
return err
}
// ResetSequence - allow set arbitrary value to sequence (for example to decrement it to exact value)
ok, err := sn.ViewTxs(blocksAvailable, func(sn *snapshotsync.TxnSegment) error {
lastTxnID := sn.IdxTxnHash.BaseDataID() + uint64(sn.Seg.Count())
if err := rawdb.ResetSequence(tx, kv.EthTx, lastTxnID); err != nil {
return err
}
return nil
})
if err := rawdb.ResetSequence(tx, kv.EthTx, lastTxnID); err != nil {
return err
}
if err != nil {
return err
}
@@ -288,28 +291,24 @@ func FillDBFromSnapshots(logPrefix string, ctx context.Context, tx kv.RwTx, dirs
if err := rawdbv3.TxNums.WriteForGenesis(tx, 1); err != nil {
return err
}
if err := sn.Bodies.View(func(bs []*snapshotsync.BodySegment) error {
for _, b := range bs {
if err := b.Iterate(func(blockNum, baseTxNum, txAmount uint64) error {
if blockNum == 0 || blockNum > toBlock {
return nil
}
select {
case <-ctx.Done():
return ctx.Err()
case <-logEvery.C:
logger.Info(fmt.Sprintf("[%s] MaxTxNums index: %dk/%dk", logPrefix, blockNum/1000, sn.BlocksAvailable()/1000))
default:
}
maxTxNum := baseTxNum + txAmount - 1
type IterBody interface {
IterateBodies(f func(blockNum, baseTxNum, txAmount uint64) error) error
}
if err := blockReader.(IterBody).IterateBodies(func(blockNum, baseTxNum, txAmount uint64) error {
if blockNum == 0 || blockNum > toBlock {
return nil
}
select {
case <-ctx.Done():
return ctx.Err()
case <-logEvery.C:
logger.Info(fmt.Sprintf("[%s] MaxTxNums index: %dk/%dk", logPrefix, blockNum/1000, sn.BlocksAvailable()/1000))
default:
}
maxTxNum := baseTxNum + txAmount - 1

if err := rawdbv3.TxNums.Append(tx, blockNum, maxTxNum); err != nil {
return fmt.Errorf("%w. blockNum=%d, maxTxNum=%d", err, blockNum, maxTxNum)
}
return nil
}); err != nil {
return err
}
if err := rawdbv3.TxNums.Append(tx, blockNum, maxTxNum); err != nil {
return fmt.Errorf("%w. blockNum=%d, maxTxNum=%d", err, blockNum, maxTxNum)
}
return nil
}); err != nil {
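One observation on the FillDBFromSnapshots hunks above: rather than iterating sn.Bodies / sn.ViewTxs directly, the stage now declares small local interfaces (LastTxNumProvider, IterBody) and type-asserts the injected block reader to them, so the stage no longer depends on the concrete snapshot types. Below is a hedged sketch of that idiom only; BlockReaderService and resetTxSequence are placeholder names for illustration, not erigon APIs.

```go
package example

import "fmt"

// BlockReaderService stands in for whatever block-reader value the stage
// receives; the concrete reader has more methods than the stage normally sees.
type BlockReaderService interface{}

// lastTxNumProvider is a local, narrow interface naming only the extra method
// this stage needs, mirroring LastTxNumProvider in the diff above.
type lastTxNumProvider interface {
	LastTxNumInSnapshot(blockNum uint64) (uint64, bool, error)
}

func resetTxSequence(blockReader BlockReaderService, blocksAvailable uint64) error {
	// Assert the narrow interface instead of depending on the concrete type.
	p, ok := blockReader.(lastTxNumProvider)
	if !ok {
		return fmt.Errorf("block reader does not implement LastTxNumInSnapshot")
	}
	lastTxnID, _, err := p.LastTxNumInSnapshot(blocksAvailable)
	if err != nil {
		return err
	}
	// The real code passes lastTxnID to rawdb.ResetSequence(tx, kv.EthTx, ...).
	fmt.Println("would reset kv.EthTx sequence to", lastTxnID)
	return nil
}
```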
@@ -34,7 +34,6 @@ var migrations = map[kv.Label][]Migration{
kv.ChainDB: {
dbSchemaVersion5,
txsBeginEnd,
resetBlocks4,
},
kv.TxPoolDB: {},
kv.SentryDB: {},
@ -1,134 +0,0 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
|
||||
libcommon "github.com/ledgerwatch/erigon-lib/common"
|
||||
"github.com/ledgerwatch/erigon-lib/common/datadir"
|
||||
"github.com/ledgerwatch/erigon-lib/kv"
|
||||
"github.com/ledgerwatch/log/v3"
|
||||
|
||||
"github.com/ledgerwatch/erigon/cmd/hack/tool"
|
||||
"github.com/ledgerwatch/erigon/core/rawdb"
|
||||
"github.com/ledgerwatch/erigon/core/rawdb/rawdbreset"
|
||||
"github.com/ledgerwatch/erigon/eth/ethconfig"
|
||||
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
|
||||
"github.com/ledgerwatch/erigon/turbo/snapshotsync"
|
||||
"github.com/ledgerwatch/erigon/turbo/snapshotsync/snap"
|
||||
)
|
||||
|
||||
var resetBlocks4 = Migration{
|
||||
Name: "reset_blocks_4",
|
||||
Up: func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback, logger log.Logger) (err error) {
|
||||
tx, err := db.BeginRw(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
enabled, err := snap.Enabled(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !enabled {
|
||||
if err := BeforeCommit(tx, nil, true); err != nil {
|
||||
return err
|
||||
}
|
||||
return tx.Commit()
|
||||
}
|
||||
// Detect whether the correction is required
|
||||
snaps := snapshotsync.NewRoSnapshots(ethconfig.Snapshot{
|
||||
Enabled: true,
|
||||
KeepBlocks: true,
|
||||
Produce: false,
|
||||
}, dirs.Snap, logger)
|
||||
snaps.ReopenFolder()
|
||||
var lastFound bool
|
||||
var lastBlockNum, lastBaseTxNum, lastAmount uint64
|
||||
if err := snaps.Bodies.View(func(sns []*snapshotsync.BodySegment) error {
|
||||
// Take the last snapshot
|
||||
if len(sns) == 0 {
|
||||
return nil
|
||||
}
|
||||
sn := sns[len(sns)-1]
|
||||
sn.Iterate(func(blockNum uint64, baseTxNum uint64, txAmount uint64) error {
|
||||
lastBlockNum = blockNum
|
||||
lastBaseTxNum = baseTxNum
|
||||
lastAmount = txAmount
|
||||
lastFound = true
|
||||
return nil
|
||||
})
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if !lastFound {
|
||||
if err := BeforeCommit(tx, nil, true); err != nil {
|
||||
return err
|
||||
}
|
||||
return tx.Commit()
|
||||
}
|
||||
c, err := tx.Cursor(kv.BlockBody)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer c.Close()
|
||||
var fixNeeded bool
|
||||
for k, _, err := c.First(); k != nil; k, _, err = c.Next() {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
blockNumber := binary.BigEndian.Uint64(k[:8])
|
||||
if blockNumber != lastBlockNum+1 {
|
||||
continue
|
||||
}
|
||||
blockHash := libcommon.BytesToHash(k[8:])
|
||||
var hash libcommon.Hash
|
||||
if hash, err = rawdb.ReadCanonicalHash(tx, blockNumber); err != nil {
|
||||
return err
|
||||
}
|
||||
// ReadBody is not returning baseTxId which is written into the DB record, but that value + 1
|
||||
_, baseTxId, _ := rawdb.ReadBody(tx, blockHash, blockNumber)
|
||||
if hash != blockHash {
|
||||
continue
|
||||
}
|
||||
if lastBaseTxNum+lastAmount+1 != baseTxId {
|
||||
logger.Info("Fix required, last block in seg files", "height", lastBlockNum, "baseTxNum", lastBaseTxNum, "txAmount", lastAmount, "first txId in DB", baseTxId, "expected", lastBaseTxNum+lastAmount+1)
|
||||
fixNeeded = true
|
||||
}
|
||||
}
|
||||
if !fixNeeded {
|
||||
log.Info("Fix is not required")
|
||||
if err := BeforeCommit(tx, nil, true); err != nil {
|
||||
return err
|
||||
}
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
headersProgress, _ := stages.GetStageProgress(tx, stages.Headers)
|
||||
if headersProgress > 0 {
|
||||
log.Warn("NOTE: this migration will remove recent blocks (and senders) to fix several recent bugs. Your node will re-download last ~400K blocks, should not take very long")
|
||||
}
|
||||
|
||||
cc := tool.ChainConfig(tx)
|
||||
if err := rawdbreset.ResetBlocks(tx, db, nil, nil, nil, dirs, *cc, nil, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := rawdbreset.ResetSenders(context.Background(), db, tx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := rawdbreset.ResetTxLookup(tx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// This migration is no-op, but it forces the migration mechanism to apply it and thus write the DB schema version info
|
||||
if err := BeforeCommit(tx, nil, true); err != nil {
|
||||
return err
|
||||
}
|
||||
return tx.Commit()
|
||||
},
|
||||
}
@ -152,38 +152,40 @@ func (back *RemoteBlockReader) BodyRlp(ctx context.Context, tx kv.Getter, hash l
return bodyRlp, nil
}

// BlockReaderWithSnapshots can read blocks from db and snapshots
type BlockReaderWithSnapshots struct {
// BlockReader can read blocks from db and snapshots
type BlockReader struct {
sn *RoSnapshots
TransactionsV3 bool
}

func NewBlockReaderWithSnapshots(snapshots *RoSnapshots, transactionsV3 bool) *BlockReaderWithSnapshots {
return &BlockReaderWithSnapshots{sn: snapshots, TransactionsV3: transactionsV3}
func NewBlockReader(snapshots *RoSnapshots, transactionsV3 bool) *BlockReader {
return &BlockReader{sn: snapshots, TransactionsV3: transactionsV3}
}

func (back *BlockReaderWithSnapshots) Snapshots() *RoSnapshots { return back.sn }
func (back *BlockReader) Snapshots() *RoSnapshots { return back.sn }
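
A minimal usage sketch of the renamed reader (assumed caller code relying on this package's existing imports; the snapshots handle and kv.Getter come from the caller): construction is unchanged apart from the shorter name, and reads go through the same methods.

// Sketch only: read one header through the renamed BlockReader.
func readHeaderSketch(ctx context.Context, tx kv.Getter, snapshots *RoSnapshots, blockNum uint64) (*types.Header, error) {
    br := NewBlockReader(snapshots, false /* transactionsV3 */)
    return br.HeaderByNumber(ctx, tx, blockNum)
}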

func (back *BlockReaderWithSnapshots) HeaderByNumber(ctx context.Context, tx kv.Getter, blockHeight uint64) (h *types.Header, err error) {
ok, err := back.sn.ViewHeaders(blockHeight, func(segment *HeaderSegment) error {
h, _, err = back.headerFromSnapshot(blockHeight, segment, nil)
if err != nil {
return err
}
return nil
})
func (back *BlockReader) HeaderByNumber(ctx context.Context, tx kv.Getter, blockHeight uint64) (h *types.Header, err error) {
h = rawdb.ReadHeaderByNumber(tx, blockHeight)
if h != nil {
return h, nil
}

view := back.sn.View()
defer view.Close()
seg, ok := view.HeadersSegment(blockHeight)
if !ok {
return
}

h, _, err = back.headerFromSnapshot(blockHeight, seg, nil)
if err != nil {
return nil, err
}
if ok {
return h, nil
}
h = rawdb.ReadHeaderByNumber(tx, blockHeight)
return h, nil
}

// HeaderByHash - will search header in all snapshots starting from recent
func (back *BlockReaderWithSnapshots) HeaderByHash(ctx context.Context, tx kv.Getter, hash libcommon.Hash) (h *types.Header, err error) {
func (back *BlockReader) HeaderByHash(ctx context.Context, tx kv.Getter, hash libcommon.Hash) (h *types.Header, err error) {
h, err = rawdb.ReadHeaderByHash(tx, hash)
if err != nil {
return nil, err
@ -192,112 +194,118 @@ func (back *BlockReaderWithSnapshots) HeaderByHash(ctx context.Context, tx kv.Ge
return h, nil
}

view := back.sn.View()
defer view.Close()
segments := view.Headers()

buf := make([]byte, 128)
if err := back.sn.Headers.View(func(segments []*HeaderSegment) error {
for i := len(segments) - 1; i >= 0; i-- {
if segments[i].idxHeaderHash == nil {
continue
}

h, err = back.headerFromSnapshotByHash(hash, segments[i], buf)
if err != nil {
return err
}
if h != nil {
break
}
for i := len(segments) - 1; i >= 0; i-- {
if segments[i].idxHeaderHash == nil {
continue
}
return nil
}); err != nil {
return nil, err
}
return h, nil
}

func (back *BlockReaderWithSnapshots) CanonicalHash(ctx context.Context, tx kv.Getter, blockHeight uint64) (h libcommon.Hash, err error) {
ok, err := back.sn.ViewHeaders(blockHeight, func(segment *HeaderSegment) error {
header, _, err := back.headerFromSnapshot(blockHeight, segment, nil)
if err != nil {
return err
}
if header == nil {
return nil
}
h = header.Hash()
return nil
})
if err != nil {
return h, err
}
if ok {
return h, nil
}

return rawdb.ReadCanonicalHash(tx, blockHeight)
}

func (back *BlockReaderWithSnapshots) Header(ctx context.Context, tx kv.Getter, hash libcommon.Hash, blockHeight uint64) (h *types.Header, err error) {
ok, err := back.sn.ViewHeaders(blockHeight, func(segment *HeaderSegment) error {
h, _, err = back.headerFromSnapshot(blockHeight, segment, nil)
if err != nil {
return err
}
return nil
})
if err != nil {
return h, err
}
if ok {
return h, nil
}

h = rawdb.ReadHeader(tx, hash, blockHeight)
return h, nil
}

func (back *BlockReaderWithSnapshots) BodyWithTransactions(ctx context.Context, tx kv.Getter, hash libcommon.Hash, blockHeight uint64) (body *types.Body, err error) {
var baseTxnID uint64
var txsAmount uint32
var buf []byte
ok, err := back.sn.ViewBodies(blockHeight, func(seg *BodySegment) error {
body, baseTxnID, txsAmount, buf, err = back.bodyFromSnapshot(blockHeight, seg, buf)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, err
}
if ok {
ok, err = back.sn.ViewTxs(blockHeight, func(seg *TxnSegment) error {
txs, senders, err := back.txsFromSnapshot(baseTxnID, txsAmount, seg, buf)
if err != nil {
return err
}
if txs == nil {
return nil
}
body.Transactions = txs
body.SendersToTxs(senders)
return nil
})
h, err = back.headerFromSnapshotByHash(hash, segments[i], buf)
if err != nil {
return nil, err
}
if ok {
return body, nil
if h != nil {
break
}
}
return h, nil
}

var emptyHash = libcommon.Hash{}

func (back *BlockReader) CanonicalHash(ctx context.Context, tx kv.Getter, blockHeight uint64) (h libcommon.Hash, err error) {
h, err = rawdb.ReadCanonicalHash(tx, blockHeight)
if err != nil {
return h, err
}
if h != emptyHash {
return h, nil
}

view := back.sn.View()
defer view.Close()
seg, ok := view.HeadersSegment(blockHeight)
if !ok {
return
}

header, _, err := back.headerFromSnapshot(blockHeight, seg, nil)
if err != nil {
return h, err
}
if header == nil {
return h, nil
}
h = header.Hash()
return h, nil
}

func (back *BlockReader) Header(ctx context.Context, tx kv.Getter, hash libcommon.Hash, blockHeight uint64) (h *types.Header, err error) {
h = rawdb.ReadHeader(tx, hash, blockHeight)
if h != nil {
return h, nil
}

view := back.sn.View()
defer view.Close()
seg, ok := view.HeadersSegment(blockHeight)
if !ok {
return
}
h, _, err = back.headerFromSnapshot(blockHeight, seg, nil)
if err != nil {
return h, err
}
return h, nil
}

func (back *BlockReader) BodyWithTransactions(ctx context.Context, tx kv.Getter, hash libcommon.Hash, blockHeight uint64) (body *types.Body, err error) {
body, err = rawdb.ReadBodyWithTransactions(tx, hash, blockHeight)
if err != nil {
return nil, err
}
if body != nil {
return body, nil
}

view := back.sn.View()
defer view.Close()

var baseTxnID uint64
var txsAmount uint32
var buf []byte
seg, ok := view.BodiesSegment(blockHeight)
if !ok {
return nil, nil
}
body, baseTxnID, txsAmount, buf, err = back.bodyFromSnapshot(blockHeight, seg, buf)
if err != nil {
return nil, err
}
if body == nil {
return nil, nil
}
txnSeg, ok := view.TxsSegment(blockHeight)
if !ok {
return nil, nil
}
txs, senders, err := back.txsFromSnapshot(baseTxnID, txsAmount, txnSeg, buf)
if err != nil {
return nil, err
}
if txs == nil {
return nil, nil
}
body.Transactions = txs
body.SendersToTxs(senders)
return body, nil
}

func (back *BlockReaderWithSnapshots) BodyRlp(ctx context.Context, tx kv.Getter, hash libcommon.Hash, blockHeight uint64) (bodyRlp rlp.RawValue, err error) {
func (back *BlockReader) BodyRlp(ctx context.Context, tx kv.Getter, hash libcommon.Hash, blockHeight uint64) (bodyRlp rlp.RawValue, err error) {
body, err := back.BodyWithTransactions(ctx, tx, hash, blockHeight)
if err != nil {
return nil, err
@ -309,96 +317,101 @@ func (back *BlockReaderWithSnapshots) BodyRlp(ctx context.Context, tx kv.Getter,
return bodyRlp, nil
}

func (back *BlockReaderWithSnapshots) Body(ctx context.Context, tx kv.Getter, hash libcommon.Hash, blockHeight uint64) (body *types.Body, txAmount uint32, err error) {
ok, err := back.sn.ViewBodies(blockHeight, func(seg *BodySegment) error {
body, _, txAmount, _, err = back.bodyFromSnapshot(blockHeight, seg, nil)
if err != nil {
return err
}
return nil
})
func (back *BlockReader) Body(ctx context.Context, tx kv.Getter, hash libcommon.Hash, blockHeight uint64) (body *types.Body, txAmount uint32, err error) {
if blockHeight >= back.sn.BlocksAvailable() {
body, _, txAmount = rawdb.ReadBody(tx, hash, blockHeight)
return body, txAmount, nil
}
view := back.sn.View()
defer view.Close()

seg, ok := view.BodiesSegment(blockHeight)
if !ok {
return
}
body, _, txAmount, _, err = back.bodyFromSnapshot(blockHeight, seg, nil)
if err != nil {
return nil, 0, err
}
if ok {
return body, txAmount, nil
}
body, _, txAmount = rawdb.ReadBody(tx, hash, blockHeight)
return body, txAmount, nil
}

func (back *BlockReaderWithSnapshots) BlockWithSenders(ctx context.Context, tx kv.Getter, hash libcommon.Hash, blockHeight uint64) (block *types.Block, senders []libcommon.Address, err error) {
var buf []byte
var h *types.Header
ok, err := back.sn.ViewHeaders(blockHeight, func(seg *HeaderSegment) error {
h, buf, err = back.headerFromSnapshot(blockHeight, seg, buf)
func (back *BlockReader) BlockWithSenders(ctx context.Context, tx kv.Getter, hash libcommon.Hash, blockHeight uint64) (block *types.Block, senders []libcommon.Address, err error) {
if blockHeight >= back.sn.BlocksAvailable() {
canonicalHash, err := rawdb.ReadCanonicalHash(tx, blockHeight)
if err != nil {
return err
return nil, nil, fmt.Errorf("requested non-canonical hash %x. canonical=%x", hash, canonicalHash)
}
return nil
})
if err != nil {
return nil, nil, err
}
if ok && h != nil {
var b *types.Body
var baseTxnId uint64
var txsAmount uint32
ok, err = back.sn.ViewBodies(blockHeight, func(seg *BodySegment) error {
b, baseTxnId, txsAmount, buf, err = back.bodyFromSnapshot(blockHeight, seg, buf)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, nil, err
}
if ok && b != nil {
if txsAmount == 0 {
block = types.NewBlockFromStorage(hash, h, nil, b.Uncles, b.Withdrawals)
if len(senders) != block.Transactions().Len() {
return block, senders, nil // no senders is fine - will recover them on the fly
}
block.SendersToTxs(senders)
return block, senders, nil
}
var txs []types.Transaction
ok, err = back.sn.ViewTxs(blockHeight, func(seg *TxnSegment) error {
txs, senders, err = back.txsFromSnapshot(baseTxnId, txsAmount, seg, buf)
if err != nil {
return err
}
return nil
})
if canonicalHash == hash {
block, senders, err = rawdb.ReadBlockWithSenders(tx, hash, blockHeight)
if err != nil {
return nil, nil, err
}
if ok {
block = types.NewBlockFromStorage(hash, h, txs, b.Uncles, b.Withdrawals)
if len(senders) != block.Transactions().Len() {
return block, senders, nil // no senders is fine - will recover them on the fly
}
block.SendersToTxs(senders)
return block, senders, nil
}
return block, senders, nil
}
return rawdb.NonCanonicalBlockWithSenders(tx, hash, blockHeight)
}
canonicalHash, err := rawdb.ReadCanonicalHash(tx, blockHeight)

view := back.sn.View()
defer view.Close()
seg, ok := view.HeadersSegment(blockHeight)
if !ok {
return
}

var buf []byte
h, buf, err := back.headerFromSnapshot(blockHeight, seg, buf)
if err != nil {
return nil, nil, fmt.Errorf("requested non-canonical hash %x. canonical=%x", hash, canonicalHash)
return nil, nil, err
}
if canonicalHash == hash {
block, senders, err = rawdb.ReadBlockWithSenders(tx, hash, blockHeight)
if err != nil {
return nil, nil, err
if h == nil {
return
}

var b *types.Body
var baseTxnId uint64
var txsAmount uint32
bodySeg, ok := view.BodiesSegment(blockHeight)
if !ok {
return
}
b, baseTxnId, txsAmount, buf, err = back.bodyFromSnapshot(blockHeight, bodySeg, buf)
if err != nil {
return nil, nil, err
}
if b == nil {
return
}
if txsAmount == 0 {
block = types.NewBlockFromStorage(hash, h, nil, b.Uncles, b.Withdrawals)
if len(senders) != block.Transactions().Len() {
return block, senders, nil // no senders is fine - will recover them on the fly
}
block.SendersToTxs(senders)
return block, senders, nil
}
return rawdb.NonCanonicalBlockWithSenders(tx, hash, blockHeight)

txnSeg, ok := view.TxsSegment(blockHeight)
if !ok {
return
}
var txs []types.Transaction
txs, senders, err = back.txsFromSnapshot(baseTxnId, txsAmount, txnSeg, buf)
if err != nil {
return nil, nil, err
}
if !ok {
return
}
block = types.NewBlockFromStorage(hash, h, txs, b.Uncles, b.Withdrawals)
if len(senders) != block.Transactions().Len() {
return block, senders, nil // no senders is fine - will recover them on the fly
}
block.SendersToTxs(senders)
return block, senders, nil
}

func (back *BlockReaderWithSnapshots) headerFromSnapshot(blockHeight uint64, sn *HeaderSegment, buf []byte) (*types.Header, []byte, error) {
func (back *BlockReader) headerFromSnapshot(blockHeight uint64, sn *HeaderSegment, buf []byte) (*types.Header, []byte, error) {
if sn.idxHeaderHash == nil {
return nil, buf, nil
}
@ -423,7 +436,7 @@ func (back *BlockReaderWithSnapshots) headerFromSnapshot(blockHeight uint64, sn
// because HeaderByHash method will search header in all snapshots - and may request a header which doesn't exist
// but because our indices are based on PerfectHashMap, there is no way to know whether a given key exists or not; the only way
// to make sure is to fetch it and compare the hash
func (back *BlockReaderWithSnapshots) headerFromSnapshotByHash(hash libcommon.Hash, sn *HeaderSegment, buf []byte) (*types.Header, error) {
func (back *BlockReader) headerFromSnapshotByHash(hash libcommon.Hash, sn *HeaderSegment, buf []byte) (*types.Header, error) {
defer func() {
if rec := recover(); rec != nil {
panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.ranges.from, sn.ranges.to, dbg.Stack()))
@ -456,7 +469,7 @@ func (back *BlockReaderWithSnapshots) headerFromSnapshotByHash(hash libcommon.Ha
return h, nil
}
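
The comment above carries the key caveat: a perfect-hash index returns a plausible offset even for keys that were never inserted, so the fetched record has to be re-checked. A generic sketch of that verify-after-lookup pattern, with hypothetical helper parameters (not this package's API):

// Sketch: the index always yields an offset, so compare hashes before trusting it.
func headerByHashVerified(lookupOffset func(libcommon.Hash) uint64, readAt func(uint64) *types.Header, hash libcommon.Hash) *types.Header {
    h := readAt(lookupOffset(hash)) // offset may point at an unrelated header
    if h == nil || h.Hash() != hash {
        return nil // false positive from the perfect-hash index
    }
    return h
}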

func (back *BlockReaderWithSnapshots) bodyFromSnapshot(blockHeight uint64, sn *BodySegment, buf []byte) (*types.Body, uint64, uint32, []byte, error) {
func (back *BlockReader) bodyFromSnapshot(blockHeight uint64, sn *BodySegment, buf []byte) (*types.Body, uint64, uint32, []byte, error) {
b, buf, err := back.bodyForStorageFromSnapshot(blockHeight, sn, buf)
if err != nil {
return nil, 0, 0, buf, err
@ -472,7 +485,7 @@ func (back *BlockReaderWithSnapshots) bodyFromSnapshot(blockHeight uint64, sn *B
return body, b.BaseTxId + 1, txsAmount, buf, nil // empty txs in the beginning and end of block
}

func (back *BlockReaderWithSnapshots) bodyForStorageFromSnapshot(blockHeight uint64, sn *BodySegment, buf []byte) (*types.BodyForStorage, []byte, error) {
func (back *BlockReader) bodyForStorageFromSnapshot(blockHeight uint64, sn *BodySegment, buf []byte) (*types.BodyForStorage, []byte, error) {
defer func() {
if rec := recover(); rec != nil {
panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.ranges.from, sn.ranges.to, dbg.Stack()))
@ -505,7 +518,7 @@ func (back *BlockReaderWithSnapshots) bodyForStorageFromSnapshot(blockHeight uin
return b, buf, nil
}

func (back *BlockReaderWithSnapshots) txsFromSnapshot(baseTxnID uint64, txsAmount uint32, txsSeg *TxnSegment, buf []byte) (txs []types.Transaction, senders []libcommon.Address, err error) {
func (back *BlockReader) txsFromSnapshot(baseTxnID uint64, txsAmount uint32, txsSeg *TxnSegment, buf []byte) (txs []types.Transaction, senders []libcommon.Address, err error) {
defer func() {
if rec := recover(); rec != nil {
panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, txsSeg.ranges.from, txsSeg.ranges.to, dbg.Stack()))
@ -547,7 +560,7 @@ func (back *BlockReaderWithSnapshots) txsFromSnapshot(baseTxnID uint64, txsAmoun
return txs, senders, nil
}

func (back *BlockReaderWithSnapshots) txnByID(txnID uint64, sn *TxnSegment, buf []byte) (txn types.Transaction, err error) {
func (back *BlockReader) txnByID(txnID uint64, sn *TxnSegment, buf []byte) (txn types.Transaction, err error) {
offset := sn.IdxTxnHash.OrdinalLookup(txnID - sn.IdxTxnHash.BaseDataID())
gg := sn.Seg.MakeGetter()
gg.Reset(offset)
@ -565,7 +578,7 @@ func (back *BlockReaderWithSnapshots) txnByID(txnID uint64, sn *TxnSegment, buf
return
}

func (back *BlockReaderWithSnapshots) txnByHash(txnHash libcommon.Hash, segments []*TxnSegment, buf []byte) (txn types.Transaction, blockNum, txnID uint64, err error) {
func (back *BlockReader) txnByHash(txnHash libcommon.Hash, segments []*TxnSegment, buf []byte) (txn types.Transaction, blockNum, txnID uint64, err error) {
for i := len(segments) - 1; i >= 0; i-- {
sn := segments[i]
if sn.IdxTxnHash == nil || sn.IdxTxnHash2BlockNum == nil {
@ -605,72 +618,61 @@ func (back *BlockReaderWithSnapshots) txnByHash(txnHash libcommon.Hash, segments

// TxnByIdxInBlock - doesn't include system-transactions at the beginning/end of the block
// return nil if 0 < i < body.TxAmount
func (back *BlockReaderWithSnapshots) TxnByIdxInBlock(ctx context.Context, tx kv.Getter, blockNum uint64, i int) (txn types.Transaction, err error) {
var b *types.BodyForStorage
ok, err := back.sn.ViewBodies(blockNum, func(segment *BodySegment) error {
b, _, err = back.bodyForStorageFromSnapshot(blockNum, segment, nil)
if err != nil {
return err
}
if b == nil {
return nil
}
return nil
})
if err != nil {
return nil, err
}

if ok {
// if block has no transactions, or requested txNum out of non-system transactions length
if b.TxAmount == 2 || i == -1 || i >= int(b.TxAmount-2) {
return nil, nil
}

ok, err = back.sn.Txs.ViewSegment(blockNum, func(segment *TxnSegment) error {
// +1 because block has system-txn in the beginning of block
txn, err = back.txnByID(b.BaseTxId+1+uint64(i), segment, nil)
if err != nil {
return err
}
if txn == nil {
return nil
}
return nil
})
func (back *BlockReader) TxnByIdxInBlock(ctx context.Context, tx kv.Getter, blockNum uint64, i int) (txn types.Transaction, err error) {
if blockNum >= back.sn.BlocksAvailable() {
canonicalHash, err := rawdb.ReadCanonicalHash(tx, blockNum)
if err != nil {
return nil, err
}
if ok {
return txn, nil
var k [8 + 32]byte
binary.BigEndian.PutUint64(k[:], blockNum)
copy(k[8:], canonicalHash[:])
b, err := rawdb.ReadBodyForStorageByKey(tx, k[:])
if err != nil {
return nil, err
}
return nil, nil
if b == nil {
return nil, nil
}

txn, err = rawdb.CanonicalTxnByID(tx, b.BaseTxId+1+uint64(i), canonicalHash, back.TransactionsV3)
if err != nil {
return nil, err
}
return txn, nil
}

canonicalHash, err := rawdb.ReadCanonicalHash(tx, blockNum)
if err != nil {
return nil, err
view := back.sn.View()
defer view.Close()
seg, ok := view.BodiesSegment(blockNum)
if !ok {
return
}
var k [8 + 32]byte
binary.BigEndian.PutUint64(k[:], blockNum)
copy(k[8:], canonicalHash[:])
b, err = rawdb.ReadBodyForStorageByKey(tx, k[:])

var b *types.BodyForStorage
b, _, err = back.bodyForStorageFromSnapshot(blockNum, seg, nil)
if err != nil {
return nil, err
}
if b == nil {
return
}

// if block has no transactions, or requested txNum out of non-system transactions length
if b.TxAmount == 2 || i == -1 || i >= int(b.TxAmount-2) {
return nil, nil
}

txn, err = rawdb.CanonicalTxnByID(tx, b.BaseTxId+1+uint64(i), canonicalHash, back.TransactionsV3)
if err != nil {
return nil, err
txnSeg, ok := view.TxsSegment(blockNum)
if !ok {
return
}
return txn, nil
// +1 because block has system-txn in the beginning of block
return back.txnByID(b.BaseTxId+1+uint64(i), txnSeg, nil)
}
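
A small worked example of the indexing used above (illustrative numbers only): TxAmount includes the two system transactions, so a body with BaseTxId=500 and TxAmount=7 holds five user transactions at txn IDs 501..505, and i=0 resolves to ID 501.

// Sketch: mirrors the bounds check and the +1 system-txn offset used above.
func userTxnID(baseTxId uint64, txAmount uint32, i int) (uint64, bool) {
    if txAmount == 2 || i == -1 || i >= int(txAmount-2) { // only system txns, or i out of range
        return 0, false
    }
    return baseTxId + 1 + uint64(i), true
}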

// TxnLookup - find blockNumber and txnID by txnHash
func (back *BlockReaderWithSnapshots) TxnLookup(ctx context.Context, tx kv.Getter, txnHash libcommon.Hash) (uint64, bool, error) {
func (back *BlockReader) TxnLookup(ctx context.Context, tx kv.Getter, txnHash libcommon.Hash) (uint64, bool, error) {
n, err := rawdb.ReadTxLookupEntry(tx, txnHash)
if err != nil {
return 0, false, err
@ -679,18 +681,13 @@ func (back *BlockReaderWithSnapshots) TxnLookup(ctx context.Context, tx kv.Gette
return *n, true, nil
}

view := back.sn.View()
defer view.Close()

var txn types.Transaction
var blockNum uint64
if err := back.sn.Txs.View(func(segments []*TxnSegment) error {
txn, blockNum, _, err = back.txnByHash(txnHash, segments, nil)
if err != nil {
return err
}
if txn == nil {
return nil
}
return nil
}); err != nil {
txn, blockNum, _, err = back.txnByHash(txnHash, view.Txs(), nil)
if err != nil {
return 0, false, err
}
if txn == nil {
@ -698,3 +695,42 @@ func (back *BlockReaderWithSnapshots) TxnLookup(ctx context.Context, tx kv.Gette
}
return blockNum, true, nil
}
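
A usage sketch (assumed caller code): TxnLookup consults the DB's TxLookup table first and falls back to scanning the snapshot view's transaction segments by hash.

// Sketch only: resolve the block number of a transaction hash, or report an error.
func blockNumOfTxn(ctx context.Context, tx kv.Getter, br *BlockReader, txnHash libcommon.Hash) (uint64, error) {
    blockNum, ok, err := br.TxnLookup(ctx, tx, txnHash)
    if err != nil {
        return 0, err
    }
    if !ok {
        return 0, fmt.Errorf("txn %x not found in DB or snapshots", txnHash)
    }
    return blockNum, nil
}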

func (back *BlockReader) LastTxNumInSnapshot(blockNum uint64) (uint64, bool, error) {
view := back.sn.View()
defer view.Close()

sn, ok := view.TxsSegment(blockNum)
if !ok {
return 0, false, nil
}

lastTxnID := sn.IdxTxnHash.BaseDataID() + uint64(sn.Seg.Count())
return lastTxnID, true, nil
}

func (back *BlockReader) IterateBodies(f func(blockNum, baseTxNum, txAmount uint64) error) error {
view := back.sn.View()
defer view.Close()

for _, sn := range view.Bodies() {
sn := sn
defer sn.seg.EnableMadvNormal().DisableReadAhead()

var buf []byte
g := sn.seg.MakeGetter()
blockNum := sn.ranges.from
var b types.BodyForStorage
for g.HasNext() {
buf, _ = g.Next(buf[:0])
if err := rlp.DecodeBytes(buf, &b); err != nil {
return err
}
if err := f(blockNum, b.BaseTxId, uint64(b.TxAmount)); err != nil {
return err
}
blockNum++
}
}
return nil
}
@ -194,26 +194,6 @@ func (sn *BodySegment) reopenIdx(dir string) (err error) {
return nil
}

func (sn *BodySegment) Iterate(f func(blockNum, baseTxNum, txAmount uint64) error) error {
defer sn.seg.EnableMadvNormal().DisableReadAhead()

var buf []byte
g := sn.seg.MakeGetter()
blockNum := sn.ranges.from
var b types.BodyForStorage
for g.HasNext() {
buf, _ = g.Next(buf[:0])
if err := rlp.DecodeBytes(buf, &b); err != nil {
return err
}
if err := f(blockNum, b.BaseTxId, uint64(b.TxAmount)); err != nil {
return err
}
blockNum++
}
return nil
}

func (sn *TxnSegment) closeIdx() {
if sn.IdxTxnHash != nil {
sn.IdxTxnHash.Close()
@ -299,17 +279,6 @@ func (s *headerSegments) View(f func(segments []*HeaderSegment) error) error {
defer s.lock.RUnlock()
return f(s.segments)
}
func (s *headerSegments) ViewSegment(blockNum uint64, f func(sn *HeaderSegment) error) (found bool, err error) {
s.lock.RLock()
defer s.lock.RUnlock()
for _, seg := range s.segments {
if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) {
continue
}
return true, f(seg)
}
return false, nil
}

type bodySegments struct {
lock sync.RWMutex
@ -699,12 +668,12 @@ Loop:
}

func (s *RoSnapshots) Ranges() (ranges []Range) {
_ = s.Headers.View(func(segments []*HeaderSegment) error {
for _, sn := range segments {
ranges = append(ranges, sn.ranges)
}
return nil
})
view := s.View()
defer view.Close()

for _, sn := range view.Headers() {
ranges = append(ranges, sn.ranges)
}
return ranges
}

@ -843,24 +812,6 @@ func (s *RoSnapshots) PrintDebug() {
fmt.Printf("%d, %t, %t\n", sn.ranges.from, sn.IdxTxnHash == nil, sn.IdxTxnHash2BlockNum == nil)
}
}
func (s *RoSnapshots) ViewHeaders(blockNum uint64, f func(sn *HeaderSegment) error) (found bool, err error) {
if !s.indicesReady.Load() || blockNum > s.BlocksAvailable() {
return false, nil
}
return s.Headers.ViewSegment(blockNum, f)
}
func (s *RoSnapshots) ViewBodies(blockNum uint64, f func(sn *BodySegment) error) (found bool, err error) {
if !s.indicesReady.Load() || blockNum > s.BlocksAvailable() {
return false, nil
}
return s.Bodies.ViewSegment(blockNum, f)
}
func (s *RoSnapshots) ViewTxs(blockNum uint64, f func(sn *TxnSegment) error) (found bool, err error) {
if !s.indicesReady.Load() || blockNum > s.BlocksAvailable() {
return false, nil
}
return s.Txs.ViewSegment(blockNum, f)
}

func buildIdx(ctx context.Context, sn snaptype.FileInfo, chainID uint256.Int, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) error {
//_, fName := filepath.Split(sn.Path)
@ -1926,30 +1877,28 @@ RETRY:
func ForEachHeader(ctx context.Context, s *RoSnapshots, walker func(header *types.Header) error) error {
r := bytes.NewReader(nil)
word := make([]byte, 0, 2*4096)
err := s.Headers.View(func(snapshots []*HeaderSegment) error {
for _, sn := range snapshots {
if err := sn.seg.WithReadAhead(func() error {
g := sn.seg.MakeGetter()
for g.HasNext() {
word, _ = g.Next(word[:0])
var header types.Header
r.Reset(word[1:])
if err := rlp.Decode(r, &header); err != nil {
return err
}
if err := walker(&header); err != nil {
return err
}

view := s.View()
defer view.Close()

for _, sn := range view.Headers() {
if err := sn.seg.WithReadAhead(func() error {
g := sn.seg.MakeGetter()
for g.HasNext() {
word, _ = g.Next(word[:0])
var header types.Header
r.Reset(word[1:])
if err := rlp.Decode(r, &header); err != nil {
return err
}
if err := walker(&header); err != nil {
return err
}
return nil
}); err != nil {
return err
}
return nil
}); err != nil {
return err
}
return nil
})
if err != nil {
return err
}

return nil
@ -2000,28 +1949,81 @@ func (*Merger) FindMergeRanges(currentRanges []Range) (toMerge []Range) {
return toMerge
}

type View struct {
s *RoSnapshots
closed bool
}

func (s *RoSnapshots) View() *View {
v := &View{s: s}
v.s.Headers.lock.RLock()
v.s.Bodies.lock.RLock()
v.s.Txs.lock.RLock()
return v
}

func (v *View) Close() {
if v.closed {
return
}
v.closed = true
v.s.Headers.lock.RUnlock()
v.s.Bodies.lock.RUnlock()
v.s.Txs.lock.RUnlock()
}
func (v *View) Headers() []*HeaderSegment { return v.s.Headers.segments }
func (v *View) Bodies() []*BodySegment { return v.s.Bodies.segments }
func (v *View) Txs() []*TxnSegment { return v.s.Txs.segments }
func (v *View) HeadersSegment(blockNum uint64) (*HeaderSegment, bool) {
for _, seg := range v.Headers() {
if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) {
continue
}
return seg, true
}
return nil, false
}
func (v *View) BodiesSegment(blockNum uint64) (*BodySegment, bool) {
for _, seg := range v.Bodies() {
if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) {
continue
}
return seg, true
}
return nil, false
}
func (v *View) TxsSegment(blockNum uint64) (*TxnSegment, bool) {
for _, seg := range v.Txs() {
if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) {
continue
}
return seg, true
}
return nil, false
}
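
A brief usage sketch of the View type introduced above (assumed caller code): like a read transaction, a View is acquired, released with defer Close, and used for consistent segment lookups while all three segment lists stay RLocked.

// Sketch only: check whether a block's headers segment is frozen in snapshots.
func hasFrozenHeader(s *RoSnapshots, blockNum uint64) bool {
    view := s.View()
    defer view.Close()

    _, ok := view.HeadersSegment(blockNum)
    return ok
}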

func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (map[snaptype.Type][]string, error) {
toMerge := map[snaptype.Type][]string{}
err := snapshots.Headers.View(func(hSegments []*HeaderSegment) error {
return snapshots.Bodies.View(func(bSegments []*BodySegment) error {
return snapshots.Txs.View(func(tSegments []*TxnSegment) error {
for i, sn := range hSegments {
if sn.ranges.from < from {
continue
}
if sn.ranges.to > to {
break
}
toMerge[snaptype.Headers] = append(toMerge[snaptype.Headers], hSegments[i].seg.FilePath())
toMerge[snaptype.Bodies] = append(toMerge[snaptype.Bodies], bSegments[i].seg.FilePath())
toMerge[snaptype.Transactions] = append(toMerge[snaptype.Transactions], tSegments[i].Seg.FilePath())
}
view := snapshots.View()
defer view.Close()

return nil
})
})
})
return toMerge, err
hSegments := view.Headers()
bSegments := view.Bodies()
tSegments := view.Txs()

for i, sn := range hSegments {
if sn.ranges.from < from {
continue
}
if sn.ranges.to > to {
break
}
toMerge[snaptype.Headers] = append(toMerge[snaptype.Headers], hSegments[i].seg.FilePath())
toMerge[snaptype.Bodies] = append(toMerge[snaptype.Bodies], bSegments[i].seg.FilePath())
toMerge[snaptype.Transactions] = append(toMerge[snaptype.Transactions], tSegments[i].Seg.FilePath())
}

return toMerge, nil
}

// Merge does merge segments in given ranges
@ -2174,54 +2176,3 @@ func BuildProtoRequest(downloadRequest []DownloadRequest) *proto_downloader.Down
}
return req
}

type BodiesIterator struct{}

func (i BodiesIterator) ForEach(tx kv.Tx, s *RoSnapshots, f func(blockNum uint64, baseTxNum uint64, txAmount uint64) error) error {
var blocksInSnapshtos uint64
if s != nil && s.cfg.Enabled {
blocksInSnapshtos = s.SegmentsMax()
}

if s != nil && s.cfg.Enabled {
if err := s.Bodies.View(func(bs []*BodySegment) error {
for _, b := range bs {
if err := b.Iterate(f); err != nil {
return err
}
}
return nil
}); err != nil {
return fmt.Errorf("build txNum => blockNum mapping: %w", err)
}
}

for i := blocksInSnapshtos + 1; ; i++ {
body, baseTxId, txAmount, err := rawdb.ReadBodyByNumber(tx, i)
if err != nil {
return err
}
if body == nil {
break
}
if err := f(i, baseTxId-1, uint64(txAmount)+2); err != nil {
return err
}
}
return nil
}
// BackgroundResult - used only to indicate that some work is done
// not much reason to pass exact results via this object, just get the latest state when needed
type BackgroundResult struct {
has bool
err error
}

func (br *BackgroundResult) Has() bool { return br.has }
func (br *BackgroundResult) Set(err error) { br.has, br.err = true, err }
func (br *BackgroundResult) GetAndReset() (bool, error) {
has, err := br.has, br.err
br.has, br.err = false, nil
return has, err
}
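
A minimal usage sketch (assumed caller code): one call site records the outcome of a unit of work with Set, and the owner later collects it exactly once with GetAndReset; callers are expected to provide their own scheduling, since the struct itself has no locking.

// Sketch: record one result, then collect and reset it.
func recordAndCollect(br *BackgroundResult, work func() error) {
    br.Set(work())

    if has, err := br.GetAndReset(); has && err != nil {
        log.Warn("background work failed", "err", err)
    }
}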
@ -163,24 +163,18 @@ func TestOpenAllSnapshot(t *testing.T) {
require.NoError(err)
require.Equal(2, len(s.Headers.segments))

ok, err := s.ViewTxs(10, func(sn *TxnSegment) error {
require.Equal(int(sn.ranges.to), 500_000)
return nil
})
require.NoError(err)
require.True(ok)
view := s.View()
defer view.Close()

ok, err = s.ViewTxs(500_000, func(sn *TxnSegment) error {
require.Equal(int(sn.ranges.to), 1_000_000) // [from:to)
return nil
})
require.NoError(err)
seg, ok := view.TxsSegment(10)
require.True(ok)
require.Equal(int(seg.ranges.to), 500_000)

ok, err = s.ViewTxs(1_000_000, func(sn *TxnSegment) error {
return nil
})
require.NoError(err)
seg, ok = view.TxsSegment(500_000)
require.True(ok)
require.Equal(int(seg.ranges.to), 1_000_000)

_, ok = view.TxsSegment(1_000_000)
require.False(ok)

// Erigon may create new snapshots by itself - with heights bigger than the hardcoded ExpectedBlocks
@ -490,7 +490,7 @@ func TestChainTxReorgs(t *testing.T) {
t.Errorf("drop %d: receipt %v found while shouldn't have been", i, rcpt)
}
}
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)

// added tx
txs = types.Transactions{pastAdd, freshAdd, futureAdd}
@ -793,7 +793,7 @@ func doModesTest(t *testing.T, pm prune.Mode) error {
require.Equal(uint64(0), found.Minimum())
}

br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)
br := snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3)

if pm.TxIndex.Enabled() {
b, err := rawdb.ReadBlockByNumber(tx, 1)
@ -40,7 +40,7 @@ func TestInserter1(t *testing.T) {
t.Fatal(err)
}
defer tx.Rollback()
hi := headerdownload.NewHeaderInserter("headers", big.NewInt(0), 0, snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3))
hi := headerdownload.NewHeaderInserter("headers", big.NewInt(0), 0, snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3))
h1 := types.Header{
Number: big.NewInt(1),
Difficulty: big.NewInt(10),
@ -54,11 +54,11 @@ func TestInserter1(t *testing.T) {
}
h2Hash := h2.Hash()
data1, _ := rlp.EncodeToBytes(&h1)
if _, err = hi.FeedHeaderPoW(tx, snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3), &h1, data1, h1Hash, 1); err != nil {
if _, err = hi.FeedHeaderPoW(tx, snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3), &h1, data1, h1Hash, 1); err != nil {
t.Errorf("feed empty header 1: %v", err)
}
data2, _ := rlp.EncodeToBytes(&h2)
if _, err = hi.FeedHeaderPoW(tx, snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3), &h2, data2, h2Hash, 2); err != nil {
if _, err = hi.FeedHeaderPoW(tx, snapshotsync.NewBlockReader(m.BlockSnapshots, m.TransactionsV3), &h2, data2, h2Hash, 2); err != nil {
t.Errorf("feed empty header 2: %v", err)
}
}
@ -259,7 +259,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
if tb != nil {
tb.Cleanup(mock.Close)
}
blockReader := snapshotsync.NewBlockReaderWithSnapshots(mock.BlockSnapshots, mock.TransactionsV3)
blockReader := snapshotsync.NewBlockReader(mock.BlockSnapshots, mock.TransactionsV3)

mock.Address = crypto.PubkeyToAddress(mock.Key.PublicKey)
@ -382,7 +382,7 @@ func NewDefaultStages(ctx context.Context,
logger log.Logger,
) []*stagedsync.Stage {
dirs := cfg.Dirs
blockReader := snapshotsync.NewBlockReaderWithSnapshots(snapshots, cfg.TransactionsV3)
blockReader := snapshotsync.NewBlockReader(snapshots, cfg.TransactionsV3)
blockRetire := snapshotsync.NewBlockRetire(1, dirs.Tmp, snapshots, db, snapDownloader, notifications.Events, logger)

// During Import we don't want other services like header requests, body requests etc. to be running.
@ -465,7 +465,7 @@ func NewDefaultStages(ctx context.Context,
func NewInMemoryExecution(ctx context.Context, db kv.RwDB, cfg *ethconfig.Config, controlServer *sentry.MultiClient,
dirs datadir.Dirs, notifications *shards.Notifications, snapshots *snapshotsync.RoSnapshots, agg *state.AggregatorV3,
logger log.Logger) (*stagedsync.Sync, error) {
blockReader := snapshotsync.NewBlockReaderWithSnapshots(snapshots, cfg.TransactionsV3)
blockReader := snapshotsync.NewBlockReader(snapshots, cfg.TransactionsV3)

return stagedsync.New(
stagedsync.StateStages(ctx,