erigon snapshots integrity: add check for body.BaseTxnID (#9121)
parent 18baf81b78 · commit 82822ee602
@@ -30,6 +30,7 @@ func runTest(t *testing.T, blocks []*cltypes.SignedBeaconBlock, preState, postSt
}

func TestStateAntiquaryCapella(t *testing.T) {
	t.Skip("TODO: oom")
	blocks, preState, postState := tests.GetCapellaRandom()
	runTest(t, blocks, preState, postState)
}
eth/integrity/e3_history_no_system_txs.go (new file, 1 line)
@@ -0,0 +1 @@
package integrity
@@ -21,6 +21,7 @@ import (
	"github.com/ledgerwatch/erigon-lib/metrics"
	"github.com/ledgerwatch/log/v3"
	"github.com/urfave/cli/v2"
	"golang.org/x/sync/semaphore"

	"github.com/ledgerwatch/erigon-lib/common"
	"github.com/ledgerwatch/erigon-lib/common/datadir"
@@ -144,6 +145,20 @@ var snapshotCommand = cli.Command{
				},
			}),
		},
		{
			Name:   "integrity",
			Action: doIntegrity,
			Flags: joinFlags([]cli.Flag{
				&utils.DataDirFlag,
			}),
		},
		//{
		//	Name: "bodies_decrement_datafix",
		//	Action: doBodiesDecrement,
		//	Flags: joinFlags([]cli.Flag{
		//		&utils.DataDirFlag,
		//	}),
		//},
	},
}

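The entry above registers the new check as a subcommand of the existing snapshots command and routes it to doIntegrity (added below), with only --datadir as a flag. Assuming the usual Erigon CLI entry point (an assumption, not shown in this diff), it would be invoked roughly as: erigon snapshots integrity --datadir <datadir>.
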
@@ -174,6 +189,39 @@ var (
	}
)

func doIntegrity(cliCtx *cli.Context) error {
	logger, _, err := debug.Setup(cliCtx, true /* root logger */)
	if err != nil {
		return err
	}

	ctx := cliCtx.Context
	dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name))
	chainDB := dbCfg(kv.ChainDB, dirs.Chaindata).MustOpen()
	defer chainDB.Close()

	cfg := ethconfig.NewSnapCfg(true, false, true)
	chainConfig := fromdb.ChainConfig(chainDB)
	blockSnaps, borSnaps, blockRetire, agg, err := openSnaps(ctx, cfg, dirs, snapcfg.KnownCfg(chainConfig.ChainName, 0).Version, chainDB, logger)
	if err != nil {
		return err
	}
	defer blockSnaps.Close()
	defer borSnaps.Close()
	defer agg.Close()

	blockReader, _ := blockRetire.IO()
	if err := blockReader.(*freezeblocks.BlockReader).IntegrityTxnID(false); err != nil {
		return err
	}

	//if err := integrity.E3HistoryNoSystemTxs(ctx, chainDB, agg); err != nil {
	//	return err
	//}

	return nil
}

func doDiff(cliCtx *cli.Context) error {
	defer log.Info("Done")
	srcF, dstF := cliCtx.String("src"), cliCtx.String("dst")
@@ -243,6 +291,7 @@ func doDecompressSpeed(cliCtx *cli.Context) error {
	}()
	return nil
}

func doRam(cliCtx *cli.Context) error {
	var logger log.Logger
	var err error
@@ -279,19 +328,17 @@ func doIndicesCommand(cliCtx *cli.Context) error {

	dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name))
	rebuild := cliCtx.Bool(SnapshotRebuildFlag.Name)
	//from := cliCtx.Uint64(SnapshotFromFlag.Name)

	chainDB := mdbx.NewMDBX(logger).Path(dirs.Chaindata).MustOpen()
	chainDB := dbCfg(kv.ChainDB, dirs.Chaindata).MustOpen()
	defer chainDB.Close()

	dir.MustExist(dirs.SnapHistory)
	chainConfig := fromdb.ChainConfig(chainDB)

	if rebuild {
		panic("not implemented")
	}

	cfg := ethconfig.NewSnapCfg(true, false, true)
	chainConfig := fromdb.ChainConfig(chainDB)
	blockSnaps, borSnaps, br, agg, err := openSnaps(ctx, cfg, dirs, snapcfg.KnownCfg(chainConfig.ChainName, 0).Version, chainDB, logger)

	if err != nil {
@@ -325,13 +372,16 @@ func openSnaps(ctx context.Context, cfg ethconfig.BlocksFreezing, dirs datadir.D
		return
	}
	borSnaps.LogStat("open")

	agg, err = libstate.NewAggregatorV3(ctx, dirs.SnapHistory, dirs.Tmp, ethconfig.HistoryV3AggregationStep, chainDB, logger)
	if err != nil {
		return
	}
	agg.SetWorkers(estimate.CompressSnapshot.Workers())
	err = agg.OpenFolder()
	agg = openAgg(ctx, dirs, chainDB, logger)
	err = chainDB.View(ctx, func(tx kv.Tx) error {
		ac := agg.MakeContext()
		defer ac.Close()
		//ac.LogStats(tx, func(endTxNumMinimax uint64) uint64 {
		//	_, histBlockNumProgress, _ := rawdbv3.TxNums.FindBlockNum(tx, endTxNumMinimax)
		//	return histBlockNumProgress
		//})
		return nil
	})
	if err != nil {
		return
	}
@@ -461,7 +511,7 @@ func doRetireCommand(cliCtx *cli.Context) error {
	every := cliCtx.Uint64(SnapshotEveryFlag.Name)
	version := uint8(cliCtx.Int(SnapshotVersionFlag.Name))

	db := mdbx.NewMDBX(logger).Label(kv.ChainDB).Path(dirs.Chaindata).MustOpen()
	db := dbCfg(kv.ChainDB, dirs.Chaindata).MustOpen()
	defer db.Close()

	cfg := ethconfig.NewSnapCfg(true, false, true)
@@ -641,3 +691,113 @@ func doUploaderCommand(cliCtx *cli.Context) error {
	}
	return err
}

/*
func doBodiesDecrement(cliCtx *cli.Context) error {
	logger, _, err := debug.Setup(cliCtx, true)
	if err != nil {
		return err
	}
	dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name))
	ctx := cliCtx.Context
	logEvery := time.NewTicker(30 * time.Second)
	defer logEvery.Stop()

	list, err := snaptype.Segments(dirs.Snap, 1)
	if err != nil {
		return err
	}
	var l []snaptype.FileInfo
	for _, f := range list {
		if f.T != snaptype.Bodies {
			continue
		}
		if f.From < 14_500_000 {
			continue
		}
		l = append(l, f)
	}
	migrateSingleBody := func(srcF, dstF string) error {
		src, err := compress.NewDecompressor(srcF)
		if err != nil {
			return err
		}
		defer src.Close()
		dst, err := compress.NewCompressor(ctx, "compress", dstF, dirs.Tmp, compress.MinPatternScore, estimate.CompressSnapshot.Workers(), log.LvlInfo, logger)
		if err != nil {
			return err
		}
		defer dst.Close()

		i := 0
		srcG := src.MakeGetter()
		var buf []byte
		dstBuf := bytes.NewBuffer(nil)
		for srcG.HasNext() {
			i++
			buf, _ = srcG.Next(buf[:0])
			body := &types.BodyForStorage{}
			if err := rlp.Decode(bytes.NewReader(buf), body); err != nil {
				return err
			}
			body.BaseTxId -= 1
			dstBuf.Reset()
			if err := rlp.Encode(dstBuf, body); err != nil {
				return err
			}

			if err := dst.AddWord(dstBuf.Bytes()); err != nil {
				return err
			}

			select {
			case <-logEvery.C:
				logger.Info("[bodies] progress", "f", src.FileName(), "progress", fmt.Sprintf("%dK/%dK", i/1_000, src.Count()/1_000))
			default:
			}
		}
		if err := dst.Compress(); err != nil {
			return err
		}
		src.Close()
		dst.Close()
		os.Rename(srcF, srcF+".back")
		os.Rename(dstF, srcF)
		os.Remove(srcF + ".torrent")
		os.Remove(srcF + ".idx")
		ext := filepath.Ext(srcF)
		withoutExt := srcF[:len(srcF)-len(ext)]
		_ = os.Remove(withoutExt + ".idx")
		return nil
	}
	for _, f := range l {
		srcF, dstF := f.Path, f.Path+"2"
		if err := migrateSingleBody(srcF, dstF); err != nil {
			return err
		}
	}

	return nil
}
*/

func dbCfg(label kv.Label, path string) mdbx.MdbxOpts {
	const ThreadsLimit = 9_000
	limiterB := semaphore.NewWeighted(ThreadsLimit)
	opts := mdbx.NewMDBX(log.New()).Path(path).Label(label).RoTxsLimiter(limiterB)
	// the integration tool doesn't intend to create the db, so the easiest way to open it is to pass the mdbx.Accede flag,
	// which reads all options from the existing DB instead of overriding them
	opts = opts.Accede()
	return opts
}
func openAgg(ctx context.Context, dirs datadir.Dirs, chainDB kv.RwDB, logger log.Logger) *libstate.AggregatorV3 {
	agg, err := libstate.NewAggregatorV3(ctx, dirs.Snap, dirs.Tmp, ethconfig.HistoryV3AggregationStep, chainDB, logger)
	if err != nil {
		panic(err)
	}
	if err = agg.OpenFolder(); err != nil {
		panic(err)
	}
	agg.SetWorkers(estimate.CompressSnapshot.Workers())
	return agg
}
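These two helpers back the changes above: dbCfg opens an existing chaindata DB in mdbx.Accede mode with a weighted semaphore capping concurrent read transactions, and openAgg opens the state aggregator, panicking on failure. A minimal, self-contained sketch of the limiter primitive handed to RoTxsLimiter (an illustration of golang.org/x/sync/semaphore only, not erigon code):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	// The same primitive dbCfg builds with ThreadsLimit = 9_000: at most N units held at once.
	limiter := semaphore.NewWeighted(2)
	ctx := context.Background()
	for i := 0; i < 4; i++ {
		if err := limiter.Acquire(ctx, 1); err != nil { // would wait while 2 units are already held
			panic(err)
		}
		fmt.Println("read tx", i, "admitted")
		limiter.Release(1)
	}
}
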
@@ -21,6 +21,7 @@ import (
	"github.com/ledgerwatch/erigon/eth/ethconfig"
	"github.com/ledgerwatch/erigon/rlp"
	"github.com/ledgerwatch/erigon/turbo/services"
	"github.com/ledgerwatch/log/v3"
)

type RemoteBlockReader struct {
@@ -868,6 +869,33 @@ func (r *BlockReader) IterateFrozenBodies(f func(blockNum, baseTxNum, txAmount u
	}
	return nil
}

func (r *BlockReader) IntegrityTxnID(failFast bool) error {
	defer log.Info("[integrity] IntegrityTxnID done")
	view := r.sn.View()
	defer view.Close()

	var expectedFirstTxnID uint64
	for _, snb := range view.Bodies() {
		firstBlockNum := snb.idxBodyNumber.BaseDataID()
		sn, _ := view.TxsSegment(firstBlockNum)
		b, _, err := r.bodyForStorageFromSnapshot(firstBlockNum, snb, nil)
		if err != nil {
			return err
		}
		if b.BaseTxId != expectedFirstTxnID {
			err := fmt.Errorf("[integrity] IntegrityTxnID: bn=%d, baseID=%d, cnt=%d, expectedFirstTxnID=%d", firstBlockNum, b.BaseTxId, sn.Seg.Count(), expectedFirstTxnID)
			if failFast {
				return err
			} else {
				log.Error(err.Error())
			}
		}
		expectedFirstTxnID = b.BaseTxId + uint64(sn.Seg.Count())
	}
	return nil
}

func (r *BlockReader) BadHeaderNumber(ctx context.Context, tx kv.Getter, hash common.Hash) (blockHeight *uint64, err error) {
	return rawdb.ReadBadHeaderNumber(tx, hash)
}
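The new check walks every bodies segment in block order and asserts that the first body's BaseTxId continues exactly where the previous transaction segment ended, i.e. that BaseTxnID forms one contiguous sequence across snapshot files. A minimal, self-contained sketch of that invariant, using a hypothetical segment struct rather than erigon's real snapshot types:

package main

import "fmt"

// segment stands in for one bodies/transactions snapshot pair; not erigon's real types.
type segment struct {
	firstBaseTxID uint64 // BaseTxId stored in the segment's first body
	txCount       uint64 // number of entries in the matching transactions segment
}

// checkTxnIDContinuity mirrors the loop in IntegrityTxnID: every segment must start
// at the running counter, which then advances by that segment's transaction count.
func checkTxnIDContinuity(segments []segment) error {
	var expected uint64
	for i, s := range segments {
		if s.firstBaseTxID != expected {
			return fmt.Errorf("segment %d: baseTxID=%d, expected %d", i, s.firstBaseTxID, expected)
		}
		expected = s.firstBaseTxID + s.txCount
	}
	return nil
}

func main() {
	fmt.Println(checkTxnIDContinuity([]segment{{0, 12}, {12, 9}, {21, 5}})) // <nil>
	fmt.Println(checkTxnIDContinuity([]segment{{0, 12}, {13, 9}}))          // reports the gap
}
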
@@ -1404,14 +1404,13 @@ func CanDeleteTo(curBlockNum uint64, blocksInSnapshots uint64) (blockTo uint64)
func (br *BlockRetire) retireBlocks(ctx context.Context, minBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDelete func(l []string) error) (bool, error) {
	notifier, logger, blockReader, tmpDir, db, workers := br.notifier, br.logger, br.blockReader, br.tmpDir, br.db, br.workers
	snapshots := br.snapshots()
	firstTxNum := blockReader.(*BlockReader).FirstTxNumNotInSnapshots()

	blockFrom, blockTo, ok := CanRetire(maxBlockNum, minBlockNum)

	if ok {
		logger.Log(lvl, "[snapshots] Retire Blocks", "range", fmt.Sprintf("%dk-%dk", blockFrom/1000, blockTo/1000))
		// in future we will do it in background
		if err := DumpBlocks(ctx, snapshots.version, blockFrom, blockTo, snaptype.Erigon2MergeLimit, tmpDir, snapshots.Dir(), firstTxNum, db, workers, lvl, logger, blockReader); err != nil {
		if err := DumpBlocks(ctx, snapshots.version, blockFrom, blockTo, snaptype.Erigon2MergeLimit, tmpDir, snapshots.Dir(), db, workers, lvl, logger, blockReader); err != nil {
			return ok, fmt.Errorf("DumpBlocks: %w", err)
		}
		if err := snapshots.ReopenFolder(); err != nil {
@@ -1635,21 +1634,24 @@ func (br *BlockRetire) buildBorMissedIndicesIfNeed(ctx context.Context, logPrefi
	return nil
}

func DumpBlocks(ctx context.Context, version uint8, blockFrom, blockTo, blocksPerFile uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error {
func DumpBlocks(ctx context.Context, version uint8, blockFrom, blockTo, blocksPerFile uint64, tmpDir, snapDir string, chainDB kv.RoDB, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error {
	if blocksPerFile == 0 {
		return nil
	}
	chainConfig := fromdb.ChainConfig(chainDB)

	firstTxNum := blockReader.(*BlockReader).FirstTxNumNotInSnapshots()
	for i := blockFrom; i < blockTo; i = chooseSegmentEnd(i, blockTo, blocksPerFile) {
		if err := dumpBlocksRange(ctx, version, i, chooseSegmentEnd(i, blockTo, blocksPerFile), tmpDir, snapDir, firstTxNum, chainDB, *chainConfig, workers, lvl, logger, blockReader); err != nil {
		lastTxNum, err := dumpBlocksRange(ctx, version, i, chooseSegmentEnd(i, blockTo, blocksPerFile), tmpDir, snapDir, firstTxNum, chainDB, *chainConfig, workers, lvl, logger)
		if err != nil {
			return err
		}
		firstTxNum = lastTxNum + 1
	}
	return nil
}

func dumpBlocksRange(ctx context.Context, version uint8, blockFrom, blockTo uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, chainConfig chain.Config, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error {
func dumpBlocksRange(ctx context.Context, version uint8, blockFrom, blockTo uint64, tmpDir, snapDir string, firstTxNum uint64, chainDB kv.RoDB, chainConfig chain.Config, workers int, lvl log.Lvl, logger log.Logger) (lastTxNum uint64, err error) {
	logEvery := time.NewTicker(20 * time.Second)
	defer logEvery.Stop()

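DumpBlocks no longer receives firstTxNum from its caller: it derives the starting txn id from the snapshots already on disk and then threads the lastTxNum returned by each dumpBlocksRange call into the next range via firstTxNum = lastTxNum + 1, which is exactly the continuity the new IntegrityTxnID check verifies. A toy, self-contained illustration of that threading (dumpRange is a stand-in, not erigon code):

package main

import "fmt"

// dumpRange pretends to write one block range and reports the id of the last txn it wrote.
func dumpRange(from, to, firstTxNum uint64) (lastTxNum uint64) {
	const txsPerBlock = 3 // pretend every block contributes exactly 3 txn ids
	return firstTxNum + (to-from)*txsPerBlock - 1
}

func main() {
	var firstTxNum uint64
	for from := uint64(0); from < 3000; from += 1000 {
		last := dumpRange(from, from+1000, firstTxNum)
		fmt.Printf("blocks [%d,%d): txns [%d,%d]\n", from, from+1000, firstTxNum, last)
		firstTxNum = last + 1 // the next segment resumes one past the previous one
	}
}
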
@@ -1659,21 +1661,21 @@ func dumpBlocksRange(ctx context.Context, version uint8, blockFrom, blockTo uint

		sn, err := compress.NewCompressor(ctx, "Snapshot Headers", f.Path, tmpDir, compress.MinPatternScore, workers, log.LvlTrace, logger)
		if err != nil {
			return err
			return lastTxNum, err
		}
		defer sn.Close()
		if err := DumpHeaders(ctx, chainDB, blockFrom, blockTo, workers, lvl, logger, func(v []byte) error {
			return sn.AddWord(v)
		}); err != nil {
			return fmt.Errorf("DumpHeaders: %w", err)
			return lastTxNum, fmt.Errorf("DumpHeaders: %w", err)
		}
		if err := sn.Compress(); err != nil {
			return fmt.Errorf("compress: %w", err)
			return lastTxNum, fmt.Errorf("compress: %w", err)
		}

		p := &background.Progress{}
		if err := buildIdx(ctx, f, &chainConfig, tmpDir, p, lvl, logger); err != nil {
			return err
			return lastTxNum, err
		}
	}

@@ -1683,21 +1685,22 @@ func dumpBlocksRange(ctx context.Context, version uint8, blockFrom, blockTo uint

		sn, err := compress.NewCompressor(ctx, "Snapshot Bodies", f.Path, tmpDir, compress.MinPatternScore, workers, log.LvlTrace, logger)
		if err != nil {
			return err
			return lastTxNum, err
		}
		defer sn.Close()
		if err := DumpBodies(ctx, chainDB, blockFrom, blockTo, firstTxNum, workers, lvl, logger, func(v []byte) error {
		lastTxNum, err = DumpBodies(ctx, chainDB, blockFrom, blockTo, firstTxNum, lvl, logger, func(v []byte) error {
			return sn.AddWord(v)
		}); err != nil {
			return fmt.Errorf("DumpBodies: %w", err)
		})
		if err != nil {
			return lastTxNum, fmt.Errorf("DumpBodies: %w", err)
		}
		if err := sn.Compress(); err != nil {
			return fmt.Errorf("compress: %w", err)
			return lastTxNum, fmt.Errorf("compress: %w", err)
		}

		p := &background.Progress{}
		if err := buildIdx(ctx, f, &chainConfig, tmpDir, p, lvl, logger); err != nil {
			return err
			return lastTxNum, err
		}
	}

@@ -1707,7 +1710,7 @@ func dumpBlocksRange(ctx context.Context, version uint8, blockFrom, blockTo uint

		sn, err := compress.NewCompressor(ctx, "Snapshot Txs", f.Path, tmpDir, compress.MinPatternScore, workers, log.LvlTrace, logger)
		if err != nil {
			return fmt.Errorf("NewCompressor: %w, %s", err, f.Path)
			return lastTxNum, fmt.Errorf("NewCompressor: %w, %s", err, f.Path)
		}
		defer sn.Close()

@@ -1715,10 +1718,10 @@ func dumpBlocksRange(ctx context.Context, version uint8, blockFrom, blockTo uint
			return sn.AddWord(v)
		})
		if err != nil {
			return fmt.Errorf("DumpTxs: %w", err)
			return lastTxNum, fmt.Errorf("DumpTxs: %w", err)
		}
		if expectedCount != sn.Count() {
			return fmt.Errorf("incorrect tx count: %d, expected from db: %d", sn.Count(), expectedCount)
			return lastTxNum, fmt.Errorf("incorrect tx count: %d, expected from db: %d", sn.Count(), expectedCount)
		}
		snapDir, fileName := filepath.Split(f.Path)
		ext := filepath.Ext(fileName)
@@ -1726,23 +1729,23 @@ func dumpBlocksRange(ctx context.Context, version uint8, blockFrom, blockTo uint
		t := time.Now()
		_, expectedCount, err = txsAmountBasedOnBodiesSnapshots(snapDir, version, blockFrom, blockTo)
		if err != nil {
			return err
			return lastTxNum, err
		}
		if expectedCount != sn.Count() {
			return fmt.Errorf("incorrect tx count: %d, expected from snapshots: %d", sn.Count(), expectedCount)
			return lastTxNum, fmt.Errorf("incorrect tx count: %d, expected from snapshots: %d", sn.Count(), expectedCount)
		}
		if err := sn.Compress(); err != nil {
			return fmt.Errorf("compress: %w", err)
			return lastTxNum, fmt.Errorf("compress: %w", err)
		}
		logger.Log(lvl, "[snapshots] Compression", "took", time.Since(t), "ratio", sn.Ratio.String(), "file", fileName[:len(fileName)-len(ext)])

		p := &background.Progress{}
		if err := buildIdx(ctx, f, &chainConfig, tmpDir, p, lvl, logger); err != nil {
			return err
			return lastTxNum, err
		}
	}

	return nil
	return lastTxNum, nil
}

func hasIdxFile(sn snaptype.FileInfo, logger log.Logger) bool {
@@ -2032,7 +2035,7 @@ func DumpHeaders(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, wor
}

// DumpBodies - [from, to)
func DumpBodies(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, firstTxNum uint64, workers int, lvl log.Lvl, logger log.Logger, collect func([]byte) error) error {
func DumpBodies(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, firstTxNum uint64, lvl log.Lvl, logger log.Logger, collect func([]byte) error) (uint64, error) {
	logEvery := time.NewTicker(20 * time.Second)
	defer logEvery.Stop()

@@ -2089,10 +2092,10 @@ func DumpBodies(ctx context.Context, db kv.RoDB, blockFrom, blockTo uint64, firs
		}
		return true, nil
	}); err != nil {
		return err
		return firstTxNum, err
	}

	return nil
	return firstTxNum, nil
}

var EmptyTxHash = common2.Hash{}
@@ -181,7 +181,7 @@ func TestDump(t *testing.T) {
			txsAmount := uint64(0)
			var baseIdList []uint64
			firstTxNum := uint64(0)
			err := freezeblocks.DumpBodies(m.Ctx, m.DB, 0, uint64(test.chainSize-3), firstTxNum, 1, log.LvlInfo, log.New(), func(v []byte) error {
			_, err := freezeblocks.DumpBodies(m.Ctx, m.DB, 0, uint64(test.chainSize-3), firstTxNum, log.LvlInfo, log.New(), func(v []byte) error {
				i++
				body := &types.BodyForStorage{}
				require.NoError(rlp.DecodeBytes(v, body))
@@ -197,7 +197,7 @@ func TestDump(t *testing.T) {
			firstTxNum += txsAmount
			i = 0
			baseIdList = baseIdList[:0]
			err = freezeblocks.DumpBodies(m.Ctx, m.DB, 2, uint64(2*test.chainSize), firstTxNum, 1, log.LvlInfo, log.New(), func(v []byte) error {
			_, err = freezeblocks.DumpBodies(m.Ctx, m.DB, 2, uint64(2*test.chainSize), firstTxNum, log.LvlInfo, log.New(), func(v []byte) error {
				i++
				body := &types.BodyForStorage{}
				require.NoError(rlp.DecodeBytes(v, body))
@@ -215,7 +215,7 @@ func TestDump(t *testing.T) {
			i := 0
			var baseIdList []uint64
			firstTxNum := uint64(1000)
			err := freezeblocks.DumpBodies(m.Ctx, m.DB, 2, uint64(test.chainSize), firstTxNum, 1, log.LvlInfo, log.New(), func(v []byte) error {
			lastTxNum, err := freezeblocks.DumpBodies(m.Ctx, m.DB, 2, uint64(test.chainSize), firstTxNum, log.LvlInfo, log.New(), func(v []byte) error {
				i++
				body := &types.BodyForStorage{}
				require.NoError(rlp.DecodeBytes(v, body))
@@ -225,6 +225,8 @@ func TestDump(t *testing.T) {
			require.NoError(err)
			require.Equal(test.chainSize-2, i)
			require.Equal(baseIdRange(int(firstTxNum), 3, test.chainSize-2), baseIdList)
			require.Equal(lastTxNum, baseIdList[len(baseIdList)-1]+3)
			require.Equal(lastTxNum, firstTxNum+uint64(i*3))
		})
		t.Run("blocks", func(t *testing.T) {
			if test.chainSize < 1000 || test.chainSize%1000 != 0 {
@@ -239,7 +241,7 @@ func TestDump(t *testing.T) {
			snConfig := snapcfg.KnownCfg(networkname.MainnetChainName, 0)
			snConfig.ExpectBlocks = math.MaxUint64

			err := freezeblocks.DumpBlocks(m.Ctx, 1, 0, uint64(test.chainSize), uint64(test.chainSize), tmpDir, snapDir, 0, m.DB, 1, log.LvlInfo, logger, m.BlockReader)
			err := freezeblocks.DumpBlocks(m.Ctx, 1, 0, uint64(test.chainSize), uint64(test.chainSize), tmpDir, snapDir, m.DB, 1, log.LvlInfo, logger, m.BlockReader)
			require.NoError(err)
		})
	}