Mirror of https://gitlab.com/pulsechaincom/erigon-pulse.git (synced 2024-12-22 03:30:37 +00:00)

Store Canonical/NonCanonical/Bad blocks/senders/txs in same tables. Make re-org cheaper (#7648)

- allow storing non-canonical blocks/senders
- optimize re-org: don't update/delete most of the data
- allow marking a chain as `Bad`: it will not be visible via eth_getBlockByHash, but can still be read if the hash+num are known

This commit is contained in:
parent f62b6fd722
commit e14664d53b
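The core idea, per the commit message above: blocks, senders and transactions are keyed by (blockNum, blockHash), so canonical, non-canonical and bad blocks can share the same tables and a re-org only has to touch the small marker tables (kv.HeaderCanonical, kv.HeaderNumber and the new kv.BadHeaderNumber). A minimal, illustrative sketch of that key layout; this is not Erigon code, it just mirrors the shape of dbutils.HeaderKey used throughout the diff:

```go
// Illustrative only: the (blockNum, blockHash) key layout that lets canonical,
// non-canonical and bad blocks live in the same kv.Headers / kv.BlockBody /
// kv.Senders tables. Canonicity is a separate marker, so a re-org rewrites
// markers instead of moving block data.
package main

import (
	"encoding/binary"
	"fmt"
)

// headerKey mirrors the shape of dbutils.HeaderKey: 8-byte big-endian block
// number followed by the 32-byte block hash.
func headerKey(number uint64, hash [32]byte) []byte {
	k := make([]byte, 8+32)
	binary.BigEndian.PutUint64(k, number)
	copy(k[8:], hash[:])
	return k
}

func main() {
	var hash [32]byte
	hash[31] = 0xaa
	fmt.Printf("%x\n", headerKey(100, hash))
}
```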
@@ -249,7 +249,7 @@ func NewBackend(stack *node.Node, config *ethconfig.Config, logger log.Logger) (
genesisSpec = nil
}
var genesisErr error
chainConfig, genesis, genesisErr = core.WriteGenesisBlock(tx, genesisSpec, config.OverrideShanghaiTime, tmpdir, logger)
chainConfig, genesis, genesisErr = core.WriteGenesisBlock(tx, genesisSpec, config.OverrideShanghaiTime, tmpdir, logger, blockWriter)
if _, ok := genesisErr.(*chain.ConfigCompatError); genesisErr != nil && !ok {
return genesisErr
}

@@ -61,7 +61,10 @@ func (api *OtterscanAPIImpl) GetContractCreator(ctx context.Context, addr common
if err != nil {
return nil, err
}
lastTxNum, _ := rawdbv3.TxNums.Max(tx, headNumber)
lastTxNum, err := rawdbv3.TxNums.Max(tx, headNumber)
if err != nil {
return nil, err
}

// Contract; search for creation tx; navigate forward on AccountsHistory/ChangeSets
//

@@ -92,6 +95,8 @@ func (api *OtterscanAPIImpl) GetContractCreator(ctx context.Context, addr common
log.Error("Unexpected error, couldn't find changeset", "txNum", i, "addr", addr)
return nil, err
}
fmt.Printf("i: %d, %t, %x\n", i, ok, v)

if !ok {
err = fmt.Errorf("couldn't find history txnID=%v addr=%v", txnID, addr)
log.Error("[rpc] Unexpected error", "err", err)

@@ -18,7 +18,7 @@ func TestGetContractCreator(t *testing.T) {
addr := libcommon.HexToAddress("0x537e697c7ab75a26f9ecf0ce810e3154dfcaaf44")
expectCreator := libcommon.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
expectCredByTx := libcommon.HexToHash("0x6e25f89e24254ba3eb460291393a4715fd3c33d805334cbd05c1b2efe1080f18")
t.Run("valid input", func(t *testing.T) {
t.Run("valid inputs", func(t *testing.T) {
require := require.New(t)
results, err := api.GetContractCreator(m.Ctx, addr)
require.NoError(err)

@@ -51,6 +51,9 @@ func (back *RemoteBackend) CurrentBlock(db kv.Tx) (*types.Block, error) {
func (back *RemoteBackend) RawTransactions(ctx context.Context, tx kv.Getter, fromBlock, toBlock uint64) (txs [][]byte, err error) {
panic("not implemented")
}
func (back *RemoteBackend) ReadAncestor(db kv.Getter, hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
panic("not implemented")
}
func (back *RemoteBackend) BlockByNumber(ctx context.Context, db kv.Tx, number uint64) (*types.Block, error) {
hash, err := back.CanonicalHash(ctx, db, number)
if err != nil {

@@ -205,6 +208,9 @@ func (back *RemoteBackend) TxnLookup(ctx context.Context, tx kv.Getter, txnHash
func (back *RemoteBackend) HasSenders(ctx context.Context, tx kv.Getter, hash common.Hash, blockNum uint64) (bool, error) {
panic("HasSenders is low-level method, don't use it in RPCDaemon")
}
func (back *RemoteBackend) BadHeaderNumber(ctx context.Context, tx kv.Getter, hash common.Hash) (blockNum *uint64, err error) {
return back.blockReader.BadHeaderNumber(ctx, tx, hash)
}
func (back *RemoteBackend) BlockWithSenders(ctx context.Context, tx kv.Getter, hash common.Hash, blockNum uint64) (block *types.Block, senders []common.Address, err error) {
return back.blockReader.BlockWithSenders(ctx, tx, hash, blockNum)
}

@@ -9,7 +9,9 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
"github.com/ledgerwatch/erigon/core"
"github.com/ledgerwatch/erigon/core/rawdb/blockio"
"github.com/ledgerwatch/erigon/core/state/temporal"
"github.com/ledgerwatch/erigon/turbo/rpchelper"
"github.com/stretchr/testify/assert"

@@ -33,7 +35,12 @@ func TestGenesisBlockHashes(t *testing.T) {
t.Fatal(err)
}
defer tx.Rollback()
_, block, err := core.WriteGenesisBlock(tx, genesis, nil, "", logger)
histV3, err := kvcfg.HistoryV3.Enabled(tx)
if err != nil {
panic(err)
}
blockWriter := blockio.NewBlockWriter(histV3)
_, block, err := core.WriteGenesisBlock(tx, genesis, nil, "", logger, blockWriter)
require.NoError(t, err)
expect := params.GenesisHashByChainName(network)
require.NotNil(t, expect, network)

@@ -79,14 +86,19 @@ func TestCommitGenesisIdempotency(t *testing.T) {
require.NoError(t, err)
defer tx.Rollback()

histV3, err := kvcfg.HistoryV3.Enabled(tx)
if err != nil {
panic(err)
}
blockWriter := blockio.NewBlockWriter(histV3)
genesis := core.GenesisBlockByChainName(networkname.MainnetChainName)
_, _, err = core.WriteGenesisBlock(tx, genesis, nil, "", logger)
_, _, err = core.WriteGenesisBlock(tx, genesis, nil, "", logger, blockWriter)
require.NoError(t, err)
seq, err := tx.ReadSequence(kv.EthTx)
require.NoError(t, err)
require.Equal(t, uint64(2), seq)

_, _, err = core.WriteGenesisBlock(tx, genesis, nil, "", logger)
_, _, err = core.WriteGenesisBlock(tx, genesis, nil, "", logger, blockWriter)
require.NoError(t, err)
seq, err = tx.ReadSequence(kv.EthTx)
require.NoError(t, err)

@@ -29,6 +29,7 @@ import (
"github.com/holiman/uint256"
"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
"github.com/ledgerwatch/erigon/core/rawdb/blockio"
"github.com/ledgerwatch/erigon/turbo/snapshotsync"
"github.com/ledgerwatch/log/v3"
"golang.org/x/exp/slices"

@@ -75,7 +76,12 @@ func CommitGenesisBlockWithOverride(db kv.RwDB, genesis *types.Genesis, override
return nil, nil, err
}
defer tx.Rollback()
c, b, err := WriteGenesisBlock(tx, genesis, overrideShanghaiTime, tmpDir, logger)
histV3, err := kvcfg.HistoryV3.Enabled(tx)
if err != nil {
return nil, nil, err
}
blockWriter := blockio.NewBlockWriter(histV3)
c, b, err := WriteGenesisBlock(tx, genesis, overrideShanghaiTime, tmpDir, logger, blockWriter)
if err != nil {
return c, b, err
}

@@ -86,7 +92,8 @@ func CommitGenesisBlockWithOverride(db kv.RwDB, genesis *types.Genesis, override
return c, b, nil
}

func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overrideShanghaiTime *big.Int, tmpDir string, logger log.Logger) (*chain.Config, *types.Block, error) {
func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overrideShanghaiTime *big.Int, tmpDir string, logger log.Logger, blockWriter *blockio.BlockWriter) (*chain.Config, *types.Block, error) {
var storedBlock *types.Block
if genesis != nil && genesis.Config == nil {
return params.AllProtocolChanges, nil, types.ErrGenesisNoConfig
}

@@ -110,7 +117,7 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overrideShanghaiTime
custom = false
}
applyOverrides(genesis.Config)
block, _, err1 := write(tx, genesis, tmpDir)
block, _, err1 := write(tx, genesis, tmpDir, blockWriter)
if err1 != nil {
return genesis.Config, nil, err1
}

@@ -131,9 +138,14 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overrideShanghaiTime
return genesis.Config, block, &types.GenesisMismatchError{Stored: storedHash, New: hash}
}
}
storedBlock, err := rawdb.ReadBlockByHash(tx, storedHash)
if err != nil {
return genesis.Config, nil, err
blockReader := snapshotsync.NewBlockReader(snapshotsync.NewRoSnapshots(ethconfig.Snapshot{Enabled: false}, "", log.New()))
number := rawdb.ReadHeaderNumber(tx, storedHash)
if number != nil {
var err error
storedBlock, _, err = blockReader.BlockWithSenders(context.Background(), tx, storedHash, *number)
if err != nil {
return genesis.Config, nil, err
}
}
// Get the existing chain configuration.
newCfg := genesis.ConfigOrDefault(storedHash)

@@ -223,7 +235,12 @@ func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string) *types.Block
panic(err)
}
defer tx.Rollback()
block, _, err := write(tx, g, tmpDir)
histV3, err := kvcfg.HistoryV3.Enabled(tx)
if err != nil {
panic(err)
}
blockWriter := blockio.NewBlockWriter(histV3)
block, _, err := write(tx, g, tmpDir, blockWriter)
if err != nil {
panic(err)
}

@@ -236,7 +253,7 @@ func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string) *types.Block

// Write writes the block and state of a genesis specification to the database.
// The block is committed as the canonical head block.
func write(tx kv.RwTx, g *types.Genesis, tmpDir string) (*types.Block, *state.IntraBlockState, error) {
func write(tx kv.RwTx, g *types.Genesis, tmpDir string, blockWriter *blockio.BlockWriter) (*types.Block, *state.IntraBlockState, error) {
block, statedb, err2 := WriteGenesisState(g, tx, tmpDir)
if err2 != nil {
return block, statedb, err2

@@ -248,11 +265,6 @@ func write(tx kv.RwTx, g *types.Genesis, tmpDir string) (*types.Block, *state.In
if err := config.CheckConfigForkOrder(); err != nil {
return nil, nil, err
}
histV3, err := kvcfg.HistoryV3.Enabled(tx)
if err != nil {
return nil, nil, err
}
blockWriter := blockio.NewBlockWriter(histV3)

if err := blockWriter.WriteBlock(tx, block); err != nil {
return nil, nil, err

@@ -40,10 +40,8 @@ import (
"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
"github.com/ledgerwatch/erigon/ethdb/cbor"
"github.com/ledgerwatch/erigon/rlp"
"github.com/ledgerwatch/erigon/turbo/services"
)

// ReadCanonicalHash retrieves the hash assigned to a canonical block number.

@@ -67,12 +65,21 @@ func WriteCanonicalHash(db kv.Putter, hash libcommon.Hash, number uint64) error
}

// TruncateCanonicalHash removes all the number to hash canonical mapping from block number N
func TruncateCanonicalHash(tx kv.RwTx, blockFrom uint64, deleteHeaders bool) error {
if err := tx.ForEach(kv.HeaderCanonical, hexutility.EncodeTs(blockFrom), func(k, v []byte) error {
if deleteHeaders {
DeleteHeader(tx, libcommon.BytesToHash(v), blockFrom)
// Mark chain as bad feature:
// - BadBlock must be not available by hash
// - but available by hash+num - if read num from kv.BadHeaderNumber table
// - prune blocks: must delete Canonical/NonCanonical/BadBlocks also
func TruncateCanonicalHash(tx kv.RwTx, blockFrom uint64, markChainAsBad bool) error {
if err := tx.ForEach(kv.HeaderCanonical, hexutility.EncodeTs(blockFrom), func(blockNumBytes, blockHash []byte) error {
if markChainAsBad {
if err := tx.Delete(kv.HeaderNumber, blockHash); err != nil {
return err
}
if err := tx.Put(kv.BadHeaderNumber, blockHash, blockNumBytes); err != nil {
return err
}
}
return tx.Delete(kv.HeaderCanonical, k)
return tx.Delete(kv.HeaderCanonical, blockNumBytes)
}); err != nil {
return fmt.Errorf("TruncateCanonicalHash: %w", err)
}

@@ -115,6 +122,20 @@ func ReadHeaderNumber(db kv.Getter, hash libcommon.Hash) *uint64 {
number := binary.BigEndian.Uint64(data)
return &number
}
func ReadBadHeaderNumber(db kv.Getter, hash libcommon.Hash) (*uint64, error) {
data, err := db.GetOne(kv.BadHeaderNumber, hash.Bytes())
if err != nil {
return nil, err
}
if len(data) == 0 {
return nil, nil
}
if len(data) != 8 {
return nil, fmt.Errorf("ReadHeaderNumber got wrong data len: %d", len(data))
}
number := binary.BigEndian.Uint64(data)
return &number, nil
}
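How the new markChainAsBad path is meant to be used, per the comments above: the hash-to-number mapping moves from kv.HeaderNumber to kv.BadHeaderNumber, so lookups by hash alone stop resolving while hash+num reads keep working. A minimal sketch, assuming an open kv.RwTx, the libcommon/rawdb imports used elsewhere in this file, and abbreviated error handling; not a verbatim copy of Erigon code:

```go
// Sketch only: marking a fork as bad and reading it back, using the helpers
// introduced in this commit.
func markForkBadAndReadBack(tx kv.RwTx, from uint64, hash libcommon.Hash) error {
	// Unwind the canonical markers from `from` upwards and flag the chain as bad:
	// HeaderNumber entries are deleted, BadHeaderNumber entries are written.
	if err := rawdb.TruncateCanonicalHash(tx, from, true /* markChainAsBad */); err != nil {
		return err
	}

	// Lookup by hash alone no longer resolves...
	if num := rawdb.ReadHeaderNumber(tx, hash); num != nil {
		return fmt.Errorf("unexpected: bad header still visible by hash")
	}
	// ...but the number can still be recovered from kv.BadHeaderNumber,
	// and the block stays readable by hash+num as usual.
	num, err := rawdb.ReadBadHeaderNumber(tx, hash)
	if err != nil || num == nil {
		return err
	}
	header := rawdb.ReadHeader(tx, hash, *num)
	_ = header
	return nil
}
```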

// WriteHeaderNumber stores the hash->number mapping.
func WriteHeaderNumber(db kv.Putter, hash libcommon.Hash, number uint64) error {

@@ -163,13 +184,6 @@ func WriteHeadBlockHash(db kv.Putter, hash libcommon.Hash) {
}
}

// DeleteHeaderNumber removes hash->number mapping.
func DeleteHeaderNumber(db kv.Deleter, hash libcommon.Hash) {
if err := db.Delete(kv.HeaderNumber, hash[:]); err != nil {
log.Crit("Failed to delete hash mapping", "err", err)
}
}

// ReadForkchoiceHead retrieves headBlockHash from the last Engine API forkChoiceUpdated.
func ReadForkchoiceHead(db kv.Getter) libcommon.Hash {
data, err := db.GetOne(kv.LastForkchoice, []byte("headBlockHash"))

@@ -242,14 +256,6 @@ func ReadHeaderRLP(db kv.Getter, hash libcommon.Hash, number uint64) rlp.RawValu
return data
}

// HasHeader verifies the existence of a block header corresponding to the hash.
func HasHeader(db kv.Has, hash libcommon.Hash, number uint64) bool {
if has, err := db.Has(kv.Headers, dbutils.HeaderKey(number, hash)); !has || err != nil {
return false
}
return true
}

// ReadHeader retrieves the block header corresponding to the hash.
func ReadHeader(db kv.Getter, hash libcommon.Hash, number uint64) *types.Header {
data := ReadHeaderRLP(db, hash, number)

@@ -278,27 +284,6 @@ func ReadCurrentHeader(db kv.Getter) *types.Header {
return ReadHeader(db, headHash, *headNumber)
}

func ReadCurrentBlock(db kv.Tx) *types.Block {
headHash := ReadHeadBlockHash(db)
headNumber := ReadHeaderNumber(db, headHash)
if headNumber == nil {
return nil
}
return ReadBlock(db, headHash, *headNumber)
}

func ReadLastBlockSynced(db kv.Tx) (*types.Block, error) {
headNumber, err := stages.GetStageProgress(db, stages.Execution)
if err != nil {
return nil, err
}
headHash, err := ReadCanonicalHash(db, headNumber)
if err != nil {
return nil, err
}
return ReadBlock(db, headHash, headNumber), nil
}

func ReadHeadersByNumber(db kv.Tx, number uint64) ([]*types.Header, error) {
var res []*types.Header
c, err := db.Cursor(kv.Headers)

@@ -360,7 +345,7 @@ func WriteHeaderRaw(db kv.StatelessRwTx, number uint64, hash libcommon.Hash, hea
return nil
}

// DeleteHeader - dangerous, use DeleteAncientBlocks/TruncateBlocks methods
// DeleteHeader - dangerous, use PruneBlocks/TruncateBlocks methods
func DeleteHeader(db kv.Deleter, hash libcommon.Hash, number uint64) {
if err := db.Delete(kv.Headers, dbutils.HeaderKey(number, hash)); err != nil {
log.Crit("Failed to delete header", "err", err)

@@ -372,22 +357,13 @@ func DeleteHeader(db kv.Deleter, hash libcommon.Hash, number uint64) {

// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func ReadBodyRLP(db kv.Tx, hash libcommon.Hash, number uint64) rlp.RawValue {
body := ReadCanonicalBodyWithTransactions(db, hash, number)
body, _ := ReadBodyWithTransactions(db, hash, number)
bodyRlp, err := rlp.EncodeToBytes(body)
if err != nil {
log.Error("ReadBodyRLP failed", "err", err)
}
return bodyRlp
}
func NonCanonicalBodyRLP(db kv.Tx, hash libcommon.Hash, number uint64) rlp.RawValue {
body := NonCanonicalBodyWithTransactions(db, hash, number)
bodyRlp, err := rlp.EncodeToBytes(body)
if err != nil {
log.Error("ReadBodyRLP failed", "err", err)
}
return bodyRlp
}

func ReadStorageBodyRLP(db kv.Getter, hash libcommon.Hash, number uint64) rlp.RawValue {
bodyRlp, err := db.GetOne(kv.BlockBody, dbutils.BlockBodyKey(number, hash))
if err != nil {

@@ -408,12 +384,10 @@ func ReadStorageBody(db kv.Getter, hash libcommon.Hash, number uint64) (types.Bo
return *bodyForStorage, nil
}

func CanonicalTxnByID(db kv.Getter, id uint64, blockHash libcommon.Hash) (types.Transaction, error) {
func CanonicalTxnByID(db kv.Getter, id uint64) (types.Transaction, error) {
txIdKey := make([]byte, 8)
binary.BigEndian.PutUint64(txIdKey, id)
var v []byte
var err error
v, err = db.GetOne(kv.EthTx, txIdKey)
v, err := db.GetOne(kv.EthTx, txIdKey)
if err != nil {
return nil, err
}

@@ -524,42 +498,16 @@ func ReadBodyByNumber(db kv.Tx, number uint64) (*types.Body, uint64, uint32, err
}

func ReadBodyWithTransactions(db kv.Getter, hash libcommon.Hash, number uint64) (*types.Body, error) {
canonicalHash, err := ReadCanonicalHash(db, number)
if err != nil {
return nil, fmt.Errorf("read canonical hash failed: %d, %w", number, err)
}
if canonicalHash == hash {
return ReadCanonicalBodyWithTransactions(db, hash, number), nil
}
return NonCanonicalBodyWithTransactions(db, hash, number), nil
}

func ReadCanonicalBodyWithTransactions(db kv.Getter, hash libcommon.Hash, number uint64) *types.Body {
body, baseTxId, txAmount := ReadBody(db, hash, number)
if body == nil {
return nil
return nil, nil
}
var err error
body.Transactions, err = CanonicalTransactions(db, baseTxId, txAmount)
if err != nil {
log.Error("failed ReadTransactionByHash", "hash", hash, "block", number, "err", err)
return nil
return nil, err
}
return body
}

func NonCanonicalBodyWithTransactions(db kv.Getter, hash libcommon.Hash, number uint64) *types.Body {
body, baseTxId, txAmount := ReadBody(db, hash, number)
if body == nil {
return nil
}
var err error
body.Transactions, err = NonCanonicalTransactions(db, baseTxId, txAmount)
if err != nil {
log.Error("failed ReadTransactionByHash", "hash", hash, "block", number, "err", err)
return nil
}
return body
return body, err
}
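After this hunk there is a single body-reading path: ReadBodyWithTransactions no longer branches on canonicity, because all transactions live in kv.EthTx. Since the interleaved old/new lines above are hard to read, here is the merged function reconstructed from the additions in this hunk; refer to the actual source for authority:

```go
// Reconstructed new version of ReadBodyWithTransactions after this change
// (assembled from the + lines of the hunk above; not authoritative).
func ReadBodyWithTransactions(db kv.Getter, hash libcommon.Hash, number uint64) (*types.Body, error) {
	body, baseTxId, txAmount := ReadBody(db, hash, number)
	if body == nil {
		return nil, nil
	}
	var err error
	body.Transactions, err = CanonicalTransactions(db, baseTxId, txAmount)
	if err != nil {
		log.Error("failed ReadTransactionByHash", "hash", hash, "block", number, "err", err)
		return nil, err
	}
	return body, err
}
```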

func RawTransactionsRange(db kv.Getter, from, to uint64) (res [][]byte, err error) {

@@ -695,7 +643,7 @@ func WriteRawBody(db kv.RwTx, hash libcommon.Hash, number uint64, body *types.Ra
return true, nil
}

func WriteBody(db kv.RwTx, hash libcommon.Hash, number uint64, body *types.Body) error {
func WriteBody(db kv.RwTx, hash libcommon.Hash, number uint64, body *types.Body) (err error) {
// Pre-processing
body.SendersFromTxs()
baseTxId, err := db.IncrementSequence(kv.EthTx, uint64(len(body.Transactions))+2)

@@ -708,11 +656,10 @@ func WriteBody(db kv.RwTx, hash libcommon.Hash, number uint64, body *types.Body)
Uncles: body.Uncles,
Withdrawals: body.Withdrawals,
}
if err := WriteBodyForStorage(db, hash, number, &data); err != nil {
if err = WriteBodyForStorage(db, hash, number, &data); err != nil {
return fmt.Errorf("failed to write body: %w", err)
}
err = WriteTransactions(db, body.Transactions, baseTxId+1)
if err != nil {
if err = WriteTransactions(db, body.Transactions, baseTxId+1); err != nil {
return fmt.Errorf("failed to WriteTransactions: %w", err)
}
return nil

@@ -758,8 +705,8 @@ func AppendCanonicalTxNums(tx kv.RwTx, from uint64) (err error) {
if len(data) == 0 {
break
}
bodyForStorage := new(types.BodyForStorage)
if err := rlp.DecodeBytes(data, bodyForStorage); err != nil {
bodyForStorage := types.BodyForStorage{}
if err := rlp.DecodeBytes(data, &bodyForStorage); err != nil {
return err
}

@@ -772,162 +719,6 @@ func AppendCanonicalTxNums(tx kv.RwTx, from uint64) (err error) {
return nil
}

// MakeBodiesCanonical - move all txs of non-canonical blocks from NonCanonicalTxs table to EthTx table
func MakeBodiesCanonical(tx kv.RwTx, from uint64, ctx context.Context, logPrefix string, logEvery *time.Ticker, cb func(blockNum uint64, lastTxnNum uint64) error) error {
for blockNum := from; ; blockNum++ {
h, err := ReadCanonicalHash(tx, blockNum)
if err != nil {
return err
}
if h == (libcommon.Hash{}) {
break
}

data := ReadStorageBodyRLP(tx, h, blockNum)
if len(data) == 0 {
break
}
bodyForStorage := new(types.BodyForStorage)
if err := rlp.DecodeBytes(data, bodyForStorage); err != nil {
return err
}
ethTx := kv.EthTx
newBaseId, err := tx.IncrementSequence(ethTx, uint64(bodyForStorage.TxAmount))
if err != nil {
return err
}

// next loop does move only non-system txs. need move system-txs manually (because they may not exist)
i := uint64(0)
if err := tx.ForAmount(kv.NonCanonicalTxs, hexutility.EncodeTs(bodyForStorage.BaseTxId+1), bodyForStorage.TxAmount-2, func(k, v []byte) error {
id := newBaseId + 1 + i
if err := tx.Put(kv.EthTx, hexutility.EncodeTs(id), v); err != nil {
return err
}
if err := tx.Delete(kv.NonCanonicalTxs, k); err != nil {
return err
}
i++
return nil
}); err != nil {
return err
}

bodyForStorage.BaseTxId = newBaseId
if err := WriteBodyForStorage(tx, h, blockNum, bodyForStorage); err != nil {
return err
}
if cb != nil {
lastTxnNum := bodyForStorage.BaseTxId + uint64(bodyForStorage.TxAmount)
if err = cb(blockNum, lastTxnNum); err != nil {
return err
}
}

select {
case <-ctx.Done():
return ctx.Err()
case <-logEvery.C:
log.Info(fmt.Sprintf("[%s] Making bodies canonical...", logPrefix), "current block", blockNum)
default:
}
}
return nil
}

// MakeBodiesNonCanonical - move all txs of canonical blocks to NonCanonicalTxs bucket
func MakeBodiesNonCanonical(tx kv.RwTx, from uint64, deleteBodies bool, ctx context.Context, logPrefix string, logEvery *time.Ticker) error {
var firstMovedTxnID uint64
var firstMovedTxnIDIsSet bool
for blockNum := from; ; blockNum++ {
h, err := ReadCanonicalHash(tx, blockNum)
if err != nil {
return err
}
if h == (libcommon.Hash{}) {
break
}
data := ReadStorageBodyRLP(tx, h, blockNum)
if len(data) == 0 {
break
}

bodyForStorage := new(types.BodyForStorage)
if err := rlp.DecodeBytes(data, bodyForStorage); err != nil {
return err
}
if !firstMovedTxnIDIsSet {
firstMovedTxnIDIsSet = true
firstMovedTxnID = bodyForStorage.BaseTxId
}

newBaseId := uint64(0)
if !deleteBodies {
// move txs to NonCanonical bucket, it has own sequence
newBaseId, err = tx.IncrementSequence(kv.NonCanonicalTxs, uint64(bodyForStorage.TxAmount))
if err != nil {
return err
}
}

// next loop does move only non-system txs. need move system-txs manually (because they may not exist)
i := uint64(0)
if err := tx.ForAmount(kv.EthTx, hexutility.EncodeTs(bodyForStorage.BaseTxId+1), bodyForStorage.TxAmount-2, func(k, v []byte) error {
if !deleteBodies {
id := newBaseId + 1 + i
if err := tx.Put(kv.NonCanonicalTxs, hexutility.EncodeTs(id), v); err != nil {
return err
}
}
if err := tx.Delete(kv.EthTx, k); err != nil {
return err
}
i++
return nil
}); err != nil {
return err
}

if deleteBodies {
DeleteBody(tx, h, blockNum)
} else {
bodyForStorage.BaseTxId = newBaseId
if err := WriteBodyForStorage(tx, h, blockNum, bodyForStorage); err != nil {
return err
}
}

select {
case <-ctx.Done():
return ctx.Err()
case <-logEvery.C:
log.Info(fmt.Sprintf("[%s] Unwinding transactions...", logPrefix), "current block", blockNum)
default:
}
}

// EthTx must have canonical id's - means need decrement it's sequence on unwind
if firstMovedTxnIDIsSet {
c, err := tx.Cursor(kv.EthTx)
if err != nil {
return err
}
k, _, err := c.Last()
if err != nil {
return err
}
if k != nil && binary.BigEndian.Uint64(k) >= firstMovedTxnID {
panic(fmt.Sprintf("must not happen, ResetSequence: %d, lastInDB: %d", firstMovedTxnID, binary.BigEndian.Uint64(k)))
}

if err := ResetSequence(tx, kv.EthTx, firstMovedTxnID); err != nil {
return err
}
}

return nil
}
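The two functions above are removed entirely: with canonical and non-canonical transactions sharing kv.EthTx, a re-org no longer copies transactions between buckets. As the blockio hunks later in this diff show, the writer-level replacements reduce to bookkeeping on the TxNums index. A sketch of what the new BlockWriter methods amount to (assembled from those later hunks, not a verbatim copy):

```go
// Sketch of the post-change MakeBodiesCanonical/MakeBodiesNonCanonical on
// blockio.BlockWriter: historyV3 gates the rawdbv3.TxNums bookkeeping;
// nothing is moved between transaction buckets anymore.
func (w *BlockWriter) MakeBodiesCanonical(tx kv.RwTx, from uint64) error {
	if w.historyV3 {
		if err := rawdb.AppendCanonicalTxNums(tx, from); err != nil {
			return err
		}
	}
	return nil
}

func (w *BlockWriter) MakeBodiesNonCanonical(tx kv.RwTx, from uint64) error {
	if w.historyV3 {
		if err := rawdbv3.TxNums.Truncate(tx, from); err != nil {
			return err
		}
	}
	return nil
}
```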

// ReadTd retrieves a block's total difficulty corresponding to the hash.
func ReadTd(db kv.Getter, hash libcommon.Hash, number uint64) (*big.Int, error) {
data, err := db.GetOne(kv.HeaderTD, dbutils.HeaderKey(number, hash))

@@ -974,15 +765,6 @@ func TruncateTd(tx kv.RwTx, blockFrom uint64) error {
return nil
}

// HasReceipts verifies the existence of all the transaction receipts belonging
// to a block.
func HasReceipts(db kv.Has, hash libcommon.Hash, number uint64) bool {
if has, err := db.Has(kv.Receipts, hexutility.EncodeTs(number)); !has || err != nil {
return false
}
return true
}

// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
// The receipt metadata fields are not guaranteed to be populated, so they
// should not be used. Use ReadReceipts instead if the metadata is needed.

@@ -1169,34 +951,13 @@ func ReadBlock(tx kv.Getter, hash libcommon.Hash, number uint64) *types.Block {
if header == nil {
return nil
}
body := ReadCanonicalBodyWithTransactions(tx, hash, number)
body, _ := ReadBodyWithTransactions(tx, hash, number)
if body == nil {
return nil
}
return types.NewBlockFromStorage(hash, header, body.Transactions, body.Uncles, body.Withdrawals)
}

func NonCanonicalBlockWithSenders(tx kv.Getter, hash libcommon.Hash, number uint64) (*types.Block, []libcommon.Address, error) {
header := ReadHeader(tx, hash, number)
if header == nil {
return nil, nil, fmt.Errorf("header not found for block %d, %x", number, hash)
}
body := NonCanonicalBodyWithTransactions(tx, hash, number)
if body == nil {
return nil, nil, fmt.Errorf("body not found for block %d, %x", number, hash)
}
block := types.NewBlockFromStorage(hash, header, body.Transactions, body.Uncles, body.Withdrawals)
senders, err := ReadSenders(tx, hash, number)
if err != nil {
return nil, nil, err
}
if len(senders) != block.Transactions().Len() {
return block, senders, nil // no senders is fine - will recover them on the fly
}
block.SendersToTxs(senders)
return block, senders, nil
}

// HasBlock - is more efficient than ReadBlock because doesn't read transactions.
// It's is not equivalent of HasHeader because headers and bodies written by different stages
func HasBlock(db kv.Getter, hash libcommon.Hash, number uint64) bool {

@@ -1222,17 +983,20 @@ func ReadBlockWithSenders(db kv.Getter, hash libcommon.Hash, number uint64) (*ty

// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db kv.RwTx, block *types.Block) error {
if err := WriteHeader(db, block.HeaderNoCopy()); err != nil {
return err
}
if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil {
return err
}
return WriteHeader(db, block.Header())
return nil
}

// DeleteAncientBlocks - delete [1, to) old blocks after moving it to snapshots.
// PruneBlocks - delete [1, to) old blocks after moving it to snapshots.
// keeps genesis in db: [1, to)
// doesn't change sequences of kv.EthTx and kv.NonCanonicalTxs
// doesn't delete Receipts, Senders, Canonical markers, TotalDifficulty
func DeleteAncientBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) error {
func PruneBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) error {
c, err := tx.Cursor(kv.Headers)
if err != nil {
return err

@@ -1250,7 +1014,6 @@ func DeleteAncientBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) erro
blockFrom := binary.BigEndian.Uint64(firstK)
stopAtBlock := cmp.Min(blockTo, blockFrom+uint64(blocksDeleteLimit))

var canonicalHash libcommon.Hash
var b *types.BodyForStorage

for k, _, err := c.Current(); k != nil; k, _, err = c.Next() {

@@ -1263,27 +1026,17 @@ func DeleteAncientBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) erro
break
}

canonicalHash, err = ReadCanonicalHash(tx, n)
if err != nil {
return err
}
isCanonical := bytes.Equal(k[8:], canonicalHash[:])

b, err = ReadBodyForStorageByKey(tx, k)
if err != nil {
return err
}
if b == nil {
log.Debug("DeleteAncientBlocks: block body not found", "height", n)
log.Debug("PruneBlocks: block body not found", "height", n)
} else {
txIDBytes := make([]byte, 8)
for txID := b.BaseTxId; txID < b.BaseTxId+uint64(b.TxAmount); txID++ {
binary.BigEndian.PutUint64(txIDBytes, txID)
bucket := kv.EthTx
if !isCanonical {
bucket = kv.NonCanonicalTxs
}
if err = tx.Delete(bucket, txIDBytes); err != nil {
if err = tx.Delete(kv.EthTx, txIDBytes); err != nil {
return err
}
}
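PruneBlocks (renamed from DeleteAncientBlocks) deletes at most blocksDeleteLimit block records per call, over the half-open range [1, blockTo), so callers are expected to invoke it in bounded batches once the data is covered by snapshots. A hedged usage sketch; the batch count is an illustrative assumption, not Erigon's prune-stage logic:

```go
// Sketch only: prune block data for [1, blockTo) in bounded steps within an
// open kv.RwTx. The limit caps how many block records a single call removes.
func pruneOldBlocks(tx kv.RwTx, blockTo uint64) error {
	const perCall = 100
	for i := 0; i < 10; i++ { // up to 10 * perCall blocks per invocation
		if err := rawdb.PruneBlocks(tx, blockTo, perCall); err != nil {
			return err
		}
	}
	return nil
}
```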
@@ -1311,127 +1064,46 @@ func DeleteAncientBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) erro
func TruncateBlocks(ctx context.Context, tx kv.RwTx, blockFrom uint64) error {
logEvery := time.NewTicker(20 * time.Second)
defer logEvery.Stop()

c, err := tx.Cursor(kv.Headers)
if err != nil {
return err
}
defer c.Close()
if blockFrom < 1 { //protect genesis
blockFrom = 1
}
sequenceTo := map[string]uint64{}
k, _, err := c.Last()
if err != nil {
return err
}
if k != nil {
n := binary.BigEndian.Uint64(k)
if n > 1 {
log.Info("TruncateBlocks", "block", n)
defer log.Info("TruncateBlocks done")
}
}
for ; k != nil; k, _, err = c.Prev() {
if err != nil {
return err
}
n := binary.BigEndian.Uint64(k)
if n < blockFrom { // [from, to)
break
}
canonicalHash, err := ReadCanonicalHash(tx, n)
if err != nil {
return err
}
isCanonical := bytes.Equal(k[8:], canonicalHash[:])

return tx.ForEach(kv.Headers, hexutility.EncodeTs(blockFrom), func(k, v []byte) error {
b, err := ReadBodyForStorageByKey(tx, k)
if err != nil {
return err
}
if b != nil {
bucket := kv.EthTx
if !isCanonical {
bucket = kv.NonCanonicalTxs
}
if err := tx.ForEach(bucket, hexutility.EncodeTs(b.BaseTxId), func(k, _ []byte) error {
select {
case <-ctx.Done():
return ctx.Err()
case <-logEvery.C:
log.Info("TruncateBlocks", "block", n)
default:
}

if err := tx.Delete(bucket, k); err != nil {
txIDBytes := make([]byte, 8)
for txID := b.BaseTxId; txID < b.BaseTxId+uint64(b.TxAmount); txID++ {
binary.BigEndian.PutUint64(txIDBytes, txID)
if err = tx.Delete(kv.EthTx, txIDBytes); err != nil {
return err
}
return nil
}); err != nil {
return err
}
sequenceTo[bucket] = b.BaseTxId
}
// Copying k because otherwise the same memory will be reused
// for the next key and Delete below will end up deleting 1 more record than required
kCopy := common.CopyBytes(k)
if err := tx.Delete(kv.Headers, kCopy); err != nil {
if err := tx.Delete(kv.Senders, kCopy); err != nil {
return err
}
if err := tx.Delete(kv.BlockBody, kCopy); err != nil {
return err
}
if err := tx.Delete(kv.Headers, kCopy); err != nil {
return err
}

select {
case <-ctx.Done():
return ctx.Err()
case <-logEvery.C:
log.Info("TruncateBlocks", "block", n)
log.Info("TruncateBlocks", "block", binary.BigEndian.Uint64(k))
default:
}
}
for bucket, sequence := range sequenceTo {
if err := ResetSequence(tx, bucket, sequence); err != nil {
return err
}
}

return nil
return nil
})
}

func ReadBlockByNumber(db kv.Tx, number uint64) (*types.Block, error) {
hash, err := ReadCanonicalHash(db, number)
if err != nil {
return nil, fmt.Errorf("failed ReadCanonicalHash: %w", err)
}
if hash == (libcommon.Hash{}) {
return nil, nil
}

return ReadBlock(db, hash, number), nil
}

func CanonicalBlockByNumberWithSenders(db kv.Tx, number uint64) (*types.Block, []libcommon.Address, error) {
hash, err := ReadCanonicalHash(db, number)
if err != nil {
return nil, nil, fmt.Errorf("failed ReadCanonicalHash: %w", err)
}
if hash == (libcommon.Hash{}) {
return nil, nil, nil
}

return ReadBlockWithSenders(db, hash, number)
}

func ReadBlockByHash(db kv.Tx, hash libcommon.Hash) (*types.Block, error) {
number := ReadHeaderNumber(db, hash)
if number == nil {
return nil, nil
}
return ReadBlock(db, hash, *number), nil
}

func ReadTotalIssued(db kv.Getter, number uint64) (*big.Int, error) {
data, err := db.GetOne(kv.Issuance, hexutility.EncodeTs(number))
if err != nil {

@@ -1495,58 +1167,6 @@ func ReadHeaderByHash(db kv.Getter, hash libcommon.Hash) (*types.Header, error)
return ReadHeader(db, hash, *number), nil
}

func ReadAncestor(db kv.Getter, hash libcommon.Hash, number, ancestor uint64, maxNonCanonical *uint64, blockReader services.HeaderAndCanonicalReader) (libcommon.Hash, uint64) {
if ancestor > number {
return libcommon.Hash{}, 0
}
if ancestor == 1 {
header, err := blockReader.Header(context.Background(), db, hash, number)
if err != nil {
panic(err)
}
// in this case it is cheaper to just read the header
if header != nil {
return header.ParentHash, number - 1
}
return libcommon.Hash{}, 0
}
for ancestor != 0 {
h, err := blockReader.CanonicalHash(context.Background(), db, number)
if err != nil {
panic(err)
}
if h == hash {
ancestorHash, err := blockReader.CanonicalHash(context.Background(), db, number-ancestor)
if err != nil {
panic(err)
}
h, err := blockReader.CanonicalHash(context.Background(), db, number)
if err != nil {
panic(err)
}
if h == hash {
number -= ancestor
return ancestorHash, number
}
}
if *maxNonCanonical == 0 {
return libcommon.Hash{}, 0
}
*maxNonCanonical--
ancestor--
header, err := blockReader.Header(context.Background(), db, hash, number)
if err != nil {
panic(err)
}
if header == nil {
return libcommon.Hash{}, 0
}
hash = header.ParentHash
number--
}
return hash, number
}

func DeleteNewerEpochs(tx kv.RwTx, number uint64) error {
if err := tx.ForEach(kv.PendingEpoch, hexutility.EncodeTs(number), func(k, v []byte) error {
return tx.Delete(kv.Epoch, k)

@@ -112,18 +112,20 @@ func TestBodyStorage(t *testing.T) {
hasher := sha3.NewLegacyKeccak256()
_ = rlp.Encode(hasher, body)
hash := libcommon.BytesToHash(hasher.Sum(nil))
header := &types.Header{Number: libcommon.Big1}

if entry, _ := br.BodyWithTransactions(ctx, tx, hash, 0); entry != nil {
if entry, _ := br.BodyWithTransactions(ctx, tx, header.Hash(), 1); entry != nil {
t.Fatalf("Non existent body returned: %v", entry)
}
require.NoError(rawdb.WriteCanonicalHash(tx, hash, 0))
require.NoError(bw.WriteBody(tx, hash, 0, body))
if entry, _ := br.BodyWithTransactions(ctx, tx, hash, 0); entry == nil {
require.NoError(rawdb.WriteCanonicalHash(tx, header.Hash(), 1))
require.NoError(bw.WriteHeader(tx, header))
require.NoError(bw.WriteBody(tx, header.Hash(), 1, body))
if entry, _ := br.BodyWithTransactions(ctx, tx, header.Hash(), 1); entry == nil {
t.Fatalf("Stored body not found")
} else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
}
if entry := rawdb.ReadBodyRLP(tx, hash, 0); entry == nil {
if entry := rawdb.ReadBodyRLP(tx, header.Hash(), 1); entry == nil {
//if entry, _ := br.BodyWithTransactions(ctx, tx, hash, 0); entry == nil {
t.Fatalf("Stored body RLP not found")
} else {

@@ -139,8 +141,8 @@ func TestBodyStorage(t *testing.T) {
}
}
// Delete the body and verify the execution
rawdb.DeleteBody(tx, hash, 0)
if entry, _ := br.BodyWithTransactions(ctx, tx, hash, 0); entry != nil {
rawdb.DeleteBody(tx, hash, 1)
if entry, _ := br.BodyWithTransactions(ctx, tx, hash, 1); entry != nil {
t.Fatalf("Deleted body returned: %v", entry)
}
}

@@ -148,8 +150,9 @@ func TestBodyStorage(t *testing.T) {
// Tests block storage and retrieval operations.
func TestBlockStorage(t *testing.T) {
m := stages.Mock(t)
require := require.New(t)
tx, err := m.DB.BeginRw(m.Ctx)
require.NoError(t, err)
require.NoError(err)
defer tx.Rollback()
ctx := m.Ctx
br, bw := m.NewBlocksIO()

@@ -213,12 +216,45 @@ func TestBlockStorage(t *testing.T) {
}

// write again and delete it as old one
if err := bw.WriteBlock(tx, block); err != nil {
t.Fatalf("Could not write block: %v", err)
}
if err := rawdb.DeleteAncientBlocks(tx, 0, 1); err != nil {
t.Fatal(err)
require.NoError(bw.WriteBlock(tx, block))

{
// mark chain as bad
// - it must be not available by hash
// - but available by hash+num - if read num from kv.BadHeaderNumber table
// - prune blocks: must delete Canonical/NonCanonical/BadBlocks also
foundBn, _ := br.BadHeaderNumber(ctx, tx, block.Hash())
require.Nil(foundBn)
found, _ := br.BlockByHash(ctx, tx, block.Hash())
require.NotNil(found)

err = rawdb.WriteCanonicalHash(tx, block.Hash(), block.NumberU64())
require.NoError(err)
err = rawdb.TruncateCanonicalHash(tx, block.NumberU64(), true)
require.NoError(err)
foundBlock, _ := br.BlockByHash(ctx, tx, block.Hash())
require.Nil(foundBlock)

foundBn = rawdb.ReadHeaderNumber(tx, block.Hash())
require.Nil(foundBn)
foundBn, _ = br.BadHeaderNumber(ctx, tx, block.Hash())
require.NotNil(foundBn)
foundBlock, _ = br.BlockByNumber(ctx, tx, *foundBn)
require.Nil(foundBlock)
foundBlock, _, _ = br.BlockWithSenders(ctx, tx, block.Hash(), *foundBn)
require.NotNil(foundBlock)
}

// prune: [1: N)
require.NoError(bw.PruneBlocks(ctx, tx, 0, 1))
entry, _ := br.BodyWithTransactions(ctx, tx, block.Hash(), block.NumberU64())
require.NotNil(entry)
require.NoError(bw.PruneBlocks(ctx, tx, 1, 1))
entry, _ = br.BodyWithTransactions(ctx, tx, block.Hash(), block.NumberU64())
require.NotNil(entry)
require.NoError(bw.PruneBlocks(ctx, tx, 2, 1))
entry, _ = br.BodyWithTransactions(ctx, tx, block.Hash(), block.NumberU64())
require.Nil(entry)
}

// Tests that partial block contents don't get reassembled into full blocks.

@@ -502,11 +538,11 @@ func TestBlockReceiptStorage(t *testing.T) {
// Tests block storage and retrieval operations with withdrawals.
func TestBlockWithdrawalsStorage(t *testing.T) {
m := stages.Mock(t)
require := require.New(t)
tx, err := m.DB.BeginRw(m.Ctx)
require.NoError(t, err)
require.NoError(err)
defer tx.Rollback()
br, bw := m.NewBlocksIO()
require := require.New(t)
ctx := context.Background()

// create fake withdrawals

@@ -549,6 +585,9 @@ func TestBlockWithdrawalsStorage(t *testing.T) {
// Write withdrawals to block
wBlock := types.NewBlockFromStorage(block.Hash(), block.Header(), block.Transactions(), block.Uncles(), withdrawals)

if err := bw.WriteHeader(tx, wBlock.HeaderNoCopy()); err != nil {
t.Fatalf("Could not write body: %v", err)
}
if err := bw.WriteBody(tx, wBlock.Hash(), wBlock.NumberU64(), wBlock.Body()); err != nil {
t.Fatalf("Could not write body: %v", err)
}

@@ -620,9 +659,16 @@ func TestBlockWithdrawalsStorage(t *testing.T) {
if err := bw.WriteBlock(tx, block); err != nil {
t.Fatalf("Could not write block: %v", err)
}
if err := rawdb.DeleteAncientBlocks(tx, 0, 1); err != nil {
t.Fatal(err)
}
// prune: [1: N)
require.NoError(bw.PruneBlocks(ctx, tx, 0, 1))
entry, _ = br.BodyWithTransactions(ctx, tx, block.Hash(), block.NumberU64())
require.NotNil(entry)
require.NoError(bw.PruneBlocks(ctx, tx, 1, 1))
entry, _ = br.BodyWithTransactions(ctx, tx, block.Hash(), block.NumberU64())
require.NotNil(entry)
require.NoError(bw.PruneBlocks(ctx, tx, 2, 1))
entry, _ = br.BodyWithTransactions(ctx, tx, block.Hash(), block.NumberU64())
require.Nil(entry)
}

// Tests pre-shanghai body to make sure withdrawals doesn't panic

@@ -63,99 +63,3 @@ func WriteTxLookupEntries(db kv.Putter, block *types.Block) {
func DeleteTxLookupEntry(db kv.Deleter, hash libcommon.Hash) error {
return db.Delete(kv.TxLookup, hash.Bytes())
}

// ReadTransactionByHash retrieves a specific transaction from the database, along with
// its added positional metadata.
func ReadTransactionByHash(db kv.Tx, hash libcommon.Hash) (types.Transaction, libcommon.Hash, uint64, uint64, error) {
blockNumber, err := ReadTxLookupEntry(db, hash)
if err != nil {
return nil, libcommon.Hash{}, 0, 0, err
}
if blockNumber == nil {
return nil, libcommon.Hash{}, 0, 0, nil
}
blockHash, err := ReadCanonicalHash(db, *blockNumber)
if err != nil {
return nil, libcommon.Hash{}, 0, 0, err
}
if blockHash == (libcommon.Hash{}) {
return nil, libcommon.Hash{}, 0, 0, nil
}
body := ReadCanonicalBodyWithTransactions(db, blockHash, *blockNumber)
if body == nil {
log.Error("Transaction referenced missing", "number", blockNumber, "hash", blockHash)
return nil, libcommon.Hash{}, 0, 0, nil
}
senders, err1 := ReadSenders(db, blockHash, *blockNumber)
if err1 != nil {
return nil, libcommon.Hash{}, 0, 0, err1
}
body.SendersToTxs(senders)
for txIndex, tx := range body.Transactions {
if tx.Hash() == hash {
return tx, blockHash, *blockNumber, uint64(txIndex), nil
}
}
log.Error("Transaction not found", "number", blockNumber, "hash", blockHash, "txhash", hash)
return nil, libcommon.Hash{}, 0, 0, nil
}

// ReadTransaction retrieves a specific transaction from the database, along with
// its added positional metadata.
func ReadTransaction(db kv.Tx, hash libcommon.Hash, blockNumber uint64) (types.Transaction, libcommon.Hash, uint64, uint64, error) {
blockHash, err := ReadCanonicalHash(db, blockNumber)
if err != nil {
return nil, libcommon.Hash{}, 0, 0, err
}
if blockHash == (libcommon.Hash{}) {
return nil, libcommon.Hash{}, 0, 0, nil
}
body := ReadCanonicalBodyWithTransactions(db, blockHash, blockNumber)
if body == nil {
log.Error("Transaction referenced missing", "number", blockNumber, "hash", blockHash)
return nil, libcommon.Hash{}, 0, 0, nil
}
senders, err1 := ReadSenders(db, blockHash, blockNumber)
if err1 != nil {
return nil, libcommon.Hash{}, 0, 0, err1
}
body.SendersToTxs(senders)
for txIndex, tx := range body.Transactions {
if tx.Hash() == hash {
return tx, blockHash, blockNumber, uint64(txIndex), nil
}
}
log.Error("Transaction not found", "number", blockNumber, "hash", blockHash, "txhash", hash)
return nil, libcommon.Hash{}, 0, 0, nil
}

func ReadReceipt(db kv.Tx, txHash libcommon.Hash) (*types.Receipt, libcommon.Hash, uint64, uint64, error) {
// Retrieve the context of the receipt based on the transaction hash
blockNumber, err := ReadTxLookupEntry(db, txHash)
if err != nil {
return nil, libcommon.Hash{}, 0, 0, err
}
if blockNumber == nil {
return nil, libcommon.Hash{}, 0, 0, nil
}
blockHash, err := ReadCanonicalHash(db, *blockNumber)
if err != nil {
return nil, libcommon.Hash{}, 0, 0, err
}
if blockHash == (libcommon.Hash{}) {
return nil, libcommon.Hash{}, 0, 0, nil
}
b, senders, err := ReadBlockWithSenders(db, blockHash, *blockNumber)
if err != nil {
return nil, libcommon.Hash{}, 0, 0, err
}
// Read all the receipts from the block and return the one with the matching hash
receipts := ReadReceipts(db, b, senders)
for receiptIndex, receipt := range receipts {
if receipt.TxHash == txHash {
return receipt, blockHash, *blockNumber, uint64(receiptIndex), nil
}
}
log.Error("Receipt not found", "number", blockNumber, "hash", blockHash, "txhash", txHash)
return nil, libcommon.Hash{}, 0, 0, nil
}

@@ -3,9 +3,7 @@ package blockio
import (
"context"
"encoding/binary"
"fmt"
"math/big"
"time"

"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"

@@ -33,18 +31,11 @@ type BlockWriter struct {
}

func NewBlockWriter(historyV3 bool) *BlockWriter {
return &BlockWriter{historyV3: historyV3, txsV3: false}
return &BlockWriter{historyV3: historyV3, txsV3: true}
}

func (w *BlockWriter) TxsV3Enabled() bool { return w.txsV3 }
func (w *BlockWriter) WriteBlock(tx kv.RwTx, block *types.Block) error {
if err := rawdb.WriteHeader(tx, block.HeaderNoCopy()); err != nil {
return err
}
if err := rawdb.WriteBody(tx, block.Hash(), block.NumberU64(), block.Body()); err != nil {
return err
}
return nil
return rawdb.WriteBlock(tx, block)
}
func (w *BlockWriter) WriteHeader(tx kv.RwTx, header *types.Header) error {
return rawdb.WriteHeader(tx, header)

@@ -81,23 +72,7 @@ func (w *BlockWriter) FillHeaderNumberIndex(logPrefix string, tx kv.RwTx, tmpDir
)
}

func (w *BlockWriter) MakeBodiesCanonical(tx kv.RwTx, from uint64, ctx context.Context, logPrefix string, logEvery *time.Ticker) error {
if !w.txsV3 {
// Property of blockchain: same block in different forks will have different hashes.
// Means - can mark all canonical blocks as non-canonical on unwind, and
// do opposite here - without storing any meta-info.
if err := rawdb.MakeBodiesCanonical(tx, from, ctx, logPrefix, logEvery, func(blockNum, lastTxnNum uint64) error {
if w.historyV3 {
if err := rawdbv3.TxNums.Append(tx, blockNum, lastTxnNum); err != nil {
return err
}
}
return nil
}); err != nil {
return fmt.Errorf("make block canonical: %w", err)
}
return nil
}
func (w *BlockWriter) MakeBodiesCanonical(tx kv.RwTx, from uint64) error {
if w.historyV3 {
if err := rawdb.AppendCanonicalTxNums(tx, from); err != nil {
return err

@@ -105,24 +80,7 @@ func (w *BlockWriter) MakeBodiesCanonical(tx kv.RwTx, from uint64, ctx context.C
}
return nil
}
func (w *BlockWriter) MakeBodiesNonCanonical(tx kv.RwTx, from uint64, deleteBodies bool, ctx context.Context, logPrefix string, logEvery *time.Ticker) error {
if !w.txsV3 {
if err := rawdb.MakeBodiesNonCanonical(tx, from, deleteBodies, ctx, logPrefix, logEvery); err != nil {
return err
}
if w.historyV3 {
if err := rawdbv3.TxNums.Truncate(tx, from); err != nil {
return err
}
}
return nil
}

//if deleteBodies {
//if err := rawdb.MakeBodiesNonCanonical(tx, from, deleteBodies, ctx, logPrefix, logEvery); err != nil {
//	return err
//}
//}
func (w *BlockWriter) MakeBodiesNonCanonical(tx kv.RwTx, from uint64) error {
if w.historyV3 {
if err := rawdbv3.TxNums.Truncate(tx, from); err != nil {
return err

@@ -183,5 +141,5 @@ func (w *BlockWriter) ResetSenders(ctx context.Context, db kv.RoDB, tx kv.RwTx)
// doesn't change sequences of kv.EthTx and kv.NonCanonicalTxs
// doesn't delete Receipts, Senders, Canonical markers, TotalDifficulty
func (w *BlockWriter) PruneBlocks(ctx context.Context, tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) error {
return rawdb.DeleteAncientBlocks(tx, blockTo, blocksDeleteLimit)
return rawdb.PruneBlocks(tx, blockTo, blocksDeleteLimit)
}
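Taken together, the BlockWriter becomes the single entry point that the call sites in this diff use for block persistence, re-org bookkeeping and pruning. A compact usage sketch that mirrors those call sites (genesis and test hunks above); it assumes an open kv.RwTx, a *types.Block, and abbreviated error handling:

```go
// Sketch of the BlockWriter call pattern introduced by this commit;
// not a verbatim copy of any single call site.
func writeAndUnwindExample(ctx context.Context, tx kv.RwTx, block *types.Block) error {
	histV3, err := kvcfg.HistoryV3.Enabled(tx)
	if err != nil {
		return err
	}
	bw := blockio.NewBlockWriter(histV3) // txsV3 is now always enabled

	if err := bw.WriteBlock(tx, block); err != nil { // header + body into the shared tables
		return err
	}
	// Unwind: only canonical markers / TxNums bookkeeping change.
	if err := bw.MakeBodiesNonCanonical(tx, block.NumberU64()); err != nil {
		return err
	}
	// Re-org forward again.
	if err := bw.MakeBodiesCanonical(tx, block.NumberU64()); err != nil {
		return err
	}
	// Prune old data once it is covered by snapshots: [1, blockTo), bounded per call.
	return bw.PruneBlocks(ctx, tx, block.NumberU64(), 100)
}
```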
|
||||
|
@ -279,7 +279,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
|
||||
genesisSpec = nil
|
||||
}
|
||||
var genesisErr error
|
||||
chainConfig, genesis, genesisErr = core.WriteGenesisBlock(tx, genesisSpec, config.OverrideShanghaiTime, tmpdir, logger)
|
||||
chainConfig, genesis, genesisErr = core.WriteGenesisBlock(tx, genesisSpec, config.OverrideShanghaiTime, tmpdir, logger, blockWriter)
|
||||
if _, ok := genesisErr.(*chain.ConfigCompatError); genesisErr != nil && !ok {
|
||||
return genesisErr
|
||||
}
|
||||
|
@ -86,7 +86,7 @@ func AnswerGetBlockHeadersQuery(db kv.Tx, query *GetBlockHeadersPacket, blockRea
|
||||
if ancestor == 0 {
|
||||
unknown = true
|
||||
} else {
|
||||
query.Origin.Hash, query.Origin.Number = rawdb.ReadAncestor(db, query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical, blockReader)
|
||||
query.Origin.Hash, query.Origin.Number = blockReader.ReadAncestor(db, query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
|
||||
unknown = query.Origin.Hash == libcommon.Hash{}
|
||||
}
|
||||
case hashMode && !query.Reverse:
|
||||
@ -107,7 +107,7 @@ func AnswerGetBlockHeadersQuery(db kv.Tx, query *GetBlockHeadersPacket, blockRea
|
||||
}
|
||||
if header != nil {
|
||||
nextHash := header.Hash()
|
||||
expOldHash, _ := rawdb.ReadAncestor(db, nextHash, next, query.Skip+1, &maxNonCanonical, blockReader)
|
||||
expOldHash, _ := blockReader.ReadAncestor(db, nextHash, next, query.Skip+1, &maxNonCanonical)
|
||||
if expOldHash == query.Origin.Hash {
|
||||
query.Origin.Hash, query.Origin.Number = nextHash, next
|
||||
} else {
|
||||
|
@ -24,7 +24,7 @@ func DefaultStages(ctx context.Context, snapshots SnapshotsCfg, headers HeadersC
|
||||
return nil
|
||||
},
|
||||
Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx, logger log.Logger) error {
|
||||
return SnapshotsPrune(p, snapshots, ctx, tx)
|
||||
return SnapshotsPrune(p, firstCycle, snapshots, ctx, tx)
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -114,7 +114,7 @@ func BodiesForward(
|
||||
// Property of blockchain: same block in different forks will have different hashes.
|
||||
// Means - can mark all canonical blocks as non-canonical on unwind, and
|
||||
// do opposite here - without storing any meta-info.
|
||||
if err := cfg.blockWriter.MakeBodiesCanonical(tx, s.BlockNumber+1, ctx, logPrefix, logEvery); err != nil {
|
||||
if err := cfg.blockWriter.MakeBodiesCanonical(tx, s.BlockNumber+1); err != nil {
|
||||
return fmt.Errorf("make block canonical: %w", err)
|
||||
}
@ -342,8 +342,7 @@ func UnwindBodiesStage(u *UnwindState, tx kv.RwTx, cfg BodiesCfg, ctx context.Co
logEvery := time.NewTicker(logInterval)
defer logEvery.Stop()

badBlock := u.BadBlock != (libcommon.Hash{})
if err := cfg.blockWriter.MakeBodiesNonCanonical(tx, u.UnwindPoint+1, badBlock /* deleteBodies */, ctx, u.LogPrefix(), logEvery); err != nil {
if err := cfg.blockWriter.MakeBodiesNonCanonical(tx, u.UnwindPoint+1); err != nil {
return err
}

@ -2,12 +2,14 @@ package stagedsync_test

import (
"bytes"
"math/big"
"testing"
"time"

libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/u256"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
stages2 "github.com/ledgerwatch/erigon/turbo/stages"
"github.com/stretchr/testify/require"

@ -34,50 +36,98 @@ func TestBodiesUnwind(t *testing.T) {
defer logEvery.Stop()

b := &types.RawBody{Transactions: [][]byte{rlpTxn, rlpTxn, rlpTxn}}
h := &types.Header{}
for i := uint64(1); i <= 10; i++ {
_, err = bw.WriteRawBodyIfNotExists(tx, libcommon.Hash{byte(i)}, i, b)
h.Number = big.NewInt(int64(i))
hash := h.Hash()
err = bw.WriteHeader(tx, h)
require.NoError(err)
err = rawdb.WriteCanonicalHash(tx, libcommon.Hash{byte(i)}, i)
err = rawdb.WriteCanonicalHash(tx, hash, i)
require.NoError(err)
_, err = bw.WriteRawBodyIfNotExists(tx, hash, i, b)
require.NoError(err)
}
{
err = bw.MakeBodiesNonCanonical(tx, 5+1, false, m.Ctx, "test", logEvery) // block 5 already canonical, start from next one
require.NoError(err)

err = bw.MakeBodiesCanonical(tx, 1)
require.NoError(err)

{
n, err := tx.ReadSequence(kv.EthTx)
require.NoError(err)
require.Equal(2+5*(3+2), int(n)) // genesis 2 system txs + from 1, 5 block with 3 txn in each
require.Equal(2+10*(3+2), int(n)) // genesis 2 system txs + from 1, 10 block with 3 txn in each

if m.HistoryV3 {
lastBlockNum, lastTxNum, err := rawdbv3.TxNums.Last(tx)
require.NoError(err)
require.Equal(10, int(lastBlockNum))
require.Equal(1+10*(3+2), int(lastTxNum))
}

err = bw.MakeBodiesNonCanonical(tx, 5+1) // block 5 already canonical, start from next one
require.NoError(err)

n, err = tx.ReadSequence(kv.EthTx)
require.NoError(err)
require.Equal(2+10*(3+2), int(n)) // genesis 2 system txs + from 1, 5 block with 3 txn in each

if m.HistoryV3 {
lastBlockNum, lastTxNum, err := rawdbv3.TxNums.Last(tx)
require.NoError(err)
require.Equal(5, int(lastBlockNum))
require.Equal(1+5*(3+2), int(lastTxNum))
}
}
{
err = bw.MakeBodiesCanonical(tx, 5+1, m.Ctx, "test", logEvery) // block 5 already canonical, start from next one
require.NoError(err)
n, err := tx.ReadSequence(kv.EthTx)
require.NoError(err)
require.Equal(2+10*(3+2), int(n))

_, err = bw.WriteRawBodyIfNotExists(tx, libcommon.Hash{11}, 11, b)
require.NoError(err)
err = rawdb.WriteCanonicalHash(tx, libcommon.Hash{11}, 11)
require.NoError(err)

err = bw.MakeBodiesCanonical(tx, 5+1) // block 5 already canonical, start from next one
require.NoError(err)
n, err := tx.ReadSequence(kv.EthTx)
require.NoError(err)
require.Equal(2+11*(3+2), int(n))

n, err = tx.ReadSequence(kv.EthTx)
require.NoError(err)
require.Equal(2+11*(3+2), int(n))

if m.HistoryV3 {
lastBlockNum, lastTxNum, err := rawdbv3.TxNums.Last(tx)
require.NoError(err)
require.Equal(11, int(lastBlockNum))
require.Equal(1+11*(3+2), int(lastTxNum))
}
}

{
// unwind to block 5, means mark blocks >= 6 as non-canonical
err = bw.MakeBodiesNonCanonical(tx, 5+1, false, m.Ctx, "test", logEvery)
err = bw.MakeBodiesNonCanonical(tx, 5+1)
require.NoError(err)

n, err := tx.ReadSequence(kv.EthTx)
require.NoError(err)
require.Equal(2+5*(3+2), int(n)) // from 0, 5 block with 3 txn in each
require.Equal(2+11*(3+2), int(n)) // from 0, 5 block with 3 txn in each

err = bw.MakeBodiesCanonical(tx, 5+1, m.Ctx, "test", logEvery) // block 5 already canonical, start from next one
if m.HistoryV3 {
lastBlockNum, lastTxNum, err := rawdbv3.TxNums.Last(tx)
require.NoError(err)
require.Equal(5, int(lastBlockNum))
require.Equal(1+5*(3+2), int(lastTxNum))
}

err = bw.MakeBodiesCanonical(tx, 5+1) // block 5 already canonical, start from next one
require.NoError(err)
n, err = tx.ReadSequence(kv.EthTx)
require.NoError(err)
require.Equal(2+11*(3+2), int(n))

if m.HistoryV3 {
lastBlockNum, lastTxNum, err := rawdbv3.TxNums.Last(tx)
require.NoError(err)
require.Equal(11, int(lastBlockNum))
require.Equal(1+11*(3+2), int(lastTxNum))
}
}
}
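
One way to read the assertions in this test: the genesis block reserves 2 system tx IDs, and every later block reserves 3 user txs plus 2 system txs (leading and trailing), so 5 IDs per block. With the new layout the kv.EthTx ID sequence only grows across re-orgs, while rawdbv3.TxNums tracks the canonical chain and does shrink on unwind. A tiny sketch restating that arithmetic (hypothetical helper names, just mirroring the numbers asserted above):

package main

import "fmt"

// expectedEthTxSeq mirrors the sequence assertions: 2 genesis system txs plus
// 5 reserved IDs per written block, regardless of later unwinds.
func expectedEthTxSeq(writtenBlocks int) int { return 2 + writtenBlocks*(3+2) }

// expectedLastTxNum mirrors the rawdbv3.TxNums assertions for the canonical
// chain only, which is why it drops back after an unwind to block 5.
func expectedLastTxNum(canonicalBlocks int) int { return 1 + canonicalBlocks*(3+2) }

func main() {
	fmt.Println(expectedEthTxSeq(10), expectedEthTxSeq(11))  // 52 57 - sequence only grows
	fmt.Println(expectedLastTxNum(5), expectedLastTxNum(11)) // 26 56 - follows unwind/re-canonicalization
}
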
@ -13,6 +13,7 @@ import (
"github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon/turbo/services"
"github.com/ledgerwatch/log/v3"

common2 "github.com/ledgerwatch/erigon/common"

@ -22,7 +23,6 @@ import (
"github.com/ledgerwatch/erigon/ethdb/cbor"
"github.com/ledgerwatch/erigon/params"
"github.com/ledgerwatch/erigon/turbo/engineapi"
"github.com/ledgerwatch/erigon/turbo/services"
)

type FinishCfg struct {

@ -1030,7 +1030,7 @@ func HeadersUnwind(u *UnwindState, s *StageState, tx kv.RwTx, cfg HeadersCfg, te
return fmt.Errorf("iterate over headers to mark bad headers: %w", err)
}
}
if err := rawdb.TruncateCanonicalHash(tx, u.UnwindPoint+1, false /* deleteHeaders */); err != nil {
if err := rawdb.TruncateCanonicalHash(tx, u.UnwindPoint+1, badBlock); err != nil {
return err
}
if badBlock {

@ -158,21 +158,18 @@ func TestSenders(t *testing.T) {
}

{
if br.TxsV3Enabled() {
c, _ := tx.Cursor(kv.EthTx)
cnt, _ := c.Count()
assert.Equal(t, 5, int(cnt))
} else {
txs, err := rawdb.CanonicalTransactions(tx, 1, 2)
assert.NoError(t, err)
assert.Equal(t, 2, len(txs))
txs, err = rawdb.CanonicalTransactions(tx, 5, 3)
assert.NoError(t, err)
assert.Equal(t, 3, len(txs))
txs, err = rawdb.CanonicalTransactions(tx, 5, 1024)
assert.NoError(t, err)
assert.Equal(t, 3, len(txs))
}
c, _ := tx.Cursor(kv.EthTx)
cnt, _ := c.Count()
assert.Equal(t, 5, int(cnt))

txs, err := rawdb.CanonicalTransactions(tx, 1, 2)
assert.NoError(t, err)
assert.Equal(t, 2, len(txs))
txs, err = rawdb.CanonicalTransactions(tx, 5, 3)
assert.NoError(t, err)
assert.Equal(t, 3, len(txs))
txs, err = rawdb.CanonicalTransactions(tx, 5, 1024)
assert.NoError(t, err)
assert.Equal(t, 3, len(txs))
}
}

@ -484,7 +484,7 @@ func calculateTime(amountLeft, rate uint64) string {
/* ====== PRUNING ====== */
// snapshots pruning sections works more as a retiring of blocks
// retiring blocks means moving block data from db into snapshots
func SnapshotsPrune(s *PruneState, cfg SnapshotsCfg, ctx context.Context, tx kv.RwTx) (err error) {
func SnapshotsPrune(s *PruneState, initialCycle bool, cfg SnapshotsCfg, ctx context.Context, tx kv.RwTx) (err error) {
useExternalTx := tx != nil
if !useExternalTx {
tx, err = cfg.db.BeginRw(ctx)
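
The comment above the signature change describes pruning here as "retiring": block data is not thrown away, it is moved out of the mutable database into immutable snapshot files. A toy illustration of that idea (invented names, not the real SnapshotsPrune):

package main

import "fmt"

type block struct {
	num  uint64
	body string
}

// retire moves blocks at or below the frozen height out of the mutable db
// and appends them to the immutable snapshot segment, so pruning is really
// "retiring" data rather than discarding it.
func retire(db map[uint64]block, snapshot *[]block, frozenUpTo uint64) {
	for n, b := range db {
		if n <= frozenUpTo {
			*snapshot = append(*snapshot, b)
			delete(db, n)
		}
	}
}

func main() {
	db := map[uint64]block{1: {1, "a"}, 2: {2, "b"}, 3: {3, "c"}}
	var snap []block
	retire(db, &snap, 2)
	fmt.Println(len(db), len(snap)) // 1 2: blocks 1-2 now live only in the snapshot
}
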
go.mod
@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon
go 1.19

require (
github.com/ledgerwatch/erigon-lib v0.0.0-20230603004413-000eeb04faea
github.com/ledgerwatch/erigon-lib v0.0.0-20230603101257-079c50c8e7e9
github.com/ledgerwatch/erigon-snapshot v1.2.1-0.20230605042354-196538d42475
github.com/ledgerwatch/log/v3 v3.8.0
github.com/ledgerwatch/secp256k1 v1.0.0

go.sum
@ -445,8 +445,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/ledgerwatch/erigon-lib v0.0.0-20230603004413-000eeb04faea h1:/e+68/pCCffOumNnh2IiIWFKrahLD+/D223QtB+Kfe8=
github.com/ledgerwatch/erigon-lib v0.0.0-20230603004413-000eeb04faea/go.mod h1:FhbowXTrC1rT1Les246ls279E7EtF05T/3AWbKN3oI8=
github.com/ledgerwatch/erigon-lib v0.0.0-20230603101257-079c50c8e7e9 h1:1rVUe0LKXOlxQBZWqukakqmEmhdmyT+OmPHb1EHLKh8=
github.com/ledgerwatch/erigon-lib v0.0.0-20230603101257-079c50c8e7e9/go.mod h1:FhbowXTrC1rT1Les246ls279E7EtF05T/3AWbKN3oI8=
github.com/ledgerwatch/erigon-snapshot v1.2.0 h1:Pf6eu5XqB29Mlg3oY9zxZ8qenSi2azgcwuNRDvV2rAM=
github.com/ledgerwatch/erigon-snapshot v1.2.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo=
github.com/ledgerwatch/erigon-snapshot v1.2.1-0.20230605042354-196538d42475 h1:1BvWA6agTUS4RZUHx79f45HpvelMVv4iEddaURUYcC8=

@ -34,6 +34,7 @@ var migrations = map[kv.Label][]Migration{
kv.ChainDB: {
dbSchemaVersion5,
TxsBeginEnd,
TxsV3,
},
kv.TxPoolDB: {},
kv.SentryDB: {},

migrations/txs_v3.go (new file)
@ -0,0 +1,91 @@
package migrations

import (
"bytes"
"context"
"encoding/binary"
"runtime"
"time"

common2 "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/log/v3"

"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/core/rawdb"
)

var TxsV3 = Migration{
Name: "txs_v3",
Up: func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback, logger log.Logger) (err error) {
logEvery := time.NewTicker(10 * time.Second)
defer logEvery.Stop()

// in TxsV3 format canonical and non-canonical blocks are stored in the same table: kv.EthTx

txIDBytes := make([]byte, 8)

// just delete all non-canonical blocks.
if err := db.Update(context.Background(), func(tx kv.RwTx) error {
from := hexutility.EncodeTs(1) // protect genesis
if err := tx.ForEach(kv.BlockBody, from, func(k, _ []byte) error {
blockNum := binary.BigEndian.Uint64(k)
select {
case <-logEvery.C:
var m runtime.MemStats
dbg.ReadMemStats(&m)
logger.Info("[txs_v3] Migration progress", "block_num", blockNum,
"alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys))
default:
}

canonicalHash, err := rawdb.ReadCanonicalHash(tx, blockNum)
if err != nil {
return err
}
isCanonical := bytes.Equal(k[8:], canonicalHash[:])
if isCanonical {
return nil // skip
}
b, err := rawdb.ReadBodyForStorageByKey(tx, k)
if err != nil {
return err
}
if b == nil {
log.Debug("PruneBlocks: block body not found", "height", blockNum)
} else {
for txID := b.BaseTxId; txID < b.BaseTxId+uint64(b.TxAmount); txID++ {
binary.BigEndian.PutUint64(txIDBytes, txID)
if err = tx.Delete(kv.NonCanonicalTxs, txIDBytes); err != nil {
return err
}
}
}
// Copying k because otherwise the same memory will be reused
// for the next key and Delete below will end up deleting 1 more record than required
kCopy := common.CopyBytes(k)
if err = tx.Delete(kv.Senders, kCopy); err != nil {
return err
}
if err = tx.Delete(kv.BlockBody, kCopy); err != nil {
return err
}
if err = tx.Delete(kv.Headers, kCopy); err != nil {
return err
}
return nil
}); err != nil {
return err
}

// This migration is no-op, but it forces the migration mechanism to apply it and thus write the DB schema version info
return BeforeCommit(tx, nil, true)
}); err != nil {
return err
}
return nil
},
}
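
The migration can decide canonicity without any extra metadata because BlockBody keys are the 8-byte big-endian block number followed by the 32-byte block hash, so `bytes.Equal(k[8:], canonicalHash[:])` is all it takes. A minimal, self-contained sketch of that key layout and check (toy map instead of the real kv tables):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// bodyKey builds the 40-byte BlockBody key used above: 8-byte big-endian
// block number followed by the 32-byte block hash.
func bodyKey(num uint64, hash [32]byte) []byte {
	k := make([]byte, 8+32)
	binary.BigEndian.PutUint64(k, num)
	copy(k[8:], hash[:])
	return k
}

// isCanonical mirrors the check in the migration: a body is canonical when the
// hash embedded in its key matches the canonical hash stored for that height.
func isCanonical(k []byte, canonical map[uint64][32]byte) bool {
	num := binary.BigEndian.Uint64(k[:8])
	h := canonical[num]
	return bytes.Equal(k[8:], h[:])
}

func main() {
	canon := map[uint64][32]byte{7: {0xaa}}
	fmt.Println(isCanonical(bodyKey(7, [32]byte{0xaa}), canon)) // true  -> kept
	fmt.Println(isCanonical(bodyKey(7, [32]byte{0xbb}), canon)) // false -> deleted by the migration
}
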
migrations/txs_v3_test.go (new file)
@ -0,0 +1,189 @@
package migrations_test

import (
"bytes"
"context"
"encoding/binary"
"fmt"
"math/big"
"testing"
"time"

libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/hexutility"
"github.com/ledgerwatch/erigon-lib/common/u256"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/memdb"
"github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/migrations"
"github.com/ledgerwatch/erigon/rlp"
"github.com/ledgerwatch/log/v3"
"github.com/stretchr/testify/require"

"github.com/ledgerwatch/erigon/core/rawdb"
"github.com/ledgerwatch/erigon/core/types"
)

func TestTxsV3(t *testing.T) {
require, tmpDir, db := require.New(t), t.TempDir(), memdb.NewTestDB(t)
txn := &types.DynamicFeeTransaction{Tip: u256.N1, FeeCap: u256.N1, CommonTx: types.CommonTx{ChainID: u256.N1, Value: u256.N1, Gas: 1, Nonce: 1}}
buf := bytes.NewBuffer(nil)
err := txn.MarshalBinary(buf)
require.NoError(err)
rlpTxn := buf.Bytes()
logEvery := time.NewTicker(10 * time.Second)
defer logEvery.Stop()

b := &types.RawBody{Transactions: [][]byte{rlpTxn, rlpTxn, rlpTxn}}
err = db.Update(context.Background(), func(tx kv.RwTx) error {
for i := uint64(1); i < 10; i++ {
h := &types.Header{Number: big.NewInt(int64(i)), Extra: []byte("fork1")}
hash := h.Hash()
err = rawdb.WriteCanonicalHash(tx, hash, i)
require.NoError(err)
err = rawdb.WriteHeader(tx, h)
require.NoError(err)
_, err = rawdb.WriteRawBody(tx, hash, i, b)
require.NoError(err)
}

err = makeBodiesNonCanonicalDeprecated(tx, 7, context.Background(), "", logEvery)
require.NoError(err)

for i := uint64(7); i < 10; i++ {
require.NoError(err)
h := &types.Header{Number: big.NewInt(int64(i)), Extra: []byte("fork2")}
hash := h.Hash()
err = rawdb.WriteCanonicalHash(tx, hash, i)
require.NoError(err)
err = rawdb.WriteHeader(tx, h)
require.NoError(err)
_, err = rawdb.WriteRawBody(tx, hash, i, b)
require.NoError(err)
}
return nil
})
require.NoError(err)

migrator := migrations.NewMigrator(kv.ChainDB)
migrator.Migrations = []migrations.Migration{migrations.TxsV3}
logger := log.New()
err = migrator.Apply(db, tmpDir, logger)
require.NoError(err)

//err = db.View(context.Background(), func(tx kv.Tx) error {
//	v, err := tx.ReadSequence(kv.EthTx)
//	require.NoError(err)
//	require.Equal(uint64(3*10+2*10), v)
//	return nil
//})
//require.NoError(err)

err = db.View(context.Background(), func(tx kv.Tx) error {
for i := uint64(7); i < 10; i++ {
h := &types.Header{Number: big.NewInt(int64(i)), Extra: []byte("fork1")}
hash := h.Hash()

has, err := tx.Has(kv.BlockBody, dbutils.BlockBodyKey(i, hash))
require.NoError(err)
require.False(has)
}

c, err := tx.Cursor(kv.NonCanonicalTxs)
require.NoError(err)
cnt, err := c.Count()
require.NoError(err)
require.Zero(cnt)

has, err := tx.Has(kv.EthTx, hexutility.EncodeTs(0))
require.NoError(err)
require.False(has)

return nil
})
require.NoError(err)
}

func makeBodiesNonCanonicalDeprecated(tx kv.RwTx, from uint64, ctx context.Context, logPrefix string, logEvery *time.Ticker) error {
var firstMovedTxnID uint64
var firstMovedTxnIDIsSet bool
for blockNum := from; ; blockNum++ {
h, err := rawdb.ReadCanonicalHash(tx, blockNum)
if err != nil {
return err
}
if h == (libcommon.Hash{}) {
break
}
data := rawdb.ReadStorageBodyRLP(tx, h, blockNum)
if len(data) == 0 {
break
}

bodyForStorage := new(types.BodyForStorage)
if err := rlp.DecodeBytes(data, bodyForStorage); err != nil {
return err
}
if !firstMovedTxnIDIsSet {
firstMovedTxnIDIsSet = true
firstMovedTxnID = bodyForStorage.BaseTxId
}

newBaseId := uint64(0)

// move txs to NonCanonical bucket, it has own sequence
newBaseId, err = tx.IncrementSequence(kv.NonCanonicalTxs, uint64(bodyForStorage.TxAmount))
if err != nil {
return err
}

// next loop does move only non-system txs. need move system-txs manually (because they may not exist)
i := uint64(0)
if err := tx.ForAmount(kv.EthTx, hexutility.EncodeTs(bodyForStorage.BaseTxId+1), bodyForStorage.TxAmount-2, func(k, v []byte) error {
id := newBaseId + 1 + i
if err := tx.Put(kv.NonCanonicalTxs, hexutility.EncodeTs(id), v); err != nil {
return err
}
if err := tx.Delete(kv.EthTx, k); err != nil {
return err
}
i++
return nil
}); err != nil {
return err
}

bodyForStorage.BaseTxId = newBaseId
if err := rawdb.WriteBodyForStorage(tx, h, blockNum, bodyForStorage); err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
case <-logEvery.C:
log.Info(fmt.Sprintf("[%s] Unwinding transactions...", logPrefix), "current block", blockNum)
default:
}
}

// EthTx must have canonical id's - means need decrement it's sequence on unwind
if firstMovedTxnIDIsSet {
c, err := tx.Cursor(kv.EthTx)
if err != nil {
return err
}
k, _, err := c.Last()
if err != nil {
return err
}
if k != nil && binary.BigEndian.Uint64(k) >= firstMovedTxnID {
panic(fmt.Sprintf("must not happen, ResetSequence: %d, lastInDB: %d", firstMovedTxnID, binary.BigEndian.Uint64(k)))
}

if err := rawdb.ResetSequence(tx, kv.EthTx, firstMovedTxnID); err != nil {
return err
}
}

return nil
}
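
This deprecated helper shows the bookkeeping the new layout removes: under the old scheme, unwinding meant copying a block's transactions into a separate kv.NonCanonicalTxs bucket with its own ID sequence and then rewinding the kv.EthTx sequence. A toy model of those two sequences (invented names, only to illustrate why this was expensive compared to flipping canonical marks):

package main

import "fmt"

// a toy model of the deprecated scheme: two separate ID sequences, and an
// unwind that moves a block's txs from the canonical to the non-canonical one.
type seqs struct {
	ethTx    uint64 // next free ID in the canonical table
	nonCanon uint64 // next free ID in the non-canonical table
}

// unwindBlock pretends a block with txAmount IDs starting at baseTxID is being
// made non-canonical: its IDs are re-allocated from the non-canonical sequence
// and the canonical sequence is reset so the new fork can reuse the old IDs.
func (s *seqs) unwindBlock(baseTxID, txAmount uint64) (newBaseID uint64) {
	newBaseID = s.nonCanon
	s.nonCanon += txAmount // analogous to IncrementSequence(kv.NonCanonicalTxs, ...)
	s.ethTx = baseTxID     // analogous to ResetSequence(tx, kv.EthTx, firstMovedTxnID)
	return newBaseID
}

func main() {
	s := &seqs{ethTx: 57, nonCanon: 0}
	newBase := s.unwindBlock(52, 5)           // unwind one block of 5 txs (3 user + 2 system)
	fmt.Println(newBase, s.ethTx, s.nonCanon) // 0 52 5
}
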
@ -25,10 +25,12 @@ type HeaderReader interface {
Header(ctx context.Context, tx kv.Getter, hash common.Hash, blockNum uint64) (*types.Header, error)
HeaderByNumber(ctx context.Context, tx kv.Getter, blockNum uint64) (*types.Header, error)
HeaderByHash(ctx context.Context, tx kv.Getter, hash common.Hash) (*types.Header, error)
ReadAncestor(db kv.Getter, hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64)
}

type CanonicalReader interface {
CanonicalHash(ctx context.Context, tx kv.Getter, blockNum uint64) (common.Hash, error)
BadHeaderNumber(ctx context.Context, tx kv.Getter, hash common.Hash) (blockHeight *uint64, err error)
}

type BodyReader interface {

@ -61,7 +63,6 @@ type FullBlockReader interface {
TxnReader
CanonicalReader

TxsV3Enabled() bool
Snapshots() BlockSnapshots
}

@ -35,6 +35,14 @@ func (r *RemoteBlockReader) CurrentBlock(db kv.Tx) (*types.Block, error) {
func (r *RemoteBlockReader) RawTransactions(ctx context.Context, tx kv.Getter, fromBlock, toBlock uint64) (txs [][]byte, err error) {
panic("not implemented")
}
func (r *RemoteBlockReader) ReadAncestor(db kv.Getter, hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
panic("not implemented")
}

func (r *RemoteBlockReader) BadHeaderNumber(ctx context.Context, tx kv.Getter, hash common.Hash) (blockHeight *uint64, err error) {
return rawdb.ReadBadHeaderNumber(tx, hash)
}

func (r *RemoteBlockReader) BlockByNumber(ctx context.Context, db kv.Tx, number uint64) (*types.Block, error) {
hash, err := r.CanonicalHash(ctx, db, number)
if err != nil {

@ -87,9 +95,6 @@ func (r *RemoteBlockReader) CanonicalHash(ctx context.Context, tx kv.Getter, blo
func NewRemoteBlockReader(client remote.ETHBACKENDClient) *RemoteBlockReader {
return &RemoteBlockReader{client}
}
func (r *RemoteBlockReader) TxsV3Enabled() bool {
panic("not implemented")
}

func (r *RemoteBlockReader) TxnLookup(ctx context.Context, tx kv.Getter, txnHash common.Hash) (uint64, bool, error) {
reply, err := r.client.TxnLookup(ctx, &remote.TxnLookupRequest{TxnHash: gointerfaces.ConvertHashToH256(txnHash)})

@ -126,6 +131,7 @@ func (r *RemoteBlockReader) TxnByIdxInBlock(ctx context.Context, tx kv.Getter, b
func (r *RemoteBlockReader) HasSenders(ctx context.Context, _ kv.Getter, hash common.Hash, blockHeight uint64) (bool, error) {
panic("HasSenders is low-level method, don't use it in RPCDaemon")
}

func (r *RemoteBlockReader) BlockWithSenders(ctx context.Context, _ kv.Getter, hash common.Hash, blockHeight uint64) (block *types.Block, senders []common.Address, err error) {
reply, err := r.client.Block(ctx, &remote.BlockRequest{BlockHash: gointerfaces.ConvertHashToH256(hash), BlockHeight: blockHeight})
if err != nil {

@ -197,7 +203,7 @@ type BlockReader struct {
}

func NewBlockReader(snapshots services.BlockSnapshots) *BlockReader {
return &BlockReader{sn: snapshots.(*RoSnapshots), TransactionsV3: false}
return &BlockReader{sn: snapshots.(*RoSnapshots), TransactionsV3: true}
}

func (r *BlockReader) Snapshots() services.BlockSnapshots { return r.sn }

@ -396,38 +402,21 @@ func (r *BlockReader) BlockWithSenders(ctx context.Context, tx kv.Getter, hash c
func (r *BlockReader) blockWithSenders(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64, forceCanonical bool) (block *types.Block, senders []common.Address, err error) {
blocksAvailable := r.sn.BlocksAvailable()
if blocksAvailable == 0 || blockHeight > blocksAvailable {
if r.TransactionsV3 {
if forceCanonical {
canonicalHash, err := rawdb.ReadCanonicalHash(tx, blockHeight)
if err != nil {
return nil, nil, fmt.Errorf("requested non-canonical hash %x. canonical=%x", hash, canonicalHash)
}
if canonicalHash != hash {
return nil, nil, nil
}
}

block, senders, err = rawdb.ReadBlockWithSenders(tx, hash, blockHeight)
if err != nil {
return nil, nil, err
}
return block, senders, nil
}
canonicalHash, err := rawdb.ReadCanonicalHash(tx, blockHeight)
if err != nil {
return nil, nil, fmt.Errorf("requested non-canonical hash %x. canonical=%x", hash, canonicalHash)
}
if canonicalHash == hash {
block, senders, err = rawdb.ReadBlockWithSenders(tx, hash, blockHeight)
if err != nil {
return nil, nil, err
}
return block, senders, nil
}
if forceCanonical {
canonicalHash, err := rawdb.ReadCanonicalHash(tx, blockHeight)
if err != nil {
return nil, nil, fmt.Errorf("requested non-canonical hash %x. canonical=%x", hash, canonicalHash)
}
if canonicalHash != hash {
return nil, nil, nil
}
}

block, senders, err = rawdb.ReadBlockWithSenders(tx, hash, blockHeight)
if err != nil {
return nil, nil, err
}
return rawdb.NonCanonicalBlockWithSenders(tx, hash, blockHeight)
return block, senders, nil
}

view := r.sn.View()

@ -714,7 +703,7 @@ func (r *BlockReader) TxnByIdxInBlock(ctx context.Context, tx kv.Getter, blockNu
return nil, nil
}

txn, err = rawdb.CanonicalTxnByID(tx, b.BaseTxId+1+uint64(i), canonicalHash)
txn, err = rawdb.CanonicalTxnByID(tx, b.BaseTxId+1+uint64(i))
if err != nil {
return nil, err
}

@ -813,7 +802,9 @@ func (r *BlockReader) IterateFrozenBodies(f func(blockNum, baseTxNum, txAmount u
}
return nil
}
func (r *BlockReader) TxsV3Enabled() bool { return r.TransactionsV3 }
func (r *BlockReader) BadHeaderNumber(ctx context.Context, tx kv.Getter, hash common.Hash) (blockHeight *uint64, err error) {
return rawdb.ReadBadHeaderNumber(tx, hash)
}
func (r *BlockReader) BlockByNumber(ctx context.Context, db kv.Tx, number uint64) (*types.Block, error) {
hash, err := rawdb.ReadCanonicalHash(db, number)
if err != nil {

@ -845,3 +836,54 @@ func (r *BlockReader) CurrentBlock(db kv.Tx) (*types.Block, error) {
func (r *BlockReader) RawTransactions(ctx context.Context, tx kv.Getter, fromBlock, toBlock uint64) (txs [][]byte, err error) {
return rawdb.RawTransactionsRange(tx, fromBlock, toBlock)
}
func (r *BlockReader) ReadAncestor(db kv.Getter, hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
if ancestor > number {
return common.Hash{}, 0
}
if ancestor == 1 {
header, err := r.Header(context.Background(), db, hash, number)
if err != nil {
panic(err)
}
// in this case it is cheaper to just read the header
if header != nil {
return header.ParentHash, number - 1
}
return common.Hash{}, 0
}
for ancestor != 0 {
h, err := r.CanonicalHash(context.Background(), db, number)
if err != nil {
panic(err)
}
if h == hash {
ancestorHash, err := r.CanonicalHash(context.Background(), db, number-ancestor)
if err != nil {
panic(err)
}
h, err := r.CanonicalHash(context.Background(), db, number)
if err != nil {
panic(err)
}
if h == hash {
number -= ancestor
return ancestorHash, number
}
}
if *maxNonCanonical == 0 {
return common.Hash{}, 0
}
*maxNonCanonical--
ancestor--
header, err := r.Header(context.Background(), db, hash, number)
if err != nil {
panic(err)
}
if header == nil {
return common.Hash{}, 0
}
hash = header.ParentHash
number--
}
return hash, number
}
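
ReadAncestor takes a fast path when the starting block is on the canonical chain (the ancestor is then a single lookup in the canonical index), and otherwise walks ParentHash links one header at a time, spending the maxNonCanonical budget. A simplified, self-contained sketch of that control flow over an in-memory toy chain (it omits the ancestor==1 shortcut and the repeated canonical check in the real code):

package main

import "fmt"

type header struct {
	hash, parent string
	number       uint64
}

// toy chain: headers by hash, plus the canonical hash per height.
type chain struct {
	headers   map[string]header
	canonical map[uint64]string
}

// readAncestor follows the shape of BlockReader.ReadAncestor above: jump via
// the canonical index when the current block is canonical, otherwise step back
// through ParentHash links while the maxNonCanonical budget lasts.
func (c *chain) readAncestor(hash string, number, ancestor uint64, maxNonCanonical *uint64) (string, uint64) {
	if ancestor > number {
		return "", 0
	}
	for ancestor != 0 {
		if c.canonical[number] == hash { // fast path: one lookup covers the whole jump
			return c.canonical[number-ancestor], number - ancestor
		}
		if *maxNonCanonical == 0 {
			return "", 0
		}
		*maxNonCanonical--
		ancestor--
		h, ok := c.headers[hash]
		if !ok {
			return "", 0
		}
		hash, number = h.parent, number-1
	}
	return hash, number
}

func main() {
	c := &chain{
		headers: map[string]header{
			"a1": {"a1", "g", 1}, "a2": {"a2", "a1", 2},
			"b2": {"b2", "a1", 2}, // non-canonical fork block at height 2
		},
		canonical: map[uint64]string{0: "g", 1: "a1", 2: "a2"},
	}
	budget := uint64(16)
	fmt.Println(c.readAncestor("a2", 2, 2, &budget)) // g 0 - canonical fast path
	fmt.Println(c.readAncestor("b2", 2, 2, &budget)) // g 0 - walks a parent link, spends budget
}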