// erigon-pulse/core/rawdb/accessors_chain.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
"bytes"
"context"
"encoding/binary"
"fmt"
"math"
"math/big"
"time"

"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/log/v3"

"github.com/ledgerwatch/erigon/cmd/rpcdaemon/interfaces"
"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/common/dbutils"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/ethdb/cbor"
"github.com/ledgerwatch/erigon/rlp"
)
// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
func ReadCanonicalHash(db kv.Getter, number uint64) (common.Hash, error) {
data, err := db.GetOne(kv.HeaderCanonical, dbutils.EncodeBlockNumber(number))
if err != nil {
return common.Hash{}, fmt.Errorf("failed ReadCanonicalHash: %w, number=%d", err, number)
}
if len(data) == 0 {
return common.Hash{}, nil
}
return common.BytesToHash(data), nil
}
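// exampleCanonicalHashExists is an illustrative sketch (hypothetical helper, not part of
// the original API): ReadCanonicalHash reports a missing mapping as the zero hash rather
// than an error, so callers are expected to check for common.Hash{} explicitly.
func exampleCanonicalHashExists(db kv.Getter, number uint64) (bool, error) {
    hash, err := ReadCanonicalHash(db, number)
    if err != nil {
        return false, err
    }
    // the zero hash means "no canonical block at this height", not corruption
    return hash != (common.Hash{}), nil
}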
// WriteCanonicalHash stores the hash assigned to a canonical block number.
func WriteCanonicalHash(db kv.Putter, hash common.Hash, number uint64) error {
if err := db.Put(kv.HeaderCanonical, dbutils.EncodeBlockNumber(number), hash.Bytes()); err != nil {
return fmt.Errorf("failed to store number to hash mapping: %w", err)
}
return nil
}
// DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db kv.Deleter, number uint64) error {
if err := db.Delete(kv.HeaderCanonical, dbutils.EncodeBlockNumber(number), nil); err != nil {
return fmt.Errorf("failed to delete number to hash mapping: %w", err)
}
return nil
}
// ReadHeaderNumber returns the header number assigned to a hash.
func ReadHeaderNumber(db kv.Getter, hash common.Hash) *uint64 {
data, err := db.GetOne(kv.HeaderNumber, hash.Bytes())
if err != nil {
log.Error("ReadHeaderNumber failed", "err", err)
}
if len(data) == 0 {
return nil
}
if len(data) != 8 {
log.Error("ReadHeaderNumber got wrong data len", "len", len(data))
return nil
}
number := binary.BigEndian.Uint64(data)
return &number
}
// WriteHeaderNumber stores the hash->number mapping.
func WriteHeaderNumber(db kv.Putter, hash common.Hash, number uint64) error {
if err := db.Put(kv.HeaderNumber, hash[:], dbutils.EncodeBlockNumber(number)); err != nil {
return err
}
return nil
}
// DeleteHeaderNumber removes hash->number mapping.
func DeleteHeaderNumber(db kv.Deleter, hash common.Hash) {
if err := db.Delete(kv.HeaderNumber, hash[:], nil); err != nil {
log.Crit("Failed to delete hash to number mapping", "err", err)
}
}
// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
func ReadHeadHeaderHash(db kv.Getter) common.Hash {
data, err := db.GetOne(kv.HeadHeaderKey, []byte(kv.HeadHeaderKey))
if err != nil {
log.Error("ReadHeadHeaderHash failed", "err", err)
}
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
// WriteHeadHeaderHash stores the hash of the current canonical head header.
func WriteHeadHeaderHash(db kv.Putter, hash common.Hash) error {
if err := db.Put(kv.HeadHeaderKey, []byte(kv.HeadHeaderKey), hash.Bytes()); err != nil {
return fmt.Errorf("failed to store last header's hash: %w", err)
}
return nil
}
// ReadHeadBlockHash retrieves the hash of the current canonical head block.
func ReadHeadBlockHash(db kv.Getter) common.Hash {
data, err := db.GetOne(kv.HeadBlockKey, []byte(kv.HeadBlockKey))
if err != nil {
log.Error("ReadHeadBlockHash failed", "err", err)
}
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
// WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db kv.Putter, hash common.Hash) {
if err := db.Put(kv.HeadBlockKey, []byte(kv.HeadBlockKey), hash.Bytes()); err != nil {
log.Crit("Failed to store last block's hash", "err", err)
}
}
// ReadForkchoiceHead retrieves headBlockHash from the last Engine API forkChoiceUpdated.
func ReadForkchoiceHead(db kv.Getter) common.Hash {
data, err := db.GetOne(kv.LastForkchoice, []byte("headBlockHash"))
if err != nil {
log.Error("ReadForkchoiceHead failed", "err", err)
}
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
// WriteForkchoiceHead stores headBlockHash from the last Engine API forkChoiceUpdated.
func WriteForkchoiceHead(db kv.Putter, hash common.Hash) {
if err := db.Put(kv.LastForkchoice, []byte("headBlockHash"), hash.Bytes()); err != nil {
log.Crit("Failed to store last headBlockHash", "err", err)
}
}
// ReadForkchoiceSafe retrieves safeBlockHash from the last Engine API forkChoiceUpdated.
func ReadForkchoiceSafe(db kv.Getter) common.Hash {
data, err := db.GetOne(kv.LastForkchoice, []byte("safeBlockHash"))
if err != nil {
log.Error("ReadForkchoiceSafe failed", "err", err)
}
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
// WriteForkchoiceSafe stores safeBlockHash from the last Engine API forkChoiceUpdated.
func WriteForkchoiceSafe(db kv.Putter, hash common.Hash) {
if err := db.Put(kv.LastForkchoice, []byte("safeBlockHash"), hash.Bytes()); err != nil {
log.Crit("Failed to store last safeBlockHash", "err", err)
}
}
// ReadForkchoiceFinalized retrieves finalizedBlockHash from the last Engine API forkChoiceUpdated.
func ReadForkchoiceFinalized(db kv.Getter) common.Hash {
data, err := db.GetOne(kv.LastForkchoice, []byte("finalizedBlockHash"))
if err != nil {
log.Error("ReadForkchoiceFinalized failed", "err", err)
}
if len(data) == 0 {
return common.Hash{}
}
return common.BytesToHash(data)
}
// WriteForkchoiceFinalized stores finalizedBlockHash from the last Engine API forkChoiceUpdated.
func WriteForkchoiceFinalized(db kv.Putter, hash common.Hash) {
if err := db.Put(kv.LastForkchoice, []byte("finalizedBlockHash"), hash.Bytes()); err != nil {
log.Crit("Failed to store last finalizedBlockHash", "err", err)
}
}
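// exampleStoreForkchoice is a composition sketch (hypothetical helper): the three
// forkchoice pointers from one Engine API forkChoiceUpdated call live under separate
// keys of the same kv.LastForkchoice table, so persisting a full update is three writes.
func exampleStoreForkchoice(tx kv.RwTx, head, safe, finalized common.Hash) {
    WriteForkchoiceHead(tx, head)
    WriteForkchoiceSafe(tx, safe)
    WriteForkchoiceFinalized(tx, finalized)
}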
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db kv.Getter, hash common.Hash, number uint64) rlp.RawValue {
data, err := db.GetOne(kv.Headers, dbutils.HeaderKey(number, hash))
if err != nil {
log.Error("ReadHeaderRLP failed", "err", err)
}
return data
}
// HasHeader verifies the existence of a block header corresponding to the hash.
func HasHeader(db kv.Has, hash common.Hash, number uint64) bool {
if has, err := db.Has(kv.Headers, dbutils.HeaderKey(number, hash)); !has || err != nil {
return false
}
return true
}
// ReadHeader retrieves the block header corresponding to the hash.
func ReadHeader(db kv.Getter, hash common.Hash, number uint64) *types.Header {
data := ReadHeaderRLP(db, hash, number)
if len(data) == 0 {
return nil
}
header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
log.Error("Invalid block header RLP", "hash", hash, "err", err)
return nil
}
return header
}
func ReadCurrentBlockNumber(db kv.Getter) *uint64 {
headHash := ReadHeadHeaderHash(db)
return ReadHeaderNumber(db, headHash)
}
func ReadCurrentHeader(db kv.Getter) *types.Header {
headHash := ReadHeadHeaderHash(db)
headNumber := ReadHeaderNumber(db, headHash)
if headNumber == nil {
return nil
}
return ReadHeader(db, headHash, *headNumber)
}
func ReadCurrentBlock(db kv.Tx) *types.Block {
headHash := ReadHeadBlockHash(db)
headNumber := ReadHeaderNumber(db, headHash)
if headNumber == nil {
return nil
}
return ReadBlock(db, headHash, *headNumber)
}
func ReadHeadersByNumber(db kv.Tx, number uint64) ([]*types.Header, error) {
var res []*types.Header
c, err := db.Cursor(kv.Headers)
if err != nil {
return nil, err
}
defer c.Close()
prefix := dbutils.EncodeBlockNumber(number)
for k, v, err := c.Seek(prefix); k != nil; k, v, err = c.Next() {
if err != nil {
return nil, err
}
if !bytes.HasPrefix(k, prefix) {
break
}
header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(v), header); err != nil {
return nil, fmt.Errorf("invalid block header RLP: hash=%x, err=%w", k[8:], err)
}
res = append(res, header)
}
return res, nil
}
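// exampleCanonicalHeaderAt is an illustrative sketch (hypothetical helper): several
// headers can exist at the same height when there are forks, so this picks the canonical
// one, if any, from the ReadHeadersByNumber result.
func exampleCanonicalHeaderAt(tx kv.Tx, number uint64) (*types.Header, error) {
    canonical, err := ReadCanonicalHash(tx, number)
    if err != nil {
        return nil, err
    }
    headers, err := ReadHeadersByNumber(tx, number)
    if err != nil {
        return nil, err
    }
    for _, h := range headers {
        if h.Hash() == canonical {
            return h, nil
        }
    }
    return nil, nil // headers exist at this height, but none matched the canonical mapping
}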
// WriteHeader stores a block header into the database and also stores the hash-
// to-number mapping.
func WriteHeader(db kv.Putter, header *types.Header) {
var (
hash = header.Hash()
number = header.Number.Uint64()
encoded = dbutils.EncodeBlockNumber(number)
)
if err := db.Put(kv.HeaderNumber, hash[:], encoded); err != nil {
log.Crit("Failed to store hash to number mapping", "err", err)
}
// Write the encoded header
data, err := rlp.EncodeToBytes(header)
if err != nil {
log.Crit("Failed to RLP encode header", "err", err)
}
if err := db.Put(kv.Headers, dbutils.HeaderKey(number, hash), data); err != nil {
log.Crit("Failed to store header", "err", err)
}
}
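// examplePersistCanonicalHeader is an illustrative sketch (hypothetical helper):
// WriteHeader persists the header itself plus the hash->number index, but it does not
// mark the header as canonical; that is a separate WriteCanonicalHash call.
func examplePersistCanonicalHeader(tx kv.RwTx, header *types.Header) error {
    WriteHeader(tx, header)
    // make this header the canonical one for its height
    return WriteCanonicalHash(tx, header.Hash(), header.Number.Uint64())
}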
// DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db kv.Deleter, hash common.Hash, number uint64) {
if err := db.Delete(kv.Headers, dbutils.HeaderKey(number, hash), nil); err != nil {
log.Crit("Failed to delete header", "err", err)
}
if err := db.Delete(kv.HeaderNumber, hash.Bytes(), nil); err != nil {
log.Crit("Failed to delete hash to number mapping", "err", err)
}
}
// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func ReadBodyRLP(db kv.Tx, hash common.Hash, number uint64) rlp.RawValue {
body := ReadCanonicalBodyWithTransactions(db, hash, number)
bodyRlp, err := rlp.EncodeToBytes(body)
if err != nil {
log.Error("ReadBodyRLP failed", "err", err)
}
return bodyRlp
}
func NonCanonicalBodyRLP(db kv.Tx, hash common.Hash, number uint64) rlp.RawValue {
body := NonCanonicalBodyWithTransactions(db, hash, number)
bodyRlp, err := rlp.EncodeToBytes(body)
if err != nil {
log.Error("ReadBodyRLP failed", "err", err)
}
return bodyRlp
}
func ReadStorageBodyRLP(db kv.Getter, hash common.Hash, number uint64) rlp.RawValue {
bodyRlp, err := db.GetOne(kv.BlockBody, dbutils.BlockBodyKey(number, hash))
if err != nil {
log.Error("ReadBodyRLP failed", "err", err)
}
return bodyRlp
}
func ReadStorageBody(db kv.Getter, hash common.Hash, number uint64) (types.BodyForStorage, error) {
bodyRlp, err := db.GetOne(kv.BlockBody, dbutils.BlockBodyKey(number, hash))
if err != nil {
log.Error("ReadBodyRLP failed", "err", err)
}
bodyForStorage := new(types.BodyForStorage)
if err := rlp.DecodeBytes(bodyRlp, bodyForStorage); err != nil {
return types.BodyForStorage{}, err
}
return *bodyForStorage, nil
}
func CanonicalTransactions(db kv.Getter, baseTxId uint64, amount uint32) ([]types.Transaction, error) {
if amount == 0 {
return []types.Transaction{}, nil
}
txIdKey := make([]byte, 8)
reader := bytes.NewReader(nil)
stream := rlp.NewStream(reader, 0)
txs := make([]types.Transaction, amount)
binary.BigEndian.PutUint64(txIdKey, baseTxId)
i := uint32(0)
if err := db.ForAmount(kv.EthTx, txIdKey, amount, func(k, v []byte) error {
var decodeErr error
reader.Reset(v)
stream.Reset(reader, 0)
if txs[i], decodeErr = types.DecodeTransaction(stream); decodeErr != nil {
return decodeErr
}
i++
return nil
}); err != nil {
return nil, err
}
txs = txs[:i] // user may request big "amount", but db can return small "amount". Return as much as we found.
return txs, nil
}
func NonCanonicalTransactions(db kv.Getter, baseTxId uint64, amount uint32) ([]types.Transaction, error) {
if amount == 0 {
return []types.Transaction{}, nil
}
txIdKey := make([]byte, 8)
reader := bytes.NewReader(nil)
stream := rlp.NewStream(reader, 0)
txs := make([]types.Transaction, amount)
binary.BigEndian.PutUint64(txIdKey, baseTxId)
i := uint32(0)
if err := db.ForAmount(kv.NonCanonicalTxs, txIdKey, amount, func(k, v []byte) error {
var decodeErr error
reader.Reset(v)
stream.Reset(reader, 0)
if txs[i], decodeErr = types.DecodeTransaction(stream); decodeErr != nil {
return decodeErr
}
i++
return nil
}); err != nil {
return nil, err
}
txs = txs[:i] // user may request big "amount", but db can return small "amount". Return as much as we found.
return txs, nil
}
func WriteTransactions(db kv.RwTx, txs []types.Transaction, baseTxId uint64) error {
txId := baseTxId
buf := bytes.NewBuffer(nil)
for _, tx := range txs {
txIdKey := make([]byte, 8)
binary.BigEndian.PutUint64(txIdKey, txId)
txId++
buf.Reset()
if err := rlp.Encode(buf, tx); err != nil {
return fmt.Errorf("broken tx rlp: %w", err)
}
// If next Append returns KeyExists error - it means you need to open transaction in App code before calling this func. Batch is also fine.
if err := db.Append(kv.EthTx, txIdKey, common.CopyBytes(buf.Bytes())); err != nil {
return err
}
}
return nil
}
func WriteRawTransactions(db kv.RwTx, txs [][]byte, baseTxId uint64) error {
txId := baseTxId
for _, tx := range txs {
txIdKey := make([]byte, 8)
binary.BigEndian.PutUint64(txIdKey, txId)
// If next Append returns KeyExists error - it means you need to open transaction in App code before calling this func. Batch is also fine.
if err := db.Append(kv.EthTx, txIdKey, tx); err != nil {
// on append failure, look up the last key already in the table to aid debugging
c, e := db.Cursor(kv.EthTx)
if e != nil {
return err
}
kk, _, _ := c.Last()
c.Close()
return fmt.Errorf("txId=%d, baseTxId=%d, lastInDb=%d, %w", txId, baseTxId, binary.BigEndian.Uint64(kk), err)
}
txId++
}
return nil
}
// WriteBodyForStorage stores an RLP encoded block body into the database.
func WriteBodyForStorage(db kv.Putter, hash common.Hash, number uint64, body *types.BodyForStorage) error {
data, err := rlp.EncodeToBytes(body)
if err != nil {
return err
}
return db.Put(kv.BlockBody, dbutils.BlockBodyKey(number, hash), data)
}
// ReadBodyByNumber - returns canonical block body
func ReadBodyByNumber(db kv.Tx, number uint64) (*types.Body, uint64, uint32, error) {
hash, err := ReadCanonicalHash(db, number)
if err != nil {
return nil, 0, 0, fmt.Errorf("failed ReadCanonicalHash: %w", err)
}
if hash == (common.Hash{}) {
return nil, 0, 0, nil
}
body, baseTxId, txAmount := ReadBody(db, hash, number)
return body, baseTxId, txAmount, nil
}
func ReadBodyWithTransactions(db kv.Getter, hash common.Hash, number uint64) (*types.Body, error) {
canonicalHash, err := ReadCanonicalHash(db, number)
if err != nil {
return nil, fmt.Errorf("read canonical hash failed: %d, %w", number, err)
}
if canonicalHash == hash {
return ReadCanonicalBodyWithTransactions(db, hash, number), nil
}
return NonCanonicalBodyWithTransactions(db, hash, number), nil
}
func ReadCanonicalBodyWithTransactions(db kv.Getter, hash common.Hash, number uint64) *types.Body {
body, baseTxId, txAmount := ReadBody(db, hash, number)
if body == nil {
return nil
}
var err error
body.Transactions, err = CanonicalTransactions(db, baseTxId, txAmount)
if err != nil {
log.Error("failed ReadTransactionByHash", "hash", hash, "block", number, "err", err)
return nil
}
return body
}
func NonCanonicalBodyWithTransactions(db kv.Getter, hash common.Hash, number uint64) *types.Body {
body, baseTxId, txAmount := ReadBody(db, hash, number)
if body == nil {
return nil
}
var err error
body.Transactions, err = NonCanonicalTransactions(db, baseTxId, txAmount)
if err != nil {
log.Error("failed ReadTransactionByHash", "hash", hash, "block", number, "err", err)
return nil
}
return body
}
func RawTransactionsRange(db kv.Getter, from, to uint64) (res [][]byte, err error) {
blockKey := make([]byte, dbutils.NumberLength+common.HashLength)
encNum := make([]byte, 8)
for i := from; i < to+1; i++ {
binary.BigEndian.PutUint64(encNum, i)
hash, err := db.GetOne(kv.HeaderCanonical, encNum)
if err != nil {
return nil, err
}
if len(hash) == 0 {
continue
}
binary.BigEndian.PutUint64(blockKey, i)
copy(blockKey[dbutils.NumberLength:], hash)
bodyRlp, err := db.GetOne(kv.BlockBody, blockKey)
if err != nil {
return nil, err
}
if len(bodyRlp) == 0 {
continue
}
baseTxId, txAmount, err := types.DecodeOnlyTxMetadataFromBody(bodyRlp)
if err != nil {
return nil, err
}
binary.BigEndian.PutUint64(encNum, baseTxId)
if err = db.ForAmount(kv.EthTx, encNum, txAmount, func(k, v []byte) error {
res = append(res, v)
return nil
}); err != nil {
return nil, err
}
}
return
}
// ResetSequence - allows setting the sequence to an arbitrary value (for example, decrementing it to an exact value)
func ResetSequence(tx kv.RwTx, bucket string, newValue uint64) error {
c, err := tx.Cursor(bucket)
if err != nil {
return err
}
defer c.Close()
k, _, err := c.Last()
if err != nil {
return err
}
if k != nil && binary.BigEndian.Uint64(k) >= newValue {
panic(fmt.Sprintf("must not happen. ResetSequence: %s, %d <= lastInDB: %d\n", bucket, newValue, binary.BigEndian.Uint64(k)))
}
newVBytes := make([]byte, 8)
binary.BigEndian.PutUint64(newVBytes, newValue)
if err := tx.Put(kv.Sequence, []byte(bucket), newVBytes); err != nil {
return err
}
return nil
}
func ReadBodyForStorageByKey(db kv.Getter, k []byte) (*types.BodyForStorage, error) {
bodyRlp, err := db.GetOne(kv.BlockBody, k)
if err != nil {
return nil, err
}
if len(bodyRlp) == 0 {
return nil, nil
}
bodyForStorage := new(types.BodyForStorage)
if err := rlp.DecodeBytes(bodyRlp, bodyForStorage); err != nil {
return nil, err
}
return bodyForStorage, nil
}
func ReadBody(db kv.Getter, hash common.Hash, number uint64) (*types.Body, uint64, uint32) {
data := ReadStorageBodyRLP(db, hash, number)
if len(data) == 0 {
return nil, 0, 0
}
bodyForStorage := new(types.BodyForStorage)
err := rlp.DecodeBytes(data, bodyForStorage)
if err != nil {
log.Error("Invalid block body RLP", "hash", hash, "err", err)
return nil, 0, 0
}
body := new(types.Body)
body.Uncles = bodyForStorage.Uncles
if bodyForStorage.TxAmount < 2 {
panic(fmt.Sprintf("block body has too few txs: block %d, TxAmount %d", number, bodyForStorage.TxAmount))
}
return body, bodyForStorage.BaseTxId + 1, bodyForStorage.TxAmount - 2 // 1 system txn at the beginning of the block, and 1 at the end
}
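// exampleUserTxnKey is an illustrative sketch (hypothetical helper): the stored
// BodyForStorage reserves TxAmount = len(user txs) + 2 ids, the first and last of which
// are system txns. This derives the kv.EthTx key of the i-th user transaction of a
// canonical block from that metadata, assuming i < TxAmount-2.
func exampleUserTxnKey(db kv.Getter, hash common.Hash, number uint64, i uint32) ([]byte, error) {
    body, err := ReadStorageBody(db, hash, number)
    if err != nil {
        return nil, err
    }
    key := make([]byte, 8)
    // +1 skips the leading system-txn slot reserved at BaseTxId
    binary.BigEndian.PutUint64(key, body.BaseTxId+1+uint64(i))
    return key, nil
}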
func ReadSenders(db kv.Getter, hash common.Hash, number uint64) ([]common.Address, error) {
data, err := db.GetOne(kv.Senders, dbutils.BlockBodyKey(number, hash))
if err != nil {
return nil, fmt.Errorf("readSenders failed: %w", err)
}
senders := make([]common.Address, len(data)/common.AddressLength)
for i := 0; i < len(senders); i++ {
copy(senders[i][:], data[i*common.AddressLength:])
}
return senders, nil
}
func WriteRawBodyIfNotExists(db kv.RwTx, hash common.Hash, number uint64, body *types.RawBody) error {
exists, err := db.Has(kv.BlockBody, dbutils.BlockBodyKey(number, hash))
if err != nil {
return err
}
if exists {
return nil
}
return WriteRawBody(db, hash, number, body)
}
func WriteRawBody(db kv.RwTx, hash common.Hash, number uint64, body *types.RawBody) error {
baseTxId, err := db.IncrementSequence(kv.EthTx, uint64(len(body.Transactions))+2)
if err != nil {
return err
}
data := types.BodyForStorage{
BaseTxId: baseTxId,
TxAmount: uint32(len(body.Transactions)) + 2,
Uncles: body.Uncles,
}
if err = WriteBodyForStorage(db, hash, number, &data); err != nil {
return fmt.Errorf("WriteBodyForStorage: %w", err)
}
if err = WriteRawTransactions(db, body.Transactions, baseTxId+1); err != nil {
return fmt.Errorf("WriteRawTransactions: %w", err)
}
return nil
}
func WriteBody(db kv.RwTx, hash common.Hash, number uint64, body *types.Body) error {
// Pre-processing
body.SendersFromTxs()
baseTxId, err := db.IncrementSequence(kv.EthTx, uint64(len(body.Transactions))+2)
if err != nil {
return err
}
data := types.BodyForStorage{
BaseTxId: baseTxId,
TxAmount: uint32(len(body.Transactions)) + 2,
Uncles: body.Uncles,
}
if err := WriteBodyForStorage(db, hash, number, &data); err != nil {
return fmt.Errorf("failed to write body: %w", err)
}
err = WriteTransactions(db, body.Transactions, baseTxId+1)
if err != nil {
return fmt.Errorf("failed to WriteTransactions: %w", err)
}
return nil
}
func WriteSenders(db kv.Putter, hash common.Hash, number uint64, senders []common.Address) error {
data := make([]byte, common.AddressLength*len(senders))
for i, sender := range senders {
copy(data[i*common.AddressLength:], sender[:])
}
if err := db.Put(kv.Senders, dbutils.BlockBodyKey(number, hash), data); err != nil {
return fmt.Errorf("failed to store block senders: %w", err)
}
return nil
}
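// exampleStoreBlock is a composition sketch (hypothetical helper, not the actual write
// path of the sync stages, which split these writes across stages): a minimal sequence
// for persisting one canonical block with precomputed senders, using only accessors from
// this file.
func exampleStoreBlock(tx kv.RwTx, block *types.Block, senders []common.Address, td *big.Int) error {
    hash, number := block.Hash(), block.NumberU64()
    WriteHeader(tx, block.Header())
    if err := WriteBody(tx, hash, number, block.Body()); err != nil {
        return err
    }
    if err := WriteSenders(tx, hash, number, senders); err != nil {
        return err
    }
    if err := WriteTd(tx, hash, number, td); err != nil {
        return err
    }
    return WriteCanonicalHash(tx, hash, number)
}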
// DeleteBody removes all block body data associated with a hash.
func DeleteBody(db kv.Deleter, hash common.Hash, number uint64) {
if err := db.Delete(kv.BlockBody, dbutils.BlockBodyKey(number, hash), nil); err != nil {
log.Crit("Failed to delete block body", "err", err)
}
}
// MakeBodiesCanonical - move all txs of non-canonical blocks from NonCanonicalTxs table to EthTx table
func MakeBodiesCanonical(tx kv.RwTx, from uint64, ctx context.Context, logPrefix string, logEvery *time.Ticker) error {
for blockNum := from; ; blockNum++ {
h, err := ReadCanonicalHash(tx, blockNum)
if err != nil {
return err
}
if h == (common.Hash{}) {
break
}
data := ReadStorageBodyRLP(tx, h, blockNum)
if len(data) == 0 {
break
}
bodyForStorage := new(types.BodyForStorage)
if err := rlp.DecodeBytes(data, bodyForStorage); err != nil {
return err
}
newBaseId, err := tx.IncrementSequence(kv.EthTx, uint64(bodyForStorage.TxAmount))
if err != nil {
return err
}
// the loop below moves only non-system txs; the 2 system-tx slots (first and last id) are reserved by the sequence increment but not copied, because they may not exist
i := uint64(0)
if err := tx.ForAmount(kv.NonCanonicalTxs, dbutils.EncodeBlockNumber(bodyForStorage.BaseTxId+1), bodyForStorage.TxAmount-2, func(k, v []byte) error {
id := newBaseId + 1 + i
if err := tx.Put(kv.EthTx, dbutils.EncodeBlockNumber(id), v); err != nil {
return err
}
if err := tx.Delete(kv.NonCanonicalTxs, k, nil); err != nil {
return err
}
i++
return nil
}); err != nil {
return err
}
bodyForStorage.BaseTxId = newBaseId
if err := WriteBodyForStorage(tx, h, blockNum, bodyForStorage); err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
case <-logEvery.C:
log.Info(fmt.Sprintf("[%s] Making bodies canonical...", logPrefix), "current block", blockNum)
default:
}
}
return nil
}
// MakeBodiesNonCanonical - move all txs of canonical blocks to NonCanonicalTxs bucket
func MakeBodiesNonCanonical(tx kv.RwTx, from uint64, ctx context.Context, logPrefix string, logEvery *time.Ticker) error {
var firstMovedTxnID uint64
var firstMovedTxnIDIsSet bool
for blockNum := from; ; blockNum++ {
h, err := ReadCanonicalHash(tx, blockNum)
if err != nil {
return err
}
if h == (common.Hash{}) {
break
}
data := ReadStorageBodyRLP(tx, h, blockNum)
if len(data) == 0 {
break
}
bodyForStorage := new(types.BodyForStorage)
if err := rlp.DecodeBytes(data, bodyForStorage); err != nil {
return err
}
if !firstMovedTxnIDIsSet {
firstMovedTxnIDIsSet = true
firstMovedTxnID = bodyForStorage.BaseTxId
}
// move txs to NonCanonical bucket, it has own sequence
newBaseId, err := tx.IncrementSequence(kv.NonCanonicalTxs, uint64(bodyForStorage.TxAmount))
if err != nil {
return err
}
// the loop below moves only non-system txs; the 2 system-tx slots (first and last id) are reserved by the sequence increment but not copied, because they may not exist
i := uint64(0)
if err := tx.ForAmount(kv.EthTx, dbutils.EncodeBlockNumber(bodyForStorage.BaseTxId+1), bodyForStorage.TxAmount-2, func(k, v []byte) error {
id := newBaseId + 1 + i
if err := tx.Put(kv.NonCanonicalTxs, dbutils.EncodeBlockNumber(id), v); err != nil {
return err
}
if err := tx.Delete(kv.EthTx, k, nil); err != nil {
return err
}
i++
return nil
}); err != nil {
return err
}
bodyForStorage.BaseTxId = newBaseId
if err := WriteBodyForStorage(tx, h, blockNum, bodyForStorage); err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
case <-logEvery.C:
log.Info(fmt.Sprintf("[%s] Unwinding transactions...", logPrefix), "current block", blockNum)
default:
}
}
// EthTx ids must stay canonical, so its sequence has to be decremented on unwind
if firstMovedTxnIDIsSet {
c, err := tx.Cursor(kv.EthTx)
if err != nil {
return err
}
k, _, err := c.Last()
if err != nil {
return err
}
if k != nil && binary.BigEndian.Uint64(k) >= firstMovedTxnID {
panic(fmt.Sprintf("must not happen, ResetSequence: %d, lastInDB: %d\n", firstMovedTxnID, binary.BigEndian.Uint64(k)))
}
if err := ResetSequence(tx, kv.EthTx, firstMovedTxnID); err != nil {
return err
}
}
return nil
}
// ReadTd retrieves a block's total difficulty corresponding to the hash.
func ReadTd(db kv.Getter, hash common.Hash, number uint64) (*big.Int, error) {
data, err := db.GetOne(kv.HeaderTD, dbutils.HeaderKey(number, hash))
if err != nil {
return nil, fmt.Errorf("failed ReadTd: %w", err)
}
if len(data) == 0 {
return nil, nil
}
td := new(big.Int)
if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
return nil, fmt.Errorf("invalid block total difficulty RLP: %x, %w", hash, err)
}
return td, nil
}
func ReadTdByHash(db kv.Getter, hash common.Hash) (*big.Int, error) {
headNumber := ReadHeaderNumber(db, hash)
if headNumber == nil {
return nil, nil
}
return ReadTd(db, hash, *headNumber)
}
// WriteTd stores the total difficulty of a block into the database.
func WriteTd(db kv.Putter, hash common.Hash, number uint64, td *big.Int) error {
data, err := rlp.EncodeToBytes(td)
if err != nil {
return fmt.Errorf("failed to RLP encode block total difficulty: %w", err)
}
if err := db.Put(kv.HeaderTD, dbutils.HeaderKey(number, hash), data); err != nil {
return fmt.Errorf("failed to store block total difficulty: %w", err)
}
return nil
}
// DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db kv.Deleter, hash common.Hash, number uint64) error {
if err := db.Delete(kv.HeaderTD, dbutils.HeaderKey(number, hash), nil); err != nil {
return fmt.Errorf("failed to delete block total difficulty: %w", err)
}
return nil
}
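// exampleWriteChildTd is an illustrative sketch (hypothetical helper): total difficulty
// is cumulative, so the value stored for a header is normally the parent's TD plus the
// header's own difficulty. Note that ReadTd returns nil without an error when the
// parent's TD is simply missing.
func exampleWriteChildTd(tx kv.RwTx, header *types.Header) error {
    parentTd, err := ReadTd(tx, header.ParentHash, header.Number.Uint64()-1)
    if err != nil {
        return err
    }
    if parentTd == nil {
        return fmt.Errorf("parent TD not found for block %d", header.Number.Uint64())
    }
    td := new(big.Int).Add(parentTd, header.Difficulty)
    return WriteTd(tx, header.Hash(), header.Number.Uint64(), td)
}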
// HasReceipts verifies the existence of all the transaction receipts belonging
// to a block.
func HasReceipts(db kv.Has, hash common.Hash, number uint64) bool {
if has, err := db.Has(kv.Receipts, dbutils.EncodeBlockNumber(number)); !has || err != nil {
return false
}
return true
}
// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
// The receipt metadata fields are not guaranteed to be populated, so they
// should not be used. Use ReadReceipts instead if the metadata is needed.
func ReadRawReceipts(db kv.Tx, blockNum uint64) types.Receipts {
// Retrieve the flattened receipt slice
data, err := db.GetOne(kv.Receipts, dbutils.EncodeBlockNumber(blockNum))
if err != nil {
log.Error("ReadRawReceipts failed", "err", err)
}
if len(data) == 0 {
return nil
}
var receipts types.Receipts
if err := cbor.Unmarshal(&receipts, bytes.NewReader(data)); err != nil {
log.Error("receipt unmarshal failed", "err", err)
return nil
}
prefix := make([]byte, 8)
binary.BigEndian.PutUint64(prefix, blockNum)
if err := db.ForPrefix(kv.Log, prefix, func(k, v []byte) error {
var logs types.Logs
if err := cbor.Unmarshal(&logs, bytes.NewReader(v)); err != nil {
return fmt.Errorf("receipt unmarshal failed: %w", err)
}
receipts[binary.BigEndian.Uint32(k[8:])].Logs = logs
return nil
}); err != nil {
log.Error("logs fetching failed", "err", err)
return nil
}
return receipts
}
// ReadReceipts retrieves all the transaction receipts belonging to a block, including
// its corresponding metadata fields. If it is unable to populate these metadata
// fields then nil is returned.
//
// The current implementation populates these metadata fields by reading the receipts'
// corresponding block body, so if the block body is not found it will return nil even
// if the receipt itself is stored.
func ReadReceipts(db kv.Tx, block *types.Block, senders []common.Address) types.Receipts {
if block == nil {
return nil
}
// We're deriving many fields from the block body, retrieve beside the receipt
receipts := ReadRawReceipts(db, block.NumberU64())
if receipts == nil {
return nil
}
block.SendersToTxs(senders)
if err := receipts.DeriveFields(block.Hash(), block.NumberU64(), block.Transactions(), senders); err != nil {
log.Error("Failed to derive block receipts fields", "hash", block.Hash(), "number", block.NumberU64(), "err", err)
return nil
}
return receipts
}
func ReadReceiptsByHash(db kv.Tx, hash common.Hash) (types.Receipts, error) {
number := ReadHeaderNumber(db, hash)
if number == nil {
return nil, nil
}
canonicalHash, err := ReadCanonicalHash(db, *number)
if err != nil {
return nil, fmt.Errorf("requested non-canonical hash %x. canonical=%x", hash, canonicalHash)
}
b, s, err := ReadBlockWithSenders(db, hash, *number)
if err != nil {
return nil, err
}
if b == nil {
return nil, nil
}
receipts := ReadReceipts(db, b, s)
if receipts == nil {
return nil, nil
}
return receipts, nil
}
// WriteReceipts stores all the transaction receipts belonging to a block.
func WriteReceipts(tx kv.Putter, number uint64, receipts types.Receipts) error {
buf := bytes.NewBuffer(make([]byte, 0, 1024))
for txId, r := range receipts {
if len(r.Logs) == 0 {
continue
}
buf.Reset()
err := cbor.Marshal(buf, r.Logs)
if err != nil {
return fmt.Errorf("encode block logs for block %d: %w", number, err)
}
if err = tx.Put(kv.Log, dbutils.LogKey(number, uint32(txId)), buf.Bytes()); err != nil {
return fmt.Errorf("writing logs for block %d: %w", number, err)
}
}
buf.Reset()
err := cbor.Marshal(buf, receipts)
if err != nil {
return fmt.Errorf("encode block receipts for block %d: %w", number, err)
}
if err = tx.Put(kv.Receipts, dbutils.EncodeBlockNumber(number), buf.Bytes()); err != nil {
return fmt.Errorf("writing receipts for block %d: %w", number, err)
}
return nil
}
// AppendReceipts stores all the transaction receipts belonging to a block.
func AppendReceipts(tx kv.StatelessWriteTx, blockNumber uint64, receipts types.Receipts) error {
buf := bytes.NewBuffer(make([]byte, 0, 1024))
for txId, r := range receipts {
if len(r.Logs) == 0 {
continue
}
buf.Reset()
err := cbor.Marshal(buf, r.Logs)
if err != nil {
return fmt.Errorf("encode block receipts for block %d: %w", blockNumber, err)
}
if err = tx.Append(kv.Log, dbutils.LogKey(blockNumber, uint32(txId)), buf.Bytes()); err != nil {
return fmt.Errorf("writing receipts for block %d: %w", blockNumber, err)
}
}
buf.Reset()
err := cbor.Marshal(buf, receipts)
if err != nil {
return fmt.Errorf("encode block receipts for block %d: %w", blockNumber, err)
}
if err = tx.Append(kv.Receipts, dbutils.EncodeBlockNumber(blockNumber), buf.Bytes()); err != nil {
return fmt.Errorf("writing receipts for block %d: %w", blockNumber, err)
}
return nil
}
// DeleteReceipts removes all receipt data associated with a block hash.
func DeleteReceipts(db kv.RwTx, number uint64) error {
if err := db.Delete(kv.Receipts, dbutils.EncodeBlockNumber(number), nil); err != nil {
return fmt.Errorf("receipts delete failed: %d, %w", number, err)
}
prefix := make([]byte, 8)
binary.BigEndian.PutUint64(prefix, number)
if err := db.ForPrefix(kv.Log, prefix, func(k, v []byte) error {
return db.Delete(kv.Log, k, nil)
}); err != nil {
return err
}
return nil
}
// DeleteNewerReceipts removes all receipts for the given block number or newer
func DeleteNewerReceipts(db kv.RwTx, number uint64) error {
if err := db.ForEach(kv.Receipts, dbutils.EncodeBlockNumber(number), func(k, v []byte) error {
return db.Delete(kv.Receipts, k, nil)
}); err != nil {
return err
}
from := make([]byte, 8)
binary.BigEndian.PutUint64(from, number)
if err := db.ForEach(kv.Log, from, func(k, v []byte) error {
return db.Delete(kv.Log, k, nil)
}); err != nil {
return err
}
return nil
}
func ReceiptsAvailableFrom(tx kv.Tx) (uint64, error) {
c, err := tx.Cursor(kv.Receipts)
if err != nil {
return math.MaxUint64, err
}
defer c.Close()
k, _, err := c.First()
if err != nil {
return math.MaxUint64, err
}
if len(k) == 0 {
return math.MaxUint64, nil
}
return binary.BigEndian.Uint64(k), nil
}
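// exampleReceiptsPruned is a usage sketch (hypothetical helper): ReceiptsAvailableFrom
// returns the first block number that still has receipts (math.MaxUint64 when the table
// is empty), which lets callers reject queries for pruned history up front.
func exampleReceiptsPruned(tx kv.Tx, blockNum uint64) (bool, error) {
    from, err := ReceiptsAvailableFrom(tx)
    if err != nil {
        return false, err
    }
    return blockNum < from, nil
}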
// ReadBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body. If either the header or body could not
// be retrieved nil is returned.
//
// Note, due to concurrent download of header and block body the header and thus
// canonical hash can be stored in the database but the body data not (yet).
func ReadBlock(tx kv.Getter, hash common.Hash, number uint64) *types.Block {
header := ReadHeader(tx, hash, number)
if header == nil {
return nil
}
body := ReadCanonicalBodyWithTransactions(tx, hash, number)
if body == nil {
return nil
}
return types.NewBlockFromStorage(hash, header, body.Transactions, body.Uncles)
}
func NonCanonicalBlockWithSenders(tx kv.Getter, hash common.Hash, number uint64) (*types.Block, []common.Address, error) {
header := ReadHeader(tx, hash, number)
if header == nil {
return nil, nil, fmt.Errorf("header not found for block %d, %x", number, hash)
}
body := NonCanonicalBodyWithTransactions(tx, hash, number)
if body == nil {
return nil, nil, fmt.Errorf("body not found for block %d, %x", number, hash)
}
block := types.NewBlockFromStorage(hash, header, body.Transactions, body.Uncles)
senders, err := ReadSenders(tx, hash, number)
if err != nil {
return nil, nil, err
}
if len(senders) != block.Transactions().Len() {
return block, senders, nil // no senders is fine - will recover them on the fly
}
block.SendersToTxs(senders)
return block, senders, nil
}
// HasBlock - is more efficient than ReadBlock because it doesn't read transactions.
// It is not equivalent to HasHeader because headers and bodies are written by different stages.
func HasBlock(db kv.Getter, hash common.Hash, number uint64) bool {
body := ReadStorageBodyRLP(db, hash, number)
return len(body) > 0
}
func ReadBlockWithSenders(db kv.Getter, hash common.Hash, number uint64) (*types.Block, []common.Address, error) {
block := ReadBlock(db, hash, number)
if block == nil {
return nil, nil, nil
}
senders, err := ReadSenders(db, hash, number)
if err != nil {
return nil, nil, err
}
if len(senders) != block.Transactions().Len() {
return block, senders, nil // no senders is fine - will recover them on the fly
}
block.SendersToTxs(senders)
return block, senders, nil
}
// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db kv.RwTx, block *types.Block) error {
if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil {
return err
}
WriteHeader(db, block.Header())
return nil
}
func min(a, b uint64) uint64 {
if a < b {
return a
}
return b
}
// DeleteAncientBlocks - deletes old blocks after they have been moved to snapshots. [from, to)
// Does not delete receipts.
func DeleteAncientBlocks(db kv.RwTx, blockTo uint64, blocksDeleteLimit int) error {
c, err := db.Cursor(kv.Headers)
if err != nil {
return err
}
defer c.Close()
var stopAtBlock uint64
{
k, _, err := c.First()
if err != nil {
return err
}
if k == nil { // empty Headers table - nothing to delete
return nil
}
firstBlock := binary.BigEndian.Uint64(k)
stopAtBlock = min(blockTo, firstBlock+uint64(blocksDeleteLimit))
}
for k, _, err := c.First(); k != nil; k, _, err = c.Next() {
if err != nil {
return err
}
n := binary.BigEndian.Uint64(k)
if n >= stopAtBlock {
break
}
canonicalHash, err := ReadCanonicalHash(db, n)
if err != nil {
return err
}
isCanonical := bytes.Equal(k[8:], canonicalHash[:])
b, err := ReadBodyForStorageByKey(db, k)
if err != nil {
return err
}
if b != nil { // the body may be missing - in that case there are no txs to delete
txIDBytes := make([]byte, 8)
for txID := b.BaseTxId; txID < b.BaseTxId+uint64(b.TxAmount); txID++ {
binary.BigEndian.PutUint64(txIDBytes, txID)
bucket := kv.EthTx
if !isCanonical {
bucket = kv.NonCanonicalTxs
}
if err := db.Delete(bucket, txIDBytes, nil); err != nil {
return err
}
}
}
if err := db.Delete(kv.Headers, k, nil); err != nil {
return err
}
if err := db.Delete(kv.BlockBody, k, nil); err != nil {
return err
}
if err := db.Delete(kv.Senders, k, nil); err != nil {
return err
}
}
return nil
}
// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db kv.RwTx, hash common.Hash, number uint64) error {
if err := DeleteReceipts(db, number); err != nil {
return err
}
DeleteHeader(db, hash, number)
DeleteBody(db, hash, number)
if err := DeleteTd(db, hash, number); err != nil {
return err
}
return nil
}
func ReadBlockByNumber(db kv.Tx, number uint64) (*types.Block, error) {
hash, err := ReadCanonicalHash(db, number)
if err != nil {
return nil, fmt.Errorf("failed ReadCanonicalHash: %w", err)
}
if hash == (common.Hash{}) {
return nil, nil
}
return ReadBlock(db, hash, number), nil
}
func CanonicalBlockByNumberWithSenders(db kv.Tx, number uint64) (*types.Block, []common.Address, error) {
hash, err := ReadCanonicalHash(db, number)
if err != nil {
return nil, nil, fmt.Errorf("failed ReadCanonicalHash: %w", err)
}
if hash == (common.Hash{}) {
return nil, nil, nil
}
return ReadBlockWithSenders(db, hash, number)
}
func ReadBlockByHash(db kv.Tx, hash common.Hash) (*types.Block, error) {
number := ReadHeaderNumber(db, hash)
if number == nil {
return nil, nil
}
return ReadBlock(db, hash, *number), nil
}
func ReadTotalIssued(db kv.Getter, number uint64) (*big.Int, error) {
data, err := db.GetOne(kv.Issuance, dbutils.EncodeBlockNumber(number))
if err != nil {
return nil, err
}
return new(big.Int).SetBytes(data), nil
}
func WriteTotalIssued(db kv.Putter, number uint64, totalIssued *big.Int) error {
return db.Put(kv.Issuance, dbutils.EncodeBlockNumber(number), totalIssued.Bytes())
}
func ReadTotalBurnt(db kv.Getter, number uint64) (*big.Int, error) {
data, err := db.GetOne(kv.Issuance, append([]byte("burnt"), dbutils.EncodeBlockNumber(number)...))
if err != nil {
return nil, err
}
return new(big.Int).SetBytes(data), nil
}
func WriteTotalBurnt(db kv.Putter, number uint64, totalBurnt *big.Int) error {
return db.Put(kv.Issuance, append([]byte("burnt"), dbutils.EncodeBlockNumber(number)...), totalBurnt.Bytes())
}
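// exampleNetIssuance is an illustrative sketch (hypothetical helper): issued and burnt
// totals share the kv.Issuance table, with burnt entries distinguished by a "burnt" key
// prefix; this derives the net issuance recorded for a block from the two accessors.
func exampleNetIssuance(db kv.Getter, number uint64) (*big.Int, error) {
    issued, err := ReadTotalIssued(db, number)
    if err != nil {
        return nil, err
    }
    burnt, err := ReadTotalBurnt(db, number)
    if err != nil {
        return nil, err
    }
    return new(big.Int).Sub(issued, burnt), nil
}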
func ReadCumulativeGasUsed(db kv.Getter, number uint64) (*big.Int, error) {
data, err := db.GetOne(kv.CumulativeGasIndex, dbutils.EncodeBlockNumber(number))
if err != nil {
return nil, err
}
if len(data) == 0 {
return big.NewInt(0), nil
}
return new(big.Int).SetBytes(data), nil
}
func WriteCumulativeGasUsed(db kv.Putter, number uint64, cumulativeGasUsed *big.Int) error {
return db.Put(kv.CumulativeGasIndex, dbutils.EncodeBlockNumber(number), cumulativeGasUsed.Bytes())
}
func ReadHeaderByNumber(db kv.Getter, number uint64) *types.Header {
hash, err := ReadCanonicalHash(db, number)
if err != nil {
log.Error("ReadCanonicalHash failed", "err", err)
return nil
}
if hash == (common.Hash{}) {
return nil
}
return ReadHeader(db, hash, number)
}
func ReadHeaderByHash(db kv.Getter, hash common.Hash) (*types.Header, error) {
number := ReadHeaderNumber(db, hash)
if number == nil {
return nil, nil
}
return ReadHeader(db, hash, *number), nil
}
func ReadAncestor(db kv.Getter, hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64, blockReader interfaces.HeaderAndCanonicalReader) (common.Hash, uint64) {
if ancestor > number {
return common.Hash{}, 0
}
if ancestor == 1 {
header, err := blockReader.Header(context.Background(), db, hash, number)
if err != nil {
panic(err)
}
// in this case it is cheaper to just read the header
if header != nil {
return header.ParentHash, number - 1
}
return common.Hash{}, 0
}
for ancestor != 0 {
h, err := blockReader.CanonicalHash(context.Background(), db, number)
if err != nil {
panic(err)
}
if h == hash {
ancestorHash, err := blockReader.CanonicalHash(context.Background(), db, number-ancestor)
if err != nil {
panic(err)
}
h, err := blockReader.CanonicalHash(context.Background(), db, number)
if err != nil {
panic(err)
}
if h == hash {
number -= ancestor
return ancestorHash, number
}
}
if *maxNonCanonical == 0 {
return common.Hash{}, 0
}
*maxNonCanonical--
ancestor--
header, err := blockReader.Header(context.Background(), db, hash, number)
if err != nil {
panic(err)
}
if header == nil {
return common.Hash{}, 0
}
hash = header.ParentHash
number--
}
return hash, number
}
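// exampleNthAncestor is a usage sketch (hypothetical helper): ReadAncestor walks back
// `ancestor` blocks from (hash, number), following canonical hashes where possible and
// parent headers otherwise; maxNonCanonical bounds how many non-canonical hops are
// allowed before giving up, and the budget chosen here is arbitrary.
func exampleNthAncestor(db kv.Getter, hash common.Hash, number, n uint64, blockReader interfaces.HeaderAndCanonicalReader) (common.Hash, uint64) {
    maxNonCanonical := uint64(100) // arbitrary budget for this sketch
    return ReadAncestor(db, hash, number, n, &maxNonCanonical, blockReader)
}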
func DeleteNewerEpochs(tx kv.RwTx, number uint64) error {
if err := tx.ForEach(kv.PendingEpoch, dbutils.EncodeBlockNumber(number), func(k, v []byte) error {
return tx.Delete(kv.PendingEpoch, k, nil)
}); err != nil {
return err
}
return tx.ForEach(kv.Epoch, dbutils.EncodeBlockNumber(number), func(k, v []byte) error {
return tx.Delete(kv.Epoch, k, nil)
})
}
func ReadEpoch(tx kv.Tx, blockNum uint64, blockHash common.Hash) (transitionProof []byte, err error) {
k := make([]byte, dbutils.NumberLength+common.HashLength)
binary.BigEndian.PutUint64(k, blockNum)
copy(k[dbutils.NumberLength:], blockHash[:])
return tx.GetOne(kv.Epoch, k)
}
func FindEpochBeforeOrEqualNumber(tx kv.Tx, n uint64) (blockNum uint64, blockHash common.Hash, transitionProof []byte, err error) {
c, err := tx.Cursor(kv.Epoch)
if err != nil {
return 0, common.Hash{}, nil, err
}
defer c.Close()
seek := dbutils.EncodeBlockNumber(n)
k, v, err := c.Seek(seek)
if err != nil {
return 0, common.Hash{}, nil, err
}
if k != nil {
num := binary.BigEndian.Uint64(k)
if num == n {
return n, common.BytesToHash(k[dbutils.NumberLength:]), v, nil
}
}
k, v, err = c.Prev()
if err != nil {
return 0, common.Hash{}, nil, err
}
if k == nil {
return 0, common.Hash{}, nil, nil
}
return binary.BigEndian.Uint64(k), common.BytesToHash(k[dbutils.NumberLength:]), v, nil
}
func WriteEpoch(tx kv.RwTx, blockNum uint64, blockHash common.Hash, transitionProof []byte) (err error) {
k := make([]byte, dbutils.NumberLength+common.HashLength)
binary.BigEndian.PutUint64(k, blockNum)
copy(k[dbutils.NumberLength:], blockHash[:])
return tx.Put(kv.Epoch, k, transitionProof)
}
func ReadPendingEpoch(tx kv.Tx, blockNum uint64, blockHash common.Hash) (transitionProof []byte, err error) {
k := make([]byte, 8+32)
binary.BigEndian.PutUint64(k, blockNum)
copy(k[8:], blockHash[:])
return tx.GetOne(kv.PendingEpoch, k)
}
func WritePendingEpoch(tx kv.RwTx, blockNum uint64, blockHash common.Hash, transitionProof []byte) (err error) {
k := make([]byte, 8+32)
binary.BigEndian.PutUint64(k, blockNum)
copy(k[8:], blockHash[:])
return tx.Put(kv.PendingEpoch, k, transitionProof)
}
// Transitioned returns true if the block number comes after POS transition or is the last POW block
func Transitioned(db kv.Getter, blockNum uint64, terminalTotalDifficulty *big.Int) (trans bool, err error) {
if terminalTotalDifficulty == nil {
return false, nil
}
if terminalTotalDifficulty.Cmp(common.Big0) == 0 {
return true, nil
}
header := ReadHeaderByNumber(db, blockNum)
if header == nil {
return false, nil
}
if header.Difficulty.Cmp(common.Big0) == 0 {
return true, nil
}
headerTd, err := ReadTd(db, header.Hash(), blockNum)
if err != nil {
return false, err
}
return headerTd.Cmp(terminalTotalDifficulty) >= 0, nil
}