package migrations

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"time"

	"github.com/ledgerwatch/erigon-lib/kv"

	"github.com/ledgerwatch/erigon/common"
	"github.com/ledgerwatch/erigon/common/changeset"
	"github.com/ledgerwatch/erigon/consensus/ethash"
	"github.com/ledgerwatch/erigon/consensus/misc"
	"github.com/ledgerwatch/erigon/core"
	"github.com/ledgerwatch/erigon/core/rawdb"
	"github.com/ledgerwatch/erigon/core/state"
	"github.com/ledgerwatch/erigon/core/types"
	"github.com/ledgerwatch/erigon/core/vm"
	"github.com/ledgerwatch/erigon/ethdb"
	"github.com/ledgerwatch/erigon/ethdb/cbor"
	"github.com/ledgerwatch/erigon/params"

	"github.com/ledgerwatch/log/v3"
)
func availableReceiptFrom(tx kv.Tx) (uint64, error) {
|
|
|
|
c, err := tx.Cursor(kv.Receipts)
|
Pruning for: exec, log_index, tx_lookup, history stages (#2399)
* Pruning for: exec, log_index, tx_lookup, history stages
* Pruning for: exec, log_index, tx_lookup, history stages
* Pruning for: exec, log_index, tx_lookup, history stages
* Pruning for: exec, log_index, tx_lookup, history stages
* add tvm flag
* save
* db migration for storage mode
add flag --prune=
remove flag --storage-mode=
add flag --experiments=tevm,...
rename integration set_storage_mode to set_prune
* fix
* forward move of stages must skip everything before PruneTo
* keep in db progress of prune method
* keep in db progress of prune method
* simplify logs
* simplify logs
* simplify logs
* fix test
* simplify logs
* simplify logs
* simplify logs
* simplify logs
* remove callTraceSet as dupsort
use etl transform for txlookup prune
remove some logs
* cleanup tests a bit
* print_stages and eth_sync to show prune progress
* fix print_stages
* add readme about --prune.to flag
* more docs
* add --prune.history.older and other flags support
* fix migration on empty db
* better toString
* better toString
2021-07-20 20:03:19 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
defer c.Close()
|
|
|
|
k, _, err := c.First()
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
if len(k) == 0 {
|
|
|
|
return 0, nil
|
|
|
|
}
|
|
|
|
return binary.BigEndian.Uint64(k), nil
|
|
|
|
}
|
|
|
|
|
2021-07-05 10:25:18 +00:00
|
|
|
var ReceiptRepair = Migration{
|
2021-07-04 13:48:13 +00:00
|
|
|
Name: "receipt_repair",
|
2021-07-28 02:47:38 +00:00
|
|
|
Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) {
|
2021-07-24 04:28:05 +00:00
|
|
|
tx, err := db.BeginRw(context.Background())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2021-07-04 13:48:13 +00:00
|
|
|
}
|
2021-07-24 04:28:05 +00:00
|
|
|
defer tx.Rollback()
|
Pruning for: exec, log_index, tx_lookup, history stages (#2399)
* Pruning for: exec, log_index, tx_lookup, history stages
* Pruning for: exec, log_index, tx_lookup, history stages
* Pruning for: exec, log_index, tx_lookup, history stages
* Pruning for: exec, log_index, tx_lookup, history stages
* add tvm flag
* save
* db migration for storage mode
add flag --prune=
remove flag --storage-mode=
add flag --experiments=tevm,...
rename integration set_storage_mode to set_prune
* fix
* forward move of stages must skip everything before PruneTo
* keep in db progress of prune method
* keep in db progress of prune method
* simplify logs
* simplify logs
* simplify logs
* fix test
* simplify logs
* simplify logs
* simplify logs
* simplify logs
* remove callTraceSet as dupsort
use etl transform for txlookup prune
remove some logs
* cleanup tests a bit
* print_stages and eth_sync to show prune progress
* fix print_stages
* add readme about --prune.to flag
* more docs
* add --prune.history.older and other flags support
* fix migration on empty db
* better toString
* better toString
2021-07-20 20:03:19 +00:00
|
|
|
|
|
|
|
blockNum, err := changeset.AvailableFrom(tx)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
receiptsFrom, err := availableReceiptFrom(tx)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if receiptsFrom > blockNum {
|
|
|
|
blockNum = receiptsFrom
|
2021-07-04 13:48:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
genesisBlock, err := rawdb.ReadBlockByNumber(tx, 0)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
chainConfig, cerr := rawdb.ReadChainConfig(tx, genesisBlock.Hash())
|
|
|
|
if cerr != nil {
|
|
|
|
return cerr
|
|
|
|
}
|
|
|
|
vmConfig := vm.Config{}
|
|
|
|
noOpWriter := state.NewNoopWriter()
|
|
|
|
var buf bytes.Buffer
|
|
|
|
fixedCount := 0
|
|
|
|
logInterval := 30 * time.Second
|
|
|
|
logEvery := time.NewTicker(logInterval)
|
|
|
|
var key [8]byte
|
|
|
|
var v []byte
|
Pruning for: exec, log_index, tx_lookup, history stages (#2399)
* Pruning for: exec, log_index, tx_lookup, history stages
* Pruning for: exec, log_index, tx_lookup, history stages
* Pruning for: exec, log_index, tx_lookup, history stages
* Pruning for: exec, log_index, tx_lookup, history stages
* add tvm flag
* save
* db migration for storage mode
add flag --prune=
remove flag --storage-mode=
add flag --experiments=tevm,...
rename integration set_storage_mode to set_prune
* fix
* forward move of stages must skip everything before PruneTo
* keep in db progress of prune method
* keep in db progress of prune method
* simplify logs
* simplify logs
* simplify logs
* fix test
* simplify logs
* simplify logs
* simplify logs
* simplify logs
* remove callTraceSet as dupsort
use etl transform for txlookup prune
remove some logs
* cleanup tests a bit
* print_stages and eth_sync to show prune progress
* fix print_stages
* add readme about --prune.to flag
* more docs
* add --prune.history.older and other flags support
* fix migration on empty db
* better toString
* better toString
2021-07-20 20:03:19 +00:00
|
|
|
for ; true; blockNum++ {
|
2021-07-04 13:48:13 +00:00
|
|
|
select {
|
|
|
|
default:
|
|
|
|
case <-logEvery.C:
|
|
|
|
log.Info("Progress", "block", blockNum, "fixed", fixedCount)
|
|
|
|
}
|
|
|
|
var hash common.Hash
|
|
|
|
if hash, err = rawdb.ReadCanonicalHash(tx, blockNum); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if hash == (common.Hash{}) {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
binary.BigEndian.PutUint64(key[:], blockNum)
|
2021-07-28 02:47:38 +00:00
|
|
|
if v, err = tx.GetOne(kv.Receipts, key[:]); err != nil {
|
2021-07-04 13:48:13 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
var receipts types.Receipts
|
|
|
|
if err = cbor.Unmarshal(&receipts, bytes.NewReader(v)); err == nil {
|
|
|
|
broken := false
|
|
|
|
for _, receipt := range receipts {
|
|
|
|
if receipt.CumulativeGasUsed < 10000 {
|
|
|
|
broken = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !broken {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
var block *types.Block
|
|
|
|
if block, _, err = rawdb.ReadBlockWithSenders(tx, hash, blockNum); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2021-07-24 07:14:11 +00:00
|
|
|
dbstate := state.NewPlainState(tx, block.NumberU64()-1)
|
2021-07-04 13:48:13 +00:00
|
|
|
intraBlockState := state.New(dbstate)
|
|
|
|
|
|
|
|
getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(tx, hash, number) }
|
2021-08-10 02:48:56 +00:00
|
|
|
contractHasTEVM := ethdb.GetHasTEVM(tx)
|
|
|
|
receipts1, err1 := runBlock(intraBlockState, noOpWriter, noOpWriter, chainConfig, getHeader, contractHasTEVM, block, vmConfig)
|
2021-07-04 13:48:13 +00:00
|
|
|
if err1 != nil {
|
|
|
|
return err1
|
|
|
|
}
|
|
|
|
fix := true
|
|
|
|
if chainConfig.IsByzantium(block.Number().Uint64()) {
|
|
|
|
receiptSha := types.DeriveSha(receipts1)
|
|
|
|
if receiptSha != block.Header().ReceiptHash {
|
|
|
|
fmt.Printf("(retrace) mismatched receipt headers for block %d: %x, %x\n", block.NumberU64(), receiptSha, block.Header().ReceiptHash)
|
|
|
|
fix = false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if fix {
|
|
|
|
// All good, we can fix receipt record
|
|
|
|
buf.Reset()
|
|
|
|
err := cbor.Marshal(&buf, receipts1)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("encode block receipts for block %d: %v", blockNum, err)
|
|
|
|
}
|
2021-07-28 02:47:38 +00:00
|
|
|
if err = tx.Put(kv.Receipts, key[:], buf.Bytes()); err != nil {
|
2021-07-04 13:48:13 +00:00
|
|
|
return fmt.Errorf("writing receipts for block %d: %v", blockNum, err)
|
|
|
|
}
|
|
|
|
fixedCount++
|
|
|
|
}
|
|
|
|
}
|
2021-07-24 04:28:05 +00:00
|
|
|
if err := BeforeCommit(tx, nil, true); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return tx.Commit()
|
2021-07-04 13:48:13 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
func runBlock(ibs *state.IntraBlockState, txnWriter state.StateWriter, blockWriter state.StateWriter,
|
2021-08-10 02:48:56 +00:00
|
|
|
chainConfig *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, contractHasTEVM func(common.Hash) (bool, error), block *types.Block, vmConfig vm.Config) (types.Receipts, error) {
|
2021-07-04 13:48:13 +00:00
|
|
|
header := block.Header()
|
|
|
|
vmConfig.TraceJumpDest = true
|
|
|
|
engine := ethash.NewFullFaker()
|
|
|
|
gp := new(core.GasPool).AddGas(block.GasLimit())
|
|
|
|
usedGas := new(uint64)
|
|
|
|
var receipts types.Receipts
|
|
|
|
if chainConfig.DAOForkSupport && chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(block.Number()) == 0 {
|
|
|
|
misc.ApplyDAOHardFork(ibs)
|
|
|
|
}
|
|
|
|
for i, tx := range block.Transactions() {
|
|
|
|
ibs.Prepare(tx.Hash(), block.Hash(), i)
|
2021-08-10 02:48:56 +00:00
|
|
|
receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, txnWriter, header, tx, usedGas, vmConfig, contractHasTEVM)
|
2021-07-04 13:48:13 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("could not apply tx %d [%x] failed: %v", i, tx.Hash(), err)
|
|
|
|
}
|
|
|
|
receipts = append(receipts, receipt)
|
|
|
|
}
|
|
|
|
|
|
|
|
if !vmConfig.ReadOnly {
|
|
|
|
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
|
2021-07-21 11:13:26 +00:00
|
|
|
if _, err := engine.FinalizeAndAssemble(chainConfig, header, ibs, block.Transactions(), block.Uncles(), receipts, nil, nil, nil, nil); err != nil {
|
2021-07-04 13:48:13 +00:00
|
|
|
return nil, fmt.Errorf("finalize of block %d failed: %v", block.NumberU64(), err)
|
|
|
|
}
|
|
|
|
|
2021-07-05 18:52:50 +00:00
|
|
|
if err := ibs.CommitBlock(chainConfig.Rules(header.Number.Uint64()), blockWriter); err != nil {
|
2021-07-04 13:48:13 +00:00
|
|
|
return nil, fmt.Errorf("committing block %d failed: %v", block.NumberU64(), err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return receipts, nil
|
|
|
|
}
|