Added db loggers to all db callers and fixed flag settings (#9099)

Mdbx now takes a logger - but this had not been pushed to all callers -
meaning they were using an invalid logger

This fixes the log propagation.

It also fixes a start-up issue for http.enabled and txpool.disable
that was created by a previous merge
This commit is contained in:
Mark Holt 2023-12-31 10:10:08 +00:00 committed by GitHub
parent 46ecf030f5
commit 19bc328a07
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
49 changed files with 309 additions and 193 deletions

View File

@ -92,7 +92,7 @@ func (s *Sentinel) createLocalNode(
udpPort, tcpPort int,
tmpDir string,
) (*enode.LocalNode, error) {
db, err := enode.OpenDB(s.ctx, "", tmpDir)
db, err := enode.OpenDB(s.ctx, "", tmpDir, s.logger)
if err != nil {
return nil, fmt.Errorf("could not open node's peer database: %w", err)
}

View File

@ -120,7 +120,7 @@ func main() {
ctx, cancel := common.RootContext()
defer cancel()
db, err := enode.OpenDB(ctx, "" /* path */, "" /* tmpDir */)
db, err := enode.OpenDB(ctx, "" /* path */, "" /* tmpDir */, logger)
if err != nil {
panic(err)
}

View File

@ -182,18 +182,18 @@ func (n *BlockProducer) IsBlockProducer() bool {
return true
}
type NonBlockProducer struct {
type BlockConsumer struct {
NodeArgs
HttpApi string `arg:"--http.api" default:"admin,eth,debug,net,trace,web3,erigon,txpool" json:"http.api"`
TorrentPort string `arg:"--torrent.port" default:"42070" json:"torrent.port"`
NoDiscover string `arg:"--nodiscover" flag:"" default:"true" json:"nodiscover"`
}
func (n *NonBlockProducer) IsBlockProducer() bool {
func (n *BlockConsumer) IsBlockProducer() bool {
return false
}
func (n *NonBlockProducer) Account() *accounts.Account {
func (n *BlockConsumer) Account() *accounts.Account {
return nil
}

View File

@ -36,7 +36,7 @@ func TestNodeArgs(t *testing.T) {
t.Fatal(asMap, "not found")
}
nodeArgs, _ = args.AsArgs(args.NonBlockProducer{
nodeArgs, _ = args.AsArgs(args.BlockConsumer{
NodeArgs: args.NodeArgs{
DataDir: filepath.Join("data", fmt.Sprintf("%d", 2)),
StaticPeers: "enode",

View File

@ -145,6 +145,10 @@ func CurrentNetwork(ctx context.Context) *Network {
}
}
if devnet, ok := ctx.Value(ckDevnet).(Devnet); ok {
return devnet.SelectNetwork(ctx, 0)
}
return nil
}

View File

@ -10,6 +10,7 @@ import (
"syscall"
"time"
"github.com/ledgerwatch/erigon/cmd/devnet/networks"
"github.com/ledgerwatch/erigon/cmd/devnet/services"
"github.com/ledgerwatch/erigon/cmd/devnet/services/polygon"
@ -23,7 +24,6 @@ import (
"github.com/ledgerwatch/erigon/cmd/devnet/devnetutils"
"github.com/ledgerwatch/erigon/cmd/devnet/requests"
"github.com/ledgerwatch/erigon/cmd/devnet/scenarios"
"github.com/ledgerwatch/erigon/cmd/devnet/tests"
"github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon/cmd/utils/flags"
@ -119,6 +119,18 @@ var (
Usage: "internal flag",
}
txCountFlag = cli.IntFlag{
Name: "txcount",
Usage: "Transaction count, (scenario dependent - may be total or reoccurring)",
Value: 100,
}
BlockProducersFlag = cli.UintFlag{
Name: "block-producers",
Usage: "The number of block producers to instantiate in the network",
Value: 1,
}
WaitFlag = cli.BoolFlag{
Name: "wait",
Usage: "Wait until interrupted after all scenarios have run",
@ -156,6 +168,8 @@ func main() {
&insecureFlag,
&metricsURLsFlag,
&WaitFlag,
&txCountFlag,
&BlockProducersFlag,
&logging.LogVerbosityFlag,
&logging.LogConsoleVerbosityFlag,
&logging.LogDirVerbosityFlag,
@ -241,7 +255,8 @@ func mainContext(ctx *cli.Context) error {
go connectDiagnosticsIfEnabled(ctx, logger)
enabledScenarios := strings.Split(ctx.String(ScenariosFlag.Name), ",")
if err = allScenarios(runCtx).Run(runCtx, enabledScenarios...); err != nil {
if err = allScenarios(ctx, runCtx).Run(runCtx, enabledScenarios...); err != nil {
return err
}
@ -256,7 +271,7 @@ func mainContext(ctx *cli.Context) error {
return nil
}
func allScenarios(runCtx devnet.Context) scenarios.Scenarios {
func allScenarios(cliCtx *cli.Context, runCtx devnet.Context) scenarios.Scenarios {
// unsubscribe from all the subscriptions made
defer services.UnsubscribeAll()
@ -313,6 +328,11 @@ func allScenarios(runCtx devnet.Context) scenarios.Scenarios {
//{Text: "BatchProcessTransfers", Args: []any{"child-funder", 1, 10, 2, 2}},
},
},
"block-production": {
Steps: []*scenarios.Step{
{Text: "SendTxLoad", Args: []any{recipientAddress, accounts.DevAddress, sendValue, cliCtx.Uint(txCountFlag.Name)}},
},
},
}
}
@ -321,21 +341,22 @@ func initDevnet(ctx *cli.Context, logger log.Logger) (devnet.Devnet, error) {
chainName := ctx.String(ChainFlag.Name)
baseRpcHost := ctx.String(BaseRpcHostFlag.Name)
baseRpcPort := ctx.Int(BaseRpcPortFlag.Name)
producerCount := int(ctx.Uint(BlockProducersFlag.Name))
switch chainName {
case networkname.BorDevnetChainName:
if ctx.Bool(WithoutHeimdallFlag.Name) {
return tests.NewBorDevnetWithoutHeimdall(dataDir, baseRpcHost, baseRpcPort, logger), nil
return networks.NewBorDevnetWithoutHeimdall(dataDir, baseRpcHost, baseRpcPort, logger), nil
} else if ctx.Bool(LocalHeimdallFlag.Name) {
heimdallGrpcAddr := ctx.String(HeimdallGrpcAddressFlag.Name)
sprintSize := uint64(ctx.Int(BorSprintSizeFlag.Name))
return tests.NewBorDevnetWithLocalHeimdall(dataDir, baseRpcHost, baseRpcPort, heimdallGrpcAddr, sprintSize, logger), nil
return networks.NewBorDevnetWithLocalHeimdall(dataDir, baseRpcHost, baseRpcPort, heimdallGrpcAddr, sprintSize, producerCount, logger), nil
} else {
return tests.NewBorDevnetWithRemoteHeimdall(dataDir, baseRpcHost, baseRpcPort, logger), nil
return networks.NewBorDevnetWithRemoteHeimdall(dataDir, baseRpcHost, baseRpcPort, producerCount, logger), nil
}
case networkname.DevChainName:
return tests.NewDevDevnet(dataDir, baseRpcHost, baseRpcPort, logger), nil
return networks.NewDevDevnet(dataDir, baseRpcHost, baseRpcPort, producerCount, logger), nil
default:
return nil, fmt.Errorf("unknown network: '%s'", chainName)

View File

@ -1,4 +1,4 @@
package tests
package networks
import (
"time"
@ -47,7 +47,7 @@ func NewBorDevnetWithoutHeimdall(
},
AccountSlots: 200,
},
&args.NonBlockProducer{
&args.BlockConsumer{
NodeArgs: args.NodeArgs{
ConsoleVerbosity: "0",
DirVerbosity: "5",
@ -67,6 +67,7 @@ func NewBorDevnetWithHeimdall(
heimdall *polygon.Heimdall,
heimdallGrpcAddr string,
checkpointOwner *accounts.Account,
producerCount int,
withMilestones bool,
logger log.Logger,
) devnet.Devnet {
@ -77,6 +78,23 @@ func NewBorDevnetWithHeimdall(
services = append(services, heimdall)
}
var nodes []devnet.Node
if producerCount == 0 {
producerCount++
}
for i := 0; i < producerCount; i++ {
nodes = append(nodes, &args.BlockProducer{
NodeArgs: args.NodeArgs{
ConsoleVerbosity: "0",
DirVerbosity: "5",
HeimdallGrpcAddr: heimdallGrpcAddr,
},
AccountSlots: 20000,
})
}
borNetwork := devnet.Network{
DataDir: dataDir,
Chain: networkname.BorDevnetChainName,
@ -91,39 +109,14 @@ func NewBorDevnetWithHeimdall(
Alloc: types.GenesisAlloc{
faucetSource.Address: {Balance: accounts.EtherAmount(200_000)},
},
Nodes: []devnet.Node{
&args.BlockProducer{
Nodes: append(nodes,
&args.BlockConsumer{
NodeArgs: args.NodeArgs{
ConsoleVerbosity: "0",
DirVerbosity: "5",
HeimdallGrpcAddr: heimdallGrpcAddr,
},
AccountSlots: 200,
},
&args.BlockProducer{
NodeArgs: args.NodeArgs{
ConsoleVerbosity: "0",
DirVerbosity: "5",
HeimdallGrpcAddr: heimdallGrpcAddr,
},
AccountSlots: 200,
},
/*&args.BlockProducer{
Node: args.Node{
ConsoleVerbosity: "0",
DirVerbosity: "5",
HeimdallGrpcAddr: heimdallGrpcAddr,
},
AccountSlots: 200,
},*/
&args.NonBlockProducer{
NodeArgs: args.NodeArgs{
ConsoleVerbosity: "0",
DirVerbosity: "5",
HeimdallGrpcAddr: heimdallGrpcAddr,
},
},
},
}),
}
devNetwork := devnet.Network{
@ -150,7 +143,7 @@ func NewBorDevnetWithHeimdall(
DevPeriod: 5,
AccountSlots: 200,
},
&args.NonBlockProducer{
&args.BlockConsumer{
NodeArgs: args.NodeArgs{
ConsoleVerbosity: "0",
DirVerbosity: "3",
@ -169,6 +162,7 @@ func NewBorDevnetWithRemoteHeimdall(
dataDir string,
baseRpcHost string,
baseRpcPort int,
producerCount int,
logger log.Logger,
) devnet.Devnet {
heimdallGrpcAddr := ""
@ -181,6 +175,7 @@ func NewBorDevnetWithRemoteHeimdall(
nil,
heimdallGrpcAddr,
checkpointOwner,
producerCount,
withMilestones,
logger)
}
@ -191,6 +186,7 @@ func NewBorDevnetWithLocalHeimdall(
baseRpcPort int,
heimdallGrpcAddr string,
sprintSize uint64,
producerCount int,
logger log.Logger,
) devnet.Devnet {
config := *params.BorDevnetChainConfig
@ -216,6 +212,7 @@ func NewBorDevnetWithLocalHeimdall(
heimdall,
heimdallGrpcAddr,
checkpointOwner,
producerCount,
// milestones are not supported yet on the local heimdall
false,
logger)

View File

@ -1,4 +1,4 @@
package tests
package networks
import (
"github.com/ledgerwatch/erigon-lib/chain/networkname"
@ -14,10 +14,27 @@ func NewDevDevnet(
dataDir string,
baseRpcHost string,
baseRpcPort int,
producerCount int,
logger log.Logger,
) devnet.Devnet {
faucetSource := accounts.NewAccount("faucet-source")
var nodes []devnet.Node
if producerCount == 0 {
producerCount++
}
for i := 0; i < producerCount; i++ {
nodes = append(nodes, &args.BlockProducer{
NodeArgs: args.NodeArgs{
ConsoleVerbosity: "0",
DirVerbosity: "5",
},
AccountSlots: 200,
})
}
network := devnet.Network{
DataDir: dataDir,
Chain: networkname.DevChainName,
@ -32,21 +49,13 @@ func NewDevDevnet(
account_services.NewFaucet(networkname.DevChainName, faucetSource),
},
MaxNumberOfEmptyBlockChecks: 30,
Nodes: []devnet.Node{
&args.BlockProducer{
Nodes: append(nodes,
&args.BlockConsumer{
NodeArgs: args.NodeArgs{
ConsoleVerbosity: "0",
DirVerbosity: "5",
},
AccountSlots: 200,
},
&args.NonBlockProducer{
NodeArgs: args.NodeArgs{
ConsoleVerbosity: "0",
DirVerbosity: "5",
},
},
},
}),
}
return devnet.Devnet{&network}

View File

@ -4,17 +4,19 @@ import (
"fmt"
"os"
"runtime"
"strconv"
"testing"
"github.com/ledgerwatch/erigon-lib/chain/networkname"
"github.com/ledgerwatch/erigon/cmd/devnet/devnet"
"github.com/ledgerwatch/erigon/cmd/devnet/networks"
"github.com/ledgerwatch/erigon/cmd/devnet/services"
"github.com/ledgerwatch/erigon/cmd/devnet/services/polygon"
"github.com/ledgerwatch/erigon/turbo/debug"
"github.com/ledgerwatch/log/v3"
)
func initDevnet(chainName string, dataDir string, logger log.Logger) (devnet.Devnet, error) {
func initDevnet(chainName string, dataDir string, producerCount int, logger log.Logger) (devnet.Devnet, error) {
const baseRpcHost = "localhost"
const baseRpcPort = 8545
@ -22,17 +24,17 @@ func initDevnet(chainName string, dataDir string, logger log.Logger) (devnet.Dev
case networkname.BorDevnetChainName:
heimdallGrpcAddr := polygon.HeimdallGrpcAddressDefault
const sprintSize uint64 = 0
return NewBorDevnetWithLocalHeimdall(dataDir, baseRpcHost, baseRpcPort, heimdallGrpcAddr, sprintSize, logger), nil
return networks.NewBorDevnetWithLocalHeimdall(dataDir, baseRpcHost, baseRpcPort, heimdallGrpcAddr, sprintSize, producerCount, logger), nil
case networkname.DevChainName:
return NewDevDevnet(dataDir, baseRpcHost, baseRpcPort, logger), nil
return networks.NewDevDevnet(dataDir, baseRpcHost, baseRpcPort, producerCount, logger), nil
case "":
envChainName, _ := os.LookupEnv("DEVNET_CHAIN")
if envChainName == "" {
envChainName = networkname.DevChainName
}
return initDevnet(envChainName, dataDir, logger)
return initDevnet(envChainName, dataDir, producerCount, logger)
default:
return nil, fmt.Errorf("unknown network: '%s'", chainName)
@ -48,8 +50,15 @@ func ContextStart(t *testing.T, chainName string) (devnet.Context, error) {
logger := log.New()
dataDir := t.TempDir()
envProducerCount, _ := os.LookupEnv("PRODUCER_COUNT")
if envProducerCount == "" {
envProducerCount = "1"
}
producerCount, _ := strconv.ParseUint(envProducerCount, 10, 64)
var network devnet.Devnet
network, err := initDevnet(chainName, dataDir, logger)
network, err := initDevnet(chainName, dataDir, int(producerCount), logger)
if err != nil {
return nil, fmt.Errorf("ContextStart initDevnet failed: %w", err)
}

View File

@ -27,6 +27,7 @@ func init() {
scenarios.StepHandler(CheckTxPoolContent),
scenarios.StepHandler(SendTxWithDynamicFee),
scenarios.StepHandler(AwaitBlocks),
scenarios.StepHandler(SendTxLoad),
)
}
@ -93,7 +94,7 @@ func SendTxWithDynamicFee(ctx context.Context, to, from string, amount uint64) (
// get the latest nonce for the next transaction
logger := devnet.Logger(ctx)
lowerThanBaseFeeTxs, higherThanBaseFeeTxs, err := CreateManyEIP1559TransactionsRefWithBaseFee2(ctx, to, from)
lowerThanBaseFeeTxs, higherThanBaseFeeTxs, err := CreateManyEIP1559TransactionsRefWithBaseFee2(ctx, to, from, 200)
if err != nil {
logger.Error("failed CreateManyEIP1559TransactionsRefWithBaseFee", "error", err)
return nil, err
@ -112,7 +113,7 @@ func SendTxWithDynamicFee(ctx context.Context, to, from string, amount uint64) (
return nil, err
}
CheckTxPoolContent(ctx, 100, 0, 100)
CheckTxPoolContent(ctx, len(higherThanBaseFeeHashlist), 0, len(lowerThanBaseFeeHashlist))
CheckTxPoolContent(ctx, -1, -1, -1)
@ -125,6 +126,55 @@ func SendTxWithDynamicFee(ctx context.Context, to, from string, amount uint64) (
return append(lowerThanBaseFeeHashlist, higherThanBaseFeeHashlist...), nil
}
// SendTxLoad continuously creates and submits EIP-1559 transactions at a
// target rate of roughly txPerSec transactions per second. Each batch is
// split between transactions priced below and above the current base fee
// (see CreateManyEIP1559TransactionsRefWithBaseFee2). The loop runs until
// the context is cancelled (returning nil) or a create/send step fails
// (returning that error).
//
// NOTE(review): the amount parameter is currently unused — confirm whether
// it should drive the per-transaction value.
func SendTxLoad(ctx context.Context, to, from string, amount uint64, txPerSec uint) error {
	logger := devnet.Logger(ctx)

	// Each iteration is paced to ~250ms, i.e. 4 batches per second, so a
	// batch holds a quarter of the requested per-second transaction count.
	batchCount := txPerSec / 4

	if batchCount < 1 {
		batchCount = 1
	}

	ms250 := 250 * time.Millisecond

	for {
		start := time.Now()

		lowtx, hightx, err := CreateManyEIP1559TransactionsRefWithBaseFee2(ctx, to, from, int(batchCount))

		if err != nil {
			logger.Error("failed Create Txs", "error", err)
			return err
		}

		_, err = SendManyTransactions(ctx, lowtx)

		if err != nil {
			// fix: this branch sends the lower-than-base-fee batch (label was swapped)
			logger.Error("failed SendManyTransactions(lowerThanBaseFeeTxs)", "error", err)
			return err
		}

		_, err = SendManyTransactions(ctx, hightx)

		if err != nil {
			// fix: this branch sends the higher-than-base-fee batch (label was swapped)
			logger.Error("failed SendManyTransactions(higherThanBaseFeeTxs)", "error", err)
			return err
		}

		// Non-blocking cancellation check between batches.
		select {
		case <-ctx.Done():
			return nil
		default:
		}

		// Pace the loop: pad each iteration up to 250ms so the overall
		// submission rate stays close to txPerSec.
		duration := time.Since(start)

		if duration < ms250 {
			time.Sleep(ms250 - duration)
		}
	}
}
func AwaitBlocks(ctx context.Context, sleepTime time.Duration) error {
logger := devnet.Logger(ctx)
@ -154,7 +204,6 @@ func AwaitBlocks(ctx context.Context, sleepTime time.Duration) error {
}
const gasPrice = 912_345_678
const gasAmount = 875_000_000
func CreateManyEIP1559TransactionsRefWithBaseFee(ctx context.Context, to, from string, logger log.Logger) ([]types.Transaction, []types.Transaction, error) {
toAddress := libcommon.HexToAddress(to)
@ -177,7 +226,7 @@ func CreateManyEIP1559TransactionsRefWithBaseFee(ctx context.Context, to, from s
return lowerBaseFeeTransactions, higherBaseFeeTransactions, nil
}
func CreateManyEIP1559TransactionsRefWithBaseFee2(ctx context.Context, to, from string) ([]types.Transaction, []types.Transaction, error) {
func CreateManyEIP1559TransactionsRefWithBaseFee2(ctx context.Context, to, from string, count int) ([]types.Transaction, []types.Transaction, error) {
toAddress := libcommon.HexToAddress(to)
fromAddress := libcommon.HexToAddress(from)
@ -188,7 +237,10 @@ func CreateManyEIP1559TransactionsRefWithBaseFee2(ctx context.Context, to, from
devnet.Logger(ctx).Info("BaseFeePerGas2", "val", baseFeePerGas)
lowerBaseFeeTransactions, higherBaseFeeTransactions, err := signEIP1559TxsLowerAndHigherThanBaseFee2(ctx, 100, 100, baseFeePerGas, toAddress, fromAddress)
lower := count - devnetutils.RandomInt(count)
higher := count - lower
lowerBaseFeeTransactions, higherBaseFeeTransactions, err := signEIP1559TxsLowerAndHigherThanBaseFee2(ctx, lower, higher, baseFeePerGas, toAddress, fromAddress)
if err != nil {
return nil, nil, fmt.Errorf("failed signEIP1559TxsLowerAndHigherThanBaseFee2: %v", err)
@ -207,7 +259,7 @@ func CreateTransaction(node devnet.Node, to, from string, value uint64) (types.T
if strings.HasPrefix(to, "0x") {
toAddress = libcommon.HexToAddress(from)
} else {
return nil, libcommon.Address{}, fmt.Errorf("Unknown to account: %s", to)
return nil, libcommon.Address{}, fmt.Errorf("unknown to account: %s", to)
}
} else {
toAddress = toAccount.Address
@ -216,7 +268,7 @@ func CreateTransaction(node devnet.Node, to, from string, value uint64) (types.T
fromAccount := accounts.GetAccount(from)
if fromAccount == nil {
return nil, libcommon.Address{}, fmt.Errorf("Unknown from account: %s", from)
return nil, libcommon.Address{}, fmt.Errorf("unknown from account: %s", from)
}
res, err := node.GetTransactionCount(fromAccount.Address, rpc.PendingBlock)

View File

@ -154,7 +154,7 @@ func runCmd(ctx *cli.Context) error {
defer db.Close()
if ctx.String(GenesisFlag.Name) != "" {
gen := readGenesis(ctx.String(GenesisFlag.Name))
core.MustCommitGenesis(gen, db, "")
core.MustCommitGenesis(gen, db, "", log.Root())
genesisConfig = gen
chainConfig = gen.Config
} else {

View File

@ -9,6 +9,7 @@ import (
"text/tabwriter"
"github.com/ledgerwatch/erigon/turbo/backup"
"github.com/ledgerwatch/log/v3"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
@ -49,7 +50,7 @@ var cmdResetState = &cobra.Command{
return
}
if err = reset2.ResetState(db, ctx, chain, ""); err != nil {
if err = reset2.ResetState(db, ctx, chain, "", log.Root()); err != nil {
if !errors.Is(err, context.Canceled) {
logger.Error(err.Error())
}

View File

@ -922,7 +922,7 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error {
return reset2.WarmupExec(ctx, db)
}
if reset {
return reset2.ResetExec(ctx, db, chain, "")
return reset2.ResetExec(ctx, db, chain, "", logger)
}
if txtrace {

View File

@ -85,7 +85,7 @@ func NewServer(ctx context.Context, flags CommandFlags, logger log.Logger) (*Ser
}
func makeLocalNode(ctx context.Context, nodeDBPath string, privateKey *ecdsa.PrivateKey, chain string, logger log.Logger) (*enode.LocalNode, error) {
db, err := enode.OpenDB(ctx, nodeDBPath, "")
db, err := enode.OpenDB(ctx, nodeDBPath, "", logger)
if err != nil {
return nil, err
}

View File

@ -109,7 +109,7 @@ func StateRoot(ctx context.Context, genesis *types.Genesis, snapshotVersion uint
if rwTx, err = db.BeginRw(ctx); err != nil {
return err
}
_, genesisIbs, err4 := core.GenesisToBlock(genesis, "")
_, genesisIbs, err4 := core.GenesisToBlock(genesis, "", logger)
if err4 != nil {
return err4
}

View File

@ -143,7 +143,7 @@ func (rw *Worker) RunTxTaskNoLock(txTask *exec22.TxTask) {
if txTask.BlockNum == 0 {
// Genesis block
// fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum)
_, ibs, err = core.GenesisToBlock(rw.genesis, "")
_, ibs, err = core.GenesisToBlock(rw.genesis, "", logger)
if err != nil {
panic(err)
}

View File

@ -297,7 +297,7 @@ func (rw *ReconWorker) runTxTask(txTask *exec22.TxTask) error {
if txTask.BlockNum == 0 && txTask.TxIndex == -1 {
//fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum)
// Genesis block
_, ibs, err = core.GenesisToBlock(rw.genesis, "")
_, ibs, err = core.GenesisToBlock(rw.genesis, "", logger)
if err != nil {
return err
}

View File

@ -148,6 +148,7 @@ var (
TxPoolDisableFlag = cli.BoolFlag{
Name: "txpool.disable",
Usage: "Experimental external pool and block producer, see ./cmd/txpool/readme.md for more info. Disabling internal txpool and block producer.",
Value: false,
}
TxPoolGossipDisableFlag = cli.BoolFlag{
Name: "txpool.gossip.disable",

View File

@ -17,6 +17,7 @@ import (
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/turbo/stages/mock"
"github.com/ledgerwatch/erigon/turbo/trie"
"github.com/ledgerwatch/log/v3"
)
// Check that the first block of Gnosis Chain, which doesn't have any transactions,
@ -24,7 +25,7 @@ import (
func TestEmptyBlock(t *testing.T) {
require := require.New(t)
genesis := core.GnosisGenesisBlock()
genesisBlock, _, err := core.GenesisToBlock(genesis, "")
genesisBlock, _, err := core.GenesisToBlock(genesis, "", log.Root())
require.NoError(err)
genesis.Config.TerminalTotalDifficultyPassed = false

View File

@ -50,12 +50,12 @@ func TestGenesisBlockRoots(t *testing.T) {
require := require.New(t)
var err error
block, _, _ := core.GenesisToBlock(core.MainnetGenesisBlock(), "")
block, _, _ := core.GenesisToBlock(core.MainnetGenesisBlock(), "", log.Root())
if block.Hash() != params.MainnetGenesisHash {
t.Errorf("wrong mainnet genesis hash, got %v, want %v", block.Hash(), params.MainnetGenesisHash)
}
block, _, err = core.GenesisToBlock(core.GnosisGenesisBlock(), "")
block, _, err = core.GenesisToBlock(core.GnosisGenesisBlock(), "", log.Root())
require.NoError(err)
if block.Root() != params.GnosisGenesisStateRoot {
t.Errorf("wrong Gnosis Chain genesis state root, got %v, want %v", block.Root(), params.GnosisGenesisStateRoot)
@ -64,7 +64,7 @@ func TestGenesisBlockRoots(t *testing.T) {
t.Errorf("wrong Gnosis Chain genesis hash, got %v, want %v", block.Hash(), params.GnosisGenesisHash)
}
block, _, err = core.GenesisToBlock(core.ChiadoGenesisBlock(), "")
block, _, err = core.GenesisToBlock(core.ChiadoGenesisBlock(), "", log.Root())
require.NoError(err)
if block.Root() != params.ChiadoGenesisStateRoot {
t.Errorf("wrong Chiado genesis state root, got %v, want %v", block.Root(), params.ChiadoGenesisStateRoot)

View File

@ -111,7 +111,7 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overrideCancunTime *b
custom = false
}
applyOverrides(genesis.Config)
block, _, err1 := write(tx, genesis, tmpDir)
block, _, err1 := write(tx, genesis, tmpDir, logger)
if err1 != nil {
return genesis.Config, nil, err1
}
@ -123,7 +123,7 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overrideCancunTime *b
// Check whether the genesis block is already written.
if genesis != nil {
block, _, err1 := GenesisToBlock(genesis, tmpDir)
block, _, err1 := GenesisToBlock(genesis, tmpDir, logger)
if err1 != nil {
return genesis.Config, nil, err1
}
@ -180,8 +180,8 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overrideCancunTime *b
return newCfg, storedBlock, nil
}
func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string) (*types.Block, *state.IntraBlockState, error) {
block, statedb, err := GenesisToBlock(g, tmpDir)
func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string, logger log.Logger) (*types.Block, *state.IntraBlockState, error) {
block, statedb, err := GenesisToBlock(g, tmpDir, logger)
if err != nil {
return nil, nil, err
}
@ -229,13 +229,13 @@ func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string) (*types.Bloc
}
return block, statedb, nil
}
func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string) *types.Block {
func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string, logger log.Logger) *types.Block {
tx, err := db.BeginRw(context.Background())
if err != nil {
panic(err)
}
defer tx.Rollback()
block, _, err := write(tx, g, tmpDir)
block, _, err := write(tx, g, tmpDir, logger)
if err != nil {
panic(err)
}
@ -248,8 +248,8 @@ func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string) *types.Block
// Write writes the block and state of a genesis specification to the database.
// The block is committed as the canonical head block.
func write(tx kv.RwTx, g *types.Genesis, tmpDir string) (*types.Block, *state.IntraBlockState, error) {
block, statedb, err2 := WriteGenesisState(g, tx, tmpDir)
func write(tx kv.RwTx, g *types.Genesis, tmpDir string, logger log.Logger) (*types.Block, *state.IntraBlockState, error) {
block, statedb, err2 := WriteGenesisState(g, tx, tmpDir, logger)
if err2 != nil {
return block, statedb, err2
}
@ -309,9 +309,9 @@ func write(tx kv.RwTx, g *types.Genesis, tmpDir string) (*types.Block, *state.In
}
// GenesisBlockForTesting creates and writes a block in which addr has the given wei balance.
func GenesisBlockForTesting(db kv.RwDB, addr libcommon.Address, balance *big.Int, tmpDir string) *types.Block {
func GenesisBlockForTesting(db kv.RwDB, addr libcommon.Address, balance *big.Int, tmpDir string, logger log.Logger) *types.Block {
g := types.Genesis{Alloc: types.GenesisAlloc{addr: {Balance: balance}}, Config: params.TestChainConfig}
block := MustCommitGenesis(&g, db, tmpDir)
block := MustCommitGenesis(&g, db, tmpDir, logger)
return block
}
@ -320,14 +320,14 @@ type GenAccount struct {
Balance *big.Int
}
func GenesisWithAccounts(db kv.RwDB, accs []GenAccount, tmpDir string) *types.Block {
func GenesisWithAccounts(db kv.RwDB, accs []GenAccount, tmpDir string, logger log.Logger) *types.Block {
g := types.Genesis{Config: params.TestChainConfig}
allocs := make(map[libcommon.Address]types.GenesisAccount)
for _, acc := range accs {
allocs[acc.Addr] = types.GenesisAccount{Balance: acc.Balance}
}
g.Alloc = allocs
block := MustCommitGenesis(&g, db, tmpDir)
block := MustCommitGenesis(&g, db, tmpDir, logger)
return block
}
@ -489,7 +489,7 @@ func DeveloperGenesisBlock(period uint64, faucet libcommon.Address) *types.Genes
// ToBlock creates the genesis block and writes state of a genesis specification
// to the given database (or discards it if nil).
func GenesisToBlock(g *types.Genesis, tmpDir string) (*types.Block, *state.IntraBlockState, error) {
func GenesisToBlock(g *types.Genesis, tmpDir string, logger log.Logger) (*types.Block, *state.IntraBlockState, error) {
_ = g.Alloc //nil-check
head := &types.Header{
@ -556,7 +556,7 @@ func GenesisToBlock(g *types.Genesis, tmpDir string) (*types.Block, *state.Intra
// TODO(yperbasis): use memdb.MemoryMutation instead
defer wg.Done()
genesisTmpDB := mdbx.NewMDBX(log.New()).InMem(tmpDir).MapSize(2 * datasize.GB).GrowthStep(1 * datasize.MB).MustOpen()
genesisTmpDB := mdbx.NewMDBX(logger).InMem(tmpDir).MapSize(2 * datasize.GB).GrowthStep(1 * datasize.MB).MustOpen()
defer genesisTmpDB.Close()
var tx kv.RwTx
if tx, err = genesisTmpDB.BeginRw(context.Background()); err != nil {

View File

@ -20,7 +20,7 @@ import (
"github.com/ledgerwatch/log/v3"
)
func ResetState(db kv.RwDB, ctx context.Context, chain string, tmpDir string) error {
func ResetState(db kv.RwDB, ctx context.Context, chain string, tmpDir string, logger log.Logger) error {
// don't reset senders here
if err := Reset(ctx, db, stages.HashState); err != nil {
return err
@ -44,7 +44,7 @@ func ResetState(db kv.RwDB, ctx context.Context, chain string, tmpDir string) er
return err
}
if err := ResetExec(ctx, db, chain, tmpDir); err != nil {
if err := ResetExec(ctx, db, chain, tmpDir, logger); err != nil {
return err
}
return nil
@ -130,7 +130,7 @@ func WarmupExec(ctx context.Context, db kv.RwDB) (err error) {
return
}
func ResetExec(ctx context.Context, db kv.RwDB, chain string, tmpDir string) (err error) {
func ResetExec(ctx context.Context, db kv.RwDB, chain string, tmpDir string, logger log.Logger) (err error) {
historyV3 := kvcfg.HistoryV3.FromDB(db)
if historyV3 {
stateHistoryBuckets = append(stateHistoryBuckets, stateHistoryV3Buckets...)
@ -156,7 +156,7 @@ func ResetExec(ctx context.Context, db kv.RwDB, chain string, tmpDir string) (er
}
if !historyV3 {
genesis := core.GenesisBlockByChainName(chain)
if _, _, err := core.WriteGenesisState(genesis, tx, tmpDir); err != nil {
if _, _, err := core.WriteGenesisState(genesis, tx, tmpDir, logger); err != nil {
return err
}
}

View File

@ -25,6 +25,7 @@ import (
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon/core/state/temporal"
"github.com/ledgerwatch/log/v3"
"golang.org/x/crypto/sha3"
"github.com/ledgerwatch/erigon/common/u256"
@ -35,7 +36,7 @@ import (
"github.com/ledgerwatch/erigon/rlp"
)
func getBlock(tb testing.TB, transactions int, uncles int, dataSize int, tmpDir string) *types.Block {
func getBlock(tb testing.TB, transactions int, uncles int, dataSize int, tmpDir string, logger log.Logger) *types.Block {
_, db, _ := temporal.NewTestDB(tb, datadir.New(tmpDir), nil)
var (
aa = libcommon.HexToAddress("0x000000000000000000000000000000000000aaaa")
@ -49,7 +50,7 @@ func getBlock(tb testing.TB, transactions int, uncles int, dataSize int, tmpDir
Config: params.TestChainConfig,
Alloc: types.GenesisAlloc{address: {Balance: funds}},
}
genesis = MustCommitGenesis(gspec, db, tmpDir)
genesis = MustCommitGenesis(gspec, db, tmpDir, logger)
)
// We need to generate as many blocks +1 as uncles
@ -91,7 +92,7 @@ func TestRlpIterator(t *testing.T) {
func testRlpIterator(t *testing.T, txs, uncles, datasize int) {
desc := fmt.Sprintf("%d txs [%d datasize] and %d uncles", txs, datasize, uncles)
bodyRlp, _ := rlp.EncodeToBytes(getBlock(t, txs, uncles, datasize, "").Body())
bodyRlp, _ := rlp.EncodeToBytes(getBlock(t, txs, uncles, datasize, "", log.Root()).Body())
it, err := rlp.NewListIterator(bodyRlp)
if err != nil {
t.Fatal(err)
@ -150,7 +151,7 @@ func BenchmarkHashing(b *testing.B) {
blockRlp []byte
)
{
block := getBlock(b, 200, 2, 50, "")
block := getBlock(b, 200, 2, 50, "", log.Root())
bodyRlp, _ = rlp.EncodeToBytes(block.Body())
blockRlp, _ = rlp.EncodeToBytes(block)
}

View File

@ -45,8 +45,8 @@ type MemoryMutation struct {
// defer batch.Close()
// ... some calculations on `batch`
// batch.Commit()
func NewMemoryBatch(tx kv.Tx, tmpDir string) *MemoryMutation {
tmpDB := mdbx.NewMDBX(log.New()).InMem(tmpDir).GrowthStep(64 * datasize.MB).MapSize(512 * datasize.GB).MustOpen()
func NewMemoryBatch(tx kv.Tx, tmpDir string, logger log.Logger) *MemoryMutation {
tmpDB := mdbx.NewMDBX(logger).InMem(tmpDir).GrowthStep(64 * datasize.MB).MapSize(512 * datasize.GB).MustOpen()
memTx, err := tmpDB.BeginRw(context.Background())
if err != nil {
panic(err)

View File

@ -21,6 +21,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/log/v3"
)
func initializeDbNonDupSort(rwTx kv.RwTx) {
@ -35,7 +36,7 @@ func TestPutAppendHas(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
require.NoError(t, batch.Append(kv.HashedAccounts, []byte("AAAA"), []byte("value1.5")))
require.Error(t, batch.Append(kv.HashedAccounts, []byte("AAAA"), []byte("value1.3")))
require.NoError(t, batch.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value1.3")))
@ -64,7 +65,7 @@ func TestLastMiningDB(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4"))
batch.Put(kv.HashedAccounts, []byte("BCAA"), []byte("value5"))
@ -88,7 +89,7 @@ func TestLastMiningMem(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4"))
batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5"))
@ -111,7 +112,7 @@ func TestDeleteMining(t *testing.T) {
_, rwTx := memdb.NewTestTx(t)
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4"))
batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5"))
batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5"))
@ -137,7 +138,7 @@ func TestFlush(t *testing.T) {
_, rwTx := memdb.NewTestTx(t)
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4"))
batch.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value5"))
batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5"))
@ -158,7 +159,7 @@ func TestForEach(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5"))
require.NoError(t, batch.Flush(rwTx))
@ -200,7 +201,7 @@ func TestForPrefix(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
var keys1 []string
var values1 []string
@ -239,7 +240,7 @@ func TestForAmount(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
var keys []string
@ -272,7 +273,7 @@ func TestGetOneAfterClearBucket(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
err := batch.ClearBucket(kv.HashedAccounts)
@ -295,7 +296,7 @@ func TestSeekExactAfterClearBucket(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
err := batch.ClearBucket(kv.HashedAccounts)
@ -331,7 +332,7 @@ func TestFirstAfterClearBucket(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
err := batch.ClearBucket(kv.HashedAccounts)
@ -359,7 +360,7 @@ func TestIncReadSequence(t *testing.T) {
initializeDbNonDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
_, err := batch.IncrementSequence(kv.HashedAccounts, uint64(12))
@ -382,7 +383,7 @@ func TestNext(t *testing.T) {
initializeDbDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
batch.Put(kv.AccountChangeSet, []byte("key1"), []byte("value1.2"))
@ -426,7 +427,7 @@ func TestNextNoDup(t *testing.T) {
initializeDbDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
batch.Put(kv.AccountChangeSet, []byte("key2"), []byte("value2.1"))
@ -453,7 +454,7 @@ func TestDeleteCurrentDuplicates(t *testing.T) {
initializeDbDupSort(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
cursor, err := batch.RwCursorDupSort(kv.AccountChangeSet)
@ -488,7 +489,7 @@ func TestSeekBothRange(t *testing.T) {
rwTx.Put(kv.AccountChangeSet, []byte("key1"), []byte("value1.1"))
rwTx.Put(kv.AccountChangeSet, []byte("key3"), []byte("value3.3"))
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
cursor, err := batch.RwCursorDupSort(kv.AccountChangeSet)
@ -522,7 +523,7 @@ func TestAutoConversion(t *testing.T) {
initializeDbAutoConversion(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
c, err := batch.RwCursor(kv.PlainState)
@ -578,7 +579,7 @@ func TestAutoConversionDelete(t *testing.T) {
initializeDbAutoConversion(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
c, err := batch.RwCursor(kv.PlainState)
@ -615,7 +616,7 @@ func TestAutoConversionSeekBothRange(t *testing.T) {
initializeDbAutoConversion(rwTx)
batch := NewMemoryBatch(rwTx, "")
batch := NewMemoryBatch(rwTx, "", log.Root())
defer batch.Close()
c, err := batch.RwCursorDupSort(kv.PlainState)

View File

@ -102,7 +102,7 @@ func SaveChainConfigIfNeed(ctx context.Context, coreDB kv.RoDB, txPoolDB kv.RwDB
func AllComponents(ctx context.Context, cfg txpoolcfg.Config, cache kvcache.Cache, newTxs chan types.Announcements, chainDB kv.RoDB,
sentryClients []direct.SentryClient, stateChangesClient txpool.StateChangesClient, logger log.Logger) (kv.RwDB, *txpool.TxPool, *txpool.Fetch, *txpool.Send, *txpool.GrpcServer, error) {
opts := mdbx.NewMDBX(log.New()).Label(kv.TxPoolDB).Path(cfg.DBDir).
opts := mdbx.NewMDBX(logger).Label(kv.TxPoolDB).Path(cfg.DBDir).
WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.TxpoolTablesCfg }).
WriteMergeThreshold(3 * 8192).
PageSize(uint64(16 * datasize.KB)).

View File

@ -665,7 +665,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder,
logger)
// We start the mining step
if err := stages2.MiningStep(ctx, backend.chainDB, proposingSync, tmpdir); err != nil {
if err := stages2.MiningStep(ctx, backend.chainDB, proposingSync, tmpdir, logger); err != nil {
return nil, err
}
block := <-miningStatePos.MiningResultPOSCh
@ -783,7 +783,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
}
}()
if err := backend.StartMining(context.Background(), backend.chainDB, mining, backend.config.Miner, backend.gasPrice, backend.sentriesClient.Hd.QuitPoWMining, tmpdir); err != nil {
if err := backend.StartMining(context.Background(), backend.chainDB, mining, backend.config.Miner, backend.gasPrice, backend.sentriesClient.Hd.QuitPoWMining, tmpdir, logger); err != nil {
return nil, err
}
@ -1013,7 +1013,7 @@ func (s *Ethereum) shouldPreserve(block *types.Block) bool { //nolint
// StartMining starts the miner with the given number of CPU threads. If mining
// is already running, this method adjust the number of threads allowed to use
// and updates the minimum price required by the transaction pool.
func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsync.Sync, cfg params.MiningConfig, gasPrice *uint256.Int, quitCh chan struct{}, tmpDir string) error {
func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsync.Sync, cfg params.MiningConfig, gasPrice *uint256.Int, quitCh chan struct{}, tmpDir string, logger log.Logger) error {
var borcfg *bor.Bor
if b, ok := s.engine.(*bor.Bor); ok {
@ -1141,7 +1141,7 @@ func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsy
works = true
hasWork = false
mineEvery.Reset(cfg.Recommit)
go func() { errc <- stages2.MiningStep(ctx, db, mining, tmpDir) }()
go func() { errc <- stages2.MiningStep(ctx, db, mining, tmpDir, logger) }()
}
}
}()

View File

@ -132,12 +132,18 @@ func (it *lookup) startQueries() bool {
return it.queries > 0
}
type ctxKey int
const (
ckNoSlowdown ctxKey = iota
)
func disableLookupSlowdown(ctx context.Context) context.Context {
return context.WithValue(ctx, "p2p.discover.lookup.noSlowdown", true)
return context.WithValue(ctx, ckNoSlowdown, true)
}
func isDisabledLookupSlowdown(ctx context.Context) bool {
return ctx.Value("p2p.discover.lookup.noSlowdown") != nil
return ctx.Value(ckNoSlowdown) != nil
}
func (it *lookup) slowdown() {

View File

@ -29,6 +29,7 @@ import (
"github.com/ledgerwatch/erigon/p2p/enode"
"github.com/ledgerwatch/erigon/p2p/enr"
"github.com/ledgerwatch/erigon/p2p/netutil"
"github.com/ledgerwatch/log/v3"
)
func TestTable_pingReplace(t *testing.T) {
@ -49,7 +50,7 @@ func TestTable_pingReplace(t *testing.T) {
func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding bool) {
transport := newPingRecorder()
tmpDir := t.TempDir()
tab, db := newTestTable(transport, tmpDir)
tab, db := newTestTable(transport, tmpDir, log.Root())
defer db.Close()
defer tab.close()
@ -118,7 +119,7 @@ func testTableBumpNoDuplicatesRun(t *testing.T, bucketCountGen byte, bumpCountGe
if len(bumps) > 0 {
tmpDir := t.TempDir()
tab, db := newTestTable(newPingRecorder(), tmpDir)
tab, db := newTestTable(newPingRecorder(), tmpDir, log.Root())
defer db.Close()
defer tab.close()
@ -170,7 +171,7 @@ func TestTable_bumpNoDuplicates_examples(t *testing.T) {
func TestTable_IPLimit(t *testing.T) {
transport := newPingRecorder()
tmpDir := t.TempDir()
tab, db := newTestTable(transport, tmpDir)
tab, db := newTestTable(transport, tmpDir, log.Root())
defer db.Close()
defer tab.close()
@ -188,7 +189,7 @@ func TestTable_IPLimit(t *testing.T) {
func TestTable_BucketIPLimit(t *testing.T) {
transport := newPingRecorder()
tmpDir := t.TempDir()
tab, db := newTestTable(transport, tmpDir)
tab, db := newTestTable(transport, tmpDir, log.Root())
defer db.Close()
defer tab.close()
@ -224,7 +225,7 @@ func testTableFindNodeByIDRun(t *testing.T, nodesCountGen uint16, resultsCountGe
// for any node table, Target and N
transport := newPingRecorder()
tmpDir := t.TempDir()
tab, db := newTestTable(transport, tmpDir)
tab, db := newTestTable(transport, tmpDir, log.Root())
defer db.Close()
defer tab.close()
@ -328,7 +329,7 @@ func testTableReadRandomNodesGetAllRun(t *testing.T, nodesCountGen uint16, rand
buf := make([]*enode.Node, nodesCount)
transport := newPingRecorder()
tmpDir := t.TempDir()
tab, db := newTestTable(transport, tmpDir)
tab, db := newTestTable(transport, tmpDir, log.Root())
defer db.Close()
defer tab.close()
<-tab.initDone
@ -392,7 +393,7 @@ func generateNode(rand *rand.Rand) *node {
func TestTable_addVerifiedNode(t *testing.T) {
tmpDir := t.TempDir()
tab, db := newTestTable(newPingRecorder(), tmpDir)
tab, db := newTestTable(newPingRecorder(), tmpDir, log.Root())
<-tab.initDone
defer db.Close()
defer tab.close()
@ -425,7 +426,7 @@ func TestTable_addVerifiedNode(t *testing.T) {
func TestTable_addSeenNode(t *testing.T) {
tmpDir := t.TempDir()
tab, db := newTestTable(newPingRecorder(), tmpDir)
tab, db := newTestTable(newPingRecorder(), tmpDir, log.Root())
<-tab.initDone
defer db.Close()
defer tab.close()
@ -460,7 +461,7 @@ func TestTable_addSeenNode(t *testing.T) {
func TestTable_revalidateSyncRecord(t *testing.T) {
transport := newPingRecorder()
tmpDir := t.TempDir()
tab, db := newTestTable(transport, tmpDir)
tab, db := newTestTable(transport, tmpDir, log.Root())
<-tab.initDone
defer db.Close()
defer tab.close()

View File

@ -43,8 +43,8 @@ func init() {
nullNode = enode.SignNull(&r, enode.ID{})
}
func newTestTable(t transport, tmpDir string) (*Table, *enode.DB) {
db, err := enode.OpenDB(context.Background(), "", tmpDir)
func newTestTable(t transport, tmpDir string, logger log.Logger) (*Table, *enode.DB) {
db, err := enode.OpenDB(context.Background(), "", tmpDir, logger)
if err != nil {
panic(err)
}

View File

@ -82,7 +82,7 @@ func newUDPTestContext(ctx context.Context, t *testing.T, logger log.Logger) *ud
tmpDir := t.TempDir()
var err error
test.db, err = enode.OpenDB(ctx, "", tmpDir)
test.db, err = enode.OpenDB(ctx, "", tmpDir, logger)
if err != nil {
panic(err)
}
@ -619,7 +619,7 @@ func startLocalhostV4(ctx context.Context, t *testing.T, cfg Config, logger log.
cfg.PrivateKey = newkey()
tmpDir := t.TempDir()
db, err := enode.OpenDB(context.Background(), "", tmpDir)
db, err := enode.OpenDB(context.Background(), "", tmpDir, logger)
if err != nil {
panic(err)
}

View File

@ -41,7 +41,7 @@ import (
func startLocalhostV5(t *testing.T, cfg Config, logger log.Logger) *UDPv5 {
cfg.PrivateKey = newkey()
tmpDir := t.TempDir()
db, err := enode.OpenDB(context.Background(), "", tmpDir)
db, err := enode.OpenDB(context.Background(), "", tmpDir, logger)
if err != nil {
panic(err)
}
@ -573,7 +573,7 @@ func newUDPV5TestContext(ctx context.Context, t *testing.T, logger log.Logger) *
t.Cleanup(test.close)
var err error
tmpDir := t.TempDir()
test.db, err = enode.OpenDB(context.Background(), "", tmpDir)
test.db, err = enode.OpenDB(context.Background(), "", tmpDir, logger)
if err != nil {
panic(err)
}
@ -627,7 +627,7 @@ func (test *udpV5Test) getNode(key *ecdsa.PrivateKey, addr *net.UDPAddr, logger
ln := test.nodesByID[id]
if ln == nil {
tmpDir := test.t.TempDir()
db, err := enode.OpenDB(context.Background(), "", tmpDir)
db, err := enode.OpenDB(context.Background(), "", tmpDir, logger)
if err != nil {
panic(err)
}

View File

@ -537,7 +537,7 @@ func (t *handshakeTest) close() {
}
func (n *handshakeTestNode) init(key *ecdsa.PrivateKey, ip net.IP, clock mclock.Clock, tmpDir string, logger log.Logger) {
db, err := enode.OpenDB(context.Background(), "", tmpDir)
db, err := enode.OpenDB(context.Background(), "", tmpDir, logger)
if err != nil {
panic(err)
}

View File

@ -29,7 +29,7 @@ import (
)
func newLocalNodeForTesting(tmpDir string, logger log.Logger) (*LocalNode, *DB) {
db, err := OpenDB(context.Background(), "", tmpDir)
db, err := OpenDB(context.Background(), "", tmpDir, logger)
if err != nil {
panic(err)
}

View File

@ -82,8 +82,7 @@ type DB struct {
// OpenDB opens a node database for storing and retrieving infos about known peers in the
// network. If no path is given an in-memory, temporary database is constructed.
func OpenDB(ctx context.Context, path string, tmpDir string) (*DB, error) {
logger := log.New() //TODO: move higher
func OpenDB(ctx context.Context, path string, tmpDir string, logger log.Logger) (*DB, error) {
if path == "" {
return newMemoryDB(logger, tmpDir)
}

View File

@ -25,6 +25,8 @@ import (
"reflect"
"testing"
"time"
"github.com/ledgerwatch/log/v3"
)
var keytestID = HexID("51232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439")
@ -89,7 +91,7 @@ var nodeDBInt64Tests = []struct {
func TestDBInt64(t *testing.T) {
tmpDir := t.TempDir()
db, err := OpenDB(context.Background(), "", tmpDir)
db, err := OpenDB(context.Background(), "", tmpDir, log.Root())
if err != nil {
panic(err)
}
@ -125,7 +127,7 @@ func TestDBFetchStore(t *testing.T) {
inst := time.Now()
num := 314
db, err := OpenDB(context.Background(), "", tmpDir)
db, err := OpenDB(context.Background(), "", tmpDir, log.Root())
if err != nil {
panic(err)
}
@ -268,7 +270,7 @@ func TestDBSeedQuery(t *testing.T) {
}
func testSeedQuery(tmpDir string) error {
db, err := OpenDB(context.Background(), "", tmpDir)
db, err := OpenDB(context.Background(), "", tmpDir, log.Root())
if err != nil {
panic(err)
}
@ -318,7 +320,7 @@ func TestDBPersistency(t *testing.T) {
)
// Create a persistent database and store some values
db, err := OpenDB(context.Background(), filepath.Join(root, "database"), root)
db, err := OpenDB(context.Background(), filepath.Join(root, "database"), root, log.Root())
if err != nil {
t.Fatalf("failed to create persistent database: %v", err)
}
@ -329,7 +331,7 @@ func TestDBPersistency(t *testing.T) {
db.Close()
// ReopenSegments the database and check the value
db, err = OpenDB(context.Background(), filepath.Join(root, "database"), root)
db, err = OpenDB(context.Background(), filepath.Join(root, "database"), root, log.Root())
if err != nil {
t.Fatalf("failed to open persistent database: %v", err)
}
@ -432,7 +434,7 @@ var nodeDBExpirationNodes = []struct {
func TestDBExpiration(t *testing.T) {
tmpDir := t.TempDir()
db, err := OpenDB(context.Background(), "", tmpDir)
db, err := OpenDB(context.Background(), "", tmpDir, log.Root())
if err != nil {
panic(err)
}
@ -479,7 +481,7 @@ func TestDBExpiration(t *testing.T) {
// in the database.
func TestDBExpireV5(t *testing.T) {
tmpDir := t.TempDir()
db, err := OpenDB(context.Background(), "", tmpDir)
db, err := OpenDB(context.Background(), "", tmpDir, log.Root())
if err != nil {
panic(err)
}

View File

@ -22,6 +22,7 @@ import (
"github.com/ledgerwatch/erigon/core/state/temporal"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/p2p"
"github.com/ledgerwatch/log/v3"
)
func testSentryServer(db kv.Getter, genesis *types.Genesis, genesisHash libcommon.Hash) *GrpcServer {
@ -88,8 +89,8 @@ func testForkIDSplit(t *testing.T, protocol uint) {
gspecNoFork = &types.Genesis{Config: configNoFork}
gspecProFork = &types.Genesis{Config: configProFork}
genesisNoFork = core.MustCommitGenesis(gspecNoFork, dbNoFork, "")
genesisProFork = core.MustCommitGenesis(gspecProFork, dbProFork, "")
genesisNoFork = core.MustCommitGenesis(gspecNoFork, dbNoFork, "", log.Root())
genesisProFork = core.MustCommitGenesis(gspecProFork, dbProFork, "", log.Root())
)
var s1, s2 *GrpcServer
@ -177,7 +178,7 @@ func TestSentryServerImpl_SetStatusInitPanic(t *testing.T) {
configNoFork := &chain.Config{HomesteadBlock: big.NewInt(1), ChainID: big.NewInt(1)}
_, dbNoFork, _ := temporal.NewTestDB(t, datadir.New(t.TempDir()), nil)
gspecNoFork := &types.Genesis{Config: configNoFork}
genesisNoFork := core.MustCommitGenesis(gspecNoFork, dbNoFork, "")
genesisNoFork := core.MustCommitGenesis(gspecNoFork, dbNoFork, "", log.Root())
ss := &GrpcServer{p2p: &p2p.Config{}}
_, err := ss.SetStatus(context.Background(), &proto_sentry.StatusData{

View File

@ -558,7 +558,7 @@ func (srv *Server) setupLocalNode() error {
}
sort.Sort(capsByNameAndVersion(srv.ourHandshake.Caps))
// Create the local node
db, err := enode.OpenDB(srv.quitCtx, srv.Config.NodeDatabase, srv.Config.TmpDir)
db, err := enode.OpenDB(srv.quitCtx, srv.Config.NodeDatabase, srv.Config.TmpDir, srv.logger)
if err != nil {
return err
}

View File

@ -46,6 +46,7 @@ import (
"github.com/ledgerwatch/erigon/rlp"
"github.com/ledgerwatch/erigon/turbo/rpchelper"
"github.com/ledgerwatch/erigon/turbo/trie"
"github.com/ledgerwatch/log/v3"
)
// StateTest checks transaction processing without block context.
@ -182,7 +183,7 @@ func (t *StateTest) RunNoVerify(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Co
return nil, libcommon.Hash{}, UnsupportedForkError{subtest.Fork}
}
vmconfig.ExtraEips = eips
block, _, err := core.GenesisToBlock(t.genesis(config), "")
block, _, err := core.GenesisToBlock(t.genesis(config), "", log.Root())
if err != nil {
return nil, libcommon.Hash{}, UnsupportedForkError{subtest.Fork}
}

View File

@ -98,6 +98,16 @@ var snapshotCommand = cli.Command{
&erigoncli.UploadFromFlag,
&erigoncli.FrozenBlockLimitFlag,
}),
Before: func(context *cli.Context) error {
erigoncli.SyncLoopBreakAfterFlag.Value = "Senders"
erigoncli.SyncLoopBlockLimitFlag.Value = 100000
erigoncli.SyncLoopPruneLimitFlag.Value = 100000
erigoncli.FrozenBlockLimitFlag.Value = 1500000
utils.NoDownloaderFlag.Value = true
utils.HTTPEnabledFlag.Value = false
utils.TxPoolDisableFlag.Value = true
return nil
},
},
{
Name: "uncompress",
@ -582,13 +592,6 @@ func doRetireCommand(cliCtx *cli.Context) error {
}
func uploaderCommandFlags(flags []cli.Flag) []cli.Flag {
erigoncli.SyncLoopBreakAfterFlag.Value = "Senders"
erigoncli.SyncLoopBlockLimitFlag.Value = 100000
erigoncli.SyncLoopPruneLimitFlag.Value = 100000
erigoncli.FrozenBlockLimitFlag.Value = 1500000
utils.NoDownloaderFlag.Value = true
utils.HTTPEnabledFlag.Value = false
utils.TxPoolDisableFlag.Value = true
return joinFlags(erigoncli.DefaultFlags, flags, []cli.Flag{
&erigoncli.SyncLoopBreakAfterFlag,
&erigoncli.SyncLoopBlockLimitFlag,

View File

@ -419,7 +419,13 @@ func setEmbeddedRpcDaemon(ctx *cli.Context, cfg *nodecfg.Config, logger log.Logg
apis := ctx.String(utils.HTTPApiFlag.Name)
c := &httpcfg.HttpCfg{
Enabled: ctx.Bool(utils.HTTPEnabledFlag.Name),
Enabled: func() bool {
if ctx.IsSet(utils.HTTPEnabledFlag.Name) {
return ctx.Bool(utils.HTTPEnabledFlag.Name)
}
return true
}(),
HttpServerEnabled: ctx.Bool(utils.HTTPServerEnabledFlag.Name),
Dirs: cfg.Dirs,

View File

@ -17,9 +17,10 @@ import (
"context"
"errors"
"fmt"
"github.com/ledgerwatch/erigon/cl/phase1/core/state/lru"
"sync"
"github.com/ledgerwatch/erigon/cl/phase1/core/state/lru"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/membatchwithdb"
@ -132,7 +133,7 @@ func (fv *ForkValidator) FlushExtendingFork(tx kv.RwTx, accumulator *shards.Accu
// if the payload extends the canonical chain, then we stack it in extendingFork without any unwind.
// if the payload is a fork then we unwind to the point where the fork meets the canonical chain, and there we check whether it is valid.
// if for any reason none of the actions above can be performed due to lack of information, we accept the payload and avoid validation.
func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *types.RawBody, extendCanonical bool) (status engine_types.EngineStatus, latestValidHash libcommon.Hash, validationError error, criticalError error) {
func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *types.RawBody, extendCanonical bool, logger log.Logger) (status engine_types.EngineStatus, latestValidHash libcommon.Hash, validationError error, criticalError error) {
fv.lock.Lock()
defer fv.lock.Unlock()
if fv.validatePayload == nil {
@ -149,7 +150,7 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t
log.Debug("Execution ForkValidator.ValidatePayload", "extendCanonical", extendCanonical)
if extendCanonical {
extendingFork := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir)
extendingFork := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir, logger)
defer extendingFork.Close()
fv.extendingForkNotifications = &shards.Notifications{
@ -186,7 +187,7 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t
return
}
log.Debug("Execution ForkValidator.ValidatePayload", "foundCanonical", foundCanonical, "currentHash", currentHash, "unwindPoint", unwindPoint)
logger.Debug("Execution ForkValidator.ValidatePayload", "foundCanonical", foundCanonical, "currentHash", currentHash, "unwindPoint", unwindPoint)
var bodiesChain []*types.RawBody
var headersChain []*types.Header
@ -222,13 +223,13 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t
if criticalError != nil {
return
}
log.Debug("Execution ForkValidator.ValidatePayload", "foundCanonical", foundCanonical, "currentHash", currentHash, "unwindPoint", unwindPoint)
logger.Debug("Execution ForkValidator.ValidatePayload", "foundCanonical", foundCanonical, "currentHash", currentHash, "unwindPoint", unwindPoint)
}
// Do not set an unwind point if we are already there.
if unwindPoint == fv.currentHeight {
unwindPoint = 0
}
batch := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir)
batch := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir, logger)
defer batch.Rollback()
notifications := &shards.Notifications{
Events: shards.NewEvents(),

View File

@ -183,7 +183,7 @@ func (e *EthereumExecutionModule) ValidateChain(ctx context.Context, req *execut
extendingHash := e.forkValidator.ExtendingForkHeadHash()
extendCanonical := extendingHash == libcommon.Hash{} && header.ParentHash == currentHeadHash
status, lvh, validationError, criticalError := e.forkValidator.ValidatePayload(tx, header, body.RawBody(), extendCanonical)
status, lvh, validationError, criticalError := e.forkValidator.ValidatePayload(tx, header, body.RawBody(), extendCanonical, e.logger)
if criticalError != nil {
return nil, criticalError
}

View File

@ -355,7 +355,7 @@ func (api *APIImpl) GetProof(ctx context.Context, address libcommon.Address, sto
if latestBlock-blockNr > uint64(api.MaxGetProofRewindBlockCount) {
return nil, fmt.Errorf("requested block is too old, block must be within %d blocks of the head block number (currently %d)", uint64(api.MaxGetProofRewindBlockCount), latestBlock)
}
batch := membatchwithdb.NewMemoryBatch(tx, api.dirs.Tmp)
batch := membatchwithdb.NewMemoryBatch(tx, api.dirs.Tmp, api.logger)
defer batch.Rollback()
unwindState := &stagedsync.UnwindState{UnwindPoint: blockNr}

View File

@ -32,8 +32,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/ledgerwatch/erigon-lib/chain"
chain2 "github.com/ledgerwatch/erigon-lib/chain"
libchain "github.com/ledgerwatch/erigon-lib/chain"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/length"
"github.com/ledgerwatch/erigon-lib/kv"
@ -616,7 +615,7 @@ func TestEIP155Transition(t *testing.T) {
funds = big.NewInt(1000000000)
deleteAddr = libcommon.Address{1}
gspec = &types.Genesis{
Config: &chain.Config{ChainID: big.NewInt(1), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)},
Config: &libchain.Config{ChainID: big.NewInt(1), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)},
Alloc: types.GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
}
)
@ -689,7 +688,7 @@ func TestEIP155Transition(t *testing.T) {
}
// generate an invalid chain id transaction
config := &chain2.Config{ChainID: big.NewInt(2), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)}
config := &libchain.Config{ChainID: big.NewInt(2), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)}
chain, chainErr = core.GenerateChain(config, chain.TopBlock, m.Engine, m.DB, 4, func(i int, block *core.BlockGen) {
var (
basicTx = func(signer types.Signer) (types.Transaction, error) {
@ -741,7 +740,7 @@ func doModesTest(t *testing.T, pm prune.Mode) error {
funds = big.NewInt(1000000000)
deleteAddr = libcommon.Address{1}
gspec = &types.Genesis{
Config: &chain.Config{ChainID: big.NewInt(1), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)},
Config: &libchain.Config{ChainID: big.NewInt(1), TangerineWhistleBlock: big.NewInt(0), SpuriousDragonBlock: big.NewInt(2), HomesteadBlock: new(big.Int)},
Alloc: types.GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
}
)
@ -959,7 +958,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
funds = big.NewInt(1000000000)
theAddr = libcommon.Address{1}
gspec = &types.Genesis{
Config: &chain.Config{
Config: &libchain.Config{
ChainID: big.NewInt(1),
HomesteadBlock: new(big.Int),
TangerineWhistleBlock: new(big.Int),

View File

@ -87,7 +87,7 @@ func TestSetupGenesis(t *testing.T) {
{
name: "custom block in DB, genesis == nil",
fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) {
core.MustCommitGenesis(&customg, db, tmpdir)
core.MustCommitGenesis(&customg, db, tmpdir, logger)
return core.CommitGenesisBlock(db, nil, tmpdir, logger)
},
wantHash: customghash,
@ -96,7 +96,7 @@ func TestSetupGenesis(t *testing.T) {
{
name: "custom block in DB, genesis == sepolia",
fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) {
core.MustCommitGenesis(&customg, db, tmpdir)
core.MustCommitGenesis(&customg, db, tmpdir, logger)
return core.CommitGenesisBlock(db, core.SepoliaGenesisBlock(), tmpdir, logger)
},
wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.SepoliaGenesisHash},
@ -106,7 +106,7 @@ func TestSetupGenesis(t *testing.T) {
{
name: "custom block in DB, genesis == bor-mainnet",
fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) {
core.MustCommitGenesis(&customg, db, tmpdir)
core.MustCommitGenesis(&customg, db, tmpdir, logger)
return core.CommitGenesisBlock(db, core.BorMainnetGenesisBlock(), tmpdir, logger)
},
wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.BorMainnetGenesisHash},
@ -116,7 +116,7 @@ func TestSetupGenesis(t *testing.T) {
{
name: "custom block in DB, genesis == mumbai",
fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) {
core.MustCommitGenesis(&customg, db, tmpdir)
core.MustCommitGenesis(&customg, db, tmpdir, logger)
return core.CommitGenesisBlock(db, core.MumbaiGenesisBlock(), tmpdir, logger)
},
wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.MumbaiGenesisHash},
@ -126,7 +126,7 @@ func TestSetupGenesis(t *testing.T) {
{
name: "custom block in DB, genesis == amoy",
fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) {
core.MustCommitGenesis(&customg, db, tmpdir)
core.MustCommitGenesis(&customg, db, tmpdir, logger)
return core.CommitGenesisBlock(db, core.AmoyGenesisBlock(), tmpdir, logger)
},
wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.AmoyGenesisHash},
@ -136,7 +136,7 @@ func TestSetupGenesis(t *testing.T) {
{
name: "compatible config in DB",
fn: func(db kv.RwDB) (*chain.Config, *types.Block, error) {
core.MustCommitGenesis(&oldcustomg, db, tmpdir)
core.MustCommitGenesis(&oldcustomg, db, tmpdir, logger)
return core.CommitGenesisBlock(db, &customg, tmpdir, logger)
},
wantHash: customghash,

View File

@ -423,7 +423,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder,
logger)
// We start the mining step
if err := stages2.MiningStep(ctx, mock.DB, proposingSync, tmpdir); err != nil {
if err := stages2.MiningStep(ctx, mock.DB, proposingSync, tmpdir, logger); err != nil {
return nil, err
}
block := <-miningStatePos.MiningResultPOSCh

View File

@ -119,7 +119,7 @@ func TestMineBlockWith1Tx(t *testing.T) {
}
m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed
err = stages.MiningStep(m.Ctx, m.DB, m.MiningSync, "")
err = stages.MiningStep(m.Ctx, m.DB, m.MiningSync, "", log.Root())
require.NoError(err)
got := <-m.PendingBlocks

View File

@ -333,7 +333,7 @@ func (h *Hook) afterRun(tx kv.Tx, finishProgressBefore uint64) error {
return nil
}
func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync, tmpDir string) (err error) {
func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync, tmpDir string, logger log.Logger) (err error) {
defer func() {
if rec := recover(); rec != nil {
err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack())
@ -346,7 +346,7 @@ func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync, tmpDir
}
defer tx.Rollback()
miningBatch := membatchwithdb.NewMemoryBatch(tx, tmpDir)
miningBatch := membatchwithdb.NewMemoryBatch(tx, tmpDir, logger)
defer miningBatch.Rollback()
if _, err = mining.Run(nil, miningBatch, false /* firstCycle */); err != nil {