Mirror of https://gitlab.com/pulsechaincom/erigon-pulse.git
rpcdaemon: must not create db - because doesn't know right parameters (#8445)
commit 6d9a4f4d94 (parent fe263ae02e)
@@ -117,7 +117,10 @@ func main() {

printNotice(&nodeKey.PublicKey, *realaddr)

db, err := enode.OpenDB("" /* path */, "" /* tmpDir */)
ctx, cancel := common.RootContext()
defer cancel()

db, err := enode.OpenDB(ctx, "" /* path */, "" /* tmpDir */)
if err != nil {
panic(err)
}
@@ -127,9 +130,6 @@ func main() {
NetRestrict: restrictList,
}

ctx, cancel := common.RootContext()
defer cancel()

if *runv5 {
if _, err := discover.ListenV5(ctx, conn, ln, cfg); err != nil {
utils.Fatalf("%v", err)
@@ -12,7 +12,6 @@ import (
"time"

"github.com/c2h5oh/datasize"
mdbx2 "github.com/erigontech/mdbx-go/mdbx"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
"github.com/ledgerwatch/erigon-lib/common"
@@ -380,7 +379,7 @@ func checkChainName(dirs datadir.Dirs, chainName string) error {
}
db := mdbx.NewMDBX(log.New()).
Path(dirs.Chaindata).Label(kv.ChainDB).
Flags(func(flags uint) uint { return flags | mdbx2.Accede }).
Accede().
MustOpen()
defer db.Close()
if err := db.View(context.Background(), func(tx kv.Tx) error {
@@ -779,7 +779,7 @@ func defragSteps(filename string, bucketsCfg kv.TableCfg, generateFs ...func(kv.
var db kv.RwDB
db, err = kv2.NewMDBX(logger).Path(dir).WithTableCfg(func(kv.TableCfg) kv.TableCfg {
return bucketsCfg
}).Open()
}).Open(context.Background())
if err != nil {
return fmt.Errorf("opening database: %w", err)
}
@@ -8,7 +8,6 @@ import (
"strings"

"github.com/c2h5oh/datasize"
"github.com/erigontech/mdbx-go/mdbx"
"github.com/ledgerwatch/log/v3"
"github.com/spf13/cobra"
"golang.org/x/sync/semaphore"
@@ -63,7 +62,7 @@ func RootCommand() *cobra.Command {
func dbCfg(label kv.Label, path string) kv2.MdbxOpts {
const ThreadsLimit = 9_000
limiterB := semaphore.NewWeighted(ThreadsLimit)
opts := kv2.NewMDBX(log.New()).Path(path).Label(label).RoTxsLimiter(limiterB)
opts := kv2.NewMDBX(log.New()).Path(path).Label(label).RoTxsLimiter(limiterB).Accede()
if label == kv.ChainDB {
opts = opts.MapSize(8 * datasize.TB)
}
@@ -76,7 +75,7 @@ func dbCfg(label kv.Label, path string) kv2.MdbxOpts {
func openDB(opts kv2.MdbxOpts, applyMigrations bool, logger log.Logger) (kv.RwDB, error) {
// integration tool don't intent to create db, then easiest way to open db - it's pass mdbx.Accede flag, which allow
// to read all options from DB, instead of overriding them
opts = opts.Flags(func(f uint) uint { return f | mdbx.Accede })
opts = opts.Accede()

db := opts.MustOpen()
if applyMigrations {
@@ -314,7 +314,7 @@ var cmdPrintStages = &cobra.Command{
Short: "",
Run: func(cmd *cobra.Command, args []string) {
logger := debug.SetupCobra(cmd, "integration")
db, err := openDB(dbCfg(kv.ChainDB, chaindata).Readonly(), false, logger)
db, err := openDB(dbCfg(kv.ChainDB, chaindata), false, logger)
if err != nil {
logger.Error("Opening DB", "error", err)
return
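The `Accede()` call used above is shorthand for OR-ing `mdbx.Accede` into the open flags (the helper itself is added in `kv_mdbx.go` later in this diff). A minimal sketch of the integration-tool pattern under that assumption; the wrapper function and the `kv2` import path are illustrative, not part of the commit:

```go
// Hedged sketch, not part of this commit: join an existing database the way the
// integration tool does, so this process never creates it or overrides its
// parameters (page size etc. stay owned by Erigon).
package example

import (
	"github.com/ledgerwatch/erigon-lib/kv"
	kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" // assumed import path for the MdbxOpts builder used in this diff
	"github.com/ledgerwatch/log/v3"
)

func mustOpenExisting(path string) kv.RwDB {
	opts := kv2.NewMDBX(log.New()).
		Path(path).
		Label(kv.ChainDB).
		Accede() // equivalent to Flags(func(f uint) uint { return f | mdbx.Accede })
	return opts.MustOpen() // panics if the database cannot be opened
}
```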
@ -1475,7 +1475,7 @@ func newDomains(ctx context.Context, db kv.RwDB, stepSize uint64, mode libstate.
|
||||
cfg.Snapshot = allSn.Cfg()
|
||||
|
||||
blockReader, _ := blocksIO(db, logger)
|
||||
engine, _ := initConsensusEngine(chainConfig, cfg.Dirs.DataDir, db, blockReader, logger)
|
||||
engine, _ := initConsensusEngine(ctx, chainConfig, cfg.Dirs.DataDir, db, blockReader, logger)
|
||||
return engine, cfg, allSn, agg
|
||||
}
|
||||
|
||||
@ -1512,7 +1512,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig,
|
||||
cfg.Snapshot = allSn.Cfg()
|
||||
|
||||
blockReader, blockWriter := blocksIO(db, logger)
|
||||
engine, heimdallClient := initConsensusEngine(chainConfig, cfg.Dirs.DataDir, db, blockReader, logger)
|
||||
engine, heimdallClient := initConsensusEngine(ctx, chainConfig, cfg.Dirs.DataDir, db, blockReader, logger)
|
||||
|
||||
maxBlockBroadcastPeers := func(header *types.Header) uint { return 0 }
|
||||
|
||||
@ -1603,7 +1603,7 @@ func overrideStorageMode(db kv.RwDB, logger log.Logger) error {
|
||||
})
|
||||
}
|
||||
|
||||
func initConsensusEngine(cc *chain2.Config, dir string, db kv.RwDB, blockReader services.FullBlockReader, logger log.Logger) (engine consensus.Engine, heimdallClient heimdall.IHeimdallClient) {
|
||||
func initConsensusEngine(ctx context.Context, cc *chain2.Config, dir string, db kv.RwDB, blockReader services.FullBlockReader, logger log.Logger) (engine consensus.Engine, heimdallClient heimdall.IHeimdallClient) {
|
||||
config := ethconfig.Defaults
|
||||
|
||||
var consensusConfig interface{}
|
||||
@ -1624,6 +1624,6 @@ func initConsensusEngine(cc *chain2.Config, dir string, db kv.RwDB, blockReader
|
||||
} else {
|
||||
consensusConfig = &config.Ethash
|
||||
}
|
||||
return ethconsensusconfig.CreateConsensusEngine(&nodecfg.Config{Dirs: datadir.New(dir)}, cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify,
|
||||
return ethconsensusconfig.CreateConsensusEngine(ctx, &nodecfg.Config{Dirs: datadir.New(dir)}, cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify,
|
||||
heimdallClient, config.WithoutHeimdall, blockReader, db.ReadOnly(), logger), heimdallClient
|
||||
}
|
||||
|
@ -99,7 +99,7 @@ var readDomains = &cobra.Command{
|
||||
}
|
||||
defer chainDb.Close()
|
||||
|
||||
stateDb, err := kv2.NewMDBX(log.New()).Path(filepath.Join(dirs.DataDir, "statedb")).WriteMap().Open()
|
||||
stateDb, err := kv2.NewMDBX(log.New()).Path(filepath.Join(dirs.DataDir, "statedb")).WriteMap().Open(ctx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
@ -16,7 +16,7 @@ import (
|
||||
)
|
||||
|
||||
func mainWithFlags(ctx context.Context, flags observer.CommandFlags, logger log.Logger) error {
|
||||
server, err := observer.NewServer(flags, logger)
|
||||
server, err := observer.NewServer(ctx, flags, logger)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -33,7 +33,7 @@ type Server struct {
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
func NewServer(flags CommandFlags, logger log.Logger) (*Server, error) {
|
||||
func NewServer(ctx context.Context, flags CommandFlags, logger log.Logger) (*Server, error) {
|
||||
nodeDBPath := filepath.Join(flags.DataDir, "nodes", "eth66")
|
||||
|
||||
nodeKeyConfig := p2p.NodeKeyConfig{}
|
||||
@ -42,7 +42,7 @@ func NewServer(flags CommandFlags, logger log.Logger) (*Server, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localNode, err := makeLocalNode(nodeDBPath, privateKey, flags.Chain, logger)
|
||||
localNode, err := makeLocalNode(ctx, nodeDBPath, privateKey, flags.Chain, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -84,8 +84,8 @@ func NewServer(flags CommandFlags, logger log.Logger) (*Server, error) {
|
||||
return &instance, nil
|
||||
}
|
||||
|
||||
func makeLocalNode(nodeDBPath string, privateKey *ecdsa.PrivateKey, chain string, logger log.Logger) (*enode.LocalNode, error) {
|
||||
db, err := enode.OpenDB(nodeDBPath, "")
|
||||
func makeLocalNode(ctx context.Context, nodeDBPath string, privateKey *ecdsa.PrivateKey, chain string, logger log.Logger) (*enode.LocalNode, error) {
|
||||
db, err := enode.OpenDB(ctx, nodeDBPath, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -310,11 +310,20 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger,
var cc *chain.Config

if cfg.WithDatadir {
// Opening all databases in Accede and non-Readonly modes. Here is the motivation:
// Rpcdaemon must provide 2 features:
// 1. ability to start even if Erigon is down (to prevent cascade outage).
// 2. don't create databases by itself - because it doesn't know right parameters (Erigon may have cli flags: pagesize, etc...)
// Some databases (consensus, txpool, downloader) are woring in SafeNoSync mode - in this mode
// power-off may leave db in recoverable-non-consistent state. Such db can be recovered only if open in non-Readonly mode.
// Accede mode preventing db-creation:
// at first start RpcDaemon may start earlier than Erigon
// Accede mode will check db existence (may wait with retries). It's ok to fail in this case - some supervisor will restart us.
var rwKv kv.RwDB
dir.MustExist(cfg.Dirs.SnapHistory)
logger.Trace("Creating chain db", "path", cfg.Dirs.Chaindata)
logger.Warn("Opening chain db", "path", cfg.Dirs.Chaindata)
limiter := semaphore.NewWeighted(int64(cfg.DBReadConcurrency))
rwKv, err = kv2.NewMDBX(logger).RoTxsLimiter(limiter).Path(cfg.Dirs.Chaindata).Open()
rwKv, err = kv2.NewMDBX(logger).RoTxsLimiter(limiter).Path(cfg.Dirs.Chaindata).Accede().Open(ctx)
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, ff, nil, err
}
@@ -468,16 +477,8 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger,

// bor (consensus) specific db
borDbPath := filepath.Join(cfg.DataDir, "bor")
{
// ensure db exist
tmpDb, err := kv2.NewMDBX(logger).Path(borDbPath).Label(kv.ConsensusDB).Open()
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, ff, nil, err
}
tmpDb.Close()
}
logger.Trace("Creating consensus db", "path", borDbPath)
borKv, err = kv2.NewMDBX(logger).Path(borDbPath).Label(kv.ConsensusDB).Readonly().Open()
logger.Warn("[rpc] Opening Bor db", "path", borDbPath)
borKv, err = kv2.NewMDBX(logger).Path(borDbPath).Label(kv.ConsensusDB).Accede().Open(ctx)
if err != nil {
return nil, nil, nil, nil, nil, nil, nil, ff, nil, err
}
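The comment block above is the heart of this change. A rough sketch of the resulting startup behaviour: `ErrDBDoesNotExists` comes from the `kv_mdbx.go` hunk further down; the wrapper function and the `kv2` import alias are illustrative assumptions:

```go
// Hedged sketch, not part of this commit: how a caller like the rpcdaemon ends
// up behaving with Accede. If Erigon has not created chaindata yet, Open(ctx)
// returns ErrDBDoesNotExists after a short wait; the caller should exit and let
// its supervisor restart it, rather than create a db with the wrong parameters.
package example

import (
	"context"
	"errors"

	"github.com/ledgerwatch/erigon-lib/kv"
	kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" // assumed import path
	"github.com/ledgerwatch/log/v3"
)

func openChainDB(ctx context.Context, logger log.Logger, chaindata string) (kv.RwDB, error) {
	db, err := kv2.NewMDBX(logger).Path(chaindata).Label(kv.ChainDB).Accede().Open(ctx)
	if errors.Is(err, kv2.ErrDBDoesNotExists) {
		// Expected on first start when the rpcdaemon comes up before Erigon.
		logger.Warn("chaindata not created yet, exiting for supervisor restart", "path", chaindata, "err", err)
	}
	return db, err
}
```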
@@ -1,6 +1,8 @@
package main

import (
"context"
"errors"
"fmt"
"os"

@@ -20,7 +22,9 @@ func main() {
logger := debug.SetupCobra(cmd, "sentry")
db, backend, txPool, mining, stateCache, blockReader, engine, ff, agg, err := cli.RemoteServices(ctx, *cfg, logger, rootCancel)
if err != nil {
logger.Error("Could not connect to DB", "err", err)
if !errors.Is(err, context.Canceled) {
logger.Error("Could not connect to DB", "err", err)
}
return nil
}
defer db.Close()
@ -84,7 +84,7 @@ func (s *Sentinel) createLocalNode(
|
||||
udpPort, tcpPort int,
|
||||
tmpDir string,
|
||||
) (*enode.LocalNode, error) {
|
||||
db, err := enode.OpenDB("", tmpDir)
|
||||
db, err := enode.OpenDB(s.ctx, "", tmpDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not open node's peer database: %w", err)
|
||||
}
|
||||
|
@ -56,13 +56,13 @@ var checkChangeSetsCmd = &cobra.Command{
|
||||
Short: "Re-executes historical transactions in read-only mode and checks that their outputs match the database ChangeSets",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
logger := debug.SetupCobra(cmd, "check_change_sets")
|
||||
return CheckChangeSets(genesis, block, chaindata, historyfile, nocheck, logger)
|
||||
return CheckChangeSets(cmd.Context(), genesis, block, chaindata, historyfile, nocheck, logger)
|
||||
},
|
||||
}
|
||||
|
||||
// CheckChangeSets re-executes historical transactions in read-only mode
|
||||
// and checks that their outputs match the database ChangeSets.
|
||||
func CheckChangeSets(genesis *types.Genesis, blockNum uint64, chaindata string, historyfile string, nocheck bool, logger log.Logger) error {
|
||||
func CheckChangeSets(ctx context.Context, genesis *types.Genesis, blockNum uint64, chaindata string, historyfile string, nocheck bool, logger log.Logger) error {
|
||||
if len(historyfile) == 0 {
|
||||
historyfile = chaindata
|
||||
}
|
||||
@ -77,7 +77,7 @@ func CheckChangeSets(genesis *types.Genesis, blockNum uint64, chaindata string,
|
||||
interruptCh <- true
|
||||
}()
|
||||
|
||||
db, err := kv2.NewMDBX(logger).Path(chaindata).Open()
|
||||
db, err := kv2.NewMDBX(logger).Path(chaindata).Open(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -94,7 +94,6 @@ func CheckChangeSets(genesis *types.Genesis, blockNum uint64, chaindata string,
|
||||
if chaindata != historyfile {
|
||||
historyDb = kv2.MustOpen(historyfile)
|
||||
}
|
||||
ctx := context.Background()
|
||||
historyTx, err1 := historyDb.BeginRo(ctx)
|
||||
if err1 != nil {
|
||||
return err1
|
||||
@ -124,7 +123,7 @@ func CheckChangeSets(genesis *types.Genesis, blockNum uint64, chaindata string,
|
||||
commitEvery := time.NewTicker(30 * time.Second)
|
||||
defer commitEvery.Stop()
|
||||
|
||||
engine := initConsensusEngine(chainConfig, allSnapshots, blockReader, logger)
|
||||
engine := initConsensusEngine(ctx, chainConfig, allSnapshots, blockReader, logger)
|
||||
|
||||
for !interrupt {
|
||||
|
||||
@ -278,7 +277,7 @@ func CheckChangeSets(genesis *types.Genesis, blockNum uint64, chaindata string,
|
||||
return nil
|
||||
}
|
||||
|
||||
func initConsensusEngine(cc *chain2.Config, snapshots *freezeblocks.RoSnapshots, blockReader services.FullBlockReader, logger log.Logger) (engine consensus.Engine) {
|
||||
func initConsensusEngine(ctx context.Context, cc *chain2.Config, snapshots *freezeblocks.RoSnapshots, blockReader services.FullBlockReader, logger log.Logger) (engine consensus.Engine) {
|
||||
config := ethconfig.Defaults
|
||||
|
||||
var consensusConfig interface{}
|
||||
@ -292,5 +291,5 @@ func initConsensusEngine(cc *chain2.Config, snapshots *freezeblocks.RoSnapshots,
|
||||
} else {
|
||||
consensusConfig = &config.Ethash
|
||||
}
|
||||
return ethconsensusconfig.CreateConsensusEngine(&nodecfg.Config{Dirs: datadir.New(datadirCli)}, cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify, nil /* heimdallClient */, config.WithoutHeimdall, blockReader, true /* readonly */, logger)
|
||||
return ethconsensusconfig.CreateConsensusEngine(ctx, &nodecfg.Config{Dirs: datadir.New(datadirCli)}, cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify, nil /* heimdallClient */, config.WithoutHeimdall, blockReader, true /* readonly */, logger)
|
||||
}
|
||||
|
@ -43,7 +43,7 @@ var stateRootCmd = &cobra.Command{
|
||||
Short: "Exerimental command to re-execute blocks from beginning and compute state root",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
logger := debug.SetupCobra(cmd, "stateroot")
|
||||
return StateRoot(genesis, block, datadirCli, logger)
|
||||
return StateRoot(cmd.Context(), genesis, block, datadirCli, logger)
|
||||
},
|
||||
}
|
||||
|
||||
@ -60,7 +60,7 @@ func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) {
|
||||
return br, bw
|
||||
}
|
||||
|
||||
func StateRoot(genesis *types.Genesis, blockNum uint64, datadir string, logger log.Logger) error {
|
||||
func StateRoot(ctx context.Context, genesis *types.Genesis, blockNum uint64, datadir string, logger log.Logger) error {
|
||||
sigs := make(chan os.Signal, 1)
|
||||
interruptCh := make(chan bool, 1)
|
||||
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
|
||||
@ -70,12 +70,11 @@ func StateRoot(genesis *types.Genesis, blockNum uint64, datadir string, logger l
|
||||
interruptCh <- true
|
||||
}()
|
||||
dirs := datadir2.New(datadir)
|
||||
historyDb, err := kv2.NewMDBX(logger).Path(dirs.Chaindata).Open()
|
||||
historyDb, err := kv2.NewMDBX(logger).Path(dirs.Chaindata).Open(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer historyDb.Close()
|
||||
ctx := context.Background()
|
||||
historyTx, err1 := historyDb.BeginRo(ctx)
|
||||
if err1 != nil {
|
||||
return err1
|
||||
@ -89,7 +88,7 @@ func StateRoot(genesis *types.Genesis, blockNum uint64, datadir string, logger l
|
||||
} else if err = os.RemoveAll(stateDbPath); err != nil {
|
||||
return err
|
||||
}
|
||||
db, err2 := kv2.NewMDBX(logger).Path(stateDbPath).Open()
|
||||
db, err2 := kv2.NewMDBX(logger).Path(stateDbPath).Open(ctx)
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
|
@ -34,17 +34,17 @@ type optionsCfg struct {
|
||||
|
||||
const DumpSize = uint64(20000000000)
|
||||
|
||||
func IncrementVerkleTree(cfg optionsCfg, logger log.Logger) error {
|
||||
func IncrementVerkleTree(ctx context.Context, cfg optionsCfg, logger log.Logger) error {
|
||||
start := time.Now()
|
||||
|
||||
db, err := mdbx.Open(cfg.stateDb, log.Root(), true)
|
||||
db, err := openDB(ctx, cfg.stateDb, log.Root(), true)
|
||||
if err != nil {
|
||||
logger.Error("Error while opening database", "err", err.Error())
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
vDb, err := mdbx.Open(cfg.verkleDb, log.Root(), false)
|
||||
vDb, err := openDB(ctx, cfg.verkleDb, log.Root(), false)
|
||||
if err != nil {
|
||||
logger.Error("Error while opening db transaction", "err", err.Error())
|
||||
return err
|
||||
@ -88,15 +88,15 @@ func IncrementVerkleTree(cfg optionsCfg, logger log.Logger) error {
|
||||
return vTx.Commit()
|
||||
}
|
||||
|
||||
func RegeneratePedersenHashstate(cfg optionsCfg, logger log.Logger) error {
|
||||
db, err := mdbx.Open(cfg.stateDb, log.Root(), true)
|
||||
func RegeneratePedersenHashstate(ctx context.Context, cfg optionsCfg, logger log.Logger) error {
|
||||
db, err := openDB(ctx, cfg.stateDb, log.Root(), true)
|
||||
if err != nil {
|
||||
logger.Error("Error while opening database", "err", err.Error())
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
vDb, err := mdbx.Open(cfg.stateDb, log.Root(), false)
|
||||
vDb, err := openDB(ctx, cfg.stateDb, log.Root(), false)
|
||||
if err != nil {
|
||||
logger.Error("Error while opening db transaction", "err", err.Error())
|
||||
return err
|
||||
@ -130,16 +130,16 @@ func RegeneratePedersenHashstate(cfg optionsCfg, logger log.Logger) error {
|
||||
return vTx.Commit()
|
||||
}
|
||||
|
||||
func GenerateVerkleTree(cfg optionsCfg, logger log.Logger) error {
|
||||
func GenerateVerkleTree(ctx context.Context, cfg optionsCfg, logger log.Logger) error {
|
||||
start := time.Now()
|
||||
db, err := mdbx.Open(cfg.stateDb, log.Root(), true)
|
||||
db, err := openDB(ctx, cfg.stateDb, log.Root(), true)
|
||||
if err != nil {
|
||||
logger.Error("Error while opening database", "err", err.Error())
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
vDb, err := mdbx.Open(cfg.verkleDb, log.Root(), false)
|
||||
vDb, err := openDB(ctx, cfg.verkleDb, log.Root(), false)
|
||||
if err != nil {
|
||||
logger.Error("Error while opening db transaction", "err", err.Error())
|
||||
return err
|
||||
@ -191,8 +191,8 @@ func GenerateVerkleTree(cfg optionsCfg, logger log.Logger) error {
|
||||
return vTx.Commit()
|
||||
}
|
||||
|
||||
func analyseOut(cfg optionsCfg, logger log.Logger) error {
|
||||
db, err := mdbx.Open(cfg.verkleDb, logger, false)
|
||||
func analyseOut(ctx context.Context, cfg optionsCfg, logger log.Logger) error {
|
||||
db, err := openDB(ctx, cfg.verkleDb, logger, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -218,8 +218,8 @@ func analyseOut(cfg optionsCfg, logger log.Logger) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func dump(cfg optionsCfg) error {
|
||||
db, err := mdbx.Open(cfg.verkleDb, log.Root(), false)
|
||||
func dump(ctx context.Context, cfg optionsCfg) error {
|
||||
db, err := openDB(ctx, cfg.verkleDb, log.Root(), false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -285,8 +285,8 @@ func dump(cfg optionsCfg) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func dump_acc_preimages(cfg optionsCfg) error {
|
||||
db, err := mdbx.Open(cfg.stateDb, log.Root(), false)
|
||||
func dump_acc_preimages(ctx context.Context, cfg optionsCfg) error {
|
||||
db, err := openDB(ctx, cfg.stateDb, log.Root(), false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -339,8 +339,8 @@ func dump_acc_preimages(cfg optionsCfg) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func dump_storage_preimages(cfg optionsCfg, logger log.Logger) error {
|
||||
db, err := mdbx.Open(cfg.stateDb, logger, false)
|
||||
func dump_storage_preimages(ctx context.Context, cfg optionsCfg, logger log.Logger) error {
|
||||
db, err := openDB(ctx, cfg.stateDb, logger, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -443,35 +443,50 @@ func main() {
|
||||
}
|
||||
switch *action {
|
||||
case "hashstate":
|
||||
if err := RegeneratePedersenHashstate(opt, logger); err != nil {
|
||||
if err := RegeneratePedersenHashstate(ctx, opt, logger); err != nil {
|
||||
logger.Error("Error", "err", err.Error())
|
||||
}
|
||||
case "bucketsizes":
|
||||
if err := analyseOut(opt, logger); err != nil {
|
||||
if err := analyseOut(ctx, opt, logger); err != nil {
|
||||
logger.Error("Error", "err", err.Error())
|
||||
}
|
||||
case "verkle":
|
||||
if err := GenerateVerkleTree(opt, logger); err != nil {
|
||||
if err := GenerateVerkleTree(ctx, opt, logger); err != nil {
|
||||
logger.Error("Error", "err", err.Error())
|
||||
}
|
||||
case "incremental":
|
||||
if err := IncrementVerkleTree(opt, logger); err != nil {
|
||||
if err := IncrementVerkleTree(ctx, opt, logger); err != nil {
|
||||
logger.Error("Error", "err", err.Error())
|
||||
}
|
||||
case "dump":
|
||||
log.Info("Dumping in dump.txt")
|
||||
if err := dump(opt); err != nil {
|
||||
if err := dump(ctx, opt); err != nil {
|
||||
log.Error("Error", "err", err.Error())
|
||||
}
|
||||
case "acc_preimages":
|
||||
if err := dump_acc_preimages(opt); err != nil {
|
||||
if err := dump_acc_preimages(ctx, opt); err != nil {
|
||||
logger.Error("Error", "err", err.Error())
|
||||
}
|
||||
case "storage_preimages":
|
||||
if err := dump_storage_preimages(opt, logger); err != nil {
|
||||
if err := dump_storage_preimages(ctx, opt, logger); err != nil {
|
||||
logger.Error("Error", "err", err.Error())
|
||||
}
|
||||
default:
|
||||
log.Warn("No valid --action specified, aborting")
|
||||
}
|
||||
}
|
||||
|
||||
func openDB(ctx context.Context, path string, logger log.Logger, accede bool) (kv.RwDB, error) {
|
||||
var db kv.RwDB
|
||||
var err error
|
||||
opts := mdbx.NewMDBX(logger).Path(path)
|
||||
if accede {
|
||||
opts = opts.Accede()
|
||||
}
|
||||
db, err = opts.Open(ctx)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return db, nil
|
||||
}
|
||||
|
@ -14,7 +14,7 @@ type VerkleMarker struct {
|
||||
|
||||
//nolint:gocritic
|
||||
func NewVerkleMarker(tempdir string) *VerkleMarker {
|
||||
markedSlotsDb, err := mdbx.NewTemporaryMdbx(tempdir)
|
||||
markedSlotsDb, err := mdbx.NewTemporaryMdbx(context.TODO(), tempdir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -91,7 +91,7 @@ func New(ctx context.Context, cfg *downloadercfg.Cfg, logger log.Logger, verbosi
|
||||
// }
|
||||
//}
|
||||
|
||||
db, c, m, torrentClient, err := openClient(cfg.Dirs.Downloader, cfg.Dirs.Snap, cfg.ClientConfig)
|
||||
db, c, m, torrentClient, err := openClient(ctx, cfg.Dirs.Downloader, cfg.Dirs.Snap, cfg.ClientConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("openClient: %w", err)
|
||||
}
|
||||
@ -630,7 +630,7 @@ func (d *Downloader) StopSeeding(hash metainfo.Hash) error {
|
||||
|
||||
func (d *Downloader) TorrentClient() *torrent.Client { return d.torrentClient }
|
||||
|
||||
func openClient(dbDir, snapDir string, cfg *torrent.ClientConfig) (db kv.RwDB, c storage.PieceCompletion, m storage.ClientImplCloser, torrentClient *torrent.Client, err error) {
|
||||
func openClient(ctx context.Context, dbDir, snapDir string, cfg *torrent.ClientConfig) (db kv.RwDB, c storage.PieceCompletion, m storage.ClientImplCloser, torrentClient *torrent.Client, err error) {
|
||||
db, err = mdbx.NewMDBX(log.New()).
|
||||
Label(kv.DownloaderDB).
|
||||
WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.DownloaderTablesCfg }).
|
||||
@ -638,7 +638,7 @@ func openClient(dbDir, snapDir string, cfg *torrent.ClientConfig) (db kv.RwDB, c
|
||||
GrowthStep(16 * datasize.MB).
|
||||
MapSize(16 * datasize.GB).
|
||||
Path(dbDir).
|
||||
Open()
|
||||
Open(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, fmt.Errorf("torrentcfg.openClient: %w", err)
|
||||
}
|
||||
|
@@ -22,6 +22,7 @@ import (
"encoding/binary"
"fmt"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
@@ -34,6 +35,7 @@ import (
"github.com/erigontech/mdbx-go/mdbx"
stack2 "github.com/go-stack/stack"
"github.com/ledgerwatch/erigon-lib/common/dbg"
"github.com/ledgerwatch/erigon-lib/common/dir"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/iter"
"github.com/ledgerwatch/erigon-lib/kv/order"
@@ -167,6 +169,10 @@ func (opts MdbxOpts) Readonly() MdbxOpts {
opts.flags = opts.flags | mdbx.Readonly
return opts
}
func (opts MdbxOpts) Accede() MdbxOpts {
opts.flags = opts.flags | mdbx.Accede
return opts
}

func (opts MdbxOpts) SyncPeriod(period time.Duration) MdbxOpts {
opts.syncPeriod = period
@@ -219,7 +225,9 @@ func PathDbMap() map[string]kv.RoDB {
return maps.Clone(pathDbMap)
}

func (opts MdbxOpts) Open() (kv.RwDB, error) {
var ErrDBDoesNotExists = fmt.Errorf("can't create database - because opening in `Accede` mode. probably another (main) process can create it")

func (opts MdbxOpts) Open(ctx context.Context) (kv.RwDB, error) {
if dbg.WriteMap() {
opts = opts.WriteMap() //nolint
}
@@ -235,6 +243,24 @@ func (opts MdbxOpts) Open() (kv.RwDB, error) {
if dbg.MdbxReadAhead() {
opts = opts.Flags(func(u uint) uint { return u &^ mdbx.NoReadahead }) //nolint
}
if opts.flags&mdbx.Accede != 0 || opts.flags&mdbx.Readonly != 0 {
for retry := 0; ; retry++ {
exists := dir.FileExist(filepath.Join(opts.path, "mdbx.dat"))
if exists {
break
}
if retry >= 5 {
return nil, fmt.Errorf("%w, label: %s, path: %s", ErrDBDoesNotExists, opts.label.String(), opts.path)
}
select {
case <-time.After(500 * time.Millisecond):
case <-ctx.Done():
return nil, ctx.Err()
}
}

}

env, err := mdbx.NewEnv()
if err != nil {
return nil, err
@@ -397,7 +423,7 @@ func (opts MdbxOpts) Open() (kv.RwDB, error) {
}

func (opts MdbxOpts) MustOpen() kv.RwDB {
db, err := opts.Open()
db, err := opts.Open(context.Background())
if err != nil {
panic(fmt.Errorf("fail to open mdbx: %w", err))
}
@@ -420,6 +446,7 @@ type MdbxKV struct {

func (db *MdbxKV) PageSize() uint64 { return db.opts.pageSize }
func (db *MdbxKV) ReadOnly() bool { return db.opts.HasFlag(mdbx.Readonly) }
func (db *MdbxKV) Accede() bool { return db.opts.HasFlag(mdbx.Accede) }

// openDBIs - first trying to open existing DBI's in RO transaction
// otherwise re-try by RW transaction
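The new wait loop above is what makes Accede safe for processes that may start before the database exists: up to five 500 ms checks for `mdbx.dat`, abortable through the context. A small sketch contrasting the two entry points after this change; the wrapper functions and the `kv2` import alias are illustrative assumptions:

```go
// Hedged sketch, not part of this commit: Open(ctx) can abandon the mdbx.dat
// wait when ctx is cancelled; MustOpen() wraps Open(context.Background()) and
// panics on failure, so its wait cannot be interrupted.
package example

import (
	"context"

	"github.com/ledgerwatch/erigon-lib/kv"
	kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" // assumed import path
	"github.com/ledgerwatch/log/v3"
)

func open(ctx context.Context, path string) (kv.RwDB, error) {
	// Returns ErrDBDoesNotExists after ~5 x 500ms if mdbx.dat never appears,
	// or ctx.Err() if the caller cancels first.
	return kv2.NewMDBX(log.New()).Path(path).Accede().Open(ctx)
}

func mustOpen(path string) kv.RwDB {
	// Same wait, but non-interruptible and fatal on failure.
	return kv2.NewMDBX(log.New()).Path(path).Accede().MustOpen()
}
```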
@ -29,13 +29,13 @@ type TemporaryMdbx struct {
|
||||
path string
|
||||
}
|
||||
|
||||
func NewTemporaryMdbx(tempdir string) (kv.RwDB, error) {
|
||||
func NewTemporaryMdbx(ctx context.Context, tempdir string) (kv.RwDB, error) {
|
||||
path, err := os.MkdirTemp(tempdir, "mdbx-temp")
|
||||
if err != nil {
|
||||
return &TemporaryMdbx{}, err
|
||||
}
|
||||
|
||||
db, err := Open(path, log.Root(), false)
|
||||
db, err := NewMDBX(log.New()).Path(path).Open(ctx)
|
||||
if err != nil {
|
||||
return &TemporaryMdbx{}, err
|
||||
}
|
||||
|
@ -17,21 +17,14 @@
|
||||
package mdbx
|
||||
|
||||
import (
|
||||
mdbxbind "github.com/erigontech/mdbx-go/mdbx"
|
||||
"context"
|
||||
|
||||
"github.com/ledgerwatch/erigon-lib/kv"
|
||||
"github.com/ledgerwatch/log/v3"
|
||||
)
|
||||
|
||||
func MustOpen(path string) kv.RwDB {
|
||||
db, err := Open(path, log.New(), false)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return db
|
||||
}
|
||||
|
||||
func MustOpenRo(path string) kv.RoDB {
|
||||
db, err := Open(path, log.New(), true)
|
||||
db, err := Open(context.Background(), path, log.New(), false)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -39,14 +32,14 @@ func MustOpenRo(path string) kv.RoDB {
|
||||
}
|
||||
|
||||
// Open - main method to open database.
|
||||
func Open(path string, logger log.Logger, readOnly bool) (kv.RwDB, error) {
|
||||
func Open(ctx context.Context, path string, logger log.Logger, accede bool) (kv.RwDB, error) {
|
||||
var db kv.RwDB
|
||||
var err error
|
||||
opts := NewMDBX(logger).Path(path)
|
||||
if readOnly {
|
||||
opts = opts.Flags(func(flags uint) uint { return flags | mdbxbind.Readonly })
|
||||
if accede {
|
||||
opts = opts.Accede()
|
||||
}
|
||||
db, err = opts.Open()
|
||||
db, err = opts.Open(ctx)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -318,7 +318,7 @@ func TestAggregator_RestartOnFiles(t *testing.T) {
|
||||
|
||||
newDb, err := mdbx.NewMDBX(logger).InMem(filepath.Join(path, "db4")).WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg {
|
||||
return kv.ChaindataTablesCfg
|
||||
}).Open()
|
||||
}).Open(context.Background())
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(newDb.Close)
|
||||
|
||||
|
@ -120,7 +120,7 @@ func AllComponents(ctx context.Context, cfg txpoolcfg.Config, cache kvcache.Cach
|
||||
opts = opts.GrowthStep(cfg.MdbxGrowthStep)
|
||||
}
|
||||
|
||||
txPoolDB, err := opts.Open()
|
||||
txPoolDB, err := opts.Open(ctx)
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, nil, err
|
||||
|
@ -215,7 +215,7 @@ const blockBufferSize = 128
|
||||
|
||||
// New creates a new Ethereum object (including the
|
||||
// initialisation of the common Ethereum object)
|
||||
func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethereum, error) {
|
||||
func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethereum, error) {
|
||||
config.Snapshot.Enabled = config.Sync.UseSnapshots
|
||||
if config.Miner.GasPrice == nil || config.Miner.GasPrice.Cmp(libcommon.Big0) <= 0 {
|
||||
logger.Warn("Sanitizing invalid miner gas price", "provided", config.Miner.GasPrice, "updated", ethconfig.Defaults.Miner.GasPrice)
|
||||
@ -229,7 +229,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
|
||||
}
|
||||
|
||||
// Assemble the Ethereum object
|
||||
chainKv, err := node.OpenDatabase(stack.Config(), kv.ChainDB, "", false, logger)
|
||||
chainKv, err := node.OpenDatabase(ctx, stack.Config(), kv.ChainDB, "", false, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -475,7 +475,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
|
||||
flags.Milestone = config.WithHeimdallMilestones
|
||||
}
|
||||
|
||||
backend.engine = ethconsensusconfig.CreateConsensusEngine(stack.Config(), chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, heimdallClient, config.WithoutHeimdall, blockReader, false /* readonly */, logger)
|
||||
backend.engine = ethconsensusconfig.CreateConsensusEngine(ctx, stack.Config(), chainConfig, consensusConfig, config.Miner.Notify, config.Miner.Noverify, heimdallClient, config.WithoutHeimdall, blockReader, false /* readonly */, logger)
|
||||
|
||||
if config.SilkwormEnabled {
|
||||
backend.silkworm, err = silkworm.New(config.SilkwormPath)
|
||||
|
@ -1,6 +1,7 @@
|
||||
package ethconsensusconfig
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
@ -25,7 +26,7 @@ import (
|
||||
"github.com/ledgerwatch/erigon/turbo/services"
|
||||
)
|
||||
|
||||
func CreateConsensusEngine(nodeConfig *nodecfg.Config, chainConfig *chain.Config, config interface{}, notify []string, noVerify bool,
|
||||
func CreateConsensusEngine(ctx context.Context, nodeConfig *nodecfg.Config, chainConfig *chain.Config, config interface{}, notify []string, noVerify bool,
|
||||
heimdallClient heimdall.IHeimdallClient, withoutHeimdall bool, blockReader services.FullBlockReader, readonly bool,
|
||||
logger log.Logger,
|
||||
) consensus.Engine {
|
||||
@ -70,7 +71,7 @@ func CreateConsensusEngine(nodeConfig *nodecfg.Config, chainConfig *chain.Config
|
||||
var err error
|
||||
var db kv.RwDB
|
||||
|
||||
db, err = node.OpenDatabase(nodeConfig, kv.ConsensusDB, "clique", readonly, logger)
|
||||
db, err = node.OpenDatabase(ctx, nodeConfig, kv.ConsensusDB, "clique", readonly, logger)
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -83,7 +84,7 @@ func CreateConsensusEngine(nodeConfig *nodecfg.Config, chainConfig *chain.Config
|
||||
var err error
|
||||
var db kv.RwDB
|
||||
|
||||
db, err = node.OpenDatabase(nodeConfig, kv.ConsensusDB, "aura", readonly, logger)
|
||||
db, err = node.OpenDatabase(ctx, nodeConfig, kv.ConsensusDB, "aura", readonly, logger)
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -106,7 +107,7 @@ func CreateConsensusEngine(nodeConfig *nodecfg.Config, chainConfig *chain.Config
|
||||
var err error
|
||||
var db kv.RwDB
|
||||
|
||||
db, err = node.OpenDatabase(nodeConfig, kv.ConsensusDB, "bor", readonly, logger)
|
||||
db, err = node.OpenDatabase(ctx, nodeConfig, kv.ConsensusDB, "bor", readonly, logger)
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -127,7 +128,7 @@ func CreateConsensusEngine(nodeConfig *nodecfg.Config, chainConfig *chain.Config
|
||||
}
|
||||
}
|
||||
|
||||
func CreateConsensusEngineBareBones(chainConfig *chain.Config, logger log.Logger) consensus.Engine {
|
||||
func CreateConsensusEngineBareBones(ctx context.Context, chainConfig *chain.Config, logger log.Logger) consensus.Engine {
|
||||
var consensusConfig interface{}
|
||||
|
||||
if chainConfig.Clique != nil {
|
||||
@ -142,6 +143,6 @@ func CreateConsensusEngineBareBones(chainConfig *chain.Config, logger log.Logger
|
||||
consensusConfig = ðashCfg
|
||||
}
|
||||
|
||||
return CreateConsensusEngine(&nodecfg.Config{}, chainConfig, consensusConfig, nil /* notify */, true, /* noVerify */
|
||||
return CreateConsensusEngine(ctx, &nodecfg.Config{}, chainConfig, consensusConfig, nil /* notify */, true, /* noVerify */
|
||||
nil /* heimdallClient */, true /* withoutHeimdall */, nil /* blockReader */, false /* readonly */, logger)
|
||||
}
|
||||
|
@ -1392,7 +1392,7 @@ func ReconstituteState(ctx context.Context, s *StageState, dirs datadir.Dirs, wo
|
||||
}).
|
||||
PageSize(uint64(8 * datasize.KB)).
|
||||
WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.ReconTablesCfg }).
|
||||
Open()
|
||||
Open(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -295,7 +295,7 @@ func (n *Node) DataDir() string {
return n.config.Dirs.DataDir
}

func OpenDatabase(config *nodecfg.Config, label kv.Label, name string, readonly bool, logger log.Logger) (kv.RwDB, error) {
func OpenDatabase(ctx context.Context, config *nodecfg.Config, label kv.Label, name string, readonly bool, logger log.Logger) (kv.RwDB, error) {
switch label {
case kv.ChainDB:
name = "chaindata"
@@ -303,7 +303,7 @@ func OpenDatabase(config *nodecfg.Config, label kv.Label, name string, readonly
name = "txpool"
case kv.ConsensusDB:
if len(name) == 0 {
return nil, fmt.Errorf("Expected a consensus name")
return nil, fmt.Errorf("expected a consensus name")
}
default:
name = "test"
@@ -360,10 +360,9 @@ func OpenDatabase(config *nodecfg.Config, label kv.Label, name string, readonly
opts = opts.GrowthStep(config.MdbxGrowthStep)
}
default:
opts = opts.GrowthStep(16 * datasize.MB)
}

return opts.Open()
return opts.Open(ctx)
}
var err error
db, err = openFunc(false)
@ -147,7 +147,7 @@ func TestNodeCloseClosesDB(t *testing.T) {
|
||||
stack, _ := New(context.Background(), testNodeConfig(t), logger)
|
||||
defer stack.Close()
|
||||
|
||||
db, err := OpenDatabase(stack.Config(), kv.SentryDB, "", false, logger)
|
||||
db, err := OpenDatabase(context.Background(), stack.Config(), kv.SentryDB, "", false, logger)
|
||||
if err != nil {
|
||||
t.Fatal("can't open DB:", err)
|
||||
}
|
||||
@ -179,7 +179,7 @@ func TestNodeOpenDatabaseFromLifecycleStart(t *testing.T) {
|
||||
var db kv.RwDB
|
||||
stack.RegisterLifecycle(&InstrumentedService{
|
||||
startHook: func() {
|
||||
db, err = OpenDatabase(stack.Config(), kv.SentryDB, "", false, logger)
|
||||
db, err = OpenDatabase(context.Background(), stack.Config(), kv.SentryDB, "", false, logger)
|
||||
if err != nil {
|
||||
t.Fatal("can't open DB:", err)
|
||||
}
|
||||
@ -205,7 +205,7 @@ func TestNodeOpenDatabaseFromLifecycleStop(t *testing.T) {
|
||||
|
||||
stack.RegisterLifecycle(&InstrumentedService{
|
||||
stopHook: func() {
|
||||
db, err := OpenDatabase(stack.Config(), kv.ChainDB, "", false, logger)
|
||||
db, err := OpenDatabase(context.Background(), stack.Config(), kv.ChainDB, "", false, logger)
|
||||
if err != nil {
|
||||
t.Fatal("can't open DB:", err)
|
||||
}
|
||||
|
@ -18,6 +18,7 @@ package discover
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
@ -43,7 +44,7 @@ func init() {
|
||||
}
|
||||
|
||||
func newTestTable(t transport, tmpDir string) (*Table, *enode.DB) {
|
||||
db, err := enode.OpenDB("", tmpDir)
|
||||
db, err := enode.OpenDB(context.Background(), "", tmpDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -82,7 +82,7 @@ func newUDPTestContext(ctx context.Context, t *testing.T, logger log.Logger) *ud
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
var err error
|
||||
test.db, err = enode.OpenDB("", tmpDir)
|
||||
test.db, err = enode.OpenDB(ctx, "", tmpDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -620,7 +620,7 @@ func startLocalhostV4(ctx context.Context, t *testing.T, cfg Config, logger log.
|
||||
|
||||
cfg.PrivateKey = newkey()
|
||||
tmpDir := t.TempDir()
|
||||
db, err := enode.OpenDB("", tmpDir)
|
||||
db, err := enode.OpenDB(context.Background(), "", tmpDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -41,7 +41,7 @@ import (
|
||||
func startLocalhostV5(t *testing.T, cfg Config, logger log.Logger) *UDPv5 {
|
||||
cfg.PrivateKey = newkey()
|
||||
tmpDir := t.TempDir()
|
||||
db, err := enode.OpenDB("", tmpDir)
|
||||
db, err := enode.OpenDB(context.Background(), "", tmpDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -573,7 +573,7 @@ func newUDPV5TestContext(ctx context.Context, t *testing.T, logger log.Logger) *
|
||||
t.Cleanup(test.close)
|
||||
var err error
|
||||
tmpDir := t.TempDir()
|
||||
test.db, err = enode.OpenDB("", tmpDir)
|
||||
test.db, err = enode.OpenDB(context.Background(), "", tmpDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -627,7 +627,7 @@ func (test *udpV5Test) getNode(key *ecdsa.PrivateKey, addr *net.UDPAddr, logger
|
||||
ln := test.nodesByID[id]
|
||||
if ln == nil {
|
||||
tmpDir := test.t.TempDir()
|
||||
db, err := enode.OpenDB("", tmpDir)
|
||||
db, err := enode.OpenDB(context.Background(), "", tmpDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -18,6 +18,7 @@ package v5wire
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"encoding/hex"
|
||||
"flag"
|
||||
@ -535,7 +536,7 @@ func (t *handshakeTest) close() {
|
||||
}
|
||||
|
||||
func (n *handshakeTestNode) init(key *ecdsa.PrivateKey, ip net.IP, clock mclock.Clock, tmpDir string, logger log.Logger) {
|
||||
db, err := enode.OpenDB("", tmpDir)
|
||||
db, err := enode.OpenDB(context.Background(), "", tmpDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -17,6 +17,7 @@
|
||||
package enode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"net"
|
||||
"testing"
|
||||
@ -28,7 +29,7 @@ import (
|
||||
)
|
||||
|
||||
func newLocalNodeForTesting(tmpDir string, logger log.Logger) (*LocalNode, *DB) {
|
||||
db, err := OpenDB("", tmpDir)
|
||||
db, err := OpenDB(context.Background(), "", tmpDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@@ -83,12 +83,12 @@ type DB struct {

// OpenDB opens a node database for storing and retrieving infos about known peers in the
// network. If no path is given an in-memory, temporary database is constructed.
func OpenDB(path string, tmpDir string) (*DB, error) {
func OpenDB(ctx context.Context, path string, tmpDir string) (*DB, error) {
logger := log.New() //TODO: move higher
if path == "" {
return newMemoryDB(logger, tmpDir)
}
return newPersistentDB(logger, path)
return newPersistentDB(ctx, logger, path)
}

func bucketsConfig(_ kv.TableCfg) kv.TableCfg {
@@ -102,7 +102,7 @@ func bucketsConfig(_ kv.TableCfg) kv.TableCfg {
func newMemoryDB(logger log.Logger, tmpDir string) (*DB, error) {
db := &DB{quit: make(chan struct{})}
var err error
db.kv, err = mdbx.NewMDBX(logger).InMem(tmpDir).Label(kv.SentryDB).WithTableCfg(bucketsConfig).MapSize(1 * datasize.GB).Open()
db.kv, err = mdbx.NewMDBX(logger).InMem(tmpDir).Label(kv.SentryDB).WithTableCfg(bucketsConfig).MapSize(1 * datasize.GB).Open(context.Background())
if err != nil {
return nil, err
}
@@ -111,7 +111,7 @@ func newMemoryDB(logger log.Logger, tmpDir string) (*DB, error) {

// newPersistentNodeDB creates/opens a persistent node database,
// also flushing its contents in case of a version mismatch.
func newPersistentDB(logger log.Logger, path string) (*DB, error) {
func newPersistentDB(ctx context.Context, logger log.Logger, path string) (*DB, error) {
var db kv.RwDB
var err error
db, err = mdbx.NewMDBX(logger).
@@ -122,7 +122,7 @@ func newPersistentDB(logger log.Logger, path string) (*DB, error) {
GrowthStep(16 * datasize.MB).
Flags(func(f uint) uint { return f ^ mdbx1.Durable | mdbx1.SafeNoSync }).
SyncPeriod(2 * time.Second).
Open()
Open(ctx)
if err != nil {
return nil, err
}
@@ -156,7 +156,7 @@ func newPersistentDB(logger log.Logger, path string) (*DB, error) {
if err := os.RemoveAll(path); err != nil {
return nil, err
}
return newPersistentDB(logger, path)
return newPersistentDB(ctx, logger, path)
}
return &DB{kv: db, quit: make(chan struct{})}, nil
}
@ -18,6 +18,7 @@ package enode
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"path/filepath"
|
||||
@ -88,7 +89,7 @@ var nodeDBInt64Tests = []struct {
|
||||
|
||||
func TestDBInt64(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
db, err := OpenDB("", tmpDir)
|
||||
db, err := OpenDB(context.Background(), "", tmpDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -124,7 +125,7 @@ func TestDBFetchStore(t *testing.T) {
|
||||
inst := time.Now()
|
||||
num := 314
|
||||
|
||||
db, err := OpenDB("", tmpDir)
|
||||
db, err := OpenDB(context.Background(), "", tmpDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -267,7 +268,7 @@ func TestDBSeedQuery(t *testing.T) {
|
||||
}
|
||||
|
||||
func testSeedQuery(tmpDir string) error {
|
||||
db, err := OpenDB("", tmpDir)
|
||||
db, err := OpenDB(context.Background(), "", tmpDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -317,7 +318,7 @@ func TestDBPersistency(t *testing.T) {
|
||||
)
|
||||
|
||||
// Create a persistent database and store some values
|
||||
db, err := OpenDB(filepath.Join(root, "database"), root)
|
||||
db, err := OpenDB(context.Background(), filepath.Join(root, "database"), root)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create persistent database: %v", err)
|
||||
}
|
||||
@ -328,7 +329,7 @@ func TestDBPersistency(t *testing.T) {
|
||||
db.Close()
|
||||
|
||||
// ReopenSegments the database and check the value
|
||||
db, err = OpenDB(filepath.Join(root, "database"), root)
|
||||
db, err = OpenDB(context.Background(), filepath.Join(root, "database"), root)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to open persistent database: %v", err)
|
||||
}
|
||||
@ -431,7 +432,7 @@ var nodeDBExpirationNodes = []struct {
|
||||
|
||||
func TestDBExpiration(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
db, err := OpenDB("", tmpDir)
|
||||
db, err := OpenDB(context.Background(), "", tmpDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -478,7 +479,7 @@ func TestDBExpiration(t *testing.T) {
|
||||
// in the database.
|
||||
func TestDBExpireV5(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
db, err := OpenDB("", tmpDir)
|
||||
db, err := OpenDB(context.Background(), "", tmpDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -537,7 +537,7 @@ func (srv *Server) setupLocalNode() error {
|
||||
}
|
||||
sort.Sort(capsByNameAndVersion(srv.ourHandshake.Caps))
|
||||
// Create the local node
|
||||
db, err := enode.OpenDB(srv.Config.NodeDatabase, srv.Config.TmpDir)
|
||||
db, err := enode.OpenDB(srv.quitCtx, srv.Config.NodeDatabase, srv.Config.TmpDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -120,7 +120,7 @@ func (bt *BlockTest) Run(t *testing.T, checkStateRoot bool) error {
|
||||
if !ok {
|
||||
return UnsupportedForkError{bt.json.Network}
|
||||
}
|
||||
engine := ethconsensusconfig.CreateConsensusEngineBareBones(config, log.New())
|
||||
engine := ethconsensusconfig.CreateConsensusEngineBareBones(context.Background(), config, log.New())
|
||||
m := mock.MockWithGenesisEngine(t, bt.genesis(config), engine, false, checkStateRoot)
|
||||
defer m.Close()
|
||||
|
||||
|
@ -66,7 +66,7 @@ func NewNodeConfig() *nodecfg.Config {
|
||||
}
|
||||
|
||||
// InitNode initializes a node with the given genesis file and config
|
||||
func InitMiner(genesis *types.Genesis, privKey *ecdsa.PrivateKey, withoutHeimdall bool, minerID int) (*node.Node, *eth.Ethereum, error) {
|
||||
func InitMiner(ctx context.Context, genesis *types.Genesis, privKey *ecdsa.PrivateKey, withoutHeimdall bool, minerID int) (*node.Node, *eth.Ethereum, error) {
|
||||
// Define the basic configurations for the Ethereum node
|
||||
ddir, _ := os.MkdirTemp("", "")
|
||||
|
||||
@ -153,7 +153,7 @@ func InitMiner(genesis *types.Genesis, privKey *ecdsa.PrivateKey, withoutHeimdal
|
||||
ethCfg.DeprecatedTxPool.AccountSlots = 1000000
|
||||
ethCfg.DeprecatedTxPool.GlobalSlots = 1000000
|
||||
|
||||
ethBackend, err := eth.New(stack, ethCfg, logger)
|
||||
ethBackend, err := eth.New(ctx, stack, ethCfg, logger)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -67,7 +67,7 @@ func TestMiningBenchmark(t *testing.T) {
|
||||
var txs []*types.Transaction
|
||||
|
||||
for i := 0; i < 1; i++ {
|
||||
stack, ethBackend, err := helper.InitMiner(&genesis, pkeys[i], true, i)
|
||||
stack, ethBackend, err := helper.InitMiner(context.Background(), &genesis, pkeys[i], true, i)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -65,7 +65,7 @@ func importChain(cliCtx *cli.Context) error {
|
||||
stack := makeConfigNode(cliCtx.Context, nodeCfg, logger)
|
||||
defer stack.Close()
|
||||
|
||||
ethereum, err := eth.New(stack, ethCfg, logger)
|
||||
ethereum, err := eth.New(cliCtx.Context, stack, ethCfg, logger)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -34,14 +34,14 @@ It expects the genesis file as argument.`,
|
||||
|
||||
// initGenesis will initialise the given JSON format genesis file and writes it as
|
||||
// the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
|
||||
func initGenesis(ctx *cli.Context) error {
|
||||
func initGenesis(cliCtx *cli.Context) error {
|
||||
var logger log.Logger
|
||||
var err error
|
||||
if logger, _, err = debug.Setup(ctx, true /* rootLogger */); err != nil {
|
||||
if logger, _, err = debug.Setup(cliCtx, true /* rootLogger */); err != nil {
|
||||
return err
|
||||
}
|
||||
// Make sure we have a valid genesis JSON
|
||||
genesisPath := ctx.Args().First()
|
||||
genesisPath := cliCtx.Args().First()
|
||||
if len(genesisPath) == 0 {
|
||||
utils.Fatalf("Must supply path to genesis JSON file")
|
||||
}
|
||||
@ -58,10 +58,10 @@ func initGenesis(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
// Open and initialise both full and light databases
|
||||
stack := MakeConfigNodeDefault(ctx, logger)
|
||||
stack := MakeConfigNodeDefault(cliCtx, logger)
|
||||
defer stack.Close()
|
||||
|
||||
chaindb, err := node.OpenDatabase(stack.Config(), kv.ChainDB, "", false, logger)
|
||||
chaindb, err := node.OpenDatabase(cliCtx.Context, stack.Config(), kv.ChainDB, "", false, logger)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to open database: %v", err)
|
||||
}
|
||||
|
@ -38,7 +38,7 @@ func (e *EngineBlockDownloader) download(hashToDownload libcommon.Hash, download
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
tmpDb, err := mdbx.NewTemporaryMdbx(e.tmpdir)
|
||||
tmpDb, err := mdbx.NewTemporaryMdbx(e.ctx, e.tmpdir)
|
||||
if err != nil {
|
||||
e.logger.Warn("[EngineBlockDownloader] Could create temporary mdbx", "err", err)
|
||||
e.status.Store(headerdownload.Idle)
|
||||
|
@ -119,7 +119,7 @@ func New(
|
||||
utils.Fatalf("Failed to create Erigon node: %v", err)
|
||||
}
|
||||
|
||||
ethereum, err := eth.New(node, ethConfig, logger)
|
||||
ethereum, err := eth.New(ctx, node, ethConfig, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -570,7 +570,7 @@ func MockWithZeroTTDGnosis(t *testing.T, withPosDownloader bool) *MockSentry {
|
||||
address: {Balance: funds},
|
||||
},
|
||||
}
|
||||
engine := ethconsensusconfig.CreateConsensusEngineBareBones(chainConfig, log.New())
|
||||
engine := ethconsensusconfig.CreateConsensusEngineBareBones(context.Background(), chainConfig, log.New())
|
||||
checkStateRoot := true
|
||||
return MockWithGenesisEngine(t, gspec, engine, withPosDownloader, checkStateRoot)
|
||||
}
|
||||
|