diff --git a/RELEASE_INSTRUCTIONS.md b/RELEASE_INSTRUCTIONS.md index 98fe0fc5e..668fcdde3 100644 --- a/RELEASE_INSTRUCTIONS.md +++ b/RELEASE_INSTRUCTIONS.md @@ -9,7 +9,7 @@ until which we have manually checked the usefulness of the Jump dest code bitmap commands: ```` make state -./build/bin/state checkChangeSets --chaindata --block +./build/bin/state checkChangeSets --datadir --block ```` If there are any transactions where code bitmap was useful, warning messages like this will be displayed: ```` diff --git a/TESTING.md b/TESTING.md index 5254d148e..60f4c64c8 100644 --- a/TESTING.md +++ b/TESTING.md @@ -156,7 +156,7 @@ The way to perform this check is almost the same as the Incremental Sync, but st Having up-to-date database, and having shut down the turbo-geth node (it will work without shutting down, but it will lead to bloating of the database file), this command can be executed: ``` -./build/bin/state checkChangeSets --chaindata ~/mainnet/tg/chaindata --block 11000000 +./build/bin/state checkChangeSets --datadir --block 11000000 ``` Please note the difference in notation when referring to the database. Turbo-geth command uses `--datadir` which points to `~mainnet`, and it looks for the actual database directory under `tg/chaindata`, but `checkChangeSets` need to be given slightly different path, pointing directly to the database directory. diff --git a/cmd/headers/README.md b/cmd/headers/README.md index 62d902d03..0b7138e5f 100644 --- a/cmd/headers/README.md +++ b/cmd/headers/README.md @@ -12,7 +12,7 @@ combined). Ethereum mainnet configuration is currently hard-coded. ## Running with an external p2p sentry ``` -./buid/bin/headers download --chaindata +./buid/bin/headers download --datadir ``` The command above specifies `--datadir` option - directory where the database files will be written. These two options @@ -21,7 +21,7 @@ p2p sentry running on the same computer listening to the port `9091`. In order t computer, or a different port (or both), the option `--sentry.api.addr` can be used. For example: ``` -./buid/bin/headers download --chaindata --sentry.api.addr localhost:9999 +./buid/bin/headers download --datadir --sentry.api.addr localhost:9999 ``` The command above will expect the p2p sentry running on the same computer, but on the port `9999` @@ -29,7 +29,7 @@ The command above will expect the p2p sentry running on the same computer, but o ## Running with an internal p2p sentry ``` -./buid/bin/headers download --chaindata --combined +./buid/bin/headers download --datadir --combined ``` The command above will run p2p sentry and the header downloader in the same proccess. 
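Underlying all of the documentation changes above is one convention: the database lives in `tg/chaindata` inside whatever `--datadir` points to, so the tools can derive the chaindata path instead of requiring it. A minimal sketch of that derivation (the helper name is illustrative, not from the codebase):

```go
package main

import (
	"fmt"
	"path"
)

// resolveChaindata mirrors the fallback the commands in this diff add to their
// root PersistentPreRun hooks: an explicit --chaindata wins, otherwise the
// path is derived as <datadir>/tg/chaindata.
func resolveChaindata(datadir, chaindata string) string {
	if chaindata != "" {
		return chaindata
	}
	return path.Join(datadir, "tg", "chaindata")
}

func main() {
	// e.g. --datadir ~/mainnet ends up reading ~/mainnet/tg/chaindata
	fmt.Println(resolveChaindata("/home/user/mainnet", ""))
}
```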
In this mode, p2p sentry can be diff --git a/cmd/headers/commands/download.go b/cmd/headers/commands/download.go index 96e7c410a..6dfd34b6f 100644 --- a/cmd/headers/commands/download.go +++ b/cmd/headers/commands/download.go @@ -26,8 +26,7 @@ func init() { downloadCmd.Flags().BoolVar(&discovery, "discovery", true, "discovery mode") downloadCmd.Flags().StringVar(&netRestrict, "netrestrict", "", "CIDR range to accept peers from ") - withChaindata(downloadCmd) - withLmdbFlags(downloadCmd) + withDatadir(downloadCmd) rootCmd.AddCommand(downloadCmd) } diff --git a/cmd/headers/commands/root.go b/cmd/headers/commands/root.go index 7af51e7fe..14db9ad3e 100644 --- a/cmd/headers/commands/root.go +++ b/cmd/headers/commands/root.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "os/signal" + "path" "syscall" "github.com/c2h5oh/datasize" @@ -12,16 +13,19 @@ import ( "github.com/ledgerwatch/turbo-geth/ethdb" "github.com/ledgerwatch/turbo-geth/internal/debug" "github.com/ledgerwatch/turbo-geth/log" + "github.com/ledgerwatch/turbo-geth/node" "github.com/spf13/cobra" ) var ( - sentryAddr string // Address of the sentry : - sentryAddrs []string // Address of the sentry : - chaindata string // Path to chaindata - database string // Type of database (lmdb or mdbx) - mapSizeStr string // Map size for LMDB - freelistReuse int + sentryAddr string // Address of the sentry : + sentryAddrs []string // Address of the sentry : + chaindata string // Path to chaindata + snapshotDir string + snapshotMode string + datadir string // Path to td working dir + database string // Type of database (lmdb or mdbx) + mapSizeStr string // Map size for LMDB ) func init() { @@ -53,6 +57,12 @@ var rootCmd = &cobra.Command{ if err := debug.SetupCobra(cmd); err != nil { panic(err) } + if chaindata == "" { + chaindata = path.Join(datadir, "tg", "chaindata") + } + //if snapshotDir == "" { + // snapshotDir = path.Join(datadir, "tg", "snapshot") + //} }, PersistentPostRun: func(cmd *cobra.Command, args []string) { debug.Exit() @@ -72,16 +82,19 @@ func must(err error) { } } -func withChaindata(cmd *cobra.Command) { +func withDatadir(cmd *cobra.Command) { + cmd.Flags().StringVar(&datadir, "datadir", node.DefaultDataDir(), "data directory for temporary ELT files") + must(cmd.MarkFlagDirname("datadir")) + cmd.Flags().StringVar(&mapSizeStr, "lmdb.mapSize", "", "map size for LMDB") + cmd.Flags().StringVar(&chaindata, "chaindata", "", "path to the db") must(cmd.MarkFlagDirname("chaindata")) - must(cmd.MarkFlagRequired("chaindata")) - cmd.Flags().StringVar(&database, "database", "", "lmdb|mdbx") -} -func withLmdbFlags(cmd *cobra.Command) { - cmd.Flags().StringVar(&mapSizeStr, "lmdb.mapSize", "", "map size for LMDB") - cmd.Flags().IntVar(&freelistReuse, "maxFreelistReuse", 0, "Find a big enough contiguous page range for large values in freelist is hard just allocate new pages and even don't try to search if value is bigger than this limit. 
Measured in pages.") + cmd.Flags().StringVar(&snapshotMode, "snapshot.mode", "", "set of snapshots to use") + cmd.Flags().StringVar(&snapshotDir, "snapshot.dir", "", "snapshot dir") + must(cmd.MarkFlagDirname("snapshot.dir")) + + cmd.Flags().StringVar(&database, "database", "", "lmdb|mdbx") } func openDatabase(path string) *ethdb.ObjectDatabase { @@ -100,9 +113,6 @@ func openKV(path string, exclusive bool) ethdb.RwKV { must(mapSize.UnmarshalText([]byte(mapSizeStr))) opts = opts.MapSize(mapSize) } - if freelistReuse > 0 { - opts = opts.MaxFreelistReuse(uint(freelistReuse)) - } return opts.MustOpen() } @@ -115,8 +125,5 @@ func openKV(path string, exclusive bool) ethdb.RwKV { must(mapSize.UnmarshalText([]byte(mapSizeStr))) opts = opts.MapSize(mapSize) } - if freelistReuse > 0 { - opts = opts.MaxFreelistReuse(uint(freelistReuse)) - } return opts.MustOpen() } diff --git a/cmd/integration/Readme.md b/cmd/integration/Readme.md index eac5ed06e..f9563ebde 100644 --- a/cmd/integration/Readme.md +++ b/cmd/integration/Readme.md @@ -1,7 +1,7 @@ Integration - tool to run TurboGeth stages in custom way: run/reset single stage, run all stages but reorg every X blocks, etc... ## Examples -All commands require parameter `--chaindata=/tg/chaindata` - I will skip it for readability. +All commands require parameter `--datadir=` - I will skip it for readability. ``` integration --help @@ -36,7 +36,7 @@ Pre-requirements of `state_stages` command: ``` make all -./build/bin/integration state_stages --chaindata=/tg/chaindata --unwind=10 --unwind.every=20 --pprof +./build/bin/integration state_stages --datadir= --unwind=10 --unwind.every=20 --pprof integration reset_state # drops all stages after Senders stage (including it's db tables DB tables) ``` @@ -54,7 +54,7 @@ For example: ``` make all -./build/bin/integration stage_hash_state --chaindata=/tg/chaindata --reset -./build/bin/integration stage_trie --chaindata=/tg/chaindata --reset +./build/bin/integration stage_hash_state --datadir= --reset +./build/bin/integration stage_trie --datadir= --reset # Then run TurobGeth as usually. 
It will take 2-3 hours to re-calculate dropped db tables ``` \ No newline at end of file diff --git a/cmd/integration/commands/flags.go b/cmd/integration/commands/flags.go index d73580cf4..429712cfc 100644 --- a/cmd/integration/commands/flags.go +++ b/cmd/integration/commands/flags.go @@ -23,7 +23,6 @@ var ( bucket string datadir string mapSizeStr string - freelistReuse int migration string integritySlow bool integrityFast bool @@ -38,15 +37,6 @@ func must(err error) { } } -func withChaindata(cmd *cobra.Command) { - cmd.Flags().StringVar(&chaindata, "chaindata", "", "path to the db") - must(cmd.MarkFlagDirname("chaindata")) - must(cmd.MarkFlagRequired("chaindata")) - cmd.Flags().StringVar(&snapshotMode, "snapshot.mode", "", "set of snapshots to use") - cmd.Flags().StringVar(&snapshotDir, "snapshot.dir", "", "snapshot dir") - cmd.Flags().StringVar(&database, "database", "", "lmdb|mdbx") -} - func withMining(cmd *cobra.Command) { cmd.Flags().Bool("mine", false, "Enable mining") cmd.Flags().StringArray("miner.notify", nil, "Comma separated HTTP URL list to notify of new work packages") @@ -65,11 +55,6 @@ func withFile(cmd *cobra.Command) { must(cmd.MarkFlagRequired("file")) } -func withLmdbFlags(cmd *cobra.Command) { - cmd.Flags().StringVar(&mapSizeStr, "lmdb.mapSize", "", "map size for LMDB") - cmd.Flags().IntVar(&freelistReuse, "maxFreelistReuse", 0, "Find a big enough contiguous page range for large values in freelist is hard just allocate new pages and even don't try to search if value is bigger than this limit. Measured in pages.") -} - func withReferenceChaindata(cmd *cobra.Command) { cmd.Flags().StringVar(&referenceChaindata, "chaindata.reference", "", "path to the 2nd (reference/etalon) db") must(cmd.MarkFlagDirname("chaindata.reference")) @@ -101,7 +86,7 @@ func withBucket(cmd *cobra.Command) { } func withDatadir2(cmd *cobra.Command) { - cmd.Flags().String(utils.DataDirFlag.Name, utils.DataDirFlag.Value.String(), utils.DataDirFlag.Usage) + cmd.Flags().String(utils.DataDirFlag.Name, node.DefaultDataDir(), utils.DataDirFlag.Usage) must(cmd.MarkFlagDirname(utils.DataDirFlag.Name)) must(cmd.MarkFlagRequired(utils.DataDirFlag.Name)) cmd.Flags().StringVar(&database, "database", "", "lmdb|mdbx") @@ -109,6 +94,17 @@ func withDatadir2(cmd *cobra.Command) { func withDatadir(cmd *cobra.Command) { cmd.Flags().StringVar(&datadir, "datadir", node.DefaultDataDir(), "data directory for temporary ELT files") + must(cmd.MarkFlagDirname("datadir")) + cmd.Flags().StringVar(&mapSizeStr, "lmdb.mapSize", "", "map size for LMDB") + + cmd.Flags().StringVar(&chaindata, "chaindata", "", "path to the db") + must(cmd.MarkFlagDirname("chaindata")) + + cmd.Flags().StringVar(&snapshotMode, "snapshot.mode", "", "set of snapshots to use") + cmd.Flags().StringVar(&snapshotDir, "snapshot.dir", "", "snapshot dir") + must(cmd.MarkFlagDirname("snapshot.dir")) + + cmd.Flags().StringVar(&database, "database", "", "lmdb|mdbx") } func withBatchSize(cmd *cobra.Command) { diff --git a/cmd/integration/commands/refetence_db.go b/cmd/integration/commands/refetence_db.go index 461cd1e9c..74b2d343b 100644 --- a/cmd/integration/commands/refetence_db.go +++ b/cmd/integration/commands/refetence_db.go @@ -127,31 +127,31 @@ var cmdFToMdbx = &cobra.Command{ } func init() { - withChaindata(cmdCompareBucket) + withDatadir(cmdCompareBucket) withReferenceChaindata(cmdCompareBucket) withBucket(cmdCompareBucket) rootCmd.AddCommand(cmdCompareBucket) - withChaindata(cmdCompareStates) + withDatadir(cmdCompareStates) 
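For readability, the flag set that the new `withDatadir` helper registers (spread across the +/- lines above) can be pulled together into one self-contained sketch; the `--datadir` default shown here is a placeholder for `node.DefaultDataDir()`, and plain error handling stands in for the package's `must` wrapper:

```go
package commands

import "github.com/spf13/cobra"

var (
	datadir      string
	mapSizeStr   string
	chaindata    string
	snapshotMode string
	snapshotDir  string
	database     string
)

// withDatadir registers the consolidated flag set that replaces the old
// withChaindata/withLmdbFlags pair: --chaindata is now optional and, like
// --snapshot.dir, is filled in from --datadir when left empty.
func withDatadir(cmd *cobra.Command) {
	cmd.Flags().StringVar(&datadir, "datadir", "/placeholder/default", "data directory for temporary ELT files")
	if err := cmd.MarkFlagDirname("datadir"); err != nil {
		panic(err)
	}
	cmd.Flags().StringVar(&mapSizeStr, "lmdb.mapSize", "", "map size for LMDB")
	cmd.Flags().StringVar(&chaindata, "chaindata", "", "path to the db")
	cmd.Flags().StringVar(&snapshotMode, "snapshot.mode", "", "set of snapshots to use")
	cmd.Flags().StringVar(&snapshotDir, "snapshot.dir", "", "snapshot dir")
	cmd.Flags().StringVar(&database, "database", "", "lmdb|mdbx")
}
```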
withReferenceChaindata(cmdCompareStates) withBucket(cmdCompareStates) rootCmd.AddCommand(cmdCompareStates) - withChaindata(cmdLmdbToMdbx) + withDatadir(cmdLmdbToMdbx) withToChaindata(cmdLmdbToMdbx) withBucket(cmdLmdbToMdbx) rootCmd.AddCommand(cmdLmdbToMdbx) - withChaindata(cmdLmdbToLmdb) + withDatadir(cmdLmdbToLmdb) withToChaindata(cmdLmdbToLmdb) withBucket(cmdLmdbToLmdb) rootCmd.AddCommand(cmdLmdbToLmdb) - withChaindata(cmdMdbxToMdbx) + withDatadir(cmdMdbxToMdbx) withToChaindata(cmdMdbxToMdbx) withBucket(cmdMdbxToMdbx) diff --git a/cmd/integration/commands/reset_state.go b/cmd/integration/commands/reset_state.go index 68bb44159..c3984253f 100644 --- a/cmd/integration/commands/reset_state.go +++ b/cmd/integration/commands/reset_state.go @@ -53,11 +53,11 @@ var cmdClearUnwindStack = &cobra.Command{ } func init() { - withChaindata(cmdResetState) + withDatadir(cmdResetState) rootCmd.AddCommand(cmdResetState) - withChaindata(cmdClearUnwindStack) + withDatadir(cmdClearUnwindStack) rootCmd.AddCommand(cmdClearUnwindStack) } diff --git a/cmd/integration/commands/root.go b/cmd/integration/commands/root.go index 40ac5d6a5..8a52b4aae 100644 --- a/cmd/integration/commands/root.go +++ b/cmd/integration/commands/root.go @@ -1,6 +1,8 @@ package commands import ( + "path" + "github.com/c2h5oh/datasize" "github.com/ledgerwatch/turbo-geth/cmd/utils" "github.com/ledgerwatch/turbo-geth/ethdb" @@ -19,6 +21,12 @@ var rootCmd = &cobra.Command{ if err := utils.SetupCobra(cmd); err != nil { panic(err) } + if chaindata == "" { + chaindata = path.Join(datadir, "tg", "chaindata") + } + if snapshotDir == "" { + snapshotDir = path.Join(datadir, "tg", "snapshot") + } }, PersistentPostRun: func(cmd *cobra.Command, args []string) { defer utils.StopDebug() @@ -74,9 +82,6 @@ func openKV(path string, exclusive bool) ethdb.RwKV { must(mapSize.UnmarshalText([]byte(mapSizeStr))) opts = opts.MapSize(mapSize) } - if freelistReuse > 0 { - opts = opts.MaxFreelistReuse(uint(freelistReuse)) - } return opts.MustOpen() } @@ -89,9 +94,6 @@ func openKV(path string, exclusive bool) ethdb.RwKV { must(mapSize.UnmarshalText([]byte(mapSizeStr))) opts = opts.MapSize(mapSize) } - if freelistReuse > 0 { - opts = opts.MaxFreelistReuse(uint(freelistReuse)) - } kv := opts.MustOpen() metrics.AddCallback(kv.CollectMetrics) return kv diff --git a/cmd/integration/commands/snapshot_check.go b/cmd/integration/commands/snapshot_check.go index e2045eb3f..b3e9f5dc3 100644 --- a/cmd/integration/commands/snapshot_check.go +++ b/cmd/integration/commands/snapshot_check.go @@ -22,7 +22,7 @@ import ( ) func init() { - withChaindata(cmdSnapshotCheck) + withDatadir(cmdSnapshotCheck) withBlock(cmdSnapshotCheck) withBatchSize(cmdSnapshotCheck) cmdSnapshotCheck.Flags().StringVar(&tmpDBPath, "tmp_db", "", "path to temporary db(for debug)") @@ -33,7 +33,7 @@ var tmpDBPath string var cmdSnapshotCheck = &cobra.Command{ Use: "snapshot_check", Short: "check execution over state snapshot by block", - Example: "go run cmd/integration/main.go snapshot_check --block 11400000 --chaindata /media/b00ris/nvme/backup/snapshotsync/tg/chaindata/ --snapshotDir /media/b00ris/nvme/snapshots/ --snapshotMode s --tmp_db /media/b00ris/nvme/tmp/debug", + Example: "go run cmd/integration/main.go snapshot_check --block 11400000 --datadir /media/b00ris/nvme/backup/snapshotsync/ --snapshotDir /media/b00ris/nvme/snapshots/ --snapshotMode s --tmp_db /media/b00ris/nvme/tmp/debug", RunE: func(cmd *cobra.Command, args []string) error { ctx := utils.RootContext() //db to provide headers, blocks, 
senders ... diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index b9b2ba813..0a9d9e21f 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -233,12 +233,10 @@ var cmdRunMigrations = &cobra.Command{ } func init() { - withChaindata(cmdPrintStages) - withLmdbFlags(cmdPrintStages) + withDatadir(cmdPrintStages) rootCmd.AddCommand(cmdPrintStages) - withChaindata(cmdStageSenders) - withLmdbFlags(cmdStageSenders) + //withChaindata(cmdStageSenders) withReset(cmdStageSenders) withBlock(cmdStageSenders) withUnwind(cmdStageSenders) @@ -246,14 +244,12 @@ func init() { rootCmd.AddCommand(cmdStageSenders) - withChaindata(cmdStageBodies) - withUnwind(cmdStageBodies) withDatadir(cmdStageBodies) + withUnwind(cmdStageBodies) rootCmd.AddCommand(cmdStageBodies) - withChaindata(cmdStageExec) - withLmdbFlags(cmdStageExec) + withDatadir(cmdStageExec) withReset(cmdStageExec) withBlock(cmdStageExec) withUnwind(cmdStageExec) @@ -263,55 +259,43 @@ func init() { rootCmd.AddCommand(cmdStageExec) - withChaindata(cmdStageHashState) - withLmdbFlags(cmdStageHashState) + withDatadir(cmdStageHashState) withReset(cmdStageHashState) withBlock(cmdStageHashState) withUnwind(cmdStageHashState) withBatchSize(cmdStageHashState) - withDatadir(cmdStageHashState) rootCmd.AddCommand(cmdStageHashState) - withChaindata(cmdStageTrie) - withLmdbFlags(cmdStageTrie) + withDatadir(cmdStageTrie) withReset(cmdStageTrie) withBlock(cmdStageTrie) withUnwind(cmdStageTrie) - withDatadir(cmdStageTrie) withIntegrityChecks(cmdStageTrie) rootCmd.AddCommand(cmdStageTrie) - withChaindata(cmdStageHistory) - withLmdbFlags(cmdStageHistory) + withDatadir(cmdStageHistory) withReset(cmdStageHistory) withBlock(cmdStageHistory) withUnwind(cmdStageHistory) - withDatadir(cmdStageHistory) rootCmd.AddCommand(cmdStageHistory) - withChaindata(cmdLogIndex) - withLmdbFlags(cmdLogIndex) + withDatadir(cmdLogIndex) withReset(cmdLogIndex) withBlock(cmdLogIndex) withUnwind(cmdLogIndex) - withDatadir(cmdLogIndex) rootCmd.AddCommand(cmdLogIndex) - withChaindata(cmdCallTraces) - withLmdbFlags(cmdCallTraces) + withDatadir(cmdCallTraces) withReset(cmdCallTraces) withBlock(cmdCallTraces) withUnwind(cmdCallTraces) - withDatadir(cmdCallTraces) rootCmd.AddCommand(cmdCallTraces) - withChaindata(cmdStageTxLookup) - withLmdbFlags(cmdStageTxLookup) withReset(cmdStageTxLookup) withBlock(cmdStageTxLookup) withUnwind(cmdStageTxLookup) @@ -319,16 +303,13 @@ func init() { rootCmd.AddCommand(cmdStageTxLookup) - withChaindata(cmdPrintMigrations) + withDatadir(cmdPrintMigrations) rootCmd.AddCommand(cmdPrintMigrations) - withChaindata(cmdRemoveMigration) - withLmdbFlags(cmdRemoveMigration) + withDatadir(cmdRemoveMigration) withMigration(cmdRemoveMigration) rootCmd.AddCommand(cmdRemoveMigration) - withChaindata(cmdRunMigrations) - withLmdbFlags(cmdRunMigrations) withDatadir(cmdRunMigrations) rootCmd.AddCommand(cmdRunMigrations) } diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index 1284b1476..61a7d7961 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -67,7 +67,6 @@ Examples: log.Error(err.Error()) return nil } - } return nil }, @@ -123,13 +122,13 @@ func init() { rootCmd.AddCommand(stateStags) - withChaindata(loopIhCmd) + withDatadir(loopIhCmd) withBatchSize(loopIhCmd) withUnwind(loopIhCmd) rootCmd.AddCommand(loopIhCmd) - withChaindata(loopExecCmd) + withDatadir(loopExecCmd) withBatchSize(loopExecCmd) 
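With the `maxFreelistReuse` knob removed, the database-opening path in these commands reduces to map-size tuning; the freelist-reuse limit is always the library default. A condensed sketch of what `openKV` now amounts to for the LMDB case (the function name is illustrative; the real `openKV` also handles the mdbx backend and registers a metrics callback):

```go
package db

import (
	"github.com/c2h5oh/datasize"
	"github.com/ledgerwatch/turbo-geth/ethdb"
)

// openLmdb opens an LMDB-backed RwKV, optionally exclusive, with an optional
// map-size override parsed from a human-readable string such as "2TB".
func openLmdb(path string, exclusive bool, mapSizeStr string) ethdb.RwKV {
	opts := ethdb.NewLMDB().Path(path)
	if exclusive {
		opts = opts.Exclusive()
	}
	if mapSizeStr != "" {
		var mapSize datasize.ByteSize
		if err := mapSize.UnmarshalText([]byte(mapSizeStr)); err != nil {
			panic(err)
		}
		opts = opts.MapSize(mapSize)
	}
	return opts.MustOpen()
}
```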
withUnwind(loopExecCmd) diff --git a/cmd/rpcdaemon/commands/error_messages.go b/cmd/rpcdaemon/commands/error_messages.go index 7c7e2eaa6..b593ea595 100644 --- a/cmd/rpcdaemon/commands/error_messages.go +++ b/cmd/rpcdaemon/commands/error_messages.go @@ -4,7 +4,7 @@ package commands const NotImplemented = "the method is currently not implemented: %s" // NotAvailableChainData x -const NotAvailableChainData = "the function %s is not available, please use --private.api.addr option instead of --chaindata option" +const NotAvailableChainData = "the function %s is not available, please use --private.api.addr option instead of --datadir option" // NotAvailableDeprecated x const NotAvailableDeprecated = "the method has been deprecated: %s" diff --git a/cmd/rpcdaemon/commands/net_api.go b/cmd/rpcdaemon/commands/net_api.go index 7c712ea31..11b0cd9f3 100644 --- a/cmd/rpcdaemon/commands/net_api.go +++ b/cmd/rpcdaemon/commands/net_api.go @@ -37,7 +37,7 @@ func (api *NetAPIImpl) Listening(_ context.Context) (bool, error) { // Version implements net_version. Returns the current network id. func (api *NetAPIImpl) Version(ctx context.Context) (string, error) { if api.ethBackend == nil { - // We're running in --chaindata mode or otherwise cannot get the backend + // We're running in --datadir mode or otherwise cannot get the backend return "", fmt.Errorf(NotAvailableChainData, "net_version") } diff --git a/cmd/rpcdaemon/commands/send_transaction.go b/cmd/rpcdaemon/commands/send_transaction.go index a6abee66e..770fa09ef 100644 --- a/cmd/rpcdaemon/commands/send_transaction.go +++ b/cmd/rpcdaemon/commands/send_transaction.go @@ -11,7 +11,7 @@ import ( // SendRawTransaction implements eth_sendRawTransaction. Creates new message call transaction or a contract creation for previously-signed transactions. 
func (api *APIImpl) SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) { if api.ethBackend == nil { - // We're running in --chaindata mode or otherwise cannot get the backend + // We're running in --datadir mode or otherwise cannot get the backend return common.Hash{}, fmt.Errorf(NotAvailableChainData, "eth_sendRawTransaction") } res, err := api.ethBackend.AddLocal(ctx, encodedTx) diff --git a/cmd/snapshots/generator/commands/copy_from_state.go b/cmd/snapshots/generator/commands/copy_from_state.go index 496c6117f..329c86362 100644 --- a/cmd/snapshots/generator/commands/copy_from_state.go +++ b/cmd/snapshots/generator/commands/copy_from_state.go @@ -14,7 +14,7 @@ import ( ) func init() { - withChaindata(copyFromStateSnapshotCmd) + withDatadir(copyFromStateSnapshotCmd) withSnapshotFile(copyFromStateSnapshotCmd) withSnapshotData(copyFromStateSnapshotCmd) withBlock(copyFromStateSnapshotCmd) @@ -22,11 +22,11 @@ func init() { } -//go run cmd/snapshots/generator/main.go state_copy --block 11000000 --snapshot /media/b00ris/nvme/snapshots/state --chaindata /media/b00ris/nvme/backup/snapshotsync/tg/chaindata/ &> /media/b00ris/nvme/copy.log +//go run cmd/snapshots/generator/main.go state_copy --block 11000000 --snapshot /media/b00ris/nvme/snapshots/state --datadir /media/b00ris/nvme/backup/snapshotsync/ &> /media/b00ris/nvme/copy.log var copyFromStateSnapshotCmd = &cobra.Command{ Use: "state_copy", Short: "Copy from state snapshot", - Example: "go run cmd/snapshots/generator/main.go state_copy --block 11000000 --snapshot /media/b00ris/nvme/snapshots/state --chaindata /media/b00ris/nvme/backup/snapshotsync/tg/chaindata/", + Example: "go run cmd/snapshots/generator/main.go state_copy --block 11000000 --snapshot /media/b00ris/nvme/snapshots/state --datadir /media/b00ris/nvme/backup/snapshotsync", RunE: func(cmd *cobra.Command, args []string) error { return CopyFromState(cmd.Context(), chaindata, snapshotFile, block, snapshotDir, snapshotMode) }, diff --git a/cmd/snapshots/generator/commands/generate_body_snapshot.go b/cmd/snapshots/generator/commands/generate_body_snapshot.go index c778f56ac..a1771a257 100644 --- a/cmd/snapshots/generator/commands/generate_body_snapshot.go +++ b/cmd/snapshots/generator/commands/generate_body_snapshot.go @@ -18,7 +18,7 @@ import ( ) func init() { - withChaindata(generateBodiesSnapshotCmd) + withDatadir(generateBodiesSnapshotCmd) withSnapshotFile(generateBodiesSnapshotCmd) withSnapshotData(generateBodiesSnapshotCmd) withBlock(generateBodiesSnapshotCmd) @@ -29,7 +29,7 @@ func init() { var generateBodiesSnapshotCmd = &cobra.Command{ Use: "bodies", Short: "Generate bodies snapshot", - Example: "go run cmd/snapshots/generator/main.go bodies --block 11000000 --chaindata /media/b00ris/nvme/snapshotsync/tg/chaindata/ --snapshotDir /media/b00ris/nvme/snapshotsync/tg/snapshots/ --snapshotMode \"hb\" --snapshot /media/b00ris/nvme/snapshots/bodies_test", + Example: "go run cmd/snapshots/generator/main.go bodies --block 11000000 --datadir /media/b00ris/nvme/snapshotsync/ --snapshotDir /media/b00ris/nvme/snapshotsync/tg/snapshots/ --snapshotMode \"hb\" --snapshot /media/b00ris/nvme/snapshots/bodies_test", RunE: func(cmd *cobra.Command, args []string) error { return BodySnapshot(cmd.Context(), chaindata, snapshotFile, block, snapshotDir, snapshotMode) }, diff --git a/cmd/snapshots/generator/commands/generate_header_snapshot.go b/cmd/snapshots/generator/commands/generate_header_snapshot.go index 514a5c783..829020b3f 100644 --- 
a/cmd/snapshots/generator/commands/generate_header_snapshot.go +++ b/cmd/snapshots/generator/commands/generate_header_snapshot.go @@ -19,7 +19,7 @@ import ( ) func init() { - withChaindata(generateHeadersSnapshotCmd) + withDatadir(generateHeadersSnapshotCmd) withSnapshotFile(generateHeadersSnapshotCmd) withSnapshotData(generateHeadersSnapshotCmd) withBlock(generateHeadersSnapshotCmd) @@ -30,7 +30,7 @@ func init() { var generateHeadersSnapshotCmd = &cobra.Command{ Use: "headers", Short: "Generate headers snapshot", - Example: "go run cmd/snapshots/generator/main.go headers --block 11000000 --chaindata /media/b00ris/nvme/snapshotsync/tg/chaindata/ --snapshotDir /media/b00ris/nvme/snapshotsync/tg/snapshots/ --snapshotMode \"hb\" --snapshot /media/b00ris/nvme/snapshots/headers_test", + Example: "go run cmd/snapshots/generator/main.go headers --block 11000000 --datadir /media/b00ris/nvme/snapshotsync/ --snapshotDir /media/b00ris/nvme/snapshotsync/tg/snapshots/ --snapshotMode \"hb\" --snapshot /media/b00ris/nvme/snapshots/headers_test", RunE: func(cmd *cobra.Command, args []string) error { return HeaderSnapshot(cmd.Context(), chaindata, snapshotFile, block, snapshotDir, snapshotMode) }, diff --git a/cmd/snapshots/generator/commands/generate_state_snapshot.go b/cmd/snapshots/generator/commands/generate_state_snapshot.go index 02babbfa8..c59e9a28e 100644 --- a/cmd/snapshots/generator/commands/generate_state_snapshot.go +++ b/cmd/snapshots/generator/commands/generate_state_snapshot.go @@ -18,7 +18,7 @@ import ( ) func init() { - withChaindata(generateStateSnapshotCmd) + withDatadir(generateStateSnapshotCmd) withSnapshotFile(generateStateSnapshotCmd) withSnapshotData(generateStateSnapshotCmd) withBlock(generateStateSnapshotCmd) @@ -26,11 +26,11 @@ func init() { } -//go run cmd/snapshots/generator/main.go state_copy --block 11000000 --snapshot /media/b00ris/nvme/snapshots/state --chaindata /media/b00ris/nvme/backup/snapshotsync/tg/chaindata/ &> /media/b00ris/nvme/copy.log +//go run cmd/snapshots/generator/main.go state_copy --block 11000000 --snapshot /media/b00ris/nvme/snapshots/state --datadir /media/b00ris/nvme/backup/snapshotsync &> /media/b00ris/nvme/copy.log var generateStateSnapshotCmd = &cobra.Command{ Use: "state", Short: "Generate state snapshot", - Example: "go run ./cmd/state/main.go stateSnapshot --block 11000000 --chaindata /media/b00ris/nvme/tgstaged/tg/chaindata/ --snapshot /media/b00ris/nvme/snapshots/state", + Example: "go run ./cmd/state/main.go stateSnapshot --block 11000000 --datadir /media/b00ris/nvme/tgstaged/ --snapshot /media/b00ris/nvme/snapshots/state", RunE: func(cmd *cobra.Command, args []string) error { return GenerateStateSnapshot(cmd.Context(), chaindata, snapshotFile, block, snapshotDir, snapshotMode) }, diff --git a/cmd/snapshots/generator/commands/root.go b/cmd/snapshots/generator/commands/root.go index 28d28ec5a..abada525a 100644 --- a/cmd/snapshots/generator/commands/root.go +++ b/cmd/snapshots/generator/commands/root.go @@ -5,11 +5,13 @@ import ( "fmt" "os" "os/signal" + "path" "syscall" "github.com/ledgerwatch/turbo-geth/cmd/utils" "github.com/ledgerwatch/turbo-geth/internal/debug" "github.com/ledgerwatch/turbo-geth/log" + "github.com/ledgerwatch/turbo-geth/node" "github.com/spf13/cobra" ) @@ -43,6 +45,8 @@ func rootContext() context.Context { } var ( + datadir string + database string chaindata string snapshotFile string block uint64 @@ -57,6 +61,9 @@ var rootCmd = &cobra.Command{ if err := debug.SetupCobra(cmd); err != nil { panic(err) } + if chaindata == "" { 
+ chaindata = path.Join(datadir, "tg", "chaindata") + } }, PersistentPostRun: func(cmd *cobra.Command, args []string) { debug.Exit() @@ -77,9 +84,18 @@ func withSnapshotData(cmd *cobra.Command) { cmd.Flags().StringVar(&snapshotDir, "snapshot.dir", "", "snapshot dir") } -func withChaindata(cmd *cobra.Command) { - cmd.Flags().StringVar(&chaindata, "chaindata", "chaindata", "path to the chaindata file used as input to analysis") - must(cmd.MarkFlagFilename("chaindata", "")) +func withDatadir(cmd *cobra.Command) { + cmd.Flags().StringVar(&datadir, "datadir", node.DefaultDataDir(), "data directory for temporary ELT files") + must(cmd.MarkFlagDirname("datadir")) + + cmd.Flags().StringVar(&chaindata, "chaindata", "", "path to the db") + must(cmd.MarkFlagDirname("chaindata")) + + cmd.Flags().StringVar(&snapshotMode, "snapshot.mode", "", "set of snapshots to use") + cmd.Flags().StringVar(&snapshotDir, "snapshot.dir", "", "snapshot dir") + must(cmd.MarkFlagDirname("snapshot.dir")) + + cmd.Flags().StringVar(&database, "database", "", "lmdb|mdbx") } func withSnapshotFile(cmd *cobra.Command) { diff --git a/cmd/snapshots/generator/commands/verify_state_snapshot.go b/cmd/snapshots/generator/commands/verify_state_snapshot.go index d762461fa..850a380b2 100644 --- a/cmd/snapshots/generator/commands/verify_state_snapshot.go +++ b/cmd/snapshots/generator/commands/verify_state_snapshot.go @@ -17,7 +17,7 @@ import ( ) func init() { - withChaindata(verifyStateSnapshotCmd) + withDatadir(verifyStateSnapshotCmd) withSnapshotFile(verifyStateSnapshotCmd) withBlock(verifyStateSnapshotCmd) @@ -28,7 +28,7 @@ func init() { var verifyStateSnapshotCmd = &cobra.Command{ Use: "verify_state", Short: "Verify state snapshot", - Example: "go run cmd/snapshots/generator/main.go verify_state --block 11000000 --snapshot /media/b00ris/nvme/snapshots/state/ --chaindata /media/b00ris/nvme/backup/snapshotsync/tg/chaindata/ ", + Example: "go run cmd/snapshots/generator/main.go verify_state --block 11000000 --snapshot /media/b00ris/nvme/snapshots/state/ --datadir /media/b00ris/nvme/backup/snapshotsync/", RunE: func(cmd *cobra.Command, args []string) error { return VerifyStateSnapshot(cmd.Context(), chaindata, snapshotFile, block) }, diff --git a/cmd/state/commands/check_change_sets.go b/cmd/state/commands/check_change_sets.go index d56856ec6..e001ae462 100644 --- a/cmd/state/commands/check_change_sets.go +++ b/cmd/state/commands/check_change_sets.go @@ -33,8 +33,8 @@ var ( func init() { withBlock(checkChangeSetsCmd) - withChaindata(checkChangeSetsCmd) - checkChangeSetsCmd.Flags().StringVar(&historyfile, "historyfile", "", "path to the file where the changesets and history are expected to be. If omitted, the same as --chaindata") + withDatadir(checkChangeSetsCmd) + checkChangeSetsCmd.Flags().StringVar(&historyfile, "historyfile", "", "path to the file where the changesets and history are expected to be. 
If omitted, the same as /tg/chaindata") checkChangeSetsCmd.Flags().BoolVar(&nocheck, "nocheck", false, "set to turn off the changeset checking and only execute transaction (for performance testing)") checkChangeSetsCmd.Flags().BoolVar(&writeReceipts, "writeReceipts", false, "set to turn on writing receipts as the execution ongoing") rootCmd.AddCommand(checkChangeSetsCmd) diff --git a/cmd/state/commands/check_enc.go b/cmd/state/commands/check_enc.go index ff65935d0..f3c6deb26 100644 --- a/cmd/state/commands/check_enc.go +++ b/cmd/state/commands/check_enc.go @@ -6,7 +6,7 @@ import ( ) func init() { - withChaindata(checkEncCmd) + withDatadir(checkEncCmd) withStatsfile(checkEncCmd) rootCmd.AddCommand(checkEncCmd) } diff --git a/cmd/state/commands/check_index.go b/cmd/state/commands/check_index.go index 653d90aae..4127ffe31 100644 --- a/cmd/state/commands/check_index.go +++ b/cmd/state/commands/check_index.go @@ -7,7 +7,7 @@ import ( ) func init() { - withChaindata(checkIndexCMD) + withDatadir(checkIndexCMD) withIndexBucket(checkIndexCMD) withCSBucket(checkIndexCMD) rootCmd.AddCommand(checkIndexCMD) diff --git a/cmd/state/commands/global_flags_vars.go b/cmd/state/commands/global_flags_vars.go index 2978c5848..cc2997755 100644 --- a/cmd/state/commands/global_flags_vars.go +++ b/cmd/state/commands/global_flags_vars.go @@ -2,11 +2,16 @@ package commands import ( "github.com/ledgerwatch/turbo-geth/common/dbutils" + "github.com/ledgerwatch/turbo-geth/node" "github.com/spf13/cobra" ) var ( + datadir string chaindata string + snapshotDir string + snapshotMode string + database string statsfile string block uint64 changeSetBucket string @@ -23,9 +28,18 @@ func withBlock(cmd *cobra.Command) { cmd.Flags().Uint64Var(&block, "block", 1, "specifies a block number for operation") } -func withChaindata(cmd *cobra.Command) { - cmd.Flags().StringVar(&chaindata, "chaindata", "chaindata", "path to the chaindata file used as input to analysis") - must(cmd.MarkFlagFilename("chaindata", "")) +func withDatadir(cmd *cobra.Command) { + cmd.Flags().StringVar(&datadir, "datadir", node.DefaultDataDir(), "data directory for temporary ELT files") + must(cmd.MarkFlagDirname("datadir")) + + cmd.Flags().StringVar(&chaindata, "chaindata", "", "path to the db") + must(cmd.MarkFlagDirname("chaindata")) + + cmd.Flags().StringVar(&snapshotMode, "snapshot.mode", "", "set of snapshots to use") + cmd.Flags().StringVar(&snapshotDir, "snapshot.dir", "", "snapshot dir") + must(cmd.MarkFlagDirname("snapshot.dir")) + + cmd.Flags().StringVar(&database, "database", "", "lmdb|mdbx") } func withStatsfile(cmd *cobra.Command) { diff --git a/cmd/state/commands/index_stats.go b/cmd/state/commands/index_stats.go index 5e7a995f7..e6407adcd 100644 --- a/cmd/state/commands/index_stats.go +++ b/cmd/state/commands/index_stats.go @@ -6,7 +6,7 @@ import ( ) func init() { - withChaindata(indexStatsCmd) + withDatadir(indexStatsCmd) withStatsfile(indexStatsCmd) withIndexBucket(indexStatsCmd) rootCmd.AddCommand(indexStatsCmd) diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go index 169fa5c33..edf11f127 100644 --- a/cmd/state/commands/opcode_tracer.go +++ b/cmd/state/commands/opcode_tracer.go @@ -36,7 +36,7 @@ var ( func init() { withBlock(opcodeTracerCmd) - withChaindata(opcodeTracerCmd) + withDatadir(opcodeTracerCmd) opcodeTracerCmd.Flags().Uint64Var(&numBlocks, "numBlocks", 1, "number of blocks to run the operation on") opcodeTracerCmd.Flags().BoolVar(&saveOpcodes, "saveOpcodes", false, "set to save the opcodes") 
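The `--historyfile` flag above only documents its fallback ("if omitted, the same as /tg/chaindata"); the resolution itself is not part of this diff. A hedged sketch of what that fallback amounts to (helper name and logic are illustrative only, not lifted from the command):

```go
package commands

// resolveHistoryFile spells out the behaviour described in the --historyfile
// usage string: default to the (datadir-derived) chaindata path when the
// flag is left empty.
func resolveHistoryFile(historyfile, chaindata string) string {
	if historyfile == "" {
		return chaindata
	}
	return historyfile
}
```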
opcodeTracerCmd.Flags().BoolVar(&saveBBlocks, "saveBBlocks", false, "set to save the basic blocks") diff --git a/cmd/state/commands/regenerate_txlookup.go b/cmd/state/commands/regenerate_txlookup.go index 78f00bd12..f782bf3f2 100644 --- a/cmd/state/commands/regenerate_txlookup.go +++ b/cmd/state/commands/regenerate_txlookup.go @@ -6,7 +6,7 @@ import ( ) func init() { - withChaindata(regenerateTxLookupCmd) + withDatadir(regenerateTxLookupCmd) rootCmd.AddCommand(regenerateTxLookupCmd) } diff --git a/cmd/state/commands/root.go b/cmd/state/commands/root.go index f0e0976c0..41d1b65a8 100644 --- a/cmd/state/commands/root.go +++ b/cmd/state/commands/root.go @@ -6,6 +6,7 @@ import ( "fmt" "os" "os/signal" + "path" "syscall" "github.com/ledgerwatch/turbo-geth/cmd/utils" @@ -55,6 +56,9 @@ var rootCmd = &cobra.Command{ if genesisPath != "" { genesis = genesisFromFile(genesisPath) } + if chaindata == "" { + chaindata = path.Join(datadir, "tg", "chaindata") + } }, PersistentPostRun: func(cmd *cobra.Command, args []string) { debug.Exit() diff --git a/cmd/state/commands/verify_headers_snapshot.go b/cmd/state/commands/verify_headers_snapshot.go index 07aafb899..386e5e10e 100644 --- a/cmd/state/commands/verify_headers_snapshot.go +++ b/cmd/state/commands/verify_headers_snapshot.go @@ -6,7 +6,7 @@ import ( ) func init() { - withChaindata(verifyHeadersSnapshotCmd) + withDatadir(verifyHeadersSnapshotCmd) rootCmd.AddCommand(verifyHeadersSnapshotCmd) } diff --git a/cmd/state/commands/verify_txlookup.go b/cmd/state/commands/verify_txlookup.go index 3d0fc1264..82f2dcb4f 100644 --- a/cmd/state/commands/verify_txlookup.go +++ b/cmd/state/commands/verify_txlookup.go @@ -6,7 +6,7 @@ import ( ) func init() { - withChaindata(verifyTxLookupCmd) + withDatadir(verifyTxLookupCmd) rootCmd.AddCommand(verifyTxLookupCmd) } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index d8a37976e..016d5284c 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -801,25 +801,32 @@ func SetNodeConfigCobra(cmd *cobra.Command, cfg *node.Config) { setDataDirCobra(flags, cfg) } +func DataDirForNetwork(datadir string, network string) string { + if datadir != node.DefaultDataDir() { + return datadir + } + + switch network { + case params.DevChainName: + return "" // unless explicitly requested, use memory databases + case params.RinkebyChainName: + return filepath.Join(datadir, "rinkeby") + case params.GoerliChainName: + filepath.Join(datadir, "goerli") + case params.YoloV3ChainName: + return filepath.Join(datadir, "yolo-v3") + default: + return datadir + } + + return datadir +} + func setDataDir(ctx *cli.Context, cfg *node.Config) { if ctx.GlobalIsSet(DataDirFlag.Name) { cfg.DataDir = ctx.GlobalString(DataDirFlag.Name) } else { - chain := ctx.GlobalString(ChainFlag.Name) - switch chain { - case params.RinkebyChainName: - if cfg.DataDir == node.DefaultDataDir() { - cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby") - } - case params.GoerliChainName: - if cfg.DataDir == node.DefaultDataDir() { - cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli") - } - case params.YoloV3ChainName: - if cfg.DataDir == node.DefaultDataDir() { - cfg.DataDir = filepath.Join(node.DefaultDataDir(), "yolo-v3") - } - } + cfg.DataDir = DataDirForNetwork(cfg.DataDir, ctx.GlobalString(ChainFlag.Name)) } } @@ -832,22 +839,7 @@ func setDataDirCobra(f *pflag.FlagSet, cfg *node.Config) { if dirname != "" { cfg.DataDir = dirname } else if chain != nil { - switch *chain { - case params.DevChainName: - cfg.DataDir = "" // unless explicitly requested, 
use memory databases - case params.RinkebyChainName: - if cfg.DataDir == node.DefaultDataDir() { - cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby") - } - case params.GoerliChainName: - if cfg.DataDir == node.DefaultDataDir() { - cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli") - } - case params.YoloV3ChainName: - if cfg.DataDir == node.DefaultDataDir() { - cfg.DataDir = filepath.Join(node.DefaultDataDir(), "yolo-v3") - } - } + cfg.DataDir = DataDirForNetwork(cfg.DataDir, *chain) } } diff --git a/ethdb/Readme.md b/ethdb/Readme.md index 4355744b0..f9e02d57e 100644 --- a/ethdb/Readme.md +++ b/ethdb/Readme.md @@ -135,14 +135,14 @@ Install all database tools: `make db-tools` - tools with prefix `mdb_` is for lmdb, `lmdbgo_` is for lmdb written in go, `mdbx_` is for mdbx. ``` -./build/bin/mdbx_dump -a /path/to/chaindata | lz4 > dump.lz4 -lz4 -d < dump.lz4 | ./build/bin/mdbx_load -an /path/to/chaindata +./build/bin/mdbx_dump -a /tg/chaindata | lz4 > dump.lz4 +lz4 -d < dump.lz4 | ./build/bin/mdbx_load -an /tg/chaindata ``` ## How to get table checksum ``` -./build/bin/mdbx_dump -s table_name /path/to/chaindata | tail -n +4 | sha256sum # tail here is for excluding header +./build/bin/mdbx_dump -s table_name /tg/chaindata | tail -n +4 | sha256sum # tail here is for excluding header Header example: VERSION=3 diff --git a/ethdb/kv_lmdb.go b/ethdb/kv_lmdb.go index 98e69acaa..2db1bf327 100644 --- a/ethdb/kv_lmdb.go +++ b/ethdb/kv_lmdb.go @@ -33,13 +33,12 @@ var ( type BucketConfigsFunc func(defaultBuckets dbutils.BucketsCfg) dbutils.BucketsCfg type LmdbOpts struct { - inMem bool - flags uint - path string - exclusive bool - bucketsCfg BucketConfigsFunc - mapSize datasize.ByteSize - maxFreelistReuse uint + inMem bool + flags uint + path string + exclusive bool + bucketsCfg BucketConfigsFunc + mapSize datasize.ByteSize } func NewLMDB() LmdbOpts { @@ -68,11 +67,6 @@ func (opts LmdbOpts) MapSize(sz datasize.ByteSize) LmdbOpts { return opts } -func (opts LmdbOpts) MaxFreelistReuse(pages uint) LmdbOpts { - opts.maxFreelistReuse = pages - return opts -} - func (opts LmdbOpts) Flags(f func(uint) uint) LmdbOpts { opts.flags = f(opts.flags) return opts @@ -133,10 +127,7 @@ func (opts LmdbOpts) Open() (kv RwKV, err error) { return nil, err } - if opts.maxFreelistReuse == 0 { - opts.maxFreelistReuse = LMDBDefaultMaxFreelistReuse - } - if err = env.SetMaxFreelistReuse(opts.maxFreelistReuse); err != nil { + if err = env.SetMaxFreelistReuse(LMDBDefaultMaxFreelistReuse); err != nil { return nil, err } diff --git a/ethdb/kv_mdbx.go b/ethdb/kv_mdbx.go index 7bcb6b112..6d379114f 100644 --- a/ethdb/kv_mdbx.go +++ b/ethdb/kv_mdbx.go @@ -32,7 +32,6 @@ type MdbxOpts struct { bucketsCfg BucketConfigsFunc mapSize datasize.ByteSize dirtyListMaxPages uint64 - maxFreelistReuse uint } func NewMDBX() MdbxOpts { @@ -77,11 +76,6 @@ func (opts MdbxOpts) MapSize(sz datasize.ByteSize) MdbxOpts { return opts } -func (opts MdbxOpts) MaxFreelistReuse(pages uint) MdbxOpts { - opts.maxFreelistReuse = pages - return opts -} - func (opts MdbxOpts) WithBucketsConfig(f BucketConfigsFunc) MdbxOpts { opts.bucketsCfg = f return opts @@ -128,10 +122,6 @@ func (opts MdbxOpts) Open() (RwKV, error) { opts.dirtyListMaxPages = 8 * 1024 } - if opts.maxFreelistReuse == 0 { - opts.maxFreelistReuse = LMDBDefaultMaxFreelistReuse - } - if opts.flags&mdbx.Accede == 0 { if opts.inMem { if err = env.SetGeometry(int(1*datasize.MB), int(1*datasize.MB), int(64*datasize.MB), int(1*datasize.MB), 0, 4*1024); err != nil { diff --git 
a/ethdb/mdbx/env.go b/ethdb/mdbx/env.go index f92d82b7e..21d0f75ff 100644 --- a/ethdb/mdbx/env.go +++ b/ethdb/mdbx/env.go @@ -439,34 +439,6 @@ func (env *Env) Path() (string, error) { return C.GoString(cpath), nil } -// SetMaxFreelistReuse sets the size of the environment memory map. -// -// Find a big enough contiguous page range for large values in freelist is hard -// just allocate new pages and even don't try to search if value is bigger than this limit. -// measured in pages -//func (env *Env) SetMaxFreelistReuse(pagesLimit uint) error { -// ret := C.mdbx_env_set_maxfree_reuse(env._env, C.uint(pagesLimit)) -// return operrno("mdbx_env_set_maxfree_reuse", ret) -//} - -// MaxFreelistReuse -//func (env *Env) MaxFreelistReuse() (uint, error) { -// var pages C.uint -// ret := C.mdbx_env_get_maxfree_reuse(env._env, &pages) -// return uint(pages), operrno("mdbx_env_get_maxreaders", ret) -//} - -// SetMapSize sets the size of the environment memory map. -// -// See mdbx_env_set_mapsize. -//func (env *Env) SetMapSize(size int64) error { -// if size < 0 { -// return errNegSize -// } -// ret := C.mdbx_env_set_mapsize(env._env, C.size_t(size)) -// return operrno("mdbx_env_set_mapsize", ret) -//} - func (env *Env) SetOption(option uint, value uint64) error { ret := C.mdbx_env_set_option(env._env, C.MDBX_option_t(option), C.uint64_t(value)) return operrno("mdbx_env_set_option", ret) diff --git a/node/config.go b/node/config.go index 773fd082e..540970eb9 100644 --- a/node/config.go +++ b/node/config.go @@ -142,10 +142,9 @@ type Config struct { Logger log.Logger `toml:",omitempty"` // Whether to use LMDB. - LMDB bool - LMDBMapSize datasize.ByteSize - LMDBMaxFreelistReuse uint - MDBX bool + LMDB bool + LMDBMapSize datasize.ByteSize + MDBX bool // Address to listen to when launchig listener for remote database access // empty string means not to start the listener diff --git a/node/node.go b/node/node.go index 8829633cc..e119a8e79 100644 --- a/node/node.go +++ b/node/node.go @@ -580,9 +580,9 @@ func (n *Node) OpenDatabaseWithFreezer(name string, tmpdir string) (*ethdb.Objec return ethdb.NewObjectDatabase(kv), nil } } else { - log.Info("Opening Database (LMDB)", "mapSize", n.config.LMDBMapSize.HR(), "maxFreelistReuse", n.config.LMDBMaxFreelistReuse) + log.Info("Opening Database (LMDB)", "mapSize", n.config.LMDBMapSize.HR()) openFunc = func(exclusive bool) (*ethdb.ObjectDatabase, error) { - opts := ethdb.NewLMDB().Path(dbPath).MapSize(n.config.LMDBMapSize).MaxFreelistReuse(n.config.LMDBMaxFreelistReuse) + opts := ethdb.NewLMDB().Path(dbPath).MapSize(n.config.LMDBMapSize) if exclusive { opts = opts.Exclusive() } diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index b3f64af1a..2680719e0 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -32,7 +32,6 @@ var DefaultFlags = []cli.Flag{ PrivateApiAddr, EtlBufferSizeFlag, LMDBMapSizeFlag, - LMDBMaxFreelistReuseFlag, TLSFlag, TLSCertFlag, TLSKeyFlag, diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go index 6d2ac77a1..fe992fca1 100644 --- a/turbo/cli/flags.go +++ b/turbo/cli/flags.go @@ -84,11 +84,6 @@ var ( Usage: "Sets Memory map size. Lower it if you have issues with opening the DB", Value: ethdb.LMDBDefaultMapSize.String(), } - LMDBMaxFreelistReuseFlag = cli.UintFlag{ - Name: "lmdb.maxFreelistReuse", - Usage: "Find a big enough contiguous page range for large values in freelist is hard just allocate new pages and even don't try to search if value is bigger than this limit. 
Measured in pages.", - Value: ethdb.LMDBDefaultMaxFreelistReuse, - } // mTLS flags TLSFlag = cli.BoolFlag{ @@ -230,15 +225,6 @@ func ApplyFlagsForNodeConfig(ctx *cli.Context, cfg *node.Config) { } } - if cfg.LMDB { - cfg.LMDBMaxFreelistReuse = ctx.GlobalUint(LMDBMaxFreelistReuseFlag.Name) - if cfg.LMDBMaxFreelistReuse < 16 { - log.Error("Invalid LMDB MaxFreelistReuse provided. Will use defaults", - "lmdb.maxFreelistReuse", ethdb.LMDBDefaultMaxFreelistReuse, - "err", "the value should be at least 16", - ) - } - } } // setPrivateApi populates configuration fields related to the remote
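One detail in the new `DataDirForNetwork` helper added to `cmd/utils/flags.go` earlier in this diff: the Goerli branch computes `filepath.Join(datadir, "goerli")` but drops the `return`, so a default datadir on Goerli falls through to the final `return datadir` and never gets the sub-directory. A sketch of the helper with that return restored (otherwise matching the hunk; package and imports as in `cmd/utils`):

```go
package utils

import (
	"path/filepath"

	"github.com/ledgerwatch/turbo-geth/node"
	"github.com/ledgerwatch/turbo-geth/params"
)

// DataDirForNetwork maps a default datadir to a per-network sub-directory,
// leaving explicitly chosen datadirs untouched.
func DataDirForNetwork(datadir string, network string) string {
	if datadir != node.DefaultDataDir() {
		return datadir
	}
	switch network {
	case params.DevChainName:
		return "" // unless explicitly requested, use memory databases
	case params.RinkebyChainName:
		return filepath.Join(datadir, "rinkeby")
	case params.GoerliChainName:
		return filepath.Join(datadir, "goerli")
	case params.YoloV3ChainName:
		return filepath.Join(datadir, "yolo-v3")
	default:
		return datadir
	}
}
```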