Add storage mode flags as letters. (#229)

Igor Mandrigin 2019-12-06 12:19:00 +01:00 committed by GitHub
parent d091240759
commit 1e231a8a9a
12 changed files with 307 additions and 61 deletions

View File

@@ -66,7 +66,7 @@ It expects the genesis file as argument.`,
 		utils.DataDirFlag,
 		utils.CacheFlag,
 		utils.SyncModeFlag,
-		utils.GCModeFlag,
+		utils.GCModePruningFlag,
 		utils.CacheDatabaseFlag,
 		utils.CacheGCFlag,
 	},

View File

@@ -89,7 +89,7 @@ var (
 		utils.TxPoolLifetimeFlag,
 		utils.SyncModeFlag,
 		utils.ExitWhenSyncedFlag,
-		utils.GCModeFlag,
+		utils.GCModePruningFlag,
 		utils.GCModeLimitFlag,
 		utils.GCModeBlockToPruneFlag,
 		utils.GCModeTickTimeout,
@@ -106,7 +106,7 @@ var (
 		utils.CacheGCFlag,
 		utils.TrieCacheGenFlag,
 		utils.DownloadOnlyFlag,
-		utils.NoHistory,
+		utils.StorageModeFlag,
 		utils.ArchiveSyncInterval,
 		utils.DatabaseFlag,
 		utils.RemoteDbListenAddress,

View File

@@ -77,7 +77,7 @@ var AppHelpFlagGroups = []flagGroup{
 			utils.GoerliFlag,
 			utils.SyncModeFlag,
 			utils.ExitWhenSyncedFlag,
-			utils.GCModeFlag,
+			utils.GCModePruningFlag,
 			utils.GCModeLimitFlag,
 			utils.GCModeBlockToPruneFlag,
 			utils.GCModeTickTimeout,
@@ -86,7 +86,7 @@ var AppHelpFlagGroups = []flagGroup{
 			utils.LightKDFFlag,
 			utils.WhitelistFlag,
 			utils.DownloadOnlyFlag,
-			utils.NoHistory,
+			utils.StorageModeFlag,
 			utils.ArchiveSyncInterval,
 		},
 	},

View File

@@ -209,24 +209,23 @@ var (
 		Usage: `Blockchain sync mode ("fast", "full", or "light")`,
 		Value: &defaultSyncMode,
 	}
-	GCModeFlag = cli.StringFlag{
-		Name:  "gcmode",
-		Usage: `Blockchain garbage collection mode ("full", "archive")`,
-		Value: "archive",
+	GCModePruningFlag = cli.BoolFlag{
+		Name:  "pruning",
+		Usage: `Enable storage pruning`,
 	}
 	GCModeLimitFlag = cli.Uint64Flag{
-		Name:  "gcmode.stop_limit",
-		Usage: `Blockchain garbage collection mode limit("full")"`,
+		Name:  "pruning.stop_limit",
+		Usage: `Blockchain pruning limit`,
 		Value: 1024,
 	}
 	GCModeBlockToPruneFlag = cli.Uint64Flag{
-		Name:  "gcmode.processing_limit",
-		Usage: `Block to prune per tick"`,
+		Name:  "pruning.processing_limit",
+		Usage: `Block to prune per tick`,
 		Value: 20,
 	}
 	GCModeTickTimeout = cli.DurationFlag{
-		Name:  "gcmode.tick",
-		Usage: `Time of tick"`,
+		Name:  "pruning.tick",
+		Usage: `Time of tick`,
 		Value: time.Second * 2,
 	}
 	LightServFlag = cli.IntFlag{
@@ -413,9 +412,14 @@ var (
 		Name:  "trie-cache-gens",
 		Usage: "Number of trie node generations to keep in memory",
 	}
-	NoHistory = cli.BoolFlag{
-		Name:  "no-history",
-		Usage: "Write the whole state history",
+	StorageModeFlag = cli.StringFlag{
+		Name: "storage-mode",
+		Usage: `Configures the storage mode of the app:
+* h - write history to the DB
+* p - write preimages to the DB
+* r - write receipts to the DB
+* t - write tx lookup index to the DB`,
+		Value: eth.DefaultStorageMode.ToString(),
 	}
 	ArchiveSyncInterval = cli.IntFlag{
 		Name: "archive-sync-interval",
@@ -1463,18 +1467,20 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
 		cfg.DatabaseFreezer = ctx.GlobalString(AncientFlag.Name)
 	}
-	if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
-		Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
-	}
-	if ctx.GlobalIsSet(GCModeFlag.Name) {
-		cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive"
-	}
+	// TODO: Invert the logic there.
+	cfg.NoPruning = !ctx.GlobalBool(GCModePruningFlag.Name)
 	cfg.BlocksBeforePruning = ctx.GlobalUint64(GCModeLimitFlag.Name)
 	cfg.BlocksToPrune = ctx.GlobalUint64(GCModeBlockToPruneFlag.Name)
 	cfg.PruningTimeout = ctx.GlobalDuration(GCModeTickTimeout.Name)
 	cfg.DownloadOnly = ctx.GlobalBoolT(DownloadOnlyFlag.Name)
-	cfg.NoHistory = ctx.GlobalBoolT(NoHistory.Name)
+	mode, err := eth.StorageModeFromString(ctx.GlobalString(StorageModeFlag.Name))
+	if err != nil {
+		Fatalf(fmt.Sprintf("error while parsing mode: %v", err))
+	}
+	cfg.StorageMode = mode
 	cfg.ArchiveSyncInterval = ctx.GlobalInt(ArchiveSyncInterval.Name)
 	if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
@@ -1683,14 +1689,10 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
 			}, nil, false)
 		}
 	}
-	if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
-		Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
-	}
 	cache := &core.CacheConfig{
 		TrieCleanLimit:      eth.DefaultConfig.TrieCleanCache,
 		TrieCleanNoPrefetch: ctx.GlobalBool(CacheNoPrefetchFlag.Name),
 		TrieDirtyLimit:      eth.DefaultConfig.TrieDirtyCache,
-		TrieDirtyDisabled:   ctx.GlobalString(GCModeFlag.Name) == "archive",
 		TrieTimeLimit:       eth.DefaultConfig.TrieTimeout,
 	}
 	if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {

View File

@@ -116,7 +116,6 @@ type CacheConfig struct {
 	TrieCleanLimit      int           // Memory allowance (MB) to use for caching trie nodes in memory
 	TrieCleanNoPrefetch bool          // Whether to disable heuristic state prefetching for followup blocks
 	TrieDirtyLimit      int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
-	TrieDirtyDisabled   bool          // Whether to disable trie write caching and GC altogether (archive node)
 	TrieTimeLimit       time.Duration // Time limit after which to flush the current in-memory trie to disk
 
 	BlocksBeforePruning uint64
@@ -191,6 +190,8 @@ type BlockChain struct {
 	highestKnownBlock   uint64
 	highestKnownBlockMu sync.Mutex
 	enableReceipts      bool // Whether receipts need to be written to the database
+	enableTxLookupIndex bool // Whether we store tx lookup index into the database
+	enablePreimages     bool // Whether we store preimages into the database
 	resolveReads        bool
 	pruner              Pruner
 }
@@ -224,21 +225,24 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 	cdb := db.NewBatch()
 	bc := &BlockChain{
 		chainConfig:         chainConfig,
 		cacheConfig:         cacheConfig,
 		db:                  cdb,
 		triegc:              prque.New(nil),
 		quit:                make(chan struct{}),
 		shouldPreserve:      shouldPreserve,
 		bodyCache:           bodyCache,
 		bodyRLPCache:        bodyRLPCache,
 		receiptsCache:       receiptsCache,
 		blockCache:          blockCache,
 		txLookupCache:       txLookupCache,
 		futureBlocks:        futureBlocks,
 		engine:              engine,
 		vmConfig:            vmConfig,
 		badBlocks:           badBlocks,
+		enableTxLookupIndex: true,
+		enableReceipts:      false,
+		enablePreimages:     true,
 	}
 	bc.validator = NewBlockValidator(chainConfig, bc, engine)
 	bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
@@ -335,12 +339,21 @@ func (bc *BlockChain) EnableReceipts(er bool) {
 	bc.enableReceipts = er
 }
 
+func (bc *BlockChain) EnableTxLookupIndex(et bool) {
+	bc.enableTxLookupIndex = et
+}
+
+func (bc *BlockChain) EnablePreimages(ep bool) {
+	bc.enablePreimages = ep
+}
+
 func (bc *BlockChain) GetTrieDbState() (*state.TrieDbState, error) {
 	if bc.trieDbState == nil {
 		currentBlockNr := bc.CurrentBlock().NumberU64()
 		log.Info("Creating IntraBlockState from latest state", "block", currentBlockNr)
 		var err error
 		bc.trieDbState, err = state.NewTrieDbState(bc.CurrentBlock().Header().Root, bc.db, currentBlockNr)
+		bc.trieDbState.EnablePreimages(bc.enablePreimages)
 		if err != nil {
 			log.Error("Creation aborted", "error", err)
 			return nil, err
@@ -1079,7 +1092,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 			// Flush data into ancient database.
 			size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))
-			rawdb.WriteTxLookupEntries(batch, block)
+			if bc.enableTxLookupIndex {
+				rawdb.WriteTxLookupEntries(batch, block)
+			}
 			stats.processed++
 		}
@@ -1150,7 +1165,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 			// Write all the data out into the database
 			rawdb.WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
 			rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
-			rawdb.WriteTxLookupEntries(batch, block)
+			if bc.enableTxLookupIndex {
+				rawdb.WriteTxLookupEntries(batch, block)
+			}
 			stats.processed++
 			if batch.BatchSize() >= batch.IdealBatchSize() {
@@ -1276,10 +1293,10 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 		}
 	}
 	// Write the positional metadata for transaction/receipt lookups and preimages
-	if !bc.cacheConfig.DownloadOnly {
+	if !bc.cacheConfig.DownloadOnly && bc.enableTxLookupIndex {
 		rawdb.WriteTxLookupEntries(bc.db, block)
 	}
-	if stateDb != nil && !bc.cacheConfig.DownloadOnly {
+	if stateDb != nil && bc.enablePreimages && !bc.cacheConfig.DownloadOnly {
 		rawdb.WritePreimages(bc.db, stateDb.Preimages())
 	}
@@ -1862,7 +1879,9 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 		collectLogs(newChain[i].Hash(), false)
 		// Write lookup entries for hash based transaction/receipt searches
-		rawdb.WriteTxLookupEntries(bc.db, newChain[i])
+		if bc.enableTxLookupIndex {
+			rawdb.WriteTxLookupEntries(bc.db, newChain[i])
+		}
 		addedTxs = append(addedTxs, newChain[i].Transactions()...)
 	}
 	// When transactions get deleted from the database, the receipts that were

View File

@@ -28,6 +28,7 @@ import (
 	"time"
 
 	"github.com/ledgerwatch/turbo-geth/common"
+	"github.com/ledgerwatch/turbo-geth/common/dbutils"
 	"github.com/ledgerwatch/turbo-geth/consensus"
 	"github.com/ledgerwatch/turbo-geth/consensus/ethash"
 	"github.com/ledgerwatch/turbo-geth/core/rawdb"
@@ -1509,6 +1510,166 @@ func TestEIP155Transition(t *testing.T) {
 	}
 }
+
+func TestModes(t *testing.T) {
+	// run test on all combination of flags
+	runWithModesPermuations(
+		t,
+		doModesTest,
+	)
+}
+
+func doModesTest(history, preimages, receipts, txlookup bool) error {
+	fmt.Printf("h=%v, p=%v, r=%v, t=%v\n", history, preimages, receipts, txlookup)
+	// Configure and generate a sample block chain
+	var (
+		db         = ethdb.NewMemDatabase()
+		key, _     = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		address    = crypto.PubkeyToAddress(key.PublicKey)
+		funds      = big.NewInt(1000000000)
+		deleteAddr = common.Address{1}
+		gspec      = &Genesis{
+			Config: &params.ChainConfig{ChainID: big.NewInt(1), EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)},
+			Alloc:  GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
+		}
+		genesis = gspec.MustCommit(db)
+	)
+
+	cacheConfig := &CacheConfig{
+		Disabled:            true,
+		BlocksBeforePruning: 1024,
+		TrieCleanLimit:      256,
+		TrieDirtyLimit:      256,
+		TrieTimeLimit:       5 * time.Minute,
+		DownloadOnly:        false,
+		NoHistory:           !history,
+	}
+
+	blockchain, _ := NewBlockChain(db, cacheConfig, gspec.Config, ethash.NewFaker(), vm.Config{}, nil)
+	blockchain.EnableReceipts(receipts)
+	blockchain.EnablePreimages(preimages)
+	blockchain.EnableTxLookupIndex(txlookup)
+
+	ctx := blockchain.WithContext(context.Background(), big.NewInt(genesis.Number().Int64()+1))
+	defer blockchain.Stop()
+
+	blocks, _ := GenerateChain(ctx, gspec.Config, genesis, ethash.NewFaker(), db.MemCopy(), 4, func(i int, block *BlockGen) {
+		var (
+			tx      *types.Transaction
+			err     error
+			basicTx = func(signer types.Signer) (*types.Transaction, error) {
+				return types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), 21000, new(big.Int), nil), signer, key)
+			}
+		)
+		switch i {
+		case 0:
+			tx, err = basicTx(types.HomesteadSigner{})
+			if err != nil {
+				panic(err)
+			}
+			block.AddTx(tx)
+		case 2:
+			tx, err = basicTx(types.HomesteadSigner{})
+			if err != nil {
+				panic(err)
+			}
+			block.AddTx(tx)
+
+			tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainID))
+			if err != nil {
+				panic(err)
+			}
+			block.AddTx(tx)
+		case 3:
+			tx, err = basicTx(types.HomesteadSigner{})
+			if err != nil {
+				panic(err)
+			}
+			block.AddTx(tx)
+
+			tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainID))
+			if err != nil {
+				panic(err)
+			}
+			block.AddTx(tx)
+		}
+	})
+
+	if _, err := blockchain.InsertChain(blocks); err != nil {
+		return err
+	}
+
+	for bucketName, shouldBeEmpty := range map[string]bool{
+		string(dbutils.AccountsHistoryBucket): !history,
+		string(dbutils.PreimagePrefix):        !preimages,
+		string(dbutils.BlockReceiptsPrefix):   !receipts,
+		string(dbutils.TxLookupPrefix):        !txlookup,
+	} {
+		numberOfEntries := 0
+
+		err := db.Walk([]byte(bucketName), nil, 0, func(k, v []byte) (bool, error) {
+			// we ignore empty account history
+			//nolint:scopelint
+			if bucketName == string(dbutils.AccountsHistoryBucket) && len(v) == 0 {
+				return true, nil
+			}
+
+			numberOfEntries++
+			return true, nil
+		})
+		if err != nil {
+			return err
+		}
+
+		if bucketName == string(dbutils.BlockReceiptsPrefix) {
+			// we will always have a receipt for genesis
+			numberOfEntries--
+		}
+
+		if bucketName == string(dbutils.PreimagePrefix) {
+			// we will always have 2 preimages because GenerateChain interface does not
+			// allow us to set it to ignore them
+			// but if the preimages are enabled in BlockChain, we will have more than 2.
+			// TODO: with a better interface to GenerateChain allow to check preimages
+			numberOfEntries -= 2
+		}
+
+		if (shouldBeEmpty && numberOfEntries > 0) || (!shouldBeEmpty && numberOfEntries == 0) {
+			return fmt.Errorf("bucket '%s' should be empty? %v (actually %d entries)", bucketName, shouldBeEmpty, numberOfEntries)
+		}
+	}
+
+	return nil
+}
+
+func runWithModesPermuations(t *testing.T, testFunc func(bool, bool, bool, bool) error) {
+	err := runPermutation(testFunc, 0, true, true, true, true)
+	if err != nil {
+		t.Errorf("error while testing stuff: %v", err)
+	}
+}
+
+func runPermutation(testFunc func(bool, bool, bool, bool) error, current int, history, preimages, receipts, txlookup bool) error {
+	if current == 4 {
+		return testFunc(history, preimages, receipts, txlookup)
+	}
+	if err := runPermutation(testFunc, current+1, history, preimages, receipts, txlookup); err != nil {
+		return err
+	}
+	switch current {
+	case 0:
+		history = !history
+	case 1:
+		preimages = !preimages
+	case 2:
+		receipts = !receipts
+	case 3:
+		txlookup = !txlookup
+	default:
+		panic("unexpected current item")
+	}
+
+	return runPermutation(testFunc, current+1, history, preimages, receipts, txlookup)
+}
+
 func TestEIP161AccountRemoval(t *testing.T) {
 	// Configure and generate a sample block chain
 	var (
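Editor's note (illustrative, not part of the diff): the recursive runPermutation helper above visits every one of the 2^4 = 16 on/off combinations of (history, preimages, receipts, txlookup); at each level it first recurses with the current value, then flips that level's flag and recurses again. A minimal equivalent sketch using a bitmask, which enumerates the same set of combinations (in a different order):

// Illustrative only: bit i of mask decides whether the i-th flag is flipped
// from its initial value of true.
func runWithModesBitmask(testFunc func(bool, bool, bool, bool) error) error {
	for mask := 0; mask < 16; mask++ {
		history := mask&1 == 0
		preimages := mask&2 == 0
		receipts := mask&4 == 0
		txlookup := mask&8 == 0
		if err := testFunc(history, preimages, receipts, txlookup); err != nil {
			return err
		}
	}
	return nil
}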

View File

@@ -21,11 +21,11 @@ import (
 	"encoding/binary"
 	"math/big"
 
-	"github.com/ledgerwatch/turbo-geth/common/dbutils"
-	"github.com/ledgerwatch/turbo-geth/params"
 	"github.com/ledgerwatch/turbo-geth/common"
+	"github.com/ledgerwatch/turbo-geth/common/dbutils"
 	"github.com/ledgerwatch/turbo-geth/core/types"
 	"github.com/ledgerwatch/turbo-geth/log"
+	"github.com/ledgerwatch/turbo-geth/params"
 	"github.com/ledgerwatch/turbo-geth/rlp"
 )

View File

@@ -162,6 +162,7 @@ type TrieDbState struct {
 	historical    bool
 	noHistory     bool
 	resolveReads  bool
+	savePreimages bool
 	pg            *trie.ProofGenerator
 	tp            *trie.TriePruning
 }
@@ -186,6 +187,7 @@ func NewTrieDbState(root common.Hash, db ethdb.Database, blockNr uint64) (*TrieD
 		codeSizeCache: csc,
 		pg:            trie.NewProofGenerator(),
 		tp:            tp,
+		savePreimages: true,
 	}
 	t.SetTouchFunc(func(hex []byte, del bool) {
 		tp.Touch(hex, del)
@@ -193,6 +195,10 @@ func NewTrieDbState(root common.Hash, db ethdb.Database, blockNr uint64) (*TrieD
 	return &tds, nil
 }
 
+func (tds *TrieDbState) EnablePreimages(ep bool) {
+	tds.savePreimages = ep
+}
+
 func (tds *TrieDbState) SetHistorical(h bool) {
 	tds.historical = h
 }
@@ -757,7 +763,7 @@ func (tds *TrieDbState) ReadAccountData(address common.Address) (*accounts.Accou
 }
 func (tds *TrieDbState) savePreimage(save bool, hash, preimage []byte) error {
-	if !save {
+	if !save || !tds.savePreimages {
 		return nil
 	}
 	// Following check is to minimise the overwriting the same value of preimage

View File

@@ -187,7 +187,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 			TrieCleanNoPrefetch: config.NoPrefetch,
 			TrieTimeLimit:       config.TrieTimeout,
 			DownloadOnly:        config.DownloadOnly,
-			NoHistory:           config.NoHistory,
+			NoHistory:           !config.StorageMode.History,
 			ArchiveSyncInterval: uint64(config.ArchiveSyncInterval),
 		}
 	)
@@ -195,6 +195,11 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 	if err != nil {
 		return nil, err
 	}
+
+	eth.blockchain.EnableReceipts(config.StorageMode.Receipts)
+	eth.blockchain.EnableTxLookupIndex(config.StorageMode.TxIndex)
+	eth.blockchain.EnablePreimages(config.StorageMode.Preimages)
+
 	// Rewind the chain in case of an incompatible config upgrade.
 	if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
 		log.Warn("Rewinding chain to upgrade configuration", "err", compat)

View File

@@ -17,6 +17,7 @@
 package eth
 
 import (
+	"fmt"
 	"math/big"
 	"os"
 	"os/user"
@@ -50,6 +51,7 @@ var DefaultConfig = Config{
 	TrieCleanCache: 256,
 	TrieDirtyCache: 256,
 	TrieTimeout:    60 * time.Minute,
+	StorageMode:    DefaultStorageMode,
 	Miner: miner.Config{
 		GasFloor: 8000000,
 		GasCeil:  8000000,
@@ -84,6 +86,52 @@ func init() {
 	}
 }
+
+type StorageMode struct {
+	History   bool
+	Receipts  bool
+	TxIndex   bool
+	Preimages bool
+}
+
+var DefaultStorageMode = StorageMode{History: true, Receipts: false, TxIndex: true, Preimages: true}
+
+func (m StorageMode) ToString() string {
+	modeString := ""
+	if m.History {
+		modeString += "h"
+	}
+	if m.Preimages {
+		modeString += "p"
+	}
+	if m.Receipts {
+		modeString += "r"
+	}
+	if m.TxIndex {
+		modeString += "t"
+	}
+	return modeString
+}
+
+func StorageModeFromString(flags string) (StorageMode, error) {
+	mode := StorageMode{}
+	for _, flag := range flags {
+		switch flag {
+		case 'h':
+			mode.History = true
+		case 'r':
+			mode.Receipts = true
+		case 't':
+			mode.TxIndex = true
+		case 'p':
+			mode.Preimages = true
+		default:
+			return mode, fmt.Errorf("unexpected flag found: %c", flag)
+		}
+	}
+	return mode, nil
+}
+
 //go:generate gencodec -type Config -formats toml -out gen_config.go
 type Config struct {
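Editor's note (illustrative, not part of the diff): a small round-trip sketch for the encoding above, assuming a hypothetical test file in package eth. DefaultStorageMode serializes to "hpt" because ToString always emits the letters in the fixed order h, p, r, t, and parsing that string back reproduces the same struct.

func TestStorageModeRoundTrip(t *testing.T) {
	// DefaultStorageMode has History, TxIndex and Preimages set, Receipts unset.
	s := DefaultStorageMode.ToString()
	if s != "hpt" {
		t.Fatalf("unexpected encoding: %q", s)
	}
	mode, err := StorageModeFromString(s)
	if err != nil {
		t.Fatal(err)
	}
	if mode != DefaultStorageMode {
		t.Fatalf("round trip mismatch: %+v != %+v", mode, DefaultStorageMode)
	}
	// Unknown letters are rejected.
	if _, err := StorageModeFromString("hx"); err == nil {
		t.Fatal("expected an error for unknown flag 'x'")
	}
}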
@@ -98,7 +146,8 @@ type Config struct {
 	NoPruning  bool // Whether to disable pruning and flush everything to disk
 	NoPrefetch bool // Whether to disable prefetching and only load state on demand
-	NoHistory bool
+	StorageMode StorageMode
 	// DownloadOnly is set when the node does not need to process the blocks, but simply
 	// download them
 	DownloadOnly bool

View File

@@ -26,7 +26,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
 		Whitelist           map[uint64]common.Hash `toml:"-"`
 		LightIngress        int                    `toml:",omitempty"`
 		LightEgress         int                    `toml:",omitempty"`
-		NoHistory           bool
+		StorageMode         string
 		ArchiveSyncInterval int
 		LightServ           int `toml:",omitempty"`
 		LightPeers          int `toml:",omitempty"`
@@ -57,7 +57,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
 	enc.NoPruning = c.NoPruning
 	enc.NoPrefetch = c.NoPrefetch
 	enc.Whitelist = c.Whitelist
-	enc.NoHistory = c.NoHistory
+	enc.StorageMode = c.StorageMode.ToString()
 	enc.ArchiveSyncInterval = c.ArchiveSyncInterval
 	enc.LightServ = c.LightServ
 	enc.LightIngress = c.LightIngress
@@ -95,7 +95,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 		Whitelist           map[uint64]common.Hash `toml:"-"`
 		LightIngress        *int                   `toml:",omitempty"`
 		LightEgress         *int                   `toml:",omitempty"`
-		NoHistory           *bool
+		Mode                *string
 		ArchiveSyncInterval *int
 		LightServ           *int `toml:",omitempty"`
 		LightPeers          *int `toml:",omitempty"`
@@ -141,8 +141,12 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 	if dec.Whitelist != nil {
 		c.Whitelist = dec.Whitelist
 	}
-	if dec.NoHistory != nil {
-		c.NoHistory = *dec.NoHistory
+	if dec.Mode != nil {
+		mode, err := StorageModeFromString(*dec.Mode)
+		if err != nil {
+			return err
+		}
+		c.StorageMode = mode
 	}
 	if dec.ArchiveSyncInterval != nil {
 		c.ArchiveSyncInterval = *dec.ArchiveSyncInterval

View File

@@ -118,7 +118,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
 	}
 	genesis := gspec.MustCommit(db)
 
-	chain, _ := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec.Config, engine, vm.Config{}, nil)
+	chain, _ := core.NewBlockChain(db, &core.CacheConfig{}, gspec.Config, engine, vm.Config{}, nil)
 	txpool := core.NewTxPool(testTxPoolConfig, chainConfig, chain)
 
 	// Generate a small n-block chain and an uncle block for it