[THIN_HISTORY] Decouple changesets and history from ethdb.Database (mutation/bolt_db/badger_db) (#432)

* Prepare for refactoring

* Decoupling

* Further cleanup of mutation

* Further cleanup of mutation

* Remove some tests (temporarily)

* Fix linter

* Fix lint

* Fix lint

* Fix lint

* Fix lint

* Fix lint

* Fix lint

* Recover mutation_test cases

* Fix lint

* Fix WalkAsOf test

* Fix lint

* Fix TODO, linter

* Fix lint

* Fix lint

* Fix tests for THIN_HISTORY

* Fix lint

* Fix lint

* Reduce visibility of DecodeStorage

* Revert "Reduce visibility of DecodeStorage"

This reverts commit 2e11e16ea095b75a62cdddb77b9477eabc259e67.
This commit is contained in:
ledgerwatch 2020-04-09 18:23:29 +01:00 committed by GitHub
parent 0589f63d99
commit ad64ae2085
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
31 changed files with 705 additions and 1157 deletions

View File

@ -130,8 +130,7 @@ func runCmd(ctx *cli.Context) error {
gen := readGenesis(ctx.GlobalString(GenesisFlag.Name))
genesisConfig = gen
db := ethdb.NewMemDatabase()
genesis, _, tds, _ := gen.ToBlock(db)
tds = state.NewTrieDbState(genesis.Root(), db, 0)
_, _, tds, _ = gen.ToBlock(db, false /* history */)
statedb = state.New(tds)
chainConfig = gen.Config
} else {

View File

@ -229,7 +229,7 @@ func initGenesis(ctx *cli.Context) error {
if err != nil {
utils.Fatalf("Failed to open database: %v", err)
}
_, hash, _, err := core.SetupGenesisBlock(chaindb, genesis)
_, hash, _, err := core.SetupGenesisBlock(chaindb, genesis, false /* history */)
if err != nil {
utils.Fatalf("Failed to write genesis block: %v", err)
}

View File

@ -378,7 +378,7 @@ func (api *RetestethAPI) SetChainParams(_ context.Context, chainParams ChainPara
ParentHash: chainParams.Genesis.ParentHash,
Alloc: accounts,
}
chainConfig, genesisHash, _, err := core.SetupGenesisBlock(ethDb, genesis)
chainConfig, genesisHash, _, err := core.SetupGenesisBlock(ethDb, genesis, false /* history */)
if err != nil {
return false, err
}

View File

@ -630,7 +630,7 @@ func execToBlock(chaindata string, block uint64, fromScratch bool) {
defer stateDb.Close()
//_, _, _, err = core.SetupGenesisBlock(stateDb, core.DefaultGenesisBlock())
_, _, _, err = core.SetupGenesisBlockWithOverride(stateDb, nil, nil, nil)
_, _, _, err = core.SetupGenesisBlockWithOverride(stateDb, nil, nil, nil, false /* history */)
check(err)
bc, err := core.NewBlockChain(stateDb, nil, params.MainnetChainConfig, ethash.NewFaker(), vm.Config{}, nil)
check(err)
@ -1013,7 +1013,7 @@ func printFullNodeRLPs() {
}
func testDifficulty() {
genesisBlock, _, _, err := core.DefaultGenesisBlock().ToBlock(nil)
genesisBlock, _, _, err := core.DefaultGenesisBlock().ToBlock(nil, false /* history */)
check(err)
d1 := ethash.CalcDifficulty(params.MainnetChainConfig, 100000, genesisBlock.Header())
fmt.Printf("Block 1 difficulty: %d\n", d1)
@ -1954,6 +1954,7 @@ func indexSize(chaindata string) {
i := 0
maxLenAcc := 0
accountsOver4096 := 0
if err := db.Walk(dbutils.AccountsHistoryBucket, []byte{}, 0, func(k, v []byte) (b bool, e error) {
i++
if i%10_000_000 == 0 {
@ -1962,6 +1963,9 @@ func indexSize(chaindata string) {
if len(v) > maxLenAcc {
maxLenAcc = len(v)
}
if len(v) > 4096 {
accountsOver4096++
}
if err := csvAcc.Write([]string{common.Bytes2Hex(k), strconv.Itoa(len(v))}); err != nil {
panic(err)
}
@ -1973,6 +1977,7 @@ func indexSize(chaindata string) {
i = 0
maxLenSt := 0
storageOver4096 := 0
if err := db.Walk(dbutils.StorageHistoryBucket, []byte{}, 0, func(k, v []byte) (b bool, e error) {
i++
if i%10_000_000 == 0 {
@ -1982,6 +1987,9 @@ func indexSize(chaindata string) {
if len(v) > maxLenSt {
maxLenSt = len(v)
}
if len(v) > 4096 {
storageOver4096++
}
if err := csvStorage.Write([]string{common.Bytes2Hex(k), strconv.Itoa(len(v))}); err != nil {
panic(err)
}
@ -1994,6 +2002,8 @@ func indexSize(chaindata string) {
fmt.Println("Results:")
fmt.Println("maxLenAcc:", maxLenAcc)
fmt.Println("maxLenSt:", maxLenSt)
fmt.Println("account over 4096 index:", accountsOver4096)
fmt.Println("storage over 4096 index:", storageOver4096)
}
func main() {

View File

@ -82,7 +82,10 @@ func CheckChangeSets(genesis *core.Genesis, blockNum uint64, chaindata string, h
}
if !nocheck {
accountChanges := csw.GetAccountChanges()
accountChanges, err := csw.GetAccountChanges()
if err != nil {
return err
}
var expectedAccountChanges []byte
if debug.IsThinHistory() {
expectedAccountChanges, err = changeset.EncodeAccounts(accountChanges)
@ -104,7 +107,10 @@ func CheckChangeSets(genesis *core.Genesis, blockNum uint64, chaindata string, h
return nil
}
expectedStorageChanges := csw.GetStorageChanges()
expectedStorageChanges, err := csw.GetStorageChanges()
if err != nil {
return err
}
expectedtorageSerialized := make([]byte, 0)
if expectedStorageChanges.Len() > 0 {
if debug.IsThinHistory() {

View File

@ -191,10 +191,10 @@ func Stateless(
}
var preRoot common.Hash
if blockNum == 1 {
_, _, _, err = core.SetupGenesisBlock(stateDb, core.DefaultGenesisBlock())
check(err)
genesisBlock, _, _, err := core.DefaultGenesisBlock().ToBlock(nil)
_, _, _, err = core.SetupGenesisBlock(stateDb, core.DefaultGenesisBlock(), writeHistory)
check(err)
genesisBlock, _, _, err1 := core.DefaultGenesisBlock().ToBlock(nil, writeHistory)
check(err1)
preRoot = genesisBlock.Header().Root
} else {
block := bcb.GetBlockByNumber(blockNum - 1)
@ -441,11 +441,22 @@ func Stateless(
}
tds.SetBlockNr(blockNum)
err = statedb.CommitBlock(ctx, tds.DbStateWriter())
blockWriter := tds.DbStateWriter()
err = statedb.CommitBlock(ctx, blockWriter)
if err != nil {
fmt.Printf("Commiting block %d failed: %v", blockNum, err)
return
}
if writeHistory {
if err = blockWriter.WriteChangeSets(); err != nil {
fmt.Printf("Writing changesets for block %d failed: %v", blockNum, err)
return
}
if err = blockWriter.WriteHistory(); err != nil {
fmt.Printf("Writing history for block %d failed: %v", blockNum, err)
return
}
}
willSnapshot := interval > 0 && blockNum > 0 && blockNum >= ignoreOlderThan && blockNum%interval == 0

View File

@ -1713,7 +1713,7 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chainDb ethdb.Database) {
var err error
chainDb = MakeChainDatabase(ctx, stack)
config, _, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
config, _, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx), false /* history */)
if err != nil {
Fatalf("%v", err)
}

View File

@ -11,13 +11,11 @@ import (
)
const (
DefaultIncarnation = ^uint64(1)
storageEnodingIndexSize = 4
storageEnodingStartElem = uint32(4)
storageEnodingLengthOfNumOfElements = 4
storageEnodingLengthOfDict = 2
storageEnodingLengthOfNumTypeOfElements = 2
storageEnodingLengthOfIncarnationKey = 4
)
var ErrNotFound = errors.New("not found")
@ -42,7 +40,6 @@ numOfUint32Values uint16
[len(val0), len(val0)+len(val1), ..., len(val0)+len(val1)+...+len(val_{numOfUint8Values-1})] []uint8
[len(valnumOfUint8Values), len(val0)+len(val1), ..., len(val0)+len(val1)+...+len(val_{numOfUint16Values-1})] []uint16
[len(valnumOfUint16Values), len(val0)+len(val1), ..., len(val0)+len(val1)+...+len(val_{numOfUint32Values-1})] []uint32
[elementNum:incarnation] - optional [uint32:uint64...]
*/
func EncodeStorage(s *ChangeSet) ([]byte, error) {
@ -53,20 +50,18 @@ func EncodeStorage(s *ChangeSet) ([]byte, error) {
//write numOfElements
binary.BigEndian.PutUint32(uint32Arr, uint32(n))
_, err := buf.Write(uint32Arr)
if err != nil {
if _, err := buf.Write(uint32Arr); err != nil {
return nil, err
}
addrHashesMap := make(map[common.Hash]uint32)
addrHashList := make([]byte, 0)
notDefaultIncarnationList := make([]byte, 0)
//collect information about unique addHashes and non default incarnations
nextIDAddrHash := uint32(0)
var addrHash common.Hash
for i := 0; i < n; i++ {
//copy addrHash
addrHash := common.Hash{}
copy(
addrHash[:],
s.Changes[i].Key[0:common.HashLength],
@ -76,30 +71,19 @@ func EncodeStorage(s *ChangeSet) ([]byte, error) {
if _, ok := addrHashesMap[addrHash]; !ok {
addrHashesMap[addrHash] = nextIDAddrHash
nextIDAddrHash++
addrHashList = append(addrHashList, addrHash.Bytes()...)
}
//collect non default incarnations
incarnation := binary.BigEndian.Uint64(s.Changes[i].Key[common.HashLength : common.HashLength+common.IncarnationLength])
if incarnation != DefaultIncarnation {
inc := make([]byte, 12)
binary.BigEndian.PutUint32(inc[0:storageEnodingLengthOfIncarnationKey], uint32(i))
binary.BigEndian.PutUint64(inc[storageEnodingLengthOfIncarnationKey:12], incarnation)
notDefaultIncarnationList = append(notDefaultIncarnationList, inc...)
addrHashList = append(addrHashList, addrHash[:]...)
}
}
//write numOfUniqAddrHashes
numOfUniqAddrHashes := make([]byte, storageEnodingLengthOfDict)
binary.BigEndian.PutUint16(numOfUniqAddrHashes, uint16(len(addrHashesMap)))
_, err = buf.Write(numOfUniqAddrHashes)
if err != nil {
if _, err := buf.Write(numOfUniqAddrHashes); err != nil {
return nil, err
}
//Write contiguous array of address hashes
_, err = buf.Write(addrHashList)
if err != nil {
if _, err := buf.Write(addrHashList); err != nil {
return nil, err
}
@ -112,8 +96,8 @@ func EncodeStorage(s *ChangeSet) ([]byte, error) {
keys := new(bytes.Buffer)
lengthOfValues := uint32(0)
row := make([]byte, lenOfAddr+common.HashLength)
for i := 0; i < len(s.Changes); i++ {
row := make([]byte, lenOfAddr+common.HashLength)
writeKeyRow(
addrHashesMap[common.BytesToHash(s.Changes[i].Key[0:common.HashLength])],
row[0:lenOfAddr],
@ -143,28 +127,18 @@ func EncodeStorage(s *ChangeSet) ([]byte, error) {
binary.BigEndian.PutUint16(lengthes[0:storageEnodingLengthOfNumTypeOfElements], numOfUint8)
binary.BigEndian.PutUint16(lengthes[storageEnodingLengthOfNumTypeOfElements:2*storageEnodingLengthOfNumTypeOfElements], numOfUint16)
binary.BigEndian.PutUint16(lengthes[2*storageEnodingLengthOfNumTypeOfElements:3*storageEnodingLengthOfNumTypeOfElements], numOfUint32)
_, err = buf.Write(keys.Bytes())
if err != nil {
if _, err := buf.Write(keys.Bytes()); err != nil {
return nil, err
}
_, err = buf.Write(lengthes)
if err != nil {
if _, err := buf.Write(lengthes); err != nil {
return nil, err
}
_, err = buf.Write(values.Bytes())
if err != nil {
if _, err := buf.Write(values.Bytes()); err != nil {
return nil, err
}
if len(notDefaultIncarnationList) > 0 {
_, err = buf.Write(notDefaultIncarnationList)
if err != nil {
return nil, err
}
}
byt := buf.Bytes()
return byt, nil
}
@ -204,28 +178,6 @@ func DecodeStorage(b []byte) (*ChangeSet, error) {
lenOfValsPos = lenOfValsPos + 3*storageEnodingLengthOfNumTypeOfElements
valuesPos := lenOfValsPos + uint32(numOfUint8) + uint32(numOfUint16*2) + uint32(numOfUint32*4)
incarnationPosition := lenOfValsPos + uint32(calculateIncarnationPos3(b[lenOfValsPos:], numOfUint8, numOfUint16, numOfUint32))
incarnationsLength := len(b[incarnationPosition:])
notDefaultIncarnation := make(map[uint32]uint64)
var (
id uint32
inc uint64
ok bool
)
if incarnationsLength > 0 {
if incarnationsLength%(4+common.IncarnationLength) != 0 {
return h, fmt.Errorf("decode: incarnatin part is incorrect(%d bytes)", len(b[incarnationPosition:]))
}
numOfIncarnations := incarnationsLength / (storageEnodingLengthOfIncarnationKey + common.IncarnationLength)
for i := 0; i < numOfIncarnations; i++ {
id = binary.BigEndian.Uint32(b[incarnationPosition : incarnationPosition+storageEnodingLengthOfIncarnationKey])
inc = binary.BigEndian.Uint64(b[incarnationPosition+storageEnodingLengthOfIncarnationKey : incarnationPosition+storageEnodingLengthOfIncarnationKey+8])
notDefaultIncarnation[id] = inc
incarnationPosition += (storageEnodingLengthOfIncarnationKey + common.IncarnationLength)
}
}
elementStart := storageEnodingStartElem + storageEnodingLengthOfDict + uint32(dictLen)*common.HashLength
key := make([]byte, common.HashLength*2+common.IncarnationLength)
@ -243,13 +195,7 @@ func DecodeStorage(b []byte) (*ChangeSet, error) {
key[common.HashLength+common.IncarnationLength:2*common.HashLength+common.IncarnationLength],
common.CopyBytes(b[elem+lenOfAddHash:elem+lenOfAddHash+common.HashLength]),
)
//set incarnation
if inc, ok = notDefaultIncarnation[i]; ok {
binary.BigEndian.PutUint64(key[common.HashLength:common.HashLength+common.IncarnationLength], inc)
} else {
binary.BigEndian.PutUint64(key[common.HashLength:common.HashLength+common.IncarnationLength], DefaultIncarnation)
}
binary.BigEndian.PutUint64(key[common.HashLength:common.HashLength+common.IncarnationLength], ^uint64(1))
h.Changes[i].Key = common.CopyBytes(key)
h.Changes[i].Value = findVal(b[lenOfValsPos:valuesPos], b[valuesPos:], i, numOfUint8, numOfUint16, numOfUint32)
}
@ -270,25 +216,6 @@ func getNumOfBytesByLen(n int) int {
}
}
func calculateIncarnationPos3(b []byte, numOfUint8, numOfUint16, numOfUint32 int) int {
res := 0
end := 0
switch {
case numOfUint32 > 0:
end = numOfUint8 + numOfUint16*2 + numOfUint32*4
res = int(binary.BigEndian.Uint32(b[end-4:end])) + end
case numOfUint16 > 0:
end = numOfUint8 + numOfUint16*2
res = int(binary.BigEndian.Uint16(b[end-2:end])) + end
case numOfUint8 > 0:
end = numOfUint8
res = int(b[end-1]) + end
default:
return 0
}
return res
}
func findVal(lenOfVals []byte, values []byte, i uint32, numOfUint8, numOfUint16, numOfUint32 int) []byte {
lenOfValStart := uint32(0)
lenOfValEnd := uint32(0)
@ -387,31 +314,6 @@ func (b StorageChangeSetBytes) Walk(f func(k, v []byte) error) error {
uint32(numOfUint16*2) +
uint32(numOfUint32*4)
incarnationPosition := lenOfValsPos + uint32(calculateIncarnationPos3(b[lenOfValsPos:], numOfUint8, numOfUint16, numOfUint32))
if uint32(len(b)) < incarnationPosition {
return fmt.Errorf("decode: input too short (%d bytes, expected at least %d bytes)", len(b), incarnationPosition)
}
incarnationsLength := len(b[incarnationPosition:])
notDefaultIncarnation := make(map[uint32]uint64)
var (
id uint32
inc uint64
ok bool
)
if incarnationsLength > 0 {
if incarnationsLength%(storageEnodingIndexSize+common.IncarnationLength) != 0 {
return fmt.Errorf("decode: incarnatin part is incorrect(%d bytes)", len(b[incarnationPosition:]))
}
numOfIncarnations := incarnationsLength / (storageEnodingIndexSize + common.IncarnationLength)
for i := 0; i < numOfIncarnations; i++ {
id = binary.BigEndian.Uint32(b[incarnationPosition : incarnationPosition+storageEnodingIndexSize])
inc = binary.BigEndian.Uint64(b[incarnationPosition+storageEnodingIndexSize : incarnationPosition+storageEnodingIndexSize+common.IncarnationLength])
notDefaultIncarnation[id] = inc
incarnationPosition += (storageEnodingIndexSize + common.IncarnationLength)
}
}
addrHashMap := make(map[uint32]common.Hash, numOfUniqueItems)
for i := uint32(0); i < uint32(numOfUniqueItems); i++ {
elemStart := storageEnodingStartElem + storageEnodingLengthOfDict + i*(common.HashLength)
@ -433,13 +335,7 @@ func (b StorageChangeSetBytes) Walk(f func(k, v []byte) error) error {
key[common.HashLength+common.IncarnationLength:2*common.HashLength+common.IncarnationLength],
b[elemStart+elemLength:elemStart+elemLength+common.HashLength],
)
//set incarnation
if inc, ok = notDefaultIncarnation[i]; ok {
binary.BigEndian.PutUint64(key[common.HashLength:common.HashLength+common.IncarnationLength], inc)
} else {
binary.BigEndian.PutUint64(key[common.HashLength:common.HashLength+common.IncarnationLength], DefaultIncarnation)
}
binary.BigEndian.PutUint64(key[common.HashLength:common.HashLength+common.IncarnationLength], ^uint64(1))
err := f(common.CopyBytes(key), findVal(b[lenOfValsPos:valuesPos], b[valuesPos:], i, numOfUint8, numOfUint16, numOfUint32))
if err != nil {
return err
@ -476,7 +372,6 @@ func (b StorageChangeSetBytes) Find(k []byte) ([]byte, error) {
}
}
if !found {
fmt.Println("addr")
return nil, ErrNotFound
}
@ -495,35 +390,6 @@ func (b StorageChangeSetBytes) Find(k []byte) ([]byte, error) {
uint32(numOfUint16*2) +
uint32(numOfUint32*4)
incarnationPosition := lenOfValsPos + uint32(calculateIncarnationPos3(b[lenOfValsPos:], numOfUint8, numOfUint16, numOfUint32))
if uint32(len(b)) < incarnationPosition {
return nil, fmt.Errorf("decode: input too short (%d bytes, expected at least %d bytes)", len(b), incarnationPosition)
}
incarnationsLength := len(b[incarnationPosition:])
//check that we have the same incarnation
keyIncarnation := binary.BigEndian.Uint64(k[common.HashLength : common.HashLength+common.IncarnationLength])
if !(keyIncarnation == DefaultIncarnation && incarnationsLength == 0) {
if incarnationsLength%(storageEnodingIndexSize+common.IncarnationLength) != 0 {
return nil, fmt.Errorf("decode: incarnatin part is incorrect(%d bytes)", len(b[incarnationPosition:]))
}
numOfIncarnations := incarnationsLength / (storageEnodingIndexSize + common.IncarnationLength)
incarnationIsCorrect := false
for i := 0; i < numOfIncarnations; i++ {
elemStart := incarnationPosition + uint32(i*(storageEnodingLengthOfIncarnationKey+common.IncarnationLength))
if addHashID != binary.BigEndian.Uint32(b[elemStart:elemStart+4]) {
continue
}
if binary.BigEndian.Uint64(b[elemStart+storageEnodingLengthOfIncarnationKey:elemStart+storageEnodingLengthOfIncarnationKey+8]) == keyIncarnation {
incarnationIsCorrect = true
}
}
if !incarnationIsCorrect {
fmt.Println("incarnationIsCorrect")
return nil, ErrNotFound
}
}
//here should be binary search too
elemLength := uint32(getNumOfBytesByLen(int(numOfUniqueItems)))
encodedAddHashID := make([]byte, elemLength)

View File

@ -3,7 +3,6 @@ package changeset
import (
"bytes"
"fmt"
"math/rand"
"reflect"
"sort"
"strconv"
@ -11,7 +10,6 @@ import (
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/common/dbutils"
"github.com/ledgerwatch/turbo-geth/common/hexutil"
"github.com/stretchr/testify/assert"
)
@ -86,7 +84,7 @@ func TestEncodingStorageWithtRandomIncarnation(t *testing.T) {
addrHash, _ := common.HashData([]byte("addrHash" + strconv.Itoa(i)))
key, _ := common.HashData([]byte("key" + strconv.Itoa(i)))
val, _ := common.HashData([]byte("val" + strconv.Itoa(i)))
err = ch.Add(dbutils.GenerateCompositeStorageKey(addrHash, rand.Uint64(), key), val.Bytes())
err = ch.Add(dbutils.GenerateCompositeStorageKey(addrHash, defaultIncarnation, key), val.Bytes())
if err != nil {
t.Error(err)
}
@ -226,11 +224,3 @@ func TestEncodingStorageWithoutNotDefaultIncarnationFind(t *testing.T) {
})
}
func TestFind(t *testing.T) {
// storage changes at block 51385
changes := hexutil.MustDecode("0x0000000a0002353e456a1b25b4640cbf753b6094458a4e38929a0c5bbe22904d9d08abc6d11adf396ae6730bdcd2e30c871da8978de3251900d45eaf15c0ba4d8a691c1d251300290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563003f4920f7f194a9a91a5d5422dc6313c329b82e533bce5e6614fbd13d4da7a32800b10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf601290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56301405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace0159bd035209cfbd05133d9c61cd860212636c4146228286761610b6e8811e537a018db697c2abd4284e7bb9aae7273fd67d061dd6ed4282b8382a3ed29d9cfaa1bb0198da4b407718e49fb0fe900da3b7fb2c3e0fed30f4148729225f24534e3e471b01b10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf601c2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b000a000000000101020a0b0b0b0b0c0d130e0429d069189e0013041115")
key := hexutil.MustDecode("0xdf396ae6730bdcd2e30c871da8978de3251900d45eaf15c0ba4d8a691c1d2513fffffffffffffffeb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6")
val, err := StorageChangeSetBytes(changes).Find(key)
assert.NoError(t, err)
assert.Equal(t, hexutil.MustDecode("0x11"), val)
}

View File

@ -404,7 +404,7 @@ func TestClique(t *testing.T) {
}
// Create a pristine blockchain with the genesis injected
db := ethdb.NewMemDatabase()
genesis.Commit(db)
genesis.MustCommit(db)
// Assemble a chain of headers from the cast votes
config := *params.TestChainConfig
@ -421,7 +421,7 @@ func TestClique(t *testing.T) {
continue
}
genesisBlock, _, _, _ := genesis.ToBlock(db)
genesisBlock, _, _, _ := genesis.ToBlock(db, false /* history */)
ctx := chain.WithContext(context.Background(), big.NewInt(genesisBlock.Number().Int64()+1))
blocks, _ := core.GenerateChain(ctx, &config, genesisBlock, engine, db, len(tt.votes), func(j int, gen *core.BlockGen) {
// Cast the vote contained in this block

View File

@ -1275,9 +1275,20 @@ func (bc *BlockChain) writeBlockWithState(ctx context.Context, block *types.Bloc
ctx = bc.WithContext(ctx, block.Number())
if stateDb != nil {
if err := stateDb.CommitBlock(ctx, tds.DbStateWriter()); err != nil {
blockWriter := tds.DbStateWriter()
if err := stateDb.CommitBlock(ctx, blockWriter); err != nil {
return NonStatTy, err
}
// Always write changesets
if err := blockWriter.WriteChangeSets(); err != nil {
return NonStatTy, err
}
// Optionally write history
if !bc.NoHistory() {
if err := blockWriter.WriteHistory(); err != nil {
return NonStatTy, err
}
}
}
if bc.enableReceipts && !bc.cacheConfig.DownloadOnly {
rawdb.WriteReceipts(bc.db, block.Hash(), block.NumberU64(), receipts)

View File

@ -68,7 +68,10 @@ func newCanonical(engine consensus.Engine, n int, full bool) (context.Context, e
db = ethdb.NewMemDatabase()
}
genesis := new(Genesis).MustCommit(db)
genesis, _, err := new(Genesis).Commit(db, true /* history */)
if err != nil {
panic(err)
}
// Initialize a fresh chain with only a genesis block
cacheConfig := &CacheConfig{
@ -94,7 +97,7 @@ func newCanonical(engine consensus.Engine, n int, full bool) (context.Context, e
}
// Header-only chain requested
headers := makeHeaderChain(ctx, genesis.Header(), n, engine, db.MemCopy(), canonicalSeed)
_, err := blockchain.InsertHeaderChain(headers, 1)
_, err = blockchain.InsertHeaderChain(headers, 1)
return ctx, db, blockchain, err
}
@ -174,9 +177,6 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
parent := blockchain.GetBlockByHash(block.ParentHash())
tds := state.NewTrieDbState(parent.Root(), blockchain.db, parent.NumberU64())
statedb := state.New(tds)
if err = blockchain.db.DeleteTimestamp(block.NumberU64()); err != nil {
return err
}
receipts, _, usedGas, err := blockchain.Processor().Process(block, statedb, tds, vm.Config{})
if err != nil {
blockchain.reportBlock(block, receipts, err)
@ -190,7 +190,11 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
}
blockchain.chainmu.Lock()
tds.SetBlockNr(block.NumberU64())
if err := statedb.CommitBlock(ctx, tds.DbStateWriter()); err != nil {
blockWriter := tds.DbStateWriter()
if err := statedb.CommitBlock(ctx, blockWriter); err != nil {
return err
}
if err := blockWriter.WriteChangeSets(); err != nil {
return err
}
if _, err := blockchain.db.Commit(); err != nil {
@ -1532,7 +1536,7 @@ func doModesTest(history, preimages, receipts, txlookup bool) error {
Config: &params.ChainConfig{ChainID: big.NewInt(1), EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)},
Alloc: GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
}
genesis = gspec.MustCommit(db)
genesis, _, _ = gspec.Commit(db, history)
)
cacheConfig := &CacheConfig{

View File

@ -152,10 +152,15 @@ func (e *GenesisMismatchError) Error() string {
// error is a *params.ConfigCompatError and the new, unwritten config is returned.
//
// The returned chain configuration is never nil.
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, *state.IntraBlockState, error) {
return SetupGenesisBlockWithOverride(db, genesis, nil, nil)
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis, history bool) (*params.ChainConfig, common.Hash, *state.IntraBlockState, error) {
return SetupGenesisBlockWithOverride(db, genesis, nil, nil, history)
}
func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideIstanbul *big.Int, overrideMuirGlacier *big.Int) (*params.ChainConfig, common.Hash, *state.IntraBlockState, error) {
func SetupGenesisBlockWithOverride(db ethdb.Database,
genesis *Genesis,
overrideIstanbul *big.Int,
overrideMuirGlacier *big.Int,
history bool,
) (*params.ChainConfig, common.Hash, *state.IntraBlockState, error) {
var stateDB *state.IntraBlockState
if genesis != nil && genesis.Config == nil {
@ -170,22 +175,22 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
} else {
log.Info("Writing custom genesis block")
}
block, stateDB, err := genesis.Commit(db)
block, stateDB1, err := genesis.Commit(db, history)
if err != nil {
return nil, common.Hash{}, nil, err
}
return genesis.Config, block.Hash(), stateDB, err
return genesis.Config, block.Hash(), stateDB1, err
}
// Check whether the genesis block is already written.
if genesis != nil {
block, stateDB, _, err := genesis.ToBlock(nil)
block, stateDB1, _, err := genesis.ToBlock(nil, history)
if err != nil {
return genesis.Config, common.Hash{}, nil, err
}
hash := block.Hash()
if hash != stored {
return genesis.Config, block.Hash(), stateDB, &GenesisMismatchError{stored, hash}
return genesis.Config, block.Hash(), stateDB1, &GenesisMismatchError{stored, hash}
}
}
@ -242,7 +247,7 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
// ToBlock creates the genesis block and writes state of a genesis specification
// to the given database (or discards it if nil).
func (g *Genesis) ToBlock(db ethdb.Database) (*types.Block, *state.IntraBlockState, *state.TrieDbState, error) {
func (g *Genesis) ToBlock(db ethdb.Database, history bool) (*types.Block, *state.IntraBlockState, *state.TrieDbState, error) {
if db == nil {
db = ethdb.NewMemDatabase()
}
@ -250,6 +255,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) (*types.Block, *state.IntraBlockSta
tds.StartNewBuffer()
statedb := state.New(tds)
tds.SetNoHistory(!history)
for addr, account := range g.Alloc {
statedb.AddBalance(addr, account.Balance)
statedb.SetCode(addr, account.Code)
@ -296,10 +302,10 @@ func (g *Genesis) ToBlock(db ethdb.Database) (*types.Block, *state.IntraBlockSta
// Commit writes the block and state of a genesis specification to the database.
// The block is committed as the canonical head block.
func (g *Genesis) Commit(db ethdb.Database) (*types.Block, *state.IntraBlockState, error) {
func (g *Genesis) Commit(db ethdb.Database, history bool) (*types.Block, *state.IntraBlockState, error) {
batch := db.NewBatch()
//fmt.Printf("Generating genesis\n")
block, statedb, tds, err := g.ToBlock(batch)
block, statedb, tds, err := g.ToBlock(batch, history)
if err != nil {
return nil, nil, err
}
@ -314,9 +320,20 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, *state.IntraBlockStat
return nil, nil, err
}
tds.SetBlockNr(0)
if err := statedb.CommitBlock(context.Background(), tds.DbStateWriter()); err != nil {
blockWriter := tds.DbStateWriter()
if err := statedb.CommitBlock(context.Background(), blockWriter); err != nil {
return nil, statedb, fmt.Errorf("cannot write state: %v", err)
}
// Always write changesets
if err := blockWriter.WriteChangeSets(); err != nil {
return nil, statedb, fmt.Errorf("cannot write change sets: %v", err)
}
// Optionally write history
if history {
if err := blockWriter.WriteHistory(); err != nil {
return nil, statedb, fmt.Errorf("cannot write history: %v", err)
}
}
if _, err := batch.Commit(); err != nil {
return nil, nil, err
}
@ -335,7 +352,7 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, *state.IntraBlockStat
// MustCommit writes the genesis block and state to db, panicking on error.
// The block is committed as the canonical head block.
func (g *Genesis) MustCommit(db ethdb.Database) *types.Block {
block, _, err := g.Commit(db)
block, _, err := g.Commit(db, true /* history */)
if err != nil {
panic(err)
}

View File

@ -33,11 +33,11 @@ import (
)
func TestDefaultGenesisBlock(t *testing.T) {
block, _, _, _ := DefaultGenesisBlock().ToBlock(nil)
block, _, _, _ := DefaultGenesisBlock().ToBlock(nil, true)
if block.Hash() != params.MainnetGenesisHash {
t.Errorf("wrong mainnet genesis hash, got %v, want %v", block.Hash(), params.MainnetGenesisHash)
}
block, _, _, _ = DefaultTestnetGenesisBlock().ToBlock(nil)
block, _, _, _ = DefaultTestnetGenesisBlock().ToBlock(nil, true)
if block.Hash() != params.TestnetGenesisHash {
t.Errorf("wrong testnet genesis hash, got %v, want %v", block.Hash(), params.TestnetGenesisHash)
}
@ -65,7 +65,7 @@ func TestSetupGenesis(t *testing.T) {
{
name: "genesis without ChainConfig",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *state.IntraBlockState, error) {
return SetupGenesisBlock(db, new(Genesis))
return SetupGenesisBlock(db, new(Genesis), true /* history */)
},
wantErr: errGenesisNoConfig,
wantConfig: params.AllEthashProtocolChanges,
@ -73,7 +73,7 @@ func TestSetupGenesis(t *testing.T) {
{
name: "no block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *state.IntraBlockState, error) {
return SetupGenesisBlock(db, nil)
return SetupGenesisBlock(db, nil, true /* history */)
},
wantHash: params.MainnetGenesisHash,
wantConfig: params.MainnetChainConfig,
@ -82,7 +82,7 @@ func TestSetupGenesis(t *testing.T) {
name: "mainnet block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *state.IntraBlockState, error) {
DefaultGenesisBlock().MustCommit(db)
return SetupGenesisBlock(db, nil)
return SetupGenesisBlock(db, nil, true /* history */)
},
wantHash: params.MainnetGenesisHash,
wantConfig: params.MainnetChainConfig,
@ -91,7 +91,7 @@ func TestSetupGenesis(t *testing.T) {
name: "custom block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *state.IntraBlockState, error) {
customg.MustCommit(db)
return SetupGenesisBlock(db, nil)
return SetupGenesisBlock(db, nil, true /* history */)
},
wantHash: customghash,
wantConfig: customg.Config,
@ -100,7 +100,7 @@ func TestSetupGenesis(t *testing.T) {
name: "custom block in DB, genesis == testnet",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *state.IntraBlockState, error) {
customg.MustCommit(db)
return SetupGenesisBlock(db, DefaultTestnetGenesisBlock())
return SetupGenesisBlock(db, DefaultTestnetGenesisBlock(), true /* history */)
},
wantErr: &GenesisMismatchError{Stored: customghash, New: params.TestnetGenesisHash},
wantHash: params.TestnetGenesisHash,
@ -110,7 +110,7 @@ func TestSetupGenesis(t *testing.T) {
name: "compatible config in DB",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *state.IntraBlockState, error) {
oldcustomg.MustCommit(db)
return SetupGenesisBlock(db, &customg)
return SetupGenesisBlock(db, &customg, true /* history */)
},
wantHash: customghash,
wantConfig: customg.Config,
@ -130,7 +130,7 @@ func TestSetupGenesis(t *testing.T) {
_, _ = bc.InsertChain(context.Background(), blocks)
bc.CurrentBlock()
// This should return a compatibility error.
return SetupGenesisBlock(db, &customg)
return SetupGenesisBlock(db, &customg, true /* history */)
},
wantHash: customghash,
wantConfig: customg.Config,

View File

@ -11,7 +11,6 @@ import (
"github.com/ledgerwatch/turbo-geth/common/debug"
"github.com/ledgerwatch/turbo-geth/common/hexutil"
"github.com/ledgerwatch/turbo-geth/core/types/accounts"
"github.com/ledgerwatch/turbo-geth/crypto"
)
// ChangeSetWriter is a mock StateWriter that accumulates changes in-memory into ChangeSets.
@ -29,24 +28,28 @@ func NewChangeSetWriter() *ChangeSetWriter {
}
}
func (w *ChangeSetWriter) GetAccountChanges() *changeset.ChangeSet {
func (w *ChangeSetWriter) GetAccountChanges() (*changeset.ChangeSet, error) {
cs := changeset.NewAccountChangeSet()
for key, val := range w.accountChanges {
if err := cs.Add(crypto.Keccak256(key.Bytes()), val); err != nil {
panic(err)
addrHash, err := common.HashData(key[:])
if err != nil {
return nil, err
}
if err := cs.Add(addrHash[:], val); err != nil {
return nil, err
}
}
return cs
return cs, nil
}
func (w *ChangeSetWriter) GetStorageChanges() *changeset.ChangeSet {
func (w *ChangeSetWriter) GetStorageChanges() (*changeset.ChangeSet, error) {
cs := changeset.NewStorageChangeSet()
for key, val := range w.storageChanges {
if err := cs.Add([]byte(key), val); err != nil {
panic(err)
return nil, err
}
}
return cs
return cs, nil
}
func (w *ChangeSetWriter) UpdateAccountData(ctx context.Context, address common.Address, original, account *accounts.Account) error {

View File

@ -29,6 +29,7 @@ import (
"sync/atomic"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/common/changeset"
"github.com/ledgerwatch/turbo-geth/common/dbutils"
"github.com/ledgerwatch/turbo-geth/common/debug"
"github.com/ledgerwatch/turbo-geth/core/types/accounts"
@ -925,7 +926,7 @@ func (tds *TrieDbState) UnwindTo(blockNr uint64) error {
return err
}
for i := tds.blockNr; i > blockNr; i-- {
if err := tds.db.DeleteTimestamp(i); err != nil {
if err := tds.deleteTimestamp(i); err != nil {
return err
}
}
@ -935,6 +936,100 @@ func (tds *TrieDbState) UnwindTo(blockNr uint64) error {
return nil
}
// deleteTimestamp removes all traces of the given block number (timestamp)
// from the change set buckets and from the corresponding history entries.
// It is called from UnwindTo when rolling the state back. Missing change
// sets are tolerated — there is simply nothing to undo for that block.
func (tds *TrieDbState) deleteTimestamp(timestamp uint64) error {
	changeSetKey := dbutils.EncodeTimestamp(timestamp)
	changedAccounts, err := tds.db.Get(dbutils.AccountChangeSetBucket, changeSetKey)
	if err != nil && err != ethdb.ErrKeyNotFound {
		return err
	}
	changedStorage, err := tds.db.Get(dbutils.StorageChangeSetBucket, changeSetKey)
	if err != nil && err != ethdb.ErrKeyNotFound {
		return err
	}
	if debug.IsThinHistory() {
		// THIN_HISTORY: each changed key has a history index (list of block
		// numbers); remove this block number from every affected index.
		if len(changedAccounts) > 0 {
			innerErr := changeset.AccountChangeSetBytes(changedAccounts).Walk(func(kk, _ []byte) error {
				return tds.removeFromHistoryIndex(dbutils.AccountsHistoryBucket, kk, timestamp)
			})
			if innerErr != nil {
				return innerErr
			}
			if err := tds.db.Delete(dbutils.AccountChangeSetBucket, changeSetKey); err != nil {
				return err
			}
		}
		if len(changedStorage) > 0 {
			innerErr := changeset.StorageChangeSetBytes(changedStorage).Walk(func(kk, _ []byte) error {
				return tds.removeFromHistoryIndex(dbutils.StorageHistoryBucket, kk, timestamp)
			})
			if innerErr != nil {
				return innerErr
			}
			if err := tds.db.Delete(dbutils.StorageChangeSetBucket, changeSetKey); err != nil {
				return err
			}
		}
	} else {
		// Full history: every changed key has a dedicated key+timestamp
		// composite entry; delete those entries directly.
		if len(changedAccounts) > 0 {
			innerErr := changeset.Walk(changedAccounts, func(kk, _ []byte) error {
				composite, _ := dbutils.CompositeKeySuffix(kk, timestamp)
				return tds.db.Delete(dbutils.AccountsHistoryBucket, composite)
			})
			if innerErr != nil {
				return innerErr
			}
			if err := tds.db.Delete(dbutils.AccountChangeSetBucket, changeSetKey); err != nil {
				return err
			}
		}
		if len(changedStorage) > 0 {
			innerErr := changeset.Walk(changedStorage, func(kk, _ []byte) error {
				composite, _ := dbutils.CompositeKeySuffix(kk, timestamp)
				return tds.db.Delete(dbutils.StorageHistoryBucket, composite)
			})
			if innerErr != nil {
				return innerErr
			}
			if err := tds.db.Delete(dbutils.StorageChangeSetBucket, changeSetKey); err != nil {
				return err
			}
		}
	}
	return nil
}

// removeFromHistoryIndex deletes the given timestamp from the history index
// stored under key kk in the given bucket. If the index becomes empty, the
// entry is removed entirely; a missing index is not an error (there is
// nothing to unwind for that key).
func (tds *TrieDbState) removeFromHistoryIndex(bucket, kk []byte, timestamp uint64) error {
	indexBytes, err := tds.db.Get(bucket, kk)
	if err != nil {
		if err == ethdb.ErrKeyNotFound {
			return nil
		}
		return err
	}
	index := dbutils.WrapHistoryIndex(indexBytes)
	index.Remove(timestamp)
	if index.Len() == 0 {
		return tds.db.Delete(bucket, kk)
	}
	return tds.db.Put(bucket, kk, *index)
}
func (tds *TrieDbState) readAccountDataByHash(addrHash common.Hash) (*accounts.Account, error) {
if acc, ok := tds.GetAccount(addrHash); ok {
return acc, nil
@ -1293,8 +1388,9 @@ func (tds *TrieDbState) TrieStateWriter() *TrieStateWriter {
return &TrieStateWriter{tds: tds}
}
// DbStateWriter creates a writer that is designed to write changes into the database batch
func (tds *TrieDbState) DbStateWriter() *DbStateWriter {
return &DbStateWriter{tds: tds}
return &DbStateWriter{tds: tds, csw: NewChangeSetWriter()}
}
func accountsEqual(a1, a2 *accounts.Account) bool {

View File

@ -3,16 +3,20 @@ package state
import (
"bytes"
"context"
"fmt"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/common/changeset"
"github.com/ledgerwatch/turbo-geth/common/dbutils"
"github.com/ledgerwatch/turbo-geth/common/debug"
"github.com/ledgerwatch/turbo-geth/core/types/accounts"
"github.com/ledgerwatch/turbo-geth/ethdb"
"github.com/ledgerwatch/turbo-geth/trie"
)
// DbStateWriter is a StateWriter that persists state changes straight into
// the database associated with its TrieDbState, while mirroring every change
// into an embedded ChangeSetWriter so that change sets and history entries
// can be written afterwards (via WriteChangeSets / WriteHistory).
type DbStateWriter struct {
	tds *TrieDbState     // access to the backing database and the current block number
	csw *ChangeSetWriter // in-memory accumulator of this block's account/storage changes
}
func originalAccountData(original *accounts.Account, omitHashes bool) []byte {
@ -35,6 +39,9 @@ func originalAccountData(original *accounts.Account, omitHashes bool) []byte {
}
func (dsw *DbStateWriter) UpdateAccountData(ctx context.Context, address common.Address, original, account *accounts.Account) error {
if err := dsw.csw.UpdateAccountData(ctx, address, original, account); err != nil {
return err
}
dataLen := account.EncodingLengthForStorage()
data := make([]byte, dataLen)
account.EncodeForStorage(data)
@ -43,42 +50,24 @@ func (dsw *DbStateWriter) UpdateAccountData(ctx context.Context, address common.
if err != nil {
return err
}
if err = dsw.tds.db.Put(dbutils.AccountsBucket, addrHash[:], data); err != nil {
return err
}
noHistory := dsw.tds.noHistory
// Don't write historical record if the account did not change
if accountsEqual(original, account) {
return nil
}
// we can reduce storage size for history there
// because we have accountHash+incarnation -> codehash of contract in separate bucket
// and we don't need root in history requests
omitHashes := debug.IsThinHistory()
originalData := originalAccountData(original, omitHashes)
return dsw.tds.db.PutS(dbutils.AccountsHistoryBucket, addrHash[:], originalData, dsw.tds.blockNr, noHistory)
return dsw.tds.db.Put(dbutils.AccountsBucket, addrHash[:], data)
}
func (dsw *DbStateWriter) DeleteAccount(ctx context.Context, address common.Address, original *accounts.Account) error {
if err := dsw.csw.DeleteAccount(ctx, address, original); err != nil {
return err
}
addrHash, err := dsw.tds.HashAddress(address, true /*save*/)
if err != nil {
return err
}
if err := dsw.tds.db.Delete(dbutils.AccountsBucket, addrHash[:]); err != nil {
return err
}
// We must keep root using thin history on deleting account as is
originalData := originalAccountData(original, false)
noHistory := dsw.tds.noHistory
return dsw.tds.db.PutS(dbutils.AccountsHistoryBucket, addrHash[:], originalData, dsw.tds.blockNr, noHistory)
return dsw.tds.db.Delete(dbutils.AccountsBucket, addrHash[:])
}
func (dsw *DbStateWriter) UpdateAccountCode(addrHash common.Hash, incarnation uint64, codeHash common.Hash, code []byte) error {
if err := dsw.csw.UpdateAccountCode(addrHash, incarnation, codeHash, code); err != nil {
return err
}
//save contract code mapping
if err := dsw.tds.db.Put(dbutils.CodeBucket, codeHash[:], code); err != nil {
return err
@ -91,6 +80,10 @@ func (dsw *DbStateWriter) UpdateAccountCode(addrHash common.Hash, incarnation ui
}
func (dsw *DbStateWriter) WriteAccountStorage(ctx context.Context, address common.Address, incarnation uint64, key, original, value *common.Hash) error {
// We delegate here first to let the changeSetWrite make its own decision on whether to proceed in case *original == *value
if err := dsw.csw.WriteAccountStorage(ctx, address, incarnation, key, original, value); err != nil {
return err
}
if *original == *value {
return nil
}
@ -109,23 +102,108 @@ func (dsw *DbStateWriter) WriteAccountStorage(ctx context.Context, address commo
compositeKey := dbutils.GenerateCompositeStorageKey(addrHash, incarnation, seckey)
if len(v) == 0 {
err = dsw.tds.db.Delete(dbutils.StorageBucket, compositeKey)
return dsw.tds.db.Delete(dbutils.StorageBucket, compositeKey)
} else {
err = dsw.tds.db.Put(dbutils.StorageBucket, compositeKey, vv)
return dsw.tds.db.Put(dbutils.StorageBucket, compositeKey, vv)
}
//fmt.Printf("WriteAccountStorage (db) %x %d %x: %x\n", address, incarnation, key, value)
if err != nil {
return err
}
noHistory := dsw.tds.noHistory
o := bytes.TrimLeft(original[:], "\x00")
originalValue := make([]byte, len(o))
copy(originalValue, o)
return dsw.tds.db.PutS(dbutils.StorageHistoryBucket, compositeKey, originalValue, dsw.tds.blockNr, noHistory)
}
// CreateContract has no database work of its own; it simply forwards the
// contract-creation event to the embedded change set writer.
func (dsw *DbStateWriter) CreateContract(address common.Address) error {
	return dsw.csw.CreateContract(address)
}
// WriteChangeSets serialises the account and storage change sets accumulated
// by the embedded ChangeSetWriter and stores them in the change set buckets
// under the current block number. The account change set is always written;
// the storage change set is written only when it is non-empty.
func (dsw *DbStateWriter) WriteChangeSets() error {
	key := dbutils.EncodeTimestamp(dsw.tds.blockNr)
	accountChanges, err := dsw.csw.GetAccountChanges()
	if err != nil {
		return err
	}
	var accountSerialised []byte
	if debug.IsThinHistory() {
		accountSerialised, err = changeset.EncodeAccounts(accountChanges)
	} else {
		accountSerialised, err = changeset.EncodeChangeSet(accountChanges)
	}
	if err != nil {
		return err
	}
	if err = dsw.tds.db.Put(dbutils.AccountChangeSetBucket, key, accountSerialised); err != nil {
		return err
	}
	storageChanges, err := dsw.csw.GetStorageChanges()
	if err != nil {
		return err
	}
	if storageChanges.Len() == 0 {
		// Nothing to record for storage in this block.
		return nil
	}
	var storageSerialised []byte
	if debug.IsThinHistory() {
		storageSerialised, err = changeset.EncodeStorage(storageChanges)
	} else {
		storageSerialised, err = changeset.EncodeChangeSet(storageChanges)
	}
	if err != nil {
		return err
	}
	return dsw.tds.db.Put(dbutils.StorageChangeSetBucket, key, storageSerialised)
}
// WriteHistory writes the accumulated account and storage changes into the
// history buckets (AccountsHistoryBucket / StorageHistoryBucket) for the
// block number currently set on the associated TrieDbState.
func (dsw *DbStateWriter) WriteHistory() error {
	accountChanges, err := dsw.csw.GetAccountChanges()
	if err != nil {
		return err
	}
	if err := dsw.writeHistoryOfChanges(dbutils.AccountsHistoryBucket, accountChanges); err != nil {
		return err
	}
	storageChanges, err := dsw.csw.GetStorageChanges()
	if err != nil {
		return err
	}
	return dsw.writeHistoryOfChanges(dbutils.StorageHistoryBucket, storageChanges)
}

// writeHistoryOfChanges records one block's worth of changes in the given
// history bucket. In THIN_HISTORY mode the current block number is appended
// to each changed key's history index (a missing index is created); in full
// history mode the original value is stored under a composite key+blockNr key.
func (dsw *DbStateWriter) writeHistoryOfChanges(bucket []byte, changes *changeset.ChangeSet) error {
	if debug.IsThinHistory() {
		for _, change := range changes.Changes {
			value, err := dsw.tds.db.Get(bucket, change.Key)
			if err != nil && err != ethdb.ErrKeyNotFound {
				return fmt.Errorf("db.Get failed: %w", err)
			}
			index := dbutils.WrapHistoryIndex(value)
			index.Append(dsw.tds.blockNr)
			if err := dsw.tds.db.Put(bucket, change.Key, *index); err != nil {
				return err
			}
		}
		return nil
	}
	for _, change := range changes.Changes {
		composite, _ := dbutils.CompositeKeySuffix(change.Key, dsw.tds.blockNr)
		if err := dsw.tds.db.Put(bucket, composite, change.Value); err != nil {
			return err
		}
	}
	return nil
}

View File

@ -1,12 +1,14 @@
package ethdb
package state
import (
"bytes"
"context"
"github.com/ledgerwatch/turbo-geth/common/changeset"
"math/big"
"math/rand"
"reflect"
"sort"
"strconv"
"testing"
"github.com/davecgh/go-spew/spew"
@ -15,25 +17,34 @@ import (
"github.com/ledgerwatch/turbo-geth/common/debug"
"github.com/ledgerwatch/turbo-geth/core/types/accounts"
"github.com/ledgerwatch/turbo-geth/crypto"
"github.com/ledgerwatch/turbo-geth/ethdb"
"github.com/ledgerwatch/turbo-geth/trie"
"github.com/stretchr/testify/assert"
)
func TestMutation_DeleteTimestamp(t *testing.T) {
db := NewMemDatabase()
db := ethdb.NewMemDatabase()
mutDB := db.NewBatch()
acc := make([]*accounts.Account, 10)
addr := make([]common.Address, 10)
addrHashes := make([]common.Hash, 10)
tds := NewTrieDbState(common.Hash{}, mutDB, 1)
blockWriter := tds.DbStateWriter()
ctx := context.Background()
emptyAccount := accounts.NewAccount()
for i := range acc {
acc[i], addrHashes[i] = randomAccount(t)
b := make([]byte, acc[i].EncodingLengthForStorage())
acc[i].EncodeForStorage(b)
err := mutDB.PutS(dbutils.AccountsHistoryBucket, addrHashes[i].Bytes(), b, 1, false)
if err != nil {
acc[i], addr[i], addrHashes[i] = randomAccount(t)
if err := blockWriter.UpdateAccountData(ctx, addr[i], &emptyAccount /* original */, acc[i]); err != nil {
t.Fatal(err)
}
}
if err := blockWriter.WriteChangeSets(); err != nil {
t.Fatal(err)
}
if err := blockWriter.WriteHistory(); err != nil {
t.Fatal(err)
}
_, err := mutDB.Commit()
if err != nil {
t.Fatal(err)
@ -69,7 +80,7 @@ func TestMutation_DeleteTimestamp(t *testing.T) {
}
}
err = mutDB.DeleteTimestamp(1)
err = tds.deleteTimestamp(1)
if err != nil {
t.Fatal(err)
}
@ -79,19 +90,19 @@ func TestMutation_DeleteTimestamp(t *testing.T) {
}
_, err = db.Get(dbutils.AccountChangeSetBucket, dbutils.EncodeTimestamp(1))
if err != ErrKeyNotFound {
if err != ethdb.ErrKeyNotFound {
t.Fatal("changeset must be deleted")
}
if debug.IsThinHistory() {
_, err = db.Get(dbutils.AccountsHistoryBucket, addrHashes[0].Bytes())
if err != ErrKeyNotFound {
if err != ethdb.ErrKeyNotFound {
t.Fatal("account must be deleted")
}
} else {
compositeKey, _ := dbutils.CompositeKeySuffix(addrHashes[0].Bytes(), 1)
_, err = db.Get(dbutils.AccountsHistoryBucket, compositeKey)
if err != ErrKeyNotFound {
if err != ethdb.ErrKeyNotFound {
t.Fatal("account must be deleted")
}
}
@ -101,7 +112,7 @@ func TestMutationCommit(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
db := NewMemDatabase()
db := ethdb.NewMemDatabase()
mutDB := db.NewBatch()
numOfAccounts := 5
@ -130,7 +141,7 @@ func TestMutationCommit(t *testing.T) {
t.Fatal("Accounts not equals")
}
compositeKey, _ := dbutils.CompositeKeySuffix(addrHash.Bytes(), 1)
compositeKey, _ := dbutils.CompositeKeySuffix(addrHash.Bytes(), 2)
b, err = db.Get(dbutils.AccountsHistoryBucket, compositeKey)
if err != nil {
t.Fatal("error on get account", i, err)
@ -148,7 +159,7 @@ func TestMutationCommit(t *testing.T) {
}
resAccStorage := make(map[common.Hash]common.Hash)
err = db.Walk(dbutils.StorageBucket, dbutils.GenerateStoragePrefix(addrHash, acc.Incarnation), common.HashLength+8, func(k, v []byte) (b bool, e error) {
err = db.Walk(dbutils.StorageBucket, dbutils.GenerateStoragePrefix(addrHash, acc.Incarnation), 8*(common.HashLength+8), func(k, v []byte) (b bool, e error) {
resAccStorage[common.BytesToHash(k[common.HashLength+8:])] = common.BytesToHash(v)
return true, nil
})
@ -159,11 +170,11 @@ func TestMutationCommit(t *testing.T) {
if !reflect.DeepEqual(resAccStorage, accStateStorage[i]) {
spew.Dump("res", resAccStorage)
spew.Dump("expected", accHistoryStateStorage[i])
t.Log("incorrect storage", i)
t.Fatal("incorrect storage", i)
}
resAccStorage = make(map[common.Hash]common.Hash)
err = db.Walk(dbutils.StorageHistoryBucket, dbutils.GenerateStoragePrefix(addrHash, acc.Incarnation), common.HashLength+8, func(k, v []byte) (b bool, e error) {
err = db.Walk(dbutils.StorageHistoryBucket, dbutils.GenerateStoragePrefix(addrHash, acc.Incarnation), 8*(common.HashLength+8), func(k, v []byte) (b bool, e error) {
resAccStorage[common.BytesToHash(k[common.HashLength+8:common.HashLength+8+common.HashLength])] = common.BytesToHash(v)
return true, nil
})
@ -178,7 +189,7 @@ func TestMutationCommit(t *testing.T) {
}
}
csData, err := db.Get(dbutils.AccountChangeSetBucket, dbutils.EncodeTimestamp(1))
csData, err := db.Get(dbutils.AccountChangeSetBucket, dbutils.EncodeTimestamp(2))
if err != nil {
t.Fatal(err)
}
@ -202,7 +213,7 @@ func TestMutationCommit(t *testing.T) {
t.Fatal("incorrect account changeset")
}
csData, err = db.Get(dbutils.StorageChangeSetBucket, dbutils.EncodeTimestamp(1))
csData, err = db.Get(dbutils.StorageChangeSetBucket, dbutils.EncodeTimestamp(2))
if err != nil {
t.Fatal(err)
}
@ -215,9 +226,13 @@ func TestMutationCommit(t *testing.T) {
for i, addrHash := range addrHashes {
for j := 0; j < numOfStateKeys; j++ {
key := common.Hash{uint8(i*100 + j)}
keyHash, err1 := common.HashData(key.Bytes())
if err1 != nil {
t.Fatal(err1)
}
value := common.Hash{uint8(10 + j)}
if err := expectedChangeSet.Add(dbutils.GenerateCompositeStorageKey(addrHash, accHistory[i].Incarnation, key), value.Bytes()); err != nil {
t.Fatal(err)
if err2 := expectedChangeSet.Add(dbutils.GenerateCompositeStorageKey(addrHash, accHistory[i].Incarnation, keyHash), value.Bytes()); err2 != nil {
t.Fatal(err2)
}
}
@ -242,7 +257,7 @@ func TestMutationCommitThinHistory(t *testing.T) {
t.Skip()
}
db := NewMemDatabase()
db := ethdb.NewMemDatabase()
mutDB := db.NewBatch()
numOfAccounts := 5
@ -287,7 +302,7 @@ func TestMutationCommitThinHistory(t *testing.T) {
}
resAccStorage := make(map[common.Hash]common.Hash)
err = db.Walk(dbutils.StorageBucket, dbutils.GenerateStoragePrefix(addrHash, acc.Incarnation), common.HashLength+8, func(k, v []byte) (b bool, e error) {
err = db.Walk(dbutils.StorageBucket, dbutils.GenerateStoragePrefix(addrHash, acc.Incarnation), 8*(common.HashLength+8), func(k, v []byte) (b bool, e error) {
resAccStorage[common.BytesToHash(k[common.HashLength+8:])] = common.BytesToHash(v)
return true, nil
})
@ -314,29 +329,35 @@ func TestMutationCommitThinHistory(t *testing.T) {
}
}
csData, err := db.Get(dbutils.AccountChangeSetBucket, dbutils.EncodeTimestamp(1))
csData, err := db.Get(dbutils.AccountChangeSetBucket, dbutils.EncodeTimestamp(2))
if err != nil {
t.Fatal(err)
}
expectedChangeSet := changeset.NewAccountChangeSet()
for i := range addrHashes {
b := make([]byte, accHistory[i].EncodingLengthForStorage())
accHistory[i].EncodeForStorage(b)
// Make adjustments for THIN_HISTORY
c := accHistory[i].SelfCopy()
copy(c.CodeHash[:], emptyCodeHash)
c.Root = trie.EmptyRoot
bLen := c.EncodingLengthForStorage()
b := make([]byte, bLen)
c.EncodeForStorage(b)
innerErr := expectedChangeSet.Add(addrHashes[i].Bytes(), b)
if innerErr != nil {
t.Fatal(innerErr)
}
}
expectedData, err := changeset.EncodeChangeSet(expectedChangeSet)
sort.Sort(expectedChangeSet)
expectedData, err := changeset.EncodeAccounts(expectedChangeSet)
assert.NoError(t, err)
if !bytes.Equal(csData, expectedData) {
spew.Dump("res", csData)
spew.Dump("expected", expectedData)
t.Fatal("incorrect changeset")
}
csData, err = db.Get(dbutils.StorageChangeSetBucket, dbutils.EncodeTimestamp(1))
csData, err = db.Get(dbutils.StorageChangeSetBucket, dbutils.EncodeTimestamp(2))
if err != nil {
t.Fatal(err)
}
@ -349,14 +370,17 @@ func TestMutationCommitThinHistory(t *testing.T) {
for i, addrHash := range addrHashes {
for j := 0; j < numOfStateKeys; j++ {
key := common.Hash{uint8(i*100 + j)}
keyHash, err1 := common.HashData(key.Bytes())
if err1 != nil {
t.Fatal(err1)
}
value := common.Hash{uint8(10 + j)}
err := expectedChangeSet.Add(dbutils.GenerateCompositeStorageKey(addrHash, accHistory[i].Incarnation, key), value.Bytes())
if err != nil {
t.Fatal(err)
if err2 := expectedChangeSet.Add(dbutils.GenerateCompositeStorageKey(addrHash, accHistory[i].Incarnation, keyHash), value.Bytes()); err2 != nil {
t.Fatal(err2)
}
}
}
sort.Sort(expectedChangeSet)
expectedData, err = changeset.EncodeStorage(expectedChangeSet)
assert.NoError(t, err)
if !bytes.Equal(csData, expectedData) {
@ -366,106 +390,117 @@ func TestMutationCommitThinHistory(t *testing.T) {
}
}
func generateAccountsWithStorageAndHistory(t *testing.T, db Putter, numOfAccounts, numOfStateKeys int) ([]common.Hash, []*accounts.Account, []map[common.Hash]common.Hash, []*accounts.Account, []map[common.Hash]common.Hash) {
func generateAccountsWithStorageAndHistory(t *testing.T, db ethdb.Database, numOfAccounts, numOfStateKeys int) ([]common.Hash, []*accounts.Account, []map[common.Hash]common.Hash, []*accounts.Account, []map[common.Hash]common.Hash) {
t.Helper()
accHistory := make([]*accounts.Account, numOfAccounts)
accState := make([]*accounts.Account, numOfAccounts)
accStateStorage := make([]map[common.Hash]common.Hash, numOfAccounts)
accHistoryStateStorage := make([]map[common.Hash]common.Hash, numOfAccounts)
addrs := make([]common.Address, numOfAccounts)
addrHashes := make([]common.Hash, numOfAccounts)
tds := NewTrieDbState(common.Hash{}, db, 1)
blockWriter := tds.DbStateWriter()
ctx := context.Background()
for i := range accHistory {
var b []byte
accHistory[i], addrHashes[i] = randomAccount(t)
accHistory[i], addrs[i], addrHashes[i] = randomAccount(t)
accHistory[i].Balance = *big.NewInt(100)
accHistory[i].CodeHash = common.Hash{uint8(10 + i)}
accHistory[i].Root = common.Hash{uint8(10 + i)}
accHistory[i].Incarnation = uint64(i)
b = make([]byte, accHistory[i].EncodingLengthForStorage())
accHistory[i].EncodeForStorage(b)
err := db.PutS(dbutils.AccountsHistoryBucket, addrHashes[i].Bytes(), b, 1, false)
if err != nil {
t.Fatal(err)
}
accHistory[i].Incarnation = uint64(i + 1)
accState[i] = accHistory[i].SelfCopy()
accState[i].Nonce++
accState[i].Balance = *big.NewInt(200)
b = make([]byte, accState[i].EncodingLengthForStorage())
accState[i].EncodeForStorage(b)
err = db.Put(dbutils.AccountsBucket, addrHashes[i].Bytes(), b)
if err != nil {
t.Fatal(err)
}
accStateStorage[i] = make(map[common.Hash]common.Hash)
accHistoryStateStorage[i] = make(map[common.Hash]common.Hash)
for j := 0; j < numOfStateKeys; j++ {
key := common.Hash{uint8(i*100 + j)}
value := common.Hash{uint8(j)}
accStateStorage[i][key] = value
err = db.Put(dbutils.StorageBucket, dbutils.GenerateCompositeStorageKey(addrHashes[i], accHistory[i].Incarnation, key), value.Bytes())
keyHash, err := common.HashData(key.Bytes())
if err != nil {
t.Fatal(err)
}
newValue := common.Hash{uint8(j)}
if newValue != (common.Hash{}) {
// Empty value is not considered to be present
accStateStorage[i][keyHash] = newValue
}
newValue := common.Hash{uint8(10 + j)}
accHistoryStateStorage[i][key] = newValue
err = db.PutS(
dbutils.StorageHistoryBucket,
dbutils.GenerateCompositeStorageKey(addrHashes[i], accHistory[i].Incarnation, key),
newValue.Bytes(),
1,
false,
)
if err != nil {
value := common.Hash{uint8(10 + j)}
accHistoryStateStorage[i][keyHash] = value
if err := blockWriter.WriteAccountStorage(ctx, addrs[i], accHistory[i].Incarnation, &key, &value, &newValue); err != nil {
t.Fatal(err)
}
}
if err := blockWriter.UpdateAccountData(ctx, addrs[i], accHistory[i] /* original */, accState[i]); err != nil {
t.Fatal(err)
}
}
tds.SetBlockNr(2)
if err := blockWriter.WriteChangeSets(); err != nil {
t.Fatal(err)
}
if err := blockWriter.WriteHistory(); err != nil {
t.Fatal(err)
}
return addrHashes, accState, accStateStorage, accHistory, accHistoryStateStorage
}
func TestMutation_GetAsOf(t *testing.T) {
db := NewMemDatabase()
db := ethdb.NewMemDatabase()
mutDB := db.NewBatch()
tds := NewTrieDbState(common.Hash{}, mutDB, 0)
blockWriter := tds.DbStateWriter()
ctx := context.Background()
emptyAccount := accounts.NewAccount()
acc, addrHash := randomAccount(t)
acc, addr, addrHash := randomAccount(t)
acc2 := acc.SelfCopy()
acc2.Nonce = 1
acc4 := acc.SelfCopy()
acc4.Nonce = 3
b := make([]byte, acc.EncodingLengthForStorage())
acc.EncodeForStorage(b)
err := db.Put(dbutils.AccountsBucket, addrHash.Bytes(), b)
if err != nil {
tds.SetBlockNr(0)
if err := blockWriter.UpdateAccountData(ctx, addr, &emptyAccount, acc2); err != nil {
t.Fatal(err)
}
if err := blockWriter.WriteChangeSets(); err != nil {
t.Fatal(err)
}
if err := blockWriter.WriteHistory(); err != nil {
t.Fatal(err)
}
b = make([]byte, acc2.EncodingLengthForStorage())
acc2.EncodeForStorage(b)
err = db.PutS(dbutils.AccountsHistoryBucket, addrHash.Bytes(), b, 2, false)
if err != nil {
blockWriter = tds.DbStateWriter()
tds.SetBlockNr(2)
if err := blockWriter.UpdateAccountData(ctx, addr, acc2, acc4); err != nil {
t.Fatal(err)
}
if err := blockWriter.WriteChangeSets(); err != nil {
t.Fatal(err)
}
if err := blockWriter.WriteHistory(); err != nil {
t.Fatal(err)
}
b = make([]byte, acc4.EncodingLengthForStorage())
acc4.EncodeForStorage(b)
err = db.PutS(dbutils.AccountsHistoryBucket, addrHash.Bytes(), b, 4, false)
if err != nil {
blockWriter = tds.DbStateWriter()
tds.SetBlockNr(4)
if err := blockWriter.UpdateAccountData(ctx, addr, acc4, acc); err != nil {
t.Fatal(err)
}
if err := blockWriter.WriteChangeSets(); err != nil {
t.Fatal(err)
}
if err := blockWriter.WriteHistory(); err != nil {
t.Fatal(err)
}
_, err = mutDB.Commit()
if err != nil {
if _, err := mutDB.Commit(); err != nil {
t.Fatal(err)
}
b, err = db.Get(dbutils.AccountsBucket, addrHash.Bytes())
b, err := db.Get(dbutils.AccountsBucket, addrHash.Bytes())
if err != nil {
t.Fatal(err)
}
@ -548,7 +583,7 @@ func TestMutation_GetAsOf(t *testing.T) {
}
}
func randomAccount(t *testing.T) (*accounts.Account, common.Hash) {
func randomAccount(t *testing.T) (*accounts.Account, common.Address, common.Hash) {
t.Helper()
key, err := crypto.GenerateKey()
if err != nil {
@ -562,5 +597,186 @@ func randomAccount(t *testing.T) (*accounts.Account, common.Hash) {
if err != nil {
t.Fatal(err)
}
return &acc, addrHash
return &acc, addr, addrHash
}
// TestBoltDB_WalkAsOf1 exercises WalkAsOf over the storage bucket and its
// history (full-history mode only; skipped under THIN_HISTORY). It writes
// storage values for seven accounts at simulated block numbers 3, 5 and 6,
// then verifies that walking "as of" blocks 2, 4 and 6 reconstructs the
// expected historical views.
func TestBoltDB_WalkAsOf1(t *testing.T) {
	if debug.IsThinHistory() {
		t.Skip()
	}
	db := ethdb.NewMemDatabase()
	tds := NewTrieDbState(common.Hash{}, db, 1)
	blockWriter := tds.DbStateWriter()
	ctx := context.Background()
	emptyVal := common.Hash{}
	// Expected results of walking as of blocks 2, 4 and 6 respectively.
	block2Expected := &changeset.ChangeSet{
		Changes: make([]changeset.Change, 0),
	}
	block4Expected := &changeset.ChangeSet{
		Changes: make([]changeset.Change, 0),
	}
	block6Expected := &changeset.ChangeSet{
		Changes: make([]changeset.Change, 0),
	}
	//create state and history
	// Block 3: keys 1..7 change away from their "block 3" values; keys 1..2
	// go straight to the final "state" values, keys 3..7 to "block 5" values.
	for i := uint8(1); i <= 7; i++ {
		addr := common.Address{i}
		addrHash, _ := common.HashData(addr[:])
		k := common.Hash{i}
		keyHash, _ := common.HashData(k[:])
		key := dbutils.GenerateCompositeStorageKey(addrHash, 1, keyHash)
		val3 := common.BytesToHash([]byte("block 3 " + strconv.Itoa(int(i))))
		val5 := common.BytesToHash([]byte("block 5 " + strconv.Itoa(int(i))))
		val := common.BytesToHash([]byte("state " + strconv.Itoa(int(i))))
		if i <= 2 {
			if err := blockWriter.WriteAccountStorage(ctx, addr, 1, &k, &val3, &val); err != nil {
				t.Fatal(err)
			}
		} else {
			if err := blockWriter.WriteAccountStorage(ctx, addr, 1, &k, &val3, &val5); err != nil {
				t.Fatal(err)
			}
		}
		// As of block 2, every key still carries its pre-block-3 value.
		if err := block2Expected.Add(key, []byte("block 3 "+strconv.Itoa(int(i)))); err != nil {
			t.Fatal(err)
		}
	}
	tds.SetBlockNr(3)
	if err := blockWriter.WriteChangeSets(); err != nil {
		t.Fatal(err)
	}
	if err := blockWriter.WriteHistory(); err != nil {
		t.Fatal(err)
	}
	blockWriter = tds.DbStateWriter()
	// Block 5: keys 3..4 reach their final "state" values; keys 5..7 are
	// deleted (written as the empty value).
	for i := uint8(3); i <= 7; i++ {
		addr := common.Address{i}
		addrHash, _ := common.HashData(addr[:])
		k := common.Hash{i}
		keyHash, _ := common.HashData(k[:])
		key := dbutils.GenerateCompositeStorageKey(addrHash, 1, keyHash)
		val5 := common.BytesToHash([]byte("block 5 " + strconv.Itoa(int(i))))
		val := common.BytesToHash([]byte("state " + strconv.Itoa(int(i))))
		if i > 4 {
			if err := blockWriter.WriteAccountStorage(ctx, addr, 1, &k, &val5, &emptyVal); err != nil {
				t.Fatal(err)
			}
		} else {
			if err := blockWriter.WriteAccountStorage(ctx, addr, 1, &k, &val5, &val); err != nil {
				t.Fatal(err)
			}
		}
		// As of block 4, these keys still carry their pre-block-5 values.
		if err := block4Expected.Add(key, []byte("block 5 "+strconv.Itoa(int(i)))); err != nil {
			t.Fatal(err)
		}
	}
	tds.SetBlockNr(5)
	if err := blockWriter.WriteChangeSets(); err != nil {
		t.Fatal(err)
	}
	if err := blockWriter.WriteHistory(); err != nil {
		t.Fatal(err)
	}
	blockWriter = tds.DbStateWriter()
	// Keys 1..4 survive to the final state: they appear when walking as of
	// block 6, and keys 1..2 (unchanged since block 3) also as of block 4.
	for i := uint8(1); i < 5; i++ {
		addr := common.Address{i}
		addrHash, _ := common.HashData(addr[:])
		k := common.Hash{i}
		keyHash, _ := common.HashData(k[:])
		key := dbutils.GenerateCompositeStorageKey(addrHash, uint64(1), keyHash)
		val := []byte("state " + strconv.Itoa(int(i)))
		err := block6Expected.Add(key, val)
		if err != nil {
			t.Fatal(err)
		}
		if i <= 2 {
			err = block4Expected.Add(key, val)
			if err != nil {
				t.Fatal(err)
			}
		}
	}
	tds.SetBlockNr(6)
	if err := blockWriter.WriteChangeSets(); err != nil {
		t.Fatal(err)
	}
	if err := blockWriter.WriteHistory(); err != nil {
		t.Fatal(err)
	}
	// Actual results collected by the WalkAsOf calls below.
	block2 := &changeset.ChangeSet{
		Changes: make([]changeset.Change, 0),
	}
	block4 := &changeset.ChangeSet{
		Changes: make([]changeset.Change, 0),
	}
	block6 := &changeset.ChangeSet{
		Changes: make([]changeset.Change, 0),
	}
	//walk and collect walkAsOf result
	var err error
	// Zero start key covering the full 72-byte composite storage key range
	// (addrHash + incarnation + keyHash).
	var startKey [72]byte
	err = db.WalkAsOf(dbutils.StorageBucket, dbutils.StorageHistoryBucket, startKey[:], 0, 2, func(k []byte, v []byte) (b bool, e error) {
		err = block2.Add(k, v)
		if err != nil {
			t.Fatal(err)
		}
		//fmt.Printf("%v - %v \n", common.BytesToHash(k).String(), string(v))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	err = db.WalkAsOf(dbutils.StorageBucket, dbutils.StorageHistoryBucket, startKey[:], 0, 4, func(k []byte, v []byte) (b bool, e error) {
		err = block4.Add(k, v)
		if err != nil {
			t.Fatal(err)
		}
		//fmt.Printf("%v - %v \n", common.BytesToHash(k).String(), string(v))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	err = db.WalkAsOf(dbutils.StorageBucket, dbutils.StorageHistoryBucket, startKey[:], 0, 6, func(k []byte, v []byte) (b bool, e error) {
		err = block6.Add(k, v)
		if err != nil {
			t.Fatal(err)
		}
		//fmt.Printf("%v - %v \n", common.BytesToHash(k).String(), string(v))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	// Expected sets were built out of key order; sort before comparing.
	sort.Sort(block2Expected)
	if !reflect.DeepEqual(block2, block2Expected) {
		spew.Dump("expected", block2Expected)
		spew.Dump("current", block2)
		t.Fatal("block 2 result is incorrect")
	}
	sort.Sort(block4Expected)
	if !reflect.DeepEqual(block4, block4Expected) {
		spew.Dump("expected", block4Expected)
		spew.Dump("current", block4)
		t.Fatal("block 4 result is incorrect")
	}
	sort.Sort(block6Expected)
	if !reflect.DeepEqual(block6, block6Expected) {
		spew.Dump("expected", block6Expected)
		spew.Dump("current", block6)
		t.Fatal("block 6 result is incorrect")
	}
}

View File

@ -29,6 +29,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/common/debug"
"github.com/ledgerwatch/turbo-geth/core/state"
"github.com/ledgerwatch/turbo-geth/crypto"
"github.com/ledgerwatch/turbo-geth/ethdb"
@ -67,6 +68,9 @@ func (h resultHash) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h resultHash) Less(i, j int) bool { return bytes.Compare(h[i].Bytes(), h[j].Bytes()) < 0 }
func TestAccountRange(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
var (
db = ethdb.NewMemDatabase()
tds = state.NewTrieDbState(common.Hash{}, db, 0)
@ -160,6 +164,9 @@ func TestAccountRange(t *testing.T) {
}
func TestEmptyAccountRange(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
var (
statedb = state.NewDbState(ethdb.NewMemDatabase(), 0)
)
@ -177,6 +184,9 @@ func TestEmptyAccountRange(t *testing.T) {
}
func TestStorageRangeAt(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
// Create a state where account 0x010000... has a few storage entries.
var (
db = ethdb.NewMemDatabase()

View File

@ -147,7 +147,13 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
}
}
chainConfig, genesisHash, _, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideIstanbul, config.OverrideMuirGlacier)
chainConfig, genesisHash, _, genesisErr := core.SetupGenesisBlockWithOverride(
chainDb,
config.Genesis,
config.OverrideIstanbul,
config.OverrideMuirGlacier,
config.StorageMode.History,
)
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
return nil, genesisErr
}

View File

@ -1286,7 +1286,7 @@ func (pm *ProtocolManager) handleDebugMsg(p *debugPeer) error {
if err != nil {
return err
}
chainConfig, _, _, err := core.SetupGenesisBlock(ethDb, genesis)
chainConfig, _, _, err := core.SetupGenesisBlock(ethDb, genesis, true /* history */)
if err != nil {
return fmt.Errorf("SetupGenesisBlock: %w", err)
}

View File

@ -29,6 +29,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/common/debug"
"github.com/ledgerwatch/turbo-geth/consensus/ethash"
"github.com/ledgerwatch/turbo-geth/core"
"github.com/ledgerwatch/turbo-geth/core/types"
@ -647,6 +648,9 @@ func setUpDummyAccountsForFirehose(t *testing.T) (*ProtocolManager, *testFirehos
}
func TestFirehoseStateRanges(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
pm, peer := setUpDummyAccountsForFirehose(t)
defer peer.close()
@ -705,6 +709,9 @@ func TestFirehoseStateRanges(t *testing.T) {
}
func TestFirehoseTooManyLeaves(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
signer := types.HomesteadSigner{}
amount := big.NewInt(10)
generator := func(i int, block *core.BlockGen) {
@ -956,6 +963,9 @@ func setUpStorageContractB(t *testing.T) (*ProtocolManager, common.Address) {
}
func TestFirehoseStorageRanges(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
pm, addr := setUpStorageContractA(t)
peer, _ := newFirehoseTestPeer("peer", pm)
defer peer.close()
@ -1008,6 +1018,9 @@ func TestFirehoseStorageRanges(t *testing.T) {
// TestFirehoseStorageNodesA tests a trie with a branch node at the root and 2 leaf nodes.
func TestFirehoseStorageNodesA(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
pm, addr := setUpStorageContractA(t)
peer, _ := newFirehoseTestPeer("peer", pm)
defer peer.close()
@ -1045,6 +1058,9 @@ func TestFirehoseStorageNodesA(t *testing.T) {
// TestFirehoseStorageNodesB tests a trie with an extension node at the root,
// 1 intermediate branch node, and 2 leaf nodes.
func TestFirehoseStorageNodesB(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
pm, addr := setUpStorageContractB(t)
peer, _ := newFirehoseTestPeer("peer", pm)
defer peer.close()
@ -1118,6 +1134,9 @@ func TestFirehoseStorageNodesB(t *testing.T) {
}
func TestFirehoseStateNodes(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
pm, peer := setUpDummyAccountsForFirehose(t)
defer peer.close()
@ -1215,6 +1234,9 @@ func TestFirehoseStateNodes(t *testing.T) {
}
func TestFirehoseBytecode(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
// Define two accounts to simulate transactions with
acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")

View File

@ -18,14 +18,12 @@ package ethdb
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"runtime"
"time"
"github.com/dgraph-io/badger/v2"
"github.com/ledgerwatch/turbo-geth/common/changeset"
"github.com/ledgerwatch/turbo-geth/common/dbutils"
"github.com/ledgerwatch/turbo-geth/log"
)
@ -167,76 +165,6 @@ func (db *BadgerDatabase) Get(bucket, key []byte) ([]byte, error) {
return val, err
}
// PutS adds a new entry to the historical buckets:
// hBucket (unless changeSetBucketOnly) and AccountChangeSet.
func (db *BadgerDatabase) PutS(hBucket, key, value []byte, timestamp uint64, changeSetBucketOnly bool) error {
composite, encodedTS := dbutils.CompositeKeySuffix(key, timestamp)
hKey := bucketKey(hBucket, composite)
changeSetKey := bucketKey(dbutils.ChangeSetByIndexBucket(hBucket), encodedTS)
return db.db.Update(func(tx *badger.Txn) error {
if !changeSetBucketOnly {
if err := tx.Set(hKey, value); err != nil {
return err
}
}
var sh changeset.ChangeSet
err := sh.Add(key, value)
if err != nil {
return err
}
dat, err := changeset.EncodeChangeSet(&sh)
if err != nil {
fmt.Println(err)
log.Error("PutS Decode suffix err", "err", err)
return err
}
// sort.Sort(changeSetKey)
return tx.Set(changeSetKey, dat)
})
}
// DeleteTimestamp removes data for a given timestamp from all historical buckets (incl. AccountChangeSet).
func (db *BadgerDatabase) DeleteTimestamp(timestamp uint64) error {
encodedTS := dbutils.EncodeTimestamp(timestamp)
accountChangeSetKey := bucketKey(dbutils.AccountChangeSetBucket, encodedTS)
storageChangeSetKey := bucketKey(dbutils.StorageChangeSetBucket, encodedTS)
return db.db.Update(func(tx *badger.Txn) error {
f := func(changeSetKey []byte, hBucket []byte) error {
item, err := tx.Get(changeSetKey)
if err != nil {
return err
}
var changes []byte
err = item.Value(func(v []byte) error {
changes = v
return nil
})
if err != nil {
return err
}
err = changeset.Walk(changes, func(kk, _ []byte) error {
kk = append(kk, encodedTS...)
return tx.Delete(bucketKey(hBucket, kk))
})
if err != nil {
return err
}
return tx.Delete(item.Key())
}
err := f(accountChangeSetKey, dbutils.AccountsHistoryBucket)
if err != nil {
return err
}
return f(storageChangeSetKey, dbutils.StorageHistoryBucket)
})
}
// GetAsOf returns the value valid as of a given timestamp.
func (db *BadgerDatabase) GetAsOf(bucket, hBucket, key []byte, timestamp uint64) ([]byte, error) {
composite, _ := dbutils.CompositeKeySuffix(key, timestamp)
@ -419,8 +347,6 @@ func (db *BadgerDatabase) NewBatch() DbWithPendingMutations {
m := &mutation{
db: db,
puts: newPuts(),
accountChangeSetByBlock: make(map[uint64]*changeset.ChangeSet),
storageChangeSetByBlock: make(map[uint64]*changeset.ChangeSet),
}
return m
}

View File

@ -103,49 +103,6 @@ func (db *BoltDatabase) Put(bucket, key []byte, value []byte) error {
return err
}
// PutS adds a new entry to the historical buckets:
// hBucket (unless changeSetBucketOnly) and AccountChangeSet.
func (db *BoltDatabase) PutS(hBucket, key, value []byte, timestamp uint64, changeSetBucketOnly bool) error {
composite, encodedTS := dbutils.CompositeKeySuffix(key, timestamp)
changeSetKey := encodedTS
err := db.db.Update(func(tx *bolt.Tx) error {
if !changeSetBucketOnly {
hb, err := tx.CreateBucketIfNotExists(hBucket, false)
if err != nil {
return err
}
if debug.IsThinHistory() {
b, _ := hb.Get(key)
index := dbutils.WrapHistoryIndex(b)
index.Append(timestamp)
if err = hb.Put(key, *index); err != nil {
return err
}
} else {
if err = hb.Put(composite, value); err != nil {
return err
}
}
}
sb, err := tx.CreateBucketIfNotExists(dbutils.ChangeSetByIndexBucket(hBucket), true)
if err != nil {
return err
}
dat, _ := sb.Get(changeSetKey)
dat, err = addToChangeSet(hBucket, dat, key, value)
if err != nil {
log.Error("PutS DecodeChangeSet changeSet err", "err", err)
return err
}
// s.Sort(dat) not sorting it here. seems that this Puts is only for testing.
return sb.Put(changeSetKey, dat)
})
return err
}
func (db *BoltDatabase) MultiPut(tuples ...[]byte) (uint64, error) {
var savedTx *bolt.Tx
err := db.db.Update(func(tx *bolt.Tx) error {
@ -378,9 +335,13 @@ func (db *BoltDatabase) MultiWalk(bucket []byte, startkeys [][]byte, fixedbits [
return err
}
func (db *BoltDatabase) walkAsOfThin(bucket, hBucket, startkey []byte, fixedbits uint, timestamp uint64, walker func(k []byte, v []byte) (bool, error)) error {
panic("")
}
func (db *BoltDatabase) WalkAsOf(bucket, hBucket, startkey []byte, fixedbits uint, timestamp uint64, walker func(k []byte, v []byte) (bool, error)) error {
if debug.IsThinHistory() {
panic("WalkAsOf")
return db.walkAsOfThin(bucket, hBucket, startkey, fixedbits, timestamp, walker)
}
fixedbytes, mask := Bytesmask(fixedbits)
@ -613,57 +574,6 @@ func (db *BoltDatabase) Delete(bucket, key []byte) error {
return err
}
// DeleteTimestamp removes data for a given timestamp (block number)
// from all historical buckets (incl. AccountChangeSet, StorageChangeSet).
func (db *BoltDatabase) DeleteTimestamp(timestamp uint64) error {
encodedTS := dbutils.EncodeTimestamp(timestamp)
return db.db.Update(func(tx *bolt.Tx) error {
removeChangeSetAndHistory := func(changeSetBucket []byte, historyBucket []byte) error {
sb := tx.Bucket(changeSetBucket)
if sb == nil {
return nil
}
v, _ := sb.Get(encodedTS)
if len(v) == 0 {
return ErrKeyNotFound
}
hb := tx.Bucket(historyBucket)
if hb == nil {
return nil
}
var err error
if debug.IsThinHistory() {
if bytes.Equal(changeSetBucket, dbutils.AccountChangeSetBucket) {
err = changeset.AccountChangeSetBytes(v).Walk(func(kk, _ []byte) error {
kk = append(kk, encodedTS...)
return hb.Delete(kk)
})
} else {
err = changeset.StorageChangeSetBytes(v).Walk(func(kk, _ []byte) error {
kk = append(kk, encodedTS...)
return hb.Delete(kk)
})
}
} else {
err = changeset.Walk(v, func(kk, _ []byte) error {
kk = append(kk, encodedTS...)
return hb.Delete(kk)
})
}
if err != nil {
return err
}
return sb.Delete(encodedTS)
}
innerErr := removeChangeSetAndHistory(dbutils.AccountChangeSetBucket, dbutils.AccountsHistoryBucket)
if innerErr != nil {
return innerErr
}
return removeChangeSetAndHistory(dbutils.StorageChangeSetBucket, dbutils.StorageHistoryBucket)
})
}
func (db *BoltDatabase) DeleteBucket(bucket []byte) error {
err := db.db.Update(func(tx *bolt.Tx) error {
if err := tx.DeleteBucket(bucket); err != nil {
@ -714,8 +624,6 @@ func (db *BoltDatabase) NewBatch() DbWithPendingMutations {
m := &mutation{
db: db,
puts: newPuts(),
accountChangeSetByBlock: make(map[uint64]*changeset.ChangeSet),
storageChangeSetByBlock: make(map[uint64]*changeset.ChangeSet),
}
return m
}

View File

@ -1,339 +1 @@
package ethdb
import (
"fmt"
"github.com/ledgerwatch/turbo-geth/common/changeset"
"reflect"
"strconv"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/common/dbutils"
"github.com/ledgerwatch/turbo-geth/common/debug"
)
func TestBoltDB_WalkAsOf1(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
db := NewMemDatabase()
block2Expected := &changeset.ChangeSet{
Changes: make([]changeset.Change, 0),
}
block4Expected := &changeset.ChangeSet{
Changes: make([]changeset.Change, 0),
}
block6Expected := &changeset.ChangeSet{
Changes: make([]changeset.Change, 0),
}
//create state and history
for i := uint8(1); i < 5; i++ {
key := dbutils.GenerateCompositeStorageKey(common.Hash{i}, uint64(1), common.Hash{i})
val := []byte("state " + strconv.Itoa(int(i)))
err := db.Put(dbutils.StorageBucket, key, val)
if err != nil {
t.Fatal(err)
}
err = block6Expected.Add(key, val)
if err != nil {
t.Fatal(err)
}
if i <= 2 {
err = block4Expected.Add(key, val)
if err != nil {
t.Fatal(err)
}
}
}
for i := uint8(1); i <= 7; i++ {
key := dbutils.GenerateCompositeStorageKey(common.Hash{i}, 1, common.Hash{i})
val := []byte("block 3 " + strconv.Itoa(int(i)))
err := db.PutS(
dbutils.StorageHistoryBucket,
key,
val,
3,
false,
)
if err != nil {
t.Fatal(err)
}
err = block2Expected.Add(key, val)
if err != nil {
t.Fatal(err)
}
}
for i := uint8(3); i <= 7; i++ {
key := dbutils.GenerateCompositeStorageKey(common.Hash{i}, 1, common.Hash{i})
val := []byte("block 5 " + strconv.Itoa(int(i)))
err := db.PutS(
dbutils.StorageHistoryBucket,
key,
val,
5,
false,
)
if err != nil {
t.Fatal(err)
}
err = block4Expected.Add(key, val)
if err != nil {
t.Fatal(err)
}
}
block2 := &changeset.ChangeSet{
Changes: make([]changeset.Change, 0),
}
block4 := &changeset.ChangeSet{
Changes: make([]changeset.Change, 0),
}
block6 := &changeset.ChangeSet{
Changes: make([]changeset.Change, 0),
}
//walk and collect walkAsOf result
var err error
var startKey [72]byte
err = db.WalkAsOf(dbutils.StorageBucket, dbutils.StorageHistoryBucket, startKey[:], 0, 2, func(k []byte, v []byte) (b bool, e error) {
err = block2.Add(k, v)
if err != nil {
t.Fatal(err)
}
//fmt.Printf("%v - %v \n", common.BytesToHash(k).String(), string(v))
return true, nil
})
if err != nil {
t.Fatal(err)
}
err = db.WalkAsOf(dbutils.StorageBucket, dbutils.StorageHistoryBucket, startKey[:], 0, 4, func(k []byte, v []byte) (b bool, e error) {
err = block4.Add(k, v)
if err != nil {
t.Fatal(err)
}
//fmt.Printf("%v - %v \n", common.BytesToHash(k).String(), string(v))
return true, nil
})
if err != nil {
t.Fatal(err)
}
err = db.WalkAsOf(dbutils.StorageBucket, dbutils.StorageHistoryBucket, startKey[:], 0, 6, func(k []byte, v []byte) (b bool, e error) {
err = block6.Add(k, v)
if err != nil {
t.Fatal(err)
}
//fmt.Printf("%v - %v \n", common.BytesToHash(k).String(), string(v))
return true, nil
})
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(block2, block2Expected) {
spew.Dump("expected", block2Expected)
spew.Dump("current", block2)
t.Fatal("block 2 result is incorrect")
}
if !reflect.DeepEqual(block4, block4Expected) {
spew.Dump(block6)
t.Fatal("block 4 result is incorrect")
}
if !reflect.DeepEqual(block6, block6Expected) {
spew.Dump(block6)
t.Fatal("block 6 result is incorrect")
}
}
func TestBoltDB_MultiWalkAsOf(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
db := NewMemDatabase()
block2Expected := &changeset.ChangeSet{
Changes: []changeset.Change{
{
Key: dbutils.GenerateCompositeStorageKey(common.Hash{1}, 1, common.Hash{1}),
Value: []byte("block 3 " + strconv.Itoa(1)),
},
{
Key: dbutils.GenerateCompositeStorageKey(common.Hash{3}, 1, common.Hash{3}),
Value: []byte("block 3 " + strconv.Itoa(3)),
},
{
Key: dbutils.GenerateCompositeStorageKey(common.Hash{7}, 1, common.Hash{7}),
Value: []byte("block 3 " + strconv.Itoa(7)),
},
},
}
block4Expected := &changeset.ChangeSet{
Changes: []changeset.Change{
{
Key: dbutils.GenerateCompositeStorageKey(common.Hash{1}, 1, common.Hash{1}),
Value: []byte("state " + strconv.Itoa(1)),
},
{
Key: dbutils.GenerateCompositeStorageKey(common.Hash{3}, 1, common.Hash{3}),
Value: []byte("block 5 " + strconv.Itoa(3)),
},
{
Key: dbutils.GenerateCompositeStorageKey(common.Hash{7}, 1, common.Hash{7}),
Value: []byte("block 5 " + strconv.Itoa(7)),
},
},
}
block6Expected := &changeset.ChangeSet{
Changes: []changeset.Change{
{
Key: dbutils.GenerateCompositeStorageKey(common.Hash{1}, 1, common.Hash{1}),
Value: []byte("state " + strconv.Itoa(1)),
},
{
Key: dbutils.GenerateCompositeStorageKey(common.Hash{3}, 1, common.Hash{3}),
Value: []byte("state " + strconv.Itoa(3)),
},
},
}
//create state and history
for i := uint8(1); i < 5; i++ {
key := dbutils.GenerateCompositeStorageKey(common.Hash{i}, uint64(1), common.Hash{i})
val := []byte("state " + strconv.Itoa(int(i)))
err := db.Put(dbutils.StorageBucket, key, val)
if err != nil {
t.Fatal(err)
}
}
for i := uint8(1); i <= 7; i++ {
key := dbutils.GenerateCompositeStorageKey(common.Hash{i}, 1, common.Hash{i})
val := []byte("block 3 " + strconv.Itoa(int(i)))
err := db.PutS(
dbutils.StorageHistoryBucket,
key,
val,
3,
false,
)
if err != nil {
t.Fatal(err)
}
}
for i := uint8(3); i <= 7; i++ {
key := dbutils.GenerateCompositeStorageKey(common.Hash{i}, 1, common.Hash{i})
val := []byte("block 5 " + strconv.Itoa(int(i)))
err := db.PutS(
dbutils.StorageHistoryBucket,
key,
val,
5,
false,
)
if err != nil {
t.Fatal(err)
}
}
//walk and collect walkAsOf result
var err error
startKeys := [][]byte{
dbutils.GenerateCompositeStorageKey(common.Hash{1}, 1, common.Hash{1}),
dbutils.GenerateCompositeStorageKey(common.Hash{3}, 1, common.Hash{3}),
dbutils.GenerateCompositeStorageKey(common.Hash{7}, 1, common.Hash{7}),
}
fixedBits := []uint{
60,
60,
60,
}
if err != nil {
t.Fatal(err)
}
if err != nil {
t.Fatal(err)
}
if err != nil {
t.Fatal(err)
}
block2 := &changeset.ChangeSet{
Changes: make([]changeset.Change, 0),
}
block4 := &changeset.ChangeSet{
Changes: make([]changeset.Change, 0),
}
block6 := &changeset.ChangeSet{
Changes: make([]changeset.Change, 0),
}
err = db.MultiWalkAsOf(dbutils.StorageBucket, dbutils.StorageHistoryBucket, startKeys, fixedBits, 2, func(idx int, k []byte, v []byte) error {
fmt.Printf("%v - %s - %s\n", idx, string(k), string(v))
err = block2.Add(k, v)
if err != nil {
t.Fatal(err)
}
return nil
})
if err != nil {
t.Fatal(err)
}
err = db.MultiWalkAsOf(dbutils.StorageBucket, dbutils.StorageHistoryBucket, startKeys, fixedBits, 4, func(idx int, k []byte, v []byte) error {
fmt.Printf("%v - %s - %s\n", idx, string(k), string(v))
err = block4.Add(k, v)
if err != nil {
t.Fatal(err)
}
return nil
})
if err != nil {
t.Fatal(err)
}
err = db.MultiWalkAsOf(dbutils.StorageBucket, dbutils.StorageHistoryBucket, startKeys, fixedBits, 6, func(idx int, k []byte, v []byte) error {
fmt.Printf("%v - %s - %s\n", idx, string(k), string(v))
err = block6.Add(k, v)
if err != nil {
t.Fatal(err)
}
return nil
})
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(block2Expected, block2) {
spew.Dump(block2)
t.Fatal("block2")
}
if !reflect.DeepEqual(block4Expected, block4) {
spew.Dump(block4)
t.Fatal("block4")
}
if !reflect.DeepEqual(block6Expected, block6) {
spew.Dump(block6)
t.Fatal("block6")
}
}

View File

@ -1,9 +1,6 @@
package ethdb
import (
"bytes"
"github.com/ledgerwatch/turbo-geth/common/changeset"
"github.com/ledgerwatch/turbo-geth/common/dbutils"
)
// Maximum length (in bytes of encoded timestamp)
@ -81,33 +78,3 @@ func decode7to8(b []byte) []byte {
}
return out
}
// addToChangeSet is not part the AccountChangeSet API, and it is only used in the test settings.
// In the production settings, ChangeSets encodings are never modified.
// In production settings (mutation.PutS) we always first populate AccountChangeSet object,
// then encode it once, and then only work with the encoding
func addToChangeSet(hb, b []byte, key []byte, value []byte) ([]byte, error) {
var (
cs *changeset.ChangeSet
err error
)
if bytes.Equal(hb, dbutils.AccountsHistoryBucket) {
cs, err = changeset.DecodeAccounts(b)
} else {
cs, err = changeset.DecodeStorage(b)
}
if err != nil {
return nil, err
}
err = cs.Add(key, value)
if err != nil {
return nil, err
}
if bytes.Equal(hb, dbutils.AccountsHistoryBucket) {
return changeset.EncodeAccounts(cs)
} else {
return changeset.EncodeStorage(cs)
}
}

View File

@ -31,11 +31,6 @@ var ErrKeyNotFound = errors.New("db: key not found")
type Putter interface {
// Put inserts or updates a single entry.
Put(bucket, key, value []byte) error
// PutS adds a new entry to the historical buckets:
// hBucket (unless changeSetBucketOnly) and ChangeSet.
// timestamp == block number
PutS(hBucket, key, value []byte, timestamp uint64, changeSetBucketOnly bool) error
}
// Getter wraps the database read operations.
@ -68,10 +63,6 @@ type Getter interface {
type Deleter interface {
// Delete removes a single entry.
Delete(bucket, key []byte) error
// DeleteTimestamp removes data for a given timestamp from all historical buckets (incl. ChangeSet).
// timestamp == block number
DeleteTimestamp(timestamp uint64) error
}
// Database wraps all database operations. All methods are safe for concurrent use.

View File

@ -1,7 +1,6 @@
package ethdb
import (
"bytes"
"fmt"
"sort"
"sync"
@ -9,9 +8,6 @@ import (
"github.com/ledgerwatch/bolt"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/common/changeset"
"github.com/ledgerwatch/turbo-geth/common/dbutils"
"github.com/ledgerwatch/turbo-geth/common/debug"
)
type puts map[string]putsBucket //map[bucket]putsBucket
@ -20,7 +16,7 @@ func newPuts() puts {
return make(puts)
}
func (p puts) Set(bucket, key, value []byte) {
func (p puts) set(bucket, key, value []byte) {
var bucketPuts putsBucket
var ok bool
if bucketPuts, ok = p[string(bucket)]; !ok {
@ -31,21 +27,7 @@ func (p puts) Set(bucket, key, value []byte) {
}
func (p puts) Delete(bucket, key []byte) {
p.Set(bucket, key, nil)
}
func (p puts) SetStr(bucket string, key, value []byte) {
var bucketPuts putsBucket
var ok bool
if bucketPuts, ok = p[bucket]; !ok {
bucketPuts = make(putsBucket)
p[bucket] = bucketPuts
}
bucketPuts[string(key)] = value
}
func (p puts) DeleteStr(bucket string, key []byte) {
p.SetStr(bucket, key, nil)
p.set(bucket, key, nil)
}
func (p puts) Size() int {
@ -86,9 +68,6 @@ func (pb putsBucket) GetStr(key string) ([]byte, bool) {
type mutation struct {
puts puts // Map buckets to map[key]value
//map[blockNumber]listOfChangedKeys
accountChangeSetByBlock map[uint64]*changeset.ChangeSet
storageChangeSetByBlock map[uint64]*changeset.ChangeSet
mu sync.RWMutex
db Database
}
@ -125,31 +104,6 @@ func (m *mutation) Get(bucket, key []byte) ([]byte, error) {
return nil, ErrKeyNotFound
}
func (m *mutation) getChangeSetByBlockNoLock(bucket []byte, timestamp uint64) *changeset.ChangeSet {
switch {
case bytes.Equal(bucket, dbutils.AccountsHistoryBucket):
if _, ok := m.accountChangeSetByBlock[timestamp]; !ok {
if debug.IsThinHistory() {
m.accountChangeSetByBlock[timestamp] = changeset.NewAccountChangeSet()
} else {
m.accountChangeSetByBlock[timestamp] = changeset.NewChangeSet()
}
}
return m.accountChangeSetByBlock[timestamp]
case bytes.Equal(bucket, dbutils.StorageHistoryBucket):
if _, ok := m.storageChangeSetByBlock[timestamp]; !ok {
if debug.IsThinHistory() {
m.storageChangeSetByBlock[timestamp] = changeset.NewStorageChangeSet()
} else {
m.storageChangeSetByBlock[timestamp] = changeset.NewChangeSet()
}
}
return m.storageChangeSetByBlock[timestamp]
default:
panic("incorrect bucket")
}
}
func (m *mutation) getNoLock(bucket, key []byte) ([]byte, error) {
if t, ok := m.puts[string(bucket)]; ok {
if value, ok := t.Get(key); ok {
@ -193,31 +147,7 @@ func (m *mutation) Put(bucket, key []byte, value []byte) error {
m.mu.Lock()
defer m.mu.Unlock()
m.puts.Set(bucket, key, value)
return nil
}
// Assumes that bucket, key, and value won't be modified
func (m *mutation) PutS(hBucket, key, value []byte, timestamp uint64, noHistory bool) error {
//fmt.Printf("PutS bucket %x key %x value %x timestamp %d\n", bucket, key, value, timestamp)
m.mu.Lock()
defer m.mu.Unlock()
changeSet := m.getChangeSetByBlockNoLock(hBucket, timestamp)
err := changeSet.Add(key, value)
if err != nil {
return err
}
if noHistory {
return nil
}
if !debug.IsThinHistory() {
composite, _ := dbutils.CompositeKeySuffix(key, timestamp)
m.puts.Set(hBucket, composite, value)
}
m.puts.set(bucket, key, value)
return nil
}
@ -226,7 +156,7 @@ func (m *mutation) MultiPut(tuples ...[]byte) (uint64, error) {
defer m.mu.Unlock()
l := len(tuples)
for i := 0; i < l; i += 3 {
m.puts.Set(tuples[i], tuples[i+1], tuples[i+2])
m.puts.set(tuples[i], tuples[i+1], tuples[i+2])
}
return 0, nil
}
@ -280,98 +210,6 @@ func (m *mutation) Delete(bucket, key []byte) error {
return nil
}
// Deletes all keys with specified suffix(blockNum) from all the buckets
func (m *mutation) DeleteTimestamp(timestamp uint64) error {
changeSetKey := dbutils.EncodeTimestamp(timestamp)
changedAccounts, err := m.Get(dbutils.AccountChangeSetBucket, changeSetKey)
if err != nil && err != ErrKeyNotFound {
return err
}
changedStorage, err := m.Get(dbutils.StorageChangeSetBucket, changeSetKey)
if err != nil && err != ErrKeyNotFound {
return err
}
m.mu.Lock()
defer m.mu.Unlock()
if debug.IsThinHistory() {
if len(changedAccounts) > 0 {
innerErr := changeset.AccountChangeSetBytes(changedAccounts).Walk(func(kk, _ []byte) error {
indexBytes, getErr := m.getNoLock(dbutils.AccountsHistoryBucket, kk)
if getErr != nil {
return nil
}
index := dbutils.WrapHistoryIndex(indexBytes)
index.Remove(timestamp)
if index.Len() == 0 {
m.puts.DeleteStr(string(dbutils.AccountsHistoryBucket), kk)
} else {
m.puts.SetStr(string(dbutils.AccountsHistoryBucket), kk, *index)
}
return nil
})
if innerErr != nil {
return innerErr
}
m.puts.DeleteStr(string(dbutils.AccountChangeSetBucket), changeSetKey)
}
if len(changedStorage) > 0 {
innerErr := changeset.StorageChangeSetBytes(changedStorage).Walk(func(kk, _ []byte) error {
indexBytes, getErr := m.getNoLock(dbutils.StorageHistoryBucket, kk)
if getErr != nil {
return nil
}
index := dbutils.WrapHistoryIndex(indexBytes)
index.Remove(timestamp)
if index.Len() == 0 {
m.puts.DeleteStr(string(dbutils.StorageHistoryBucket), kk)
} else {
m.puts.SetStr(string(dbutils.StorageHistoryBucket), kk, *index)
}
return nil
})
if innerErr != nil {
return innerErr
}
m.puts.DeleteStr(string(dbutils.StorageChangeSetBucket), changeSetKey)
}
} else {
if len(changedAccounts) > 0 {
innerErr := changeset.Walk(changedAccounts, func(kk, _ []byte) error {
composite, _ := dbutils.CompositeKeySuffix(kk, timestamp)
m.puts.DeleteStr(string(dbutils.AccountsHistoryBucket), composite)
return nil
})
if innerErr != nil {
return innerErr
}
m.puts.DeleteStr(string(dbutils.AccountChangeSetBucket), changeSetKey)
}
if len(changedStorage) > 0 {
innerErr := changeset.Walk(changedStorage, func(kk, _ []byte) error {
composite, _ := dbutils.CompositeKeySuffix(kk, timestamp)
m.puts.DeleteStr(string(dbutils.StorageHistoryBucket), composite)
return nil
})
if innerErr != nil {
return innerErr
}
m.puts.DeleteStr(string(dbutils.StorageChangeSetBucket), changeSetKey)
}
}
return nil
}
func (m *mutation) Commit() (uint64, error) {
if m.db == nil {
return 0, nil
@ -379,92 +217,6 @@ func (m *mutation) Commit() (uint64, error) {
m.mu.Lock()
defer m.mu.Unlock()
// we need sorted timestamps for thin history index
accountTimestamps := make([]uint64, 0)
for ts := range m.accountChangeSetByBlock {
accountTimestamps = append(accountTimestamps, ts)
}
sort.Slice(accountTimestamps, func(i, j int) bool { return accountTimestamps[i] < accountTimestamps[j] })
for _, timestamp := range accountTimestamps {
changes := m.accountChangeSetByBlock[timestamp]
sort.Sort(changes)
if debug.IsThinHistory() {
changedKeys := changes.ChangedKeys()
for k := range changedKeys {
key := []byte(k)
value, err := m.getNoLock(dbutils.AccountsHistoryBucket, key)
if err != nil && err != ErrKeyNotFound {
return 0, fmt.Errorf("db.Get failed: %w", err)
}
index := dbutils.WrapHistoryIndex(value)
index.Append(timestamp)
m.puts.Set(dbutils.AccountsHistoryBucket, key, *index)
}
}
var (
dat []byte
err error
)
if debug.IsThinHistory() {
dat, err = changeset.EncodeAccounts(changes)
} else {
dat, err = changeset.EncodeChangeSet(changes)
}
if err != nil {
return 0, err
}
m.puts.Set(dbutils.AccountChangeSetBucket, dbutils.EncodeTimestamp(timestamp), dat)
}
storageTimestamps := make([]uint64, 0)
for ts := range m.storageChangeSetByBlock {
storageTimestamps = append(storageTimestamps, ts)
}
sort.Slice(storageTimestamps, func(i, j int) bool { return storageTimestamps[i] < storageTimestamps[j] })
for _, timestamp := range storageTimestamps {
changes := m.storageChangeSetByBlock[timestamp]
sort.Sort(changes)
var (
dat []byte
err error
)
if debug.IsThinHistory() {
changedKeys := changes.ChangedKeys()
for k := range changedKeys {
key := []byte(k)
value, innerErr := m.getNoLock(dbutils.StorageHistoryBucket, key)
if innerErr != nil && innerErr != ErrKeyNotFound {
return 0, fmt.Errorf("db.Get failed: %w", innerErr)
}
index := dbutils.WrapHistoryIndex(value)
index.Append(timestamp)
m.puts.Set(dbutils.StorageHistoryBucket, key, *index)
}
dat, err = changeset.EncodeStorage(changes)
if err != nil {
return 0, err
}
} else {
dat, err = changeset.EncodeChangeSet(changes)
if err != nil {
return 0, err
}
}
m.puts.Set(dbutils.StorageChangeSetBucket, dbutils.EncodeTimestamp(timestamp), dat)
}
m.accountChangeSetByBlock = make(map[uint64]*changeset.ChangeSet)
m.storageChangeSetByBlock = make(map[uint64]*changeset.ChangeSet)
tuples := common.NewTuples(m.puts.Size(), 3, 1)
for bucketStr, bt := range m.puts {
bucketB := []byte(bucketStr)
@ -488,8 +240,6 @@ func (m *mutation) Commit() (uint64, error) {
func (m *mutation) Rollback() {
m.mu.Lock()
defer m.mu.Unlock()
m.accountChangeSetByBlock = make(map[uint64]*changeset.ChangeSet)
m.storageChangeSetByBlock = make(map[uint64]*changeset.ChangeSet)
m.puts = make(puts)
}
@ -517,8 +267,6 @@ func (m *mutation) NewBatch() DbWithPendingMutations {
mm := &mutation{
db: m,
puts: newPuts(),
accountChangeSetByBlock: make(map[uint64]*changeset.ChangeSet),
storageChangeSetByBlock: make(map[uint64]*changeset.ChangeSet),
}
return mm
}
@ -563,7 +311,6 @@ type RWCounterDecorator struct {
type DBCounterStats struct {
Put uint64
PutS uint64
Get uint64
GetS uint64
GetAsOf uint64
@ -573,7 +320,6 @@ type DBCounterStats struct {
MultiWalk uint64
MultiWalkAsOf uint64
Delete uint64
DeleteTimestamp uint64
MultiPut uint64
}
@ -582,10 +328,6 @@ func (d *RWCounterDecorator) Put(bucket, key, value []byte) error {
return d.Database.Put(bucket, key, value)
}
func (d *RWCounterDecorator) PutS(hBucket, key, value []byte, timestamp uint64, changeSetBucketOnly bool) error {
atomic.AddUint64(&d.DBCounterStats.PutS, 1)
return d.Database.PutS(hBucket, key, value, timestamp, changeSetBucketOnly)
}
func (d *RWCounterDecorator) Get(bucket, key []byte) ([]byte, error) {
atomic.AddUint64(&d.DBCounterStats.Get, 1)
return d.Database.Get(bucket, key)
@ -619,10 +361,6 @@ func (d *RWCounterDecorator) Delete(bucket, key []byte) error {
atomic.AddUint64(&d.DBCounterStats.Delete, 1)
return d.Database.Delete(bucket, key)
}
func (d *RWCounterDecorator) DeleteTimestamp(timestamp uint64) error {
atomic.AddUint64(&d.DBCounterStats.DeleteTimestamp, 1)
return d.Database.DeleteTimestamp(timestamp)
}
func (d *RWCounterDecorator) MultiPut(tuples ...[]byte) (uint64, error) {
atomic.AddUint64(&d.DBCounterStats.MultiPut, 1)
return d.Database.MultiPut(tuples...)
@ -634,8 +372,6 @@ func (d *RWCounterDecorator) NewBatch() DbWithPendingMutations {
mm := &mutation{
db: d,
puts: newPuts(),
accountChangeSetByBlock: make(map[uint64]*changeset.ChangeSet),
storageChangeSetByBlock: make(map[uint64]*changeset.ChangeSet),
}
return mm
}

View File

@ -103,7 +103,7 @@ func (t *BlockTest) Run() error {
// import pre accounts & construct test genesis block & state root
db := ethdb.NewMemDatabase()
gblock, _, err := t.genesis(config).Commit(db)
gblock, _, err := t.genesis(config).Commit(db, false /* history */)
if err != nil {
return err
}

View File

@ -14,6 +14,7 @@ import (
"github.com/ledgerwatch/turbo-geth/accounts/abi/bind/backends"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/common/dbutils"
"github.com/ledgerwatch/turbo-geth/common/debug"
"github.com/ledgerwatch/turbo-geth/consensus/ethash"
"github.com/ledgerwatch/turbo-geth/core"
"github.com/ledgerwatch/turbo-geth/core/types"
@ -27,6 +28,9 @@ import (
// It generates several blocks with money transfer, checks that it's correct
// than prune two times with database state and history checks
func TestBasisAccountPruning(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
// Configure and generate a sample block chain
var (
db = ethdb.NewMemDatabase()
@ -181,6 +185,9 @@ func TestBasisAccountPruning(t *testing.T) {
// It generates several blocks with money transfer, with noHistory flag enabled, checks that history not saved, but changeset exesists for every block
// than prune two times with database state and history checks
func TestBasisAccountPruningNoHistory(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
// Configure and generate a sample block chain
var (
db = ethdb.NewMemDatabase()
@ -336,6 +343,9 @@ func TestBasisAccountPruningNoHistory(t *testing.T) {
// It deploys simple contract and makes several state changes, checks that state and history is correct,
// than prune to numBlock-1 with database state and history checks
func TestStoragePruning(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
// Configure and generate a sample block chain
var (
db = ethdb.NewMemDatabase()
@ -530,6 +540,9 @@ func TestStoragePruning(t *testing.T) {
//Simple E2E test that starts pruning an inserts blocks
func TestBasisAccountPruningStrategy(t *testing.T) {
if debug.IsThinHistory() {
t.Skip()
}
// Configure and generate a sample block chain
var (
db = ethdb.NewMemDatabase()

View File

@ -150,7 +150,7 @@ func (t *StateTest) Run(ctx context.Context, subtest StateSubtest, vmconfig vm.C
if !ok {
return nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
}
block, _, _, _ := t.genesis(config).ToBlock(nil)
block, _, _, _ := t.genesis(config).ToBlock(nil, false /* history */)
readBlockNr := block.Number().Uint64()
writeBlockNr := readBlockNr + 1
ctx = config.WithEIPsFlags(ctx, big.NewInt(int64(writeBlockNr)))