erigon-pulse/core/state/db_state_writer.go
ledgerwatch 5ea590c18e
State cache switching writes to reads during commit (#1368)

Co-authored-by: Alexey Sharp <alexeysharp@Alexeys-iMac.local>
2020-12-08 09:44:29 +00:00


package state

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"

	"github.com/RoaringBitmap/roaring/roaring64"
	"github.com/holiman/uint256"

	"github.com/ledgerwatch/turbo-geth/common"
	"github.com/ledgerwatch/turbo-geth/common/changeset"
	"github.com/ledgerwatch/turbo-geth/common/dbutils"
	"github.com/ledgerwatch/turbo-geth/common/math"
	"github.com/ledgerwatch/turbo-geth/core/rawdb"
	"github.com/ledgerwatch/turbo-geth/core/types/accounts"
	"github.com/ledgerwatch/turbo-geth/ethdb"
	"github.com/ledgerwatch/turbo-geth/ethdb/bitmapdb"
	"github.com/ledgerwatch/turbo-geth/turbo/trie"
)
// This type is now used in GenerateChain to generate blockchains for the tests (core/chain_makers.go).
// The main mode of operation uses PlainDbStateWriter instead.
var _ WriterWithChangeSets = (*DbStateWriter)(nil)

func NewDbStateWriter(db ethdb.Database, blockNr uint64) *DbStateWriter {
	return &DbStateWriter{
		db:      db,
		blockNr: blockNr,
		pw:      &PreimageWriter{db: db, savePreimages: false},
		csw:     NewChangeSetWriter(),
	}
}

type DbStateWriter struct {
	db      ethdb.Database
	pw      *PreimageWriter
	blockNr uint64
	csw     *ChangeSetWriter
}

func (dsw *DbStateWriter) ChangeSetWriter() *ChangeSetWriter {
	return dsw.csw
}

// originalAccountData returns the storage encoding of the original account.
// If omitHashes is true, the code hash and storage root are reset to their empty
// values before encoding.
func originalAccountData(original *accounts.Account, omitHashes bool) []byte {
	var originalData []byte
	if !original.Initialised {
		originalData = []byte{}
	} else if omitHashes {
		testAcc := original.SelfCopy()
		copy(testAcc.CodeHash[:], emptyCodeHash)
		testAcc.Root = trie.EmptyRoot
		originalDataLen := testAcc.EncodingLengthForStorage()
		originalData = make([]byte, originalDataLen)
		testAcc.EncodeForStorage(originalData)
	} else {
		originalDataLen := original.EncodingLengthForStorage()
		originalData = make([]byte, originalDataLen)
		original.EncodeForStorage(originalData)
	}
	return originalData
}

func (dsw *DbStateWriter) UpdateAccountData(ctx context.Context, address common.Address, original, account *accounts.Account) error {
	if err := dsw.csw.UpdateAccountData(ctx, address, original, account); err != nil {
		return err
	}
	addrHash, err := dsw.pw.HashAddress(address, true /*save*/)
	if err != nil {
		return err
	}
	value := make([]byte, account.EncodingLengthForStorage())
	account.EncodeForStorage(value)
	if err := dsw.db.Put(dbutils.CurrentStateBucket, addrHash[:], value); err != nil {
		return err
	}
	return nil
}

func (dsw *DbStateWriter) DeleteAccount(ctx context.Context, address common.Address, original *accounts.Account) error {
	if err := dsw.csw.DeleteAccount(ctx, address, original); err != nil {
		return err
	}
	addrHash, err := dsw.pw.HashAddress(address, true /*save*/)
	if err != nil {
		return err
	}
	if err := rawdb.DeleteAccount(dsw.db, addrHash); err != nil {
		return err
	}
	if original.Incarnation > 0 {
		// Remember the incarnation of the deleted contract, so that a later
		// re-creation at the same address can continue from a higher incarnation.
		var b [8]byte
		binary.BigEndian.PutUint64(b[:], original.Incarnation)
		if err := dsw.db.Put(dbutils.IncarnationMapBucket, address[:], b[:]); err != nil {
			return err
		}
	}
	return nil
}
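
// A hedged sketch of how the incarnation stored above could be read back when a
// contract is later re-created at the same address; the db.Get call shape and the
// "+1" convention are assumptions, not taken from this file.
//
//	if b, err := db.Get(dbutils.IncarnationMapBucket, address[:]); err == nil && len(b) == 8 {
//		prevIncarnation := binary.BigEndian.Uint64(b)
//		_ = prevIncarnation // a re-created contract would typically start at prevIncarnation + 1
//	}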

func (dsw *DbStateWriter) UpdateAccountCode(address common.Address, incarnation uint64, codeHash common.Hash, code []byte) error {
	if err := dsw.csw.UpdateAccountCode(address, incarnation, codeHash, code); err != nil {
		return err
	}
	// save the codeHash -> code mapping
	if err := dsw.db.Put(dbutils.CodeBucket, codeHash[:], code); err != nil {
		return err
	}
	addrHash, err := common.HashData(address.Bytes())
	if err != nil {
		return err
	}
	// save the (address hash, incarnation) -> codeHash mapping
	if err := dsw.db.Put(dbutils.ContractCodeBucket, dbutils.GenerateStoragePrefix(addrHash[:], incarnation), codeHash[:]); err != nil {
		return err
	}
	return nil
}

func (dsw *DbStateWriter) WriteAccountStorage(ctx context.Context, address common.Address, incarnation uint64, key *common.Hash, original, value *uint256.Int) error {
	// Delegate to the change set writer first, to let it make its own decision
	// on whether to proceed in case *original == *value.
	if err := dsw.csw.WriteAccountStorage(ctx, address, incarnation, key, original, value); err != nil {
		return err
	}
	if *original == *value {
		return nil
	}
	seckey, err := dsw.pw.HashKey(key, true /*save*/)
	if err != nil {
		return err
	}
	addrHash, err := dsw.pw.HashAddress(address, false /*save*/)
	if err != nil {
		return err
	}
	compositeKey := dbutils.GenerateCompositeStorageKey(addrHash, incarnation, seckey)

	v := value.Bytes()
	if len(v) == 0 {
		return dsw.db.Delete(dbutils.CurrentStateBucket, compositeKey, nil)
	}
	return dsw.db.Put(dbutils.CurrentStateBucket, compositeKey, v)
}

func (dsw *DbStateWriter) CreateContract(address common.Address) error {
	if err := dsw.csw.CreateContract(address); err != nil {
		return err
	}
	if err := dsw.db.Delete(dbutils.IncarnationMapBucket, address[:], nil); err != nil {
		return err
	}
	return nil
}

// WriteChangeSets would normally cause the accumulated change sets to be written into
// the database (or batch) associated with the `dsw`; for DbStateWriter it is a no-op,
// and the change sets remain accessible via ChangeSetWriter().
func (dsw *DbStateWriter) WriteChangeSets() error {
	return nil
}

// WriteHistory writes the history indices for the accumulated account and storage
// changes at dsw.blockNr.
func (dsw *DbStateWriter) WriteHistory() error {
	accountChanges, err := dsw.csw.GetAccountChanges()
	if err != nil {
		return err
	}
	err = writeIndex(dsw.blockNr, accountChanges, dbutils.AccountsHistoryBucket, dsw.db)
	if err != nil {
		return err
	}
	storageChanges, err := dsw.csw.GetStorageChanges()
	if err != nil {
		return err
	}
	err = writeIndex(dsw.blockNr, storageChanges, dbutils.StorageHistoryBucket, dsw.db)
	if err != nil {
		return err
	}
	return nil
}

// writeIndex adds blocknum to the history index of every key touched in the given
// change set. Each index is a roaring bitmap of block numbers, stored in chunks
// keyed by the state key without its incarnation.
func writeIndex(blocknum uint64, changes *changeset.ChangeSet, bucket string, changeDb ethdb.GetterPutter) error {
	buf := bytes.NewBuffer(nil)
	for _, change := range changes.Changes {
		k := dbutils.CompositeKeyWithoutIncarnation(change.Key)
		index, err := bitmapdb.Get64(changeDb, bucket, k, 0, math.MaxUint32)
		if err != nil {
			return fmt.Errorf("find chunk failed: %w", err)
		}
		index.Add(blocknum)
		if err = bitmapdb.WalkChunkWithKeys64(k, index, bitmapdb.ChunkLimit, func(chunkKey []byte, chunk *roaring64.Bitmap) error {
			buf.Reset()
			if _, err = chunk.WriteTo(buf); err != nil {
				return err
			}
			return changeDb.Put(bucket, chunkKey, common.CopyBytes(buf.Bytes()))
		}); err != nil {
			return err
		}
	}
	return nil
}
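
// A hedged sketch of how the history index produced by writeIndex could be queried to
// find the blocks in which a particular key changed; the key form and block range are
// assumptions following the same conventions as above.
//
//	k := dbutils.CompositeKeyWithoutIncarnation(changeKey)
//	bm, err := bitmapdb.Get64(db, dbutils.AccountsHistoryBucket, k, fromBlock, toBlock)
//	if err == nil {
//		blocks := bm.ToArray() // ascending block numbers where this key was modified
//		_ = blocks
//	}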