erigon-pulse/core/state/db_state_reader.go


package state

import (
	"bytes"
	"encoding/binary"
	"errors"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ledgerwatch/turbo-geth/common"
	"github.com/ledgerwatch/turbo-geth/common/dbutils"
	"github.com/ledgerwatch/turbo-geth/core/types/accounts"
	"github.com/ledgerwatch/turbo-geth/ethdb"
)

// DbStateReader implements StateReader by wrapping the database only, without a trie.
type DbStateReader struct {
	db            ethdb.Getter
	accountCache  *fastcache.Cache
	storageCache  *fastcache.Cache
	codeCache     *fastcache.Cache
	codeSizeCache *fastcache.Cache
}

func NewDbStateReader(db ethdb.Getter) *DbStateReader {
	return &DbStateReader{
		db: db,
	}
}

// SetAccountCache, SetStorageCache, SetCodeCache and SetCodeSizeCache attach
// optional fastcache instances; when a cache is nil, the corresponding reads
// always go to the database.
func (dbr *DbStateReader) SetAccountCache(accountCache *fastcache.Cache) {
	dbr.accountCache = accountCache
}

func (dbr *DbStateReader) SetStorageCache(storageCache *fastcache.Cache) {
	dbr.storageCache = storageCache
}

func (dbr *DbStateReader) SetCodeCache(codeCache *fastcache.Cache) {
	dbr.codeCache = codeCache
}

func (dbr *DbStateReader) SetCodeSizeCache(codeSizeCache *fastcache.Cache) {
	dbr.codeSizeCache = codeSizeCache
}
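
// exampleDbStateReaderUsage is an illustrative sketch, not part of the original
// reader: it shows how a DbStateReader is typically wired with optional
// fastcache caches before serving reads. The db argument stands for any
// ethdb.Getter implementation, and the 32 MiB cache size is an arbitrary
// example, not a recommended value.
func exampleDbStateReaderUsage(db ethdb.Getter, address common.Address) (*accounts.Account, error) {
	reader := NewDbStateReader(db)
	// Each cache is optional; leaving one nil simply sends those reads to the database.
	reader.SetAccountCache(fastcache.New(32 * 1024 * 1024))
	reader.SetStorageCache(fastcache.New(32 * 1024 * 1024))
	reader.SetCodeCache(fastcache.New(32 * 1024 * 1024))
	reader.SetCodeSizeCache(fastcache.New(32 * 1024 * 1024))
	// ReadAccountData returns (nil, nil) when the account does not exist.
	return reader.ReadAccountData(address)
}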

// ReadAccountData returns the account stored under the hash of the address,
// consulting the account cache (if any) before the HashedAccountsBucket.
// A nil account with a nil error means the account does not exist.
func (dbr *DbStateReader) ReadAccountData(address common.Address) (*accounts.Account, error) {
	var enc []byte
	var ok bool
	if dbr.accountCache != nil {
		enc, ok = dbr.accountCache.HasGet(nil, address[:])
	}
	if !ok {
		var err error
		if addrHash, err1 := common.HashData(address[:]); err1 == nil {
			enc, err = dbr.db.GetOne(dbutils.HashedAccountsBucket, addrHash[:])
		} else {
			return nil, err1
		}
		if err != nil {
			return nil, err
		}
	}
	if !ok && dbr.accountCache != nil {
		dbr.accountCache.Set(address[:], enc)
	}
	if enc == nil {
		return nil, nil
	}
	acc := &accounts.Account{}
	if err := acc.DecodeForStorage(enc); err != nil {
		return nil, err
	}
	return acc, nil
}

// ReadAccountStorage returns the value of a single storage slot, looked up by
// the composite key (address hash, incarnation, secure key hash), consulting
// the storage cache (if any) before the HashedStorageBucket.
func (dbr *DbStateReader) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) {
	addrHash, err := common.HashData(address[:])
	if err != nil {
		return nil, err
	}
	seckey, err1 := common.HashData(key[:])
	if err1 != nil {
		return nil, err1
	}
	compositeKey := dbutils.GenerateCompositeStorageKey(addrHash, incarnation, seckey)
	if dbr.storageCache != nil {
		if enc, ok := dbr.storageCache.HasGet(nil, compositeKey); ok {
			return enc, nil
		}
	}
	enc, err2 := dbr.db.Get(dbutils.HashedStorageBucket, compositeKey)
	if err2 != nil && !errors.Is(err2, ethdb.ErrKeyNotFound) {
		return nil, err2
	}
	if dbr.storageCache != nil {
		dbr.storageCache.Set(compositeKey, enc)
	}
	return enc, nil
}

// ReadAccountCode returns the contract bytecode for the given code hash,
// consulting the code cache (keyed by address, if any) before the CodeBucket.
// Only code up to 1024 bytes is kept in the code cache.
func (dbr *DbStateReader) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) {
	if bytes.Equal(codeHash[:], emptyCodeHash) {
		return nil, nil
	}
	if dbr.codeCache != nil {
		if code, ok := dbr.codeCache.HasGet(nil, address[:]); ok {
			return code, nil
		}
	}
	code, err := dbr.db.Get(dbutils.CodeBucket, codeHash[:])
	if dbr.codeCache != nil && len(code) <= 1024 {
		dbr.codeCache.Set(address[:], code)
	}
	if dbr.codeSizeCache != nil {
		var b [4]byte
		binary.BigEndian.PutUint32(b[:], uint32(len(code)))
		dbr.codeSizeCache.Set(address[:], b[:])
	}
	return code, err
}

// ReadAccountCodeSize returns the length of the contract bytecode for the
// given code hash, using the code-size cache (keyed by address) when available.
func (dbr *DbStateReader) ReadAccountCodeSize(address common.Address, incarnation uint64, codeHash common.Hash) (codeSize int, err error) {
	if bytes.Equal(codeHash[:], emptyCodeHash) {
		return 0, nil
	}
	if dbr.codeSizeCache != nil {
		if b, ok := dbr.codeSizeCache.HasGet(nil, address[:]); ok {
			return int(binary.BigEndian.Uint32(b)), nil
		}
	}
	var code []byte
	code, err = dbr.db.Get(dbutils.CodeBucket, codeHash[:])
	if err != nil {
		return 0, err
	}
	if dbr.codeSizeCache != nil {
		var b [4]byte
		binary.BigEndian.PutUint32(b[:], uint32(len(code)))
		dbr.codeSizeCache.Set(address[:], b[:])
	}
	return len(code), nil
}

// ReadAccountIncarnation returns the incarnation recorded for the address in
// the IncarnationMapBucket, or 0 if no entry exists.
func (dbr *DbStateReader) ReadAccountIncarnation(address common.Address) (uint64, error) {
	if b, err := dbr.db.Get(dbutils.IncarnationMapBucket, address[:]); err == nil {
		return binary.BigEndian.Uint64(b), nil
	} else if errors.Is(err, ethdb.ErrKeyNotFound) {
		return 0, nil
	} else {
		return 0, err
	}
}