2019-05-27 13:51:49 +00:00
|
|
|
// Copyright 2019 The go-ethereum Authors
|
2017-06-27 13:57:06 +00:00
|
|
|
// This file is part of the go-ethereum library.
|
|
|
|
//
|
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
2020-04-08 05:00:31 +00:00
|
|
|
//nolint:scopelint
|
2017-06-27 13:57:06 +00:00
|
|
|
package state
|
|
|
|
|
|
|
|
import (
|
2019-05-27 13:51:49 +00:00
|
|
|
"bytes"
|
|
|
|
"context"
|
|
|
|
"encoding/binary"
|
2017-06-27 13:57:06 +00:00
|
|
|
"fmt"
|
2019-05-27 13:51:49 +00:00
|
|
|
"io"
|
|
|
|
"runtime"
|
|
|
|
"sort"
|
2019-12-10 13:12:21 +00:00
|
|
|
"sync"
|
|
|
|
"sync/atomic"
|
2017-06-27 13:57:06 +00:00
|
|
|
|
2019-05-27 13:51:49 +00:00
|
|
|
"github.com/ledgerwatch/turbo-geth/common"
|
|
|
|
"github.com/ledgerwatch/turbo-geth/common/dbutils"
|
2020-04-18 20:09:44 +00:00
|
|
|
"github.com/ledgerwatch/turbo-geth/core/rawdb"
|
2019-05-27 13:51:49 +00:00
|
|
|
"github.com/ledgerwatch/turbo-geth/core/types/accounts"
|
|
|
|
"github.com/ledgerwatch/turbo-geth/ethdb"
|
|
|
|
"github.com/ledgerwatch/turbo-geth/log"
|
|
|
|
"github.com/ledgerwatch/turbo-geth/trie"
|
2017-06-27 13:57:06 +00:00
|
|
|
)
|
|
|
|
|
2020-04-08 05:00:31 +00:00
|
|
|
// MaxTrieCacheSize is the trie cache size limit after which to evict trie nodes from memory.
// It is a variable (not a const) so that it can be tuned; NOTE(review): it is read
// concurrently elsewhere — confirm it is only mutated at startup.
var MaxTrieCacheSize = uint64(1024 * 1024)
|
2019-05-27 13:51:49 +00:00
|
|
|
|
2020-01-08 09:55:56 +00:00
|
|
|
const (
	// FirstContractIncarnation - first incarnation for contract accounts. After 1 it increases by 1.
	FirstContractIncarnation = 1
	// NonContractIncarnation - incarnation used for non-contract accounts.
	NonContractIncarnation = 0
)
|
2019-05-27 13:51:49 +00:00
|
|
|
|
|
|
|
// StateReader is an abstraction over the source of account, storage, and contract
// code data used during state execution.
type StateReader interface {
	// ReadAccountData returns the account data for the given address.
	ReadAccountData(address common.Address) (*accounts.Account, error)
	// ReadAccountStorage returns the value stored at the given key for the
	// given incarnation of the account.
	ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error)
	// ReadAccountCode returns the contract bytecode identified by codeHash.
	ReadAccountCode(address common.Address, codeHash common.Hash) ([]byte, error)
	// ReadAccountCodeSize returns the size in bytes of the bytecode identified by codeHash.
	ReadAccountCodeSize(address common.Address, codeHash common.Hash) (int, error)
	// ReadAccountIncarnation returns the incarnation number for the given address.
	ReadAccountIncarnation(address common.Address) (uint64, error)
}
|
|
|
|
|
|
|
|
// StateWriter is an abstraction over the sink that receives the effects of
// state execution: account updates/deletions, code updates, and storage writes.
type StateWriter interface {
	// UpdateAccountData records the new state of an account; `original` is the
	// pre-change state.
	UpdateAccountData(ctx context.Context, address common.Address, original, account *accounts.Account) error
	// UpdateAccountCode records new contract bytecode for the given account/incarnation.
	UpdateAccountCode(addrHash common.Hash, incarnation uint64, codeHash common.Hash, code []byte) error
	// DeleteAccount records the deletion (self-destruction) of an account.
	DeleteAccount(ctx context.Context, address common.Address, original *accounts.Account) error
	// WriteAccountStorage records a storage write; `original` is the previous value.
	WriteAccountStorage(ctx context.Context, address common.Address, incarnation uint64, key, original, value *common.Hash) error
	// CreateContract records that a contract has been created at the given address.
	CreateContract(address common.Address) error
}
|
|
|
|
|
|
|
|
// NoopWriter is a StateWriter implementation that discards every update.
type NoopWriter struct{}
|
|
|
|
|
|
|
|
func NewNoopWriter() *NoopWriter {
|
|
|
|
return &NoopWriter{}
|
|
|
|
}
|
|
|
|
|
|
|
|
// UpdateAccountData is a no-op; the update is discarded and nil is returned.
func (nw *NoopWriter) UpdateAccountData(_ context.Context, address common.Address, original, account *accounts.Account) error {
	return nil
}
|
|
|
|
|
|
|
|
// DeleteAccount is a no-op; the deletion is discarded and nil is returned.
func (nw *NoopWriter) DeleteAccount(_ context.Context, address common.Address, original *accounts.Account) error {
	return nil
}
|
|
|
|
|
2019-12-20 12:25:40 +00:00
|
|
|
// UpdateAccountCode is a no-op; the code update is discarded and nil is returned.
func (nw *NoopWriter) UpdateAccountCode(addrHash common.Hash, incarnation uint64, codeHash common.Hash, code []byte) error {
	return nil
}
|
|
|
|
|
|
|
|
// WriteAccountStorage is a no-op; the storage write is discarded and nil is returned.
func (nw *NoopWriter) WriteAccountStorage(_ context.Context, address common.Address, incarnation uint64, key, original, value *common.Hash) error {
	return nil
}
|
|
|
|
|
2020-05-02 18:00:42 +00:00
|
|
|
// CreateContract is a no-op; the creation event is discarded and nil is returned.
func (nw *NoopWriter) CreateContract(address common.Address) error {
	return nil
}
|
|
|
|
|
|
|
|
// Structure holding updates, deletes, and reads registered within one change period
// A change period can be transaction within a block, or a block within group of blocks
type Buffer struct {
	// codeReads maps address hash -> code hash for contracts whose code was read.
	codeReads map[common.Hash]common.Hash
	// codeSizeReads maps address hash -> code hash for contracts whose code size was read.
	codeSizeReads map[common.Hash]common.Hash
	// codeUpdates maps address hash -> new bytecode for contracts whose code was updated.
	codeUpdates map[common.Hash][]byte
	// storageUpdates structure collects the effects of the block (or transaction) execution. It does not necessarily
	// include all the intermediate reads and write that happened. For example, if the storage of some contract has
	// been modified, and then the contract has subsequently self-destructed, this structure will not contain any
	// keys related to the storage of this contract, because they are irrelevant for the final state
	storageUpdates map[common.Hash]map[common.Hash][]byte
	// storageReads structure collects all the keys of items that have been modified (or also just read, if the
	// tds.resolveReads flag is turned on, which happens during the generation of block witnesses).
	// Even if the final results of the execution do not include some items, they will still be present in this structure.
	// For example, if the storage of some contract has been modified, and then the contract has subsequently self-destructed,
	// this structure will contain all the keys that have been modified or deleted prior to the self-destruction.
	// It is important to keep them because they will be used to apply changes to the trie one after another.
	// There is a potential for optimisation - we may actually skip all the intermediate modification of the trie if
	// we know that in the end, the entire storage will be dropped. However, this optimisation has not yet been
	// implemented.
	storageReads map[common.Hash]map[common.Hash]struct{}
	// accountUpdates structure collects the effects of the block (or transaction) execution.
	accountUpdates map[common.Hash]*accounts.Account
	// accountReads structure collects all the address hashes of the accounts that have been modified (or also just read,
	// if tds.resolveReads flag is turned on, which happens during the generation of block witnesses).
	accountReads map[common.Hash]struct{}
	// deleted is the set of address hashes of accounts deleted (self-destructed) in this period.
	deleted map[common.Hash]struct{}
	// created is the set of address hashes of accounts (re)created in this period.
	created map[common.Hash]struct{}
}
|
|
|
|
|
|
|
|
// Prepares buffer for work or clears previous data
|
|
|
|
func (b *Buffer) initialise() {
|
2020-03-23 22:10:36 +00:00
|
|
|
b.codeReads = make(map[common.Hash]common.Hash)
|
2020-04-08 11:47:18 +00:00
|
|
|
b.codeSizeReads = make(map[common.Hash]common.Hash)
|
2020-03-23 22:10:36 +00:00
|
|
|
b.codeUpdates = make(map[common.Hash][]byte)
|
2019-11-15 22:48:49 +00:00
|
|
|
b.storageUpdates = make(map[common.Hash]map[common.Hash][]byte)
|
|
|
|
b.storageReads = make(map[common.Hash]map[common.Hash]struct{})
|
2019-05-27 13:51:49 +00:00
|
|
|
b.accountUpdates = make(map[common.Hash]*accounts.Account)
|
|
|
|
b.accountReads = make(map[common.Hash]struct{})
|
2019-11-15 22:48:49 +00:00
|
|
|
b.deleted = make(map[common.Hash]struct{})
|
|
|
|
b.created = make(map[common.Hash]struct{})
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
2017-06-27 13:57:06 +00:00
|
|
|
|
2019-05-27 13:51:49 +00:00
|
|
|
// Replaces account pointer with pointers to the copies
|
|
|
|
func (b *Buffer) detachAccounts() {
|
|
|
|
for addrHash, account := range b.accountUpdates {
|
|
|
|
if account != nil {
|
2019-12-20 12:25:40 +00:00
|
|
|
b.accountUpdates[addrHash] = account.SelfCopy()
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Merges the content of another buffer into this one. The order of operations
// matters: deletions and creations from `other` cancel pending storage/code
// updates first (including code updates just merged from `other` above), and
// only then are `other`'s own storage/account updates applied on top.
func (b *Buffer) merge(other *Buffer) {
	for addrHash, codeHash := range other.codeReads {
		b.codeReads[addrHash] = codeHash
	}

	for addrHash, code := range other.codeUpdates {
		b.codeUpdates[addrHash] = code
	}

	for address, codeHash := range other.codeSizeReads {
		b.codeSizeReads[address] = codeHash
	}

	// Self-destructed accounts: drop any accumulated storage/code updates, since
	// they are irrelevant for the final state.
	for addrHash := range other.deleted {
		b.deleted[addrHash] = struct{}{}
		delete(b.storageUpdates, addrHash)
		delete(b.codeUpdates, addrHash)
	}
	// (Re)created accounts start with empty storage, so older storage updates are dropped.
	for addrHash := range other.created {
		b.created[addrHash] = struct{}{}
		delete(b.storageUpdates, addrHash)
	}
	for addrHash, om := range other.storageUpdates {
		m, ok := b.storageUpdates[addrHash]
		if !ok {
			m = make(map[common.Hash][]byte)
			b.storageUpdates[addrHash] = m
		}
		for keyHash, v := range om {
			m[keyHash] = v
		}
	}
	for addrHash, om := range other.storageReads {
		m, ok := b.storageReads[addrHash]
		if !ok {
			m = make(map[common.Hash]struct{})
			b.storageReads[addrHash] = m
		}
		for keyHash := range om {
			m[keyHash] = struct{}{}
		}
	}
	for addrHash, account := range other.accountUpdates {
		b.accountUpdates[addrHash] = account
	}
	for addrHash := range other.accountReads {
		b.accountReads[addrHash] = struct{}{}
	}
}
|
|
|
|
|
|
|
|
// TrieDbState implements StateReader by wrapping a trie and a database, where trie acts as a cache for the database
type TrieDbState struct {
	t               *trie.Trie  // state trie, used as an in-memory cache in front of db
	tMu             *sync.Mutex // guards access to t
	db              ethdb.Database
	blockNr         uint64
	buffers         []*Buffer // one buffer per change period (e.g. per transaction)
	aggregateBuffer *Buffer   // Merge of all buffers
	currentBuffer   *Buffer
	historical      bool
	noHistory       bool
	resolveReads    bool // when true, reads are also tracked for witness generation
	retainListBuilder *trie.RetainListBuilder
	tp                *trie.Eviction
	newStream         trie.Stream
	hashBuilder       *trie.HashBuilder
	loader            *trie.SubTrieLoader // lazily created/reused by resolveCodeTouches
	pw                *PreimageWriter
	incarnationMap    map[common.Address]uint64 // Temporary map of incarnation in case we cannot figure out from the database
}
|
|
|
|
|
2020-03-26 13:21:15 +00:00
|
|
|
// NewTrieDbState creates a TrieDbState for the given state root, backing database,
// and block number, attaching the trie eviction manager and intermediate-hash
// writer as trie observers.
func NewTrieDbState(root common.Hash, db ethdb.Database, blockNr uint64) *TrieDbState {
	t := trie.New(root)
	tp := trie.NewEviction()

	tds := &TrieDbState{
		t:                 t,
		tMu:               new(sync.Mutex),
		db:                db,
		blockNr:           blockNr,
		retainListBuilder: trie.NewRetainListBuilder(),
		tp:                tp,
		pw:                &PreimageWriter{db: db, savePreimages: true},
		hashBuilder:       trie.NewHashBuilder(false),
		incarnationMap:    make(map[common.Address]uint64),
	}

	tp.SetBlockNumber(blockNr)

	// Observers are notified of trie mutations: tp for eviction bookkeeping,
	// NewIntermediateHashes for persisting intermediate hashes.
	t.AddObserver(tp)
	t.AddObserver(NewIntermediateHashes(tds.db, tds.db))

	return tds
}
|
|
|
|
|
2019-12-06 11:19:00 +00:00
|
|
|
// EnablePreimages toggles whether hash preimages are saved by the preimage writer.
func (tds *TrieDbState) EnablePreimages(ep bool) {
	tds.pw.SetSavePreimages(ep)
}
|
|
|
|
|
2019-05-27 13:51:49 +00:00
|
|
|
// SetHistorical toggles historical mode.
func (tds *TrieDbState) SetHistorical(h bool) {
	tds.historical = h
}
|
|
|
|
|
2019-05-27 13:51:49 +00:00
|
|
|
// SetResolveReads toggles tracking of reads for block witness generation.
func (tds *TrieDbState) SetResolveReads(rr bool) {
	tds.resolveReads = rr
}
|
|
|
|
|
2019-05-27 13:51:49 +00:00
|
|
|
// SetNoHistory toggles suppression of history writes.
func (tds *TrieDbState) SetNoHistory(nh bool) {
	tds.noHistory = nh
}
|
2020-02-12 13:52:59 +00:00
|
|
|
|
2019-05-27 13:51:49 +00:00
|
|
|
// Copy creates a partial copy of the TrieDbState: the trie struct is copied by
// value (shallow copy, taken under tMu) and fresh eviction, preimage-writer, and
// hash-builder state is created. Pending buffers, the retain list builder, and
// the historical/noHistory/resolveReads flags are NOT carried over.
func (tds *TrieDbState) Copy() *TrieDbState {
	tds.tMu.Lock()
	tcopy := *tds.t
	tds.tMu.Unlock()

	n := tds.getBlockNr()
	tp := trie.NewEviction()
	tp.SetBlockNumber(n)

	cpy := TrieDbState{
		t:              &tcopy,
		tMu:            new(sync.Mutex),
		db:             tds.db,
		blockNr:        n,
		tp:             tp,
		pw:             &PreimageWriter{db: tds.db, savePreimages: true},
		hashBuilder:    trie.NewHashBuilder(false),
		incarnationMap: make(map[common.Address]uint64),
	}

	// Attach observers to the copied trie, mirroring NewTrieDbState.
	cpy.t.AddObserver(tp)
	cpy.t.AddObserver(NewIntermediateHashes(cpy.db, cpy.db))

	return &cpy
}
|
|
|
|
|
|
|
|
// Database returns the backing database.
func (tds *TrieDbState) Database() ethdb.Database {
	return tds.db
}
|
|
|
|
|
|
|
|
// Trie returns the underlying state trie. NOTE(review): the trie is returned
// without taking tMu — callers must coordinate their own locking.
func (tds *TrieDbState) Trie() *trie.Trie {
	return tds.t
}
|
|
|
|
|
2019-05-27 13:51:49 +00:00
|
|
|
// StartNewBuffer closes out the current buffer (merging it into the aggregate
// buffer and detaching its account pointers so later mutations don't leak back)
// and opens a fresh current buffer for the next change period.
func (tds *TrieDbState) StartNewBuffer() {
	if tds.currentBuffer != nil {
		if tds.aggregateBuffer == nil {
			tds.aggregateBuffer = &Buffer{}
			tds.aggregateBuffer.initialise()
		}
		tds.aggregateBuffer.merge(tds.currentBuffer)
		tds.currentBuffer.detachAccounts()
	}
	tds.currentBuffer = &Buffer{}
	tds.currentBuffer.initialise()
	tds.buffers = append(tds.buffers, tds.currentBuffer)
}
|
|
|
|
|
2019-12-10 13:12:21 +00:00
|
|
|
// WithNewBuffer returns a new TrieDbState that SHARES the trie and its mutex
// (and the retain list builder and eviction state) with the receiver, but has
// fresh, empty current/aggregate buffers of its own.
func (tds *TrieDbState) WithNewBuffer() *TrieDbState {
	aggregateBuffer := &Buffer{}
	aggregateBuffer.initialise()

	currentBuffer := &Buffer{}
	currentBuffer.initialise()

	buffers := []*Buffer{currentBuffer}

	tds.tMu.Lock()
	t := &TrieDbState{
		t:                 tds.t,   // shared trie
		tMu:               tds.tMu, // shared mutex guarding the shared trie
		db:                tds.db,
		blockNr:           tds.getBlockNr(),
		buffers:           buffers,
		aggregateBuffer:   aggregateBuffer,
		currentBuffer:     currentBuffer,
		historical:        tds.historical,
		noHistory:         tds.noHistory,
		resolveReads:      tds.resolveReads,
		retainListBuilder: tds.retainListBuilder,
		tp:                tds.tp,
		pw:                tds.pw,
		hashBuilder:       trie.NewHashBuilder(false),
		incarnationMap:    make(map[common.Address]uint64),
	}
	tds.tMu.Unlock()

	return t
}
|
|
|
|
|
2019-05-27 13:51:49 +00:00
|
|
|
// LastRoot returns the current root hash of the state trie. It returns the zero
// hash when the receiver is nil or not fully initialised (tMu unset).
func (tds *TrieDbState) LastRoot() common.Hash {
	if tds == nil || tds.tMu == nil {
		return common.Hash{}
	}
	tds.tMu.Lock()
	defer tds.tMu.Unlock()
	return tds.t.Hash()
}
|
|
|
|
|
2019-10-21 12:54:47 +00:00
|
|
|
// ComputeTrieRoots is a combination of `ResolveStateTrie` and `UpdateStateTrie`
// DESCRIBED: docs/programmers_guide/guide.md#organising-ethereum-state-into-a-merkle-tree
func (tds *TrieDbState) ComputeTrieRoots() ([]common.Hash, error) {
	if _, err := tds.ResolveStateTrie(false, false); err != nil {
		return nil, err
	}
	return tds.UpdateStateTrie()
}
|
|
|
|
|
|
|
|
// UpdateStateTrie assumes that the state trie is already fully resolved, i.e. any operations
// will find necessary data inside the trie. It applies the buffered updates,
// clears the buffers, and returns the resulting roots.
func (tds *TrieDbState) UpdateStateTrie() ([]common.Hash, error) {
	tds.tMu.Lock()
	defer tds.tMu.Unlock()

	roots, err := tds.updateTrieRoots(true)
	// Buffers are cleared even when updateTrieRoots fails.
	tds.clearUpdates()
	return roots, err
}
|
|
|
|
|
|
|
|
// PrintTrie writes a textual dump of the state trie to w (holding tMu for the duration).
func (tds *TrieDbState) PrintTrie(w io.Writer) {
	tds.tMu.Lock()
	defer tds.tMu.Unlock()
	tds.t.Print(w)
}
|
|
|
|
|
2020-04-20 22:31:17 +00:00
|
|
|
// buildStorageReads builds a sorted list of all storage key hashes that were modified
// (or also just read, if tds.resolveReads flag is turned on) within the
// period for which we are aggregating updates. It includes the keys of items that
// were nullified by subsequent updates - best example is the
// self-destruction of a contract, which nullifies all previous
// modifications of the contract's storage. In such case, all previously modified storage
// item updates would be included.
func (tds *TrieDbState) buildStorageReads() common.StorageKeys {
	storageTouches := common.StorageKeys{}
	for addrHash, m := range tds.aggregateBuffer.storageReads {
		for keyHash := range m {
			// A StorageKey is the concatenation of address hash and key hash.
			var storageKey common.StorageKey
			copy(storageKey[:], addrHash[:])
			copy(storageKey[common.HashLength:], keyHash[:])
			storageTouches = append(storageTouches, storageKey)
		}
	}
	sort.Sort(storageTouches)
	return storageTouches
}
|
|
|
|
|
|
|
|
// buildStorageWrites builds a sorted list of all storage key hashes that were modified within the
// period for which we are aggregating updates. It skips the updates that
// were nullified by subsequent updates - best example is the
// self-destruction of a contract, which nullifies all previous
// modifications of the contract's storage. In such case, no storage
// item updates would be included.
func (tds *TrieDbState) buildStorageWrites() (common.StorageKeys, [][]byte) {
	storageTouches := common.StorageKeys{}
	for addrHash, m := range tds.aggregateBuffer.storageUpdates {
		for keyHash := range m {
			// A StorageKey is the concatenation of address hash and key hash.
			var storageKey common.StorageKey
			copy(storageKey[:], addrHash[:])
			copy(storageKey[common.HashLength:], keyHash[:])
			storageTouches = append(storageTouches, storageKey)
		}
	}
	sort.Sort(storageTouches)
	// Second pass: gather values in the same (sorted) order as the keys.
	var addrHash common.Hash
	var keyHash common.Hash
	var values = make([][]byte, len(storageTouches))
	for i, storageKey := range storageTouches {
		copy(addrHash[:], storageKey[:])
		copy(keyHash[:], storageKey[common.HashLength:])
		values[i] = tds.aggregateBuffer.storageUpdates[addrHash][keyHash]
	}
	return storageTouches, values
}
|
|
|
|
|
|
|
|
// Populate pending block proof so that it will be sufficient for accessing all storage slots in storageTouches
|
2019-11-15 22:48:49 +00:00
|
|
|
func (tds *TrieDbState) populateStorageBlockProof(storageTouches common.StorageKeys) error { //nolint
|
|
|
|
for _, storageKey := range storageTouches {
|
2020-05-12 14:24:43 +00:00
|
|
|
tds.retainListBuilder.AddStorageTouch(storageKey[:])
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-04-08 11:47:18 +00:00
|
|
|
// buildCodeTouches returns the map (address hash -> code hash) of contract code
// reads aggregated so far.
func (tds *TrieDbState) buildCodeTouches() map[common.Hash]common.Hash {
	return tds.aggregateBuffer.codeReads
}
|
|
|
|
|
2020-04-08 11:47:18 +00:00
|
|
|
// buildCodeSizeTouches returns the map (address hash -> code hash) of contract
// code-size reads aggregated so far.
func (tds *TrieDbState) buildCodeSizeTouches() map[common.Hash]common.Hash {
	return tds.aggregateBuffer.codeSizeReads
}
|
|
|
|
|
2020-04-20 22:31:17 +00:00
|
|
|
// buildAccountReads builds a sorted list of all address hashes that were modified
|
|
|
|
// (or also just read, if tds.resolveReads flags is turned one) within the
|
|
|
|
// period for which we are aggregating update
|
|
|
|
func (tds *TrieDbState) buildAccountReads() common.Hashes {
|
|
|
|
accountTouches := common.Hashes{}
|
|
|
|
for addrHash := range tds.aggregateBuffer.accountReads {
|
|
|
|
accountTouches = append(accountTouches, addrHash)
|
|
|
|
}
|
|
|
|
sort.Sort(accountTouches)
|
|
|
|
return accountTouches
|
|
|
|
}
|
|
|
|
|
|
|
|
// buildAccountWrites builds a sorted list of all address hashes that were modified within the
|
|
|
|
// period for which we are aggregating updates.
|
2020-05-09 04:44:56 +00:00
|
|
|
func (tds *TrieDbState) buildAccountWrites() (common.Hashes, []*accounts.Account, [][]byte) {
|
2019-11-15 22:48:49 +00:00
|
|
|
accountTouches := common.Hashes{}
|
2019-11-21 15:56:39 +00:00
|
|
|
for addrHash, aValue := range tds.aggregateBuffer.accountUpdates {
|
|
|
|
if aValue != nil {
|
|
|
|
if _, ok := tds.aggregateBuffer.deleted[addrHash]; ok {
|
2020-04-20 22:31:17 +00:00
|
|
|
// This adds an extra entry that wipes out the storage of the accout in the stream
|
2019-11-21 15:56:39 +00:00
|
|
|
accountTouches = append(accountTouches, addrHash)
|
2020-05-02 18:00:57 +00:00
|
|
|
} else if _, ok1 := tds.aggregateBuffer.created[addrHash]; ok1 {
|
|
|
|
// This adds an extra entry that wipes out the storage of the accout in the stream
|
|
|
|
accountTouches = append(accountTouches, addrHash)
|
2019-11-21 15:56:39 +00:00
|
|
|
}
|
|
|
|
}
|
2019-11-15 22:48:49 +00:00
|
|
|
accountTouches = append(accountTouches, addrHash)
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
2019-11-15 22:48:49 +00:00
|
|
|
sort.Sort(accountTouches)
|
2020-05-09 04:44:56 +00:00
|
|
|
aValues := make([]*accounts.Account, len(accountTouches))
|
|
|
|
aCodes := make([][]byte, len(accountTouches))
|
2020-04-20 22:31:17 +00:00
|
|
|
for i, addrHash := range accountTouches {
|
|
|
|
if i < len(accountTouches)-1 && addrHash == accountTouches[i+1] {
|
|
|
|
aValues[i] = nil // Entry that would wipe out existing storage
|
|
|
|
} else {
|
|
|
|
a := tds.aggregateBuffer.accountUpdates[addrHash]
|
|
|
|
if a != nil {
|
|
|
|
if _, ok := tds.aggregateBuffer.storageUpdates[addrHash]; ok {
|
|
|
|
var ac accounts.Account
|
|
|
|
ac.Copy(a)
|
|
|
|
ac.Root = trie.EmptyRoot
|
|
|
|
a = &ac
|
2019-11-21 15:56:39 +00:00
|
|
|
}
|
|
|
|
}
|
2020-04-20 22:31:17 +00:00
|
|
|
aValues[i] = a
|
2020-05-09 04:44:56 +00:00
|
|
|
if code, ok := tds.aggregateBuffer.codeUpdates[addrHash]; ok {
|
|
|
|
aCodes[i] = code
|
|
|
|
}
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
|
|
|
}
|
2020-05-09 04:44:56 +00:00
|
|
|
return accountTouches, aValues, aCodes
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
|
|
|
|
2020-04-08 11:47:18 +00:00
|
|
|
func (tds *TrieDbState) resolveCodeTouches(
|
|
|
|
codeTouches map[common.Hash]common.Hash,
|
|
|
|
codeSizeTouches map[common.Hash]common.Hash,
|
2020-05-12 14:24:43 +00:00
|
|
|
loadFunc trie.LoadFunc,
|
2020-04-08 11:47:18 +00:00
|
|
|
) error {
|
2020-03-23 22:10:36 +00:00
|
|
|
firstRequest := true
|
|
|
|
for address, codeHash := range codeTouches {
|
2020-04-08 11:47:18 +00:00
|
|
|
delete(codeSizeTouches, codeHash)
|
2020-05-12 14:24:43 +00:00
|
|
|
if need, req := tds.t.NeedLoadCode(address, codeHash, true /*bytecode*/); need {
|
|
|
|
if tds.loader == nil {
|
|
|
|
tds.loader = trie.NewSubTrieLoader(tds.blockNr)
|
2020-04-08 11:47:18 +00:00
|
|
|
} else if firstRequest {
|
2020-05-12 14:24:43 +00:00
|
|
|
tds.loader.Reset(tds.blockNr)
|
2020-04-08 11:47:18 +00:00
|
|
|
}
|
|
|
|
firstRequest = false
|
2020-05-12 14:24:43 +00:00
|
|
|
tds.loader.AddCodeRequest(req)
|
2020-04-08 11:47:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for address, codeHash := range codeSizeTouches {
|
2020-05-12 14:24:43 +00:00
|
|
|
if need, req := tds.t.NeedLoadCode(address, codeHash, false /*bytecode*/); need {
|
|
|
|
if tds.loader == nil {
|
|
|
|
tds.loader = trie.NewSubTrieLoader(tds.blockNr)
|
2020-03-23 22:10:36 +00:00
|
|
|
} else if firstRequest {
|
2020-05-12 14:24:43 +00:00
|
|
|
tds.loader.Reset(tds.blockNr)
|
2020-03-23 22:10:36 +00:00
|
|
|
}
|
|
|
|
firstRequest = false
|
2020-05-12 14:24:43 +00:00
|
|
|
tds.loader.AddCodeRequest(req)
|
2020-03-23 22:10:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if !firstRequest {
|
2020-05-12 14:24:43 +00:00
|
|
|
if _, err := loadFunc(tds.loader, nil, nil, nil); err != nil {
|
2020-05-12 07:22:45 +00:00
|
|
|
return err
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
|
|
|
}
|
2020-01-30 13:16:12 +00:00
|
|
|
return nil
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
|
|
|
|
2020-05-12 14:24:43 +00:00
|
|
|
func (tds *TrieDbState) resolveAccountAndStorageTouches(accountTouches common.Hashes, storageTouches common.StorageKeys, loadFunc trie.LoadFunc) error {
|
2020-05-12 07:22:45 +00:00
|
|
|
// Build the retain list
|
2020-05-12 14:24:43 +00:00
|
|
|
rl := trie.NewRetainList(0)
|
2020-05-02 18:00:57 +00:00
|
|
|
for _, addrHash := range accountTouches {
|
2020-05-12 07:22:45 +00:00
|
|
|
var nibbles = make([]byte, 2*len(addrHash))
|
|
|
|
for i, b := range addrHash[:] {
|
|
|
|
nibbles[i*2] = b / 16
|
|
|
|
nibbles[i*2+1] = b % 16
|
|
|
|
}
|
2020-05-12 14:24:43 +00:00
|
|
|
rl.AddHex(nibbles)
|
2020-05-02 18:00:57 +00:00
|
|
|
}
|
|
|
|
for _, storageKey := range storageTouches {
|
2020-05-12 07:22:45 +00:00
|
|
|
var nibbles = make([]byte, 2*len(storageKey))
|
|
|
|
for i, b := range storageKey[:] {
|
|
|
|
nibbles[i*2] = b / 16
|
|
|
|
nibbles[i*2+1] = b % 16
|
2020-05-02 18:00:57 +00:00
|
|
|
}
|
2020-05-12 14:24:43 +00:00
|
|
|
rl.AddHex(nibbles)
|
2020-05-02 18:00:57 +00:00
|
|
|
}
|
|
|
|
|
2020-05-12 14:24:43 +00:00
|
|
|
dbPrefixes, fixedbits, hooks := tds.t.FindSubTriesToLoad(rl)
|
|
|
|
// FindSubTriesToLoad would have gone through the entire rs, so we need to rewind to the beginning
|
|
|
|
rl.Rewind()
|
|
|
|
loader := trie.NewSubTrieLoader(tds.blockNr)
|
|
|
|
subTries, err := loadFunc(loader, rl, dbPrefixes, fixedbits)
|
2020-05-12 07:22:45 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
2020-05-02 18:00:57 +00:00
|
|
|
}
|
2020-05-14 17:12:33 +00:00
|
|
|
if err := tds.t.HookSubTries(subTries, hooks); err != nil {
|
|
|
|
for i, hash := range subTries.Hashes {
|
|
|
|
log.Error("Info for error", "dbPrefix", fmt.Sprintf("%x", dbPrefixes[i]), "fixedbits", fixedbits[i], "hash", hash)
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
2020-05-02 18:00:57 +00:00
|
|
|
}
|
|
|
|
|
2019-11-15 22:48:49 +00:00
|
|
|
// populateAccountBlockProof registers all the given address hashes with the retain
// list builder so the pending block witness covers those accounts.
func (tds *TrieDbState) populateAccountBlockProof(accountTouches common.Hashes) {
	for _, addrHash := range accountTouches {
		// Copy the range variable before slicing it: the loop variable is reused
		// across iterations, so AddTouch must not be handed a slice aliasing it.
		a := addrHash
		tds.retainListBuilder.AddTouch(a[:])
	}
}
|
|
|
|
|
2019-10-21 12:54:47 +00:00
|
|
|
// ExtractTouches returns two lists of keys - for accounts and storage items correspondingly
// Each list is the collection of keys that have been "touched" (inserted, updated, or simply accessed)
// since the last invocation of `ExtractTouches`.
func (tds *TrieDbState) ExtractTouches() (accountTouches [][]byte, storageTouches [][]byte) {
	return tds.retainListBuilder.ExtractTouches()
}
|
|
|
|
|
2020-05-12 14:24:43 +00:00
|
|
|
func (tds *TrieDbState) resolveStateTrieWithFunc(loadFunc trie.LoadFunc) error {
|
2019-05-27 13:51:49 +00:00
|
|
|
// Aggregating the current buffer, if any
|
|
|
|
if tds.currentBuffer != nil {
|
|
|
|
if tds.aggregateBuffer == nil {
|
|
|
|
tds.aggregateBuffer = &Buffer{}
|
|
|
|
tds.aggregateBuffer.initialise()
|
|
|
|
}
|
|
|
|
tds.aggregateBuffer.merge(tds.currentBuffer)
|
|
|
|
}
|
|
|
|
if tds.aggregateBuffer == nil {
|
2019-10-21 12:54:47 +00:00
|
|
|
return nil
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
|
|
|
|
2019-12-10 13:12:21 +00:00
|
|
|
tds.tMu.Lock()
|
|
|
|
defer tds.tMu.Unlock()
|
|
|
|
|
2019-05-27 13:51:49 +00:00
|
|
|
// Prepare (resolve) storage tries so that actual modifications can proceed without database access
|
2020-04-20 22:31:17 +00:00
|
|
|
storageTouches := tds.buildStorageReads()
|
2019-05-27 13:51:49 +00:00
|
|
|
|
|
|
|
// Prepare (resolve) accounts trie so that actual modifications can proceed without database access
|
2020-04-20 22:31:17 +00:00
|
|
|
accountTouches := tds.buildAccountReads()
|
2020-03-23 22:10:36 +00:00
|
|
|
|
|
|
|
// Prepare (resolve) contract code reads so that actual modifications can proceed without database access
|
2020-04-08 11:47:18 +00:00
|
|
|
codeTouches := tds.buildCodeTouches()
|
|
|
|
|
|
|
|
// Prepare (resolve) contract code size reads so that actual modifications can proceed without database access
|
|
|
|
codeSizeTouches := tds.buildCodeSizeTouches()
|
2020-03-23 22:10:36 +00:00
|
|
|
|
2020-01-24 10:58:01 +00:00
|
|
|
var err error
|
2020-05-12 14:24:43 +00:00
|
|
|
if err = tds.resolveAccountAndStorageTouches(accountTouches, storageTouches, loadFunc); err != nil {
|
2019-10-21 12:54:47 +00:00
|
|
|
return err
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
2019-12-20 12:25:40 +00:00
|
|
|
|
2020-05-12 14:24:43 +00:00
|
|
|
if err = tds.resolveCodeTouches(codeTouches, codeSizeTouches, loadFunc); err != nil {
|
2020-03-23 22:10:36 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-05-27 13:51:49 +00:00
|
|
|
if tds.resolveReads {
|
|
|
|
tds.populateAccountBlockProof(accountTouches)
|
|
|
|
}
|
|
|
|
|
|
|
|
if tds.resolveReads {
|
|
|
|
if err := tds.populateStorageBlockProof(storageTouches); err != nil {
|
2019-10-21 12:54:47 +00:00
|
|
|
return err
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
|
|
|
}
|
2019-10-21 12:54:47 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-01-24 10:58:01 +00:00
|
|
|
// ResolveStateTrie resolves parts of the state trie that would be necessary for any updates
// (and reads, if `resolveReads` is set). When extractWitnesses is true, witnesses are
// additionally extracted from the loaded sub-tries and returned.
func (tds *TrieDbState) ResolveStateTrie(extractWitnesses bool, trace bool) ([]*trie.Witness, error) {
	var witnesses []*trie.Witness

	loadFunc := func(loader *trie.SubTrieLoader, rl *trie.RetainList, dbPrefixes [][]byte, fixedbits []int) (trie.SubTries, error) {
		// A nil loader means there is nothing to load.
		if loader == nil {
			return trie.SubTries{}, nil
		}
		subTries, err := loader.LoadSubTries(tds.db, tds.blockNr, rl, dbPrefixes, fixedbits, trace)
		if err != nil {
			return subTries, err
		}

		if !extractWitnesses {
			return subTries, nil
		}

		// LoadSubTries has advanced the retain list; rewind before reusing it
		// for witness extraction. `witnesses` is captured from the enclosing scope.
		rl.Rewind()
		witnesses, err = trie.ExtractWitnesses(subTries, trace, rl)
		return subTries, err
	}
	if err := tds.resolveStateTrieWithFunc(loadFunc); err != nil {
		return nil, err
	}

	return witnesses, nil
}
|
|
|
|
|
|
|
|
// ResolveStateTrieStateless uses a witness DB to resolve subtries
// instead of loading them from the state database.
func (tds *TrieDbState) ResolveStateTrieStateless(database trie.WitnessStorage) error {
	// startPos tracks the read position within the witness storage across
	// successive invocations of loadFunc.
	var startPos int64
	loadFunc := func(loader *trie.SubTrieLoader, rl *trie.RetainList, dbPrefixes [][]byte, fixedbits []int) (trie.SubTries, error) {
		// A nil loader means there is nothing to resolve.
		if loader == nil {
			return trie.SubTries{}, nil
		}

		subTries, pos, err := loader.LoadFromWitnessDb(database, tds.blockNr, uint32(MaxTrieCacheSize), startPos, len(dbPrefixes))
		if err != nil {
			return subTries, err
		}

		// Remember where this witness ended so the next call continues from there.
		startPos = pos
		return subTries, nil
	}

	return tds.resolveStateTrieWithFunc(loadFunc)
}
|
|
|
|
|
2019-11-15 22:48:49 +00:00
|
|
|
// CalcTrieRoots calculates trie roots without modifying the state trie
func (tds *TrieDbState) CalcTrieRoots(trace bool) (common.Hash, error) {
	tds.tMu.Lock()
	defer tds.tMu.Unlock()

	// Retrieve the list of inserted/updated/deleted storage items (keys and values)
	storageKeys, sValues := tds.buildStorageWrites()
	if trace {
		fmt.Printf("len(storageKeys)=%d, len(sValues)=%d\n", len(storageKeys), len(sValues))
	}
	// Retrieve the list of inserted/updated/deleted accounts (keys and values)
	accountKeys, aValues, aCodes := tds.buildAccountWrites()
	if trace {
		fmt.Printf("len(accountKeys)=%d, len(aValues)=%d\n", len(accountKeys), len(aValues))
	}
	// In trace mode, use a fresh hash builder (with tracing on) instead of the shared one.
	var hb *trie.HashBuilder
	if trace {
		hb = trie.NewHashBuilder(true)
	} else {
		hb = tds.hashBuilder
	}
	// Nothing was modified — the current trie root stands.
	if len(accountKeys) == 0 && len(storageKeys) == 0 {
		return tds.t.Hash(), nil
	}
	return trie.HashWithModifications(tds.t, accountKeys, aValues, aCodes, storageKeys, sValues, common.HashLength, &tds.newStream, hb, trace)
}
|
|
|
|
|
2019-10-21 12:54:47 +00:00
|
|
|
// updateTrieRoots applies the accumulated buffer modifications to the trie.
// forward is `true` if the function is used to progress the state forward (by adding blocks)
// forward is `false` if the function is used to rewind the state (for reorgs, for example)
func (tds *TrieDbState) updateTrieRoots(forward bool) ([]common.Hash, error) {
	accountUpdates := tds.aggregateBuffer.accountUpdates
	// Perform actual updates on the tries, and compute one trie root per buffer
	// These roots can be used to populate receipt.PostState on pre-Byzantium
	roots := make([]common.Hash, len(tds.buffers))
	for i, b := range tds.buffers {
		// For the contracts that got deleted, we clear the storage
		for addrHash := range b.deleted {
			// The only difference between Delete and DeleteSubtree is that Delete would delete accountNode too,
			// whereas DeleteSubtree will keep the accountNode, but will make the storage sub-trie empty
			tds.t.DeleteSubtree(addrHash[:])
		}
		// New contracts are being created at these addresses. Therefore, we need to clear the storage items
		// that might be remaining in the trie and figure out the next incarnations
		for addrHash := range b.created {
			// The only difference between Delete and DeleteSubtree is that Delete would delete accountNode too,
			// whereas DeleteSubtree will keep the accountNode, but will make the storage sub-trie empty
			tds.t.DeleteSubtree(addrHash[:])
		}

		// Apply account-level updates: nil account means deletion.
		for addrHash, account := range b.accountUpdates {
			if account != nil {
				//fmt.Println("updateTrieRoots b.accountUpdates", addrHash.String(), account.Incarnation)
				tds.t.UpdateAccount(addrHash[:], account)
			} else {
				tds.t.Delete(addrHash[:])
			}
		}

		for addrHash, newCode := range b.codeUpdates {
			if err := tds.t.UpdateAccountCode(addrHash[:], newCode); err != nil {
				return nil, err
			}
		}
		for addrHash, m := range b.storageUpdates {
			for keyHash, v := range m {
				cKey := dbutils.GenerateCompositeTrieKey(addrHash, keyHash)
				if len(v) > 0 {
					//fmt.Printf("Update storage trie addrHash %x, keyHash %x: %x\n", addrHash, keyHash, v)
					if forward {
						tds.t.Update(cKey, v)
					} else {
						// If rewinding, it might not be possible to execute storage item update.
						// If we rewind from the state where a contract does not exist anymore (it was self-destructed)
						// to the point where it existed (with storage), then rewinding to the point of existence
						// will not bring back the full storage trie. Instead there will be one hashNode.
						// So we probe for this situation first
						if _, ok := tds.t.Get(cKey); ok {
							tds.t.Update(cKey, v)
						}
					}
				} else {
					if forward {
						tds.t.Delete(cKey)
					} else {
						// If rewinding, it might not be possible to execute storage item update.
						// If we rewind from the state where a contract does not exist anymore (it was self-destructed)
						// to the point where it existed (with storage), then rewinding to the point of existence
						// will not bring back the full storage trie. Instead there will be one hashNode.
						// So we probe for this situation first
						if _, ok := tds.t.Get(cKey); ok {
							tds.t.Delete(cKey)
						}
					}
				}
			}

			// Refresh the storage root of the touched account, both in this
			// buffer's updates and in the aggregated updates.
			if account, ok := b.accountUpdates[addrHash]; ok && account != nil {
				ok, root := tds.t.DeepHash(addrHash[:])
				if ok {
					account.Root = root
					//fmt.Printf("(b)Set %x root for addrHash %x\n", root, addrHash)
				} else {
					//fmt.Printf("(b)Set empty root for addrHash %x\n", addrHash)
					account.Root = trie.EmptyRoot
				}
			}
			if account, ok := accountUpdates[addrHash]; ok && account != nil {
				ok, root := tds.t.DeepHash(addrHash[:])
				if ok {
					account.Root = root
					//fmt.Printf("Set %x root for addrHash %x\n", root, addrHash)
				} else {
					//fmt.Printf("Set empty root for addrHash %x\n", addrHash)
					account.Root = trie.EmptyRoot
				}
			}
		}
		roots[i] = tds.t.Hash()
	}

	return roots, nil
}
|
|
|
|
|
|
|
|
func (tds *TrieDbState) clearUpdates() {
|
|
|
|
tds.buffers = nil
|
|
|
|
tds.currentBuffer = nil
|
|
|
|
tds.aggregateBuffer = nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// SetBlockNr moves the state to the given block number and informs the
// trie eviction policy of the new block.
func (tds *TrieDbState) SetBlockNr(blockNr uint64) {
	tds.setBlockNr(blockNr)
	tds.tp.SetBlockNumber(blockNr)
}
|
|
|
|
|
2019-12-10 13:12:21 +00:00
|
|
|
// GetBlockNr returns the block number the state is currently at.
func (tds *TrieDbState) GetBlockNr() uint64 {
	return tds.getBlockNr()
}
|
|
|
|
|
2019-05-27 13:51:49 +00:00
|
|
|
// UnwindTo rewinds the state down to the given block number, replaying the
// recorded change sets in reverse: it restores account and storage values in
// the database, updates the in-memory trie accordingly, deletes the change
// sets of the unwound blocks, and truncates the history indices.
func (tds *TrieDbState) UnwindTo(blockNr uint64) error {
	//fmt.Printf("Unwind from block %d to block %d\n", tds.blockNr, blockNr)
	tds.StartNewBuffer()
	b := tds.currentBuffer

	// accountMap/storageMap hold the pre-unwind values to restore.
	accountMap, storageMap, err := tds.db.RewindData(tds.blockNr, blockNr)
	if err != nil {
		return err
	}
	for key, value := range accountMap {
		var addrHash common.Hash
		copy(addrHash[:], []byte(key))
		if len(value) > 0 {
			var acc accounts.Account
			if err := acc.DecodeForStorage(value); err != nil {
				return err
			}
			// Fetch the code hash
			if acc.Incarnation > 0 && acc.IsEmptyCodeHash() {
				if codeHash, err := tds.db.Get(dbutils.ContractCodeBucket, dbutils.GenerateStoragePrefix(addrHash[:], acc.Incarnation)); err == nil {
					copy(acc.CodeHash[:], codeHash)
				}
			}
			b.accountUpdates[addrHash] = &acc
			if err := rawdb.WriteAccount(tds.db, addrHash, acc); err != nil {
				return err
			}
		} else {
			// Empty value means the account did not exist before — delete it.
			b.accountUpdates[addrHash] = nil
			if err := rawdb.DeleteAccount(tds.db, addrHash); err != nil {
				return err
			}
		}
		b.accountReads[addrHash] = struct{}{}
	}
	for key, value := range storageMap {
		// Composite key layout: addrHash | incarnation | keyHash.
		var addrHash common.Hash
		copy(addrHash[:], []byte(key)[:common.HashLength])
		var keyHash common.Hash
		copy(keyHash[:], []byte(key)[common.HashLength+common.IncarnationLength:])
		m, ok := b.storageUpdates[addrHash]
		if !ok {
			m = make(map[common.Hash][]byte)
			b.storageUpdates[addrHash] = m
		}
		m1, ok1 := b.storageReads[addrHash]
		if !ok1 {
			m1 = make(map[common.Hash]struct{})
			b.storageReads[addrHash] = m1
		}
		m1[keyHash] = struct{}{}
		if len(value) > 0 {
			m[keyHash] = value
			if err := tds.db.Put(dbutils.CurrentStateBucket, []byte(key)[:common.HashLength+common.IncarnationLength+common.HashLength], value); err != nil {
				return err
			}
		} else {
			m[keyHash] = nil
			if err := tds.db.Delete(dbutils.CurrentStateBucket, []byte(key)[:common.HashLength+common.IncarnationLength+common.HashLength]); err != nil {
				return err
			}
		}
	}
	// Resolve the parts of the trie needed to apply the rewind updates.
	if _, err := tds.ResolveStateTrie(false, false); err != nil {
		return err
	}

	tds.tMu.Lock()
	defer tds.tMu.Unlock()
	// forward=false: apply the updates in rewind mode.
	if _, err := tds.updateTrieRoots(false); err != nil {
		return err
	}
	// Remove the change sets of the unwound blocks.
	for i := tds.blockNr; i > blockNr; i-- {
		if err := tds.deleteTimestamp(i); err != nil {
			return err
		}
	}
	if err := tds.truncateHistory(blockNr, accountMap, storageMap); err != nil {
		return err
	}
	tds.clearUpdates()
	tds.setBlockNr(blockNr)
	return nil
}
|
|
|
|
|
2020-04-09 17:23:29 +00:00
|
|
|
func (tds *TrieDbState) deleteTimestamp(timestamp uint64) error {
|
|
|
|
changeSetKey := dbutils.EncodeTimestamp(timestamp)
|
|
|
|
changedAccounts, err := tds.db.Get(dbutils.AccountChangeSetBucket, changeSetKey)
|
|
|
|
if err != nil && err != ethdb.ErrKeyNotFound {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
changedStorage, err := tds.db.Get(dbutils.StorageChangeSetBucket, changeSetKey)
|
|
|
|
if err != nil && err != ethdb.ErrKeyNotFound {
|
|
|
|
return err
|
|
|
|
}
|
2020-04-15 09:33:22 +00:00
|
|
|
if len(changedAccounts) > 0 {
|
|
|
|
if err := tds.db.Delete(dbutils.AccountChangeSetBucket, changeSetKey); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if len(changedStorage) > 0 {
|
|
|
|
if err := tds.db.Delete(dbutils.StorageChangeSetBucket, changeSetKey); err != nil {
|
|
|
|
return err
|
2020-04-09 17:23:29 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-04-28 05:36:33 +00:00
|
|
|
// truncateHistory removes from the account and storage history indices every
// entry with a timestamp greater than timestampTo. accountMap and storageMap
// (as produced by RewindData) determine which keys are affected. Effects are
// collected first and applied afterwards, so that the Walk over each bucket
// does not observe its own modifications.
func (tds *TrieDbState) truncateHistory(timestampTo uint64, accountMap map[string][]byte, storageMap map[string][]byte) error {
	// nil value = delete the chunk; non-nil = rewrite the chunk under that key.
	accountHistoryEffects := make(map[string][]byte)
	startKey := make([]byte, common.HashLength+8)
	for key := range accountMap {
		copy(startKey, []byte(key))
		binary.BigEndian.PutUint64(startKey[common.HashLength:], timestampTo)
		if err := tds.db.Walk(dbutils.AccountsHistoryBucket, startKey, 8*common.HashLength, func(k, v []byte) (bool, error) {
			timestamp := binary.BigEndian.Uint64(k[common.HashLength:]) // the last timestamp in the chunk
			kStr := string(common.CopyBytes(k))
			accountHistoryEffects[kStr] = nil
			if timestamp > timestampTo {
				// truncate the chunk
				index := dbutils.WrapHistoryIndex(v)
				index = index.TruncateGreater(timestampTo)
				if len(index) > 8 { // If the chunk is empty after truncation, it gets simply deleted
					// Truncated chunk becomes "the last chunk" with the timestamp 0xffff....ffff
					lastK := make([]byte, len(k))
					copy(lastK, k[:common.HashLength])
					binary.BigEndian.PutUint64(lastK[common.HashLength:], ^uint64(0))
					accountHistoryEffects[string(lastK)] = common.CopyBytes(index)
				}
			}
			return true, nil
		}); err != nil {
			return err
		}
	}
	storageHistoryEffects := make(map[string][]byte)
	startKey = make([]byte, 2*common.HashLength+8)
	for key := range storageMap {
		copy(startKey, []byte(key)[:common.HashLength])
		copy(startKey[common.HashLength:], []byte(key)[common.HashLength+8:])
		binary.BigEndian.PutUint64(startKey[2*common.HashLength:], timestampTo)
		if err := tds.db.Walk(dbutils.StorageHistoryBucket, startKey, 8*2*common.HashLength, func(k, v []byte) (bool, error) {
			timestamp := binary.BigEndian.Uint64(k[2*common.HashLength:]) // the last timestamp in the chunk
			kStr := string(common.CopyBytes(k))
			storageHistoryEffects[kStr] = nil
			if timestamp > timestampTo {
				index := dbutils.WrapHistoryIndex(v)
				index = index.TruncateGreater(timestampTo)
				if len(index) > 8 { // If the chunk is empty after truncation, it gets simply deleted
					// Truncated chunk becomes "the last chunk" with the timestamp 0xffff....ffff
					lastK := make([]byte, len(k))
					copy(lastK, k[:2*common.HashLength])
					binary.BigEndian.PutUint64(lastK[2*common.HashLength:], ^uint64(0))
					storageHistoryEffects[string(lastK)] = common.CopyBytes(index)
				}
			}
			return true, nil
		}); err != nil {
			return err
		}
	}
	// Apply the collected effects.
	for key, value := range accountHistoryEffects {
		if value == nil {
			if err := tds.db.Delete(dbutils.AccountsHistoryBucket, []byte(key)); err != nil {
				return err
			}
		} else {
			if err := tds.db.Put(dbutils.AccountsHistoryBucket, []byte(key), value); err != nil {
				return err
			}
		}
	}
	for key, value := range storageHistoryEffects {
		if value == nil {
			if err := tds.db.Delete(dbutils.StorageHistoryBucket, []byte(key)); err != nil {
				return err
			}
		} else {
			if err := tds.db.Put(dbutils.StorageHistoryBucket, []byte(key), value); err != nil {
				return err
			}
		}
	}
	return nil
}
|
|
|
|
|
2019-05-27 13:51:49 +00:00
|
|
|
// readAccountDataByHash reads account data for the given address hash, first
// from the in-memory trie, then falling back to the database (a historical
// read when tds.historical is set, otherwise the current state).
// Returns (nil, nil) when the account does not exist.
func (tds *TrieDbState) readAccountDataByHash(addrHash common.Hash) (*accounts.Account, error) {
	if acc, ok := tds.GetAccount(addrHash); ok {
		return acc, nil
	}

	// Not present in the trie, try the database
	var err error
	var enc []byte
	var a accounts.Account
	if tds.historical {
		enc, err = tds.db.GetAsOf(dbutils.CurrentStateBucket, dbutils.AccountsHistoryBucket, addrHash[:], tds.blockNr+1)
		if err != nil {
			// A historical lookup error is treated as "not found".
			enc = nil
		}
		if len(enc) == 0 {
			return nil, nil
		}
		if err := a.DecodeForStorage(enc); err != nil {
			return nil, err
		}
	} else {
		if ok, err := rawdb.ReadAccount(tds.db, addrHash, &a); err != nil {
			return nil, err
		} else if !ok {
			return nil, nil
		}
	}
	// For historical reads of contract accounts, recover the code hash
	// from the contract code bucket.
	if tds.historical && a.Incarnation > 0 {
		codeHash, err := tds.db.Get(dbutils.ContractCodeBucket, dbutils.GenerateStoragePrefix(addrHash[:], a.Incarnation))
		if err == nil {
			a.CodeHash = common.BytesToHash(codeHash)
		} else {
			log.Error("Get code hash is incorrect", "err", err)
		}
	}
	return &a, nil
}
|
|
|
|
|
2019-12-19 10:44:43 +00:00
|
|
|
func (tds *TrieDbState) GetAccount(addrHash common.Hash) (*accounts.Account, bool) {
|
|
|
|
tds.tMu.Lock()
|
|
|
|
defer tds.tMu.Unlock()
|
|
|
|
acc, ok := tds.t.GetAccount(addrHash[:])
|
|
|
|
return acc, ok
|
|
|
|
}
|
|
|
|
|
2019-05-27 13:51:49 +00:00
|
|
|
func (tds *TrieDbState) ReadAccountData(address common.Address) (*accounts.Account, error) {
|
|
|
|
addrHash, err := common.HashData(address[:])
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if tds.resolveReads {
|
2020-04-20 22:31:17 +00:00
|
|
|
tds.currentBuffer.accountReads[addrHash] = struct{}{}
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
2019-12-20 12:25:40 +00:00
|
|
|
|
2019-05-27 13:51:49 +00:00
|
|
|
return tds.readAccountDataByHash(addrHash)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (tds *TrieDbState) GetKey(shaKey []byte) []byte {
|
|
|
|
key, _ := tds.db.Get(dbutils.PreimagePrefix, shaKey)
|
|
|
|
return key
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadAccountStorage returns the value of the given storage slot, consulting
// the in-memory trie first and falling back to the database. Slots of accounts
// marked deleted in the current or aggregate buffer read as nil.
func (tds *TrieDbState) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) {
	addrHash, err := tds.pw.HashAddress(address, false /*save*/)
	if err != nil {
		return nil, err
	}
	if tds.currentBuffer != nil {
		if _, ok := tds.currentBuffer.deleted[addrHash]; ok {
			return nil, nil
		}
	}
	if tds.aggregateBuffer != nil {
		if _, ok := tds.aggregateBuffer.deleted[addrHash]; ok {
			return nil, nil
		}
	}
	seckey, err := tds.pw.HashKey(key, false /*save*/)
	if err != nil {
		return nil, err
	}

	if tds.resolveReads {
		// Record this storage read in the current buffer.
		m, ok := tds.currentBuffer.storageReads[addrHash]
		if !ok {
			m = make(map[common.Hash]struct{})
			tds.currentBuffer.storageReads[addrHash] = m
		}
		m[seckey] = struct{}{}
	}

	tds.tMu.Lock()
	defer tds.tMu.Unlock()
	enc, ok := tds.t.Get(dbutils.GenerateCompositeTrieKey(addrHash, seckey))
	if !ok {
		// Not present in the trie, try database
		if tds.historical {
			enc, err = tds.db.GetAsOf(dbutils.CurrentStateBucket, dbutils.StorageHistoryBucket, dbutils.GenerateCompositeStorageKey(addrHash, incarnation, seckey), tds.blockNr)
			if err != nil {
				// A lookup error reads as "no value".
				enc = nil
			}
		} else {
			enc, err = tds.db.Get(dbutils.CurrentStateBucket, dbutils.GenerateCompositeStorageKey(addrHash, incarnation, seckey))
			if err != nil {
				enc = nil
			}
		}
	}
	return enc, nil
}
|
|
|
|
|
2020-03-23 22:10:36 +00:00
|
|
|
func (tds *TrieDbState) ReadCodeByHash(codeHash common.Hash) (code []byte, err error) {
|
|
|
|
if bytes.Equal(codeHash[:], emptyCodeHash) {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
code, err = tds.db.Get(dbutils.CodeBucket, codeHash[:])
|
|
|
|
if tds.resolveReads {
|
|
|
|
// we have to be careful, because the code might change
|
|
|
|
// during the block executuion, so we are always
|
|
|
|
// storing the latest code hash
|
2020-05-12 14:24:43 +00:00
|
|
|
tds.retainListBuilder.ReadCode(codeHash)
|
2020-03-23 22:10:36 +00:00
|
|
|
}
|
|
|
|
return code, err
|
|
|
|
}
|
|
|
|
|
2020-03-28 13:56:24 +00:00
|
|
|
// readAccountCodeFromTrie returns the contract code cached in the trie for
// the given address hash, if present.
func (tds *TrieDbState) readAccountCodeFromTrie(addrHash []byte) ([]byte, bool) {
	tds.tMu.Lock()
	defer tds.tMu.Unlock()
	return tds.t.GetAccountCode(addrHash)
}
|
|
|
|
|
2020-04-08 11:47:18 +00:00
|
|
|
// readAccountCodeSizeFromTrie returns the contract code size cached in the
// trie for the given address hash, if present.
func (tds *TrieDbState) readAccountCodeSizeFromTrie(addrHash []byte) (int, bool) {
	tds.tMu.Lock()
	defer tds.tMu.Unlock()
	return tds.t.GetAccountCodeSize(addrHash)
}
|
|
|
|
|
2019-10-21 12:54:47 +00:00
|
|
|
func (tds *TrieDbState) ReadAccountCode(address common.Address, codeHash common.Hash) (code []byte, err error) {
|
2019-05-27 13:51:49 +00:00
|
|
|
if bytes.Equal(codeHash[:], emptyCodeHash) {
|
|
|
|
return nil, nil
|
|
|
|
}
|
2020-03-23 22:10:36 +00:00
|
|
|
|
2020-04-26 16:02:38 +00:00
|
|
|
addrHash, err := tds.pw.HashAddress(address, false /*save*/)
|
2020-03-23 22:10:36 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2020-03-28 13:56:24 +00:00
|
|
|
if cached, ok := tds.readAccountCodeFromTrie(addrHash[:]); ok {
|
2020-03-23 22:10:36 +00:00
|
|
|
code, err = cached, nil
|
2019-05-27 13:51:49 +00:00
|
|
|
} else {
|
|
|
|
code, err = tds.db.Get(dbutils.CodeBucket, codeHash[:])
|
|
|
|
}
|
|
|
|
if tds.resolveReads {
|
2019-10-21 12:54:47 +00:00
|
|
|
addrHash, err1 := common.HashData(address[:])
|
|
|
|
if err1 != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-04-20 22:31:17 +00:00
|
|
|
tds.currentBuffer.accountReads[addrHash] = struct{}{}
|
2020-03-23 22:10:36 +00:00
|
|
|
// we have to be careful, because the code might change
|
|
|
|
// during the block executuion, so we are always
|
|
|
|
// storing the latest code hash
|
|
|
|
tds.currentBuffer.codeReads[addrHash] = codeHash
|
2020-05-12 14:24:43 +00:00
|
|
|
tds.retainListBuilder.ReadCode(codeHash)
|
2017-06-27 13:57:06 +00:00
|
|
|
}
|
|
|
|
return code, err
|
|
|
|
}
|
|
|
|
|
2019-10-21 12:54:47 +00:00
|
|
|
func (tds *TrieDbState) ReadAccountCodeSize(address common.Address, codeHash common.Hash) (codeSize int, err error) {
|
2020-04-26 16:02:38 +00:00
|
|
|
addrHash, err := tds.pw.HashAddress(address, false /*save*/)
|
2020-03-23 22:10:36 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
2020-04-08 11:47:18 +00:00
|
|
|
if cached, ok := tds.readAccountCodeSizeFromTrie(addrHash[:]); ok {
|
|
|
|
codeSize, err = cached, nil
|
2019-05-27 13:51:49 +00:00
|
|
|
} else {
|
2020-04-08 11:47:18 +00:00
|
|
|
var code []byte
|
|
|
|
code, err = tds.db.Get(dbutils.CodeBucket, codeHash[:])
|
2019-05-27 13:51:49 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
codeSize = len(code)
|
|
|
|
}
|
|
|
|
if tds.resolveReads {
|
2019-10-21 12:54:47 +00:00
|
|
|
addrHash, err1 := common.HashData(address[:])
|
|
|
|
if err1 != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
2020-04-20 22:31:17 +00:00
|
|
|
tds.currentBuffer.accountReads[addrHash] = struct{}{}
|
2020-03-23 22:10:36 +00:00
|
|
|
// we have to be careful, because the code might change
|
|
|
|
// during the block executuion, so we are always
|
|
|
|
// storing the latest code hash
|
2020-04-08 11:47:18 +00:00
|
|
|
tds.currentBuffer.codeSizeReads[addrHash] = codeHash
|
|
|
|
// FIXME: support codeSize in witnesses if makes sense
|
2020-05-12 14:24:43 +00:00
|
|
|
tds.retainListBuilder.ReadCode(codeHash)
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
|
|
|
return codeSize, nil
|
|
|
|
}
|
|
|
|
|
2020-05-02 18:00:42 +00:00
|
|
|
func (tds *TrieDbState) ReadAccountIncarnation(address common.Address) (uint64, error) {
|
2020-04-26 21:58:26 +00:00
|
|
|
if inc, ok := tds.incarnationMap[address]; ok {
|
|
|
|
return inc, nil
|
2020-04-25 14:50:32 +00:00
|
|
|
}
|
2020-05-02 18:00:42 +00:00
|
|
|
addrHash, err := tds.pw.HashAddress(address, false /*save*/)
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
incarnation, found, err := ethdb.GetCurrentAccountIncarnation(tds.db, addrHash)
|
|
|
|
if err != nil {
|
2020-04-25 14:50:32 +00:00
|
|
|
return 0, err
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
|
|
|
if found {
|
2020-05-02 18:00:42 +00:00
|
|
|
return incarnation, nil
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
2020-05-02 18:00:42 +00:00
|
|
|
return 0, nil
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// prevMemStats holds memory statistics from an earlier measurement.
// NOTE(review): not referenced in this chunk — presumably used elsewhere
// to compare memory usage between runs; confirm before removing.
var prevMemStats runtime.MemStats

// TrieStateWriter is a state writer that applies modifications to the
// in-memory trie buffers held by TrieDbState.
type TrieStateWriter struct {
	tds *TrieDbState
}
|
|
|
|
|
2020-04-08 05:00:31 +00:00
|
|
|
func (tds *TrieDbState) EvictTries(print bool) {
|
2019-12-10 13:12:21 +00:00
|
|
|
tds.tMu.Lock()
|
2019-12-19 10:44:43 +00:00
|
|
|
defer tds.tMu.Unlock()
|
2020-04-08 05:00:31 +00:00
|
|
|
strict := print
|
2020-04-26 21:58:26 +00:00
|
|
|
tds.incarnationMap = make(map[common.Address]uint64)
|
2019-05-27 13:51:49 +00:00
|
|
|
if print {
|
2020-04-08 05:00:31 +00:00
|
|
|
trieSize := tds.t.TrieSize()
|
|
|
|
fmt.Println("") // newline for better formatting
|
|
|
|
fmt.Printf("[Before] Actual nodes size: %d, accounted size: %d\n", trieSize, tds.tp.TotalSize())
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
|
|
|
|
2020-04-08 05:00:31 +00:00
|
|
|
if strict {
|
|
|
|
actualAccounts := uint64(tds.t.NumberOfAccounts())
|
|
|
|
fmt.Println("number of leaves: ", actualAccounts)
|
|
|
|
accountedAccounts := tds.tp.NumberOf()
|
|
|
|
if actualAccounts != accountedAccounts {
|
|
|
|
panic(fmt.Errorf("account number mismatch: trie=%v eviction=%v", actualAccounts, accountedAccounts))
|
|
|
|
}
|
|
|
|
fmt.Printf("checking number --> ok\n")
|
|
|
|
|
|
|
|
actualSize := uint64(tds.t.TrieSize())
|
|
|
|
accountedSize := tds.tp.TotalSize()
|
|
|
|
|
|
|
|
if actualSize != accountedSize {
|
|
|
|
panic(fmt.Errorf("account size mismatch: trie=%v eviction=%v", actualSize, accountedSize))
|
|
|
|
}
|
|
|
|
fmt.Printf("checking size --> ok\n")
|
|
|
|
}
|
|
|
|
|
|
|
|
tds.tp.EvictToFitSize(tds.t, MaxTrieCacheSize)
|
|
|
|
|
|
|
|
if strict {
|
|
|
|
actualAccounts := uint64(tds.t.NumberOfAccounts())
|
|
|
|
fmt.Println("number of leaves: ", actualAccounts)
|
|
|
|
accountedAccounts := tds.tp.NumberOf()
|
|
|
|
if actualAccounts != accountedAccounts {
|
|
|
|
panic(fmt.Errorf("after eviction account number mismatch: trie=%v eviction=%v", actualAccounts, accountedAccounts))
|
|
|
|
}
|
|
|
|
fmt.Printf("checking number --> ok\n")
|
|
|
|
|
|
|
|
actualSize := uint64(tds.t.TrieSize())
|
|
|
|
accountedSize := tds.tp.TotalSize()
|
|
|
|
|
|
|
|
if actualSize != accountedSize {
|
|
|
|
panic(fmt.Errorf("after eviction account size mismatch: trie=%v eviction=%v", actualSize, accountedSize))
|
|
|
|
}
|
|
|
|
fmt.Printf("checking size --> ok\n")
|
|
|
|
}
|
2019-05-27 13:51:49 +00:00
|
|
|
|
|
|
|
if print {
|
2020-04-08 05:00:31 +00:00
|
|
|
trieSize := tds.t.TrieSize()
|
|
|
|
fmt.Printf("[After] Actual nodes size: %d, accounted size: %d\n", trieSize, tds.tp.TotalSize())
|
|
|
|
|
|
|
|
actualAccounts := uint64(tds.t.NumberOfAccounts())
|
|
|
|
fmt.Println("number of leaves: ", actualAccounts)
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
2019-12-10 13:12:21 +00:00
|
|
|
|
2019-05-27 13:51:49 +00:00
|
|
|
var m runtime.MemStats
|
|
|
|
runtime.ReadMemStats(&m)
|
2020-04-08 05:00:31 +00:00
|
|
|
log.Info("Memory", "nodes size", tds.tp.TotalSize(), "hashes", tds.t.HashMapSize(),
|
2020-02-08 22:18:20 +00:00
|
|
|
"alloc", int(m.Alloc/1024), "sys", int(m.Sys/1024), "numGC", int(m.NumGC))
|
2019-05-27 13:51:49 +00:00
|
|
|
if print {
|
2020-04-08 05:00:31 +00:00
|
|
|
fmt.Printf("Eviction done. Nodes size: %d, alloc: %d, sys: %d, numGC: %d\n", tds.tp.TotalSize(), int(m.Alloc/1024), int(m.Sys/1024), int(m.NumGC))
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TrieStateWriter creates a writer that applies state changes to the
// in-memory trie buffers.
func (tds *TrieDbState) TrieStateWriter() *TrieStateWriter {
	return &TrieStateWriter{tds: tds}
}
|
|
|
|
|
2020-04-09 17:23:29 +00:00
|
|
|
// DbStateWriter creates a writer that is designed to write changes into the database batch
|
2019-05-27 13:51:49 +00:00
|
|
|
func (tds *TrieDbState) DbStateWriter() *DbStateWriter {
|
2020-05-08 04:52:55 +00:00
|
|
|
return &DbStateWriter{blockNr: tds.blockNr, db: tds.db, pw: tds.pw, csw: NewChangeSetWriter(), incarnationMap: tds.incarnationMap}
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (tsw *TrieStateWriter) UpdateAccountData(_ context.Context, address common.Address, original, account *accounts.Account) error {
|
2020-04-26 16:02:38 +00:00
|
|
|
addrHash, err := tsw.tds.pw.HashAddress(address, false /*save*/)
|
2019-05-27 13:51:49 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
tsw.tds.currentBuffer.accountUpdates[addrHash] = account
|
2020-04-20 22:31:17 +00:00
|
|
|
tsw.tds.currentBuffer.accountReads[addrHash] = struct{}{}
|
2019-05-27 13:51:49 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (tsw *TrieStateWriter) DeleteAccount(_ context.Context, address common.Address, original *accounts.Account) error {
|
2020-04-26 16:02:38 +00:00
|
|
|
addrHash, err := tsw.tds.pw.HashAddress(address, false /*save*/)
|
2019-05-27 13:51:49 +00:00
|
|
|
if err != err {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
tsw.tds.currentBuffer.accountUpdates[addrHash] = nil
|
2020-04-20 22:31:17 +00:00
|
|
|
tsw.tds.currentBuffer.accountReads[addrHash] = struct{}{}
|
2019-11-15 22:48:49 +00:00
|
|
|
delete(tsw.tds.currentBuffer.storageUpdates, addrHash)
|
2020-05-09 04:44:56 +00:00
|
|
|
delete(tsw.tds.currentBuffer.codeUpdates, addrHash)
|
2019-11-15 22:48:49 +00:00
|
|
|
tsw.tds.currentBuffer.deleted[addrHash] = struct{}{}
|
2020-04-26 21:58:26 +00:00
|
|
|
if original.Incarnation > 0 {
|
2020-05-02 18:00:42 +00:00
|
|
|
tsw.tds.incarnationMap[address] = original.Incarnation
|
2020-04-26 21:58:26 +00:00
|
|
|
}
|
2019-05-27 13:51:49 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-12-20 12:25:40 +00:00
|
|
|
func (tsw *TrieStateWriter) UpdateAccountCode(addrHash common.Hash, incarnation uint64, codeHash common.Hash, code []byte) error {
|
2019-05-27 13:51:49 +00:00
|
|
|
if tsw.tds.resolveReads {
|
2020-05-12 14:24:43 +00:00
|
|
|
tsw.tds.retainListBuilder.CreateCode(codeHash)
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
2020-03-23 22:10:36 +00:00
|
|
|
tsw.tds.currentBuffer.codeUpdates[addrHash] = code
|
2019-05-27 13:51:49 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (tsw *TrieStateWriter) WriteAccountStorage(_ context.Context, address common.Address, incarnation uint64, key, original, value *common.Hash) error {
|
2020-04-26 16:02:38 +00:00
|
|
|
addrHash, err := tsw.tds.pw.HashAddress(address, false /*save*/)
|
2019-05-27 13:51:49 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
v := bytes.TrimLeft(value[:], "\x00")
|
2019-11-15 22:48:49 +00:00
|
|
|
m, ok := tsw.tds.currentBuffer.storageUpdates[addrHash]
|
2019-05-27 13:51:49 +00:00
|
|
|
if !ok {
|
|
|
|
m = make(map[common.Hash][]byte)
|
2019-11-15 22:48:49 +00:00
|
|
|
tsw.tds.currentBuffer.storageUpdates[addrHash] = m
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
2020-04-20 22:31:17 +00:00
|
|
|
m1, ok1 := tsw.tds.currentBuffer.storageReads[addrHash]
|
|
|
|
if !ok1 {
|
|
|
|
m1 = make(map[common.Hash]struct{})
|
|
|
|
tsw.tds.currentBuffer.storageReads[addrHash] = m1
|
|
|
|
}
|
2020-04-26 16:02:38 +00:00
|
|
|
seckey, err := tsw.tds.pw.HashKey(key, false /*save*/)
|
2019-05-27 13:51:49 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-04-20 22:31:17 +00:00
|
|
|
m1[seckey] = struct{}{}
|
2019-05-27 13:51:49 +00:00
|
|
|
if len(v) > 0 {
|
2019-11-13 09:52:43 +00:00
|
|
|
m[seckey] = v
|
2019-05-27 13:51:49 +00:00
|
|
|
} else {
|
|
|
|
m[seckey] = nil
|
|
|
|
}
|
|
|
|
//fmt.Printf("WriteAccountStorage %x %x: %x, buffer %d\n", addrHash, seckey, value, len(tsw.tds.buffers))
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-10-21 12:54:47 +00:00
|
|
|
// ExtractWitness produces block witness for the block just been processed, in a serialised form
|
2020-01-15 13:56:50 +00:00
|
|
|
func (tds *TrieDbState) ExtractWitness(trace bool, isBinary bool) (*trie.Witness, error) {
|
2020-05-12 14:24:43 +00:00
|
|
|
rs := tds.retainListBuilder.Build(isBinary)
|
2019-12-10 13:12:21 +00:00
|
|
|
|
2020-03-23 22:10:36 +00:00
|
|
|
return tds.makeBlockWitness(trace, rs, isBinary)
|
2019-05-27 13:51:49 +00:00
|
|
|
}
|
|
|
|
|
2020-03-25 15:40:30 +00:00
|
|
|
// ExtractWitnessForPrefix produces a block witness restricted to the given trie
// prefix for the block just been processed, in a serialised form.
func (tds *TrieDbState) ExtractWitnessForPrefix(prefix []byte, trace bool, isBinary bool) (*trie.Witness, error) {
	rs := tds.retainListBuilder.Build(isBinary)

	return tds.makeBlockWitnessForPrefix(prefix, trace, rs, isBinary)
}
|
|
|
|
|
2020-05-12 14:24:43 +00:00
|
|
|
func (tds *TrieDbState) makeBlockWitnessForPrefix(prefix []byte, trace bool, rl trie.RetainDecider, isBinary bool) (*trie.Witness, error) {
|
2020-03-25 15:40:30 +00:00
|
|
|
tds.tMu.Lock()
|
|
|
|
defer tds.tMu.Unlock()
|
|
|
|
|
|
|
|
t := tds.t
|
|
|
|
if isBinary {
|
|
|
|
t = trie.HexToBin(tds.t).Trie()
|
|
|
|
}
|
|
|
|
|
2020-05-12 14:24:43 +00:00
|
|
|
return t.ExtractWitnessForPrefix(prefix, trace, rl)
|
2020-03-25 15:40:30 +00:00
|
|
|
}
|
|
|
|
|
2020-05-12 14:24:43 +00:00
|
|
|
func (tds *TrieDbState) makeBlockWitness(trace bool, rl trie.RetainDecider, isBinary bool) (*trie.Witness, error) {
|
2019-12-19 10:44:43 +00:00
|
|
|
tds.tMu.Lock()
|
|
|
|
defer tds.tMu.Unlock()
|
|
|
|
|
2020-01-15 13:56:50 +00:00
|
|
|
t := tds.t
|
|
|
|
if isBinary {
|
|
|
|
t = trie.HexToBin(tds.t).Trie()
|
2019-12-19 10:44:43 +00:00
|
|
|
}
|
2020-01-15 13:56:50 +00:00
|
|
|
|
2020-05-12 14:24:43 +00:00
|
|
|
return t.ExtractWitness(trace, rl)
|
2019-12-19 10:44:43 +00:00
|
|
|
}
|
|
|
|
|
2020-05-02 18:00:42 +00:00
|
|
|
func (tsw *TrieStateWriter) CreateContract(address common.Address) error {
|
2020-04-26 16:02:38 +00:00
|
|
|
addrHash, err := tsw.tds.pw.HashAddress(address, true /*save*/)
|
2019-11-15 22:48:49 +00:00
|
|
|
if err != nil {
|
2020-05-02 18:00:42 +00:00
|
|
|
return err
|
2019-11-15 22:48:49 +00:00
|
|
|
}
|
|
|
|
tsw.tds.currentBuffer.created[addrHash] = struct{}{}
|
2020-04-20 22:31:17 +00:00
|
|
|
tsw.tds.currentBuffer.accountReads[addrHash] = struct{}{}
|
2020-05-09 04:44:56 +00:00
|
|
|
delete(tsw.tds.currentBuffer.storageUpdates, addrHash)
|
2020-05-02 18:00:42 +00:00
|
|
|
return nil
|
2018-02-05 16:40:32 +00:00
|
|
|
}
|
2019-10-18 12:11:50 +00:00
|
|
|
|
|
|
|
func (tds *TrieDbState) TriePruningDebugDump() string {
|
|
|
|
return tds.tp.DebugDump()
|
|
|
|
}
|
2019-12-10 13:12:21 +00:00
|
|
|
|
|
|
|
func (tds *TrieDbState) getBlockNr() uint64 {
|
|
|
|
return atomic.LoadUint64(&tds.blockNr)
|
|
|
|
}
|
|
|
|
|
|
|
|
// setBlockNr atomically stores n as the current block number.
func (tds *TrieDbState) setBlockNr(n uint64) {
	atomic.StoreUint64(&tds.blockNr, n)
}
|
2020-02-06 10:53:09 +00:00
|
|
|
|
|
|
|
// GetNodeByHash gets node's RLP by hash.
|
|
|
|
func (tds *TrieDbState) GetNodeByHash(hash common.Hash) []byte {
|
|
|
|
tds.tMu.Lock()
|
|
|
|
defer tds.tMu.Unlock()
|
|
|
|
|
|
|
|
return tds.t.GetNodeByHash(hash)
|
|
|
|
}
|
2020-04-22 10:14:33 +00:00
|
|
|
|
|
|
|
func (tds *TrieDbState) GetTrieHash() common.Hash {
|
|
|
|
tds.tMu.Lock()
|
|
|
|
defer tds.tMu.Unlock()
|
|
|
|
return tds.t.Hash()
|
|
|
|
}
|