// erigon-pulse/cmd/state/stateless/witness_db.go
package stateless
import (
"bytes"
"encoding/binary"
"encoding/csv"
"fmt"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/ethdb"
"github.com/ledgerwatch/turbo-geth/trie"
)
var (
	// witnessesBucket is the database bucket (table) under which serialized
	// block witnesses are stored, keyed by (blockNumber, maxTrieSize).
	witnessesBucket = "witnesses"
)
// WitnessDBWriter persists serialized block witnesses to the database and
// records a per-block size statistics row to a CSV writer.
type WitnessDBWriter struct {
	storage     ethdb.Database // backing key-value store holding the witnesses bucket
	statsWriter *csv.Writer    // receives one stats row per upserted witness
}
// NewWitnessDBWriter constructs a WitnessDBWriter over the given storage and
// stats CSV writer. It eagerly writes the CSV header row and returns an error
// if that write fails.
func NewWitnessDBWriter(storage ethdb.Database, statsWriter *csv.Writer) (*WitnessDBWriter, error) {
	header := []string{"blockNum", "maxTrieSize", "witnessesSize"}
	if err := statsWriter.Write(header); err != nil {
		return nil, err
	}
	return &WitnessDBWriter{storage: storage, statsWriter: statsWriter}, nil
}
func (db *WitnessDBWriter) MustUpsert(blockNumber uint64, maxTrieSize uint32, resolveWitnesses []*trie.Witness) {
key := deriveDbKey(blockNumber, maxTrieSize)
var buf bytes.Buffer
for i, witness := range resolveWitnesses {
if _, err := witness.WriteTo(&buf); err != nil {
panic(fmt.Errorf("error while writing witness to a buffer: %w", err))
}
if i < len(resolveWitnesses)-1 {
buf.WriteByte(byte(trie.OpNewTrie))
}
}
bytes := buf.Bytes()
batch := db.storage.NewBatch()
Intermediate hash phase 3 (#377) * #remove debug prints * remove storage-mode="i" * minnet re-execute hack with checkpoints * minnet re-execute hack with checkpoints * rollback to master setup * mainnet re-exec hack * rollback some changes * v0 of "push down" functionality * move all logic to own functions * handle case when re-created account already has some storage * clear path for storage * try to rely on tree structure (but maybe need to rely on DB because can be intra-block re-creations of account) * fix some bugs with indexes, moving to tests * tests added * make linter happy * make linter happy * simplify logic * adjust comparison of keys with and without incarnation * test for keyIsBefore * test for keyIsBefore * better nibbles alignment * better nibbles alignment * cleanup * continue work on tests * simplify test * check tombstone existence before pushing it down. * put tombstone only when account deleted, not created * put tombstone only when account has storage * make linter happy * test for storage resolver * make fixedbytes work without incarnation * fix panic on short keys * use special comparison only when working with keys from cache * add blockNr for better tracing * fix: incorrect tombstone check * fix: incorrect tombstone check * trigger ci * hack for problem block * more test-cases * add test case for too long keys * speedup cached resolver by removing bucket creation transaction * remove parent type check in pruning, remove unused copy from mutation.put * dump resolving info on fail * dump resolving info on fail * set tombstone everytime for now to check if it will help * on unload: check parent type, not type of node * fix wrong order of checking node type * fix wrong order of checking node type * rebase to new master * make linter happy * rebase to new master * place tombstone only if acc has storage * rebase master * rebase master * rebase master * rebase master Co-authored-by: alex.sharov <alex.sharov@lazada.com>
2020-03-11 10:31:49 +00:00
err := batch.Put(witnessesBucket, common.CopyBytes(key), common.CopyBytes(bytes))
if err != nil {
panic(fmt.Errorf("error while upserting witness: %w", err))
}
_, err = batch.Commit()
if err != nil {
panic(err)
}
err = db.statsWriter.Write([]string{
fmt.Sprintf("%v", blockNumber),
fmt.Sprintf("%v", maxTrieSize),
fmt.Sprintf("%v", len(bytes)),
})
if err != nil {
panic(fmt.Errorf("error while writing stats: %w", err))
}
db.statsWriter.Flush()
}
// WitnessDBReader reads previously stored witness blobs from the database.
type WitnessDBReader struct {
	getter ethdb.Getter // read-only access to the witnesses bucket
}
// NewWitnessDBReader wraps the given database getter for witness lookups.
func NewWitnessDBReader(getter ethdb.Getter) *WitnessDBReader {
	return &WitnessDBReader{getter: getter}
}
// GetWitnessesForBlock returns the raw witness blob stored under the
// (blockNumber, maxTrieSize) key, or the underlying getter's error if the
// entry is absent.
func (db *WitnessDBReader) GetWitnessesForBlock(blockNumber uint64, maxTrieSize uint32) ([]byte, error) {
	return db.getter.Get(witnessesBucket, deriveDbKey(blockNumber, maxTrieSize))
}
// deriveDbKey builds the 12-byte storage key for a witness: the block number
// as 8 little-endian bytes followed by maxTrieSize as 4 little-endian bytes.
func deriveDbKey(blockNumber uint64, maxTrieSize uint32) []byte {
	key := make([]byte, 12)
	binary.LittleEndian.PutUint64(key[0:8], blockNumber)
	binary.LittleEndian.PutUint32(key[8:12], maxTrieSize)
	return key
}