/**
 * Explore DB contents
 *
 * Given a beacon-chain DB, this tool provides many options to
 * inspect and explore it. For every non-empty bucket, it prints
 * the number of rows, the bucket size, and the min/average/max
 * size of keys and values.
 */
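
// Example invocations (a sketch; the flags and commands are defined below,
// but the paths and the database file name are illustrative):
//
//	go run . --datadir=/path/to/beacondata --dbname=beaconchain.db --command=bucket-stats
//	go run . --datadir=/path/to/beacondata --dbname=beaconchain.db --command=bucket-content --bucket-name=state --limit=5
//	go run . --datadir=/path/to/beacondata --dbname=beaconchain.db --command=migration-check --migration=validator-entries --dest-datadir=/path/to/migrated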

package main

import (
	"bytes"
	"context"
	"flag"
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/dustin/go-humanize"
	"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/config/params"
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	log "github.com/sirupsen/logrus"
	"github.com/status-im/keycard-go/hexutils"
	bolt "go.etcd.io/bbolt"
)

const (
	// MaxUint64 is the maximum value of a uint64, used as an effectively
	// unlimited row limit.
	MaxUint64 = ^uint64(0)

	// maxSlotsToDisplay caps the slot-ordered iteration when printing states.
	maxSlotsToDisplay = 2000000
)

var (
	datadir       = flag.String("datadir", "", "Path to data directory.")
	dbName        = flag.String("dbname", "", "Database file name.")
	command       = flag.String("command", "", "Command to execute (bucket-stats, bucket-content, or migration-check).")
	bucketName    = flag.String("bucket-name", "", "Bucket whose contents should be shown (state or state-summary).")
	rowLimit      = flag.Uint64("limit", 10, "Maximum number of rows to read.")
	migrationName = flag.String("migration", "", "Migration to cross-check (validator-entries).")
	destDatadir   = flag.String("dest-datadir", "", "Path to destination data directory (for migration-check).")
)

// bucketStat carries the statistics of a single bucket; used to parallelize
// the bucket stats computation.
type bucketStat struct {
	bucketName     string
	noOfRows       uint64
	totalKeySize   uint64
	totalValueSize uint64
	minKeySize     uint64
	maxKeySize     uint64
	minValueSize   uint64
	maxValueSize   uint64
}

// modifiedState pairs a decoded beacon state with its key and row metadata;
// used to parallelize state bucket processing.
type modifiedState struct {
	state     state.BeaconState
	key       []byte
	valueSize uint64
	rowCount  uint64
}

// modifiedStateSummary pairs a state summary's slot and root with its key and
// row metadata; used to parallelize state summary bucket processing.
type modifiedStateSummary struct {
	slot      types.Slot
	root      []byte
	key       []byte
	valueSize uint64
	rowCount  uint64
}
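
// The readers and printers below are wired together as a two-goroutine
// pipeline: a reader decodes rows and sends them down a buffered channel, a
// printer drains the channel, and doneC signals completion. A minimal sketch
// of the pattern (illustrative only, not part of this tool):
//
//	itemC := make(chan *modifiedState, 128)
//	doneC := make(chan bool)
//	go func() { defer close(itemC) /* decode rows, send on itemC */ }()
//	go func() {
//		for item := range itemC {
//			_ = item // print it
//		}
//		doneC <- true
//	}()
//	<-doneC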

func main() {
	flag.Parse()

	// Check for the mandatory flags.
	if *datadir == "" {
		log.Fatal("Please specify --datadir <db path> to read the database")
	}
	if *dbName == "" {
		log.Fatal("Please specify --dbname <db file name> to specify the database file.")
	}

	// Check if the database file is present.
	dbNameWithPath := filepath.Join(*datadir, *dbName)
	if _, err := os.Stat(dbNameWithPath); os.IsNotExist(err) {
		log.Fatalf("could not locate database file : %s, %v", dbNameWithPath, err)
	}

	switch *command {
	case "bucket-stats":
		printBucketStats(dbNameWithPath)
	case "bucket-content":
		switch *bucketName {
		case "state",
			"state-summary":
			printBucketContents(dbNameWithPath, *rowLimit, *bucketName)
		default:
			log.Fatal("Oops, given bucket is not supported for now.")
		}
	case "migration-check":
		destDbNameWithPath := filepath.Join(*destDatadir, *dbName)
		if _, err := os.Stat(destDbNameWithPath); os.IsNotExist(err) {
			log.Fatalf("could not locate destination database file : %s, %v", destDbNameWithPath, err)
		}
		switch *migrationName {
		case "validator-entries":
			checkValidatorMigration(dbNameWithPath, destDbNameWithPath)
		default:
			log.Fatal("Oops, given migration is not supported for now.")
		}
	default:
		log.Fatal("Oops, given command is not supported for now.")
	}
}

// printBucketStats computes and prints the statistics of every bucket in the
// database.
func printBucketStats(dbNameWithPath string) {
	ctx := context.Background()
	groupSize := uint64(128)
	doneC := make(chan bool)
	statsC := make(chan *bucketStat, groupSize)
	go readBucketStat(ctx, dbNameWithPath, statsC)
	go printBucketStat(statsC, doneC)
	<-doneC
}

// printBucketContents prints the contents of the given bucket, up to rowLimit
// rows.
func printBucketContents(dbNameWithPath string, rowLimit uint64, bucketName string) {
	// Get the keys within the supplied limit for the given bucket.
	bucketNameInBytes := []byte(bucketName)
	keys, sizes := keysOfBucket(dbNameWithPath, bucketNameInBytes, rowLimit)

	// Create a new KV Store.
	dbDirectory := filepath.Dir(dbNameWithPath)
	db, openErr := kv.NewKVStore(context.Background(), dbDirectory, &kv.Config{})
	if openErr != nil {
		log.Fatalf("could not open db, %v", openErr)
	}

	// Don't forget to close it when ejecting out of this function.
	defer func() {
		closeErr := db.Close()
		if closeErr != nil {
			log.Fatalf("could not close db, %v", closeErr)
		}
	}()

	// Retrieve every element for the keys in the list and call the respective
	// display function.
	ctx := context.Background()
	groupSize := uint64(128)
	doneC := make(chan bool)
	switch bucketName {
	case "state":
		stateC := make(chan *modifiedState, groupSize)
		go readStates(ctx, db, stateC, keys, sizes)
		go printStates(stateC, doneC)

	case "state-summary":
		stateSummaryC := make(chan *modifiedStateSummary, groupSize)
		go readStateSummary(ctx, db, stateSummaryC, keys, sizes)
		go printStateSummary(stateSummaryC, doneC)
	}
	<-doneC
}

// readBucketStat opens the raw bolt database, walks every bucket in parallel,
// and sends per-bucket statistics on statsC.
func readBucketStat(ctx context.Context, dbNameWithPath string, statsC chan<- *bucketStat) {
	// Open the raw database file. If the file is busy, then exit.
	db, openErr := bolt.Open(dbNameWithPath, 0600, &bolt.Options{Timeout: 1 * time.Second})
	if openErr != nil {
		log.Fatalf("could not open db to show bucket stats, %v", openErr)
	}

	// Make sure we close the database before ejecting out of this function.
	defer func() {
		closeErr := db.Close()
		if closeErr != nil {
			log.Fatalf("could not close db after showing bucket stats, %v", closeErr)
		}
	}()

	// Get a list of all the existing buckets.
	var buckets []string
	if viewErr1 := db.View(func(tx *bolt.Tx) error {
		return tx.ForEach(func(name []byte, buc *bolt.Bucket) error {
			buckets = append(buckets, string(name))
			return nil
		})
	}); viewErr1 != nil {
		log.Fatalf("could not read buckets from db while getting list of buckets: %v", viewErr1)
	}

	// For every bucket, calculate the stats and send them for printing.
	// The stats of all the buckets are calculated in parallel.
	var wg sync.WaitGroup
	for _, bName := range buckets {
		wg.Add(1)
		go func(bukName string) {
			defer wg.Done()
			count := uint64(0)
			minValueSize := ^uint64(0)
			maxValueSize := uint64(0)
			totalValueSize := uint64(0)
			minKeySize := ^uint64(0)
			maxKeySize := uint64(0)
			totalKeySize := uint64(0)
			if viewErr2 := db.View(func(tx *bolt.Tx) error {
				b := tx.Bucket([]byte(bukName))
				if forEachErr := b.ForEach(func(k, v []byte) error {
					count++
					valueSize := uint64(len(v))
					if valueSize < minValueSize {
						minValueSize = valueSize
					}
					if valueSize > maxValueSize {
						maxValueSize = valueSize
					}
					totalValueSize += valueSize

					keySize := uint64(len(k))
					if keySize < minKeySize {
						minKeySize = keySize
					}
					if keySize > maxKeySize {
						maxKeySize = keySize
					}
					totalKeySize += keySize
					return nil
				}); forEachErr != nil {
					log.WithError(forEachErr).Errorf("could not process row %d for bucket: %s", count, bukName)
					return forEachErr
				}
				return nil
			}); viewErr2 != nil {
				log.WithError(viewErr2).Errorf("could not get stats for bucket: %s", bukName)
				return
			}
			stat := &bucketStat{
				bucketName:     bukName,
				noOfRows:       count,
				totalKeySize:   totalKeySize,
				totalValueSize: totalValueSize,
				minKeySize:     minKeySize,
				maxKeySize:     maxKeySize,
				minValueSize:   minValueSize,
				maxValueSize:   maxValueSize,
			}
			statsC <- stat
		}(bName)
	}
	wg.Wait()
	close(statsC)
}

// readStates decodes the states for the given keys and sends them on stateC
// in slot order.
func readStates(ctx context.Context, db *kv.Store, stateC chan<- *modifiedState, keys [][]byte, sizes []uint64) {
	stateMap := make(map[uint64]*modifiedState)
	for rowCount, key := range keys {
		st, stateErr := db.State(ctx, bytesutil.ToBytes32(key))
		if stateErr != nil {
			log.WithError(stateErr).Errorf("could not get state for key : %s", hexutils.BytesToHex(key))
			continue
		}
		mst := &modifiedState{
			state:     st,
			key:       key,
			valueSize: sizes[rowCount],
			rowCount:  uint64(rowCount),
		}
		stateMap[uint64(st.Slot())] = mst
	}

	// Emit the states in slot order.
	for i := uint64(0); i < maxSlotsToDisplay; i++ {
		if _, ok := stateMap[i]; ok {
			stateC <- stateMap[i]
		}
	}
	close(stateC)
}

// readStateSummary decodes the state summaries for the given keys and sends
// them on stateSummaryC.
func readStateSummary(ctx context.Context, db *kv.Store, stateSummaryC chan<- *modifiedStateSummary, keys [][]byte, sizes []uint64) {
	for rowCount, key := range keys {
		ss, ssErr := db.StateSummary(ctx, bytesutil.ToBytes32(key))
		if ssErr != nil {
			log.WithError(ssErr).Errorf("could not get state summary for key : %s", hexutils.BytesToHex(key))
			continue
		}
		mst := &modifiedStateSummary{
			slot:      ss.Slot,
			root:      ss.Root,
			key:       key,
			valueSize: sizes[rowCount],
			rowCount:  uint64(rowCount),
		}
		stateSummaryC <- mst
	}
	close(stateSummaryC)
}

// printBucketStat prints the stats of every non-empty bucket and signals on
// doneC once statsC is drained.
func printBucketStat(statsC <-chan *bucketStat, doneC chan<- bool) {
	for stat := range statsC {
		if stat.noOfRows != 0 {
			averageValueSize := stat.totalValueSize / stat.noOfRows
			averageKeySize := stat.totalKeySize / stat.noOfRows
			log.Infof("------ %s ---------", stat.bucketName)
			log.Infof("NumberOfRows = %d", stat.noOfRows)
			log.Infof("TotalBucketSize = %s", humanize.Bytes(stat.totalValueSize+stat.totalKeySize))
			log.Infof("KeySize = %s, (min = %s, avg = %s, max = %s)",
				humanize.Bytes(stat.totalKeySize),
				humanize.Bytes(stat.minKeySize),
				humanize.Bytes(averageKeySize),
				humanize.Bytes(stat.maxKeySize))
			log.Infof("ValueSize = %s, (min = %s, avg = %s, max = %s)",
				humanize.Bytes(stat.totalValueSize),
				humanize.Bytes(stat.minValueSize),
				humanize.Bytes(averageValueSize),
				humanize.Bytes(stat.maxValueSize))
		}
	}
	doneC <- true
}
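
// For a non-empty bucket, printBucketStat's output looks along these lines
// (the numbers are illustrative only; real values depend on the database):
//
//	------ state ---------
//	NumberOfRows = 1024
//	TotalBucketSize = 2.1 GB
//	KeySize = 33 kB, (min = 32 B, avg = 32 B, max = 32 B)
//	ValueSize = 2.1 GB, (min = 1.8 MB, avg = 2.0 MB, max = 2.4 MB)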

// printStates prints the details of every state received on stateC and
// signals on doneC when done.
func printStates(stateC <-chan *modifiedState, doneC chan<- bool) {
	for mst := range stateC {
		st := mst.state
		log.Infof("---- row = %04d, slot = %8d, epoch = %8d ----", mst.rowCount, st.Slot(), st.Slot()/params.BeaconConfig().SlotsPerEpoch)
		log.Infof("key : %s", hexutils.BytesToHex(mst.key))
		log.Infof("value : compressed size = %s", humanize.Bytes(mst.valueSize))
		t := time.Unix(int64(st.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
		log.Infof("genesis_time : %s", t.Format(time.UnixDate))
		log.Infof("genesis_validators_root : %s", hexutils.BytesToHex(st.GenesisValidatorsRoot()))
		log.Infof("slot : %d", st.Slot())
		log.Infof("fork : previous_version = %#x, current_version = %#x", st.Fork().PreviousVersion, st.Fork().CurrentVersion)
		log.Infof("latest_block_header : sizeSSZ = %s", humanize.Bytes(uint64(st.LatestBlockHeader().SizeSSZ())))
		size, count := sizeAndCountOfByteList(st.BlockRoots())
		log.Infof("block_roots : size = %s, count = %d", humanize.Bytes(size), count)
		size, count = sizeAndCountOfByteList(st.StateRoots())
		log.Infof("state_roots : size = %s, count = %d", humanize.Bytes(size), count)
		size, count = sizeAndCountOfByteList(st.HistoricalRoots())
		log.Infof("historical_roots : size = %s, count = %d", humanize.Bytes(size), count)
		log.Infof("eth1_data : sizeSSZ = %s", humanize.Bytes(uint64(st.Eth1Data().SizeSSZ())))
		size, count = sizeAndCountGeneric(st.Eth1DataVotes(), nil)
		log.Infof("eth1_data_votes : sizeSSZ = %s, count = %d", humanize.Bytes(size), count)
		log.Infof("eth1_deposit_index : %d", st.Eth1DepositIndex())
		size, count = sizeAndCountGeneric(st.Validators(), nil)
		log.Infof("validators : sizeSSZ = %s, count = %d", humanize.Bytes(size), count)
		size, count = sizeAndCountOfUint64List(st.Balances())
		log.Infof("balances : size = %s, count = %d", humanize.Bytes(size), count)
		size, count = sizeAndCountOfByteList(st.RandaoMixes())
		log.Infof("randao_mixes : size = %s, count = %d", humanize.Bytes(size), count)
		size, count = sizeAndCountOfUint64List(st.Slashings())
		log.Infof("slashings : size = %s, count = %d", humanize.Bytes(size), count)
		size, count = sizeAndCountGeneric(st.PreviousEpochAttestations())
		log.Infof("previous_epoch_attestations : sizeSSZ = %s, count = %d", humanize.Bytes(size), count)
		size, count = sizeAndCountGeneric(st.CurrentEpochAttestations())
		log.Infof("current_epoch_attestations : sizeSSZ = %s, count = %d", humanize.Bytes(size), count)
		log.Infof("justification_bits : size = %s, count = %d", humanize.Bytes(st.JustificationBits().Len()), st.JustificationBits().Count())
		log.Infof("previous_justified_checkpoint : sizeSSZ = %s", humanize.Bytes(uint64(st.PreviousJustifiedCheckpoint().SizeSSZ())))
		log.Infof("current_justified_checkpoint : sizeSSZ = %s", humanize.Bytes(uint64(st.CurrentJustifiedCheckpoint().SizeSSZ())))
		log.Infof("finalized_checkpoint : sizeSSZ = %s", humanize.Bytes(uint64(st.FinalizedCheckpoint().SizeSSZ())))
	}
	doneC <- true
}

// printStateSummary prints every state summary received on stateSummaryC and
// signals on doneC when done.
func printStateSummary(stateSummaryC <-chan *modifiedStateSummary, doneC chan<- bool) {
	for msts := range stateSummaryC {
		log.Infof("row : %04d, slot : %d, root = %s", msts.rowCount, msts.slot, hexutils.BytesToHex(msts.root))
	}
	doneC <- true
}

// checkValidatorMigration cross-checks every state in the source database
// against the migrated destination database.
func checkValidatorMigration(dbNameWithPath, destDbNameWithPath string) {
	// Get all the keys of the state bucket in both databases.
	sourceStateKeys, _ := keysOfBucket(dbNameWithPath, []byte("state"), MaxUint64)
	destStateKeys, _ := keysOfBucket(destDbNameWithPath, []byte("state"), MaxUint64)

	if len(destStateKeys) < len(sourceStateKeys) {
		log.Fatalf("destination database has fewer state keys than the source (source = %d, dest = %d)", len(sourceStateKeys), len(destStateKeys))
	}

	// Create the source and destination KV stores.
	sourceDbDirectory := filepath.Dir(dbNameWithPath)
	sourceDB, openErr := kv.NewKVStore(context.Background(), sourceDbDirectory, &kv.Config{})
	if openErr != nil {
		log.Fatalf("could not open sourceDB: %v", openErr)
	}

	destinationDbDirectory := filepath.Dir(destDbNameWithPath)
	destDB, openErr := kv.NewKVStore(context.Background(), destinationDbDirectory, &kv.Config{})
	if openErr != nil {
		// Dirty hack alert: ignore this prometheus error, as we are opening two DBs with the
		// same metric name. If you want to avoid this, then we should pass the metric name
		// when opening the DB, which touches too many places.
		if openErr.Error() != "duplicate metrics collector registration attempted" {
			log.Fatalf("could not open destDB, %v", openErr)
		}
	}

	// Don't forget to close both databases when ejecting out of this function.
	defer func() {
		closeErr := sourceDB.Close()
		if closeErr != nil {
			log.Fatalf("could not close sourceDB: %v", closeErr)
		}
	}()
	defer func() {
		closeErr := destDB.Close()
		if closeErr != nil {
			log.Fatalf("could not close destDB: %v", closeErr)
		}
	}()

	ctx := context.Background()
	failCount := 0
	for rowCount, key := range sourceStateKeys {
		sourceState, stateErr := sourceDB.State(ctx, bytesutil.ToBytes32(key))
		if stateErr != nil {
			log.Fatalf("could not get from source db, the state for key : %s, %v", hexutils.BytesToHex(key), stateErr)
		}
		destinationState, stateErr := destDB.State(ctx, bytesutil.ToBytes32(key))
		if stateErr != nil {
			log.Fatalf("could not get from destination db, the state for key : %s, %v", hexutils.BytesToHex(key), stateErr)
		}
		if destinationState == nil {
			log.Infof("could not find state in migrated DB: index = %d, slot = %d, epoch = %d, numOfValidators = %d, key = %s",
				rowCount, sourceState.Slot(), sourceState.Slot()/params.BeaconConfig().SlotsPerEpoch, sourceState.NumValidators(), hexutils.BytesToHex(key))
			failCount++
			continue
		}

		if len(sourceState.Validators()) != len(destinationState.Validators()) {
			log.Fatalf("validator mismatch : source = %d, dest = %d", len(sourceState.Validators()), len(destinationState.Validators()))
		}
		sourceStateHash, err := sourceState.HashTreeRoot(ctx)
		if err != nil {
			log.Fatalf("could not find hash of source state: %v", err)
		}
		destinationStateHash, err := destinationState.HashTreeRoot(ctx)
		if err != nil {
			log.Fatalf("could not find hash of destination state: %v", err)
		}
		if !bytes.Equal(sourceStateHash[:], destinationStateHash[:]) {
			log.Fatalf("state mismatch : key = %s", hexutils.BytesToHex(key))
		}
	}
	log.Infof("number of states that did not match: %d", failCount)
}

// keysOfBucket returns up to rowLimit keys of the given bucket, along with the
// size of each key's value.
func keysOfBucket(dbNameWithPath string, bucketName []byte, rowLimit uint64) ([][]byte, []uint64) {
	// Open the raw database file. If the file is busy, then exit.
	db, openErr := bolt.Open(dbNameWithPath, 0600, &bolt.Options{Timeout: 1 * time.Second})
	if openErr != nil {
		log.Fatalf("could not open db while getting keys of a bucket, %v", openErr)
	}

	// Make sure we close the database before ejecting out of this function.
	defer func() {
		closeErr := db.Close()
		if closeErr != nil {
			log.Fatalf("could not close db while getting keys of a bucket, %v", closeErr)
		}
	}()

	// Get the keys of the given bucket, up to rowLimit.
	var keys [][]byte
	var sizes []uint64
	if viewErr := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(bucketName)
		c := b.Cursor()
		count := uint64(0)
		for k, v := c.First(); k != nil; k, v = c.Next() {
			if count >= rowLimit {
				return nil
			}
			// Copy the key: slices returned by bolt are only valid for the
			// lifetime of the transaction.
			actualKey := make([]byte, len(k))
			copy(actualKey, k)
			keys = append(keys, actualKey)
			sizes = append(sizes, uint64(len(v)))
			count++
		}
		return nil
	}); viewErr != nil {
		log.Fatalf("could not read keys of bucket from db: %v", viewErr)
	}
	return keys, sizes
}

// sizeAndCountOfByteList returns the total size and the number of elements of
// a list of byte slices.
func sizeAndCountOfByteList(list [][]byte) (uint64, uint64) {
	size := uint64(0)
	count := uint64(0)
	for _, root := range list {
		size += uint64(len(root))
		count++
	}
	return size, count
}

// sizeAndCountOfUint64List returns the total size (8 bytes per element) and
// the number of elements of a list of uint64 values.
func sizeAndCountOfUint64List(list []uint64) (uint64, uint64) {
	count := uint64(len(list))
	return 8 * count, count
}

// sizeAndCountGeneric returns the total SSZ size and the number of elements
// for the supported list types. The error parameter lets callers pass in a
// (value, error) pair directly; on error, or for an unsupported type, zero
// values are returned.
func sizeAndCountGeneric(genericItems interface{}, err error) (uint64, uint64) {
	size := uint64(0)
	count := uint64(0)
	if err != nil {
		return size, count
	}

	switch items := genericItems.(type) {
	case []*ethpb.Eth1Data:
		for _, item := range items {
			size += uint64(item.SizeSSZ())
		}
		count = uint64(len(items))
	case []*ethpb.Validator:
		for _, item := range items {
			size += uint64(item.SizeSSZ())
		}
		count = uint64(len(items))
	case []*ethpb.PendingAttestation:
		for _, item := range items {
			size += uint64(item.SizeSSZ())
		}
		count = uint64(len(items))
	default:
		return 0, 0
	}

	return size, count
}