Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git, synced 2025-01-17 07:18:46 +00:00
cc741ed8af
* begin state service * begin on the state trie idea * created beacon state structure * add in the full clone getter * return by value instead * add all setters * new state setters are being completed * arrays roots exposed * close to finishing all these headerssss * functionality complete * added in proto benchmark test * test for compatibility * add test for compat * comments fixed * add clone * add clone * remove underlying copies * make it immutable * integrate it into chainservice * revert * wrap up comments for package * address all comments and godocs * address all comments * clone the pending attestation properly * properly clone remaining items * tests pass fixed bug * begin using it instead of head state * prevent nil pointer exceptions * begin using new struct in db * integrated new type into db package * add proper nil checks * using new state in archiver * refactored much of core * editing all the precompute functions * done with most core refactor * fixed up some bugs in the clone comparisons * append current epoch atts * add missing setters * add new setters * fix other core methods * fix up transition * main service and forkchoice * fix rpc * integrated to powchain * some more changes * fix build * improve processing of deposits * fix error * prevent panic * comment * fix process att * gaz * fix up att process * resolve existing review comments * resolve another batch of gh comments * resolve broken cpt state * revise testutil to use the new state * begin updating the state transition func to pass in more compartmentalized args * finish editing transition function to return errors * block operations pretty much done with refactor * state transition fully refactored * got epoch processing completed * fix build in fork choice * fixing more of the build * fix up broken sync package * it builds nowww it buildssss * revert registry changes * Recompute on Read (#4627) * compute on read * fix up eth1 data votes * looking into slashings bug introduced in core/ * able to advance more slots * add logging * can now sync with testnet yay * remove the leaves algorithm and other merkle imports * expose initialize unsafe funcs * Update beacon-chain/db/kv/state.go * lint Co-authored-by: Raul Jordan <raul@prysmaticlabs.com> * More Optimizations for New State (#4641) * map optimization * more optimizations * use a custom hasher * comment * block operations optimizations * Update beacon-chain/state/types.go Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com> * fixed up various operations to use the validator index map access Co-authored-by: Raul Jordan <raul@prysmaticlabs.com> * archiver tests pass * fixing cache tests * cache tests passing * edited validator tests * powchain tests passing * halfway thru sync tests * more sync test fixes * add in tests for state/ * working through rpc tests * assignments tests passed * almost done with rpc/beacon tests * resolved painful validator test * fixed up even more tests * resolve tests * fix build * reduce a randao mixes copy * fixes under //beacon-chain/blockchain/... * build //beacon-chain/core/... 
* fixes * Runtime Optimizations (#4648) * parallelize shuffling * clean up * lint * fix build * use callback to read from registry * fix array roots and size map * new improvements * reduce hash allocs * improved shuffling * terence's review * use different method * raul's comment * new array roots * remove clone in pre-compute * Update beacon-chain/state/types.go Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com> * raul's review * lint * fix build issues * fix visibility Co-authored-by: Raul Jordan <raul@prysmaticlabs.com> * fix visibility * build works for all * fix blockchain test * fix a few tests * fix more tests * update validator in slashing * archiver passing * fixed rpc/validator * progress on core tests * resolve broken rpc tests * blockchain tests passed * fix up some tests in core * fix message diff * remove unnecessary save * Save validator after slashing * Update validators one by one * another update * fix everything * fix more precompute tests * fix blocks tests * more elegant fix * more helper fixes * change back ? * fix test * fix skip slot * fix test * reset caches * fix testutil * raceoff fixed * passing * Retrieve cached state in the beginning * lint * Fixed tests part 1 * Fixed rest of the tests * Minor changes to avoid copying, small refactor to reduce deplicated code * Handle att req for slot 0 * New beacon state: Only populate merkle layers as needed, copy merkle layers on copy/clone. (#4689) * Only populate merkle layers as needed, copy merkle layers on copy/clone. * use custom copy * Make maps of correct size * slightly fast, doesn't wait for lock Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com> * Target root can't be 0x00 * Don't use cache for current slot (may not be the right fix) * fixed up tests * Remove some copy for init sync. Not sure if it is safe enough for runtime though... testing... * Align with prev logic for process slots cachedState.Slot() < slot * Fix Initial Sync Flag (#4692) * fixes * fix up some test failures due to lack of nil checks * fix up some test failures due to lack of nil checks * fix up imports * revert some changes * imports Co-authored-by: Raul Jordan <raul@prysmaticlabs.com> * resolving further conflicts * Better skip slot cache (#4694) * Return copy of skip slot cache state, disable skip slot cache on sync * fix * Fix pruning * fix up issues with broken tests Co-authored-by: Nishant Das <nish1993@hotmail.com> Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com> Co-authored-by: shayzluf <thezluf@gmail.com> Co-authored-by: terence tsao <terence@prysmaticlabs.com> Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
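The commit message above centers on the new state package: the beacon state lives behind a wrapper whose getters hand out copies and whose setters mutate under a lock, so callers never share or mutate the inner data directly. Below is a minimal, purely illustrative sketch of that copy-on-read idea; the names are invented for this example and it is not the actual beaconstate.BeaconState implementation.

// Hypothetical illustration of a copy-on-read state wrapper.
// Not Prysm code; types and names are invented for this sketch.
package example

import "sync"

// WrappedState guards a slot and a slice of block roots behind a lock.
// Getters return copies so callers can never mutate the shared inner data.
type WrappedState struct {
	lock       sync.RWMutex
	slot       uint64
	blockRoots [][]byte
}

// Slot returns the current slot value under a read lock.
func (w *WrappedState) Slot() uint64 {
	w.lock.RLock()
	defer w.lock.RUnlock()
	return w.slot
}

// BlockRoots returns a deep copy of the stored roots.
func (w *WrappedState) BlockRoots() [][]byte {
	w.lock.RLock()
	defer w.lock.RUnlock()
	out := make([][]byte, len(w.blockRoots))
	for i, r := range w.blockRoots {
		cp := make([]byte, len(r))
		copy(cp, r)
		out[i] = cp
	}
	return out
}

// SetSlot mutates the slot under the write lock.
func (w *WrappedState) SetSlot(s uint64) {
	w.lock.Lock()
	defer w.lock.Unlock()
	w.slot = s
}

The test file that follows exercises the real wrapper through constructors such as beaconstate.InitializeFromProto and accessors such as Slot() and CurrentJustifiedCheckpoint().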
648 lines
20 KiB
Go
package forkchoice

import (
	"bytes"
	"context"
	"reflect"
	"strings"
	"testing"
	"time"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/go-bitfield"
	"github.com/prysmaticlabs/go-ssz"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/stateutil"
)

func TestStore_OnBlock(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	genesisStateRoot, err := stateutil.HashTreeRootState(&pb.BeaconState{})
	if err != nil {
		t.Error(err)
	}
	genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
	if err := db.SaveBlock(ctx, genesis); err != nil {
		t.Error(err)
	}
	validGenesisRoot, err := ssz.HashTreeRoot(genesis.Block)
	if err != nil {
		t.Error(err)
	}
	if err := store.db.SaveState(ctx, &beaconstate.BeaconState{}, validGenesisRoot); err != nil {
		t.Fatal(err)
	}
	roots, err := blockTree1(db, validGenesisRoot[:])
	if err != nil {
		t.Fatal(err)
	}
	random := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1, ParentRoot: validGenesisRoot[:]}}
	if err := db.SaveBlock(ctx, random); err != nil {
		t.Error(err)
	}
	randomParentRoot, err := ssz.HashTreeRoot(random.Block)
	if err != nil {
		t.Error(err)
	}
	if err := store.db.SaveState(ctx, &beaconstate.BeaconState{}, randomParentRoot); err != nil {
		t.Fatal(err)
	}
	randomParentRoot2 := roots[1]
	if err := store.db.SaveState(ctx, &beaconstate.BeaconState{}, bytesutil.ToBytes32(randomParentRoot2)); err != nil {
		t.Fatal(err)
	}

	tests := []struct {
		name          string
		blk           *ethpb.BeaconBlock
		s             *pb.BeaconState
		time          uint64
		wantErrString string
	}{
		{
			name:          "parent block root does not have a state",
			blk:           &ethpb.BeaconBlock{},
			s:             &pb.BeaconState{},
			wantErrString: "pre state of slot 0 does not exist",
		},
		{
			name:          "block is from the future",
			blk:           &ethpb.BeaconBlock{ParentRoot: randomParentRoot[:], Slot: params.BeaconConfig().FarFutureEpoch},
			s:             &pb.BeaconState{},
			wantErrString: "could not process slot from the future",
		},
		{
			name:          "could not get finalized block",
			blk:           &ethpb.BeaconBlock{ParentRoot: randomParentRoot[:]},
			s:             &pb.BeaconState{},
			wantErrString: "block from slot 0 is not a descendent of the current finalized block",
		},
		{
			name:          "same slot as finalized block",
			blk:           &ethpb.BeaconBlock{Slot: 0, ParentRoot: randomParentRoot2},
			s:             &pb.BeaconState{},
			wantErrString: "block is equal or earlier than finalized block, slot 0 < slot 0",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := store.GenesisStore(ctx, &ethpb.Checkpoint{Root: validGenesisRoot[:]}, &ethpb.Checkpoint{Root: validGenesisRoot[:]}); err != nil {
				t.Fatal(err)
			}
			store.finalizedCheckpt.Root = roots[0]

			_, err := store.OnBlock(ctx, &ethpb.SignedBeaconBlock{Block: tt.blk})
			if !strings.Contains(err.Error(), tt.wantErrString) {
				t.Errorf("Store.OnBlock() error = %v, wantErr = %v", err, tt.wantErrString)
			}
		})
	}
}

func TestStore_SaveNewValidators(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)
	preCount := 2 // validators 0 and 1
	s, err := beaconstate.InitializeFromProto(&pb.BeaconState{Validators: []*ethpb.Validator{
		{PublicKey: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
		{PublicKey: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}},
		{PublicKey: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2}},
		{PublicKey: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3}},
	}})
	if err != nil {
		t.Fatal(err)
	}
	if err := store.saveNewValidators(ctx, preCount, s); err != nil {
		t.Fatal(err)
	}

	if !db.HasValidatorIndex(ctx, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2}) {
		t.Error("Wanted validator saved in db")
	}
	if !db.HasValidatorIndex(ctx, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3}) {
		t.Error("Wanted validator saved in db")
	}
	if db.HasValidatorIndex(ctx, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}) {
		t.Error("validator not supposed to be saved in db")
	}
}

func TestStore_SavesNewBlockAttestations(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)
	a1 := &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b101}}
	a2 := &ethpb.Attestation{Data: &ethpb.AttestationData{BeaconBlockRoot: []byte{'A'}}, AggregationBits: bitfield.Bitlist{0b110}}
	r1, _ := ssz.HashTreeRoot(a1.Data)
	r2, _ := ssz.HashTreeRoot(a2.Data)

	if err := store.saveNewBlockAttestations(ctx, []*ethpb.Attestation{a1, a2}); err != nil {
		t.Fatal(err)
	}

	saved, err := store.db.AttestationsByDataRoot(ctx, r1)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual([]*ethpb.Attestation{a1}, saved) {
		t.Error("did not retrieve saved attestation")
	}

	saved, err = store.db.AttestationsByDataRoot(ctx, r2)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual([]*ethpb.Attestation{a2}, saved) {
		t.Error("did not retrieve saved attestation")
	}

	a1 = &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b111}}
	a2 = &ethpb.Attestation{Data: &ethpb.AttestationData{BeaconBlockRoot: []byte{'A'}}, AggregationBits: bitfield.Bitlist{0b111}}

	if err := store.saveNewBlockAttestations(ctx, []*ethpb.Attestation{a1, a2}); err != nil {
		t.Fatal(err)
	}

	saved, err = store.db.AttestationsByDataRoot(ctx, r1)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual([]*ethpb.Attestation{a1}, saved) {
		t.Error("did not retrieve saved attestation")
	}

	saved, err = store.db.AttestationsByDataRoot(ctx, r2)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual([]*ethpb.Attestation{a2}, saved) {
		t.Error("did not retrieve saved attestation")
	}
}

func TestRemoveStateSinceLastFinalized(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	params.UseMinimalConfig()
	defer params.UseMainnetConfig()

	store := NewForkChoiceService(ctx, db)

	// Save 100 blocks in DB, each with a state.
	numBlocks := 100
	totalBlocks := make([]*ethpb.SignedBeaconBlock, numBlocks)
	blockRoots := make([][32]byte, 0)
	for i := 0; i < len(totalBlocks); i++ {
		totalBlocks[i] = &ethpb.SignedBeaconBlock{
			Block: &ethpb.BeaconBlock{
				Slot: uint64(i),
			},
		}
		r, err := ssz.HashTreeRoot(totalBlocks[i].Block)
		if err != nil {
			t.Fatal(err)
		}
		s, err := beaconstate.InitializeFromProto(&pb.BeaconState{Slot: uint64(i)})
		if err != nil {
			t.Fatal(err)
		}
		if err := store.db.SaveState(ctx, s, r); err != nil {
			t.Fatal(err)
		}
		if err := store.db.SaveBlock(ctx, totalBlocks[i]); err != nil {
			t.Fatal(err)
		}
		blockRoots = append(blockRoots, r)
		if err := store.db.SaveHeadBlockRoot(ctx, r); err != nil {
			t.Fatal(err)
		}
	}

	// New finalized epoch: 1
	finalizedEpoch := uint64(1)
	finalizedSlot := finalizedEpoch * params.BeaconConfig().SlotsPerEpoch
	endSlot := helpers.StartSlot(finalizedEpoch+1) - 1 // Inclusive
	if err := store.rmStatesOlderThanLastFinalized(ctx, 0, endSlot); err != nil {
		t.Fatal(err)
	}
	for _, r := range blockRoots {
		s, err := store.db.State(ctx, r)
		if err != nil {
			t.Fatal(err)
		}
		// Also verifies the genesis state didn't get deleted.
		if s != nil && s.Slot() != finalizedSlot && s.Slot() != 0 && s.Slot() < endSlot {
			t.Errorf("State with slot %d should not be in DB", s.Slot())
		}
	}

	// New finalized epoch: 5
	newFinalizedEpoch := uint64(5)
	newFinalizedSlot := newFinalizedEpoch * params.BeaconConfig().SlotsPerEpoch
	endSlot = helpers.StartSlot(newFinalizedEpoch+1) - 1 // Inclusive
	if err := store.rmStatesOlderThanLastFinalized(ctx, helpers.StartSlot(finalizedEpoch+1)-1, endSlot); err != nil {
		t.Fatal(err)
	}
	for _, r := range blockRoots {
		s, err := store.db.State(ctx, r)
		if err != nil {
			t.Fatal(err)
		}
		// Also verifies the genesis state didn't get deleted.
		if s != nil && s.Slot() != newFinalizedSlot && s.Slot() != finalizedSlot && s.Slot() != 0 && s.Slot() < endSlot {
			t.Errorf("State with slot %d should not be in DB", s.Slot())
		}
	}
}

func TestShouldUpdateJustified_ReturnTrue(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	params.UseMinimalConfig()
	defer params.UseMainnetConfig()

	store := NewForkChoiceService(ctx, db)
	store.genesisTime = uint64(time.Now().Unix())

	update, err := store.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{})
	if err != nil {
		t.Fatal(err)
	}
	if !update {
		t.Error("Should be able to update justified, received false")
	}

	lastJustifiedBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{ParentRoot: []byte{'G'}}}
	lastJustifiedRoot, _ := ssz.HashTreeRoot(lastJustifiedBlk.Block)
	newJustifiedBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1, ParentRoot: lastJustifiedRoot[:]}}
	newJustifiedRoot, _ := ssz.HashTreeRoot(newJustifiedBlk.Block)
	if err := store.db.SaveBlock(ctx, newJustifiedBlk); err != nil {
		t.Fatal(err)
	}
	if err := store.db.SaveBlock(ctx, lastJustifiedBlk); err != nil {
		t.Fatal(err)
	}

	diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
	store.genesisTime = uint64(time.Now().Unix()) - diff
	store.justifiedCheckpt = &ethpb.Checkpoint{Root: lastJustifiedRoot[:]}
	update, err = store.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
	if err != nil {
		t.Fatal(err)
	}
	if !update {
		t.Error("Should be able to update justified, received false")
	}
}

func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	params.UseMinimalConfig()
	defer params.UseMainnetConfig()

	store := NewForkChoiceService(ctx, db)

	lastJustifiedBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{ParentRoot: []byte{'G'}}}
	lastJustifiedRoot, _ := ssz.HashTreeRoot(lastJustifiedBlk.Block)
	newJustifiedBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{ParentRoot: lastJustifiedRoot[:]}}
	newJustifiedRoot, _ := ssz.HashTreeRoot(newJustifiedBlk.Block)
	if err := store.db.SaveBlock(ctx, newJustifiedBlk); err != nil {
		t.Fatal(err)
	}
	if err := store.db.SaveBlock(ctx, lastJustifiedBlk); err != nil {
		t.Fatal(err)
	}

	diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
	store.genesisTime = uint64(time.Now().Unix()) - diff
	store.justifiedCheckpt = &ethpb.Checkpoint{Root: lastJustifiedRoot[:]}

	update, err := store.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
	if err != nil {
		t.Fatal(err)
	}
	if update {
		t.Error("Should not be able to update justified, received true")
	}
}

func TestUpdateJustifiedCheckpoint_Update(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	params.UseMinimalConfig()
	defer params.UseMainnetConfig()

	store := NewForkChoiceService(ctx, db)
	store.genesisTime = uint64(time.Now().Unix())

	store.justifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
	store.bestJustifiedCheckpt = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'B'}}
	store.updateJustifiedCheckpoint()

	if !bytes.Equal(store.justifiedCheckpt.Root, []byte{'B'}) {
		t.Error("Justified checkpoint root did not update")
	}
}

func TestUpdateJustifiedCheckpoint_NoUpdate(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	params.UseMinimalConfig()
	defer params.UseMainnetConfig()

	store := NewForkChoiceService(ctx, db)
	store.genesisTime = uint64(time.Now().Unix()) - params.BeaconConfig().SecondsPerSlot

	store.justifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
	store.bestJustifiedCheckpt = &ethpb.Checkpoint{Epoch: 1, Root: []byte{'B'}}
	store.updateJustifiedCheckpoint()

	if bytes.Equal(store.justifiedCheckpt.Root, []byte{'B'}) {
		t.Error("Justified checkpoint root was not supposed to update")
	}
}

func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	// Save 5 blocks in DB, each with a state.
	numBlocks := 5
	totalBlocks := make([]*ethpb.SignedBeaconBlock, numBlocks)
	blockRoots := make([][32]byte, 0)
	for i := 0; i < len(totalBlocks); i++ {
		totalBlocks[i] = &ethpb.SignedBeaconBlock{
			Block: &ethpb.BeaconBlock{
				Slot: uint64(i),
			},
		}
		r, err := ssz.HashTreeRoot(totalBlocks[i].Block)
		if err != nil {
			t.Fatal(err)
		}
		s, err := beaconstate.InitializeFromProto(&pb.BeaconState{Slot: uint64(i)})
		if err != nil {
			t.Fatal(err)
		}
		if err := store.db.SaveState(ctx, s, r); err != nil {
			t.Fatal(err)
		}
		if err := store.db.SaveBlock(ctx, totalBlocks[i]); err != nil {
			t.Fatal(err)
		}
		blockRoots = append(blockRoots, r)
	}
	if err := store.db.SaveHeadBlockRoot(ctx, blockRoots[0]); err != nil {
		t.Fatal(err)
	}
	if err := store.rmStatesOlderThanLastFinalized(ctx, 10, 11); err != nil {
		t.Fatal(err)
	}
	// Since slots 5-10 are skip slots, the state for the block at slot 4 should be deleted.
	s, err := store.db.State(ctx, blockRoots[4])
	if err != nil {
		t.Fatal(err)
	}
	if s != nil {
		t.Error("Did not delete state for start slot")
	}
}

func TestCachedPreState_CanGetFromCache(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)
	s, err := beaconstate.InitializeFromProto(&pb.BeaconState{Slot: 1})
	if err != nil {
		t.Fatal(err)
	}
	r := [32]byte{'A'}
	b := &ethpb.BeaconBlock{Slot: 1, ParentRoot: r[:]}
	store.initSyncState[r] = s

	wanted := "pre state of slot 1 does not exist"
	if _, err := store.verifyBlkPreState(ctx, b); !strings.Contains(err.Error(), wanted) {
		t.Fatal("Not expected error")
	}
}

func TestCachedPreState_CanGetFromCacheWithFeature(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	config := &featureconfig.Flags{
		InitSyncCacheState: true,
	}
	featureconfig.Init(config)

	store := NewForkChoiceService(ctx, db)
	s, err := beaconstate.InitializeFromProto(&pb.BeaconState{Slot: 1})
	if err != nil {
		t.Fatal(err)
	}
	r := [32]byte{'A'}
	b := &ethpb.BeaconBlock{Slot: 1, ParentRoot: r[:]}
	store.initSyncState[r] = s

	received, err := store.verifyBlkPreState(ctx, b)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(s.InnerStateUnsafe(), received.InnerStateUnsafe()) {
		t.Error("cached state not the same")
	}
}

func TestCachedPreState_CanGetFromDB(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)
	r := [32]byte{'A'}
	b := &ethpb.BeaconBlock{Slot: 1, ParentRoot: r[:]}

	_, err := store.verifyBlkPreState(ctx, b)
	wanted := "pre state of slot 1 does not exist"
	if err.Error() != wanted {
		t.Error("Did not get wanted error")
	}

	s, err := beaconstate.InitializeFromProto(&pb.BeaconState{Slot: 1})
	if err != nil {
		t.Fatal(err)
	}
	if err := store.db.SaveState(ctx, s, r); err != nil {
		t.Fatal(err)
	}

	received, err := store.verifyBlkPreState(ctx, b)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(s, received) {
		t.Error("cached state not the same")
	}
}

func TestSaveInitState_CanSaveDelete(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	config := &featureconfig.Flags{
		InitSyncCacheState: true,
	}
	featureconfig.Init(config)

	for i := uint64(0); i < 64; i++ {
		b := &ethpb.BeaconBlock{Slot: i}
		s, err := beaconstate.InitializeFromProto(&pb.BeaconState{Slot: i})
		if err != nil {
			t.Fatal(err)
		}
		r, _ := ssz.HashTreeRoot(b)
		store.initSyncState[r] = s
	}

	// Set finalized root as slot 32.
	finalizedRoot, _ := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: 32})
	s, err := beaconstate.InitializeFromProto(&pb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{
		Epoch: 1,
		Root:  finalizedRoot[:],
	}})
	if err != nil {
		t.Fatal(err)
	}

	if err := store.saveInitState(ctx, s); err != nil {
		t.Fatal(err)
	}

	// Verify the finalized state is saved in DB.
	finalizedState, err := store.db.State(ctx, finalizedRoot)
	if err != nil {
		t.Fatal(err)
	}
	if finalizedState == nil {
		t.Error("finalized state can't be nil")
	}

	// Verify the cached state is properly pruned.
	if len(store.initSyncState) != int(params.BeaconConfig().SlotsPerEpoch) {
		t.Errorf("wanted: %d, got: %d", params.BeaconConfig().SlotsPerEpoch, len(store.initSyncState))
	}
}

func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)
	signedBlock := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
	if err := db.SaveBlock(ctx, signedBlock); err != nil {
		t.Fatal(err)
	}
	r, err := ssz.HashTreeRoot(signedBlock.Block)
	if err != nil {
		t.Fatal(err)
	}
	store.justifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
	store.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
	store.initSyncState[r] = &beaconstate.BeaconState{}
	if err := db.SaveState(ctx, &beaconstate.BeaconState{}, r); err != nil {
		t.Fatal(err)
	}

	// Could update.
	s, err := beaconstate.InitializeFromProto(&pb.BeaconState{
		CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: r[:]},
	})
	if err != nil {
		t.Fatal(err)
	}
	if err := store.updateJustified(context.Background(), s); err != nil {
		t.Fatal(err)
	}

	if store.bestJustifiedCheckpt.Epoch != s.CurrentJustifiedCheckpoint().Epoch {
		t.Error("Incorrect justified epoch in store")
	}

	// Could not update.
	store.bestJustifiedCheckpt.Epoch = 2
	if err := store.updateJustified(context.Background(), s); err != nil {
		t.Fatal(err)
	}

	if store.bestJustifiedCheckpt.Epoch != 2 {
		t.Error("Incorrect justified epoch in store")
	}
}

func TestFilterBlockRoots_CanFilter(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)
	fBlock := &ethpb.BeaconBlock{}
	fRoot, _ := ssz.HashTreeRoot(fBlock)
	hBlock := &ethpb.BeaconBlock{Slot: 1}
	headRoot, _ := ssz.HashTreeRoot(hBlock)
	if err := store.db.SaveBlock(ctx, &ethpb.SignedBeaconBlock{Block: fBlock}); err != nil {
		t.Fatal(err)
	}
	if err := store.db.SaveState(ctx, &beaconstate.BeaconState{}, fRoot); err != nil {
		t.Fatal(err)
	}
	if err := store.db.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: fRoot[:]}); err != nil {
		t.Fatal(err)
	}
	if err := store.db.SaveBlock(ctx, &ethpb.SignedBeaconBlock{Block: hBlock}); err != nil {
		t.Fatal(err)
	}
	if err := store.db.SaveState(ctx, &beaconstate.BeaconState{}, headRoot); err != nil {
		t.Fatal(err)
	}
	if err := store.db.SaveHeadBlockRoot(ctx, headRoot); err != nil {
		t.Fatal(err)
	}

	roots := [][32]byte{{'C'}, {'D'}, headRoot, {'E'}, fRoot, {'F'}}
	wanted := [][32]byte{{'C'}, {'D'}, {'E'}, {'F'}}

	received, err := store.filterBlockRoots(ctx, roots)
	if err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(wanted, received) {
		t.Error("Did not filter correctly")
	}
}