package blockchain
import (
	"context"
	"reflect"
	"strings"
	"testing"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/go-ssz"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
)
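
// TestStore_OnAttestation checks that onAttestation rejects attestations whose data
// slot is not in the target epoch, whose target root is unknown to the database,
// whose target block has no saved pre state, and whose target epoch does not match
// the current epoch.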
func TestStore_OnAttestation(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	cfg := &Config{BeaconDB: db}
	service, err := NewService(ctx, cfg)
	if err != nil {
		t.Fatal(err)
	}

	_, err = blockTree1(db, []byte{'g'})
	if err != nil {
		t.Fatal(err)
	}

	BlkWithOutState := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 0}}
	if err := db.SaveBlock(ctx, BlkWithOutState); err != nil {
		t.Fatal(err)
	}
	BlkWithOutStateRoot, _ := ssz.HashTreeRoot(BlkWithOutState.Block)

	BlkWithStateBadAtt := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1}}
	if err := db.SaveBlock(ctx, BlkWithStateBadAtt); err != nil {
		t.Fatal(err)
	}
	BlkWithStateBadAttRoot, _ := ssz.HashTreeRoot(BlkWithStateBadAtt.Block)

	s, err := stateTrie.InitializeFromProto(&pb.BeaconState{})
	if err != nil {
		t.Fatal(err)
	}
	if err := s.SetSlot(100 * params.BeaconConfig().SlotsPerEpoch); err != nil {
		t.Fatal(err)
	}
	if err := service.beaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot); err != nil {
		t.Fatal(err)
	}

	BlkWithValidState := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 2}}
	if err := db.SaveBlock(ctx, BlkWithValidState); err != nil {
		t.Fatal(err)
	}
	BlkWithValidStateRoot, _ := ssz.HashTreeRoot(BlkWithValidState.Block)
	s, _ = stateTrie.InitializeFromProto(&pb.BeaconState{
		Fork: &pb.Fork{
			Epoch:           0,
			CurrentVersion:  params.BeaconConfig().GenesisForkVersion,
			PreviousVersion: params.BeaconConfig().GenesisForkVersion,
		},
		RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
	})
	if err := service.beaconDB.SaveState(ctx, s, BlkWithValidStateRoot); err != nil {
		t.Fatal(err)
	}

	tests := []struct {
		name          string
		a             *ethpb.Attestation
		s             *pb.BeaconState
		wantErr       bool
		wantErrString string
	}{
		{
			name:          "attestation's data slot not aligned with target vote",
			a:             &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{}}},
			s:             &pb.BeaconState{},
			wantErr:       true,
			wantErrString: "data slot is not in the same epoch as target 1 != 0",
		},
		{
			name:          "attestation's target root not in db",
			a:             &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: []byte{'A'}}}},
			s:             &pb.BeaconState{},
			wantErr:       true,
			wantErrString: "target root does not exist in db",
		},
		{
			name:          "no pre state for attestation's target block",
			a:             &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}},
			s:             &pb.BeaconState{},
			wantErr:       true,
			wantErrString: "pre state of target block 0 does not exist",
		},
		{
			name: "process attestation doesn't match current epoch",
			a: &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
				Root: BlkWithStateBadAttRoot[:]}}},
			s:             &pb.BeaconState{Slot: 100 * params.BeaconConfig().SlotsPerEpoch},
			wantErr:       true,
			wantErrString: "does not match current epoch",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, err := service.onAttestation(ctx, tt.a)
			if tt.wantErr {
				if !strings.Contains(err.Error(), tt.wantErrString) {
					t.Errorf("Store.onAttestation() error = %v, wantErr = %v", err, tt.wantErrString)
				}
			} else if err != nil {
				t.Error(err)
			}
		})
	}
}
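
// TestStore_SaveCheckpointState checks that getAttPreState returns checkpoint states
// advanced to the start slot of the checkpoint epoch and that repeated lookups are
// served from the checkpoint state cache.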
func TestStore_SaveCheckpointState(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	params.UseDemoBeaconConfig()

	cfg := &Config{BeaconDB: db}
	service, err := NewService(ctx, cfg)
	if err != nil {
		t.Fatal(err)
	}

	s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{
		Fork: &pb.Fork{
			Epoch:           0,
			CurrentVersion:  params.BeaconConfig().GenesisForkVersion,
			PreviousVersion: params.BeaconConfig().GenesisForkVersion,
		},
		RandaoMixes:         make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
		StateRoots:          make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
		BlockRoots:          make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
		LatestBlockHeader:   &ethpb.BeaconBlockHeader{},
		JustificationBits:   []byte{0},
		Slashings:           make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
		FinalizedCheckpoint: &ethpb.Checkpoint{},
	})
	r := [32]byte{'g'}
	if err := service.beaconDB.SaveState(ctx, s, r); err != nil {
		t.Fatal(err)
	}
	service.justifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
	service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
	service.finalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
	service.prevFinalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}

	cp1 := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
	service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'}))
	s1, err := service.getAttPreState(ctx, cp1)
	if err != nil {
		t.Fatal(err)
	}
	if s1.Slot() != 1*params.BeaconConfig().SlotsPerEpoch {
		t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot())
	}

	cp2 := &ethpb.Checkpoint{Epoch: 2, Root: []byte{'B'}}
	service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'}))
	s2, err := service.getAttPreState(ctx, cp2)
	if err != nil {
		t.Fatal(err)
	}
	if s2.Slot() != 2*params.BeaconConfig().SlotsPerEpoch {
		t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot())
	}

	s1, err = service.getAttPreState(ctx, cp1)
	if err != nil {
		t.Fatal(err)
	}
	if s1.Slot() != 1*params.BeaconConfig().SlotsPerEpoch {
		t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot())
	}

	s1, err = service.checkpointState.StateByCheckpoint(cp1)
	if err != nil {
		t.Fatal(err)
	}
	if s1.Slot() != 1*params.BeaconConfig().SlotsPerEpoch {
		t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot())
	}

	s2, err = service.checkpointState.StateByCheckpoint(cp2)
	if err != nil {
		t.Fatal(err)
	}
	if s2.Slot() != 2*params.BeaconConfig().SlotsPerEpoch {
		t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot())
	}

	s.SetSlot(params.BeaconConfig().SlotsPerEpoch + 1)
	service.justifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
	service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
	service.finalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
	service.prevFinalizedCheckpt = &ethpb.Checkpoint{Root: r[:]}
	cp3 := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'C'}}
	service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'}))
	s3, err := service.getAttPreState(ctx, cp3)
	if err != nil {
		t.Fatal(err)
	}
	if s3.Slot() != s.Slot() {
		t.Errorf("Wanted state slot: %d, got: %d", s.Slot(), s3.Slot())
	}
}
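
// TestStore_UpdateCheckpointState checks that getAttPreState advances the saved base
// state to a newer checkpoint's start slot and caches the result by checkpoint.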
func TestStore_UpdateCheckpointState(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	cfg := &Config{BeaconDB: db}
	service, err := NewService(ctx, cfg)
	if err != nil {
		t.Fatal(err)
	}

	epoch := uint64(1)
	baseState, _ := testutil.DeterministicGenesisState(t, 1)
	baseState.SetSlot(epoch * params.BeaconConfig().SlotsPerEpoch)
	checkpoint := &ethpb.Checkpoint{Epoch: epoch}
	service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root))
	returned, err := service.getAttPreState(ctx, checkpoint)
	if err != nil {
		t.Fatal(err)
	}
	if baseState.Slot() != returned.Slot() {
		t.Error("Incorrectly returned base state")
	}

	cached, err := service.checkpointState.StateByCheckpoint(checkpoint)
	if err != nil {
		t.Fatal(err)
	}
	if cached == nil {
		t.Error("State should have been cached")
	}

	epoch = uint64(2)
	newCheckpoint := &ethpb.Checkpoint{Epoch: epoch}
	service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root))
	returned, err = service.getAttPreState(ctx, newCheckpoint)
	if err != nil {
		t.Fatal(err)
	}
	baseState, err = state.ProcessSlots(ctx, baseState, helpers.StartSlot(newCheckpoint.Epoch))
	if err != nil {
		t.Fatal(err)
	}
	if baseState.Slot() != returned.Slot() {
		t.Error("Incorrectly returned base state")
	}

	cached, err = service.checkpointState.StateByCheckpoint(newCheckpoint)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(returned, cached) {
		t.Error("Incorrectly cached base state")
	}
}
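
// TestAttEpoch_MatchPrevEpoch checks that verifyAttTargetEpoch accepts a target epoch
// that matches the previous epoch.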
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	cfg := &Config{BeaconDB: db}
	service, err := NewService(ctx, cfg)
	if err != nil {
		t.Fatal(err)
	}

	if err := service.verifyAttTargetEpoch(
		ctx,
		0,
		params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot,
		&ethpb.Checkpoint{}); err != nil {
		t.Error(err)
	}
}
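
// TestAttEpoch_MatchCurrentEpoch checks that verifyAttTargetEpoch accepts a target
// epoch that matches the current epoch.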
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	cfg := &Config{BeaconDB: db}
	service, err := NewService(ctx, cfg)
	if err != nil {
		t.Fatal(err)
	}

	if err := service.verifyAttTargetEpoch(
		ctx,
		0,
		params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot,
		&ethpb.Checkpoint{Epoch: 1}); err != nil {
		t.Error(err)
	}
}
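
// TestAttEpoch_NotMatch checks that verifyAttTargetEpoch rejects a target epoch that
// matches neither the current nor the previous epoch.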
func TestAttEpoch_NotMatch(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	cfg := &Config{BeaconDB: db}
	service, err := NewService(ctx, cfg)
	if err != nil {
		t.Fatal(err)
	}

	if err := service.verifyAttTargetEpoch(
		ctx,
		0,
		2*params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot,
		&ethpb.Checkpoint{}); !strings.Contains(err.Error(),
		"target epoch 0 does not match current epoch 2 or prev epoch 1") {
		t.Error("Did not receive wanted error")
	}
}
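
// TestVerifyBeaconBlock_NoBlock checks that verifyBeaconBlock errors when the
// attestation's beacon block root is not found in the database.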
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	cfg := &Config{BeaconDB: db}
	service, err := NewService(ctx, cfg)
	if err != nil {
		t.Fatal(err)
	}

	d := &ethpb.AttestationData{}
	if err := service.verifyBeaconBlock(ctx, d); !strings.Contains(err.Error(), "beacon block does not exist") {
		t.Error("Did not receive the wanted error")
	}
}
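
// TestVerifyBeaconBlock_futureBlock checks that verifyBeaconBlock rejects an
// attestation whose slot is earlier than the slot of the block it attests to.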
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	cfg := &Config{BeaconDB: db}
	service, err := NewService(ctx, cfg)
	if err != nil {
		t.Fatal(err)
	}

	b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 2}}
	service.beaconDB.SaveBlock(ctx, b)
	r, _ := ssz.HashTreeRoot(b.Block)
	d := &ethpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}

	if err := service.verifyBeaconBlock(ctx, d); !strings.Contains(err.Error(), "could not process attestation for future block") {
		t.Error("Did not receive the wanted error")
	}
}
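
// TestVerifyBeaconBlock_OK checks that verifyBeaconBlock accepts an attestation whose
// slot is not earlier than the referenced block's slot.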
func TestVerifyBeaconBlock_OK(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	cfg := &Config{BeaconDB: db}
	service, err := NewService(ctx, cfg)
	if err != nil {
		t.Fatal(err)
	}

	b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 2}}
	service.beaconDB.SaveBlock(ctx, b)
	r, _ := ssz.HashTreeRoot(b.Block)
	d := &ethpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}

	if err := service.verifyBeaconBlock(ctx, d); err != nil {
		t.Errorf("Did not expect an error: %v", err)
	}
}