Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git, synced 2025-01-19 08:14:15 +00:00
81c53c26fb Update seed domains (#3872)
297 lines
9.6 KiB
Go
package archiver

import (
	"context"
	"fmt"
	"io/ioutil"
	"reflect"
	"testing"

	"github.com/gogo/protobuf/proto"
	"github.com/prysmaticlabs/go-bitfield"
	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	"github.com/sirupsen/logrus"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

func init() {
	logrus.SetLevel(logrus.DebugLevel)
	logrus.SetOutput(ioutil.Discard)
	params.OverrideBeaconConfig(params.MinimalSpecConfig())
}
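
// TestArchiverService_ReceivesNewChainHeadEvent verifies that the service logs the new
// chain head event along with the received head root.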
func TestArchiverService_ReceivesNewChainHeadEvent(t *testing.T) {
	hook := logTest.NewGlobal()
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: &pb.BeaconState{Slot: 1},
	}
	headRoot := [32]byte{1, 2, 3}
	triggerNewHeadEvent(t, svc, headRoot)
	testutil.AssertLogsContain(t, hook, fmt.Sprintf("%#x", headRoot))
	testutil.AssertLogsContain(t, hook, "New chain head event")
}
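
// TestArchiverService_OnlyArchiveAtEpochEnd verifies that no archival logs are emitted
// when the head slot does not fall at the end of an epoch.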
func TestArchiverService_OnlyArchiveAtEpochEnd(t *testing.T) {
	hook := logTest.NewGlobal()
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	// The head state is NOT an epoch end.
	svc.headFetcher = &mock.ChainService{
		State: &pb.BeaconState{Slot: params.BeaconConfig().SlotsPerEpoch - 3},
	}
	triggerNewHeadEvent(t, svc, [32]byte{})

	// The context should have been canceled.
	if svc.ctx.Err() != context.Canceled {
		t.Error("context was not canceled")
	}
	testutil.AssertLogsContain(t, hook, "New chain head event")
	// The service should ONLY log any archival logs if we receive a
	// head slot that is an epoch end.
	testutil.AssertLogsDoNotContain(t, hook, "Successfully archived")
}
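
// TestArchiverService_ComputesAndSavesParticipation verifies that validator participation
// for the current epoch is computed and archived when the head reaches an epoch end.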
func TestArchiverService_ComputesAndSavesParticipation(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState := setupState(t, validatorCount)
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	triggerNewHeadEvent(t, svc, [32]byte{})

	attestedBalance := uint64(1)
	currentEpoch := helpers.CurrentEpoch(headState)
	wanted := &ethpb.ValidatorParticipation{
		VotedEther:              attestedBalance,
		EligibleEther:           validatorCount * params.BeaconConfig().MaxEffectiveBalance,
		GlobalParticipationRate: float32(attestedBalance) / float32(validatorCount*params.BeaconConfig().MaxEffectiveBalance),
	}

	retrieved, err := svc.beaconDB.ArchivedValidatorParticipation(svc.ctx, currentEpoch)
	if err != nil {
		t.Fatal(err)
	}

	if !proto.Equal(wanted, retrieved) {
		t.Errorf("Wanted participation for epoch %d %v, retrieved %v", currentEpoch, wanted, retrieved)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}
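
// TestArchiverService_SavesIndicesAndBalances verifies that the head state's validator
// balances are archived for the current epoch.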
func TestArchiverService_SavesIndicesAndBalances(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState := setupState(t, validatorCount)
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	triggerNewHeadEvent(t, svc, [32]byte{})

	retrieved, err := svc.beaconDB.ArchivedBalances(svc.ctx, helpers.CurrentEpoch(headState))
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(headState.Balances, retrieved) {
		t.Errorf(
			"Wanted balances for epoch %d %v, retrieved %v",
			helpers.CurrentEpoch(headState),
			headState.Balances,
			retrieved,
		)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}
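
// TestArchiverService_SavesCommitteeInfo verifies that the proposer seed, attester seed,
// and committee count for the current epoch are archived.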
func TestArchiverService_SavesCommitteeInfo(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState := setupState(t, validatorCount)
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	triggerNewHeadEvent(t, svc, [32]byte{})

	currentEpoch := helpers.CurrentEpoch(headState)
	committeeCount, err := helpers.CommitteeCountAtSlot(headState, helpers.StartSlot(currentEpoch))
	if err != nil {
		t.Fatal(err)
	}
	proposerSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconProposer)
	if err != nil {
		t.Fatal(err)
	}
	attesterSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconAttester)
	if err != nil {
		t.Fatal(err)
	}
	wanted := &ethpb.ArchivedCommitteeInfo{
		ProposerSeed:   proposerSeed[:],
		AttesterSeed:   attesterSeed[:],
		CommitteeCount: committeeCount * params.BeaconConfig().SlotsPerEpoch,
	}

	retrieved, err := svc.beaconDB.ArchivedCommitteeInfo(svc.ctx, helpers.CurrentEpoch(headState))
	if err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(wanted, retrieved) {
		t.Errorf(
			"Wanted committee info for epoch %d %v, retrieved %v",
			helpers.CurrentEpoch(headState),
			wanted,
			retrieved,
		)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}
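
// TestArchiverService_SavesActivatedValidatorChanges verifies that newly activated
// validator indices are recorded in the archived active validator changes.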
func TestArchiverService_SavesActivatedValidatorChanges(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState := setupState(t, validatorCount)
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	currentEpoch := helpers.CurrentEpoch(headState)
	delayedActEpoch := helpers.DelayedActivationExitEpoch(currentEpoch)
	headState.Validators[4].ActivationEpoch = delayedActEpoch
	headState.Validators[5].ActivationEpoch = delayedActEpoch
	triggerNewHeadEvent(t, svc, [32]byte{})

	retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, currentEpoch)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(retrieved.Activated, []uint64{4, 5}) {
		t.Errorf("Wanted indices 4 5 activated, received %v", retrieved.Activated)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}
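
// TestArchiverService_SavesSlashedValidatorChanges verifies that slashed validator
// indices are recorded in the archived active validator changes.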
func TestArchiverService_SavesSlashedValidatorChanges(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState := setupState(t, validatorCount)
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	currentEpoch := helpers.CurrentEpoch(headState)
	headState.Validators[95].Slashed = true
	headState.Validators[96].Slashed = true
	triggerNewHeadEvent(t, svc, [32]byte{})

	retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, currentEpoch)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(retrieved.Slashed, []uint64{95, 96}) {
		t.Errorf("Wanted indices 95, 96 slashed, received %v", retrieved.Slashed)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}
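
// TestArchiverService_SavesExitedValidatorChanges verifies that exited validator
// indices are recorded in the archived active validator changes.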
func TestArchiverService_SavesExitedValidatorChanges(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState := setupState(t, validatorCount)
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	currentEpoch := helpers.CurrentEpoch(headState)
	headState.Validators[95].ExitEpoch = currentEpoch + 1
	headState.Validators[95].WithdrawableEpoch = currentEpoch + 1 + params.BeaconConfig().MinValidatorWithdrawabilityDelay
	triggerNewHeadEvent(t, svc, [32]byte{})

	retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, currentEpoch)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(retrieved.Exited, []uint64{95}) {
		t.Errorf("Wanted indices 95 exited, received %v", retrieved.Exited)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}
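
// setupState returns a beacon state at the last slot of an epoch with validatorCount
// validators at max effective balance and a single pending attestation, simulating
// participation for the archiver to aggregate.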
func setupState(t *testing.T, validatorCount uint64) *pb.BeaconState {
	validators := make([]*ethpb.Validator, validatorCount)
	balances := make([]uint64, validatorCount)
	for i := 0; i < len(validators); i++ {
		validators[i] = &ethpb.Validator{
			ExitEpoch:         params.BeaconConfig().FarFutureEpoch,
			WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
			EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
		}
		balances[i] = params.BeaconConfig().MaxEffectiveBalance
	}

	atts := []*pb.PendingAttestation{{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{}}}}

	// We initialize a head state that has attestations from participated
	// validators in a simulated fashion.
	return &pb.BeaconState{
		Slot:                       (2 * params.BeaconConfig().SlotsPerEpoch) - 1,
		Validators:                 validators,
		Balances:                   balances,
		BlockRoots:                 make([][]byte, 128),
		Slashings:                  []uint64{0, 1e9, 1e9},
		RandaoMixes:                make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
		CurrentEpochAttestations:   atts,
		FinalizedCheckpoint:        &ethpb.Checkpoint{},
		JustificationBits:          bitfield.Bitvector4{0x00},
		CurrentJustifiedCheckpoint: &ethpb.Checkpoint{},
	}
}
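
// setupService constructs an archiver Service backed by a test database, returning both
// so callers can tear the database down when finished.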
func setupService(t *testing.T) (*Service, db.Database) {
	beaconDB := dbutil.SetupDB(t)
	ctx, cancel := context.WithCancel(context.Background())
	return &Service{
		beaconDB:        beaconDB,
		ctx:             ctx,
		cancel:          cancel,
		newHeadRootChan: make(chan [32]byte, 0),
		newHeadNotifier: &mock.ChainService{},
	}, beaconDB
}
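
// triggerNewHeadEvent runs the service, sends headRoot over newHeadRootChan, then stops
// the service and asserts its context was canceled.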
func triggerNewHeadEvent(t *testing.T, svc *Service, headRoot [32]byte) {
	exitRoutine := make(chan bool)
	go func() {
		svc.run(svc.ctx)
		<-exitRoutine
	}()

	svc.newHeadRootChan <- headRoot
	if err := svc.Stop(); err != nil {
		t.Fatal(err)
	}
	exitRoutine <- true

	// The context should have been canceled.
	if svc.ctx.Err() != context.Canceled {
		t.Error("context was not canceled")
	}
}