package blockchain

import (
	"context"
	"sync"
	"testing"
	"time"

	blockchainTesting "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
	"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
	"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/testing/assert"
	"github.com/prysmaticlabs/prysm/testing/require"
	"github.com/prysmaticlabs/prysm/testing/util"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

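// TestService_ReceiveBlock exercises ReceiveBlock across several block configurations,
// verifying that the state transition is applied and that attestations, voluntary exits,
// and state-feed notifications end up where expected.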
func TestService_ReceiveBlock(t *testing.T) {
	ctx := context.Background()

	genesis, keys := util.DeterministicGenesisState(t, 64)
	genFullBlock := func(t *testing.T, conf *util.BlockGenConfig, slot types.Slot) *ethpb.SignedBeaconBlock {
		blk, err := util.GenerateFullBlock(genesis, keys, conf, slot)
		assert.NoError(t, err)
		return blk
	}
	params.SetupTestConfigCleanup(t)
	bc := params.BeaconConfig().Copy()
	bc.ShardCommitteePeriod = 0 // Required so the voluntary exits test completes in a reasonable time.
	params.OverrideBeaconConfig(bc)

	type args struct {
		block *ethpb.SignedBeaconBlock
	}
	tests := []struct {
		name      string
		args      args
		wantedErr string
		check     func(*testing.T, *Service)
	}{
		{
			name: "applies block with state transition",
			args: args{
				block: genFullBlock(t, util.DefaultBlockGenConfig(), 2 /*slot*/),
			},
			check: func(t *testing.T, s *Service) {
				if hs := s.head.state.Slot(); hs != 2 {
					t.Errorf("Unexpected state slot. Got %d but wanted %d", hs, 2)
				}
				if bs := s.head.block.Block().Slot(); bs != 2 {
					t.Errorf("Unexpected head block slot. Got %d but wanted %d", bs, 2)
				}
			},
		},
		{
			name: "saves attestations to pool",
			args: args{
				block: genFullBlock(t,
					&util.BlockGenConfig{
						NumProposerSlashings: 0,
						NumAttesterSlashings: 0,
						NumAttestations:      2,
						NumDeposits:          0,
						NumVoluntaryExits:    0,
					},
					1, /*slot*/
				),
			},
			check: func(t *testing.T, s *Service) {
				if baCount := len(s.cfg.AttPool.BlockAttestations()); baCount != 2 {
					t.Errorf("Did not get the correct number of block attestations saved to the pool. "+
						"Got %d but wanted %d", baCount, 2)
				}
			},
		},
		{
			name: "updates exit pool",
			args: args{
				block: genFullBlock(t, &util.BlockGenConfig{
					NumProposerSlashings: 0,
					NumAttesterSlashings: 0,
					NumAttestations:      0,
					NumDeposits:          0,
					NumVoluntaryExits:    3,
				},
					1, /*slot*/
				),
			},
			check: func(t *testing.T, s *Service) {
				pending := s.cfg.ExitPool.PendingExits(genesis, 1, true /* no limit */)
				if len(pending) != 0 {
					t.Errorf(
						"Did not mark the correct number of exits. Got %d pending but wanted %d",
						len(pending),
						0,
					)
				}
			},
		},
		{
			name: "notifies block processed on state feed",
			args: args{
				block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
			},
			check: func(t *testing.T, s *Service) {
				if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
					t.Errorf("Received %d state notifications, expected at least 1", recvd)
				}
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			beaconDB := testDB.SetupDB(t)
			genesisBlockRoot := bytesutil.ToBytes32(nil)
			require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))

			opts := []Option{
				WithDatabase(beaconDB),
				WithForkChoiceStore(protoarray.New()),
				WithAttestationPool(attestations.NewPool()),
				WithExitPool(voluntaryexits.NewPool()),
				WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
				WithStateGen(stategen.New(beaconDB)),
				WithFinalizedStateAtStartUp(genesis),
			}
			s, err := NewService(ctx, opts...)
			require.NoError(t, err)
			require.NoError(t, s.saveGenesisData(ctx, genesis))
			root, err := tt.args.block.Block.HashTreeRoot()
			require.NoError(t, err)
			wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
			require.NoError(t, err)
			err = s.ReceiveBlock(ctx, wsb, root)
			if tt.wantedErr != "" {
				assert.ErrorContains(t, tt.wantedErr, err)
			} else {
				assert.NoError(t, err)
				tt.check(t, s)
			}
		})
	}
}

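// TestService_ReceiveBlockUpdateHead verifies that a block received from another
// goroutine updates the head and notifies the state feed, and that fork choice
// tracks both the genesis block and the new block.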
func TestService_ReceiveBlockUpdateHead(t *testing.T) {
	ctx := context.Background()
	genesis, keys := util.DeterministicGenesisState(t, 64)
	b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
	assert.NoError(t, err)
	beaconDB := testDB.SetupDB(t)
	genesisBlockRoot := bytesutil.ToBytes32(nil)
	require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
	opts := []Option{
		WithDatabase(beaconDB),
		WithForkChoiceStore(protoarray.New()),
		WithAttestationPool(attestations.NewPool()),
		WithExitPool(voluntaryexits.NewPool()),
		WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
		WithStateGen(stategen.New(beaconDB)),
	}

	s, err := NewService(ctx, opts...)
	require.NoError(t, err)
	require.NoError(t, s.saveGenesisData(ctx, genesis))
	root, err := b.Block.HashTreeRoot()
	require.NoError(t, err)
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		wsb, err := wrapper.WrappedSignedBeaconBlock(b)
		require.NoError(t, err)
		require.NoError(t, s.ReceiveBlock(ctx, wsb, root))
		wg.Done()
	}()
	wg.Wait()
	if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
		t.Errorf("Received %d state notifications, expected at least 1", recvd)
	}
	// Verify fork choice has processed the block: the node count covers the genesis block and the new block.
	assert.Equal(t, 2, s.cfg.ForkChoiceStore.NodeCount())
}

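// TestService_ReceiveBlockBatch checks that ReceiveBlockBatch applies the state
// transition for batched blocks and emits state-feed notifications.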
func TestService_ReceiveBlockBatch(t *testing.T) {
	ctx := context.Background()

	genesis, keys := util.DeterministicGenesisState(t, 64)
	genFullBlock := func(t *testing.T, conf *util.BlockGenConfig, slot types.Slot) *ethpb.SignedBeaconBlock {
		blk, err := util.GenerateFullBlock(genesis, keys, conf, slot)
		assert.NoError(t, err)
		return blk
	}

	type args struct {
		block *ethpb.SignedBeaconBlock
	}
	tests := []struct {
		name      string
		args      args
		wantedErr string
		check     func(*testing.T, *Service)
	}{
		{
			name: "applies block with state transition",
			args: args{
				block: genFullBlock(t, util.DefaultBlockGenConfig(), 2 /*slot*/),
			},
			check: func(t *testing.T, s *Service) {
				assert.Equal(t, types.Slot(2), s.head.state.Slot(), "Incorrect head state slot")
				assert.Equal(t, types.Slot(2), s.head.block.Block().Slot(), "Incorrect head block slot")
			},
		},
		{
			name: "notifies block processed on state feed",
			args: args{
				block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
			},
			check: func(t *testing.T, s *Service) {
				if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
					t.Errorf("Received %d state notifications, expected at least 1", recvd)
				}
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			beaconDB := testDB.SetupDB(t)
			opts := []Option{
				WithDatabase(beaconDB),
				WithForkChoiceStore(protoarray.New()),
				WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}),
				WithStateGen(stategen.New(beaconDB)),
			}
			s, err := NewService(ctx, opts...)
			require.NoError(t, err)
			err = s.saveGenesisData(ctx, genesis)
			require.NoError(t, err)
			root, err := tt.args.block.Block.HashTreeRoot()
			require.NoError(t, err)
			wsb, err := wrapper.WrappedSignedBeaconBlock(tt.args.block)
			require.NoError(t, err)
			blks := []interfaces.SignedBeaconBlock{wsb}
			roots := [][32]byte{root}
			err = s.ReceiveBlockBatch(ctx, blks, roots)
			if tt.wantedErr != "" {
				assert.ErrorContains(t, tt.wantedErr, err)
			} else {
				assert.NoError(t, err)
				tt.check(t, s)
			}
		})
	}
}

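// TestService_HasBlock confirms that HasBlock reports blocks present in the
// init-sync cache and in the database, but not unknown roots.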
func TestService_HasBlock(t *testing.T) {
	opts := testServiceOptsWithDB(t)
	opts = append(opts, WithStateNotifier(&blockchainTesting.MockStateNotifier{}))
	s, err := NewService(context.Background(), opts...)
	require.NoError(t, err)
	r := [32]byte{'a'}
	if s.HasBlock(context.Background(), r) {
		t.Error("Should not have block")
	}
	wsb, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
	require.NoError(t, err)
	require.NoError(t, s.saveInitSyncBlock(context.Background(), r, wsb))
	if !s.HasBlock(context.Background(), r) {
		t.Error("Should have block")
	}
	b := util.NewBeaconBlock()
	b.Block.Slot = 1
	util.SaveBlock(t, context.Background(), s.cfg.BeaconDB, b)
	r, err = b.Block.HashTreeRoot()
	require.NoError(t, err)
	require.Equal(t, true, s.HasBlock(context.Background(), r))
}

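// TestCheckSaveHotStateDB_Enabling verifies that once the chain is far enough
// behind finality, the service enters the mode that saves hot states in the DB.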
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
	opts := testServiceOptsWithDB(t)
	hook := logTest.NewGlobal()
	s, err := NewService(context.Background(), opts...)
	require.NoError(t, err)
	st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
	s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)

	require.NoError(t, s.checkSaveHotStateDB(context.Background()))
	assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
}

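// TestCheckSaveHotStateDB_Disabling verifies that the service exits
// hot-state-DB mode once the chain is near the head again.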
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
	hook := logTest.NewGlobal()
	opts := testServiceOptsWithDB(t)
	s, err := NewService(context.Background(), opts...)
	require.NoError(t, err)
	st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
	s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
	require.NoError(t, s.checkSaveHotStateDB(context.Background()))
	s.genesisTime = time.Now()

	require.NoError(t, s.checkSaveHotStateDB(context.Background()))
	assert.LogsContain(t, hook, "Exiting mode to save hot states in DB")
}

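// TestCheckSaveHotStateDB_Overflow ensures that with a genesis time of now
// (zero slots elapsed), the service does not enter hot-state-DB mode.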
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
	hook := logTest.NewGlobal()
	opts := testServiceOptsWithDB(t)
	s, err := NewService(context.Background(), opts...)
	require.NoError(t, err)
	s.genesisTime = time.Now()

	require.NoError(t, s.checkSaveHotStateDB(context.Background()))
	assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
}