Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git (synced 2024-12-28 14:17:17 +00:00)
Commit cc741ed8af, squashed commit message:
* begin state service
* begin on the state trie idea
* created beacon state structure
* add in the full clone getter
* return by value instead
* add all setters
* new state setters are being completed
* arrays roots exposed
* close to finishing all these headerssss
* functionality complete
* added in proto benchmark test
* test for compatibility
* add test for compat
* comments fixed
* add clone
* add clone
* remove underlying copies
* make it immutable
* integrate it into chainservice
* revert
* wrap up comments for package
* address all comments and godocs
* address all comments
* clone the pending attestation properly
* properly clone remaining items
* tests pass fixed bug
* begin using it instead of head state
* prevent nil pointer exceptions
* begin using new struct in db
* integrated new type into db package
* add proper nil checks
* using new state in archiver
* refactored much of core
* editing all the precompute functions
* done with most core refactor
* fixed up some bugs in the clone comparisons
* append current epoch atts
* add missing setters
* add new setters
* fix other core methods
* fix up transition
* main service and forkchoice
* fix rpc
* integrated to powchain
* some more changes
* fix build
* improve processing of deposits
* fix error
* prevent panic
* comment
* fix process att
* gaz
* fix up att process
* resolve existing review comments
* resolve another batch of gh comments
* resolve broken cpt state
* revise testutil to use the new state
* begin updating the state transition func to pass in more compartmentalized args
* finish editing transition function to return errors
* block operations pretty much done with refactor
* state transition fully refactored
* got epoch processing completed
* fix build in fork choice
* fixing more of the build
* fix up broken sync package
* it builds nowww it buildssss
* revert registry changes
* Recompute on Read (#4627)
* compute on read
* fix up eth1 data votes
* looking into slashings bug introduced in core/
* able to advance more slots
* add logging
* can now sync with testnet yay
* remove the leaves algorithm and other merkle imports
* expose initialize unsafe funcs
* Update beacon-chain/db/kv/state.go
* lint
  Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* More Optimizations for New State (#4641)
* map optimization
* more optimizations
* use a custom hasher
* comment
* block operations optimizations
* Update beacon-chain/state/types.go
  Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>
* fixed up various operations to use the validator index map access
  Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* archiver tests pass
* fixing cache tests
* cache tests passing
* edited validator tests
* powchain tests passing
* halfway thru sync tests
* more sync test fixes
* add in tests for state/
* working through rpc tests
* assignments tests passed
* almost done with rpc/beacon tests
* resolved painful validator test
* fixed up even more tests
* resolve tests
* fix build
* reduce a randao mixes copy
* fixes under //beacon-chain/blockchain/...
* build //beacon-chain/core/...
* fixes
* Runtime Optimizations (#4648)
* parallelize shuffling
* clean up
* lint
* fix build
* use callback to read from registry
* fix array roots and size map
* new improvements
* reduce hash allocs
* improved shuffling
* terence's review
* use different method
* raul's comment
* new array roots
* remove clone in pre-compute
* Update beacon-chain/state/types.go
  Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>
* raul's review
* lint
* fix build issues
* fix visibility
  Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* fix visibility
* build works for all
* fix blockchain test
* fix a few tests
* fix more tests
* update validator in slashing
* archiver passing
* fixed rpc/validator
* progress on core tests
* resolve broken rpc tests
* blockchain tests passed
* fix up some tests in core
* fix message diff
* remove unnecessary save
* Save validator after slashing
* Update validators one by one
* another update
* fix everything
* fix more precompute tests
* fix blocks tests
* more elegant fix
* more helper fixes
* change back ?
* fix test
* fix skip slot
* fix test
* reset caches
* fix testutil
* raceoff fixed
* passing
* Retrieve cached state in the beginning
* lint
* Fixed tests part 1
* Fixed rest of the tests
* Minor changes to avoid copying, small refactor to reduce deplicated code
* Handle att req for slot 0
* New beacon state: Only populate merkle layers as needed, copy merkle layers on copy/clone. (#4689)
* Only populate merkle layers as needed, copy merkle layers on copy/clone.
* use custom copy
* Make maps of correct size
* slightly fast, doesn't wait for lock
  Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
* Target root can't be 0x00
* Don't use cache for current slot (may not be the right fix)
* fixed up tests
* Remove some copy for init sync. Not sure if it is safe enough for runtime though... testing...
* Align with prev logic for process slots cachedState.Slot() < slot
* Fix Initial Sync Flag (#4692)
* fixes
* fix up some test failures due to lack of nil checks
* fix up some test failures due to lack of nil checks
* fix up imports
* revert some changes
* imports
  Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
* resolving further conflicts
* Better skip slot cache (#4694)
* Return copy of skip slot cache state, disable skip slot cache on sync
* fix
* Fix pruning
* fix up issues with broken tests

Co-authored-by: Nishant Das <nish1993@hotmail.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: shayzluf <thezluf@gmail.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
531 lines · 17 KiB · Go

package beacon

import (
    "bytes"
    "context"
    "fmt"
    "strconv"
    "strings"
    "testing"

    "github.com/gogo/protobuf/proto"
    ptypes "github.com/gogo/protobuf/types"
    "github.com/golang/mock/gomock"
    ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
    "github.com/prysmaticlabs/go-ssz"
    mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
    statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
    "github.com/prysmaticlabs/prysm/beacon-chain/flags"
    mockRPC "github.com/prysmaticlabs/prysm/beacon-chain/rpc/testing"
    stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
    pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
    "github.com/prysmaticlabs/prysm/shared/params"
)

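// TestServer_ListBlocks_NoResults verifies that ListBlocks returns an empty,
// zero-sized response rather than an error when slot or root filters match no
// stored blocks.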
func TestServer_ListBlocks_NoResults(t *testing.T) {
    db := dbTest.SetupDB(t)
    defer dbTest.TeardownDB(t, db)

    ctx := context.Background()
    bs := &Server{
        BeaconDB: db,
    }
    wanted := &ethpb.ListBlocksResponse{
        BlockContainers: make([]*ethpb.BeaconBlockContainer, 0),
        TotalSize:       int32(0),
        NextPageToken:   strconv.Itoa(0),
    }
    res, err := bs.ListBlocks(ctx, &ethpb.ListBlocksRequest{
        QueryFilter: &ethpb.ListBlocksRequest_Slot{
            Slot: 0,
        },
    })
    if err != nil {
        t.Fatal(err)
    }
    if !proto.Equal(wanted, res) {
        t.Errorf("Wanted %v, received %v", wanted, res)
    }
    res, err = bs.ListBlocks(ctx, &ethpb.ListBlocksRequest{
        QueryFilter: &ethpb.ListBlocksRequest_Slot{
            Slot: 0,
        },
    })
    if err != nil {
        t.Fatal(err)
    }
    if !proto.Equal(wanted, res) {
        t.Errorf("Wanted %v, received %v", wanted, res)
    }
    res, err = bs.ListBlocks(ctx, &ethpb.ListBlocksRequest{
        QueryFilter: &ethpb.ListBlocksRequest_Root{
            Root: make([]byte, 32),
        },
    })
    if err != nil {
        t.Fatal(err)
    }
    if !proto.Equal(wanted, res) {
        t.Errorf("Wanted %v, received %v", wanted, res)
    }
}

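// TestServer_ListBlocks_Genesis verifies the genesis filter: it errors when no
// genesis block exists, returns the saved genesis block and its root, and
// errors when more than one block occupies the genesis slot.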
func TestServer_ListBlocks_Genesis(t *testing.T) {
    db := dbTest.SetupDB(t)
    defer dbTest.TeardownDB(t, db)

    ctx := context.Background()
    bs := &Server{
        BeaconDB: db,
    }

    // Should throw an error if no genesis block is found.
    if _, err := bs.ListBlocks(ctx, &ethpb.ListBlocksRequest{
        QueryFilter: &ethpb.ListBlocksRequest_Genesis{
            Genesis: true,
        },
    }); err != nil && !strings.Contains(err.Error(), "Could not find genesis") {
        t.Fatal(err)
    }

    // Should return the proper genesis block if it exists.
    parentRoot := [32]byte{1, 2, 3}
    blk := &ethpb.SignedBeaconBlock{
        Block: &ethpb.BeaconBlock{
            Slot:       0,
            ParentRoot: parentRoot[:],
        },
    }
    root, err := ssz.HashTreeRoot(blk.Block)
    if err != nil {
        t.Fatal(err)
    }
    if err := db.SaveBlock(ctx, blk); err != nil {
        t.Fatal(err)
    }
    wanted := &ethpb.ListBlocksResponse{
        BlockContainers: []*ethpb.BeaconBlockContainer{
            {
                Block:     blk,
                BlockRoot: root[:],
            },
        },
        NextPageToken: "0",
        TotalSize:     1,
    }
    res, err := bs.ListBlocks(ctx, &ethpb.ListBlocksRequest{
        QueryFilter: &ethpb.ListBlocksRequest_Genesis{
            Genesis: true,
        },
    })
    if err != nil {
        t.Fatal(err)
    }
    if !proto.Equal(wanted, res) {
        t.Errorf("Wanted %v, received %v", wanted, res)
    }

    // Should throw an error if there is more than 1 block
    // for the genesis slot.
    if err := db.SaveBlock(ctx, blk); err != nil {
        t.Fatal(err)
    }
    if _, err := bs.ListBlocks(ctx, &ethpb.ListBlocksRequest{
        QueryFilter: &ethpb.ListBlocksRequest_Genesis{
            Genesis: true,
        },
    }); err != nil && !strings.Contains(err.Error(), "Found more than 1") {
        t.Fatal(err)
    }
}

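// TestServer_ListBlocks_Pagination saves 100 blocks and exercises slot, root,
// and epoch filters with varying page tokens and sizes, checking the returned
// containers, next page tokens, and total sizes.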
func TestServer_ListBlocks_Pagination(t *testing.T) {
    db := dbTest.SetupDB(t)
    defer dbTest.TeardownDB(t, db)
    ctx := context.Background()

    count := uint64(100)
    blks := make([]*ethpb.SignedBeaconBlock, count)
    blkContainers := make([]*ethpb.BeaconBlockContainer, count)
    for i := uint64(0); i < count; i++ {
        b := &ethpb.SignedBeaconBlock{
            Block: &ethpb.BeaconBlock{
                Slot: i,
            },
        }
        root, err := ssz.HashTreeRoot(b.Block)
        if err != nil {
            t.Fatal(err)
        }
        blks[i] = b
        blkContainers[i] = &ethpb.BeaconBlockContainer{Block: b, BlockRoot: root[:]}
    }
    if err := db.SaveBlocks(ctx, blks); err != nil {
        t.Fatal(err)
    }

    bs := &Server{
        BeaconDB: db,
    }

    root6, err := ssz.HashTreeRoot(blks[6].Block)
    if err != nil {
        t.Fatal(err)
    }

    tests := []struct {
        req *ethpb.ListBlocksRequest
        res *ethpb.ListBlocksResponse
    }{
        {req: &ethpb.ListBlocksRequest{
            PageToken:   strconv.Itoa(0),
            QueryFilter: &ethpb.ListBlocksRequest_Slot{Slot: 5},
            PageSize:    3},
            res: &ethpb.ListBlocksResponse{
                BlockContainers: []*ethpb.BeaconBlockContainer{{Block: &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 5}}, BlockRoot: blkContainers[5].BlockRoot}},
                NextPageToken:   "",
                TotalSize:       1}},
        {req: &ethpb.ListBlocksRequest{
            PageToken:   strconv.Itoa(0),
            QueryFilter: &ethpb.ListBlocksRequest_Root{Root: root6[:]},
            PageSize:    3},
            res: &ethpb.ListBlocksResponse{
                BlockContainers: []*ethpb.BeaconBlockContainer{{Block: &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 6}}, BlockRoot: blkContainers[6].BlockRoot}},
                TotalSize:       1}},
        {req: &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Root{Root: root6[:]}},
            res: &ethpb.ListBlocksResponse{
                BlockContainers: []*ethpb.BeaconBlockContainer{{Block: &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 6}}, BlockRoot: blkContainers[6].BlockRoot}},
                TotalSize:       1}},
        {req: &ethpb.ListBlocksRequest{
            PageToken:   strconv.Itoa(0),
            QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: 0},
            PageSize:    100},
            res: &ethpb.ListBlocksResponse{
                BlockContainers: blkContainers[0:params.BeaconConfig().SlotsPerEpoch],
                NextPageToken:   "",
                TotalSize:       int32(params.BeaconConfig().SlotsPerEpoch)}},
        {req: &ethpb.ListBlocksRequest{
            PageToken:   strconv.Itoa(1),
            QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: 5},
            PageSize:    3},
            res: &ethpb.ListBlocksResponse{
                BlockContainers: blkContainers[43:46],
                NextPageToken:   "2",
                TotalSize:       int32(params.BeaconConfig().SlotsPerEpoch)}},
        {req: &ethpb.ListBlocksRequest{
            PageToken:   strconv.Itoa(1),
            QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: 11},
            PageSize:    7},
            res: &ethpb.ListBlocksResponse{
                BlockContainers: blkContainers[95:96],
                NextPageToken:   "",
                TotalSize:       int32(params.BeaconConfig().SlotsPerEpoch)}},
        {req: &ethpb.ListBlocksRequest{
            PageToken:   strconv.Itoa(0),
            QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: 12},
            PageSize:    4},
            res: &ethpb.ListBlocksResponse{
                BlockContainers: blkContainers[96:100],
                NextPageToken:   "1",
                TotalSize:       int32(params.BeaconConfig().SlotsPerEpoch / 2)}},
    }

    for i, test := range tests {
        t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) {
            res, err := bs.ListBlocks(ctx, test.req)
            if err != nil {
                t.Fatal(err)
            }
            if !proto.Equal(res, test.res) {
                t.Errorf("Incorrect blocks response, wanted %v, received %v", test.res, res)
            }
        })
    }
}

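// TestServer_ListBlocks_Errors verifies that ListBlocks rejects page sizes
// above the configured maximum and requests with no filter, and that it
// returns empty results for filters that match nothing.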
func TestServer_ListBlocks_Errors(t *testing.T) {
    db := dbTest.SetupDB(t)
    defer dbTest.TeardownDB(t, db)
    ctx := context.Background()

    bs := &Server{BeaconDB: db}
    exceedsMax := int32(flags.Get().MaxPageSize + 1)

    wanted := fmt.Sprintf("Requested page size %d can not be greater than max size %d", exceedsMax, flags.Get().MaxPageSize)
    req := &ethpb.ListBlocksRequest{PageToken: strconv.Itoa(0), PageSize: exceedsMax}
    if _, err := bs.ListBlocks(ctx, req); !strings.Contains(err.Error(), wanted) {
        t.Errorf("Expected error %v, received %v", wanted, err)
    }

    wanted = "Must specify a filter criteria for fetching"
    req = &ethpb.ListBlocksRequest{}
    if _, err := bs.ListBlocks(ctx, req); !strings.Contains(err.Error(), wanted) {
        t.Errorf("Expected error %v, received %v", wanted, err)
    }

    req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Slot{Slot: 0}}
    res, err := bs.ListBlocks(ctx, req)
    if err != nil {
        t.Fatal(err)
    }
    if len(res.BlockContainers) != 0 {
        t.Errorf("wanted empty list, got a list of %d", len(res.BlockContainers))
    }
    if res.TotalSize != 0 {
        t.Errorf("wanted total size 0, got size %d", res.TotalSize)
    }

    req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Slot{}}
    res, err = bs.ListBlocks(ctx, req)
    if err != nil {
        t.Fatal(err)
    }
    if len(res.BlockContainers) != 0 {
        t.Errorf("wanted empty list, got a list of %d", len(res.BlockContainers))
    }
    if res.TotalSize != 0 {
        t.Errorf("wanted total size 0, got size %d", res.TotalSize)
    }

    req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Root{Root: []byte{'A'}}}
    res, err = bs.ListBlocks(ctx, req)
    if err != nil {
        t.Fatal(err)
    }
    if len(res.BlockContainers) != 0 {
        t.Errorf("wanted empty list, got a list of %d", len(res.BlockContainers))
    }
    if res.TotalSize != 0 {
        t.Errorf("wanted total size 0, got size %d", res.TotalSize)
    }

    req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Root{Root: []byte{'A'}}}
    res, err = bs.ListBlocks(ctx, req)
    if err != nil {
        t.Fatal(err)
    }
    if len(res.BlockContainers) != 0 {
        t.Errorf("wanted empty list, got a list of %d", len(res.BlockContainers))
    }
    if res.TotalSize != 0 {
        t.Errorf("wanted total size 0, got size %d", res.TotalSize)
    }
}

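// TestServer_GetChainHead_NoFinalizedBlock verifies that GetChainHead errors
// when the state's finalized checkpoint references a block missing from the
// database.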
func TestServer_GetChainHead_NoFinalizedBlock(t *testing.T) {
    db := dbTest.SetupDB(t)
    defer dbTest.TeardownDB(t, db)

    s, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
        Slot:                        1,
        PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 3, Root: []byte{'A'}},
        CurrentJustifiedCheckpoint:  &ethpb.Checkpoint{Epoch: 2, Root: []byte{'B'}},
        FinalizedCheckpoint:         &ethpb.Checkpoint{Epoch: 1, Root: []byte{'C'}},
    })
    if err != nil {
        t.Fatal(err)
    }

    bs := &Server{
        BeaconDB:    db,
        HeadFetcher: &mock.ChainService{Block: &ethpb.SignedBeaconBlock{}, State: s},
        FinalizationFetcher: &mock.ChainService{
            FinalizedCheckPoint:         s.FinalizedCheckpoint(),
            CurrentJustifiedCheckPoint:  s.CurrentJustifiedCheckpoint(),
            PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint()},
    }

    if _, err := bs.GetChainHead(context.Background(), nil); !strings.Contains(err.Error(), "Could not get finalized block") {
        t.Fatal("Did not get wanted error")
    }
}

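// TestServer_GetChainHead verifies that GetChainHead reports the slots,
// epochs, and block roots of the finalized, justified, and previous justified
// checkpoints taken from the head state.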
func TestServer_GetChainHead(t *testing.T) {
    db := dbTest.SetupDB(t)
    defer dbTest.TeardownDB(t, db)

    finalizedBlock := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1, ParentRoot: []byte{'A'}}}
    db.SaveBlock(context.Background(), finalizedBlock)
    fRoot, _ := ssz.HashTreeRoot(finalizedBlock.Block)
    justifiedBlock := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 2, ParentRoot: []byte{'B'}}}
    db.SaveBlock(context.Background(), justifiedBlock)
    jRoot, _ := ssz.HashTreeRoot(justifiedBlock.Block)
    prevJustifiedBlock := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 3, ParentRoot: []byte{'C'}}}
    db.SaveBlock(context.Background(), prevJustifiedBlock)
    pjRoot, _ := ssz.HashTreeRoot(prevJustifiedBlock.Block)

    s, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
        Slot:                        1,
        PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 3, Root: pjRoot[:]},
        CurrentJustifiedCheckpoint:  &ethpb.Checkpoint{Epoch: 2, Root: jRoot[:]},
        FinalizedCheckpoint:         &ethpb.Checkpoint{Epoch: 1, Root: fRoot[:]},
    })
    if err != nil {
        t.Fatal(err)
    }

    b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: s.PreviousJustifiedCheckpoint().Epoch*params.BeaconConfig().SlotsPerEpoch + 1}}
    bs := &Server{
        BeaconDB:    db,
        HeadFetcher: &mock.ChainService{Block: b, State: s},
        FinalizationFetcher: &mock.ChainService{
            FinalizedCheckPoint:         s.FinalizedCheckpoint(),
            CurrentJustifiedCheckPoint:  s.CurrentJustifiedCheckpoint(),
            PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint()},
    }

    head, err := bs.GetChainHead(context.Background(), nil)
    if err != nil {
        t.Fatal(err)
    }
    if head.PreviousJustifiedEpoch != 3 {
        t.Errorf("Wanted PreviousJustifiedEpoch: %d, got: %d", 3, head.PreviousJustifiedEpoch)
    }
    if head.JustifiedEpoch != 2 {
        t.Errorf("Wanted JustifiedEpoch: %d, got: %d", 2, head.JustifiedEpoch)
    }
    if head.FinalizedEpoch != 1 {
        t.Errorf("Wanted FinalizedEpoch: %d, got: %d", 1, head.FinalizedEpoch)
    }
    if head.PreviousJustifiedSlot != 3 {
        t.Errorf("Wanted PreviousJustifiedSlot: %d, got: %d", 3, head.PreviousJustifiedSlot)
    }
    if head.JustifiedSlot != 2 {
        t.Errorf("Wanted JustifiedSlot: %d, got: %d", 2, head.JustifiedSlot)
    }
    if head.FinalizedSlot != 1 {
        t.Errorf("Wanted FinalizedSlot: %d, got: %d", 1, head.FinalizedSlot)
    }
    if !bytes.Equal(pjRoot[:], head.PreviousJustifiedBlockRoot) {
        t.Errorf("Wanted PreviousJustifiedBlockRoot: %v, got: %v", pjRoot[:], head.PreviousJustifiedBlockRoot)
    }
    if !bytes.Equal(jRoot[:], head.JustifiedBlockRoot) {
        t.Errorf("Wanted JustifiedBlockRoot: %v, got: %v", jRoot[:], head.JustifiedBlockRoot)
    }
    if !bytes.Equal(fRoot[:], head.FinalizedBlockRoot) {
        t.Errorf("Wanted FinalizedBlockRoot: %v, got: %v", fRoot[:], head.FinalizedBlockRoot)
    }
}

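// TestServer_StreamChainHead_ContextCanceled verifies that StreamChainHead
// returns a "Context canceled" error once the server context is canceled.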
func TestServer_StreamChainHead_ContextCanceled(t *testing.T) {
    db := dbTest.SetupDB(t)
    defer dbTest.TeardownDB(t, db)
    ctx := context.Background()

    ctx, cancel := context.WithCancel(ctx)
    chainService := &mock.ChainService{}
    server := &Server{
        Ctx:           ctx,
        StateNotifier: chainService.StateNotifier(),
        BeaconDB:      db,
    }

    exitRoutine := make(chan bool)
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()
    mockStream := mockRPC.NewMockBeaconChain_StreamChainHeadServer(ctrl)
    mockStream.EXPECT().Context().Return(ctx)
    go func(tt *testing.T) {
        if err := server.StreamChainHead(&ptypes.Empty{}, mockStream); !strings.Contains(err.Error(), "Context canceled") {
            tt.Errorf("Could not call RPC method: %v", err)
        }
        <-exitRoutine
    }(t)
    cancel()
    exitRoutine <- true
}

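// TestServer_StreamChainHead_OnHeadUpdated verifies that StreamChainHead sends
// a fully populated ChainHead message on the stream after a BlockProcessed
// event fires on the state feed.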
func TestServer_StreamChainHead_OnHeadUpdated(t *testing.T) {
    db := dbTest.SetupDB(t)
    defer dbTest.TeardownDB(t, db)

    finalizedBlock := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 1, ParentRoot: []byte{'A'}}}
    db.SaveBlock(context.Background(), finalizedBlock)
    fRoot, _ := ssz.HashTreeRoot(finalizedBlock.Block)
    justifiedBlock := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 2, ParentRoot: []byte{'B'}}}
    db.SaveBlock(context.Background(), justifiedBlock)
    jRoot, _ := ssz.HashTreeRoot(justifiedBlock.Block)
    prevJustifiedBlock := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 3, ParentRoot: []byte{'C'}}}
    db.SaveBlock(context.Background(), prevJustifiedBlock)
    pjRoot, _ := ssz.HashTreeRoot(prevJustifiedBlock.Block)

    s, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
        Slot:                        1,
        PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 3, Root: pjRoot[:]},
        CurrentJustifiedCheckpoint:  &ethpb.Checkpoint{Epoch: 2, Root: jRoot[:]},
        FinalizedCheckpoint:         &ethpb.Checkpoint{Epoch: 1, Root: fRoot[:]},
    })
    if err != nil {
        t.Fatal(err)
    }

    b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: s.PreviousJustifiedCheckpoint().Epoch*params.BeaconConfig().SlotsPerEpoch + 1}}
    hRoot, _ := ssz.HashTreeRoot(b.Block)

    chainService := &mock.ChainService{}
    ctx := context.Background()
    server := &Server{
        Ctx:           ctx,
        HeadFetcher:   &mock.ChainService{Block: b, State: s},
        BeaconDB:      db,
        StateNotifier: chainService.StateNotifier(),
        FinalizationFetcher: &mock.ChainService{
            FinalizedCheckPoint:         s.FinalizedCheckpoint(),
            CurrentJustifiedCheckPoint:  s.CurrentJustifiedCheckpoint(),
            PreviousJustifiedCheckPoint: s.PreviousJustifiedCheckpoint()},
    }
    exitRoutine := make(chan bool)
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()
    mockStream := mockRPC.NewMockBeaconChain_StreamChainHeadServer(ctrl)
    mockStream.EXPECT().Send(
        &ethpb.ChainHead{
            HeadSlot:                   b.Block.Slot,
            HeadEpoch:                  helpers.SlotToEpoch(b.Block.Slot),
            HeadBlockRoot:              hRoot[:],
            FinalizedSlot:              1,
            FinalizedEpoch:             1,
            FinalizedBlockRoot:         fRoot[:],
            JustifiedSlot:              2,
            JustifiedEpoch:             2,
            JustifiedBlockRoot:         jRoot[:],
            PreviousJustifiedSlot:      3,
            PreviousJustifiedEpoch:     3,
            PreviousJustifiedBlockRoot: pjRoot[:],
        },
    ).Do(func(arg0 interface{}) {
        exitRoutine <- true
    })
    mockStream.EXPECT().Context().Return(ctx).AnyTimes()

    go func(tt *testing.T) {
        if err := server.StreamChainHead(&ptypes.Empty{}, mockStream); err != nil {
            tt.Errorf("Could not call RPC method: %v", err)
        }
    }(t)

    // Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
    for sent := 0; sent == 0; {
        sent = server.StateNotifier.StateFeed().Send(&feed.Event{
            Type: statefeed.BlockProcessed,
            Data: &statefeed.BlockProcessedData{},
        })
    }
    <-exitRoutine
}