prysm-pulse/beacon-chain/operations/service_test.go
terence tsao 1b5b8a57e0 Remove unused proto schemas (#3005)
* Update io_kubernetes_build commit hash to 1246899

* Update dependency build_bazel_rules_nodejs to v0.33.1

* Update dependency com_github_hashicorp_golang_lru to v0.5.1

* Update libp2p

* Update io_bazel_rules_k8s commit hash to e68d5d7

* Starting to remove old protos

* Bazel build proto passes

* Fixing pb version

* Cleaned up core package

* Fixing tests

* 6 tests failing

* Update proto bugs

* Fixed incorrect validator ordering proto

* Sync with master

* Update go-ssz commit

* Removed bad copies from v1alpha1 folder

* add json spec json to pb handler

* add nested proto example

* proto/testing test works

* fix refactoring build failures

* use merged ssz

* push latest changes

* used forked json encoding

* used forked json encoding

* fix warning

* fix build issues

* fix test and lint

* fix build

* lint
2019-07-22 10:03:57 -04:00

369 lines
11 KiB
Go

package operations
import (
"context"
"errors"
"fmt"
"reflect"
"sort"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/internal"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
logTest "github.com/sirupsen/logrus/hooks/test"
)
// Ensure operations service implements interfaces.
var _ = OperationFeeds(&Service{})
// mockBroadcaster is a no-op broadcaster used to satisfy the Config.P2P
// dependency in tests without touching a real network.
type mockBroadcaster struct {
}
// Broadcast implements the broadcaster interface as a no-op; the message is
// discarded.
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) {
}
// TestStop_OK verifies that stopping the operations service emits the
// expected log entry and cancels the service's context.
func TestStop_OK(t *testing.T) {
	hook := logTest.NewGlobal()
	defer hook.Reset()

	svc := NewOpsPoolService(context.Background(), &Config{})
	if err := svc.Stop(); err != nil {
		t.Fatalf("Unable to stop operation service: %v", err)
	}

	const want = "Stopping service"
	if got := hook.LastEntry().Message; got != want {
		t.Errorf("incorrect log, expected %s, got %s", want, got)
	}
	// Stop must cancel the service context.
	if svc.ctx.Err() != context.Canceled {
		t.Error("context was not canceled")
	}
}
// TestServiceStatus_Error checks that Status mirrors the service's internal
// error field: nil initially, and the stored error once one is set.
func TestServiceStatus_Error(t *testing.T) {
	svc := NewOpsPoolService(context.Background(), &Config{})
	if status := svc.Status(); status != nil {
		t.Errorf("service status should be nil to begin with, got: %v", svc.error)
	}

	wantErr := errors.New("error error error")
	svc.error = wantErr
	if svc.Status() != wantErr {
		t.Error("service status did not return wanted err")
	}
}
// TestIncomingExits_Ok verifies that an incoming voluntary exit is handled
// without error and that the save is logged.
func TestIncomingExits_Ok(t *testing.T) {
	hook := logTest.NewGlobal()
	beaconDB := internal.SetupDB(t)
	defer internal.TeardownDB(t, beaconDB)
	service := NewOpsPoolService(context.Background(), &Config{BeaconDB: beaconDB})
	exit := &ethpb.VoluntaryExit{Epoch: 100}
	if err := service.HandleValidatorExits(context.Background(), exit); err != nil {
		t.Error(err)
	}
	// NOTE(review): fmt.Sprintf with no formatting verbs is unnecessary (a
	// plain string literal would do), but this is the file's only use of the
	// fmt import, so dropping it would orphan the import.
	want := fmt.Sprintf("Exit request saved in DB")
	testutil.AssertLogsContain(t, hook, want)
}
// TestIncomingAttestation_OK ensures an incoming attestation is accepted by
// the operations pool without error.
func TestIncomingAttestation_OK(t *testing.T) {
	beaconDB := internal.SetupDB(t)
	defer internal.TeardownDB(t, beaconDB)
	service := NewOpsPoolService(context.Background(), &Config{
		BeaconDB: beaconDB,
		P2P:      &mockBroadcaster{},
	})

	att := &ethpb.Attestation{
		AggregationBits: []byte{'A'},
		Data: &ethpb.AttestationData{
			Crosslink: &ethpb.Crosslink{
				Shard: 100,
			},
		},
	}
	if err := service.HandleAttestations(context.Background(), att); err != nil {
		t.Error(err)
	}
}
// TestRetrieveAttestations_OK seeds the pool with 140 attestations on
// distinct shards and checks that PendingAttestations at head slot 64
// returns the expected prefix of them.
func TestRetrieveAttestations_OK(t *testing.T) {
	helpers.ClearAllCaches()
	beaconDB := internal.SetupDB(t)
	defer internal.TeardownDB(t, beaconDB)
	service := NewOpsPoolService(context.Background(), &Config{BeaconDB: beaconDB})
	// Save 140 attestations for test. During 1st retrieval we should get slot:1 - slot:61 attestations.
	// The 1st retrieval is set at slot 64.
	// NOTE(review): the comment above mentions slots 1-61, but the assertion
	// below compares against the first 127 attestations — confirm which bound
	// is intended.
	origAttestations := make([]*ethpb.Attestation, 140)
	for i := 0; i < len(origAttestations); i++ {
		origAttestations[i] = &ethpb.Attestation{
			Data: &ethpb.AttestationData{
				Crosslink: &ethpb.Crosslink{
					Shard: uint64(i),
				},
				Source: &ethpb.Checkpoint{},
				Target: &ethpb.Checkpoint{},
			},
		}
		if err := service.beaconDB.SaveAttestation(context.Background(), origAttestations[i]); err != nil {
			t.Fatalf("Failed to save attestation: %v", err)
		}
	}
	// Head state at slot 64 with a zero-hash crosslink so the saved
	// attestations validate against the current crosslink record.
	if err := beaconDB.SaveState(context.Background(), &pb.BeaconState{
		Slot: 64,
		CurrentCrosslinks: []*ethpb.Crosslink{{
			StartEpoch: 0,
			DataRoot:   params.BeaconConfig().ZeroHash[:]}}}); err != nil {
		t.Fatal(err)
	}
	// Test we can retrieve attestations from slot1 - slot61.
	attestations, err := service.PendingAttestations(context.Background())
	if err != nil {
		t.Fatalf("Could not retrieve attestations: %v", err)
	}
	// Sort by shard so the comparison below is order-independent.
	sort.Slice(attestations, func(i, j int) bool {
		return attestations[i].Data.Crosslink.Shard < attestations[j].Data.Crosslink.Shard
	})
	if !reflect.DeepEqual(attestations, origAttestations[0:127]) {
		t.Error("Retrieved attestations did not match")
	}
}
// TestRetrieveAttestations_PruneInvalidAtts checks that retrieving pending
// attestations at a late head slot both returns only the still-valid tail of
// the pool and deletes the stale entries from the database.
func TestRetrieveAttestations_PruneInvalidAtts(t *testing.T) {
	helpers.ClearAllCaches()
	beaconDB := internal.SetupDB(t)
	defer internal.TeardownDB(t, beaconDB)
	service := NewOpsPoolService(context.Background(), &Config{BeaconDB: beaconDB})
	// Save 140 attestations for slots 0 to 139.
	origAttestations := make([]*ethpb.Attestation, 140)
	shardDiff := uint64(192)
	for i := 0; i < len(origAttestations); i++ {
		origAttestations[i] = &ethpb.Attestation{
			Data: &ethpb.AttestationData{
				Crosslink: &ethpb.Crosslink{
					// uint64(i) - shardDiff wraps modulo 2^64 for i < 192;
					// presumably intentional to produce shard values that the
					// pool treats as invalid — TODO confirm against
					// PendingAttestations' validity rules.
					Shard: uint64(i) - shardDiff,
				},
				Source: &ethpb.Checkpoint{},
				Target: &ethpb.Checkpoint{},
			},
		}
		if err := service.beaconDB.SaveAttestation(context.Background(), origAttestations[i]); err != nil {
			t.Fatalf("Failed to save attestation: %v", err)
		}
	}
	// At slot 200 only attestations up to from slot 137 to 139 are valid attestations.
	if err := beaconDB.SaveState(context.Background(), &pb.BeaconState{
		Slot: 200,
		CurrentCrosslinks: []*ethpb.Crosslink{{
			StartEpoch: 2,
			DataRoot:   params.BeaconConfig().ZeroHash[:]}}}); err != nil {
		t.Fatal(err)
	}
	attestations, err := service.PendingAttestations(context.Background())
	if err != nil {
		t.Fatalf("Could not retrieve attestations: %v", err)
	}
	if !reflect.DeepEqual(attestations, origAttestations[137:]) {
		t.Error("Incorrect pruned attestations")
	}
	// Verify the invalid attestations are deleted.
	hash, err := hashutil.HashProto(origAttestations[1])
	if err != nil {
		t.Fatal(err)
	}
	if service.beaconDB.HasAttestation(hash) {
		t.Error("Invalid attestation is not deleted")
	}
}
// TestRemoveProcessedAttestations_Ok verifies that pending attestations can
// be retrieved from the pool and that removing them empties the pool.
func TestRemoveProcessedAttestations_Ok(t *testing.T) {
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	s := NewOpsPoolService(context.Background(), &Config{BeaconDB: db})

	// Seed the pool with 10 attestations on distinct shards.
	atts := make([]*ethpb.Attestation, 10)
	for i := range atts {
		atts[i] = &ethpb.Attestation{
			Data: &ethpb.AttestationData{
				Crosslink: &ethpb.Crosslink{
					Shard: uint64(i),
				},
				Source: &ethpb.Checkpoint{},
				Target: &ethpb.Checkpoint{},
			},
		}
		if err := s.beaconDB.SaveAttestation(context.Background(), atts[i]); err != nil {
			t.Fatalf("Failed to save attestation: %v", err)
		}
	}

	if err := db.SaveState(context.Background(), &pb.BeaconState{
		Slot: 15,
		CurrentCrosslinks: []*ethpb.Crosslink{{
			StartEpoch: 0,
			DataRoot:   params.BeaconConfig().ZeroHash[:],
		}},
	}); err != nil {
		t.Fatal(err)
	}

	// All seeded attestations should come back as pending.
	got, err := s.PendingAttestations(context.Background())
	if err != nil {
		t.Fatalf("Could not retrieve attestations: %v", err)
	}
	if !reflect.DeepEqual(atts, got) {
		t.Error("Retrieved attestations did not match prev generated attestations")
	}

	// Removing them should leave the pool empty.
	if err := s.removePendingAttestations(atts); err != nil {
		t.Fatalf("Could not remove pending attestations: %v", err)
	}
	got, _ = s.PendingAttestations(context.Background())
	if len(got) != 0 {
		t.Errorf("Attestation pool should be empty but got a length of %d", len(got))
	}
}
// TestReceiveBlkRemoveOps_Ok verifies that handling a processed block removes
// the block's attestations from the pending operations pool.
func TestReceiveBlkRemoveOps_Ok(t *testing.T) {
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	s := NewOpsPoolService(context.Background(), &Config{BeaconDB: db})
	// Seed the pool with 10 attestations on distinct shards.
	attestations := make([]*ethpb.Attestation, 10)
	for i := 0; i < len(attestations); i++ {
		attestations[i] = &ethpb.Attestation{
			Data: &ethpb.AttestationData{
				Crosslink: &ethpb.Crosslink{
					Shard: uint64(i),
				},
				Source: &ethpb.Checkpoint{},
				Target: &ethpb.Checkpoint{},
			},
		}
		if err := s.beaconDB.SaveAttestation(context.Background(), attestations[i]); err != nil {
			t.Fatalf("Failed to save attestation: %v", err)
		}
	}
	if err := db.SaveState(context.Background(), &pb.BeaconState{
		Slot: 15,
		CurrentCrosslinks: []*ethpb.Crosslink{{
			StartEpoch: 0,
			DataRoot:   params.BeaconConfig().ZeroHash[:]}}}); err != nil {
		t.Fatal(err)
	}
	// Sanity check: everything seeded is pending before the block arrives.
	atts, _ := s.PendingAttestations(context.Background())
	if len(atts) != len(attestations) {
		t.Errorf("Attestation pool should be %d but got a length of %d",
			len(attestations), len(atts))
	}
	// A block carrying all the seeded attestations.
	block := &ethpb.BeaconBlock{
		Body: &ethpb.BeaconBlockBody{
			Attestations: attestations,
		},
	}
	// NOTE(review): this bare send assumes incomingProcessedBlock is buffered
	// (or drained elsewhere); otherwise the test would block — confirm the
	// channel's construction in the service.
	s.incomingProcessedBlock <- block
	if err := s.handleProcessedBlock(context.Background(), block); err != nil {
		t.Error(err)
	}
	// Processing the block must have pruned its attestations from the pool.
	atts, _ = s.PendingAttestations(context.Background())
	if len(atts) != 0 {
		t.Errorf("Attestation pool should be empty but got a length of %d", len(atts))
	}
}
// TestIsCanonical_CanGetCanonical checks that an attestation is reported
// canonical only while its target block is the chain head: canonical after
// the head is set to that block, and no longer canonical once the head moves
// to a different block. Currently skipped pending the DB revamp.
func TestIsCanonical_CanGetCanonical(t *testing.T) {
	t.Skip()
	// TODO(#2307): This will be irrelevant after the revamp of our DB package post v0.6.
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	s := NewOpsPoolService(context.Background(), &Config{BeaconDB: db})
	// Save a block and make it the chain head.
	cb1 := &ethpb.BeaconBlock{Slot: 999, ParentRoot: []byte{'A'}}
	if err := s.beaconDB.SaveBlock(cb1); err != nil {
		t.Fatal(err)
	}
	if err := s.beaconDB.UpdateChainHead(context.Background(), cb1, &pb.BeaconState{}); err != nil {
		t.Fatal(err)
	}
	r1, err := ssz.SigningRoot(cb1)
	if err != nil {
		t.Fatal(err)
	}
	// An attestation targeting the head block should be canonical.
	att1 := &ethpb.Attestation{Data: &ethpb.AttestationData{BeaconBlockRoot: r1[:]}}
	canonical, err := s.IsAttCanonical(context.Background(), att1)
	if err != nil {
		t.Fatal(err)
	}
	if !canonical {
		t.Error("Attestation should be canonical")
	}
	// Move the head to an unrelated block; the same attestation should no
	// longer be canonical.
	cb2 := &ethpb.BeaconBlock{Slot: 1000, ParentRoot: []byte{'B'}}
	if err := s.beaconDB.SaveBlock(cb2); err != nil {
		t.Fatal(err)
	}
	if err := s.beaconDB.UpdateChainHead(context.Background(), cb2, &pb.BeaconState{}); err != nil {
		t.Fatal(err)
	}
	canonical, err = s.IsAttCanonical(context.Background(), att1)
	if err != nil {
		t.Fatal(err)
	}
	if canonical {
		t.Error("Attestation should not be canonical")
	}
}
// TestIsCanonical_NilBlocks ensures IsAttCanonical reports false when the
// attestation's target block is missing and when no chain head has been set.
func TestIsCanonical_NilBlocks(t *testing.T) {
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	s := NewOpsPoolService(context.Background(), &Config{BeaconDB: db})

	// An attestation with an empty block root refers to no stored block.
	canonical, err := s.IsAttCanonical(context.Background(), &ethpb.Attestation{Data: &ethpb.AttestationData{}})
	if err != nil {
		t.Fatal(err)
	}
	if canonical {
		t.Error("Attestation shouldn't be canonical")
	}

	// Save a block but never update the chain head; an attestation pointing
	// at it still must not be canonical.
	blk := &ethpb.BeaconBlock{Slot: 999, ParentRoot: []byte{'A'}}
	if err := s.beaconDB.SaveBlock(blk); err != nil {
		t.Fatal(err)
	}
	root, err := ssz.SigningRoot(blk)
	if err != nil {
		t.Fatal(err)
	}
	att := &ethpb.Attestation{Data: &ethpb.AttestationData{BeaconBlockRoot: root[:]}}
	canonical, err = s.IsAttCanonical(context.Background(), att)
	if err != nil {
		t.Fatal(err)
	}
	if canonical {
		t.Error("Attestation shouldn't be canonical")
	}
}