prysm-pulse/beacon-chain/operations/service_test.go
Raul Jordan 053038446c
Allow 8 Validator Multinode Cluster to Run Indefinitely (#2050)
* plug forkchoice to blockchain service's block processing

* fixed tests

* more fixes...

* clean ups

* fixed test

* Update beacon-chain/blockchain/block_processing.go

* merged with 2006 and started fixing tests

* remove prints

* fixed tests

* lint

* include ops service

* if there's a skip slot, slot--

* fixed typo

* started working on test

* no fork choice in propose

* bleh, need to fix state generator first

* state gen takes input slot

* feedback

* fixed tests

* preston's feedback

* fmt

* removed extra logging

* add more logs

* fixed validator attest

* builds

* fixed save block

* children fix

* removed verbose logs

* fix fork choice

* right logs

* Add Prometheus Counter for Reorg (#2051)

* fetch every slot (#2052)

* test Fixes

* lint

* only regenerate state if there was a reorg

* better logging

* fixed seed

* better logging

* process skip slots in assignment requests

* fix lint

* disable state root computation

* filter attestations in regular sync

* log important items

* better info logs

* added spans to stategen

* span in stategen

* set validator deadline

* randao stuff

* disable sig verify

* lint

* lint

* save only using historical states

* use new goroutine for handling sync messages

* change default buffer sizes

* better p2p

* rem some useless logs

* lint

* sync tests complete

* complete tests

* tests fixed

* lint

* fix flakey att service

* PR feedback

* undo k8s changes

* Update beacon-chain/blockchain/block_processing.go

* Update beacon-chain/sync/regular_sync.go

* Add feature flag to enable compute state root

* add comment

* gazelle lint fix
2019-03-25 10:21:21 -05:00

package operations

import (
	"context"
	"errors"
	"fmt"
	"reflect"
	"testing"

	"github.com/gogo/protobuf/proto"
	"github.com/prysmaticlabs/prysm/beacon-chain/internal"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	"github.com/sirupsen/logrus"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

// Ensure operations service implements the OperationFeeds interface.
var _ = OperationFeeds(&Service{})
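// The blank identifier assignment is a compile-time assertion: the build
// breaks if *Service ever stops satisfying the OperationFeeds interface.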

// mockBroadcaster records whether Broadcast was called so tests can assert
// that the operations service gossips handled operations to the p2p layer.
type mockBroadcaster struct {
	broadcastCalled bool
}

func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) {
	mb.broadcastCalled = true
}

func init() {
	logrus.SetLevel(logrus.DebugLevel)
}
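
// TestStop_OK verifies that stopping the service cancels its internal context
// and emits the "Stopping service" log entry.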
func TestStop_OK(t *testing.T) {
	hook := logTest.NewGlobal()
	opsService := NewOpsPoolService(context.Background(), &Config{})

	if err := opsService.Stop(); err != nil {
		t.Fatalf("Unable to stop operation service: %v", err)
	}

	msg := hook.LastEntry().Message
	want := "Stopping service"
	if msg != want {
		t.Errorf("incorrect log, expected %s, got %s", want, msg)
	}

	// The context should have been canceled.
	if opsService.ctx.Err() != context.Canceled {
		t.Error("context was not canceled")
	}
	hook.Reset()
}
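
// TestServiceStatus_Error verifies that Status returns nil for a fresh service
// and surfaces the last error recorded on it.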
func TestServiceStatus_Error(t *testing.T) {
	service := NewOpsPoolService(context.Background(), &Config{})
	if service.Status() != nil {
		t.Errorf("service status should be nil to begin with, got: %v", service.error)
	}

	err := errors.New("error error error")
	service.error = err
	if service.Status() != err {
		t.Error("service status did not return wanted err")
	}
}
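
// TestRoutineContextClosing_Ok verifies that the save and remove goroutines
// observe context cancellation and log that they are exiting.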
func TestRoutineContextClosing_Ok(t *testing.T) {
	hook := logTest.NewGlobal()
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	s := NewOpsPoolService(context.Background(), &Config{BeaconDB: db})

	exitRoutine := make(chan bool)
	go func() {
		s.removeOperations()
		s.saveOperations()
		<-exitRoutine
	}()
	s.cancel()
	exitRoutine <- true
	testutil.AssertLogsContain(t, hook, "operations service context closed, exiting remove goroutine")
	testutil.AssertLogsContain(t, hook, "operations service context closed, exiting save goroutine")
}
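
// TestIncomingExits_Ok verifies that a handled voluntary exit is saved to the
// DB and that its hash shows up in the log output.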
func TestIncomingExits_Ok(t *testing.T) {
	hook := logTest.NewGlobal()
	beaconDB := internal.SetupDB(t)
	defer internal.TeardownDB(t, beaconDB)
	service := NewOpsPoolService(context.Background(), &Config{BeaconDB: beaconDB})

	exit := &pb.VoluntaryExit{Epoch: 100}
	hash, err := hashutil.HashProto(exit)
	if err != nil {
		t.Fatalf("Could not hash exit proto: %v", err)
	}
	if err := service.HandleValidatorExits(context.Background(), exit); err != nil {
		t.Error(err)
	}

	want := fmt.Sprintf("Exit request %#x saved in DB", hash)
	testutil.AssertLogsContain(t, hook, want)
}
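
// TestIncomingAttestation_OK verifies that a handled attestation is saved to
// the DB and re-broadcast through the configured P2P broadcaster.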
func TestIncomingAttestation_OK(t *testing.T) {
	hook := logTest.NewGlobal()
	beaconDB := internal.SetupDB(t)
	defer internal.TeardownDB(t, beaconDB)
	broadcaster := &mockBroadcaster{}
	service := NewOpsPoolService(context.Background(), &Config{
		BeaconDB: beaconDB,
		P2P:      broadcaster,
	})

	attestation := &pb.Attestation{
		AggregationBitfield: []byte{'A'},
		Data: &pb.AttestationData{
			Slot: 100,
		}}
	hash, err := hashutil.HashProto(attestation)
	if err != nil {
		t.Fatalf("Could not hash attestation proto: %v", err)
	}
	if err := service.HandleAttestations(context.Background(), attestation); err != nil {
		t.Error(err)
	}

	want := fmt.Sprintf("Attestation %#x saved in DB", hash)
	testutil.AssertLogsContain(t, hook, want)

	if !broadcaster.broadcastCalled {
		t.Error("Attestation was not broadcast")
	}
}
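
// TestRetrieveAttestations_OK verifies that PendingAttestations returns the
// saved attestations in slot order, capped at MaxAttestations (128).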
func TestRetrieveAttestations_OK(t *testing.T) {
	beaconDB := internal.SetupDB(t)
	defer internal.TeardownDB(t, beaconDB)
	service := NewOpsPoolService(context.Background(), &Config{BeaconDB: beaconDB})

	// Save 140 attestations for the test. MaxAttestations is 128, so the first
	// retrieval should return the attestations for slots 0-127; the remaining
	// slots 128-139 would only surface once the first batch is removed.
	origAttestations := make([]*pb.Attestation, 140)
	for i := 0; i < len(origAttestations); i++ {
		origAttestations[i] = &pb.Attestation{
			Data: &pb.AttestationData{
				Slot:  uint64(i),
				Shard: uint64(i),
			},
		}
		if err := service.beaconDB.SaveAttestation(context.Background(), origAttestations[i]); err != nil {
			t.Fatalf("Failed to save attestation: %v", err)
		}
	}

	// Test we can retrieve the first MaxAttestations attestations (slots 0-127).
	attestations, err := service.PendingAttestations()
	if err != nil {
		t.Fatalf("Could not retrieve attestations: %v", err)
	}
	if !reflect.DeepEqual(attestations, origAttestations[0:params.BeaconConfig().MaxAttestations]) {
		t.Errorf("Retrieved attestations did not match prev generated attestations for the first %d",
			params.BeaconConfig().MaxAttestations)
	}
}
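
// TestRemoveProcessedAttestations_Ok verifies that removePendingAttestations
// deletes the given attestations, leaving the pending pool empty.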
func TestRemoveProcessedAttestations_Ok(t *testing.T) {
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	s := NewOpsPoolService(context.Background(), &Config{BeaconDB: db})

	attestations := make([]*pb.Attestation, 10)
	for i := 0; i < len(attestations); i++ {
		attestations[i] = &pb.Attestation{
			Data: &pb.AttestationData{
				Slot:  uint64(i),
				Shard: uint64(i),
			},
		}
		if err := s.beaconDB.SaveAttestation(context.Background(), attestations[i]); err != nil {
			t.Fatalf("Failed to save attestation: %v", err)
		}
	}

	retrievedAtts, err := s.PendingAttestations()
	if err != nil {
		t.Fatalf("Could not retrieve attestations: %v", err)
	}
	if !reflect.DeepEqual(attestations, retrievedAtts) {
		t.Error("Retrieved attestations did not match prev generated attestations")
	}

	if err := s.removePendingAttestations(attestations); err != nil {
		t.Fatalf("Could not remove pending attestations: %v", err)
	}

	retrievedAtts, _ = s.PendingAttestations()
	if len(retrievedAtts) != 0 {
		t.Errorf("Attestation pool should be empty but got a length of %d", len(retrievedAtts))
	}
}
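
// TestCleanUpAttestations_OlderThanOneEpoch verifies that attestations more
// than SlotsPerEpoch (64) slots behind the current slot are pruned from the DB.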
func TestCleanUpAttestations_OlderThanOneEpoch(t *testing.T) {
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	s := NewOpsPoolService(context.Background(), &Config{BeaconDB: db})

	// Construct attestations for slots GenesisSlot through GenesisSlot+99.
	slot := uint64(99)
	attestations := make([]*pb.Attestation, slot+1)
	for i := 0; i < len(attestations); i++ {
		attestations[i] = &pb.Attestation{
			Data: &pb.AttestationData{
				Slot:  params.BeaconConfig().GenesisSlot + uint64(i),
				Shard: uint64(i),
			},
		}
		if err := s.beaconDB.SaveAttestation(context.Background(), attestations[i]); err != nil {
			t.Fatalf("Failed to save attestation: %v", err)
		}
	}

	// Assume the current slot is GenesisSlot+99. All attestations more than one
	// epoch old, i.e. before slot 99-64=35 relative to genesis, should get removed.
	if err := s.removeEpochOldAttestations(params.BeaconConfig().GenesisSlot + slot); err != nil {
		t.Fatalf("Could not remove old attestations: %v", err)
	}
	attestations, err := s.beaconDB.Attestations()
	if err != nil {
		t.Fatalf("Could not retrieve attestations: %v", err)
	}
	for _, a := range attestations {
		if a.Data.Slot < params.BeaconConfig().GenesisSlot+slot-params.BeaconConfig().SlotsPerEpoch {
			t.Errorf("Attestation slot %d can't be lower than %d",
				a.Data.Slot, params.BeaconConfig().GenesisSlot+slot-params.BeaconConfig().SlotsPerEpoch)
		}
	}
}
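
// TestReceiveBlkRemoveOps_Ok verifies that when a block is processed, the
// attestations included in its body are removed from the pending pool.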
func TestReceiveBlkRemoveOps_Ok(t *testing.T) {
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	s := NewOpsPoolService(context.Background(), &Config{BeaconDB: db})

	attestations := make([]*pb.Attestation, 10)
	for i := 0; i < len(attestations); i++ {
		attestations[i] = &pb.Attestation{
			Data: &pb.AttestationData{
				Slot:  uint64(i),
				Shard: uint64(i),
			},
		}
		if err := s.beaconDB.SaveAttestation(context.Background(), attestations[i]); err != nil {
			t.Fatalf("Failed to save attestation: %v", err)
		}
	}

	atts, _ := s.PendingAttestations()
	if len(atts) != len(attestations) {
		t.Errorf("Attestation pool should be %d but got a length of %d",
			len(attestations), len(atts))
	}

	block := &pb.BeaconBlock{
		Body: &pb.BeaconBlockBody{
			Attestations: attestations,
		},
	}

	s.incomingProcessedBlock <- block
	if err := s.handleProcessedBlock(context.Background(), block); err != nil {
		t.Error(err)
	}

	atts, _ = s.PendingAttestations()
	if len(atts) != 0 {
		t.Errorf("Attestation pool should be empty but got a length of %d", len(atts))
	}
}