prysm-pulse/beacon-chain/operations/service_test.go

package operations
import (
"context"
"errors"
"fmt"
"reflect"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/prysmaticlabs/prysm/beacon-chain/internal"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/sirupsen/logrus"
logTest "github.com/sirupsen/logrus/hooks/test"
)
// Ensure operations service implements interfaces.
var _ = OperationFeeds(&Service{})
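// mockBroadcaster records whether Broadcast was called so tests can assert on p2p broadcasts.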
type mockBroadcaster struct {
broadcastCalled bool
}
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) {
mb.broadcastCalled = true
}
func init() {
logrus.SetLevel(logrus.DebugLevel)
}
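// TestStop_OK verifies that stopping the service logs the shutdown and cancels its context.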
func TestStop_OK(t *testing.T) {
hook := logTest.NewGlobal()
opsService := NewOpsPoolService(context.Background(), &Config{})
if err := opsService.Stop(); err != nil {
t.Fatalf("Unable to stop operation service: %v", err)
}
msg := hook.LastEntry().Message
want := "Stopping service"
if msg != want {
t.Errorf("incorrect log, expected %s, got %s", want, msg)
}
// The context should have been canceled.
if opsService.ctx.Err() != context.Canceled {
t.Error("context was not canceled")
}
hook.Reset()
}
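// TestServiceStatus_Error verifies that Status returns the error stored on the service.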
func TestServiceStatus_Error(t *testing.T) {
service := NewOpsPoolService(context.Background(), &Config{})
if service.Status() != nil {
t.Errorf("service status should be nil to begin with, got: %v", service.error)
}
err := errors.New("error error error")
service.error = err
if service.Status() != err {
t.Error("service status did not return wanted err")
}
}
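// TestRoutineContextClosing_Ok verifies that the remove and save goroutines exit once the service context is canceled.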
func TestRoutineContextClosing_Ok(t *testing.T) {
hook := logTest.NewGlobal()
db := internal.SetupDB(t)
defer internal.TeardownDB(t, db)
s := NewOpsPoolService(context.Background(), &Config{BeaconDB: db})
exitRoutine := make(chan bool)
go func() {
s.removeOperations()
s.saveOperations()
<-exitRoutine
}()
s.cancel()
exitRoutine <- true
testutil.AssertLogsContain(t, hook, "operations service context closed, exiting remove goroutine")
testutil.AssertLogsContain(t, hook, "operations service context closed, exiting save goroutine")
}
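// TestIncomingExits_Ok verifies that an incoming voluntary exit is saved to the DB.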
func TestIncomingExits_Ok(t *testing.T) {
hook := logTest.NewGlobal()
beaconDB := internal.SetupDB(t)
defer internal.TeardownDB(t, beaconDB)
service := NewOpsPoolService(context.Background(), &Config{BeaconDB: beaconDB})
exit := &pb.VoluntaryExit{Epoch: 100}
if err := service.HandleValidatorExits(context.Background(), exit); err != nil {
t.Error(err)
}
want := "Exit request saved in DB"
testutil.AssertLogsContain(t, hook, want)
}
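// TestIncomingAttestation_OK verifies that an incoming attestation is handled and broadcast over p2p.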
func TestIncomingAttestation_OK(t *testing.T) {
beaconDB := internal.SetupDB(t)
defer internal.TeardownDB(t, beaconDB)
broadcaster := &mockBroadcaster{}
service := NewOpsPoolService(context.Background(), &Config{
BeaconDB: beaconDB,
P2P: broadcaster,
})
attestation := &pb.Attestation{
AggregationBitfield: []byte{'A'},
Data: &pb.AttestationData{
Slot: 100,
}}
if err := service.HandleAttestations(context.Background(), attestation); err != nil {
t.Error(err)
}
if !broadcaster.broadcastCalled {
t.Error("Attestation was not broadcasted")
}
}
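// TestRetrieveAttestations_OK verifies that saved attestations can be retrieved as pending attestations.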
func TestRetrieveAttestations_OK(t *testing.T) {
beaconDB := internal.SetupDB(t)
defer internal.TeardownDB(t, beaconDB)
service := NewOpsPoolService(context.Background(), &Config{BeaconDB: beaconDB})
// Save 140 attestations for the test. With the state saved at slot 64 below, the retrieval
// should return the attestations from slot 1 through slot 127.
origAttestations := make([]*pb.Attestation, 140)
for i := 0; i < len(origAttestations); i++ {
origAttestations[i] = &pb.Attestation{
Data: &pb.AttestationData{
Slot: params.BeaconConfig().GenesisSlot + uint64(i),
CrosslinkDataRootHash32: params.BeaconConfig().ZeroHash[:],
},
}
if err := service.beaconDB.SaveAttestation(context.Background(), origAttestations[i]); err != nil {
t.Fatalf("Failed to save attestation: %v", err)
}
}
if err := beaconDB.SaveState(context.Background(), &pb.BeaconState{
Slot: params.BeaconConfig().GenesisSlot + 64,
LatestCrosslinks: []*pb.Crosslink{{
Epoch: params.BeaconConfig().GenesisEpoch,
CrosslinkDataRootHash32: params.BeaconConfig().ZeroHash[:]}}}); err != nil {
t.Fatal(err)
}
// Test we can retrieve attestations from slot 1 through slot 127.
attestations, err := service.PendingAttestations(context.Background())
if err != nil {
t.Fatalf("Could not retrieve attestations: %v", err)
}
if !reflect.DeepEqual(attestations, origAttestations[1:128]) {
t.Error("Retrieved attestations did not match")
}
}
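// TestRetrieveAttestations_PruneInvalidAtts verifies that attestations no longer valid for the current state are pruned from the pool and deleted from the DB.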
func TestRetrieveAttestations_PruneInvalidAtts(t *testing.T) {
beaconDB := internal.SetupDB(t)
defer internal.TeardownDB(t, beaconDB)
service := NewOpsPoolService(context.Background(), &Config{BeaconDB: beaconDB})
// Save 140 attestations for slots 0 to 139.
origAttestations := make([]*pb.Attestation, 140)
for i := 0; i < len(origAttestations); i++ {
origAttestations[i] = &pb.Attestation{
Data: &pb.AttestationData{
Slot: params.BeaconConfig().GenesisSlot + uint64(i),
CrosslinkDataRootHash32: params.BeaconConfig().ZeroHash[:],
},
}
if err := service.beaconDB.SaveAttestation(context.Background(), origAttestations[i]); err != nil {
t.Fatalf("Failed to save attestation: %v", err)
}
}
// At slot 200, only the attestations from slot 137 to 139 are still valid.
if err := beaconDB.SaveState(context.Background(), &pb.BeaconState{
Slot: params.BeaconConfig().GenesisSlot + 200,
LatestCrosslinks: []*pb.Crosslink{{
Epoch: params.BeaconConfig().GenesisEpoch + 2,
CrosslinkDataRootHash32: params.BeaconConfig().ZeroHash[:]}}}); err != nil {
t.Fatal(err)
}
attestations, err := service.PendingAttestations(context.Background())
if err != nil {
t.Fatalf("Could not retrieve attestations: %v", err)
}
if !reflect.DeepEqual(attestations, origAttestations[137:]) {
t.Error("Incorrect pruned attestations")
}
// Verify the invalid attestations are deleted.
hash, err := hashutil.HashProto(origAttestations[136])
if err != nil {
t.Fatal(err)
}
if service.beaconDB.HasAttestation(hash) {
t.Error("Invalid attestation is not deleted")
}
}
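// TestRemoveProcessedAttestations_Ok verifies that pending attestations are removed from the pool once processed.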
func TestRemoveProcessedAttestations_Ok(t *testing.T) {
db := internal.SetupDB(t)
defer internal.TeardownDB(t, db)
s := NewOpsPoolService(context.Background(), &Config{BeaconDB: db})
attestations := make([]*pb.Attestation, 10)
for i := 0; i < len(attestations); i++ {
attestations[i] = &pb.Attestation{
Data: &pb.AttestationData{
Slot: params.BeaconConfig().GenesisSlot + uint64(i),
CrosslinkDataRootHash32: params.BeaconConfig().ZeroHash[:],
},
}
if err := s.beaconDB.SaveAttestation(context.Background(), attestations[i]); err != nil {
t.Fatalf("Failed to save attestation: %v", err)
}
}
if err := db.SaveState(context.Background(), &pb.BeaconState{
Slot: params.BeaconConfig().GenesisSlot + 15,
LatestCrosslinks: []*pb.Crosslink{{
Epoch: params.BeaconConfig().GenesisEpoch,
CrosslinkDataRootHash32: params.BeaconConfig().ZeroHash[:]}}}); err != nil {
t.Fatal(err)
}
retrievedAtts, err := s.PendingAttestations(context.Background())
if err != nil {
t.Fatalf("Could not retrieve attestations: %v", err)
}
if !reflect.DeepEqual(attestations, retrievedAtts) {
t.Error("Retrieved attestations did not match prev generated attestations")
}
if err := s.removePendingAttestations(attestations); err != nil {
t.Fatalf("Could not remove pending attestations: %v", err)
}
retrievedAtts, _ = s.PendingAttestations(context.Background())
if len(retrievedAtts) != 0 {
t.Errorf("Attestation pool should be empty but got a length of %d", len(retrievedAtts))
}
}
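// TestCleanUpAttestations_OlderThanOneEpoch verifies that attestations more than one epoch older than the current slot are removed.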
func TestCleanUpAttestations_OlderThanOneEpoch(t *testing.T) {
db := internal.SetupDB(t)
defer internal.TeardownDB(t, db)
s := NewOpsPoolService(context.Background(), &Config{BeaconDB: db})
// Construct attestations for slot 0..99.
slot := uint64(99)
attestations := make([]*pb.Attestation, slot+1)
for i := 0; i < len(attestations); i++ {
attestations[i] = &pb.Attestation{
Data: &pb.AttestationData{
Slot: params.BeaconConfig().GenesisSlot + uint64(i),
Shard: uint64(i),
},
}
if err := s.beaconDB.SaveAttestation(context.Background(), attestations[i]); err != nil {
t.Fatalf("Failed to save attestation: %v", err)
}
}
// Assume the current slot is 99. All attestations more than one epoch (64 slots) older than slot 99 should be removed.
if err := s.removeEpochOldAttestations(params.BeaconConfig().GenesisSlot + slot); err != nil {
t.Fatalf("Could not remove old attestations: %v", err)
}
attestations, err := s.beaconDB.Attestations()
if err != nil {
t.Fatalf("Could not retrieve attestations: %v", err)
}
for _, a := range attestations {
if a.Data.Slot < params.BeaconConfig().GenesisSlot+slot-params.BeaconConfig().SlotsPerEpoch {
t.Errorf("Attestation slot %d can't be lower than %d",
a.Data.Slot, params.BeaconConfig().GenesisSlot+slot-params.BeaconConfig().SlotsPerEpoch)
}
}
}
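// TestReceiveBlkRemoveOps_Ok verifies that attestations included in a processed block are removed from the pending pool.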
func TestReceiveBlkRemoveOps_Ok(t *testing.T) {
db := internal.SetupDB(t)
defer internal.TeardownDB(t, db)
s := NewOpsPoolService(context.Background(), &Config{BeaconDB: db})
attestations := make([]*pb.Attestation, 10)
for i := 0; i < len(attestations); i++ {
attestations[i] = &pb.Attestation{
Data: &pb.AttestationData{
Slot: params.BeaconConfig().GenesisSlot + uint64(i),
CrosslinkDataRootHash32: params.BeaconConfig().ZeroHash[:],
},
}
if err := s.beaconDB.SaveAttestation(context.Background(), attestations[i]); err != nil {
t.Fatalf("Failed to save attestation: %v", err)
}
}
if err := db.SaveState(context.Background(), &pb.BeaconState{
Slot: params.BeaconConfig().GenesisSlot + 15,
LatestCrosslinks: []*pb.Crosslink{{
Epoch: params.BeaconConfig().GenesisEpoch,
CrosslinkDataRootHash32: params.BeaconConfig().ZeroHash[:]}}}); err != nil {
t.Fatal(err)
}
atts, _ := s.PendingAttestations(context.Background())
if len(atts) != len(attestations) {
t.Errorf("Attestation pool should be %d but got a length of %d",
len(attestations), len(atts))
}
block := &pb.BeaconBlock{
Body: &pb.BeaconBlockBody{
Attestations: attestations,
},
}
s.incomingProcessedBlock <- block
if err := s.handleProcessedBlock(context.Background(), block); err != nil {
t.Error(err)
}
atts, _ = s.PendingAttestations(context.Background())
if len(atts) != 0 {
t.Errorf("Attestation pool should be empty but got a length of %d", len(atts))
}
}