prysm-pulse/slasher/beaconclient/receivers.go
package beaconclient

import (
	"context"
	"errors"
	"fmt"
	"io"
	"time"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/slotutil"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/emptypb"
)
// reconnectPeriod is the frequency at which we try to restart our
// streams when the beacon node does not respond.
var reconnectPeriod = 5 * time.Second
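// Note: reconnectPeriod is a package-level var rather than a const, presumably
// so tests can shorten the retry window; an illustrative (hypothetical) override:
//
//	reconnectPeriod = 100 * time.Millisecond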

// ReceiveBlocks starts a gRPC client stream listener to obtain
// blocks from the beacon node. Upon receiving a block, the service
// broadcasts it to a feed for other services in slasher to subscribe to.
func (s *Service) ReceiveBlocks(ctx context.Context) {
	ctx, span := trace.StartSpan(ctx, "beaconclient.ReceiveBlocks")
	defer span.End()
	stream, err := s.cfg.BeaconClient.StreamBlocks(ctx, &ethpb.StreamBlocksRequest{} /* Prefers unverified block to catch slashing */)
	if err != nil {
		log.WithError(err).Error("Failed to retrieve blocks stream")
		return
	}
	for {
		res, err := stream.Recv()
		// If the stream is closed, we stop the loop.
		if errors.Is(err, io.EOF) {
			break
		}
		// If the context is canceled, we stop the loop.
		if ctx.Err() == context.Canceled {
			log.WithError(ctx.Err()).Error("Context canceled - shutting down blocks receiver")
			return
		}
		if err != nil {
			if e, ok := status.FromError(err); ok {
				switch e.Code() {
				case codes.Canceled, codes.Internal, codes.Unavailable:
					log.WithError(err).Infof("Trying to restart connection. rpc status: %v", e.Code())
					err = s.restartBeaconConnection(ctx)
					if err != nil {
						log.WithError(err).Error("Could not restart beacon connection")
						return
					}
					stream, err = s.cfg.BeaconClient.StreamBlocks(ctx, &ethpb.StreamBlocksRequest{} /* Prefers unverified block to catch slashing */)
					if err != nil {
						log.WithError(err).Error("Could not restart block stream")
						return
					}
					log.Info("Block stream restarted...")
				default:
					log.WithError(err).Errorf("Could not receive block from beacon node. rpc status: %v", e.Code())
					return
				}
			} else {
				log.WithError(err).Error("Could not receive blocks from beacon node")
				return
			}
		}
		if res == nil {
			continue
		}
		root, err := res.Block.HashTreeRoot()
		if err != nil {
			log.WithError(err).Error("Could not hash block")
			return
		}
		log.WithFields(logrus.Fields{
			"slot":           res.Block.Slot,
			"proposer_index": res.Block.ProposerIndex,
			"root":           fmt.Sprintf("%#x...", root[:8]),
		}).Info("Received block from beacon node")
		// We send the received block over the block feed.
		s.blockFeed.Send(res)
	}
}

// ReceiveAttestations starts a gRPC client stream listener to obtain
// attestations from the beacon node. Upon receiving an attestation, the service
// broadcasts it to a feed for other services in slasher to subscribe to.
func (s *Service) ReceiveAttestations(ctx context.Context) {
	ctx, span := trace.StartSpan(ctx, "beaconclient.ReceiveAttestations")
	defer span.End()
	stream, err := s.cfg.BeaconClient.StreamIndexedAttestations(ctx, &emptypb.Empty{})
	if err != nil {
		log.WithError(err).Error("Failed to retrieve attestations stream")
		return
	}
	go s.collectReceivedAttestations(ctx)
	for {
		res, err := stream.Recv()
		// If the stream is closed, we stop the loop.
		if errors.Is(err, io.EOF) {
			log.Info("Attestation stream closed")
			break
		}
		// If the context is canceled, we stop the loop.
		if ctx.Err() == context.Canceled {
			log.WithError(ctx.Err()).Error("Context canceled - shutting down attestations receiver")
			return
		}
		if err != nil {
			if e, ok := status.FromError(err); ok {
				switch e.Code() {
				case codes.Canceled, codes.Internal, codes.Unavailable:
					log.WithError(err).Infof("Trying to restart connection. rpc status: %v", e.Code())
					err = s.restartBeaconConnection(ctx)
					if err != nil {
						log.WithError(err).Error("Could not restart beacon connection")
						return
					}
					stream, err = s.cfg.BeaconClient.StreamIndexedAttestations(ctx, &emptypb.Empty{})
					if err != nil {
						log.WithError(err).Error("Could not restart attestation stream")
						return
					}
					log.Info("Attestation stream restarted...")
				default:
					log.WithError(err).Errorf("Could not receive attestations from beacon node. rpc status: %v", e.Code())
					return
				}
			} else {
				log.WithError(err).Error("Could not receive attestations from beacon node")
				return
			}
		}
		if res == nil {
			continue
		}
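		// Hand the attestation off to the batching goroutine started above,
		// which flushes accumulated attestations to the DB every half slot.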
		s.receivedAttestationsBuffer <- res
	}
}
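
// collectReceivedAttestations batches attestations from the receive buffer
// and, on a half-slot ticker, saves each batch to the slasher DB before
// forwarding the individual attestations to the attestation feed.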
func (s *Service) collectReceivedAttestations(ctx context.Context) {
	ctx, span := trace.StartSpan(ctx, "beaconclient.collectReceivedAttestations")
	defer span.End()
	var atts []*ethpb.IndexedAttestation
	halfSlot := slotutil.DivideSlotBy(2 /* 1/2 slot duration */)
	ticker := time.NewTicker(halfSlot)
	defer ticker.Stop()
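	// Flushing on a half-slot cadence batches DB writes while keeping the
	// hand-off latency to the detection pipeline under one slot.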
	for {
		select {
		case <-ticker.C:
			if len(atts) > 0 {
				s.collectedAttestationsBuffer <- atts
				atts = []*ethpb.IndexedAttestation{}
			}
		case att := <-s.receivedAttestationsBuffer:
			atts = append(atts, att)
		case collectedAtts := <-s.collectedAttestationsBuffer:
			if err := s.cfg.SlasherDB.SaveIndexedAttestations(ctx, collectedAtts); err != nil {
				log.WithError(err).Error("Could not save indexed attestations")
				continue
			}
			log.WithFields(logrus.Fields{
				"amountSaved": len(collectedAtts),
				"slot":        collectedAtts[0].Data.Slot,
			}).Info("Attestations saved to slasher DB")
			slasherNumAttestationsReceived.Add(float64(len(collectedAtts)))
			// After saving, we send each received attestation over the attestation feed.
			for _, att := range collectedAtts {
				log.WithFields(logrus.Fields{
					"slot":    att.Data.Slot,
					"indices": att.AttestingIndices,
				}).Debug("Sending attestation to detection service")
				s.attestationFeed.Send(att)
			}
		case <-ctx.Done():
			return
		}
	}
}
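
// restartBeaconConnection polls the beacon node every reconnectPeriod until
// the gRPC connection is healthy and the node reports that it is fully
// synced. It returns an error only if the context is canceled first.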
func (s *Service) restartBeaconConnection(ctx context.Context) error {
	ticker := time.NewTicker(reconnectPeriod)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if s.conn.GetState() == connectivity.TransientFailure || s.conn.GetState() == connectivity.Idle {
				log.Debugf("Connection status %v", s.conn.GetState())
				log.Info("Beacon node is still down")
				continue
			}
			syncStatus, err := s.cfg.NodeClient.GetSyncStatus(ctx, &emptypb.Empty{})
			if err != nil {
				log.WithError(err).Error("Could not fetch sync status")
				continue
			}
			if syncStatus == nil || syncStatus.Syncing {
				log.Info("Waiting for beacon node to be fully synced...")
				continue
			}
			log.Info("Beacon node is fully synced")
			return nil
		case <-ctx.Done():
			log.Debug("Context closed, exiting reconnect routine")
			return errors.New("context closed, no longer attempting to restart stream")
		}
	}
}