Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git
Commit 4ab0a91e51
* Begin adding DB to validator client
  - Begin adding ValidatorProposalHistory
  - Implement most of proposal history
  - Finish tests
  - Fix marking a proposal for the first time
  - Change proposal history to not use bit shifting
  - Add pb.go
  - Change after proto/slashing added
  - Finally fix protos
  - Fix most tests
  - Fix all tests for double proposal protection
  - Start initializing DB in validator client
  - Add db to validator struct
  - Add DB to ProposeBlock
  - Fix test errors and begin mocking
  - Fix test formatting and pass test for validator protection!
  - Fix merge issues
  - Fix renames
  - Fix tests
* Fix tests
* Fix first startup on DB
* Fix nil check tests
* Fix E2E
* Fix e2e flag
* Fix comments
* Fix for comments
* Move proposal helpers to validator/client to keep DB clean
* Add clear-db flag to validator client
* Fix formatting
* Clear out unintended changes
* Fix build issues
* Gazelle
* Fix mock test
* Remove proposal history
* Add terminal confirmation to DB clearing
* Add interface for validatorDB, add context to DB functions
* Add force-clear-db flag
* Cleanup
* Update validator/node/node.go Co-Authored-By: Raul Jordan <raul@prysmaticlabs.com>
* Change db to clear file, not whole folder
* Fix db test
* Fix teardown test

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
346 lines
12 KiB
Go
// Package client represents the functionality to act as a validator.
package client

import (
	"context"
	"encoding/binary"
	"fmt"
	"io"
	"sync"
	"time"

	ptypes "github.com/gogo/protobuf/types"
	"github.com/pkg/errors"
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/slotutil"
	"github.com/prysmaticlabs/prysm/validator/db"
	"github.com/prysmaticlabs/prysm/validator/keymanager"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

type validator struct {
	genesisTime          uint64
	ticker               *slotutil.SlotTicker
	db                   *db.Store
	duties               *ethpb.DutiesResponse
	validatorClient      ethpb.BeaconNodeValidatorClient
	beaconClient         ethpb.BeaconChainClient
	graffiti             []byte
	aggregatorClient     pb.AggregatorServiceClient
	node                 ethpb.NodeClient
	keyManager           keymanager.KeyManager
	prevBalance          map[[48]byte]uint64
	logValidatorBalances bool
	attLogs              map[[32]byte]*attSubmitted
	attLogsLock          sync.Mutex
	pubKeyToID           map[[48]byte]uint64
	pubKeyToIDLock       sync.RWMutex
}

// Done cleans up the validator.
func (v *validator) Done() {
	v.ticker.Done()
}

// WaitForChainStart checks whether the beacon node has started its runtime. That is,
// it calls the beacon node, which in turn checks the ETH1.0 deposit contract logs to
// verify that the ChainStart log has been emitted. If so, it starts a ticker based on
// the ChainStart unix timestamp, which is used to keep track of time within the
// validator client.
func (v *validator) WaitForChainStart(ctx context.Context) error {
	ctx, span := trace.StartSpan(ctx, "validator.WaitForChainStart")
	defer span.End()
	// First, check if the beacon chain has started.
	stream, err := v.validatorClient.WaitForChainStart(ctx, &ptypes.Empty{})
	if err != nil {
		return errors.Wrap(err, "could not set up beacon chain ChainStart streaming client")
	}
	for {
		log.Info("Waiting for beacon chain start log from the ETH 1.0 deposit contract")
		chainStartRes, err := stream.Recv()
		// If the stream is closed, we stop the loop.
		if err == io.EOF {
			break
		}
		// If the context is canceled, we stop the loop.
		if ctx.Err() == context.Canceled {
			return errors.Wrap(ctx.Err(), "context has been canceled so shutting down the loop")
		}
		if err != nil {
			return errors.Wrap(err, "could not receive ChainStart from stream")
		}
		v.genesisTime = chainStartRes.GenesisTime
		break
	}
	// Once the ChainStart log is received, we update the genesis time of the validator client
	// and begin a slot ticker used to track the current slot the beacon node is in.
	v.ticker = slotutil.GetSlotTicker(time.Unix(int64(v.genesisTime), 0), params.BeaconConfig().SecondsPerSlot)
	log.WithField("genesisTime", time.Unix(int64(v.genesisTime), 0)).Info("Beacon chain started")
	return nil
}
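
// Illustrative sketch, not part of the original file: how the ChainStart genesis
// timestamp maps to a slot number, which is what the ticker above tracks.
// Assumes the mainnet value SecondsPerSlot = 12; a node 90 seconds past
// genesis would be in slot 7.
func exampleCurrentSlot(genesisTime uint64) uint64 {
	const secondsPerSlot = 12 // assumed mainnet value
	now := uint64(time.Now().Unix())
	if now < genesisTime {
		return 0 // genesis has not occurred yet
	}
	return (now - genesisTime) / secondsPerSlot
}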

// WaitForActivation checks whether the validator pubkey is in the active
// validator set. If not, this operation blocks until an activation message is
// received.
func (v *validator) WaitForActivation(ctx context.Context) error {
	ctx, span := trace.StartSpan(ctx, "validator.WaitForActivation")
	defer span.End()
	validatingKeys, err := v.keyManager.FetchValidatingKeys()
	if err != nil {
		return errors.Wrap(err, "could not fetch validating keys")
	}
	req := &ethpb.ValidatorActivationRequest{
		PublicKeys: bytesutil.FromBytes48Array(validatingKeys),
	}
	stream, err := v.validatorClient.WaitForActivation(ctx, req)
	if err != nil {
		return errors.Wrap(err, "could not set up validator WaitForActivation streaming client")
	}
	var validatorActivatedRecords [][]byte
	for {
		res, err := stream.Recv()
		// If the stream is closed, we stop the loop.
		if err == io.EOF {
			break
		}
		// If the context is canceled, we stop the loop.
		if ctx.Err() == context.Canceled {
			return errors.Wrap(ctx.Err(), "context has been canceled so shutting down the loop")
		}
		if err != nil {
			return errors.Wrap(err, "could not receive validator activation from stream")
		}
		log.Info("Waiting for validator to be activated in the beacon chain")
		activatedKeys := v.checkAndLogValidatorStatus(res.Statuses)

		if len(activatedKeys) > 0 {
			validatorActivatedRecords = activatedKeys
			break
		}
	}
	for _, pubKey := range validatorActivatedRecords {
		log.WithField("pubKey", fmt.Sprintf("%#x", bytesutil.Trunc(pubKey[:]))).Info("Validator activated")
	}
	v.ticker = slotutil.GetSlotTicker(time.Unix(int64(v.genesisTime), 0), params.BeaconConfig().SecondsPerSlot)

	return nil
}

// WaitForSync checks whether the beacon node has synced to the latest head.
func (v *validator) WaitForSync(ctx context.Context) error {
	ctx, span := trace.StartSpan(ctx, "validator.WaitForSync")
	defer span.End()

	s, err := v.node.GetSyncStatus(ctx, &ptypes.Empty{})
	if err != nil {
		return errors.Wrap(err, "could not get sync status")
	}
	if !s.Syncing {
		return nil
	}

	for {
		select {
		// Poll every half slot.
		case <-time.After(time.Duration(params.BeaconConfig().SecondsPerSlot/2) * time.Second):
			s, err := v.node.GetSyncStatus(ctx, &ptypes.Empty{})
			if err != nil {
				return errors.Wrap(err, "could not get sync status")
			}
			if !s.Syncing {
				return nil
			}
			log.Info("Waiting for beacon node to sync to latest chain head")
		case <-ctx.Done():
			return errors.New("context has been canceled, exiting goroutine")
		}
	}
}
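
// Illustrative sketch, not part of the original file: the half-slot polling
// cadence intended by WaitForSync above (the original code used
// SlotsPerEpoch/2 seconds, which contradicted its own comment). Assuming the
// mainnet value SecondsPerSlot = 12, sync status is re-checked every 6
// seconds until the node reports that it is no longer syncing.
func exampleHalfSlotInterval() time.Duration {
	const secondsPerSlot = 12 // assumed mainnet value
	return time.Duration(secondsPerSlot/2) * time.Second
}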

func (v *validator) checkAndLogValidatorStatus(validatorStatuses []*ethpb.ValidatorActivationResponse_Status) [][]byte {
	var activatedKeys [][]byte
	for _, status := range validatorStatuses {
		log := log.WithFields(logrus.Fields{
			"pubKey": fmt.Sprintf("%#x", bytesutil.Trunc(status.PublicKey[:])),
			"status": status.Status.Status.String(),
		})
		if status.Status.Status == ethpb.ValidatorStatus_ACTIVE {
			activatedKeys = append(activatedKeys, status.PublicKey)
			continue
		}
		if status.Status.Status == ethpb.ValidatorStatus_EXITED {
			log.Info("Validator exited")
			continue
		}
		if status.Status.Status == ethpb.ValidatorStatus_DEPOSIT_RECEIVED {
			log.WithField("expectedInclusionSlot", status.Status.DepositInclusionSlot).Info(
				"Deposit for validator received but not processed into state")
			continue
		}
		if uint64(status.Status.ActivationEpoch) == params.BeaconConfig().FarFutureEpoch {
			log.WithFields(logrus.Fields{
				"depositInclusionSlot":      status.Status.DepositInclusionSlot,
				"positionInActivationQueue": status.Status.PositionInActivationQueue,
			}).Info("Waiting to be activated")
			continue
		}
		log.WithFields(logrus.Fields{
			"depositInclusionSlot":      status.Status.DepositInclusionSlot,
			"activationEpoch":           status.Status.ActivationEpoch,
			"positionInActivationQueue": status.Status.PositionInActivationQueue,
		}).Info("Validator status")
	}
	return activatedKeys
}

// CanonicalHeadSlot returns the slot of the canonical head block currently
// reported by the beacon chain via RPC.
func (v *validator) CanonicalHeadSlot(ctx context.Context) (uint64, error) {
	ctx, span := trace.StartSpan(ctx, "validator.CanonicalHeadSlot")
	defer span.End()
	head, err := v.beaconClient.GetChainHead(ctx, &ptypes.Empty{})
	if err != nil {
		return 0, err
	}
	return head.HeadSlot, nil
}

// NextSlot emits the next slot number at the start time of that slot.
func (v *validator) NextSlot() <-chan uint64 {
	return v.ticker.C()
}
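
// Illustrative usage sketch, not part of the original file: the intended
// consumption pattern for NextSlot, blocking until each slot boundary and
// exiting when the context is canceled.
func exampleSlotLoop(ctx context.Context, v *validator) {
	for {
		select {
		case slot := <-v.NextSlot():
			log.WithField("slot", slot).Debug("Slot started")
		case <-ctx.Done():
			return
		}
	}
}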

// SlotDeadline returns the start time of the slot after the given slot, which
// serves as the deadline for work in that slot.
func (v *validator) SlotDeadline(slot uint64) time.Time {
	secs := (slot + 1) * params.BeaconConfig().SecondsPerSlot
	return time.Unix(int64(v.genesisTime), 0 /*ns*/).Add(time.Duration(secs) * time.Second)
}
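
// Illustrative sketch, not part of the original file: the deadline arithmetic
// above, worked for the assumed mainnet value SecondsPerSlot = 12. The
// deadline for slot 5 is genesis + (5+1)*12s = genesis + 72s, i.e. the start
// of slot 6.
func exampleSlotDeadline(genesis time.Time, slot uint64) time.Time {
	const secondsPerSlot = 12 // assumed mainnet value
	return genesis.Add(time.Duration((slot+1)*secondsPerSlot) * time.Second)
}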

// UpdateDuties checks the slot number to determine if the validator's
// list of upcoming assignments needs to be updated, for example at the
// beginning of a new epoch.
func (v *validator) UpdateDuties(ctx context.Context, slot uint64) error {
	if slot%params.BeaconConfig().SlotsPerEpoch != 0 && v.duties != nil {
		// Do nothing if not epoch start AND assignments already exist.
		return nil
	}
	// Set deadline to end of epoch.
	ctx, cancel := context.WithDeadline(ctx, v.SlotDeadline(helpers.StartSlot(helpers.SlotToEpoch(slot)+1)))
	defer cancel()
	ctx, span := trace.StartSpan(ctx, "validator.UpdateAssignments")
	defer span.End()

	validatingKeys, err := v.keyManager.FetchValidatingKeys()
	if err != nil {
		return err
	}
	req := &ethpb.DutiesRequest{
		Epoch:      slot / params.BeaconConfig().SlotsPerEpoch,
		PublicKeys: bytesutil.FromBytes48Array(validatingKeys),
	}

	resp, err := v.validatorClient.GetDuties(ctx, req)
	if err != nil {
		v.duties = nil // Clear assignments so we know to retry the request.
		log.Error(err)
		return err
	}

	v.duties = resp
	// Only log the full assignments output on epoch start to be less verbose.
	if slot%params.BeaconConfig().SlotsPerEpoch == 0 {
		v.pubKeyToIDLock.Lock()
		defer v.pubKeyToIDLock.Unlock()

		for _, duty := range v.duties.Duties {
			if _, ok := v.pubKeyToID[bytesutil.ToBytes48(duty.PublicKey)]; !ok {
				// TODO(4379): Make validator index part of the assignment response.
				res, err := v.validatorClient.ValidatorIndex(ctx, &ethpb.ValidatorIndexRequest{PublicKey: duty.PublicKey})
				if err != nil {
					log.Warnf("Validator pub key %#x does not exist in beacon node", bytesutil.Trunc(duty.PublicKey))
					continue
				}
				v.pubKeyToID[bytesutil.ToBytes48(duty.PublicKey)] = res.Index
			}
			lFields := logrus.Fields{
				"pubKey":         fmt.Sprintf("%#x", bytesutil.Trunc(duty.PublicKey)),
				"validatorIndex": v.pubKeyToID[bytesutil.ToBytes48(duty.PublicKey)],
				"committeeIndex": duty.CommitteeIndex,
				"epoch":          slot / params.BeaconConfig().SlotsPerEpoch,
				"status":         duty.Status,
			}

			if duty.Status == ethpb.ValidatorStatus_ACTIVE {
				if duty.ProposerSlot > 0 {
					lFields["proposerSlot"] = duty.ProposerSlot
				}
				lFields["attesterSlot"] = duty.AttesterSlot
			}

			log.WithFields(lFields).Info("New assignment")
		}
	}

	return nil
}
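
// Illustrative sketch, not part of the original file: the epoch gating used by
// UpdateDuties above. Assuming the mainnet value SlotsPerEpoch = 32, duties
// are refreshed at slots 0, 32, 64, ..., and slot 70 belongs to epoch
// 70/32 = 2.
func exampleEpochOf(slot uint64) (epoch uint64, isEpochStart bool) {
	const slotsPerEpoch = 32 // assumed mainnet value
	return slot / slotsPerEpoch, slot%slotsPerEpoch == 0
}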

// RolesAt returns the validator roles at the given slot. It returns nil if the
// validator is known to have no role at that slot, UNKNOWN if the validator's
// assignments are unknown, and otherwise a valid ValidatorRole map.
func (v *validator) RolesAt(ctx context.Context, slot uint64) (map[[48]byte][]pb.ValidatorRole, error) {
	rolesAt := make(map[[48]byte][]pb.ValidatorRole)
	for _, duty := range v.duties.Duties {
		var roles []pb.ValidatorRole

		if duty == nil {
			continue
		}
		if duty.ProposerSlot == slot {
			roles = append(roles, pb.ValidatorRole_PROPOSER)
		}
		if duty.AttesterSlot == slot {
			roles = append(roles, pb.ValidatorRole_ATTESTER)

			aggregator, err := v.isAggregator(ctx, duty.Committee, slot, bytesutil.ToBytes48(duty.PublicKey))
			if err != nil {
				return nil, errors.Wrap(err, "could not check if a validator is an aggregator")
			}
			if aggregator {
				roles = append(roles, pb.ValidatorRole_AGGREGATOR)
			}
		}
		if len(roles) == 0 {
			roles = append(roles, pb.ValidatorRole_UNKNOWN)
		}

		var pubKey [48]byte
		copy(pubKey[:], duty.PublicKey)
		rolesAt[pubKey] = roles
	}
	return rolesAt, nil
}
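
// Illustrative usage sketch, not part of the original file: how a caller, such
// as the client's run loop, might consume RolesAt each slot, dispatching work
// per public key and role.
func exampleDispatchRoles(ctx context.Context, v *validator, slot uint64) error {
	rolesAt, err := v.RolesAt(ctx, slot)
	if err != nil {
		return err
	}
	for pubKey, roles := range rolesAt {
		for _, role := range roles {
			if role == pb.ValidatorRole_UNKNOWN {
				continue // no known duty for this key at this slot
			}
			log.WithFields(logrus.Fields{
				"pubKey": fmt.Sprintf("%#x", bytesutil.Trunc(pubKey[:])),
				"role":   role.String(),
			}).Debug("Dispatching role")
		}
	}
	return nil
}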

// isAggregator checks whether a validator is an aggregator for the given slot, using the
// selection algorithm outlined in:
// https://github.com/ethereum/eth2.0-specs/blob/v0.9.0/specs/validator/0_beacon-chain-validator.md#aggregation-selection
func (v *validator) isAggregator(ctx context.Context, committee []uint64, slot uint64, pubKey [48]byte) (bool, error) {
	modulo := uint64(1)
	if len(committee)/int(params.BeaconConfig().TargetAggregatorsPerCommittee) > 1 {
		modulo = uint64(len(committee)) / params.BeaconConfig().TargetAggregatorsPerCommittee
	}

	slotSig, err := v.signSlot(ctx, pubKey, slot)
	if err != nil {
		return false, err
	}

	b := hashutil.Hash(slotSig)

	return binary.LittleEndian.Uint64(b[:8])%modulo == 0, nil
}
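
// Illustrative sketch, not part of the original file: the selection math in
// isAggregator, worked for an assumed TargetAggregatorsPerCommittee of 16.
// For a committee of 200 validators, modulo = 200/16 = 12, so a validator
// aggregates when the first eight bytes of hash(slotSig), read little-endian,
// are divisible by 12, i.e. roughly one aggregator per twelve members.
func exampleIsAggregator(slotSig []byte, committeeSize uint64) bool {
	const targetAggregatorsPerCommittee = 16 // assumed spec value
	modulo := uint64(1)
	if committeeSize/targetAggregatorsPerCommittee > 1 {
		modulo = committeeSize / targetAggregatorsPerCommittee
	}
	b := hashutil.Hash(slotSig)
	return binary.LittleEndian.Uint64(b[:8])%modulo == 0
}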