Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git, synced 2024-12-25 04:47:18 +00:00
39c33b82ad
* quick lazy balance cache proof of concept
* WIP refactoring to use lazy cache
* updating tests to use functional opts
* updating the rest of the tests, all passing
* use mock stategen where possible
  reduces the number of test cases that require db setup
* rename test opt method for clear link
* Update beacon-chain/blockchain/process_block.go
  Co-authored-by: terence tsao <terence@prysmaticlabs.com>
* test assumption that zerohash is in db
* remove unused MockDB (mocking stategen instead)
* fix cache bug, switch to sync.Mutex
* improve test coverage for the state cache
* uncomment failing genesis test for discussion
* gofmt
* remove unused Service struct member
* cleanup unused func input
* combining type declaration in signature
* don't export the state cache constructor
* work around blockchain deps w/ new file
  service_test brings in a ton of dependencies that make bazel rules for blockchain complex, so just sticking these mocks in their own file simplifies things.
* gofmt
* remove intentionally failing test
  this test established that the zero root can't be used to look up the state, resulting in a change in another PR to update stategen to use the GenesisState db method instead when the zero root is detected.
* fixed error introduced by develop refresh
* fix import ordering
* appease deepsource
* remove unused function
* godoc comments on new requires/assert
* defensive constructor per terence's PR comment
* more differentiated balance cache metric names

Co-authored-by: kasey <kasey@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
184 lines
6.6 KiB
Go
package blockchain

import (
	"bytes"
	"context"
	"fmt"
	"time"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/config/params"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/time/slots"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

// AttestationStateFetcher allows for retrieving a beacon state corresponding to the block
// root of an attestation's target checkpoint.
type AttestationStateFetcher interface {
	AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error)
}

// AttestationReceiver defines the methods of the chain service for receiving and processing new attestations.
type AttestationReceiver interface {
	AttestationStateFetcher
	ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error
	VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
	VerifyFinalizedConsistency(ctx context.Context, root []byte) error
}
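
// A minimal usage sketch of AttestationReceiver for a caller in another package
// (for example, a sync handler). The identifiers chainService, ctx, and att are
// illustrative assumptions, not part of this file:
//
//	var receiver AttestationReceiver = chainService // e.g. the blockchain Service
//	if err := receiver.VerifyLmdFfgConsistency(ctx, att); err != nil {
//		return err
//	}
//	if err := receiver.ReceiveAttestationNoPubsub(ctx, att); err != nil {
//		return err
//	}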

// ReceiveAttestationNoPubsub defines the operations performed on an attestation
// received from regular sync. The operations consist of:
// 1. Validate attestation, update validator's latest vote
// 2. Apply fork choice to the processed attestation
// 3. Save latest head info
func (s *Service) ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveAttestationNoPubsub")
	defer span.End()

	if err := s.onAttestation(ctx, att); err != nil {
		return errors.Wrap(err, "could not process attestation")
	}

	return nil
}

// AttestationTargetState returns the pre-state for the given attestation target checkpoint.
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error) {
	ss, err := slots.EpochStart(target.Epoch)
	if err != nil {
		return nil, err
	}
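	// Reject target checkpoints whose epoch start slot is ahead of the local clock.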
	if err := slots.ValidateClock(ss, uint64(s.genesisTime.Unix())); err != nil {
		return nil, err
	}
	return s.getAttPreState(ctx, target)
}

// VerifyLmdFfgConsistency verifies that the attestation's LMD and FFG votes are consistent with each other.
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestation) error {
	targetSlot, err := slots.EpochStart(a.Data.Target.Epoch)
	if err != nil {
		return err
	}
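	// The LMD vote (the attested beacon block root) must descend from the FFG target root:
	// walking the chain back from the beacon block root to the target epoch's start slot
	// has to land on the target root.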
	r, err := s.ancestor(ctx, a.Data.BeaconBlockRoot, targetSlot)
	if err != nil {
		return err
	}
	if !bytes.Equal(a.Data.Target.Root, r) {
		return errors.New("FFG and LMD votes are not consistent")
	}
	return nil
}

// VerifyFinalizedConsistency verifies that the input root is consistent with the finalized store.
// When the input root is not consistent with the finalized store, we know it is not on the chain
// descending from the finalized checkpoint that leads to the current canonical head, and it
// should be rejected accordingly.
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
	// A canonical root implies the root has an ancestor that aligns with the finalized checkpoint.
	// In this case, we can exit early to save on additional computation.
	if s.cfg.ForkChoiceStore.IsCanonical(bytesutil.ToBytes32(root)) {
		return nil
	}
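
	// Otherwise, walk back from the input root to the finalized epoch's start slot and
	// require that the walk lands on the finalized root.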
	f := s.FinalizedCheckpt()
	ss, err := slots.EpochStart(f.Epoch)
	if err != nil {
		return err
	}
	r, err := s.ancestor(ctx, root, ss)
	if err != nil {
		return err
	}
	if !bytes.Equal(f.Root, r) {
		return errors.New("Root and finalized store are not consistent")
	}

	return nil
}

// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- struct{}) {
	// Wait for state to be initialized.
	stateChannel := make(chan *feed.Event, 1)
	stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
	subscribedToStateEvents <- struct{}{}
	<-stateChannel
	stateSub.Unsubscribe()
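
	// The slot ticker below requires a non-zero genesis time, so wait for it to be set
	// before starting to process attestations.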
	if s.genesisTime.IsZero() {
		log.Warn("ProcessAttestations routine waiting for genesis time")
		for s.genesisTime.IsZero() {
			time.Sleep(1 * time.Second)
		}
		log.Warn("Genesis time received, now available to process attestations")
	}

	st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
	for {
		select {
		case <-s.ctx.Done():
			return
		case <-st.C():
			// Continue when there are no fork choice attestations; there is nothing to process
			// and no head update to perform. This covers the condition when the node is still
			// initial syncing to the head of the chain.
			if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
				continue
			}
			s.processAttestations(s.ctx)
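
			// s.justifiedBalances is the lazily populated, root-keyed balance cache introduced
			// with this change; repeated ticks for an unchanged justified root reuse the cached
			// balances instead of re-reading the justified state.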
			balances, err := s.justifiedBalances.get(s.ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
			if err != nil {
				log.Errorf("Unable to get justified balances for root %v w/ error %s", s.justifiedCheckpt.Root, err)
				continue
			}
			if err := s.updateHead(s.ctx, balances); err != nil {
				log.Warnf("Resolving fork due to new attestation: %v", err)
			}
		}
	}
}

// This processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) processAttestations(ctx context.Context) {
	atts := s.cfg.AttPool.ForkchoiceAttestations()
	for _, a := range atts {
		// Based on the spec, don't process the attestation until the subsequent slot.
		// This delays its consideration in fork choice until its slot is in the past.
		// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
		nextSlot := a.Data.Slot + 1
		if err := slots.VerifyTime(uint64(s.genesisTime.Unix()), nextSlot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
			continue
		}
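
		// Only attestations whose beacon block and corresponding state summary are known
		// locally can be applied; anything else is left in the pool for a later pass.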
		hasState := s.cfg.BeaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
		hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
		if !(hasState && hasBlock) {
			continue
		}
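
		// Remove the attestation from the pool before the checkpoint-epoch check below,
		// so attestations that fail it are not reprocessed on the next tick.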
		if err := s.cfg.AttPool.DeleteForkchoiceAttestation(a); err != nil {
			log.WithError(err).Error("Could not delete fork choice attestation in pool")
		}

		if !helpers.VerifyCheckpointEpoch(a.Data.Target, s.genesisTime) {
			continue
		}

		if err := s.ReceiveAttestationNoPubsub(ctx, a); err != nil {
			log.WithFields(logrus.Fields{
				"slot":             a.Data.Slot,
				"committeeIndex":   a.Data.CommitteeIndex,
				"beaconBlockRoot":  fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.BeaconBlockRoot)),
				"targetRoot":       fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.Target.Root)),
				"aggregationCount": a.AggregationBits.Count(),
			}).WithError(err).Warn("Could not process attestation for fork choice")
		}
	}
}