package blockchain

import (
	"bytes"
	"context"
	"fmt"

	"github.com/pkg/errors"
	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/shared/attestationutil"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/traceutil"
	"go.opencensus.io/trace"
)

// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() uint64 {
	return helpers.CurrentSlot(uint64(s.genesisTime.Unix()))
}
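
// Illustrative sketch (assumes the helper's internals, so treat it as a rough
// equivalent rather than the implementation): helpers.CurrentSlot boils down
// to dividing the wall-clock time elapsed since genesis by the configured
// slot duration, roughly
//
//	secondsSinceGenesis := uint64(time.Now().Unix()) - genesisTimeSec
//	slot := secondsSinceGenesis / params.BeaconConfig().SecondsPerSlot
//
// where genesisTimeSec stands for the helper's argument and the result is
// clamped to 0 before the genesis time.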

// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and that the incoming block
// is in the correct time window.
func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "blockChain.getBlockPreState")
	defer span.End()

	// Verify incoming block has a valid pre state.
	if err := s.verifyBlkPreState(ctx, b); err != nil {
		return nil, err
	}

	preState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(b.ParentRoot))
	if err != nil {
		return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
	}
	if preState == nil {
		// err is nil on this path, so wrapping it would yield a nil error. Construct a new one instead.
		return nil, fmt.Errorf("nil pre state for slot %d", b.Slot)
	}

	// Verify block slot time is not from the future.
	if err := helpers.VerifySlotTime(preState.GenesisTime(), b.Slot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
		return nil, err
	}

	// Verify block is later than the finalized epoch slot.
	if err := s.verifyBlkFinalizedSlot(b); err != nil {
		return nil, err
	}

	return preState, nil
}
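
// For intuition (this value comes from the networking spec, not this file):
// MaximumGossipClockDisparity corresponds to MAXIMUM_GOSSIP_CLOCK_DISPARITY,
// 500ms on mainnet, so VerifySlotTime still accepts a block that arrives up
// to half a second before its slot officially starts.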

// verifyBlkPreState validates that the input block has a valid pre-state.
func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) error {
	ctx, span := trace.StartSpan(ctx, "blockChain.verifyBlkPreState")
	defer span.End()

	parentRoot := bytesutil.ToBytes32(b.ParentRoot)
	// Loosen the check to HasBlock because state summaries get saved in batches
	// during initial syncing. There's no risk given a state summary object is just
	// a subset of the block object.
	if !s.stateGen.StateSummaryExists(ctx, parentRoot) && !s.beaconDB.HasBlock(ctx, parentRoot) {
		return errors.New("could not reconstruct parent state")
	}

	if err := s.VerifyBlkDescendant(ctx, parentRoot); err != nil {
		return err
	}

	has, err := s.stateGen.HasState(ctx, parentRoot)
	if err != nil {
		return err
	}
	if !has {
		if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
			return errors.Wrap(err, "could not save initial sync blocks")
		}
		s.clearInitSyncBlocks()
	}
	return nil
}

// VerifyBlkDescendant validates that the input block root is a descendant of the
// current finalized block root.
func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error {
	ctx, span := trace.StartSpan(ctx, "blockChain.VerifyBlkDescendant")
	defer span.End()
	fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.finalizedCheckpt.Root))
	finalizedBlkSigned, err := s.beaconDB.Block(ctx, fRoot)
	if err != nil {
		return err
	}
	if finalizedBlkSigned == nil || finalizedBlkSigned.Block == nil {
		return errors.New("nil finalized block")
	}
	finalizedBlk := finalizedBlkSigned.Block
	bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot)
	if err != nil {
		return errors.Wrap(err, "could not get finalized block root")
	}
	if bFinalizedRoot == nil {
		return fmt.Errorf("no finalized block known for block %#x", bytesutil.Trunc(root[:]))
	}

	if !bytes.Equal(bFinalizedRoot, fRoot[:]) {
		err := fmt.Errorf("block %#x is not a descendant of the current finalized block slot %d, %#x != %#x",
			bytesutil.Trunc(root[:]), finalizedBlk.Slot, bytesutil.Trunc(bFinalizedRoot),
			bytesutil.Trunc(fRoot[:]))
		traceutil.AnnotateError(span, err)
		return err
	}
	return nil
}

// verifyBlkFinalizedSlot validates that the input block is not less than or equal
// to the current finalized slot.
func (s *Service) verifyBlkFinalizedSlot(b *ethpb.BeaconBlock) error {
	finalizedSlot, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
	if err != nil {
		return err
	}
	if finalizedSlot >= b.Slot {
		return fmt.Errorf("block is equal or earlier than finalized block, slot %d <= slot %d", b.Slot, finalizedSlot)
	}
	return nil
}
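
// Worked example (illustrative only): with mainnet's 32 slots per epoch, a
// finalized checkpoint at epoch 10 gives helpers.StartSlot(10) == 320, so any
// block with slot <= 320 is rejected here as already-finalized history.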

// shouldUpdateCurrentJustified prevents a bouncing attack by only updating conflicting justified
// checkpoints in fork choice during the early slots of the epoch.
// Otherwise, it delays incorporation of the new justified checkpoint until the next epoch boundary.
// See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustifiedCheckpt *ethpb.Checkpoint) (bool, error) {
	if helpers.SlotsSinceEpochStarts(s.CurrentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
		return true, nil
	}
	var newJustifiedBlockSigned *ethpb.SignedBeaconBlock
	justifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(newJustifiedCheckpt.Root))
	var err error
	if s.hasInitSyncBlock(justifiedRoot) {
		newJustifiedBlockSigned = s.getInitSyncBlock(justifiedRoot)
	} else {
		newJustifiedBlockSigned, err = s.beaconDB.Block(ctx, justifiedRoot)
		if err != nil {
			return false, err
		}
	}
	if newJustifiedBlockSigned == nil || newJustifiedBlockSigned.Block == nil {
		return false, errors.New("nil new justified block")
	}

	newJustifiedBlock := newJustifiedBlockSigned.Block
	jSlot, err := helpers.StartSlot(s.justifiedCheckpt.Epoch)
	if err != nil {
		return false, err
	}
	if newJustifiedBlock.Slot <= jSlot {
		return false, nil
	}
	var justifiedBlockSigned *ethpb.SignedBeaconBlock
	cachedJustifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))
	if s.hasInitSyncBlock(cachedJustifiedRoot) {
		justifiedBlockSigned = s.getInitSyncBlock(cachedJustifiedRoot)
	} else {
		justifiedBlockSigned, err = s.beaconDB.Block(ctx, cachedJustifiedRoot)
		if err != nil {
			return false, err
		}
	}

	if justifiedBlockSigned == nil || justifiedBlockSigned.Block == nil {
		return false, errors.New("nil justified block")
	}
	justifiedBlock := justifiedBlockSigned.Block
	b, err := s.ancestor(ctx, justifiedRoot[:], justifiedBlock.Slot)
	if err != nil {
		return false, err
	}
	if !bytes.Equal(b, s.justifiedCheckpt.Root) {
		return false, nil
	}
	return true, nil
}
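
// Worked example (illustrative only): phase 0 mainnet sets
// SAFE_SLOTS_TO_UPDATE_JUSTIFIED to 8, so with 32 slots per epoch a
// conflicting justified checkpoint is adopted immediately only while
// slot % 32 < 8; later in the epoch the update is deferred until the next
// epoch boundary via bestJustifiedCheckpt.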

func (s *Service) updateJustified(ctx context.Context, state *stateTrie.BeaconState) error {
	cpt := state.CurrentJustifiedCheckpoint()
	if cpt.Epoch > s.bestJustifiedCheckpt.Epoch {
		s.bestJustifiedCheckpt = cpt
	}
	canUpdate, err := s.shouldUpdateCurrentJustified(ctx, cpt)
	if err != nil {
		return err
	}

	if canUpdate {
		s.prevJustifiedCheckpt = s.justifiedCheckpt
		s.justifiedCheckpt = cpt
		if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
			return err
		}
	}

	return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}

// This caches the input checkpoint as justified for the service struct. It rotates the current justified to previous justified,
// caches the justified checkpoint balances for fork choice, and saves the justified checkpoint in DB.
// This method has no defense against the fork choice bouncing attack, which is why it's only recommended for use during initial syncing.
func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpoint) error {
	s.prevJustifiedCheckpt = s.justifiedCheckpt
	s.justifiedCheckpt = cp
	if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
		return err
	}

	return s.beaconDB.SaveJustifiedCheckpoint(ctx, cp)
}

func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
	// Blocks need to be saved so that we can retrieve the finalized block from
	// DB when migrating states.
	if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
		return err
	}
	s.clearInitSyncBlocks()

	if err := s.beaconDB.SaveFinalizedCheckpoint(ctx, cp); err != nil {
		return err
	}

	s.prevFinalizedCheckpt = s.finalizedCheckpt
	s.finalizedCheckpt = cp

	fRoot := bytesutil.ToBytes32(cp.Root)
	if err := s.stateGen.MigrateToCold(ctx, fRoot); err != nil {
		return errors.Wrap(err, "could not migrate to cold")
	}

	return nil
}

// ancestor returns the block root of an ancestor block from the input block root.
//
// Spec pseudocode definition:
//   def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
//     block = store.blocks[root]
//     if block.slot > slot:
//       return get_ancestor(store, block.parent_root, slot)
//     elif block.slot == slot:
//       return root
//     else:
//       # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
//       return root
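//
// For intuition (illustrative only): if root R sits at slot 5 and slots 6 and 7
// were skipped, get_ancestor(store, R, 7) returns R itself, i.e. the most
// recent root at or before the queried slot.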
func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byte, error) {
	ctx, span := trace.StartSpan(ctx, "blockChain.ancestor")
	defer span.End()

	r := bytesutil.ToBytes32(root)
	// Get the ancestor root from the fork choice store instead of recursively looking up blocks in DB.
	// This is the optimal path.
	ar, err := s.ancestorByForkChoiceStore(ctx, r, slot)
	if err != nil {
		// Fall back to the DB when retrieval from the fork choice store fails.
		// This is the second line of defense for retrieving an ancestor root.
		ar, err = s.ancestorByDB(ctx, r, slot)
		if err != nil {
			return nil, err
		}
	}

	return ar, nil
}

// This retrieves an ancestor root using the fork choice store. The lookup loops through a flat array structure.
func (s *Service) ancestorByForkChoiceStore(ctx context.Context, r [32]byte, slot uint64) ([]byte, error) {
	ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByForkChoiceStore")
	defer span.End()

	if !s.forkChoiceStore.HasParent(r) {
		return nil, errors.New("could not find root in fork choice store")
	}
	return s.forkChoiceStore.AncestorRoot(ctx, r, slot)
}

// This retrieves an ancestor root from the DB. The lookup recurses through the DB and is slower than `ancestorByForkChoiceStore`.
func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot uint64) ([]byte, error) {
	ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByDB")
	defer span.End()

	// Stop the recursive ancestry lookup if the context is canceled.
	if ctx.Err() != nil {
		return nil, ctx.Err()
	}

	signed, err := s.beaconDB.Block(ctx, r)
	if err != nil {
		return nil, errors.Wrap(err, "could not get ancestor block")
	}

	if s.hasInitSyncBlock(r) {
		signed = s.getInitSyncBlock(r)
	}

	if signed == nil || signed.Block == nil {
		return nil, errors.New("nil block")
	}
	b := signed.Block
	// The target slot is reached, either exactly or via a skip slot.
	if b.Slot <= slot {
		return r[:], nil
	}

	return s.ancestorByDB(ctx, bytesutil.ToBytes32(b.ParentRoot), slot)
}
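
// Illustrative walk (hypothetical chain, for intuition only): given
// A(slot 1) <- B(slot 3) <- C(slot 5), ancestorByDB(ctx, C, 2) recurses
// C -> B -> A and returns A's root, since A is the first block on the path
// with slot <= 2.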

// This updates the justified checkpoint in store if the new justified checkpoint is later than the stored one, or
// if the store's justified checkpoint is not in chain with the finalized checkpoint.
//
// Spec definition:
//   # Potentially update justified if different from store
//   if store.justified_checkpoint != state.current_justified_checkpoint:
//     # Update justified if new justified is later than store justified
//     if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
//       store.justified_checkpoint = state.current_justified_checkpoint
//       return
//     # Update justified if store justified is not in chain with finalized checkpoint
//     finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
//     ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
//     if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
//       store.justified_checkpoint = state.current_justified_checkpoint
func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state *stateTrie.BeaconState) error {
	// Update justified if it's different than the one cached in the store.
	if !attestationutil.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
		if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
			s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
			return s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
		}

		// Update justified if the store's justified is not in chain with the finalized checkpoint.
		finalizedSlot, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
		if err != nil {
			return err
		}
		justifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))
		anc, err := s.ancestor(ctx, justifiedRoot[:], finalizedSlot)
		if err != nil {
			return err
		}
		if !bytes.Equal(anc, s.finalizedCheckpt.Root) {
			s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
			if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
				return err
			}
		}
	}
	return nil
}

// This retrieves missing blocks from the DB (i.e. blocks that couldn't be received over sync) and inserts them into the fork choice store.
// This is useful for the block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.BeaconBlock,
	fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
	pendingNodes := make([]*ethpb.BeaconBlock, 0)

	parentRoot := bytesutil.ToBytes32(blk.ParentRoot)
	slot := blk.Slot
	// Fork choice only matters from the last finalized slot onwards.
	fSlot, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
	if err != nil {
		return err
	}
	higherThanFinalized := slot > fSlot
	// Walk up as long as the parent node is not in the fork choice store, the parent node is in DB,
	// and we are above the finalized slot.
	for !s.forkChoiceStore.HasNode(parentRoot) && s.beaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
		b, err := s.beaconDB.Block(ctx, parentRoot)
		if err != nil {
			return err
		}

		pendingNodes = append(pendingNodes, b.Block)
		parentRoot = bytesutil.ToBytes32(b.Block.ParentRoot)
		slot = b.Block.Slot
		higherThanFinalized = slot > fSlot
	}

	// Insert parent nodes into the fork choice store in reverse order.
	// Lower slots should be at the end of the list.
	for i := len(pendingNodes) - 1; i >= 0; i-- {
		b := pendingNodes[i]
		r, err := b.HashTreeRoot()
		if err != nil {
			return err
		}

		if err := s.forkChoiceStore.ProcessBlock(ctx,
			b.Slot, r, bytesutil.ToBytes32(b.ParentRoot), bytesutil.ToBytes32(b.Body.Graffiti),
			jCheckpoint.Epoch,
			fCheckpoint.Epoch); err != nil {
			return errors.Wrap(err, "could not process block for proto array fork choice")
		}
	}

	return nil
}
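
// Illustrative walk (hypothetical chain, for intuition only): for finalized
// block F and a new block D on the chain F <- A <- B <- C <- D, where only F
// is already in the fork choice store, the first loop collects [C, B, A] and
// the reverse insertion processes A, B, then C, so every parent exists in the
// store before its child.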

// This deletes the input attestations from the attestation pool so that proposers don't include them in a future block.
func (s *Service) deletePoolAtts(atts []*ethpb.Attestation) error {
	for _, att := range atts {
		if helpers.IsAggregated(att) {
			if err := s.attPool.DeleteAggregatedAttestation(att); err != nil {
				return err
			}
		} else {
			if err := s.attPool.DeleteUnaggregatedAttestation(att); err != nil {
				return err
			}
		}
	}

	return nil
}

// This ensures that the input root defaults to the genesis root instead of the zero hash. This is needed for
// the fork choice justification routine.
func (s *Service) ensureRootNotZeros(root [32]byte) [32]byte {
	if root == params.BeaconConfig().ZeroHash {
		return s.genesisRoot
	}
	return root
}
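
// Illustrative usage (for intuition only): checkpoints minted before anything
// has been justified or finalized carry a zero-hash root, which fork choice
// maps back to the genesis root:
//
//	s.ensureRootNotZeros(params.BeaconConfig().ZeroHash) // == s.genesisRoot
//	s.ensureRootNotZeros(blockRoot)                      // == blockRoot, unchanged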