Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git (synced 2024-12-23 03:51:29 +00:00)
3b9e974a45
* Add epoch number and root in not a checkpt in forkchoice log
* Update beacon-chain/blockchain/process_attestation_helpers.go
* Fix test
* Fix typo

Co-authored-by: Nishant Das <nishdas93@gmail.com>
130 lines
5.0 KiB
Go
package blockchain

import (
	"context"
	"fmt"
	"strconv"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/async"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
	forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/time/slots"
)

// getAttPreState retrieves the attestation pre-state from either the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
	// If the attestation is recent and canonical we can use the head state to compute the shuffling.
	headEpoch := slots.ToEpoch(s.HeadSlot())
	if c.Epoch == headEpoch {
		targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
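		// Only fall back to the head state when the target block is at most one
		// epoch behind the head; an older target could sit behind an epoch
		// transition whose shuffling no longer matches the head state's.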
		if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
			if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
				return s.HeadStateReadOnly(ctx)
			}
		}
	}
	// Use a multilock to allow scoped holding of a mutex keyed by checkpoint
	// root + epoch, letting concurrent calls for different checkpoints proceed
	// independently.
	epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
	lock := async.NewMultilock(string(c.Root) + epochKey)
	lock.Lock()
	defer lock.Unlock()
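	// Callers racing on the same checkpoint serialize on the lock above, so
	// whoever loses the race finds the state already cached below instead of
	// regenerating it.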
	cachedState, err := s.checkpointStateCache.StateByCheckpoint(c)
	if err != nil {
		return nil, errors.Wrap(err, "could not get cached checkpoint state")
	}
	if cachedState != nil && !cachedState.IsNil() {
		return cachedState, nil
	}
	// Try the next-slot cache for early-epoch calls; those should mostly have
	// been covered by the checkpoint cache already, but this lookup is cheap.
	slot, err := slots.EpochStart(c.Epoch)
	if err != nil {
		return nil, errors.Wrap(err, "could not compute epoch start")
	}
	cachedState = transition.NextSlotState(c.Root, slot)
	if cachedState != nil && !cachedState.IsNil() {
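		// The cached state may not have reached the epoch start slot yet;
		// advance it the rest of the way through empty slot processing.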
		if cachedState.Slot() != slot {
			cachedState, err = transition.ProcessSlots(ctx, cachedState, slot)
			if err != nil {
				return nil, errors.Wrap(err, "could not process slots")
			}
		}
		if err := s.checkpointStateCache.AddCheckpointState(c, cachedState); err != nil {
			return nil, errors.Wrap(err, "could not save checkpoint state to cache")
		}
		return cachedState, nil
	}

	// Do not process attestations for old, non-viable checkpoints.
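	// A checkpoint is viable only if the block at the checkpoint root could
	// legitimately serve as the epoch boundary block for c.Epoch in forkchoice.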
	ok, err := s.cfg.ForkChoiceStore.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: [32]byte(c.Root), Epoch: c.Epoch})
	if err != nil {
		return nil, errors.Wrap(err, "could not check checkpoint condition in forkchoice")
	}
	if !ok {
		return nil, errors.Wrap(ErrNotCheckpoint, fmt.Sprintf("epoch %d root %#x", c.Epoch, c.Root))
	}

	// Fallback to state regeneration.
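	// StateGen may need to load a saved state from the DB and replay blocks up
	// to the checkpoint root, so this is the slowest path.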
	baseState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
	if err != nil {
		return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)
	}

	epochStartSlot, err := slots.EpochStart(c.Epoch)
	if err != nil {
		return nil, err
	}
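	// ProcessSlotsIfPossible leaves the state untouched when it is already at
	// or past the epoch start slot, only advancing it when needed.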
	baseState, err = transition.ProcessSlotsIfPossible(ctx, baseState, epochStartSlot)
	if err != nil {
		return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
	}

	// Sharing the same state across caches is perfectly fine here: fetching an
	// attestation pre-state is by far the most common state access pattern in
	// the beacon node, so one extra cached state instance is not an issue in
	// the bigger picture.
	if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
		return nil, errors.Wrap(err, "could not save checkpoint state to cache")
	}
	return baseState, nil
}

// verifyAttTargetEpoch validates that the attestation is from the current or previous epoch.
func verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
	currentSlot := primitives.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
	currentEpoch := slots.ToEpoch(currentSlot)
	var prevEpoch primitives.Epoch
	// Prevent underflow of the previous epoch.
	if currentEpoch > 1 {
		prevEpoch = currentEpoch - 1
	}
	if c.Epoch != prevEpoch && c.Epoch != currentEpoch {
		return fmt.Errorf("target epoch %d does not match current epoch %d or prev epoch %d", c.Epoch, currentEpoch, prevEpoch)
	}
	return nil
}

// verifyBeaconBlock verifies that the attestation's beacon block is known and not from the future.
func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.AttestationData) error {
	r := bytesutil.ToBytes32(data.BeaconBlockRoot)
	b, err := s.getBlock(ctx, r)
	if err != nil {
		return err
	}
	if err := blocks.BeaconBlockIsNil(b); err != nil {
		return err
	}
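	// An attestation cannot point to a block from a later slot than the
	// attestation itself.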
	if b.Block().Slot() > data.Slot {
		return fmt.Errorf("could not process attestation for future block, block.Slot=%d > attestation.Data.Slot=%d", b.Block().Slot(), data.Slot)
	}
	return nil
}