Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git (synced 2024-12-22 03:30:35 +00:00)
Implement should_override_forkchoice_update (#11981)
* Implement should_override_forchoice_update
* add tests
* remove unused exported function
* go mod tidy
* fix go mod
* Fix go mod tidy
* Fix context import
* mod tidy
* fix test
* Terence's review
* fix test

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
parent: b687d29d02
commit: 166f7119ec
@@ -16,25 +16,25 @@ var errNilState = errors.New("nil state")
 // UnrealizedCheckpoints returns the justification and finalization checkpoints of the
 // given state as if it was progressed with empty slots until the next epoch. It
 // also returns the total active balance during the epoch.
-func UnrealizedCheckpoints(st state.BeaconState) (uint64, *ethpb.Checkpoint, *ethpb.Checkpoint, error) {
+func UnrealizedCheckpoints(st state.BeaconState) (*ethpb.Checkpoint, *ethpb.Checkpoint, error) {
 	if st == nil || st.IsNil() {
-		return 0, nil, nil, errNilState
+		return nil, nil, errNilState
 	}

 	if slots.ToEpoch(st.Slot()) <= params.BeaconConfig().GenesisEpoch+1 {
 		jc := st.CurrentJustifiedCheckpoint()
 		fc := st.FinalizedCheckpoint()
-		return 0, jc, fc, nil
+		return jc, fc, nil
 	}

 	activeBalance, prevTarget, currentTarget, err := st.UnrealizedCheckpointBalances()
 	if err != nil {
-		return 0, nil, nil, err
+		return nil, nil, err
 	}

 	justification := processJustificationBits(st, activeBalance, prevTarget, currentTarget)
 	jc, fc, err := computeCheckpoints(st, justification)
-	return activeBalance, jc, fc, err
+	return jc, fc, err
 }

 // ProcessJustificationAndFinalizationPreCompute processes justification and finalization during
@@ -242,12 +242,10 @@ func TestUnrealizedCheckpoints(t *testing.T) {
 			_, _, err = altair.InitializePrecomputeValidators(context.Background(), state)
 			require.NoError(t, err)

-			ab, jc, fc, err := precompute.UnrealizedCheckpoints(state)
+			jc, fc, err := precompute.UnrealizedCheckpoints(state)
 			require.NoError(t, err)
 			require.DeepEqual(t, test.expectedJustified, jc.Epoch)
 			require.DeepEqual(t, test.expectedFinalized, fc.Epoch)
-			eb := params.BeaconConfig().MinGenesisActiveValidatorCount * params.BeaconConfig().MaxEffectiveBalance
-			require.Equal(t, eb, ab)
 		})
 	}
 }
@@ -11,6 +11,7 @@ go_library(
         "on_tick.go",
         "optimistic_sync.go",
         "proposer_boost.go",
+        "reorg_late_blocks.go",
         "store.go",
         "types.go",
         "unrealized_justification.go",
@@ -53,6 +54,7 @@ go_test(
         "on_tick_test.go",
         "optimistic_sync_test.go",
         "proposer_boost_test.go",
+        "reorg_late_blocks_test.go",
         "store_test.go",
         "unrealized_justification_test.go",
         "vote_test.go",
@@ -709,5 +709,14 @@ func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte)
 		return errors.Wrap(err, "could not get justified balances")
 	}
 	f.justifiedBalances = balances
+	f.store.committeeWeight = 0
+	f.numActiveValidators = 0
+	for _, val := range balances {
+		if val > 0 {
+			f.store.committeeWeight += val
+			f.numActiveValidators++
+		}
+	}
+	f.store.committeeWeight /= uint64(params.BeaconConfig().SlotsPerEpoch)
 	return nil
 }
@@ -785,3 +785,16 @@ func TestWeight(t *testing.T) {
 	require.ErrorIs(t, err, ErrNilNode)
 	require.Equal(t, uint64(0), w)
 }
+
+func TestForkchoice_UpdateJustifiedBalances(t *testing.T) {
+	f := setup(0, 0)
+	balances := []uint64{10, 0, 0, 40, 50, 60, 0, 80, 90, 100}
+	f.balancesByRoot = func(context.Context, [32]byte) ([]uint64, error) {
+		return balances, nil
+	}
+	require.NoError(t, f.updateJustifiedBalances(context.Background(), [32]byte{}))
+	require.Equal(t, uint64(7), f.numActiveValidators)
+	require.Equal(t, uint64(430)/32, f.store.committeeWeight)
+	require.DeepEqual(t, balances, f.justifiedBalances)
+
+}
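The two hunks above are the core of the new committee-weight bookkeeping: only non-zero justified balances count as active, their sum is divided by SLOTS_PER_EPOCH, and the test checks exactly that with uint64(430)/32. Below is a self-contained sketch of the same arithmetic using the balances from the test above; the hard-coded constant and the printout are illustrative only, not Prysm API calls.

    package main

    import "fmt"

    func main() {
    	// Balances taken from TestForkchoice_UpdateJustifiedBalances above.
    	balances := []uint64{10, 0, 0, 40, 50, 60, 0, 80, 90, 100}
    	const slotsPerEpoch = 32 // mainnet SLOTS_PER_EPOCH, assumed here

    	var committeeWeight, numActiveValidators uint64
    	for _, val := range balances {
    		if val > 0 { // a zero balance marks an inactive validator
    			committeeWeight += val
    			numActiveValidators++
    		}
    	}
    	committeeWeight /= slotsPerEpoch

    	fmt.Println(numActiveValidators, committeeWeight) // 7 13, i.e. uint64(430)/32
    }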
@@ -9,8 +9,17 @@ import (
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 	"github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
 	v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
+	"github.com/prysmaticlabs/prysm/v3/time/slots"
 )

+// orphanLateBlockFirstThreshold is the number of seconds after which we
+// consider a block to be late, and thus a candidate to being reorged.
+const orphanLateBlockFirstThreshold = 4
+
+// processAttestationsThreshold is the number of seconds after which we
+// process attestations for the current slot
+const processAttestationsThreshold = 10
+
 // applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
 // using the current balance stored in each node. This function requires a lock
 // in Store.nodesLock
@@ -123,6 +132,26 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
 	return n.parent.setNodeAndParentValidated(ctx)
 }

+// arrivedEarly returns whether this node was inserted before the first
+// threshold to orphan a block.
+// Note that genesisTime has seconds granularity, therefore we use a strict
+// inequality < here. For example a block that arrives 3.9999 seconds into the
+// slot will have secs = 3 below.
+func (n *Node) arrivedEarly(genesisTime uint64) (bool, error) {
+	secs, err := slots.SecondsSinceSlotStart(n.slot, genesisTime, n.timestamp)
+	return secs < orphanLateBlockFirstThreshold, err
+}
+
+// arrivedAfterOrphanCheck returns whether this block was inserted after the
+// intermediate checkpoint to check for candidate of being orphaned.
+// Note that genesisTime has seconds granularity, therefore we use an
+// inequality >= here. For example a block that arrives 10.00001 seconds into the
+// slot will have secs = 10 below.
+func (n *Node) arrivedAfterOrphanCheck(genesisTime uint64) (bool, error) {
+	secs, err := slots.SecondsSinceSlotStart(n.slot, genesisTime, n.timestamp)
+	return secs >= processAttestationsThreshold, err
+}
+
 // nodeTreeDump appends to the given list all the nodes descending from this one
 func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]*v1.ForkChoiceNode, error) {
 	if ctx.Err() != nil {
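Together with the constants added to node.go above, these helpers split a block's arrival time within its slot into three windows: under 4 seconds it is early and never a reorg candidate, from 4 up to (but not including) 10 seconds it is late enough to be considered for orphaning, and from 10 seconds on it lands after the attestation-processing cutoff. A minimal sketch of that classification follows; the helper name and the printout are illustrative, not part of the diff.

    package main

    import "fmt"

    const (
    	orphanLateBlockFirstThreshold = 4  // seconds, as in node.go above
    	processAttestationsThreshold  = 10 // seconds, as in node.go above
    )

    // classify mirrors arrivedEarly / arrivedAfterOrphanCheck for a block that
    // arrived secs seconds into its slot.
    func classify(secs uint64) string {
    	switch {
    	case secs < orphanLateBlockFirstThreshold:
    		return "early - not a reorg candidate"
    	case secs >= processAttestationsThreshold:
    		return "after the attestation-processing cutoff"
    	default:
    		return "late - candidate to be orphaned"
    	}
    }

    func main() {
    	for _, secs := range []uint64{3, 5, 10} {
    		fmt.Printf("%d seconds into the slot: %s\n", secs, classify(secs))
    	}
    }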
@@ -162,21 +191,3 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]
 	}
 	return nodes, nil
 }
-
-// VotedFraction returns the fraction of the committee that voted directly for
-// this node.
-func (f *ForkChoice) VotedFraction(root [32]byte) (uint64, error) {
-	f.store.nodesLock.RLock()
-	defer f.store.nodesLock.RUnlock()
-
-	// Avoid division by zero before a block is inserted.
-	if f.store.committeeBalance == 0 {
-		return 0, nil
-	}
-
-	node, ok := f.store.nodeByRoot[root]
-	if !ok || node == nil {
-		return 0, ErrNilNode
-	}
-	return node.balance * 100 / f.store.committeeBalance, nil
-}
@@ -260,44 +260,71 @@ func TestNode_SetFullyValidated(t *testing.T) {
 	}
 }

-func TestStore_VotedFraction(t *testing.T) {
-	f := setup(1, 1)
+func TestNode_TimeStampsChecks(t *testing.T) {
+	f := setup(0, 0)
 	ctx := context.Background()
-	state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
+
+	// early block
+	driftGenesisTime(f, 1, 1)
+	root := [32]byte{'a'}
+	f.justifiedBalances = []uint64{10}
+	state, blkRoot, err := prepareForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
 	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
+	headRoot, err := f.Head(ctx)
 	require.NoError(t, err)
+	require.Equal(t, root, headRoot)
+	early, err := f.store.headNode.arrivedEarly(f.store.genesisTime)
+	require.NoError(t, err)
+	require.Equal(t, true, early)
+	late, err := f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
+	require.NoError(t, err)
+	require.Equal(t, false, late)
+
+	// late block
+	driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
+	root = [32]byte{'b'}
+	state, blkRoot, err = prepareForkchoiceState(ctx, 2, root, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
+	require.NoError(t, err)
 	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+	headRoot, err = f.Head(ctx)
+	require.NoError(t, err)
+	require.Equal(t, root, headRoot)
+	early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
+	require.NoError(t, err)
+	require.Equal(t, false, early)
+	late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
+	require.NoError(t, err)
+	require.Equal(t, false, late)

-	// No division by zero error
-	vote, err := f.VotedFraction([32]byte{'b'})
+	// very late block
+	driftGenesisTime(f, 3, processAttestationsThreshold+1)
+	root = [32]byte{'c'}
+	state, blkRoot, err = prepareForkchoiceState(ctx, 3, root, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
 	require.NoError(t, err)
-	require.Equal(t, uint64(0), vote)
+	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+	headRoot, err = f.Head(ctx)
+	require.NoError(t, err)
+	require.Equal(t, root, headRoot)
+	early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
+	require.NoError(t, err)
+	require.Equal(t, false, early)
+	late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
+	require.NoError(t, err)
+	require.Equal(t, true, late)

-	// Zero balance in the node
-	f.store.committeeBalance = 100 * params.BeaconConfig().MaxEffectiveBalance
-	vote, err = f.VotedFraction([32]byte{'b'})
+	// block from the future
+	root = [32]byte{'d'}
+	state, blkRoot, err = prepareForkchoiceState(ctx, 5, root, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
 	require.NoError(t, err)
-	require.Equal(t, uint64(0), vote)
-
-	// Attestations are not counted until we process Head
-	f.justifiedBalances = []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance}
-	_, err = f.Head(context.Background())
+	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
+	headRoot, err = f.Head(ctx)
 	require.NoError(t, err)
-	f.ProcessAttestation(context.Background(), []uint64{0, 1}, [32]byte{'b'}, 2)
-	vote, err = f.VotedFraction([32]byte{'b'})
-	require.NoError(t, err)
-	require.Equal(t, uint64(0), vote)
-
-	// After we call head the voted fraction is obtained.
-	_, err = f.Head(context.Background())
-	require.NoError(t, err)
-	vote, err = f.VotedFraction([32]byte{'b'})
-	require.NoError(t, err)
-	require.Equal(t, uint64(2), vote)
-
-	// Check for non-existent root
-	_, err = f.VotedFraction([32]byte{'c'})
-	require.ErrorIs(t, err, ErrNilNode)
+	require.Equal(t, root, headRoot)
+	early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
+	require.ErrorContains(t, "invalid timestamp", err)
+	require.Equal(t, true, early)
+	late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
+	require.ErrorContains(t, "invalid timestamp", err)
+	require.Equal(t, false, late)
 }
@@ -2,8 +2,9 @@ package doublylinkedtree

 import (
 	"context"
+	"fmt"

 	"github.com/pkg/errors"
 	fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
 	"github.com/prysmaticlabs/prysm/v3/config/params"
 )
@@ -15,29 +16,44 @@ func (f *ForkChoice) resetBoostedProposerRoot(_ context.Context) error {
 	return nil
 }

-// Given a list of validator balances, we compute the proposer boost score
-// that should be given to a proposer based on their committee weight, derived from
-// the total active balances, the size of a committee, and a boost score constant.
-// IMPORTANT: The caller MUST pass in a list of validator balances where balances > 0 refer to active
-// validators while balances == 0 are for inactive validators.
-func computeProposerBoostScore(validatorBalances []uint64) (score uint64, err error) {
-	totalActiveBalance := uint64(0)
-	numActive := uint64(0)
-	for _, balance := range validatorBalances {
-		// We only consider balances > 0. The input slice should be constructed
-		// as balance > 0 for all active validators and 0 for inactive ones.
-		if balance == 0 {
-			continue
+// applyProposerBoostScore applies the current proposer boost scores to the
+// relevant nodes. This function requires a lock in Store.nodesLock.
+func (f *ForkChoice) applyProposerBoostScore() error {
+	s := f.store
+	s.proposerBoostLock.Lock()
+	defer s.proposerBoostLock.Unlock()
+
+	// acquire checkpoints lock for the justified balances
+	s.checkpointsLock.RLock()
+	defer s.checkpointsLock.RUnlock()
+
+	proposerScore := uint64(0)
+	if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
+		previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
+		if !ok || previousNode == nil {
+			log.WithError(errInvalidProposerBoostRoot).Errorf(fmt.Sprintf("invalid prev root %#x", s.previousProposerBoostRoot))
+		} else {
+			previousNode.balance -= s.previousProposerBoostScore
 		}
-		totalActiveBalance += balance
-		numActive += 1
 	}
-	if numActive == 0 {
-		// Should never happen.
-		err = errors.New("no active validators")
-		return
+
+	if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
+		currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
+		if !ok || currentNode == nil {
+			log.WithError(errInvalidProposerBoostRoot).Errorf(fmt.Sprintf("invalid current root %#x", s.proposerBoostRoot))
+		} else {
+			proposerScore = (s.committeeWeight * params.BeaconConfig().ProposerScoreBoost) / 100
+			currentNode.balance += proposerScore
 		}
+	}
-	committeeWeight := totalActiveBalance / uint64(params.BeaconConfig().SlotsPerEpoch)
-	score = (committeeWeight * params.BeaconConfig().ProposerScoreBoost) / 100
-	return
+	s.previousProposerBoostRoot = s.proposerBoostRoot
+	s.previousProposerBoostScore = proposerScore
+	return nil
 }
+
+// ProposerBoost of fork choice store.
+func (s *Store) proposerBoost() [fieldparams.RootLength]byte {
+	s.proposerBoostLock.RLock()
+	defer s.proposerBoostLock.RUnlock()
+	return s.proposerBoostRoot
+}
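The boost applied above is now taken straight from the precomputed committee weight: proposerScore = committeeWeight * PROPOSER_SCORE_BOOST / 100. With the numbers the deleted computeProposerBoostScore test used (64 validators of balance 10 and 32 slots per epoch, giving a committee weight of 20, boosted by 40%), the result is the same score of 8. A one-line check of that arithmetic, for illustration only:

    package main

    import "fmt"

    func main() {
    	const proposerScoreBoost = 40           // mainnet PROPOSER_SCORE_BOOST, in percent
    	committeeWeight := uint64(64) * 10 / 32 // 64 validators of balance 10 over 32 slots -> 20
    	fmt.Println(committeeWeight * proposerScoreBoost / 100) // 8
    }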
@@ -34,6 +34,8 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
 	t.Run("back-propagates boost score to ancestors after proposer boosting", func(t *testing.T) {
 		f := setup(jEpoch, fEpoch)
 		f.justifiedBalances = balances
+		f.store.committeeWeight = uint64(len(balances)*10) / uint64(params.BeaconConfig().SlotsPerEpoch)
+		f.numActiveValidators = uint64(len(balances))

 		// The head should always start at the finalized block.
 		headRoot, err := f.Head(ctx)
@@ -154,8 +156,8 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
 		// Each of the nodes received one attestation accounting for 10.
 		// Node D is the only one with a proposer boost still applied:
 		//
-		//  (A: 48) -> (B: 38) -> (C: 10)
-		//            \--------------->(D: 18)
+		//  (1: 48) -> (2: 38) -> (3: 10)
+		//            \--------------->(4: 18)
 		//
 		node1 := f.store.nodeByRoot[indexToHash(1)]
 		require.Equal(t, node1.weight, uint64(48))
@@ -321,6 +323,8 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
 		// Block D received at N+3 - D is head
 		f := setup(jEpoch, fEpoch)
 		f.justifiedBalances = balances
+		f.store.committeeWeight = uint64(len(balances)*10) / uint64(params.BeaconConfig().SlotsPerEpoch)
+		f.numActiveValidators = uint64(len(balances))
 		a := zeroHash

 		// The head should always start at the finalized block.
@@ -462,27 +466,6 @@ func TestForkChoice_BoostProposerRoot(t *testing.T) {
 	})
 }

-func TestForkChoice_computeProposerBoostScore(t *testing.T) {
-	t.Run("nil justified balances throws error", func(t *testing.T) {
-		_, err := computeProposerBoostScore(nil)
-		require.ErrorContains(t, "no active validators", err)
-	})
-	t.Run("normal active balances computes score", func(t *testing.T) {
-		validatorBalances := make([]uint64, 64) // Num validators
-		for i := 0; i < len(validatorBalances); i++ {
-			validatorBalances[i] = 10
-		}
-		// Avg balance is 10, and the number of validators is 64.
-		// With a committee size of num validators (64) / slots per epoch (32) == 2.
-		// we then have a committee weight of avg balance * committee size = 10 * 2 = 20.
-		// The score then becomes 10 * PROPOSER_SCORE_BOOST // 100, which is
-		// 20 * 40 / 100 = 8.
-		score, err := computeProposerBoostScore(validatorBalances)
-		require.NoError(t, err)
-		require.Equal(t, uint64(8), score)
-	})
-}
-
 // Regression test (11053)
 func TestForkChoice_missingProposerBoostRoots(t *testing.T) {
 	ctx := context.Background()
@@ -0,0 +1,82 @@
+package doublylinkedtree
+
+import (
+	"github.com/prysmaticlabs/prysm/v3/config/params"
+	"github.com/prysmaticlabs/prysm/v3/time/slots"
+)
+
+// ShouldOverrideFCU returns whether the current forkchoice head is weak
+// and thus may be reorged when proposing the next block.
+// This function should only be called if the following two conditions are
+// satisfied:
+// 1- It is immediately after receiving a block that may be subject to a reorg
+//
+// or
+//
+// It is right after processAttestationsThreshold and we have processed the
+// current slots attestations.
+//
+// 2- The caller has already called Forkchoice.Head() so that forkchoice has
+// been updated.
+// 3- The beacon node is serving a validator that will propose during the next
+// slot.
+//
+// This function only applies an heuristic to decide if the beacon will update
+// the engine's view of head with the parent block or the incoming block. It
+// does not guarantee an attempted reorg. This will only be decided later at
+// proposal time by calling GetProposerHead.
+func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
+	override = false
+
+	f.store.nodesLock.RLock()
+	defer f.store.nodesLock.RUnlock()
+
+	// We only need to override FCU if our current head is from the current
+	// slot. This differs from the spec implementation in that we assume
+	// that we will call this function in the previous slot to proposing.
+	head := f.store.headNode
+	if head == nil {
+		return
+	}
+	if head.slot != slots.CurrentSlot(f.store.genesisTime) {
+		return
+	}
+	// Do not reorg on epoch boundaries
+	if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
+		return
+	}
+	// Only reorg blocks that arrive late
+	early, err := head.arrivedEarly(f.store.genesisTime)
+	if err != nil {
+		log.WithError(err).Error("could not check if block arrived early")
+		return
+	}
+	if early {
+		return
+	}
+	// Only reorg if we have been finalizing
+	f.store.checkpointsLock.RLock()
+	finalizedEpoch := f.store.finalizedCheckpoint.Epoch
+	f.store.checkpointsLock.RUnlock()
+	if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
+		return
+	}
+	// Only orphan a single block
+	parent := head.parent
+	if parent == nil {
+		return
+	}
+	if head.slot > parent.slot+1 {
+		return
+	}
+	// Do not orphan a block that has higher justification than the parent
+	// if head.unrealizedJustifiedEpoch > parent.unrealizedJustifiedEpoch {
+	// 	return
+	// }
+
+	// Only orphan a block if the head LMD vote is weak
+	if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgWeightThreshold {
+		return
+	}
+	return true
+}
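The comment block above spells out when ShouldOverrideFCU may be consulted: right after importing a possibly late block (or after the attestation-processing threshold), after Head() has been updated, and only when a connected validator proposes next slot. The sketch below shows that intended call flow in isolation; the interface, the fake type and the pickFCURoot helper are assumptions made for illustration, not code from this commit, and the actual reorg is only decided later via GetProposerHead.

    package main

    import "fmt"

    // overrider models just the method this commit adds to forkchoice.
    type overrider interface {
    	ShouldOverrideFCU() bool
    }

    // fakeForkchoice is a stand-in used only for this sketch.
    type fakeForkchoice struct{ weakLateHead bool }

    func (f fakeForkchoice) ShouldOverrideFCU() bool { return f.weakLateHead }

    // pickFCURoot chooses which root to advertise to the execution engine:
    // the parent when the next proposer may orphan a weak, late head,
    // otherwise the freshly computed head.
    func pickFCURoot(fc overrider, head, parent [32]byte, proposingNextSlot bool) [32]byte {
    	if proposingNextSlot && fc.ShouldOverrideFCU() {
    		return parent
    	}
    	return head
    }

    func main() {
    	head, parent := [32]byte{'b'}, [32]byte{'a'}
    	fmt.Println(pickFCURoot(fakeForkchoice{weakLateHead: true}, head, parent, true) == parent) // true
    	fmt.Println(pickFCURoot(fakeForkchoice{weakLateHead: false}, head, parent, true) == head)  // true
    }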
@@ -0,0 +1,87 @@
+package doublylinkedtree
+
+import (
+	"context"
+	"testing"
+
+	"github.com/prysmaticlabs/prysm/v3/config/params"
+	"github.com/prysmaticlabs/prysm/v3/testing/require"
+)
+
+func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
+	f := setup(0, 0)
+	f.numActiveValidators = 640
+	f.justifiedBalances = make([]uint64, f.numActiveValidators)
+	for i := range f.justifiedBalances {
+		f.justifiedBalances[i] = uint64(10)
+		f.store.committeeWeight += uint64(10)
+	}
+	f.store.committeeWeight /= uint64(params.BeaconConfig().SlotsPerEpoch)
+	ctx := context.Background()
+	driftGenesisTime(f, 1, 0)
+	st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 0, 0)
+	require.NoError(t, err)
+	require.NoError(t, f.InsertNode(ctx, st, root))
+	f.ProcessAttestation(ctx, []uint64{0, 1, 2}, root, 0)
+
+	driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
+	st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
+	require.NoError(t, err)
+	require.NoError(t, f.InsertNode(ctx, st, root))
+	headRoot, err := f.Head(ctx)
+	require.NoError(t, err)
+	require.Equal(t, root, headRoot)
+	t.Run("head is weak", func(t *testing.T) {
+		require.Equal(t, true, f.ShouldOverrideFCU())
+
+	})
+	t.Run("head is nil", func(t *testing.T) {
+		saved := f.store.headNode
+		f.store.headNode = nil
+		require.Equal(t, false, f.ShouldOverrideFCU())
+		f.store.headNode = saved
+	})
+	t.Run("head is not from current slot", func(t *testing.T) {
+		driftGenesisTime(f, 3, 0)
+		require.Equal(t, false, f.ShouldOverrideFCU())
+		driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
+	})
+	t.Run("head is from epoch boundary", func(t *testing.T) {
+		saved := f.store.headNode.slot
+		driftGenesisTime(f, params.BeaconConfig().SlotsPerEpoch-1, 0)
+		f.store.headNode.slot = params.BeaconConfig().SlotsPerEpoch - 1
+		require.Equal(t, false, f.ShouldOverrideFCU())
+		driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
+		f.store.headNode.slot = saved
+	})
+	t.Run("head is early", func(t *testing.T) {
+		saved := f.store.headNode.timestamp
+		f.store.headNode.timestamp = saved - 2
+		require.Equal(t, false, f.ShouldOverrideFCU())
+		f.store.headNode.timestamp = saved
+	})
+	t.Run("chain not finalizing", func(t *testing.T) {
+		saved := f.store.headNode.slot
+		f.store.headNode.slot = 97
+		driftGenesisTime(f, 97, orphanLateBlockFirstThreshold+1)
+		require.Equal(t, false, f.ShouldOverrideFCU())
+		f.store.headNode.slot = saved
+		driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
+	})
+	t.Run("Not single block reorg", func(t *testing.T) {
+		saved := f.store.headNode.parent.slot
+		f.store.headNode.parent.slot = 0
+		require.Equal(t, false, f.ShouldOverrideFCU())
+		f.store.headNode.parent.slot = saved
+	})
+	t.Run("parent is nil", func(t *testing.T) {
+		saved := f.store.headNode.parent
+		f.store.headNode.parent = nil
+		require.Equal(t, false, f.ShouldOverrideFCU())
+		f.store.headNode.parent = saved
+	})
+	t.Run("Head is strong", func(t *testing.T) {
+		f.store.headNode.weight = f.store.committeeWeight
+		require.Equal(t, false, f.ShouldOverrideFCU())
+	})
+}
@@ -13,53 +13,6 @@ import (
 	"go.opencensus.io/trace"
 )

-// applyProposerBoostScore applies the current proposer boost scores to the
-// relevant nodes. This function requires a lock in Store.nodesLock.
-func (f *ForkChoice) applyProposerBoostScore() error {
-	s := f.store
-	s.proposerBoostLock.Lock()
-	defer s.proposerBoostLock.Unlock()
-
-	// acquire checkpoints lock for the justified balances
-	s.checkpointsLock.RLock()
-	defer s.checkpointsLock.RUnlock()
-	newBalances := f.justifiedBalances
-
-	proposerScore := uint64(0)
-	var err error
-	if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
-		previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
-		if !ok || previousNode == nil {
-			log.WithError(errInvalidProposerBoostRoot).Errorf(fmt.Sprintf("invalid prev root %#x", s.previousProposerBoostRoot))
-		} else {
-			previousNode.balance -= s.previousProposerBoostScore
-		}
-	}
-
-	if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
-		currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
-		if !ok || currentNode == nil {
-			log.WithError(errInvalidProposerBoostRoot).Errorf(fmt.Sprintf("invalid current root %#x", s.proposerBoostRoot))
-		} else {
-			proposerScore, err = computeProposerBoostScore(newBalances)
-			if err != nil {
-				return err
-			}
-			currentNode.balance += proposerScore
-		}
-	}
-	s.previousProposerBoostRoot = s.proposerBoostRoot
-	s.previousProposerBoostScore = proposerScore
-	return nil
-}
-
-// ProposerBoost of fork choice store.
-func (s *Store) proposerBoost() [fieldparams.RootLength]byte {
-	s.proposerBoostLock.RLock()
-	defer s.proposerBoostLock.RUnlock()
-	return s.proposerBoostRoot
-}
-
 // head starts from justified root and then follows the best descendant links
 // to find the best block for head. This function assumes a lock on s.nodesLock
 func (s *Store) head(ctx context.Context) ([32]byte, error) {
@@ -11,12 +11,13 @@ import (

 // ForkChoice defines the overall fork choice store which includes all block nodes, validator's latest votes and balances.
 type ForkChoice struct {
-	store             *Store
-	votes             []Vote // tracks individual validator's last vote.
-	votesLock         sync.RWMutex
-	balances          []uint64 // tracks individual validator's balances last accounted in votes.
-	justifiedBalances []uint64 // tracks individual validator's last justified balances.
-	balancesByRoot    forkchoice.BalancesByRooter // handler to obtain balances for the state with a given root
+	store               *Store
+	votes               []Vote // tracks individual validator's last vote.
+	votesLock           sync.RWMutex
+	balances            []uint64 // tracks individual validator's balances last accounted in votes.
+	justifiedBalances   []uint64 // tracks individual validator's last justified balances.
+	numActiveValidators uint64   // tracks the total number of active validators. Requires a checkpoints lock to read/write
+	balancesByRoot      forkchoice.BalancesByRooter // handler to obtain balances for the state with a given root
 }

 // Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
@@ -30,6 +31,7 @@ type Store struct {
 	proposerBoostRoot          [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
 	previousProposerBoostRoot  [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
 	previousProposerBoostScore uint64                       // previous proposer boosted root score.
+	committeeWeight            uint64                       // tracks the total active validator balance divided by the number of slots per Epoch. Requires a checkpoints lock to read/write
 	treeRootNode               *Node                        // the root node of the store tree.
 	headNode                   *Node                        // last head Node
 	nodeByRoot                 map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
@@ -43,7 +45,6 @@ type Store struct {
 	highestReceivedNode     *Node                                      // The highest slot node.
 	receivedBlocksLastEpoch [fieldparams.SlotsPerEpoch]primitives.Slot // Using `highestReceivedSlot`. The slot of blocks received in the last epoch.
 	allTipsAreInvalid       bool                                       // tracks if all tips are not viable for head
-	committeeBalance        uint64                                     // tracks the total active validator balance divided by slots per epoch. Requires a lock on nodes to read/write
 }

 // Node defines the individual block which includes its block parent, ancestor and how much weight accounted for it.
@@ -104,14 +104,12 @@ func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Chec
 		return jc, fc
 	}

-	ab, uj, uf, err := precompute.UnrealizedCheckpoints(state)
+	uj, uf, err := precompute.UnrealizedCheckpoints(state)
 	if err != nil {
 		log.WithError(err).Debug("could not compute unrealized checkpoints")
 		uj, uf = jc, fc
 	}

-	s.committeeBalance = ab / uint64(params.BeaconConfig().SlotsPerEpoch)
-
 	// Update store's unrealized checkpoints.
 	if uj.Epoch > s.unrealizedJustifiedCheckpoint.Epoch {
 		s.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{
@@ -62,7 +62,6 @@ type Getter interface {
 	ReceivedBlocksLastEpoch() (uint64, error)
 	ForkChoiceDump(context.Context) (*v1.ForkChoiceDump, error)
 	Weight(root [32]byte) (uint64, error)
-	VotedFraction(root [32]byte) (uint64, error)
 }

 // Setter allows to set forkchoice information
@@ -137,7 +137,7 @@ func TestGetSpec(t *testing.T) {
 	resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
 	require.NoError(t, err)

-	assert.Equal(t, 103, len(resp.Data))
+	assert.Equal(t, 105, len(resp.Data))
 	for k, v := range resp.Data {
 		switch k {
 		case "CONFIG_NAME":
@@ -358,6 +358,10 @@ func TestGetSpec(t *testing.T) {
 			assert.Equal(t, "75", v)
 		case "MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP":
 			assert.Equal(t, "76", v)
+		case "REORG_MAX_EPOCHS_SINCE_FINALIZATION":
+			assert.Equal(t, "2", v)
+		case "REORG_WEIGHT_THRESHOLD":
+			assert.Equal(t, "20", v)
 		case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
 		default:
 			t.Errorf("Incorrect key: %s", k)
@@ -67,8 +67,10 @@ type BeaconChainConfig struct {
 	SecondsPerETH1Block uint64 `yaml:"SECONDS_PER_ETH1_BLOCK" spec:"true"` // SecondsPerETH1Block is the approximate time for a single eth1 block to be produced.

 	// Fork choice algorithm constants.
-	ProposerScoreBoost uint64 `yaml:"PROPOSER_SCORE_BOOST" spec:"true"` // ProposerScoreBoost defines a value that is a % of the committee weight for fork-choice boosting.
-	IntervalsPerSlot   uint64 `yaml:"INTERVALS_PER_SLOT" spec:"true"`   // IntervalsPerSlot defines the number of fork choice intervals in a slot defined in the fork choice spec.
+	ProposerScoreBoost              uint64           `yaml:"PROPOSER_SCORE_BOOST" spec:"true"`                // ProposerScoreBoost defines a value that is a % of the committee weight for fork-choice boosting.
+	ReorgWeightThreshold            uint64           `yaml:"REORG_WEIGHT_THRESHOLD" spec:"true"`              // ReorgWeightThreshold defines a value that is a % of the committee weight to consider a block weak and subject to being orphaned.
+	ReorgMaxEpochsSinceFinalization primitives.Epoch `yaml:"REORG_MAX_EPOCHS_SINCE_FINALIZATION" spec:"true"` // This defines a limit to consider safe to orphan a block if the network is finalizing
+	IntervalsPerSlot                uint64           `yaml:"INTERVALS_PER_SLOT" spec:"true"`                  // IntervalsPerSlot defines the number of fork choice intervals in a slot defined in the fork choice spec.

 	// Ethereum PoW parameters.
 	DepositChainID uint64 `yaml:"DEPOSIT_CHAIN_ID" spec:"true"` // DepositChainID of the eth1 network. This used for replay protection.
@@ -114,8 +114,10 @@ var mainnetBeaconConfig = &BeaconChainConfig{
 	SafeSlotsToUpdateJustified: 8,

 	// Fork choice algorithm constants.
-	ProposerScoreBoost: 40,
-	IntervalsPerSlot:   3,
+	ProposerScoreBoost:              40,
+	ReorgWeightThreshold:            20,
+	ReorgMaxEpochsSinceFinalization: 2,
+	IntervalsPerSlot:                3,

 	// Ethereum PoW parameters.
 	DepositChainID: 1, // Chain ID of eth1 mainnet.
@@ -230,3 +230,12 @@ func SyncCommitteePeriodStartEpoch(e primitives.Epoch) (primitives.Epoch, error)
 	}
 	return primitives.Epoch(startEpoch), nil
 }
+
+// SecondsSinceSlotStart returns the number of seconds transcurred since the
+// given slot start time
+func SecondsSinceSlotStart(s primitives.Slot, genesisTime uint64, timeStamp uint64) (uint64, error) {
+	if timeStamp < genesisTime+uint64(s)*params.BeaconConfig().SecondsPerSlot {
+		return 0, errors.New("could not compute seconds since slot start: invalid timestamp")
+	}
+	return timeStamp - genesisTime - uint64(s)*params.BeaconConfig().SecondsPerSlot, nil
+}
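SecondsSinceSlotStart is plain arithmetic: timeStamp minus (genesisTime + slot * SECONDS_PER_SLOT), with an error when the timestamp falls before the slot start. Worked through with mainnet's 12-second slots, a block in slot 1 with timestamp genesisTime + 14 arrived 14 - 12 = 2 seconds into its slot, which is exactly the third case of the test that follows. A tiny restatement with the constant hard-coded, for illustration only:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // secondsSinceSlotStart restates the helper above with SECONDS_PER_SLOT
    // hard-coded to mainnet's 12 seconds (illustration only).
    func secondsSinceSlotStart(slot, genesisTime, timeStamp uint64) (uint64, error) {
    	const secondsPerSlot = 12
    	if timeStamp < genesisTime+slot*secondsPerSlot {
    		return 0, errors.New("invalid timestamp")
    	}
    	return timeStamp - genesisTime - slot*secondsPerSlot, nil
    }

    func main() {
    	secs, err := secondsSinceSlotStart(1, 0, 14)
    	fmt.Println(secs, err) // 2 <nil>
    }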
@@ -460,3 +460,26 @@ func TestSyncCommitteePeriodStartEpoch(t *testing.T) {
 		require.Equal(t, test.wanted, e)
 	}
 }
+
+func TestSecondsSinceSlotStart(t *testing.T) {
+	tests := []struct {
+		slot        primitives.Slot
+		genesisTime uint64
+		timeStamp   uint64
+		wanted      uint64
+		wantedErr   bool
+	}{
+		{},
+		{slot: 1, timeStamp: 1, wantedErr: true},
+		{slot: 1, timeStamp: params.BeaconConfig().SecondsPerSlot + 2, wanted: 2},
+	}
+	for _, test := range tests {
+		w, err := SecondsSinceSlotStart(test.slot, test.genesisTime, test.timeStamp)
+		if err != nil {
+			require.Equal(t, true, test.wantedErr)
+		} else {
+			require.Equal(t, false, test.wantedErr)
+			require.Equal(t, w, test.wanted)
+		}
+	}
+}