From 4b033f4cc75e53201a5791c124c93f151e9fd285 Mon Sep 17 00:00:00 2001 From: Preston Van Loon Date: Fri, 18 Nov 2022 14:12:19 -0500 Subject: [PATCH] Update go to 1.19.3 (#11630) * Update go to 1.19.3 * Update other items to 1.19 * Update golangci-lint to latest release * Run gofmt -s with go1.19 * Huge gofmt changes Co-authored-by: Raul Jordan --- .github/workflows/go.yml | 12 +- .golangci.yml | 2 +- WORKSPACE | 2 +- api/client/beacon/doc.go | 1 - async/multilock.go | 4 +- async/multilock_test.go | 4 +- beacon-chain/blockchain/pow_block.go | 43 ++--- .../blockchain/process_attestation.go | 29 ++-- beacon-chain/blockchain/process_block.go | 81 ++++----- .../blockchain/process_block_helpers.go | 19 ++- beacon-chain/blockchain/process_block_test.go | 7 +- beacon-chain/blockchain/receive_block.go | 6 +- beacon-chain/core/altair/attestation.go | 103 ++++++------ beacon-chain/core/altair/block.go | 45 ++--- beacon-chain/core/altair/epoch_precompute.go | 20 ++- beacon-chain/core/altair/epoch_spec.go | 14 +- beacon-chain/core/altair/reward.go | 22 +-- beacon-chain/core/altair/sync_committee.go | 82 ++++----- beacon-chain/core/altair/transition.go | 25 +-- beacon-chain/core/altair/upgrade.go | 112 ++++++------ beacon-chain/core/blocks/attestation.go | 27 +-- beacon-chain/core/blocks/deposit.go | 68 ++++---- beacon-chain/core/blocks/eth1_data.go | 9 +- beacon-chain/core/blocks/exit.go | 105 ++++++------ beacon-chain/core/blocks/header.go | 81 ++++----- beacon-chain/core/blocks/payload.go | 78 +++++---- beacon-chain/core/blocks/randao.go | 30 ++-- beacon-chain/core/blocks/withdrawals.go | 24 +-- beacon-chain/core/epoch/epoch_processing.go | 159 +++++++++--------- .../precompute/justification_finalization.go | 90 +++++----- .../epoch/precompute/reward_penalty_test.go | 9 +- beacon-chain/core/helpers/beacon_committee.go | 88 +++++----- beacon-chain/core/helpers/block.go | 24 +-- beacon-chain/core/helpers/randao.go | 24 +-- .../core/helpers/rewards_penalties.go | 78 +++++---- beacon-chain/core/helpers/shuffle.go | 66 ++++---- beacon-chain/core/helpers/validators.go | 143 ++++++++-------- .../core/helpers/weak_subjectivity.go | 72 ++++---- beacon-chain/core/signing/domain.go | 15 +- beacon-chain/core/signing/signing_root.go | 72 ++++---- beacon-chain/core/time/slot_epoch.go | 27 +-- beacon-chain/core/transition/state.go | 78 +++++---- beacon-chain/core/transition/transition.go | 62 +++---- .../transition/transition_no_verify_sig.go | 97 +++++------ beacon-chain/core/validators/validator.go | 78 ++++----- beacon-chain/db/filters/filter.go | 18 +- .../db/kv/finalized_block_roots_test.go | 4 +- beacon-chain/execution/engine_client.go | 17 +- .../forkchoice/doubly-linked-tree/on_tick.go | 25 +-- .../optimistic_sync_test.go | 35 ++-- .../doubly-linked-tree/store_test.go | 21 ++- .../unrealized_justification_test.go | 51 +++--- beacon-chain/rpc/eth/node/node.go | 13 +- .../v1alpha1/validator/proposer_eth1data.go | 20 +-- .../validator/proposer_execution_payload.go | 24 +-- beacon-chain/rpc/statefetcher/fetcher.go | 24 +-- beacon-chain/slasher/chunks.go | 95 ++++++----- beacon-chain/slasher/detect_attestations.go | 24 +-- beacon-chain/slasher/doc.go | 16 +- beacon-chain/slasher/params.go | 63 ++++--- beacon-chain/state/state-native/doc.go | 22 +-- beacon-chain/state/stategen/replay_test.go | 32 ++-- .../sync/pending_blocks_queue_test.go | 14 +- config/features/config.go | 26 +-- container/leaky-bucket/leakybucket.go | 1 - container/slice/slice.go | 11 +- container/trie/sparse_merkle.go | 5 +- 
contracts/deposit/deposit.go | 19 ++- crypto/bls/blst/doc.go | 1 - crypto/bls/blst/secret_key.go | 3 +- crypto/bls/blst/signature.go | 34 ++-- crypto/rand/rand.go | 30 ++-- encoding/ssz/equality/deep_equal.go | 2 +- fuzzbuzz.yaml | 2 +- go.mod | 2 +- monitoring/journald/journald_linux.go | 2 +- runtime/interop/generate_genesis_state.go | 6 +- testing/endtoend/endtoend_test.go | 1 - time/slots/slottime.go | 25 +-- tools/analyzers/errcheck/analyzer.go | 14 +- tools/analyzers/gocognit/analyzer.go | 3 +- tools/blocktree/main.go | 16 +- tools/interop/export-genesis/main.go | 3 +- tools/replay-http/main.go | 3 +- validator/client/attest.go | 5 +- validator/keymanager/remote/doc.go | 60 +++---- validator/rpc/slashing.go | 6 +- .../slashing-protection-history/import.go | 38 +++-- 88 files changed, 1603 insertions(+), 1470 deletions(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index a46933291..6ff354d1a 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -26,10 +26,10 @@ jobs: steps: - name: Checkout uses: actions/checkout@v2 - - name: Set up Go 1.18 + - name: Set up Go 1.19 uses: actions/setup-go@v3 with: - go-version: 1.18 + go-version: 1.19 - name: Run Gosec Security Scanner run: | # https://github.com/securego/gosec/issues/469 export PATH=$PATH:$(go env GOPATH)/bin @@ -43,16 +43,16 @@ jobs: - name: Checkout uses: actions/checkout@v2 - - name: Set up Go 1.18 + - name: Set up Go 1.19 uses: actions/setup-go@v3 with: - go-version: 1.18 + go-version: 1.19 id: go - name: Golangci-lint uses: golangci/golangci-lint-action@v3 with: - version: v1.47.2 + version: v1.50.1 args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number build: @@ -62,7 +62,7 @@ jobs: - name: Set up Go 1.x uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: 1.19 id: go - name: Check out code into the Go module directory diff --git a/.golangci.yml b/.golangci.yml index 03df11a50..a63857b2b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -6,7 +6,7 @@ run: - proto - tools/analyzers timeout: 10m - go: '1.18' + go: '1.19' linters: disable-all: true diff --git a/WORKSPACE b/WORKSPACE index 1aad60b15..79b7adf8d 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -176,7 +176,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe go_rules_dependencies() go_register_toolchains( - go_version = "1.18.5", + go_version = "1.19.3", nogo = "@//:nogo", ) diff --git a/api/client/beacon/doc.go b/api/client/beacon/doc.go index ed7def487..3dd4b235c 100644 --- a/api/client/beacon/doc.go +++ b/api/client/beacon/doc.go @@ -1,6 +1,5 @@ /* Package beacon provides a client for interacting with the standard Eth Beacon Node API. Interactive swagger documentation for the API is available here: https://ethereum.github.io/beacon-APIs/ - */ package beacon diff --git a/async/multilock.go b/async/multilock.go index 2e81725cb..69c0675f1 100644 --- a/async/multilock.go +++ b/async/multilock.go @@ -3,7 +3,9 @@ Copyright 2017 Albert Tedja Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
diff --git a/async/multilock_test.go b/async/multilock_test.go index b962f4dbc..571460a37 100644 --- a/async/multilock_test.go +++ b/async/multilock_test.go @@ -3,7 +3,9 @@ Copyright 2017 Albert Tedja Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. diff --git a/beacon-chain/blockchain/pow_block.go b/beacon-chain/blockchain/pow_block.go index cc9fc8b01..aed9842bb 100644 --- a/beacon-chain/blockchain/pow_block.go +++ b/beacon-chain/blockchain/pow_block.go @@ -22,20 +22,21 @@ import ( // validateMergeBlock validates terminal block hash in the event of manual overrides before checking for total difficulty. // // def validate_merge_block(block: BeaconBlock) -> None: -// if TERMINAL_BLOCK_HASH != Hash32(): -// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached. -// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH -// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH -// return // -// pow_block = get_pow_block(block.body.execution_payload.parent_hash) -// # Check if `pow_block` is available -// assert pow_block is not None -// pow_parent = get_pow_block(pow_block.parent_hash) -// # Check if `pow_parent` is available -// assert pow_parent is not None -// # Check if `pow_block` is a valid terminal PoW block -// assert is_valid_terminal_pow_block(pow_block, pow_parent) +// if TERMINAL_BLOCK_HASH != Hash32(): +// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached. +// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH +// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH +// return +// +// pow_block = get_pow_block(block.body.execution_payload.parent_hash) +// # Check if `pow_block` is available +// assert pow_block is not None +// pow_parent = get_pow_block(pow_block.parent_hash) +// # Check if `pow_parent` is available +// assert pow_parent is not None +// # Check if `pow_block` is a valid terminal PoW block +// assert is_valid_terminal_pow_block(pow_block, pow_parent) func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBeaconBlock) error { if err := blocks.BeaconBlockIsNil(b); err != nil { return err @@ -105,10 +106,11 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([] // validateTerminalBlockHash validates if the merge block is a valid terminal PoW block. // spec code: // if TERMINAL_BLOCK_HASH != Hash32(): -// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached. -// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH -// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH -// return +// +// # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached. 
+// assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH +// assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH +// return func validateTerminalBlockHash(blkSlot types.Slot, payload interfaces.ExecutionData) error { if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) == [32]byte{} { return nil @@ -125,9 +127,10 @@ func validateTerminalBlockHash(blkSlot types.Slot, payload interfaces.ExecutionD // validateTerminalBlockDifficulties validates terminal pow block by comparing own total difficulty with parent's total difficulty. // // def is_valid_terminal_pow_block(block: PowBlock, parent: PowBlock) -> bool: -// is_total_difficulty_reached = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY -// is_parent_total_difficulty_valid = parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY -// return is_total_difficulty_reached and is_parent_total_difficulty_valid +// +// is_total_difficulty_reached = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY +// is_parent_total_difficulty_valid = parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY +// return is_total_difficulty_reached and is_parent_total_difficulty_valid func validateTerminalBlockDifficulties(currentDifficulty *uint256.Int, parentDifficulty *uint256.Int) (bool, error) { b, ok := new(big.Int).SetString(params.BeaconConfig().TerminalTotalDifficulty, 10) if !ok { diff --git a/beacon-chain/blockchain/process_attestation.go b/beacon-chain/blockchain/process_attestation.go index cd5863ef7..759e0bd6e 100644 --- a/beacon-chain/blockchain/process_attestation.go +++ b/beacon-chain/blockchain/process_attestation.go @@ -19,23 +19,24 @@ import ( // The delay is handled by the caller in `processAttestations`. // // Spec pseudocode definition: -// def on_attestation(store: Store, attestation: Attestation) -> None: -// """ -// Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire. // -// An ``attestation`` that is asserted as invalid may be valid at a later time, -// consider scheduling it for later processing in such case. -// """ -// validate_on_attestation(store, attestation) -// store_target_checkpoint_state(store, attestation.data.target) +// def on_attestation(store: Store, attestation: Attestation) -> None: +// """ +// Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire. // -// # Get state at the `target` to fully validate attestation -// target_state = store.checkpoint_states[attestation.data.target] -// indexed_attestation = get_indexed_attestation(target_state, attestation) -// assert is_valid_indexed_attestation(target_state, indexed_attestation) +// An ``attestation`` that is asserted as invalid may be valid at a later time, +// consider scheduling it for later processing in such case. 
+// """ +// validate_on_attestation(store, attestation) +// store_target_checkpoint_state(store, attestation.data.target) // -// # Update latest messages for attesting indices -// update_latest_messages(store, indexed_attestation.attesting_indices, attestation) +// # Get state at the `target` to fully validate attestation +// target_state = store.checkpoint_states[attestation.data.target] +// indexed_attestation = get_indexed_attestation(target_state, attestation) +// assert is_valid_indexed_attestation(target_state, indexed_attestation) +// +// # Update latest messages for attesting indices +// update_latest_messages(store, indexed_attestation.attesting_indices, attestation) func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation) error { ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation") defer span.End() diff --git a/beacon-chain/blockchain/process_block.go b/beacon-chain/blockchain/process_block.go index affc62342..58debd082 100644 --- a/beacon-chain/blockchain/process_block.go +++ b/beacon-chain/blockchain/process_block.go @@ -45,52 +45,53 @@ var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch) // computation in this method and methods it calls into. // // Spec pseudocode definition: -// def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: -// block = signed_block.message -// # Parent block must be known -// assert block.parent_root in store.block_states -// # Make a copy of the state to avoid mutability issues -// pre_state = copy(store.block_states[block.parent_root]) -// # Blocks cannot be in the future. If they are, their consideration must be delayed until the are in the past. -// assert get_current_slot(store) >= block.slot // -// # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) -// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) -// assert block.slot > finalized_slot -// # Check block is a descendant of the finalized block at the checkpoint finalized slot -// assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root +// def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: +// block = signed_block.message +// # Parent block must be known +// assert block.parent_root in store.block_states +// # Make a copy of the state to avoid mutability issues +// pre_state = copy(store.block_states[block.parent_root]) +// # Blocks cannot be in the future. If they are, their consideration must be delayed until the are in the past. 
+// assert get_current_slot(store) >= block.slot // -// # Check the block is valid and compute the post-state -// state = pre_state.copy() -// state_transition(state, signed_block, True) -// # Add new block to the store -// store.blocks[hash_tree_root(block)] = block -// # Add new state for this block to the store -// store.block_states[hash_tree_root(block)] = state +// # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor) +// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) +// assert block.slot > finalized_slot +// # Check block is a descendant of the finalized block at the checkpoint finalized slot +// assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root // -// # Update justified checkpoint -// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch: -// if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch: -// store.best_justified_checkpoint = state.current_justified_checkpoint -// if should_update_justified_checkpoint(store, state.current_justified_checkpoint): -// store.justified_checkpoint = state.current_justified_checkpoint +// # Check the block is valid and compute the post-state +// state = pre_state.copy() +// state_transition(state, signed_block, True) +// # Add new block to the store +// store.blocks[hash_tree_root(block)] = block +// # Add new state for this block to the store +// store.block_states[hash_tree_root(block)] = state // -// # Update finalized checkpoint -// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch: -// store.finalized_checkpoint = state.finalized_checkpoint +// # Update justified checkpoint +// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch: +// if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch: +// store.best_justified_checkpoint = state.current_justified_checkpoint +// if should_update_justified_checkpoint(store, state.current_justified_checkpoint): +// store.justified_checkpoint = state.current_justified_checkpoint // -// # Potentially update justified if different from store -// if store.justified_checkpoint != state.current_justified_checkpoint: -// # Update justified if new justified is later than store justified -// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch: -// store.justified_checkpoint = state.current_justified_checkpoint -// return +// # Update finalized checkpoint +// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch: +// store.finalized_checkpoint = state.finalized_checkpoint // -// # Update justified if store justified is not in chain with finalized checkpoint -// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) -// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot) -// if ancestor_at_finalized_slot != store.finalized_checkpoint.root: -// store.justified_checkpoint = state.current_justified_checkpoint +// # Potentially update justified if different from store +// if store.justified_checkpoint != state.current_justified_checkpoint: +// # Update justified if new justified is later than store justified +// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch: +// store.justified_checkpoint = state.current_justified_checkpoint +// return +// +// # Update justified if store justified is not in chain with finalized checkpoint 
+// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) +// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot) +// if ancestor_at_finalized_slot != store.finalized_checkpoint.root: +// store.justified_checkpoint = state.current_justified_checkpoint func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlock, blockRoot [32]byte) error { ctx, span := trace.StartSpan(ctx, "blockChain.onBlock") defer span.End() diff --git a/beacon-chain/blockchain/process_block_helpers.go b/beacon-chain/blockchain/process_block_helpers.go index 74def66bd..b323cf735 100644 --- a/beacon-chain/blockchain/process_block_helpers.go +++ b/beacon-chain/blockchain/process_block_helpers.go @@ -185,15 +185,16 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err // ancestor returns the block root of an ancestry block from the input block root. // // Spec pseudocode definition: -// def get_ancestor(store: Store, root: Root, slot: Slot) -> Root: -// block = store.blocks[root] -// if block.slot > slot: -// return get_ancestor(store, block.parent_root, slot) -// elif block.slot == slot: -// return root -// else: -// # root is older than queried slot, thus a skip slot. Return most recent root prior to slot -// return root +// +// def get_ancestor(store: Store, root: Root, slot: Slot) -> Root: +// block = store.blocks[root] +// if block.slot > slot: +// return get_ancestor(store, block.parent_root, slot) +// elif block.slot == slot: +// return root +// else: +// # root is older than queried slot, thus a skip slot. Return most recent root prior to slot +// return root func (s *Service) ancestor(ctx context.Context, root []byte, slot types.Slot) ([]byte, error) { ctx, span := trace.StartSpan(ctx, "blockChain.ancestor") defer span.End() diff --git a/beacon-chain/blockchain/process_block_test.go b/beacon-chain/blockchain/process_block_test.go index 84b33798e..9ac6342d4 100644 --- a/beacon-chain/blockchain/process_block_test.go +++ b/beacon-chain/blockchain/process_block_test.go @@ -446,9 +446,12 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) { } // blockTree1 constructs the following tree: -// /- B1 +// +// /- B1 +// // B0 /- B5 - B7 -// \- B3 - B4 - B6 - B8 +// +// \- B3 - B4 - B6 - B8 func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byte, error) { genesisRoot = bytesutil.PadTo(genesisRoot, 32) b0 := util.NewBeaconBlock() diff --git a/beacon-chain/blockchain/receive_block.go b/beacon-chain/blockchain/receive_block.go index 97b1c6232..0a3640771 100644 --- a/beacon-chain/blockchain/receive_block.go +++ b/beacon-chain/blockchain/receive_block.go @@ -32,9 +32,9 @@ type SlashingReceiver interface { // ReceiveBlock is a function that defines the operations (minus pubsub) // that are performed on a received block. The operations consist of: -// 1. Validate block, apply state transition and update checkpoints -// 2. Apply fork choice to the processed block -// 3. Save latest head info +// 1. Validate block, apply state transition and update checkpoints +// 2. Apply fork choice to the processed block +// 3. 
Save latest head info func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, blockRoot [32]byte) error { ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock") defer span.End() diff --git a/beacon-chain/core/altair/attestation.go b/beacon-chain/core/altair/attestation.go index d8b0d406c..b53c393a0 100644 --- a/beacon-chain/core/altair/attestation.go +++ b/beacon-chain/core/altair/attestation.go @@ -82,23 +82,24 @@ func ProcessAttestationNoVerifySignature( // the proposer in state. // // Spec code: -// # Update epoch participation flags -// if data.target.epoch == get_current_epoch(state): -// epoch_participation = state.current_epoch_participation -// else: -// epoch_participation = state.previous_epoch_participation // -// proposer_reward_numerator = 0 -// for index in get_attesting_indices(state, data, attestation.aggregation_bits): -// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS): -// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index): -// epoch_participation[index] = add_flag(epoch_participation[index], flag_index) -// proposer_reward_numerator += get_base_reward(state, index) * weight +// # Update epoch participation flags +// if data.target.epoch == get_current_epoch(state): +// epoch_participation = state.current_epoch_participation +// else: +// epoch_participation = state.previous_epoch_participation // -// # Reward proposer -// proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT -// proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator) -// increase_balance(state, get_beacon_proposer_index(state), proposer_reward) +// proposer_reward_numerator = 0 +// for index in get_attesting_indices(state, data, attestation.aggregation_bits): +// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS): +// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index): +// epoch_participation[index] = add_flag(epoch_participation[index], flag_index) +// proposer_reward_numerator += get_base_reward(state, index) * weight +// +// # Reward proposer +// proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT +// proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator) +// increase_balance(state, get_beacon_proposer_index(state), proposer_reward) func SetParticipationAndRewardProposer( ctx context.Context, beaconState state.BeaconState, @@ -157,12 +158,13 @@ func AddValidatorFlag(flag, flagPosition uint8) (uint8, error) { // EpochParticipation sets and returns the proposer reward numerator and epoch participation. 
// // Spec code: -// proposer_reward_numerator = 0 -// for index in get_attesting_indices(state, data, attestation.aggregation_bits): -// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS): -// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index): -// epoch_participation[index] = add_flag(epoch_participation[index], flag_index) -// proposer_reward_numerator += get_base_reward(state, index) * weight +// +// proposer_reward_numerator = 0 +// for index in get_attesting_indices(state, data, attestation.aggregation_bits): +// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS): +// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index): +// epoch_participation[index] = add_flag(epoch_participation[index], flag_index) +// proposer_reward_numerator += get_base_reward(state, index) * weight func EpochParticipation(beaconState state.BeaconState, indices []uint64, epochParticipation []byte, participatedFlags map[uint8]bool, totalBalance uint64) (uint64, []byte, error) { cfg := params.BeaconConfig() sourceFlagIndex := cfg.TimelySourceFlagIndex @@ -218,9 +220,10 @@ func EpochParticipation(beaconState state.BeaconState, indices []uint64, epochPa // RewardProposer rewards proposer by increasing proposer's balance with input reward numerator and calculated reward denominator. // // Spec code: -// proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT -// proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator) -// increase_balance(state, get_beacon_proposer_index(state), proposer_reward) +// +// proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT +// proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator) +// increase_balance(state, get_beacon_proposer_index(state), proposer_reward) func RewardProposer(ctx context.Context, beaconState state.BeaconState, proposerRewardNumerator uint64) error { cfg := params.BeaconConfig() d := (cfg.WeightDenominator - cfg.ProposerWeight) * cfg.WeightDenominator / cfg.ProposerWeight @@ -238,31 +241,32 @@ func RewardProposer(ctx context.Context, beaconState state.BeaconState, proposer // // Spec code: // def get_attestation_participation_flag_indices(state: BeaconState, -// data: AttestationData, -// inclusion_delay: uint64) -> Sequence[int]: -// """ -// Return the flag indices that are satisfied by an attestation. -// """ -// if data.target.epoch == get_current_epoch(state): -// justified_checkpoint = state.current_justified_checkpoint -// else: -// justified_checkpoint = state.previous_justified_checkpoint // -// # Matching roots -// is_matching_source = data.source == justified_checkpoint -// is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch) -// is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot) -// assert is_matching_source +// data: AttestationData, +// inclusion_delay: uint64) -> Sequence[int]: +// """ +// Return the flag indices that are satisfied by an attestation. 
+// """ +// if data.target.epoch == get_current_epoch(state): +// justified_checkpoint = state.current_justified_checkpoint +// else: +// justified_checkpoint = state.previous_justified_checkpoint // -// participation_flag_indices = [] -// if is_matching_source and inclusion_delay <= integer_squareroot(SLOTS_PER_EPOCH): -// participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX) -// if is_matching_target and inclusion_delay <= SLOTS_PER_EPOCH: -// participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX) -// if is_matching_head and inclusion_delay == MIN_ATTESTATION_INCLUSION_DELAY: -// participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX) +// # Matching roots +// is_matching_source = data.source == justified_checkpoint +// is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch) +// is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot) +// assert is_matching_source // -// return participation_flag_indices +// participation_flag_indices = [] +// if is_matching_source and inclusion_delay <= integer_squareroot(SLOTS_PER_EPOCH): +// participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX) +// if is_matching_target and inclusion_delay <= SLOTS_PER_EPOCH: +// participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX) +// if is_matching_head and inclusion_delay == MIN_ATTESTATION_INCLUSION_DELAY: +// participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX) +// +// return participation_flag_indices func AttestationParticipationFlagIndices(beaconState state.BeaconState, data *ethpb.AttestationData, delay types.Slot) (map[uint8]bool, error) { currEpoch := time.CurrentEpoch(beaconState) var justifiedCheckpt *ethpb.Checkpoint @@ -304,9 +308,10 @@ func AttestationParticipationFlagIndices(beaconState state.BeaconState, data *et // MatchingStatus returns the matching statues for attestation data's source target and head. 
// // Spec code: -// is_matching_source = data.source == justified_checkpoint -// is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch) -// is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot) +// +// is_matching_source = data.source == justified_checkpoint +// is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch) +// is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot) func MatchingStatus(beaconState state.BeaconState, data *ethpb.AttestationData, cp *ethpb.Checkpoint) (matchedSrc, matchedTgt, matchedHead bool, err error) { matchedSrc = attestation.CheckPointIsEqual(data.Source, cp) diff --git a/beacon-chain/core/altair/block.go b/beacon-chain/core/altair/block.go index b2035c04b..97dc79473 100644 --- a/beacon-chain/core/altair/block.go +++ b/beacon-chain/core/altair/block.go @@ -20,30 +20,31 @@ import ( // // Spec code: // def process_sync_aggregate(state: BeaconState, sync_aggregate: SyncAggregate) -> None: -// # Verify sync committee aggregate signature signing over the previous slot block root -// committee_pubkeys = state.current_sync_committee.pubkeys -// participant_pubkeys = [pubkey for pubkey, bit in zip(committee_pubkeys, sync_aggregate.sync_committee_bits) if bit] -// previous_slot = max(state.slot, Slot(1)) - Slot(1) -// domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot)) -// signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain) -// assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature) // -// # Compute participant and proposer rewards -// total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT -// total_base_rewards = Gwei(get_base_reward_per_increment(state) * total_active_increments) -// max_participant_rewards = Gwei(total_base_rewards * SYNC_REWARD_WEIGHT // WEIGHT_DENOMINATOR // SLOTS_PER_EPOCH) -// participant_reward = Gwei(max_participant_rewards // SYNC_COMMITTEE_SIZE) -// proposer_reward = Gwei(participant_reward * PROPOSER_WEIGHT // (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)) +// # Verify sync committee aggregate signature signing over the previous slot block root +// committee_pubkeys = state.current_sync_committee.pubkeys +// participant_pubkeys = [pubkey for pubkey, bit in zip(committee_pubkeys, sync_aggregate.sync_committee_bits) if bit] +// previous_slot = max(state.slot, Slot(1)) - Slot(1) +// domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot)) +// signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain) +// assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature) // -// # Apply participant and proposer rewards -// all_pubkeys = [v.pubkey for v in state.validators] -// committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys] -// for participant_index, participation_bit in zip(committee_indices, sync_aggregate.sync_committee_bits): -// if participation_bit: -// increase_balance(state, participant_index, participant_reward) -// increase_balance(state, get_beacon_proposer_index(state), proposer_reward) -// else: -// decrease_balance(state, participant_index, participant_reward) +// # Compute participant and proposer rewards 
+// total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT +// total_base_rewards = Gwei(get_base_reward_per_increment(state) * total_active_increments) +// max_participant_rewards = Gwei(total_base_rewards * SYNC_REWARD_WEIGHT // WEIGHT_DENOMINATOR // SLOTS_PER_EPOCH) +// participant_reward = Gwei(max_participant_rewards // SYNC_COMMITTEE_SIZE) +// proposer_reward = Gwei(participant_reward * PROPOSER_WEIGHT // (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)) +// +// # Apply participant and proposer rewards +// all_pubkeys = [v.pubkey for v in state.validators] +// committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys] +// for participant_index, participation_bit in zip(committee_indices, sync_aggregate.sync_committee_bits): +// if participation_bit: +// increase_balance(state, participant_index, participant_reward) +// increase_balance(state, get_beacon_proposer_index(state), proposer_reward) +// else: +// decrease_balance(state, participant_index, participant_reward) func ProcessSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (state.BeaconState, error) { votedKeys, votedIndices, didntVoteIndices, err := FilterSyncCommitteeVotes(s, sync) if err != nil { diff --git a/beacon-chain/core/altair/epoch_precompute.go b/beacon-chain/core/altair/epoch_precompute.go index c8634c8f4..7b52496c9 100644 --- a/beacon-chain/core/altair/epoch_precompute.go +++ b/beacon-chain/core/altair/epoch_precompute.go @@ -68,9 +68,10 @@ func InitializePrecomputeValidators(ctx context.Context, beaconState state.Beaco // For fully inactive validators and perfect active validators, the effect is the same as before Altair. // For a validator is inactive and the chain fails to finalize, the inactivity score increases by a fixed number, the total loss after N epochs is proportional to N**2/2. // For imperfectly active validators. The inactivity score's behavior is specified by this function: -// If a validator fails to submit an attestation with the correct target, their inactivity score goes up by 4. -// If they successfully submit an attestation with the correct source and target, their inactivity score drops by 1 -// If the chain has recently finalized, each validator's score drops by 16. +// +// If a validator fails to submit an attestation with the correct target, their inactivity score goes up by 4. +// If they successfully submit an attestation with the correct source and target, their inactivity score drops by 1 +// If the chain has recently finalized, each validator's score drops by 16. func ProcessInactivityScores( ctx context.Context, beaconState state.BeaconState, @@ -132,12 +133,13 @@ func ProcessInactivityScores( // it also tracks and updates epoch attesting balances. 
// Spec code: // if epoch == get_current_epoch(state): -// epoch_participation = state.current_epoch_participation -// else: -// epoch_participation = state.previous_epoch_participation -// active_validator_indices = get_active_validator_indices(state, epoch) -// participating_indices = [i for i in active_validator_indices if has_flag(epoch_participation[i], flag_index)] -// return set(filter(lambda index: not state.validators[index].slashed, participating_indices)) +// +// epoch_participation = state.current_epoch_participation +// else: +// epoch_participation = state.previous_epoch_participation +// active_validator_indices = get_active_validator_indices(state, epoch) +// participating_indices = [i for i in active_validator_indices if has_flag(epoch_participation[i], flag_index)] +// return set(filter(lambda index: not state.validators[index].slashed, participating_indices)) func ProcessEpochParticipation( ctx context.Context, beaconState state.BeaconState, diff --git a/beacon-chain/core/altair/epoch_spec.go b/beacon-chain/core/altair/epoch_spec.go index ff61a7e07..6b3229695 100644 --- a/beacon-chain/core/altair/epoch_spec.go +++ b/beacon-chain/core/altair/epoch_spec.go @@ -14,10 +14,11 @@ import ( // // Spec code: // def process_sync_committee_updates(state: BeaconState) -> None: -// next_epoch = get_current_epoch(state) + Epoch(1) -// if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0: -// state.current_sync_committee = state.next_sync_committee -// state.next_sync_committee = get_next_sync_committee(state) +// +// next_epoch = get_current_epoch(state) + Epoch(1) +// if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0: +// state.current_sync_committee = state.next_sync_committee +// state.next_sync_committee = get_next_sync_committee(state) func ProcessSyncCommitteeUpdates(ctx context.Context, beaconState state.BeaconState) (state.BeaconState, error) { nextEpoch := time.NextEpoch(beaconState) if nextEpoch%params.BeaconConfig().EpochsPerSyncCommitteePeriod == 0 { @@ -46,8 +47,9 @@ func ProcessSyncCommitteeUpdates(ctx context.Context, beaconState state.BeaconSt // // Spec code: // def process_participation_flag_updates(state: BeaconState) -> None: -// state.previous_epoch_participation = state.current_epoch_participation -// state.current_epoch_participation = [ParticipationFlags(0b0000_0000) for _ in range(len(state.validators))] +// +// state.previous_epoch_participation = state.current_epoch_participation +// state.current_epoch_participation = [ParticipationFlags(0b0000_0000) for _ in range(len(state.validators))] func ProcessParticipationFlagUpdates(beaconState state.BeaconState) (state.BeaconState, error) { c, err := beaconState.CurrentEpochParticipation() if err != nil { diff --git a/beacon-chain/core/altair/reward.go b/beacon-chain/core/altair/reward.go index 7d95001bd..d9f20363a 100644 --- a/beacon-chain/core/altair/reward.go +++ b/beacon-chain/core/altair/reward.go @@ -13,16 +13,17 @@ import ( // individual validator's base reward. // // Spec code: -// def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: -// """ -// Return the base reward for the validator defined by ``index`` with respect to the current ``state``. // -// Note: An optimally performing validator can earn one base reward per epoch over a long time horizon. -// This takes into account both per-epoch (e.g. attestation) and intermittent duties (e.g. block proposal -// and sync committees). 
-// """ -// increments = state.validators[index].effective_balance // EFFECTIVE_BALANCE_INCREMENT -// return Gwei(increments * get_base_reward_per_increment(state)) +// def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: +// """ +// Return the base reward for the validator defined by ``index`` with respect to the current ``state``. +// +// Note: An optimally performing validator can earn one base reward per epoch over a long time horizon. +// This takes into account both per-epoch (e.g. attestation) and intermittent duties (e.g. block proposal +// and sync committees). +// """ +// increments = state.validators[index].effective_balance // EFFECTIVE_BALANCE_INCREMENT +// return Gwei(increments * get_base_reward_per_increment(state)) func BaseReward(s state.ReadOnlyBeaconState, index types.ValidatorIndex) (uint64, error) { totalBalance, err := helpers.TotalActiveBalance(s) if err != nil { @@ -50,7 +51,8 @@ func BaseRewardWithTotalBalance(s state.ReadOnlyBeaconState, index types.Validat // // Spec code: // def get_base_reward_per_increment(state: BeaconState) -> Gwei: -// return Gwei(EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR // integer_squareroot(get_total_active_balance(state))) +// +// return Gwei(EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR // integer_squareroot(get_total_active_balance(state))) func BaseRewardPerIncrement(activeBalance uint64) (uint64, error) { if activeBalance == 0 { return 0, errors.New("active balance can't be 0") diff --git a/beacon-chain/core/altair/sync_committee.go b/beacon-chain/core/altair/sync_committee.go index 6169be34f..3a3cfddba 100644 --- a/beacon-chain/core/altair/sync_committee.go +++ b/beacon-chain/core/altair/sync_committee.go @@ -47,13 +47,14 @@ func ValidateNilSyncContribution(s *ethpb.SignedContributionAndProof) error { // // Spec code: // def get_next_sync_committee(state: BeaconState) -> SyncCommittee: -// """ -// Return the next sync committee, with possible pubkey duplicates. -// """ -// indices = get_next_sync_committee_indices(state) -// pubkeys = [state.validators[index].pubkey for index in indices] -// aggregate_pubkey = bls.AggregatePKs(pubkeys) -// return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey) +// +// """ +// Return the next sync committee, with possible pubkey duplicates. +// """ +// indices = get_next_sync_committee_indices(state) +// pubkeys = [state.validators[index].pubkey for index in indices] +// aggregate_pubkey = bls.AggregatePKs(pubkeys) +// return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey) func NextSyncCommittee(ctx context.Context, s state.BeaconState) (*ethpb.SyncCommittee, error) { indices, err := NextSyncCommitteeIndices(ctx, s) if err != nil { @@ -78,26 +79,27 @@ func NextSyncCommittee(ctx context.Context, s state.BeaconState) (*ethpb.SyncCom // // Spec code: // def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]: -// """ -// Return the sync committee indices, with possible duplicates, for the next sync committee. 
-// """ -// epoch = Epoch(get_current_epoch(state) + 1) // -// MAX_RANDOM_BYTE = 2**8 - 1 -// active_validator_indices = get_active_validator_indices(state, epoch) -// active_validator_count = uint64(len(active_validator_indices)) -// seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE) -// i = 0 -// sync_committee_indices: List[ValidatorIndex] = [] -// while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE: -// shuffled_index = compute_shuffled_index(uint64(i % active_validator_count), active_validator_count, seed) -// candidate_index = active_validator_indices[shuffled_index] -// random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32] -// effective_balance = state.validators[candidate_index].effective_balance -// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: -// sync_committee_indices.append(candidate_index) -// i += 1 -// return sync_committee_indices +// """ +// Return the sync committee indices, with possible duplicates, for the next sync committee. +// """ +// epoch = Epoch(get_current_epoch(state) + 1) +// +// MAX_RANDOM_BYTE = 2**8 - 1 +// active_validator_indices = get_active_validator_indices(state, epoch) +// active_validator_count = uint64(len(active_validator_indices)) +// seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE) +// i = 0 +// sync_committee_indices: List[ValidatorIndex] = [] +// while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE: +// shuffled_index = compute_shuffled_index(uint64(i % active_validator_count), active_validator_count, seed) +// candidate_index = active_validator_indices[shuffled_index] +// random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32] +// effective_balance = state.validators[candidate_index].effective_balance +// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: +// sync_committee_indices.append(candidate_index) +// i += 1 +// return sync_committee_indices func NextSyncCommitteeIndices(ctx context.Context, s state.BeaconState) ([]types.ValidatorIndex, error) { epoch := coreTime.NextEpoch(s) indices, err := helpers.ActiveValidatorIndices(ctx, s, epoch) @@ -144,18 +146,19 @@ func NextSyncCommitteeIndices(ctx context.Context, s state.BeaconState) ([]types // SyncSubCommitteePubkeys returns the pubkeys participating in a sync subcommittee. 
// // def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64) -> Sequence[BLSPubkey]: -// # Committees assigned to `slot` sign for `slot - 1` -// # This creates the exceptional logic below when transitioning between sync committee periods -// next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1)) -// if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch): -// sync_committee = state.current_sync_committee -// else: -// sync_committee = state.next_sync_committee // -// # Return pubkeys for the subcommittee index -// sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT -// i = subcommittee_index * sync_subcommittee_size -// return sync_committee.pubkeys[i:i + sync_subcommittee_size] +// # Committees assigned to `slot` sign for `slot - 1` +// # This creates the exceptional logic below when transitioning between sync committee periods +// next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1)) +// if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch): +// sync_committee = state.current_sync_committee +// else: +// sync_committee = state.next_sync_committee +// +// # Return pubkeys for the subcommittee index +// sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT +// i = subcommittee_index * sync_subcommittee_size +// return sync_committee.pubkeys[i:i + sync_subcommittee_size] func SyncSubCommitteePubkeys(syncCommittee *ethpb.SyncCommittee, subComIdx types.CommitteeIndex) ([][]byte, error) { cfg := params.BeaconConfig() subCommSize := cfg.SyncCommitteeSize / cfg.SyncCommitteeSubnetCount @@ -172,8 +175,9 @@ func SyncSubCommitteePubkeys(syncCommittee *ethpb.SyncCommittee, subComIdx types // aggregator. 
// // def is_sync_committee_aggregator(signature: BLSSignature) -> bool: -// modulo = max(1, SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT // TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE) -// return bytes_to_uint64(hash(signature)[0:8]) % modulo == 0 +// +// modulo = max(1, SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT // TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE) +// return bytes_to_uint64(hash(signature)[0:8]) % modulo == 0 func IsSyncCommitteeAggregator(sig []byte) (bool, error) { if len(sig) != fieldparams.BLSSignatureLength { return false, errors.New("incorrect sig length") diff --git a/beacon-chain/core/altair/transition.go b/beacon-chain/core/altair/transition.go index 3dfd2c1b9..b0c40c769 100644 --- a/beacon-chain/core/altair/transition.go +++ b/beacon-chain/core/altair/transition.go @@ -15,18 +15,19 @@ import ( // // Spec code: // def process_epoch(state: BeaconState) -> None: -// process_justification_and_finalization(state) # [Modified in Altair] -// process_inactivity_updates(state) # [New in Altair] -// process_rewards_and_penalties(state) # [Modified in Altair] -// process_registry_updates(state) -// process_slashings(state) # [Modified in Altair] -// process_eth1_data_reset(state) -// process_effective_balance_updates(state) -// process_slashings_reset(state) -// process_randao_mixes_reset(state) -// process_historical_roots_update(state) -// process_participation_flag_updates(state) # [New in Altair] -// process_sync_committee_updates(state) # [New in Altair] +// +// process_justification_and_finalization(state) # [Modified in Altair] +// process_inactivity_updates(state) # [New in Altair] +// process_rewards_and_penalties(state) # [Modified in Altair] +// process_registry_updates(state) +// process_slashings(state) # [Modified in Altair] +// process_eth1_data_reset(state) +// process_effective_balance_updates(state) +// process_slashings_reset(state) +// process_randao_mixes_reset(state) +// process_historical_roots_update(state) +// process_participation_flag_updates(state) # [New in Altair] +// process_sync_committee_updates(state) # [New in Altair] func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconState, error) { ctx, span := trace.StartSpan(ctx, "altair.ProcessEpoch") defer span.End() diff --git a/beacon-chain/core/altair/upgrade.go b/beacon-chain/core/altair/upgrade.go index 399cdfc11..d97271963 100644 --- a/beacon-chain/core/altair/upgrade.go +++ b/beacon-chain/core/altair/upgrade.go @@ -16,52 +16,53 @@ import ( // // Spec code: // def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState: -// epoch = phase0.get_current_epoch(pre) -// post = BeaconState( -// # Versioning -// genesis_time=pre.genesis_time, -// genesis_validators_root=pre.genesis_validators_root, -// slot=pre.slot, -// fork=Fork( -// previous_version=pre.fork.current_version, -// current_version=ALTAIR_FORK_VERSION, -// epoch=epoch, -// ), -// # History -// latest_block_header=pre.latest_block_header, -// block_roots=pre.block_roots, -// state_roots=pre.state_roots, -// historical_roots=pre.historical_roots, -// # Eth1 -// eth1_data=pre.eth1_data, -// eth1_data_votes=pre.eth1_data_votes, -// eth1_deposit_index=pre.eth1_deposit_index, -// # Registry -// validators=pre.validators, -// balances=pre.balances, -// # Randomness -// randao_mixes=pre.randao_mixes, -// # Slashings -// slashings=pre.slashings, -// # Participation -// previous_epoch_participation=[ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))], -// 
current_epoch_participation=[ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))], -// # Finality -// justification_bits=pre.justification_bits, -// previous_justified_checkpoint=pre.previous_justified_checkpoint, -// current_justified_checkpoint=pre.current_justified_checkpoint, -// finalized_checkpoint=pre.finalized_checkpoint, -// # Inactivity -// inactivity_scores=[uint64(0) for _ in range(len(pre.validators))], -// ) -// # Fill in previous epoch participation from the pre state's pending attestations -// translate_participation(post, pre.previous_epoch_attestations) // -// # Fill in sync committees -// # Note: A duplicate committee is assigned for the current and next committee at the fork boundary -// post.current_sync_committee = get_next_sync_committee(post) -// post.next_sync_committee = get_next_sync_committee(post) -// return post +// epoch = phase0.get_current_epoch(pre) +// post = BeaconState( +// # Versioning +// genesis_time=pre.genesis_time, +// genesis_validators_root=pre.genesis_validators_root, +// slot=pre.slot, +// fork=Fork( +// previous_version=pre.fork.current_version, +// current_version=ALTAIR_FORK_VERSION, +// epoch=epoch, +// ), +// # History +// latest_block_header=pre.latest_block_header, +// block_roots=pre.block_roots, +// state_roots=pre.state_roots, +// historical_roots=pre.historical_roots, +// # Eth1 +// eth1_data=pre.eth1_data, +// eth1_data_votes=pre.eth1_data_votes, +// eth1_deposit_index=pre.eth1_deposit_index, +// # Registry +// validators=pre.validators, +// balances=pre.balances, +// # Randomness +// randao_mixes=pre.randao_mixes, +// # Slashings +// slashings=pre.slashings, +// # Participation +// previous_epoch_participation=[ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))], +// current_epoch_participation=[ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))], +// # Finality +// justification_bits=pre.justification_bits, +// previous_justified_checkpoint=pre.previous_justified_checkpoint, +// current_justified_checkpoint=pre.current_justified_checkpoint, +// finalized_checkpoint=pre.finalized_checkpoint, +// # Inactivity +// inactivity_scores=[uint64(0) for _ in range(len(pre.validators))], +// ) +// # Fill in previous epoch participation from the pre state's pending attestations +// translate_participation(post, pre.previous_epoch_attestations) +// +// # Fill in sync committees +// # Note: A duplicate committee is assigned for the current and next committee at the fork boundary +// post.current_sync_committee = get_next_sync_committee(post) +// post.next_sync_committee = get_next_sync_committee(post) +// return post func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.BeaconState, error) { epoch := time.CurrentEpoch(state) @@ -126,17 +127,18 @@ func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.Beacon // // Spec code: // def translate_participation(state: BeaconState, pending_attestations: Sequence[phase0.PendingAttestation]) -> None: -// for attestation in pending_attestations: -// data = attestation.data -// inclusion_delay = attestation.inclusion_delay -// # Translate attestation inclusion info to flag indices -// participation_flag_indices = get_attestation_participation_flag_indices(state, data, inclusion_delay) // -// # Apply flags to all attesting validators -// epoch_participation = state.previous_epoch_participation -// for index in get_attesting_indices(state, data, attestation.aggregation_bits): -// for flag_index in 
participation_flag_indices: -// epoch_participation[index] = add_flag(epoch_participation[index], flag_index) +// for attestation in pending_attestations: +// data = attestation.data +// inclusion_delay = attestation.inclusion_delay +// # Translate attestation inclusion info to flag indices +// participation_flag_indices = get_attestation_participation_flag_indices(state, data, inclusion_delay) +// +// # Apply flags to all attesting validators +// epoch_participation = state.previous_epoch_participation +// for index in get_attesting_indices(state, data, attestation.aggregation_bits): +// for flag_index in participation_flag_indices: +// epoch_participation[index] = add_flag(epoch_participation[index], flag_index) func TranslateParticipation(ctx context.Context, state state.BeaconState, atts []*ethpb.PendingAttestation) (state.BeaconState, error) { epochParticipation, err := state.PreviousEpochParticipation() if err != nil { diff --git a/beacon-chain/core/blocks/attestation.go b/beacon-chain/core/blocks/attestation.go index f7bae1a31..65a613efa 100644 --- a/beacon-chain/core/blocks/attestation.go +++ b/beacon-chain/core/blocks/attestation.go @@ -185,19 +185,20 @@ func VerifyAttestationSignature(ctx context.Context, beaconState state.ReadOnlyB // VerifyIndexedAttestation determines the validity of an indexed attestation. // // Spec pseudocode definition: -// def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool: -// """ -// Check if ``indexed_attestation`` is not empty, has sorted and unique indices and has a valid aggregate signature. -// """ -// # Verify indices are sorted and unique -// indices = indexed_attestation.attesting_indices -// if len(indices) == 0 or not indices == sorted(set(indices)): -// return False -// # Verify aggregate signature -// pubkeys = [state.validators[i].pubkey for i in indices] -// domain = get_domain(state, DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch) -// signing_root = compute_signing_root(indexed_attestation.data, domain) -// return bls.FastAggregateVerify(pubkeys, signing_root, indexed_attestation.signature) +// +// def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool: +// """ +// Check if ``indexed_attestation`` is not empty, has sorted and unique indices and has a valid aggregate signature. +// """ +// # Verify indices are sorted and unique +// indices = indexed_attestation.attesting_indices +// if len(indices) == 0 or not indices == sorted(set(indices)): +// return False +// # Verify aggregate signature +// pubkeys = [state.validators[i].pubkey for i in indices] +// domain = get_domain(state, DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch) +// signing_root = compute_signing_root(indexed_attestation.data, domain) +// return bls.FastAggregateVerify(pubkeys, signing_root, indexed_attestation.signature) func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBeaconState, indexedAtt *ethpb.IndexedAttestation) error { ctx, span := trace.StartSpan(ctx, "core.VerifyIndexedAttestation") defer span.End() diff --git a/beacon-chain/core/blocks/deposit.go b/beacon-chain/core/blocks/deposit.go index 017662b71..b8842481a 100644 --- a/beacon-chain/core/blocks/deposit.go +++ b/beacon-chain/core/blocks/deposit.go @@ -71,8 +71,9 @@ func ActivateValidatorWithEffectiveBalance(beaconState state.BeaconState, deposi // into the beacon chain. 
// // Spec pseudocode definition: -// For each deposit in block.body.deposits: -// process_deposit(state, deposit) +// +// For each deposit in block.body.deposits: +// process_deposit(state, deposit) func ProcessDeposits( ctx context.Context, beaconState state.BeaconState, @@ -120,40 +121,41 @@ func BatchVerifyDepositsSignatures(ctx context.Context, deposits []*ethpb.Deposi // // Spec pseudocode definition: // def process_deposit(state: BeaconState, deposit: Deposit) -> None: -// # Verify the Merkle branch -// assert is_valid_merkle_branch( -// leaf=hash_tree_root(deposit.data), -// branch=deposit.proof, -// depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in -// index=state.eth1_deposit_index, -// root=state.eth1_data.deposit_root, -// ) // -// # Deposits must be processed in order -// state.eth1_deposit_index += 1 +// # Verify the Merkle branch +// assert is_valid_merkle_branch( +// leaf=hash_tree_root(deposit.data), +// branch=deposit.proof, +// depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in +// index=state.eth1_deposit_index, +// root=state.eth1_data.deposit_root, +// ) // -// pubkey = deposit.data.pubkey -// amount = deposit.data.amount -// validator_pubkeys = [v.pubkey for v in state.validators] -// if pubkey not in validator_pubkeys: -// # Verify the deposit signature (proof of possession) which is not checked by the deposit contract -// deposit_message = DepositMessage( -// pubkey=deposit.data.pubkey, -// withdrawal_credentials=deposit.data.withdrawal_credentials, -// amount=deposit.data.amount, -// ) -// domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks -// signing_root = compute_signing_root(deposit_message, domain) -// if not bls.Verify(pubkey, signing_root, deposit.data.signature): -// return +// # Deposits must be processed in order +// state.eth1_deposit_index += 1 // -// # Add validator and balance entries -// state.validators.append(get_validator_from_deposit(state, deposit)) -// state.balances.append(amount) -// else: -// # Increase balance by deposit amount -// index = ValidatorIndex(validator_pubkeys.index(pubkey)) -// increase_balance(state, index, amount) +// pubkey = deposit.data.pubkey +// amount = deposit.data.amount +// validator_pubkeys = [v.pubkey for v in state.validators] +// if pubkey not in validator_pubkeys: +// # Verify the deposit signature (proof of possession) which is not checked by the deposit contract +// deposit_message = DepositMessage( +// pubkey=deposit.data.pubkey, +// withdrawal_credentials=deposit.data.withdrawal_credentials, +// amount=deposit.data.amount, +// ) +// domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks +// signing_root = compute_signing_root(deposit_message, domain) +// if not bls.Verify(pubkey, signing_root, deposit.data.signature): +// return +// +// # Add validator and balance entries +// state.validators.append(get_validator_from_deposit(state, deposit)) +// state.balances.append(amount) +// else: +// # Increase balance by deposit amount +// index = ValidatorIndex(validator_pubkeys.index(pubkey)) +// increase_balance(state, index, amount) func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verifySignature bool) (state.BeaconState, bool, error) { var newValidator bool if err := verifyDeposit(beaconState, deposit); err != nil { diff --git a/beacon-chain/core/blocks/eth1_data.go b/beacon-chain/core/blocks/eth1_data.go index 527d48a7e..b3bef1439 100644 --- 
a/beacon-chain/core/blocks/eth1_data.go +++ b/beacon-chain/core/blocks/eth1_data.go @@ -15,10 +15,11 @@ import ( // into the beacon state. // // Official spec definition: -// def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None: -// state.eth1_data_votes.append(body.eth1_data) -// if state.eth1_data_votes.count(body.eth1_data) * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH: -// state.eth1_data = body.eth1_data +// +// def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None: +// state.eth1_data_votes.append(body.eth1_data) +// if state.eth1_data_votes.count(body.eth1_data) * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH: +// state.eth1_data = body.eth1_data func ProcessEth1DataInBlock(_ context.Context, beaconState state.BeaconState, eth1Data *ethpb.Eth1Data) (state.BeaconState, error) { if beaconState == nil || beaconState.IsNil() { return nil, errors.New("nil state") diff --git a/beacon-chain/core/blocks/exit.go b/beacon-chain/core/blocks/exit.go index 3473447db..f5a5dd0fc 100644 --- a/beacon-chain/core/blocks/exit.go +++ b/beacon-chain/core/blocks/exit.go @@ -27,23 +27,24 @@ var ValidatorCannotExitYetMsg = "validator has not been active long enough to ex // should exit the state's validator registry. // // Spec pseudocode definition: -// def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: -// voluntary_exit = signed_voluntary_exit.message -// validator = state.validators[voluntary_exit.validator_index] -// # Verify the validator is active -// assert is_active_validator(validator, get_current_epoch(state)) -// # Verify exit has not been initiated -// assert validator.exit_epoch == FAR_FUTURE_EPOCH -// # Exits must specify an epoch when they become valid; they are not valid before then -// assert get_current_epoch(state) >= voluntary_exit.epoch -// # Verify the validator has been active long enough -// assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD -// # Verify signature -// domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch) -// signing_root = compute_signing_root(voluntary_exit, domain) -// assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature) -// # Initiate exit -// initiate_validator_exit(state, voluntary_exit.validator_index) +// +// def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: +// voluntary_exit = signed_voluntary_exit.message +// validator = state.validators[voluntary_exit.validator_index] +// # Verify the validator is active +// assert is_active_validator(validator, get_current_epoch(state)) +// # Verify exit has not been initiated +// assert validator.exit_epoch == FAR_FUTURE_EPOCH +// # Exits must specify an epoch when they become valid; they are not valid before then +// assert get_current_epoch(state) >= voluntary_exit.epoch +// # Verify the validator has been active long enough +// assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD +// # Verify signature +// domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch) +// signing_root = compute_signing_root(voluntary_exit, domain) +// assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature) +// # Initiate exit +// initiate_validator_exit(state, voluntary_exit.validator_index) func ProcessVoluntaryExits( ctx context.Context, beaconState state.BeaconState, @@ -71,23 +72,24 @@ func ProcessVoluntaryExits( // 
VerifyExitAndSignature implements the spec defined validation for voluntary exits. // // Spec pseudocode definition: -// def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: -// voluntary_exit = signed_voluntary_exit.message -// validator = state.validators[voluntary_exit.validator_index] -// # Verify the validator is active -// assert is_active_validator(validator, get_current_epoch(state)) -// # Verify exit has not been initiated -// assert validator.exit_epoch == FAR_FUTURE_EPOCH -// # Exits must specify an epoch when they become valid; they are not valid before then -// assert get_current_epoch(state) >= voluntary_exit.epoch -// # Verify the validator has been active long enough -// assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD -// # Verify signature -// domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch) -// signing_root = compute_signing_root(voluntary_exit, domain) -// assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature) -// # Initiate exit -// initiate_validator_exit(state, voluntary_exit.validator_index) +// +// def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: +// voluntary_exit = signed_voluntary_exit.message +// validator = state.validators[voluntary_exit.validator_index] +// # Verify the validator is active +// assert is_active_validator(validator, get_current_epoch(state)) +// # Verify exit has not been initiated +// assert validator.exit_epoch == FAR_FUTURE_EPOCH +// # Exits must specify an epoch when they become valid; they are not valid before then +// assert get_current_epoch(state) >= voluntary_exit.epoch +// # Verify the validator has been active long enough +// assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD +// # Verify signature +// domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch) +// signing_root = compute_signing_root(voluntary_exit, domain) +// assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature) +// # Initiate exit +// initiate_validator_exit(state, voluntary_exit.validator_index) func VerifyExitAndSignature( validator state.ReadOnlyValidator, currentSlot types.Slot, @@ -117,23 +119,24 @@ func VerifyExitAndSignature( // verifyExitConditions implements the spec defined validation for voluntary exits(excluding signatures). 
// // Spec pseudocode definition: -// def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: -// voluntary_exit = signed_voluntary_exit.message -// validator = state.validators[voluntary_exit.validator_index] -// # Verify the validator is active -// assert is_active_validator(validator, get_current_epoch(state)) -// # Verify exit has not been initiated -// assert validator.exit_epoch == FAR_FUTURE_EPOCH -// # Exits must specify an epoch when they become valid; they are not valid before then -// assert get_current_epoch(state) >= voluntary_exit.epoch -// # Verify the validator has been active long enough -// assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD -// # Verify signature -// domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch) -// signing_root = compute_signing_root(voluntary_exit, domain) -// assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature) -// # Initiate exit -// initiate_validator_exit(state, voluntary_exit.validator_index) +// +// def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: +// voluntary_exit = signed_voluntary_exit.message +// validator = state.validators[voluntary_exit.validator_index] +// # Verify the validator is active +// assert is_active_validator(validator, get_current_epoch(state)) +// # Verify exit has not been initiated +// assert validator.exit_epoch == FAR_FUTURE_EPOCH +// # Exits must specify an epoch when they become valid; they are not valid before then +// assert get_current_epoch(state) >= voluntary_exit.epoch +// # Verify the validator has been active long enough +// assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD +// # Verify signature +// domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch) +// signing_root = compute_signing_root(voluntary_exit, domain) +// assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature) +// # Initiate exit +// initiate_validator_exit(state, voluntary_exit.validator_index) func verifyExitConditions(validator state.ReadOnlyValidator, currentSlot types.Slot, exit *ethpb.VoluntaryExit) error { currentEpoch := slots.ToEpoch(currentSlot) // Verify the validator is active. 
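The three spec excerpts above quote the same process_voluntary_exit pseudocode for ProcessVoluntaryExits, VerifyExitAndSignature, and verifyExitConditions. Setting the BLS signature check aside, the remaining assertions are plain epoch comparisons. The standalone Go sketch below illustrates just those comparisons; the types, field names, and the mainnet-style constant are illustrative stand-ins, not Prysm's actual state interfaces.

package main

import (
	"errors"
	"fmt"
)

const (
	farFutureEpoch       = ^uint64(0) // FAR_FUTURE_EPOCH
	shardCommitteePeriod = 256        // mainnet SHARD_COMMITTEE_PERIOD (assumed here)
)

type validator struct {
	ActivationEpoch uint64
	ExitEpoch       uint64
}

type voluntaryExit struct {
	Epoch uint64
}

// checkExitConditions mirrors the non-signature assertions of
// process_voluntary_exit; it assumes the caller has already verified
// that the validator is active in the current epoch.
func checkExitConditions(v validator, currentEpoch uint64, exit voluntaryExit) error {
	// assert validator.exit_epoch == FAR_FUTURE_EPOCH
	if v.ExitEpoch != farFutureEpoch {
		return errors.New("exit already initiated")
	}
	// assert get_current_epoch(state) >= voluntary_exit.epoch
	if currentEpoch < exit.Epoch {
		return errors.New("exit is not valid yet")
	}
	// assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD
	if currentEpoch < v.ActivationEpoch+shardCommitteePeriod {
		return errors.New("validator has not been active long enough to exit")
	}
	return nil
}

func main() {
	v := validator{ActivationEpoch: 10, ExitEpoch: farFutureEpoch}
	fmt.Println(checkExitConditions(v, 300, voluntaryExit{Epoch: 290})) // <nil>
	fmt.Println(checkExitConditions(v, 100, voluntaryExit{Epoch: 90}))  // not active long enough
}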
diff --git a/beacon-chain/core/blocks/header.go b/beacon-chain/core/blocks/header.go index 9c391cb3a..5c9aee8be 100644 --- a/beacon-chain/core/blocks/header.go +++ b/beacon-chain/core/blocks/header.go @@ -18,27 +18,27 @@ import ( // // Spec pseudocode definition: // -// def process_block_header(state: BeaconState, block: BeaconBlock) -> None: -// # Verify that the slots match -// assert block.slot == state.slot -// # Verify that the block is newer than latest block header -// assert block.slot > state.latest_block_header.slot -// # Verify that proposer index is the correct index -// assert block.proposer_index == get_beacon_proposer_index(state) -// # Verify that the parent matches -// assert block.parent_root == hash_tree_root(state.latest_block_header) -// # Cache current block as the new latest block -// state.latest_block_header = BeaconBlockHeader( -// slot=block.slot, -// proposer_index=block.proposer_index, -// parent_root=block.parent_root, -// state_root=Bytes32(), # Overwritten in the next process_slot call -// body_root=hash_tree_root(block.body), -// ) +// def process_block_header(state: BeaconState, block: BeaconBlock) -> None: +// # Verify that the slots match +// assert block.slot == state.slot +// # Verify that the block is newer than latest block header +// assert block.slot > state.latest_block_header.slot +// # Verify that proposer index is the correct index +// assert block.proposer_index == get_beacon_proposer_index(state) +// # Verify that the parent matches +// assert block.parent_root == hash_tree_root(state.latest_block_header) +// # Cache current block as the new latest block +// state.latest_block_header = BeaconBlockHeader( +// slot=block.slot, +// proposer_index=block.proposer_index, +// parent_root=block.parent_root, +// state_root=Bytes32(), # Overwritten in the next process_slot call +// body_root=hash_tree_root(block.body), +// ) // -// # Verify proposer is not slashed -// proposer = state.validators[block.proposer_index] -// assert not proposer.slashed +// # Verify proposer is not slashed +// proposer = state.validators[block.proposer_index] +// assert not proposer.slashed func ProcessBlockHeader( ctx context.Context, beaconState state.BeaconState, @@ -73,27 +73,28 @@ func ProcessBlockHeader( // using a unsigned block. 
// // Spec pseudocode definition: -// def process_block_header(state: BeaconState, block: BeaconBlock) -> None: -// # Verify that the slots match -// assert block.slot == state.slot -// # Verify that the block is newer than latest block header -// assert block.slot > state.latest_block_header.slot -// # Verify that proposer index is the correct index -// assert block.proposer_index == get_beacon_proposer_index(state) -// # Verify that the parent matches -// assert block.parent_root == hash_tree_root(state.latest_block_header) -// # Cache current block as the new latest block -// state.latest_block_header = BeaconBlockHeader( -// slot=block.slot, -// proposer_index=block.proposer_index, -// parent_root=block.parent_root, -// state_root=Bytes32(), # Overwritten in the next process_slot call -// body_root=hash_tree_root(block.body), -// ) // -// # Verify proposer is not slashed -// proposer = state.validators[block.proposer_index] -// assert not proposer.slashed +// def process_block_header(state: BeaconState, block: BeaconBlock) -> None: +// # Verify that the slots match +// assert block.slot == state.slot +// # Verify that the block is newer than latest block header +// assert block.slot > state.latest_block_header.slot +// # Verify that proposer index is the correct index +// assert block.proposer_index == get_beacon_proposer_index(state) +// # Verify that the parent matches +// assert block.parent_root == hash_tree_root(state.latest_block_header) +// # Cache current block as the new latest block +// state.latest_block_header = BeaconBlockHeader( +// slot=block.slot, +// proposer_index=block.proposer_index, +// parent_root=block.parent_root, +// state_root=Bytes32(), # Overwritten in the next process_slot call +// body_root=hash_tree_root(block.body), +// ) +// +// # Verify proposer is not slashed +// proposer = state.validators[block.proposer_index] +// assert not proposer.slashed func ProcessBlockHeaderNoVerify( ctx context.Context, beaconState state.BeaconState, diff --git a/beacon-chain/core/blocks/payload.go b/beacon-chain/core/blocks/payload.go index 542cb9690..e2f085ae3 100644 --- a/beacon-chain/core/blocks/payload.go +++ b/beacon-chain/core/blocks/payload.go @@ -25,7 +25,8 @@ var ( // // Spec code: // def is_merge_transition_complete(state: BeaconState) -> bool: -// return state.latest_execution_payload_header != ExecutionPayloadHeader() +// +// return state.latest_execution_payload_header != ExecutionPayloadHeader() func IsMergeTransitionComplete(st state.BeaconState) (bool, error) { if st == nil { return false, errors.New("nil state") @@ -51,7 +52,8 @@ func IsMergeTransitionComplete(st state.BeaconState) (bool, error) { // // Spec code: // def is_execution_block(block: BeaconBlock) -> bool: -// return block.body.execution_payload != ExecutionPayload() +// +// return block.body.execution_payload != ExecutionPayload() func IsExecutionBlock(body interfaces.BeaconBlockBody) (bool, error) { if body == nil { return false, errors.New("nil block body") @@ -76,7 +78,8 @@ func IsExecutionBlock(body interfaces.BeaconBlockBody) (bool, error) { // // Spec code: // def is_execution_enabled(state: BeaconState, body: BeaconBlockBody) -> bool: -// return is_merge_block(state, body) or is_merge_complete(state) +// +// return is_merge_block(state, body) or is_merge_complete(state) func IsExecutionEnabled(st state.BeaconState, body interfaces.BeaconBlockBody) (bool, error) { if st == nil || body == nil { return false, errors.New("nil state or block body") @@ -116,9 +119,10 @@ func 
IsPreBellatrixVersion(v int) bool { // These validation steps ONLY apply to post merge. // // Spec code: -// # Verify consistency of the parent hash with respect to the previous execution payload header -// if is_merge_complete(state): -// assert payload.parent_hash == state.latest_execution_payload_header.block_hash +// +// # Verify consistency of the parent hash with respect to the previous execution payload header +// if is_merge_complete(state): +// assert payload.parent_hash == state.latest_execution_payload_header.block_hash func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces.ExecutionData) error { complete, err := IsMergeTransitionComplete(st) if err != nil { @@ -141,10 +145,11 @@ func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload interfaces. // These validation steps apply to both pre merge and post merge. // // Spec code: -// # Verify random -// assert payload.random == get_randao_mix(state, get_current_epoch(state)) -// # Verify timestamp -// assert payload.timestamp == compute_timestamp_at_slot(state, state.slot) +// +// # Verify random +// assert payload.random == get_randao_mix(state, get_current_epoch(state)) +// # Verify timestamp +// assert payload.timestamp == compute_timestamp_at_slot(state, state.slot) func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) error { random, err := helpers.RandaoMix(st, time.CurrentEpoch(st)) if err != nil { @@ -170,32 +175,33 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err // // Spec code: // def process_execution_payload(state: BeaconState, payload: ExecutionPayload, execution_engine: ExecutionEngine) -> None: -// # Verify consistency of the parent hash with respect to the previous execution payload header -// if is_merge_complete(state): -// assert payload.parent_hash == state.latest_execution_payload_header.block_hash -// # Verify random -// assert payload.random == get_randao_mix(state, get_current_epoch(state)) -// # Verify timestamp -// assert payload.timestamp == compute_timestamp_at_slot(state, state.slot) -// # Verify the execution payload is valid -// assert execution_engine.execute_payload(payload) -// # Cache execution payload header -// state.latest_execution_payload_header = ExecutionPayloadHeader( -// parent_hash=payload.parent_hash, -// FeeRecipient=payload.FeeRecipient, -// state_root=payload.state_root, -// receipt_root=payload.receipt_root, -// logs_bloom=payload.logs_bloom, -// random=payload.random, -// block_number=payload.block_number, -// gas_limit=payload.gas_limit, -// gas_used=payload.gas_used, -// timestamp=payload.timestamp, -// extra_data=payload.extra_data, -// base_fee_per_gas=payload.base_fee_per_gas, -// block_hash=payload.block_hash, -// transactions_root=hash_tree_root(payload.transactions), -// ) +// +// # Verify consistency of the parent hash with respect to the previous execution payload header +// if is_merge_complete(state): +// assert payload.parent_hash == state.latest_execution_payload_header.block_hash +// # Verify random +// assert payload.random == get_randao_mix(state, get_current_epoch(state)) +// # Verify timestamp +// assert payload.timestamp == compute_timestamp_at_slot(state, state.slot) +// # Verify the execution payload is valid +// assert execution_engine.execute_payload(payload) +// # Cache execution payload header +// state.latest_execution_payload_header = ExecutionPayloadHeader( +// parent_hash=payload.parent_hash, +// FeeRecipient=payload.FeeRecipient, +// 
state_root=payload.state_root, +// receipt_root=payload.receipt_root, +// logs_bloom=payload.logs_bloom, +// random=payload.random, +// block_number=payload.block_number, +// gas_limit=payload.gas_limit, +// gas_used=payload.gas_used, +// timestamp=payload.timestamp, +// extra_data=payload.extra_data, +// base_fee_per_gas=payload.base_fee_per_gas, +// block_hash=payload.block_hash, +// transactions_root=hash_tree_root(payload.transactions), +// ) func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (state.BeaconState, error) { if st.Version() >= version.Capella { withdrawals, err := payload.Withdrawals() diff --git a/beacon-chain/core/blocks/randao.go b/beacon-chain/core/blocks/randao.go index 8323b563a..b8088ed19 100644 --- a/beacon-chain/core/blocks/randao.go +++ b/beacon-chain/core/blocks/randao.go @@ -17,15 +17,16 @@ import ( // in the beacon state's latest randao mixes slice. // // Spec pseudocode definition: -// def process_randao(state: BeaconState, body: BeaconBlockBody) -> None: -// epoch = get_current_epoch(state) -// # Verify RANDAO reveal -// proposer = state.validators[get_beacon_proposer_index(state)] -// signing_root = compute_signing_root(epoch, get_domain(state, DOMAIN_RANDAO)) -// assert bls.Verify(proposer.pubkey, signing_root, body.randao_reveal) -// # Mix in RANDAO reveal -// mix = xor(get_randao_mix(state, epoch), hash(body.randao_reveal)) -// state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] = mix +// +// def process_randao(state: BeaconState, body: BeaconBlockBody) -> None: +// epoch = get_current_epoch(state) +// # Verify RANDAO reveal +// proposer = state.validators[get_beacon_proposer_index(state)] +// signing_root = compute_signing_root(epoch, get_domain(state, DOMAIN_RANDAO)) +// assert bls.Verify(proposer.pubkey, signing_root, body.randao_reveal) +// # Mix in RANDAO reveal +// mix = xor(get_randao_mix(state, epoch), hash(body.randao_reveal)) +// state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] = mix func ProcessRandao( ctx context.Context, beaconState state.BeaconState, @@ -56,11 +57,12 @@ func ProcessRandao( // in the beacon state's latest randao mixes slice. 
// // Spec pseudocode definition: -// # Mix it in -// state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] = ( -// xor(get_randao_mix(state, get_current_epoch(state)), -// hash(body.randao_reveal)) -// ) +// +// # Mix it in +// state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] = ( +// xor(get_randao_mix(state, get_current_epoch(state)), +// hash(body.randao_reveal)) +// ) func ProcessRandaoNoVerify( beaconState state.BeaconState, randaoReveal []byte, diff --git a/beacon-chain/core/blocks/withdrawals.go b/beacon-chain/core/blocks/withdrawals.go index 348770a14..8d3e35c24 100644 --- a/beacon-chain/core/blocks/withdrawals.go +++ b/beacon-chain/core/blocks/withdrawals.go @@ -23,22 +23,22 @@ const executionToBLSPadding = 12 // // Spec pseudocode definition: // -//def process_bls_to_execution_change(state: BeaconState, signed_address_change: SignedBLSToExecutionChange) -> None: -// validator = state.validators[address_change.validator_index] +// def process_bls_to_execution_change(state: BeaconState, signed_address_change: SignedBLSToExecutionChange) -> None: // -// assert validator.withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX -// assert validator.withdrawal_credentials[1:] == hash(address_change.from_bls_pubkey)[1:] +// validator = state.validators[address_change.validator_index] // -// domain = get_domain(state, DOMAIN_BLS_TO_EXECUTION_CHANGE) -// signing_root = compute_signing_root(address_change, domain) -// assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature) +// assert validator.withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX +// assert validator.withdrawal_credentials[1:] == hash(address_change.from_bls_pubkey)[1:] // -// validator.withdrawal_credentials = ( -// ETH1_ADDRESS_WITHDRAWAL_PREFIX -// + b'\x00' * 11 -// + address_change.to_execution_address -// ) +// domain = get_domain(state, DOMAIN_BLS_TO_EXECUTION_CHANGE) +// signing_root = compute_signing_root(address_change, domain) +// assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature) // +// validator.withdrawal_credentials = ( +// ETH1_ADDRESS_WITHDRAWAL_PREFIX +// + b'\x00' * 11 +// + address_change.to_execution_address +// ) func ProcessBLSToExecutionChange(st state.BeaconState, signed *ethpb.SignedBLSToExecutionChange) (state.BeaconState, error) { if signed == nil { return st, errNilSignedWithdrawalMessage diff --git a/beacon-chain/core/epoch/epoch_processing.go b/beacon-chain/core/epoch/epoch_processing.go index 828e2795d..7ebae510e 100644 --- a/beacon-chain/core/epoch/epoch_processing.go +++ b/beacon-chain/core/epoch/epoch_processing.go @@ -49,12 +49,13 @@ func (s sortableIndices) Less(i, j int) bool { // need to get attesting balance from attestations. // // Spec pseudocode definition: -// def get_attesting_balance(state: BeaconState, attestations: Sequence[PendingAttestation]) -> Gwei: -// """ -// Return the combined effective balance of the set of unslashed validators participating in ``attestations``. -// Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. -// """ -// return get_total_balance(state, get_unslashed_attesting_indices(state, attestations)) +// +// def get_attesting_balance(state: BeaconState, attestations: Sequence[PendingAttestation]) -> Gwei: +// """ +// Return the combined effective balance of the set of unslashed validators participating in ``attestations``. 
+// Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. +// """ +// return get_total_balance(state, get_unslashed_attesting_indices(state, attestations)) func AttestingBalance(ctx context.Context, state state.ReadOnlyBeaconState, atts []*ethpb.PendingAttestation) (uint64, error) { indices, err := UnslashedAttestingIndices(ctx, state, atts) if err != nil { @@ -67,25 +68,26 @@ func AttestingBalance(ctx context.Context, state state.ReadOnlyBeaconState, atts // the amount to rotate is determined churn limit. // // Spec pseudocode definition: -// def process_registry_updates(state: BeaconState) -> None: -// # Process activation eligibility and ejections -// for index, validator in enumerate(state.validators): -// if is_eligible_for_activation_queue(validator): -// validator.activation_eligibility_epoch = get_current_epoch(state) + 1 // -// if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE: -// initiate_validator_exit(state, ValidatorIndex(index)) +// def process_registry_updates(state: BeaconState) -> None: +// # Process activation eligibility and ejections +// for index, validator in enumerate(state.validators): +// if is_eligible_for_activation_queue(validator): +// validator.activation_eligibility_epoch = get_current_epoch(state) + 1 // -// # Queue validators eligible for activation and not yet dequeued for activation -// activation_queue = sorted([ -// index for index, validator in enumerate(state.validators) -// if is_eligible_for_activation(state, validator) -// # Order by the sequence of activation_eligibility_epoch setting and then index -// ], key=lambda index: (state.validators[index].activation_eligibility_epoch, index)) -// # Dequeued validators for activation up to churn limit -// for index in activation_queue[:get_validator_churn_limit(state)]: -// validator = state.validators[index] -// validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state)) +// if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE: +// initiate_validator_exit(state, ValidatorIndex(index)) +// +// # Queue validators eligible for activation and not yet dequeued for activation +// activation_queue = sorted([ +// index for index, validator in enumerate(state.validators) +// if is_eligible_for_activation(state, validator) +// # Order by the sequence of activation_eligibility_epoch setting and then index +// ], key=lambda index: (state.validators[index].activation_eligibility_epoch, index)) +// # Dequeued validators for activation up to churn limit +// for index in activation_queue[:get_validator_churn_limit(state)]: +// validator = state.validators[index] +// validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state)) func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state.BeaconState, error) { currentEpoch := time.CurrentEpoch(state) vals := state.Validators() @@ -155,16 +157,16 @@ func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state // ProcessSlashings processes the slashed validators during epoch processing, // -// def process_slashings(state: BeaconState) -> None: -// epoch = get_current_epoch(state) -// total_balance = get_total_active_balance(state) -// adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER, total_balance) -// for index, validator in enumerate(state.validators): -// if 
validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch: -// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow -// penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance -// penalty = penalty_numerator // total_balance * increment -// decrease_balance(state, ValidatorIndex(index), penalty) +// def process_slashings(state: BeaconState) -> None: +// epoch = get_current_epoch(state) +// total_balance = get_total_active_balance(state) +// adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER, total_balance) +// for index, validator in enumerate(state.validators): +// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch: +// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow +// penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance +// penalty = penalty_numerator // total_balance * increment +// decrease_balance(state, ValidatorIndex(index), penalty) func ProcessSlashings(state state.BeaconState, slashingMultiplier uint64) (state.BeaconState, error) { currentEpoch := time.CurrentEpoch(state) totalBalance, err := helpers.TotalActiveBalance(state) @@ -207,11 +209,12 @@ func ProcessSlashings(state state.BeaconState, slashingMultiplier uint64) (state // ProcessEth1DataReset processes updates to ETH1 data votes during epoch processing. // // Spec pseudocode definition: -// def process_eth1_data_reset(state: BeaconState) -> None: -// next_epoch = Epoch(get_current_epoch(state) + 1) -// # Reset eth1 data votes -// if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0: -// state.eth1_data_votes = [] +// +// def process_eth1_data_reset(state: BeaconState) -> None: +// next_epoch = Epoch(get_current_epoch(state) + 1) +// # Reset eth1 data votes +// if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0: +// state.eth1_data_votes = [] func ProcessEth1DataReset(state state.BeaconState) (state.BeaconState, error) { currentEpoch := time.CurrentEpoch(state) nextEpoch := currentEpoch + 1 @@ -229,18 +232,19 @@ func ProcessEth1DataReset(state state.BeaconState) (state.BeaconState, error) { // ProcessEffectiveBalanceUpdates processes effective balance updates during epoch processing. 
// // Spec pseudocode definition: -// def process_effective_balance_updates(state: BeaconState) -> None: -// # Update effective balances with hysteresis -// for index, validator in enumerate(state.validators): -// balance = state.balances[index] -// HYSTERESIS_INCREMENT = uint64(EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT) -// DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER -// UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER -// if ( -// balance + DOWNWARD_THRESHOLD < validator.effective_balance -// or validator.effective_balance + UPWARD_THRESHOLD < balance -// ): -// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) +// +// def process_effective_balance_updates(state: BeaconState) -> None: +// # Update effective balances with hysteresis +// for index, validator in enumerate(state.validators): +// balance = state.balances[index] +// HYSTERESIS_INCREMENT = uint64(EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT) +// DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER +// UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER +// if ( +// balance + DOWNWARD_THRESHOLD < validator.effective_balance +// or validator.effective_balance + UPWARD_THRESHOLD < balance +// ): +// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) func ProcessEffectiveBalanceUpdates(state state.BeaconState) (state.BeaconState, error) { effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement maxEffBalance := params.BeaconConfig().MaxEffectiveBalance @@ -285,10 +289,11 @@ func ProcessEffectiveBalanceUpdates(state state.BeaconState) (state.BeaconState, // ProcessSlashingsReset processes the total slashing balances updates during epoch processing. // // Spec pseudocode definition: -// def process_slashings_reset(state: BeaconState) -> None: -// next_epoch = Epoch(get_current_epoch(state) + 1) -// # Reset slashings -// state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0) +// +// def process_slashings_reset(state: BeaconState) -> None: +// next_epoch = Epoch(get_current_epoch(state) + 1) +// # Reset slashings +// state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0) func ProcessSlashingsReset(state state.BeaconState) (state.BeaconState, error) { currentEpoch := time.CurrentEpoch(state) nextEpoch := currentEpoch + 1 @@ -314,11 +319,12 @@ func ProcessSlashingsReset(state state.BeaconState) (state.BeaconState, error) { // ProcessRandaoMixesReset processes the final updates to RANDAO mix during epoch processing. 
// // Spec pseudocode definition: -// def process_randao_mixes_reset(state: BeaconState) -> None: -// current_epoch = get_current_epoch(state) -// next_epoch = Epoch(current_epoch + 1) -// # Set randao mix -// state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix(state, current_epoch) +// +// def process_randao_mixes_reset(state: BeaconState) -> None: +// current_epoch = get_current_epoch(state) +// next_epoch = Epoch(current_epoch + 1) +// # Set randao mix +// state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix(state, current_epoch) func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error) { currentEpoch := time.CurrentEpoch(state) nextEpoch := currentEpoch + 1 @@ -346,12 +352,13 @@ func ProcessRandaoMixesReset(state state.BeaconState) (state.BeaconState, error) // ProcessHistoricalRootsUpdate processes the updates to historical root accumulator during epoch processing. // // Spec pseudocode definition: -// def process_historical_roots_update(state: BeaconState) -> None: -// # Set historical root accumulator -// next_epoch = Epoch(get_current_epoch(state) + 1) -// if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0: -// historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots) -// state.historical_roots.append(hash_tree_root(historical_batch)) +// +// def process_historical_roots_update(state: BeaconState) -> None: +// # Set historical root accumulator +// next_epoch = Epoch(get_current_epoch(state) + 1) +// if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0: +// historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots) +// state.historical_roots.append(hash_tree_root(historical_batch)) func ProcessHistoricalRootsUpdate(state state.BeaconState) (state.BeaconState, error) { currentEpoch := time.CurrentEpoch(state) nextEpoch := currentEpoch + 1 @@ -378,10 +385,11 @@ func ProcessHistoricalRootsUpdate(state state.BeaconState) (state.BeaconState, e // ProcessParticipationRecordUpdates rotates current/previous epoch attestations during epoch processing. // // Spec pseudocode definition: -// def process_participation_record_updates(state: BeaconState) -> None: -// # Rotate current/previous epoch attestations -// state.previous_epoch_attestations = state.current_epoch_attestations -// state.current_epoch_attestations = [] +// +// def process_participation_record_updates(state: BeaconState) -> None: +// # Rotate current/previous epoch attestations +// state.previous_epoch_attestations = state.current_epoch_attestations +// state.current_epoch_attestations = [] func ProcessParticipationRecordUpdates(state state.BeaconState) (state.BeaconState, error) { if err := state.RotateAttestations(); err != nil { return nil, err @@ -436,12 +444,13 @@ func ProcessFinalUpdates(state state.BeaconState) (state.BeaconState, error) { // it sorts the indices and filters out the slashed ones. 
// // Spec pseudocode definition: -// def get_unslashed_attesting_indices(state: BeaconState, -// attestations: Sequence[PendingAttestation]) -> Set[ValidatorIndex]: -// output = set() # type: Set[ValidatorIndex] -// for a in attestations: -// output = output.union(get_attesting_indices(state, a.data, a.aggregation_bits)) -// return set(filter(lambda index: not state.validators[index].slashed, output)) +// +// def get_unslashed_attesting_indices(state: BeaconState, +// attestations: Sequence[PendingAttestation]) -> Set[ValidatorIndex]: +// output = set() # type: Set[ValidatorIndex] +// for a in attestations: +// output = output.union(get_attesting_indices(state, a.data, a.aggregation_bits)) +// return set(filter(lambda index: not state.validators[index].slashed, output)) func UnslashedAttestingIndices(ctx context.Context, state state.ReadOnlyBeaconState, atts []*ethpb.PendingAttestation) ([]types.ValidatorIndex, error) { var setIndices []types.ValidatorIndex seen := make(map[uint64]bool) diff --git a/beacon-chain/core/epoch/precompute/justification_finalization.go b/beacon-chain/core/epoch/precompute/justification_finalization.go index 5831d31f0..33e702096 100644 --- a/beacon-chain/core/epoch/precompute/justification_finalization.go +++ b/beacon-chain/core/epoch/precompute/justification_finalization.go @@ -42,17 +42,18 @@ func UnrealizedCheckpoints(st state.BeaconState) (uint64, *ethpb.Checkpoint, *et // Note: this is an optimized version by passing in precomputed total and attesting balances. // // Spec pseudocode definition: -// def process_justification_and_finalization(state: BeaconState) -> None: -// # Initial FFG checkpoint values have a `0x00` stub for `root`. -// # Skip FFG updates in the first two epochs to avoid corner cases that might result in modifying this stub. -// if get_current_epoch(state) <= GENESIS_EPOCH + 1: -// return -// previous_attestations = get_matching_target_attestations(state, get_previous_epoch(state)) -// current_attestations = get_matching_target_attestations(state, get_current_epoch(state)) -// total_active_balance = get_total_active_balance(state) -// previous_target_balance = get_attesting_balance(state, previous_attestations) -// current_target_balance = get_attesting_balance(state, current_attestations) -// weigh_justification_and_finalization(state, total_active_balance, previous_target_balance, current_target_balance) +// +// def process_justification_and_finalization(state: BeaconState) -> None: +// # Initial FFG checkpoint values have a `0x00` stub for `root`. +// # Skip FFG updates in the first two epochs to avoid corner cases that might result in modifying this stub. 
+// if get_current_epoch(state) <= GENESIS_EPOCH + 1: +// return +// previous_attestations = get_matching_target_attestations(state, get_previous_epoch(state)) +// current_attestations = get_matching_target_attestations(state, get_current_epoch(state)) +// total_active_balance = get_total_active_balance(state) +// previous_target_balance = get_attesting_balance(state, previous_attestations) +// current_target_balance = get_attesting_balance(state, current_attestations) +// weigh_justification_and_finalization(state, total_active_balance, previous_target_balance, current_target_balance) func ProcessJustificationAndFinalizationPreCompute(state state.BeaconState, pBal *Balance) (state.BeaconState, error) { canProcessSlot, err := slots.EpochStart(2 /*epoch*/) if err != nil { @@ -113,41 +114,42 @@ func weighJustificationAndFinalization(state state.BeaconState, newBits bitfield // checkpoints at epoch transition // Spec pseudocode definition: // def weigh_justification_and_finalization(state: BeaconState, -// total_active_balance: Gwei, -// previous_epoch_target_balance: Gwei, -// current_epoch_target_balance: Gwei) -> None: -// previous_epoch = get_previous_epoch(state) -// current_epoch = get_current_epoch(state) -// old_previous_justified_checkpoint = state.previous_justified_checkpoint -// old_current_justified_checkpoint = state.current_justified_checkpoint // -// # Process justifications -// state.previous_justified_checkpoint = state.current_justified_checkpoint -// state.justification_bits[1:] = state.justification_bits[:JUSTIFICATION_BITS_LENGTH - 1] -// state.justification_bits[0] = 0b0 -// if previous_epoch_target_balance * 3 >= total_active_balance * 2: -// state.current_justified_checkpoint = Checkpoint(epoch=previous_epoch, -// root=get_block_root(state, previous_epoch)) -// state.justification_bits[1] = 0b1 -// if current_epoch_target_balance * 3 >= total_active_balance * 2: -// state.current_justified_checkpoint = Checkpoint(epoch=current_epoch, -// root=get_block_root(state, current_epoch)) -// state.justification_bits[0] = 0b1 +// total_active_balance: Gwei, +// previous_epoch_target_balance: Gwei, +// current_epoch_target_balance: Gwei) -> None: +// previous_epoch = get_previous_epoch(state) +// current_epoch = get_current_epoch(state) +// old_previous_justified_checkpoint = state.previous_justified_checkpoint +// old_current_justified_checkpoint = state.current_justified_checkpoint // -// # Process finalizations -// bits = state.justification_bits -// # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source -// if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch: -// state.finalized_checkpoint = old_previous_justified_checkpoint -// # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source -// if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch: -// state.finalized_checkpoint = old_previous_justified_checkpoint -// # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source -// if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch: -// state.finalized_checkpoint = old_current_justified_checkpoint -// # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source -// if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch: -// state.finalized_checkpoint = old_current_justified_checkpoint +// # Process justifications +// state.previous_justified_checkpoint = 
state.current_justified_checkpoint +// state.justification_bits[1:] = state.justification_bits[:JUSTIFICATION_BITS_LENGTH - 1] +// state.justification_bits[0] = 0b0 +// if previous_epoch_target_balance * 3 >= total_active_balance * 2: +// state.current_justified_checkpoint = Checkpoint(epoch=previous_epoch, +// root=get_block_root(state, previous_epoch)) +// state.justification_bits[1] = 0b1 +// if current_epoch_target_balance * 3 >= total_active_balance * 2: +// state.current_justified_checkpoint = Checkpoint(epoch=current_epoch, +// root=get_block_root(state, current_epoch)) +// state.justification_bits[0] = 0b1 +// +// # Process finalizations +// bits = state.justification_bits +// # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source +// if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch: +// state.finalized_checkpoint = old_previous_justified_checkpoint +// # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source +// if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch: +// state.finalized_checkpoint = old_previous_justified_checkpoint +// # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source +// if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch: +// state.finalized_checkpoint = old_current_justified_checkpoint +// # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source +// if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch: +// state.finalized_checkpoint = old_current_justified_checkpoint func computeCheckpoints(state state.BeaconState, newBits bitfield.Bitvector4) (*ethpb.Checkpoint, *ethpb.Checkpoint, error) { prevEpoch := time.PrevEpoch(state) currentEpoch := time.CurrentEpoch(state) diff --git a/beacon-chain/core/epoch/precompute/reward_penalty_test.go b/beacon-chain/core/epoch/precompute/reward_penalty_test.go index 6f7fe6e4b..e748d3d45 100644 --- a/beacon-chain/core/epoch/precompute/reward_penalty_test.go +++ b/beacon-chain/core/epoch/precompute/reward_penalty_test.go @@ -358,10 +358,11 @@ func TestProposerDeltaPrecompute_SlashedCase(t *testing.T) { // individual validator's base reward quotient. // // Spec pseudocode definition: -// def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: -// total_balance = get_total_active_balance(state) -// effective_balance = state.validators[index].effective_balance -// return Gwei(effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH) +// +// def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: +// total_balance = get_total_active_balance(state) +// effective_balance = state.validators[index].effective_balance +// return Gwei(effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH) func baseReward(state state.ReadOnlyBeaconState, index types.ValidatorIndex) (uint64, error) { totalBalance, err := helpers.TotalActiveBalance(state) if err != nil { diff --git a/beacon-chain/core/helpers/beacon_committee.go b/beacon-chain/core/helpers/beacon_committee.go index 6059d224d..da87b3af3 100644 --- a/beacon-chain/core/helpers/beacon_committee.go +++ b/beacon-chain/core/helpers/beacon_committee.go @@ -35,14 +35,15 @@ var ( // count. 
// // Spec pseudocode definition: -// def get_committee_count_per_slot(state: BeaconState, epoch: Epoch) -> uint64: -// """ -// Return the number of committees in each slot for the given ``epoch``. -// """ -// return max(uint64(1), min( -// MAX_COMMITTEES_PER_SLOT, -// uint64(len(get_active_validator_indices(state, epoch))) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, -// )) +// +// def get_committee_count_per_slot(state: BeaconState, epoch: Epoch) -> uint64: +// """ +// Return the number of committees in each slot for the given ``epoch``. +// """ +// return max(uint64(1), min( +// MAX_COMMITTEES_PER_SLOT, +// uint64(len(get_active_validator_indices(state, epoch))) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, +// )) func SlotCommitteeCount(activeValidatorCount uint64) uint64 { var committeesPerSlot = activeValidatorCount / uint64(params.BeaconConfig().SlotsPerEpoch) / params.BeaconConfig().TargetCommitteeSize @@ -61,18 +62,19 @@ func SlotCommitteeCount(activeValidatorCount uint64) uint64 { // becomes expensive, consider using BeaconCommittee below. // // Spec pseudocode definition: -// def get_beacon_committee(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Sequence[ValidatorIndex]: -// """ -// Return the beacon committee at ``slot`` for ``index``. -// """ -// epoch = compute_epoch_at_slot(slot) -// committees_per_slot = get_committee_count_per_slot(state, epoch) -// return compute_committee( -// indices=get_active_validator_indices(state, epoch), -// seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER), -// index=(slot % SLOTS_PER_EPOCH) * committees_per_slot + index, -// count=committees_per_slot * SLOTS_PER_EPOCH, -// ) +// +// def get_beacon_committee(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Sequence[ValidatorIndex]: +// """ +// Return the beacon committee at ``slot`` for ``index``. +// """ +// epoch = compute_epoch_at_slot(slot) +// committees_per_slot = get_committee_count_per_slot(state, epoch) +// return compute_committee( +// indices=get_active_validator_indices(state, epoch), +// seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER), +// index=(slot % SLOTS_PER_EPOCH) * committees_per_slot + index, +// count=committees_per_slot * SLOTS_PER_EPOCH, +// ) func BeaconCommitteeFromState(ctx context.Context, state state.ReadOnlyBeaconState, slot types.Slot, committeeIndex types.CommitteeIndex) ([]types.ValidatorIndex, error) { epoch := slots.ToEpoch(slot) seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester) @@ -101,18 +103,19 @@ func BeaconCommitteeFromState(ctx context.Context, state state.ReadOnlyBeaconSta // from the spec definition. Having them as an argument allows for cheaper computation run time. // // Spec pseudocode definition: -// def get_beacon_committee(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Sequence[ValidatorIndex]: -// """ -// Return the beacon committee at ``slot`` for ``index``. -// """ -// epoch = compute_epoch_at_slot(slot) -// committees_per_slot = get_committee_count_per_slot(state, epoch) -// return compute_committee( -// indices=get_active_validator_indices(state, epoch), -// seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER), -// index=(slot % SLOTS_PER_EPOCH) * committees_per_slot + index, -// count=committees_per_slot * SLOTS_PER_EPOCH, -// ) +// +// def get_beacon_committee(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Sequence[ValidatorIndex]: +// """ +// Return the beacon committee at ``slot`` for ``index``. 
+// """ +// epoch = compute_epoch_at_slot(slot) +// committees_per_slot = get_committee_count_per_slot(state, epoch) +// return compute_committee( +// indices=get_active_validator_indices(state, epoch), +// seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER), +// index=(slot % SLOTS_PER_EPOCH) * committees_per_slot + index, +// count=committees_per_slot * SLOTS_PER_EPOCH, +// ) func BeaconCommittee( ctx context.Context, validatorIndices []types.ValidatorIndex, @@ -381,16 +384,17 @@ func ClearCache() { // validator indices and seed. // // Spec pseudocode definition: -// def compute_committee(indices: Sequence[ValidatorIndex], -// seed: Bytes32, -// index: uint64, -// count: uint64) -> Sequence[ValidatorIndex]: -// """ -// Return the committee corresponding to ``indices``, ``seed``, ``index``, and committee ``count``. -// """ -// start = (len(indices) * index) // count -// end = (len(indices) * uint64(index + 1)) // count -// return [indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)] for i in range(start, end)] +// +// def compute_committee(indices: Sequence[ValidatorIndex], +// seed: Bytes32, +// index: uint64, +// count: uint64) -> Sequence[ValidatorIndex]: +// """ +// Return the committee corresponding to ``indices``, ``seed``, ``index``, and committee ``count``. +// """ +// start = (len(indices) * index) // count +// end = (len(indices) * uint64(index + 1)) // count +// return [indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)] for i in range(start, end)] func computeCommittee( indices []types.ValidatorIndex, seed [32]byte, diff --git a/beacon-chain/core/helpers/block.go b/beacon-chain/core/helpers/block.go index 00c963f6e..3c2531a87 100644 --- a/beacon-chain/core/helpers/block.go +++ b/beacon-chain/core/helpers/block.go @@ -14,12 +14,13 @@ import ( // It returns an error if the requested block root is not within the slot range. // // Spec pseudocode definition: -// def get_block_root_at_slot(state: BeaconState, slot: Slot) -> Root: -// """ -// Return the block root at a recent ``slot``. -// """ -// assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT -// return state.block_roots[slot % SLOTS_PER_HISTORICAL_ROOT] +// +// def get_block_root_at_slot(state: BeaconState, slot: Slot) -> Root: +// """ +// Return the block root at a recent ``slot``. +// """ +// assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT +// return state.block_roots[slot % SLOTS_PER_HISTORICAL_ROOT] func BlockRootAtSlot(state state.ReadOnlyBeaconState, slot types.Slot) ([]byte, error) { if math.MaxUint64-slot < params.BeaconConfig().SlotsPerHistoricalRoot { return []byte{}, errors.New("slot overflows uint64") @@ -42,11 +43,12 @@ func StateRootAtSlot(state state.ReadOnlyBeaconState, slot types.Slot) ([]byte, // BlockRoot returns the block root stored in the BeaconState for epoch start slot. // // Spec pseudocode definition: -// def get_block_root(state: BeaconState, epoch: Epoch) -> Root: -// """ -// Return the block root at the start of a recent ``epoch``. -// """ -// return get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch)) +// +// def get_block_root(state: BeaconState, epoch: Epoch) -> Root: +// """ +// Return the block root at the start of a recent ``epoch``. 
+// """ +// return get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch)) func BlockRoot(state state.ReadOnlyBeaconState, epoch types.Epoch) ([]byte, error) { s, err := slots.EpochStart(epoch) if err != nil { diff --git a/beacon-chain/core/helpers/randao.go b/beacon-chain/core/helpers/randao.go index 7b779c6ae..6796cee99 100644 --- a/beacon-chain/core/helpers/randao.go +++ b/beacon-chain/core/helpers/randao.go @@ -12,12 +12,13 @@ import ( // Seed returns the randao seed used for shuffling of a given epoch. // // Spec pseudocode definition: -// def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Bytes32: -// """ -// Return the seed at ``epoch``. -// """ -// mix = get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1)) # Avoid underflow -// return hash(domain_type + uint_to_bytes(epoch) + mix) +// +// def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Bytes32: +// """ +// Return the seed at ``epoch``. +// """ +// mix = get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1)) # Avoid underflow +// return hash(domain_type + uint_to_bytes(epoch) + mix) func Seed(state state.ReadOnlyBeaconState, epoch types.Epoch, domain [bls.DomainByteLength]byte) ([32]byte, error) { // See https://github.com/ethereum/consensus-specs/pull/1296 for // rationale on why offset has to look down by 1. @@ -40,11 +41,12 @@ func Seed(state state.ReadOnlyBeaconState, epoch types.Epoch, domain [bls.Domain // of a given slot. It is used to shuffle validators. // // Spec pseudocode definition: -// def get_randao_mix(state: BeaconState, epoch: Epoch) -> Bytes32: -// """ -// Return the randao mix at a recent ``epoch``. -// """ -// return state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] +// +// def get_randao_mix(state: BeaconState, epoch: Epoch) -> Bytes32: +// """ +// Return the randao mix at a recent ``epoch``. +// """ +// return state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] func RandaoMix(state state.ReadOnlyBeaconState, epoch types.Epoch) ([]byte, error) { return state.RandaoMixAtIndex(uint64(epoch % params.BeaconConfig().EpochsPerHistoricalVector)) } diff --git a/beacon-chain/core/helpers/rewards_penalties.go b/beacon-chain/core/helpers/rewards_penalties.go index 187d7ebaa..a12a7471b 100644 --- a/beacon-chain/core/helpers/rewards_penalties.go +++ b/beacon-chain/core/helpers/rewards_penalties.go @@ -17,13 +17,14 @@ var balanceCache = cache.NewEffectiveBalanceCache() // of input validators. // // Spec pseudocode definition: -// def get_total_balance(state: BeaconState, indices: Set[ValidatorIndex]) -> Gwei: -// """ -// Return the combined effective balance of the ``indices``. -// ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. -// Math safe up to ~10B ETH, afterwhich this overflows uint64. -// """ -// return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices]))) +// +// def get_total_balance(state: BeaconState, indices: Set[ValidatorIndex]) -> Gwei: +// """ +// Return the combined effective balance of the ``indices``. +// ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. +// Math safe up to ~10B ETH, afterwhich this overflows uint64. 
+// """ +// return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices]))) func TotalBalance(state state.ReadOnlyValidators, indices []types.ValidatorIndex) uint64 { total := uint64(0) @@ -47,12 +48,13 @@ func TotalBalance(state state.ReadOnlyValidators, indices []types.ValidatorIndex // of active validators. // // Spec pseudocode definition: -// def get_total_active_balance(state: BeaconState) -> Gwei: -// """ -// Return the combined effective balance of the active validators. -// Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. -// """ -// return get_total_balance(state, set(get_active_validator_indices(state, get_current_epoch(state)))) +// +// def get_total_active_balance(state: BeaconState) -> Gwei: +// """ +// Return the combined effective balance of the active validators. +// Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. +// """ +// return get_total_balance(state, set(get_active_validator_indices(state, get_current_epoch(state)))) func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) { bal, err := balanceCache.Get(s) switch { @@ -88,11 +90,12 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) { // IncreaseBalance increases validator with the given 'index' balance by 'delta' in Gwei. // // Spec pseudocode definition: -// def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: -// """ -// Increase the validator balance at index ``index`` by ``delta``. -// """ -// state.balances[index] += delta +// +// def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: +// """ +// Increase the validator balance at index ``index`` by ``delta``. +// """ +// state.balances[index] += delta func IncreaseBalance(state state.BeaconState, idx types.ValidatorIndex, delta uint64) error { balAtIdx, err := state.BalanceAtIndex(idx) if err != nil { @@ -110,11 +113,12 @@ func IncreaseBalance(state state.BeaconState, idx types.ValidatorIndex, delta ui // the post balance. // // Spec pseudocode definition: -// def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: -// """ -// Increase the validator balance at index ``index`` by ``delta``. -// """ -// state.balances[index] += delta +// +// def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: +// """ +// Increase the validator balance at index ``index`` by ``delta``. +// """ +// state.balances[index] += delta func IncreaseBalanceWithVal(currBalance, delta uint64) (uint64, error) { return mathutil.Add64(currBalance, delta) } @@ -122,11 +126,12 @@ func IncreaseBalanceWithVal(currBalance, delta uint64) (uint64, error) { // DecreaseBalance decreases validator with the given 'index' balance by 'delta' in Gwei. // // Spec pseudocode definition: -// def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: -// """ -// Decrease the validator balance at index ``index`` by ``delta``, with underflow protection. -// """ -// state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta +// +// def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: +// """ +// Decrease the validator balance at index ``index`` by ``delta``, with underflow protection. 
+// """ +// state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta func DecreaseBalance(state state.BeaconState, idx types.ValidatorIndex, delta uint64) error { balAtIdx, err := state.BalanceAtIndex(idx) if err != nil { @@ -140,11 +145,12 @@ func DecreaseBalance(state state.BeaconState, idx types.ValidatorIndex, delta ui // the post balance. // // Spec pseudocode definition: -// def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: -// """ -// Decrease the validator balance at index ``index`` by ``delta``, with underflow protection. -// """ -// state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta +// +// def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: +// """ +// Decrease the validator balance at index ``index`` by ``delta``, with underflow protection. +// """ +// state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta func DecreaseBalanceWithVal(currBalance, delta uint64) uint64 { if delta > currBalance { return 0 @@ -156,7 +162,8 @@ func DecreaseBalanceWithVal(currBalance, delta uint64) uint64 { // // Spec code: // def is_in_inactivity_leak(state: BeaconState) -> bool: -// return get_finality_delay(state) > MIN_EPOCHS_TO_INACTIVITY_PENALTY +// +// return get_finality_delay(state) > MIN_EPOCHS_TO_INACTIVITY_PENALTY func IsInInactivityLeak(prevEpoch, finalizedEpoch types.Epoch) bool { return FinalityDelay(prevEpoch, finalizedEpoch) > params.BeaconConfig().MinEpochsToInactivityPenalty } @@ -165,7 +172,8 @@ func IsInInactivityLeak(prevEpoch, finalizedEpoch types.Epoch) bool { // // Spec code: // def get_finality_delay(state: BeaconState) -> uint64: -// return get_previous_epoch(state) - state.finalized_checkpoint.epoch +// +// return get_previous_epoch(state) - state.finalized_checkpoint.epoch func FinalityDelay(prevEpoch, finalizedEpoch types.Epoch) types.Epoch { return prevEpoch - finalizedEpoch } diff --git a/beacon-chain/core/helpers/shuffle.go b/beacon-chain/core/helpers/shuffle.go index 05fc39339..8bdfa3da3 100644 --- a/beacon-chain/core/helpers/shuffle.go +++ b/beacon-chain/core/helpers/shuffle.go @@ -31,7 +31,7 @@ func SplitIndices(l []uint64, n uint64) [][]uint64 { return divided } -// ShuffledIndex returns `p(index)` in a pseudorandom permutation `p` of `0...list_size - 1` with ``seed`` as entropy. +// ShuffledIndex returns `p(index)` in a pseudorandom permutation `p` of `0...list_size - 1` with “seed“ as entropy. // We utilize 'swap or not' shuffling in this implementation; we are allocating the memory with the seed that stays // constant between iterations instead of reallocating it each iteration as in the spec. This implementation is based // on the original implementation from protolambda, https://github.com/protolambda/eth2-shuffle @@ -47,28 +47,29 @@ func UnShuffledIndex(index types.ValidatorIndex, indexCount uint64, seed [32]byt // ComputeShuffledIndex returns the shuffled validator index corresponding to seed and index count. // Spec pseudocode definition: -// def compute_shuffled_index(index: uint64, index_count: uint64, seed: Bytes32) -> uint64: -// """ -// Return the shuffled index corresponding to ``seed`` (and ``index_count``). 
-// """ -// assert index < index_count // -// # Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf) -// # See the 'generalized domain' algorithm on page 3 -// for current_round in range(SHUFFLE_ROUND_COUNT): -// pivot = bytes_to_uint64(hash(seed + uint_to_bytes(uint8(current_round)))[0:8]) % index_count -// flip = (pivot + index_count - index) % index_count -// position = max(index, flip) -// source = hash( -// seed -// + uint_to_bytes(uint8(current_round)) -// + uint_to_bytes(uint32(position // 256)) -// ) -// byte = uint8(source[(position % 256) // 8]) -// bit = (byte >> (position % 8)) % 2 -// index = flip if bit else index +// def compute_shuffled_index(index: uint64, index_count: uint64, seed: Bytes32) -> uint64: +// """ +// Return the shuffled index corresponding to ``seed`` (and ``index_count``). +// """ +// assert index < index_count // -// return index +// # Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf) +// # See the 'generalized domain' algorithm on page 3 +// for current_round in range(SHUFFLE_ROUND_COUNT): +// pivot = bytes_to_uint64(hash(seed + uint_to_bytes(uint8(current_round)))[0:8]) % index_count +// flip = (pivot + index_count - index) % index_count +// position = max(index, flip) +// source = hash( +// seed +// + uint_to_bytes(uint8(current_round)) +// + uint_to_bytes(uint32(position // 256)) +// ) +// byte = uint8(source[(position % 256) // 8]) +// bit = (byte >> (position % 8)) % 2 +// index = flip if bit else index +// +// return index func ComputeShuffledIndex(index types.ValidatorIndex, indexCount uint64, seed [32]byte, shuffle bool) (types.ValidatorIndex, error) { if params.BeaconConfig().ShuffleRoundCount == 0 { return index, nil @@ -135,20 +136,21 @@ func ComputeShuffledIndex(index types.ValidatorIndex, indexCount uint64, seed [3 return index, nil } -// ShuffleList returns list of shuffled indexes in a pseudorandom permutation `p` of `0...list_size - 1` with ``seed`` as entropy. +// ShuffleList returns list of shuffled indexes in a pseudorandom permutation `p` of `0...list_size - 1` with “seed“ as entropy. // We utilize 'swap or not' shuffling in this implementation; we are allocating the memory with the seed that stays // constant between iterations instead of reallocating it each iteration as in the spec. This implementation is based // on the original implementation from protolambda, https://github.com/protolambda/eth2-shuffle -// improvements: -// - seed is always the first 32 bytes of the hash input, we just copy it into the buffer one time. -// - add round byte to seed and hash that part of the buffer. -// - split up the for-loop in two: -// 1. Handle the part from 0 (incl) to pivot (incl). This is mirrored around (pivot / 2). -// 2. Handle the part from pivot (excl) to N (excl). This is mirrored around ((pivot / 2) + (size/2)). -// - hash source every 256 iterations. -// - change byteV every 8 iterations. -// - we start at the edges, and work back to the mirror point. -// this makes us process each pear exactly once (instead of unnecessarily twice, like in the spec). +// +// improvements: +// - seed is always the first 32 bytes of the hash input, we just copy it into the buffer one time. +// - add round byte to seed and hash that part of the buffer. +// - split up the for-loop in two: +// 1. Handle the part from 0 (incl) to pivot (incl). This is mirrored around (pivot / 2). +// 2. Handle the part from pivot (excl) to N (excl). This is mirrored around ((pivot / 2) + (size/2)). 
+// - hash source every 256 iterations. +// - change byteV every 8 iterations. +// - we start at the edges, and work back to the mirror point. +// this makes us process each pear exactly once (instead of unnecessarily twice, like in the spec). func ShuffleList(input []types.ValidatorIndex, seed [32]byte) ([]types.ValidatorIndex, error) { return innerShuffleList(input, seed, true /* shuffle */) } diff --git a/beacon-chain/core/helpers/validators.go b/beacon-chain/core/helpers/validators.go index 0c10cba05..d3e021fe9 100644 --- a/beacon-chain/core/helpers/validators.go +++ b/beacon-chain/core/helpers/validators.go @@ -28,11 +28,12 @@ var CommitteeCacheInProgressHit = promauto.NewCounter(prometheus.CounterOpts{ // is active or not. // // Spec pseudocode definition: -// def is_active_validator(validator: Validator, epoch: Epoch) -> bool: -// """ -// Check if ``validator`` is active. -// """ -// return validator.activation_epoch <= epoch < validator.exit_epoch +// +// def is_active_validator(validator: Validator, epoch: Epoch) -> bool: +// """ +// Check if ``validator`` is active. +// """ +// return validator.activation_epoch <= epoch < validator.exit_epoch func IsActiveValidator(validator *ethpb.Validator, epoch types.Epoch) bool { return checkValidatorActiveStatus(validator.ActivationEpoch, validator.ExitEpoch, epoch) } @@ -50,11 +51,12 @@ func checkValidatorActiveStatus(activationEpoch, exitEpoch, epoch types.Epoch) b // is slashable or not. // // Spec pseudocode definition: -// def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool: -// """ -// Check if ``validator`` is slashable. -// """ -// return (not validator.slashed) and (validator.activation_epoch <= epoch < validator.withdrawable_epoch) +// +// def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool: +// """ +// Check if ``validator`` is slashable. +// """ +// return (not validator.slashed) and (validator.activation_epoch <= epoch < validator.withdrawable_epoch) func IsSlashableValidator(activationEpoch, withdrawableEpoch types.Epoch, slashed bool, epoch types.Epoch) bool { return checkValidatorSlashable(activationEpoch, withdrawableEpoch, slashed, epoch) } @@ -78,11 +80,12 @@ func checkValidatorSlashable(activationEpoch, withdrawableEpoch types.Epoch, sla // need the active validator indices for some specific reason. // // Spec pseudocode definition: -// def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]: -// """ -// Return the sequence of active validator indices at ``epoch``. -// """ -// return [ValidatorIndex(i) for i, v in enumerate(state.validators) if is_active_validator(v, epoch)] +// +// def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]: +// """ +// Return the sequence of active validator indices at ``epoch``. +// """ +// return [ValidatorIndex(i) for i, v in enumerate(state.validators) if is_active_validator(v, epoch)] func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, epoch types.Epoch) ([]types.ValidatorIndex, error) { seed, err := Seed(s, epoch, params.BeaconConfig().DomainBeaconAttester) if err != nil { @@ -186,11 +189,12 @@ func ActiveValidatorCount(ctx context.Context, s state.ReadOnlyBeaconState, epoc // the validator is eligible for activation and exit. // // Spec pseudocode definition: -// def compute_activation_exit_epoch(epoch: Epoch) -> Epoch: -// """ -// Return the epoch during which validator activations and exits initiated in ``epoch`` take effect. 
-// """ -// return Epoch(epoch + 1 + MAX_SEED_LOOKAHEAD) +// +// def compute_activation_exit_epoch(epoch: Epoch) -> Epoch: +// """ +// Return the epoch during which validator activations and exits initiated in ``epoch`` take effect. +// """ +// return Epoch(epoch + 1 + MAX_SEED_LOOKAHEAD) func ActivationExitEpoch(epoch types.Epoch) types.Epoch { return epoch + 1 + params.BeaconConfig().MaxSeedLookahead } @@ -199,12 +203,13 @@ func ActivationExitEpoch(epoch types.Epoch) types.Epoch { // enter and exit validator pool for an epoch. // // Spec pseudocode definition: -// def get_validator_churn_limit(state: BeaconState) -> uint64: -// """ -// Return the validator churn limit for the current epoch. -// """ -// active_validator_indices = get_active_validator_indices(state, get_current_epoch(state)) -// return max(MIN_PER_EPOCH_CHURN_LIMIT, uint64(len(active_validator_indices)) // CHURN_LIMIT_QUOTIENT) +// +// def get_validator_churn_limit(state: BeaconState) -> uint64: +// """ +// Return the validator churn limit for the current epoch. +// """ +// active_validator_indices = get_active_validator_indices(state, get_current_epoch(state)) +// return max(MIN_PER_EPOCH_CHURN_LIMIT, uint64(len(active_validator_indices)) // CHURN_LIMIT_QUOTIENT) func ValidatorChurnLimit(activeValidatorCount uint64) (uint64, error) { churnLimit := activeValidatorCount / params.BeaconConfig().ChurnLimitQuotient if churnLimit < params.BeaconConfig().MinPerEpochChurnLimit { @@ -216,14 +221,15 @@ func ValidatorChurnLimit(activeValidatorCount uint64) (uint64, error) { // BeaconProposerIndex returns proposer index of a current slot. // // Spec pseudocode definition: -// def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: -// """ -// Return the beacon proposer index at the current slot. -// """ -// epoch = get_current_epoch(state) -// seed = hash(get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + uint_to_bytes(state.slot)) -// indices = get_active_validator_indices(state, epoch) -// return compute_proposer_index(state, indices, seed) +// +// def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: +// """ +// Return the beacon proposer index at the current slot. +// """ +// epoch = get_current_epoch(state) +// seed = hash(get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + uint_to_bytes(state.slot)) +// indices = get_active_validator_indices(state, epoch) +// return compute_proposer_index(state, indices, seed) func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (types.ValidatorIndex, error) { e := time.CurrentEpoch(state) // The cache uses the state root of the previous epoch - minimum_seed_lookahead last slot as key. (e.g. Starting epoch 1, slot 32, the key would be block root at slot 31) @@ -274,21 +280,22 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) ( // ComputeProposerIndex returns the index sampled by effective balance, which is used to calculate proposer. // // Spec pseudocode definition: -// def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex: -// """ -// Return from ``indices`` a random index sampled by effective balance. 
-// """ -// assert len(indices) > 0 -// MAX_RANDOM_BYTE = 2**8 - 1 -// i = uint64(0) -// total = uint64(len(indices)) -// while True: -// candidate_index = indices[compute_shuffled_index(i % total, total, seed)] -// random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32] -// effective_balance = state.validators[candidate_index].effective_balance -// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: -// return candidate_index -// i += 1 +// +// def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex: +// """ +// Return from ``indices`` a random index sampled by effective balance. +// """ +// assert len(indices) > 0 +// MAX_RANDOM_BYTE = 2**8 - 1 +// i = uint64(0) +// total = uint64(len(indices)) +// while True: +// candidate_index = indices[compute_shuffled_index(i % total, total, seed)] +// random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32] +// effective_balance = state.validators[candidate_index].effective_balance +// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: +// return candidate_index +// i += 1 func ComputeProposerIndex(bState state.ReadOnlyValidators, activeIndices []types.ValidatorIndex, seed [32]byte) (types.ValidatorIndex, error) { length := uint64(len(activeIndices)) if length == 0 { @@ -324,14 +331,15 @@ func ComputeProposerIndex(bState state.ReadOnlyValidators, activeIndices []types // be placed into the activation queue. // // Spec pseudocode definition: -// def is_eligible_for_activation_queue(validator: Validator) -> bool: -// """ -// Check if ``validator`` is eligible to be placed into the activation queue. -// """ -// return ( -// validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH -// and validator.effective_balance == MAX_EFFECTIVE_BALANCE -// ) +// +// def is_eligible_for_activation_queue(validator: Validator) -> bool: +// """ +// Check if ``validator`` is eligible to be placed into the activation queue. +// """ +// return ( +// validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH +// and validator.effective_balance == MAX_EFFECTIVE_BALANCE +// ) func IsEligibleForActivationQueue(validator *ethpb.Validator) bool { return isEligibileForActivationQueue(validator.ActivationEligibilityEpoch, validator.EffectiveBalance) } @@ -351,16 +359,17 @@ func isEligibileForActivationQueue(activationEligibilityEpoch types.Epoch, effec // IsEligibleForActivation checks if the validator is eligible for activation. // // Spec pseudocode definition: -// def is_eligible_for_activation(state: BeaconState, validator: Validator) -> bool: -// """ -// Check if ``validator`` is eligible for activation. -// """ -// return ( -// # Placement in queue is finalized -// validator.activation_eligibility_epoch <= state.finalized_checkpoint.epoch -// # Has not yet been activated -// and validator.activation_epoch == FAR_FUTURE_EPOCH -// ) +// +// def is_eligible_for_activation(state: BeaconState, validator: Validator) -> bool: +// """ +// Check if ``validator`` is eligible for activation. 
+// """ +// return ( +// # Placement in queue is finalized +// validator.activation_eligibility_epoch <= state.finalized_checkpoint.epoch +// # Has not yet been activated +// and validator.activation_epoch == FAR_FUTURE_EPOCH +// ) func IsEligibleForActivation(state state.ReadOnlyCheckpoint, validator *ethpb.Validator) bool { finalizedEpoch := state.FinalizedCheckpointEpoch() return isEligibleForActivation(validator.ActivationEligibilityEpoch, validator.ActivationEpoch, finalizedEpoch) diff --git a/beacon-chain/core/helpers/weak_subjectivity.go b/beacon-chain/core/helpers/weak_subjectivity.go index b2f2bba97..162f2c7ca 100644 --- a/beacon-chain/core/helpers/weak_subjectivity.go +++ b/beacon-chain/core/helpers/weak_subjectivity.go @@ -25,36 +25,37 @@ import ( // https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/weak-subjectivity.md#calculating-the-weak-subjectivity-period // // def compute_weak_subjectivity_period(state: BeaconState) -> uint64: -// """ -// Returns the weak subjectivity period for the current ``state``. -// This computation takes into account the effect of: -// - validator set churn (bounded by ``get_validator_churn_limit()`` per epoch), and -// - validator balance top-ups (bounded by ``MAX_DEPOSITS * SLOTS_PER_EPOCH`` per epoch). -// A detailed calculation can be found at: -// https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf -// """ -// ws_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY -// N = len(get_active_validator_indices(state, get_current_epoch(state))) -// t = get_total_active_balance(state) // N // ETH_TO_GWEI -// T = MAX_EFFECTIVE_BALANCE // ETH_TO_GWEI -// delta = get_validator_churn_limit(state) -// Delta = MAX_DEPOSITS * SLOTS_PER_EPOCH -// D = SAFETY_DECAY // -// if T * (200 + 3 * D) < t * (200 + 12 * D): -// epochs_for_validator_set_churn = ( -// N * (t * (200 + 12 * D) - T * (200 + 3 * D)) // (600 * delta * (2 * t + T)) -// ) -// epochs_for_balance_top_ups = ( -// N * (200 + 3 * D) // (600 * Delta) -// ) -// ws_period += max(epochs_for_validator_set_churn, epochs_for_balance_top_ups) -// else: -// ws_period += ( -// 3 * N * D * t // (200 * Delta * (T - t)) -// ) +// """ +// Returns the weak subjectivity period for the current ``state``. +// This computation takes into account the effect of: +// - validator set churn (bounded by ``get_validator_churn_limit()`` per epoch), and +// - validator balance top-ups (bounded by ``MAX_DEPOSITS * SLOTS_PER_EPOCH`` per epoch). 
+// A detailed calculation can be found at: +// https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf +// """ +// ws_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY +// N = len(get_active_validator_indices(state, get_current_epoch(state))) +// t = get_total_active_balance(state) // N // ETH_TO_GWEI +// T = MAX_EFFECTIVE_BALANCE // ETH_TO_GWEI +// delta = get_validator_churn_limit(state) +// Delta = MAX_DEPOSITS * SLOTS_PER_EPOCH +// D = SAFETY_DECAY // -// return ws_period +// if T * (200 + 3 * D) < t * (200 + 12 * D): +// epochs_for_validator_set_churn = ( +// N * (t * (200 + 12 * D) - T * (200 + 3 * D)) // (600 * delta * (2 * t + T)) +// ) +// epochs_for_balance_top_ups = ( +// N * (200 + 3 * D) // (600 * Delta) +// ) +// ws_period += max(epochs_for_validator_set_churn, epochs_for_balance_top_ups) +// else: +// ws_period += ( +// 3 * N * D * t // (200 * Delta * (T - t)) +// ) +// +// return ws_period func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconState, cfg *params.BeaconChainConfig) (types.Epoch, error) { // Weak subjectivity period cannot be smaller than withdrawal delay. wsp := uint64(cfg.MinValidatorWithdrawabilityDelay) @@ -114,14 +115,15 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS // https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/weak-subjectivity.md#checking-for-stale-weak-subjectivity-checkpoint // // def is_within_weak_subjectivity_period(store: Store, ws_state: BeaconState, ws_checkpoint: Checkpoint) -> bool: -// # Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint -// assert ws_state.latest_block_header.state_root == ws_checkpoint.root -// assert compute_epoch_at_slot(ws_state.slot) == ws_checkpoint.epoch // -// ws_period = compute_weak_subjectivity_period(ws_state) -// ws_state_epoch = compute_epoch_at_slot(ws_state.slot) -// current_epoch = compute_epoch_at_slot(get_current_slot(store)) -// return current_epoch <= ws_state_epoch + ws_period +// # Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint +// assert ws_state.latest_block_header.state_root == ws_checkpoint.root +// assert compute_epoch_at_slot(ws_state.slot) == ws_checkpoint.epoch +// +// ws_period = compute_weak_subjectivity_period(ws_state) +// ws_state_epoch = compute_epoch_at_slot(ws_state.slot) +// current_epoch = compute_epoch_at_slot(get_current_slot(store)) +// return current_epoch <= ws_state_epoch + ws_period func IsWithinWeakSubjectivityPeriod( ctx context.Context, currentEpoch types.Epoch, wsState state.ReadOnlyBeaconState, wsStateRoot [fieldparams.RootLength]byte, wsEpoch types.Epoch, cfg *params.BeaconChainConfig) (bool, error) { // Make sure that incoming objects are not nil. diff --git a/beacon-chain/core/signing/domain.go b/beacon-chain/core/signing/domain.go index 2cbcad48d..97ce9925e 100644 --- a/beacon-chain/core/signing/domain.go +++ b/beacon-chain/core/signing/domain.go @@ -10,13 +10,14 @@ import ( // Domain returns the domain version for BLS private key to sign and verify. // // Spec pseudocode definition: -// def get_domain(state: BeaconState, domain_type: DomainType, epoch: Epoch=None) -> Domain: -// """ -// Return the signature domain (fork version concatenated with domain type) of a message. 
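// Illustrative sketch, not part of this patch: the integer arithmetic of
// compute_weak_subjectivity_period above, with mainnet-style constants assumed
// inline (the real helper reads them from the BeaconChainConfig and a
// ReadOnlyBeaconState).
package main

import "fmt"

const (
	minValidatorWithdrawabilityDelay = 256
	maxEffectiveBalanceETH           = 32 // T
	maxDeposits                      = 16
	slotsPerEpoch                    = 32
	safetyDecay                      = 10 // D
	minPerEpochChurnLimit            = 4
	churnLimitQuotient               = 65536
)

// weakSubjectivityPeriod takes the active validator count n and the average
// active balance t in whole ETH, and returns the period in epochs.
func weakSubjectivityPeriod(n, t uint64) uint64 {
	wsPeriod := uint64(minValidatorWithdrawabilityDelay)
	T := uint64(maxEffectiveBalanceETH)
	D := uint64(safetyDecay)
	bigDelta := uint64(maxDeposits * slotsPerEpoch) // Delta in the spec
	delta := n / churnLimitQuotient                 // validator churn limit
	if delta < minPerEpochChurnLimit {
		delta = minPerEpochChurnLimit
	}

	if T*(200+3*D) < t*(200+12*D) {
		churnEpochs := n * (t*(200+12*D) - T*(200+3*D)) / (600 * delta * (2*t + T))
		topUpEpochs := n * (200 + 3*D) / (600 * bigDelta)
		if churnEpochs > topUpEpochs {
			wsPeriod += churnEpochs
		} else {
			wsPeriod += topUpEpochs
		}
	} else {
		wsPeriod += 3 * n * D * t / (200 * bigDelta * (T - t))
	}
	return wsPeriod
}

func main() {
	// Hypothetical network: 500k active validators averaging 31 ETH each.
	fmt.Println(weakSubjectivityPeriod(500_000, 31)) // 3498
}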
-// """ -// epoch = get_current_epoch(state) if epoch is None else epoch -// fork_version = state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version -// return compute_domain(domain_type, fork_version, state.genesis_validators_root) +// +// def get_domain(state: BeaconState, domain_type: DomainType, epoch: Epoch=None) -> Domain: +// """ +// Return the signature domain (fork version concatenated with domain type) of a message. +// """ +// epoch = get_current_epoch(state) if epoch is None else epoch +// fork_version = state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version +// return compute_domain(domain_type, fork_version, state.genesis_validators_root) func Domain(fork *eth.Fork, epoch types.Epoch, domainType [bls.DomainByteLength]byte, genesisRoot []byte) ([]byte, error) { if fork == nil { return []byte{}, errors.New("nil fork or domain type") diff --git a/beacon-chain/core/signing/signing_root.go b/beacon-chain/core/signing/signing_root.go index 0ad7670ad..9859ffc9c 100644 --- a/beacon-chain/core/signing/signing_root.go +++ b/beacon-chain/core/signing/signing_root.go @@ -37,14 +37,15 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch types.Epoch, obj f // ComputeSigningRoot computes the root of the object by calculating the hash tree root of the signing data with the given domain. // // Spec pseudocode definition: -// def compute_signing_root(ssz_object: SSZObject, domain: Domain) -> Root: -// """ -// Return the signing root for the corresponding signing data. -// """ -// return hash_tree_root(SigningData( -// object_root=hash_tree_root(ssz_object), -// domain=domain, -// )) +// +// def compute_signing_root(ssz_object: SSZObject, domain: Domain) -> Root: +// """ +// Return the signing root for the corresponding signing data. +// """ +// return hash_tree_root(SigningData( +// object_root=hash_tree_root(ssz_object), +// domain=domain, +// )) func ComputeSigningRoot(object fssz.HashRoot, domain []byte) ([32]byte, error) { return signingData(object.HashTreeRoot, domain) } @@ -160,15 +161,16 @@ func BlockSignatureBatch(pub, signature, domain []byte, rootFunc func() ([32]byt // array as the fork version. // // def compute_domain(domain_type: DomainType, fork_version: Version=None, genesis_validators_root: Root=None) -> Domain: -// """ -// Return the domain for the ``domain_type`` and ``fork_version``. -// """ -// if fork_version is None: -// fork_version = GENESIS_FORK_VERSION -// if genesis_validators_root is None: -// genesis_validators_root = Root() # all bytes zero by default -// fork_data_root = compute_fork_data_root(fork_version, genesis_validators_root) -// return Domain(domain_type + fork_data_root[:28]) +// +// """ +// Return the domain for the ``domain_type`` and ``fork_version``. +// """ +// if fork_version is None: +// fork_version = GENESIS_FORK_VERSION +// if genesis_validators_root is None: +// genesis_validators_root = Root() # all bytes zero by default +// fork_data_root = compute_fork_data_root(fork_version, genesis_validators_root) +// return Domain(domain_type + fork_data_root[:28]) func ComputeDomain(domainType [DomainByteLength]byte, forkVersion, genesisValidatorsRoot []byte) ([]byte, error) { if forkVersion == nil { forkVersion = params.BeaconConfig().GenesisForkVersion @@ -195,19 +197,20 @@ func domain(domainType [DomainByteLength]byte, forkDataRoot []byte) []byte { return b } -// this returns the 32byte fork data root for the ``current_version`` and ``genesis_validators_root``. 
+// this returns the 32byte fork data root for the “current_version“ and “genesis_validators_root“. // This is used primarily in signature domains to avoid collisions across forks/chains. // // Spec pseudocode definition: -// def compute_fork_data_root(current_version: Version, genesis_validators_root: Root) -> Root: -// """ -// Return the 32-byte fork data root for the ``current_version`` and ``genesis_validators_root``. -// This is used primarily in signature domains to avoid collisions across forks/chains. -// """ -// return hash_tree_root(ForkData( -// current_version=current_version, -// genesis_validators_root=genesis_validators_root, -// )) +// +// def compute_fork_data_root(current_version: Version, genesis_validators_root: Root) -> Root: +// """ +// Return the 32-byte fork data root for the ``current_version`` and ``genesis_validators_root``. +// This is used primarily in signature domains to avoid collisions across forks/chains. +// """ +// return hash_tree_root(ForkData( +// current_version=current_version, +// genesis_validators_root=genesis_validators_root, +// )) func computeForkDataRoot(version, root []byte) ([32]byte, error) { r, err := (ðpb.ForkData{ CurrentVersion: version, @@ -222,13 +225,14 @@ func computeForkDataRoot(version, root []byte) ([32]byte, error) { // ComputeForkDigest returns the fork for the current version and genesis validators root // // Spec pseudocode definition: -// def compute_fork_digest(current_version: Version, genesis_validators_root: Root) -> ForkDigest: -// """ -// Return the 4-byte fork digest for the ``current_version`` and ``genesis_validators_root``. -// This is a digest primarily used for domain separation on the p2p layer. -// 4-bytes suffices for practical separation of forks/chains. -// """ -// return ForkDigest(compute_fork_data_root(current_version, genesis_validators_root)[:4]) +// +// def compute_fork_digest(current_version: Version, genesis_validators_root: Root) -> ForkDigest: +// """ +// Return the 4-byte fork digest for the ``current_version`` and ``genesis_validators_root``. +// This is a digest primarily used for domain separation on the p2p layer. +// 4-bytes suffices for practical separation of forks/chains. +// """ +// return ForkDigest(compute_fork_data_root(current_version, genesis_validators_root)[:4]) func ComputeForkDigest(version, genesisValidatorsRoot []byte) ([4]byte, error) { dataRoot, err := computeForkDataRoot(version, genesisValidatorsRoot) if err != nil { diff --git a/beacon-chain/core/time/slot_epoch.go b/beacon-chain/core/time/slot_epoch.go index 41989ec28..1bbb2319d 100644 --- a/beacon-chain/core/time/slot_epoch.go +++ b/beacon-chain/core/time/slot_epoch.go @@ -12,11 +12,12 @@ import ( // the slot number stored in beacon state. // // Spec pseudocode definition: -// def get_current_epoch(state: BeaconState) -> Epoch: -// """ -// Return the current epoch. -// """ -// return compute_epoch_at_slot(state.slot) +// +// def get_current_epoch(state: BeaconState) -> Epoch: +// """ +// Return the current epoch. +// """ +// return compute_epoch_at_slot(state.slot) func CurrentEpoch(state state.ReadOnlyBeaconState) types.Epoch { return slots.ToEpoch(state.Slot()) } @@ -26,12 +27,13 @@ func CurrentEpoch(state state.ReadOnlyBeaconState) types.Epoch { // underflow condition. // // Spec pseudocode definition: -// def get_previous_epoch(state: BeaconState) -> Epoch: -// """` -// Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``). 
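// Illustrative sketch, not part of this patch: the slot/epoch arithmetic behind
// CurrentEpoch above and the PrevEpoch/CanProcessEpoch helpers in the same hunk,
// assuming the mainnet-style SLOTS_PER_EPOCH of 32.
package main

import "fmt"

const slotsPerEpoch = 32

func epochAtSlot(slot uint64) uint64 { return slot / slotsPerEpoch }

// prevEpoch guards the genesis case so the subtraction cannot underflow.
func prevEpoch(slot uint64) uint64 {
	e := epochAtSlot(slot)
	if e == 0 {
		return 0
	}
	return e - 1
}

// canProcessEpoch is true only on the last slot of an epoch.
func canProcessEpoch(slot uint64) bool { return (slot+1)%slotsPerEpoch == 0 }

func main() {
	fmt.Println(epochAtSlot(100), prevEpoch(100), canProcessEpoch(63)) // 3 2 true
}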
-// """ -// current_epoch = get_current_epoch(state) -// return GENESIS_EPOCH if current_epoch == GENESIS_EPOCH else Epoch(current_epoch - 1) +// +// def get_previous_epoch(state: BeaconState) -> Epoch: +// """` +// Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``). +// """ +// current_epoch = get_current_epoch(state) +// return GENESIS_EPOCH if current_epoch == GENESIS_EPOCH else Epoch(current_epoch - 1) func PrevEpoch(state state.ReadOnlyBeaconState) types.Epoch { currentEpoch := CurrentEpoch(state) if currentEpoch == 0 { @@ -83,7 +85,8 @@ func CanUpgradeToCapella(slot types.Slot) bool { // The epoch can be processed at the end of the last slot of every epoch. // // Spec pseudocode definition: -// If (state.slot + 1) % SLOTS_PER_EPOCH == 0: +// +// If (state.slot + 1) % SLOTS_PER_EPOCH == 0: func CanProcessEpoch(state state.ReadOnlyBeaconState) bool { return (state.Slot()+1)%params.BeaconConfig().SlotsPerEpoch == 0 } diff --git a/beacon-chain/core/transition/state.go b/beacon-chain/core/transition/state.go index 5a37a631f..7ebdb3b03 100644 --- a/beacon-chain/core/transition/state.go +++ b/beacon-chain/core/transition/state.go @@ -17,41 +17,43 @@ import ( // full deposits were made to the deposit contract and the ChainStart log gets emitted. // // Spec pseudocode definition: -// def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32, -// eth1_timestamp: uint64, -// deposits: Sequence[Deposit]) -> BeaconState: -// fork = Fork( -// previous_version=GENESIS_FORK_VERSION, -// current_version=GENESIS_FORK_VERSION, -// epoch=GENESIS_EPOCH, -// ) -// state = BeaconState( -// genesis_time=eth1_timestamp + GENESIS_DELAY, -// fork=fork, -// eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), -// latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), -// randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy -// ) // -// # Process deposits -// leaves = list(map(lambda deposit: deposit.data, deposits)) -// for index, deposit in enumerate(deposits): -// deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1]) -// state.eth1_data.deposit_root = hash_tree_root(deposit_data_list) -// process_deposit(state, deposit) +// def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32, +// eth1_timestamp: uint64, +// deposits: Sequence[Deposit]) -> BeaconState: +// fork = Fork( +// previous_version=GENESIS_FORK_VERSION, +// current_version=GENESIS_FORK_VERSION, +// epoch=GENESIS_EPOCH, +// ) +// state = BeaconState( +// genesis_time=eth1_timestamp + GENESIS_DELAY, +// fork=fork, +// eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), +// latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), +// randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy +// ) // -// # Process activations -// for index, validator in enumerate(state.validators): -// balance = state.balances[index] -// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) -// if validator.effective_balance == MAX_EFFECTIVE_BALANCE: -// validator.activation_eligibility_epoch = GENESIS_EPOCH -// validator.activation_epoch = GENESIS_EPOCH +// # Process deposits +// leaves = list(map(lambda deposit: deposit.data, deposits)) +// for index, deposit in enumerate(deposits): +// deposit_data_list = List[DepositData, 
2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1]) +// state.eth1_data.deposit_root = hash_tree_root(deposit_data_list) +// process_deposit(state, deposit) // -// # Set genesis validators root for domain separation and chain versioning -// state.genesis_validators_root = hash_tree_root(state.validators) +// # Process activations +// for index, validator in enumerate(state.validators): +// balance = state.balances[index] +// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) +// if validator.effective_balance == MAX_EFFECTIVE_BALANCE: +// validator.activation_eligibility_epoch = GENESIS_EPOCH +// validator.activation_epoch = GENESIS_EPOCH +// +// # Set genesis validators root for domain separation and chain versioning +// state.genesis_validators_root = hash_tree_root(state.validators) +// +// return state // -// return state // This method differs from the spec so as to process deposits beforehand instead of the end of the function. func GenesisBeaconState(ctx context.Context, deposits []*ethpb.Deposit, genesisTime uint64, eth1Data *ethpb.Eth1Data) (state.BeaconState, error) { st, err := EmptyGenesisState() @@ -211,12 +213,14 @@ func EmptyGenesisState() (state.BeaconState, error) { // if the minimum genesis time arrived already. // // Spec pseudocode definition: -// def is_valid_genesis_state(state: BeaconState) -> bool: -// if state.genesis_time < MIN_GENESIS_TIME: -// return False -// if len(get_active_validator_indices(state, GENESIS_EPOCH)) < MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: -// return False -// return True +// +// def is_valid_genesis_state(state: BeaconState) -> bool: +// if state.genesis_time < MIN_GENESIS_TIME: +// return False +// if len(get_active_validator_indices(state, GENESIS_EPOCH)) < MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: +// return False +// return True +// // This method has been modified from the spec to allow whole states not to be saved // but instead only cache the relevant information. 
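// Illustrative sketch, not part of this patch: the two conditions from
// is_valid_genesis_state above, with mainnet-style thresholds assumed inline.
// The Prysm version below works from the cached deposit count and current time
// rather than a full state, as its comment explains.
package main

import "fmt"

const (
	minGenesisTime                 = 1606824000 // assumed mainnet MIN_GENESIS_TIME
	minGenesisActiveValidatorCount = 16384
)

func isValidGenesisState(activeValidatorCount, genesisTime uint64) bool {
	if genesisTime < minGenesisTime {
		return false
	}
	return activeValidatorCount >= minGenesisActiveValidatorCount
}

func main() {
	fmt.Println(isValidGenesisState(21_000, 1_606_824_023)) // true
}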
func IsValidGenesisState(chainStartDepositCount, currentTime uint64) bool { diff --git a/beacon-chain/core/transition/transition.go b/beacon-chain/core/transition/transition.go index a71b3ca43..c523441e1 100644 --- a/beacon-chain/core/transition/transition.go +++ b/beacon-chain/core/transition/transition.go @@ -33,18 +33,19 @@ import ( // See: ExecuteStateTransitionNoVerifyAnySig // // Spec pseudocode definition: -// def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> None: -// block = signed_block.message -// # Process slots (including those with no blocks) since block -// process_slots(state, block.slot) -// # Verify signature -// if validate_result: -// assert verify_block_signature(state, signed_block) -// # Process block -// process_block(state, block) -// # Verify state root -// if validate_result: -// assert block.state_root == hash_tree_root(state) +// +// def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> None: +// block = signed_block.message +// # Process slots (including those with no blocks) since block +// process_slots(state, block.slot) +// # Verify signature +// if validate_result: +// assert verify_block_signature(state, signed_block) +// # Process block +// process_block(state, block) +// # Verify state root +// if validate_result: +// assert block.state_root == hash_tree_root(state) func ExecuteStateTransition( ctx context.Context, state state.BeaconState, @@ -80,16 +81,16 @@ func ExecuteStateTransition( // It happens regardless if there's an incoming block or not. // Spec pseudocode definition: // -// def process_slot(state: BeaconState) -> None: -// # Cache state root -// previous_state_root = hash_tree_root(state) -// state.state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_state_root -// # Cache latest block header state root -// if state.latest_block_header.state_root == Bytes32(): -// state.latest_block_header.state_root = previous_state_root -// # Cache block root -// previous_block_root = hash_tree_root(state.latest_block_header) -// state.block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_block_root +// def process_slot(state: BeaconState) -> None: +// # Cache state root +// previous_state_root = hash_tree_root(state) +// state.state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_state_root +// # Cache latest block header state root +// if state.latest_block_header.state_root == Bytes32(): +// state.latest_block_header.state_root = previous_state_root +// # Cache block root +// previous_block_root = hash_tree_root(state.latest_block_header) +// state.block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_block_root func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconState, error) { ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlot") defer span.End() @@ -177,14 +178,15 @@ func ProcessSlotsIfPossible(ctx context.Context, state state.BeaconState, target // ProcessSlots process through skip slots and apply epoch transition when it's needed // // Spec pseudocode definition: -// def process_slots(state: BeaconState, slot: Slot) -> None: -// assert state.slot < slot -// while state.slot < slot: -// process_slot(state) -// # Process epoch on the start slot of the next epoch -// if (state.slot + 1) % SLOTS_PER_EPOCH == 0: -// process_epoch(state) -// state.slot = Slot(state.slot + 1) +// +// def process_slots(state: BeaconState, slot: Slot) -> None: +// assert state.slot < slot +// while 
state.slot < slot: +// process_slot(state) +// # Process epoch on the start slot of the next epoch +// if (state.slot + 1) % SLOTS_PER_EPOCH == 0: +// process_epoch(state) +// state.slot = Slot(state.slot + 1) func ProcessSlots(ctx context.Context, state state.BeaconState, slot types.Slot) (state.BeaconState, error) { ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlots") defer span.End() diff --git a/beacon-chain/core/transition/transition_no_verify_sig.go b/beacon-chain/core/transition/transition_no_verify_sig.go index 699cff739..650220e34 100644 --- a/beacon-chain/core/transition/transition_no_verify_sig.go +++ b/beacon-chain/core/transition/transition_no_verify_sig.go @@ -28,18 +28,19 @@ import ( // This method also modifies the passed in state. // // Spec pseudocode definition: -// def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> None: -// block = signed_block.message -// # Process slots (including those with no blocks) since block -// process_slots(state, block.slot) -// # Verify signature -// if validate_result: -// assert verify_block_signature(state, signed_block) -// # Process block -// process_block(state, block) -// # Verify state root -// if validate_result: -// assert block.state_root == hash_tree_root(state) +// +// def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> None: +// block = signed_block.message +// # Process slots (including those with no blocks) since block +// process_slots(state, block.slot) +// # Verify signature +// if validate_result: +// assert verify_block_signature(state, signed_block) +// # Process block +// process_block(state, block) +// # Verify state root +// if validate_result: +// assert block.state_root == hash_tree_root(state) func ExecuteStateTransitionNoVerifyAnySig( ctx context.Context, st state.BeaconState, @@ -94,18 +95,19 @@ func ExecuteStateTransitionNoVerifyAnySig( // This is used for proposer to compute state root before proposing a new block, and this does not modify state. 
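// Illustrative sketch, not part of this patch: the control flow of process_slots
// above, reduced to a skeleton over a toy state so the slot/epoch ordering is
// visible. processSlot and processEpoch are stand-ins for the real per-slot root
// caching and epoch transition.
package main

import (
	"errors"
	"fmt"
)

const slotsPerEpoch = 32

type toyState struct{ slot uint64 }

func processSlot(_ *toyState)  { /* cache state and block roots in the real code */ }
func processEpoch(s *toyState) { fmt.Println("epoch transition before slot", s.slot+1) }

// processSlots advances one slot at a time, running the epoch transition on the
// last slot of each epoch, mirroring the assert/while structure of the pseudocode.
func processSlots(s *toyState, target uint64) error {
	if s.slot >= target {
		return errors.New("target slot must be greater than the state slot")
	}
	for s.slot < target {
		processSlot(s)
		if (s.slot+1)%slotsPerEpoch == 0 {
			processEpoch(s)
		}
		s.slot++
	}
	return nil
}

func main() {
	s := &toyState{slot: 30}
	_ = processSlots(s, 34) // crosses the epoch boundary between slots 31 and 32
	fmt.Println("state now at slot", s.slot)
}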
// // Spec pseudocode definition: -// def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> None: -// block = signed_block.message -// # Process slots (including those with no blocks) since block -// process_slots(state, block.slot) -// # Verify signature -// if validate_result: -// assert verify_block_signature(state, signed_block) -// # Process block -// process_block(state, block) -// # Verify state root -// if validate_result: -// assert block.state_root == hash_tree_root(state) +// +// def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> None: +// block = signed_block.message +// # Process slots (including those with no blocks) since block +// process_slots(state, block.slot) +// # Verify signature +// if validate_result: +// assert verify_block_signature(state, signed_block) +// # Process block +// process_block(state, block) +// # Verify state root +// if validate_result: +// assert block.state_root == hash_tree_root(state) func CalculateStateRoot( ctx context.Context, state state.BeaconState, @@ -151,11 +153,11 @@ func CalculateStateRoot( // // Spec pseudocode definition: // -// def process_block(state: BeaconState, block: BeaconBlock) -> None: -// process_block_header(state, block) -// process_randao(state, block.body) -// process_eth1_data(state, block.body) -// process_operations(state, block.body) +// def process_block(state: BeaconState, block: BeaconBlock) -> None: +// process_block_header(state, block) +// process_randao(state, block.body) +// process_eth1_data(state, block.body) +// process_operations(state, block.body) func ProcessBlockNoVerifyAnySig( ctx context.Context, st state.BeaconState, @@ -209,19 +211,19 @@ func ProcessBlockNoVerifyAnySig( // // Spec pseudocode definition: // -// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: -// # Verify that outstanding deposits are processed up to the maximum number of deposits -// assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index) +// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: +// # Verify that outstanding deposits are processed up to the maximum number of deposits +// assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index) // -// def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: -// for operation in operations: -// fn(state, operation) +// def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: +// for operation in operations: +// fn(state, operation) // -// for_ops(body.proposer_slashings, process_proposer_slashing) -// for_ops(body.attester_slashings, process_attester_slashing) -// for_ops(body.attestations, process_attestation) -// for_ops(body.deposits, process_deposit) -// for_ops(body.voluntary_exits, process_voluntary_exit) +// for_ops(body.proposer_slashings, process_proposer_slashing) +// for_ops(body.attester_slashings, process_attester_slashing) +// for_ops(body.attestations, process_attestation) +// for_ops(body.deposits, process_deposit) +// for_ops(body.voluntary_exits, process_voluntary_exit) func ProcessOperationsNoVerifyAttsSigs( ctx context.Context, state state.BeaconState, @@ -260,13 +262,14 @@ func ProcessOperationsNoVerifyAttsSigs( // // Spec pseudocode definition: // def process_block(state: BeaconState, block: BeaconBlock) -> None: -// process_block_header(state, block) -// 
if is_execution_enabled(state, block.body): -// process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [New in Bellatrix] -// process_randao(state, block.body) -// process_eth1_data(state, block.body) -// process_operations(state, block.body) -// process_sync_aggregate(state, block.body.sync_aggregate) +// +// process_block_header(state, block) +// if is_execution_enabled(state, block.body): +// process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [New in Bellatrix] +// process_randao(state, block.body) +// process_eth1_data(state, block.body) +// process_operations(state, block.body) +// process_sync_aggregate(state, block.body.sync_aggregate) func ProcessBlockForStateRoot( ctx context.Context, state state.BeaconState, diff --git a/beacon-chain/core/validators/validator.go b/beacon-chain/core/validators/validator.go index d24d17637..d196b3e6f 100644 --- a/beacon-chain/core/validators/validator.go +++ b/beacon-chain/core/validators/validator.go @@ -22,25 +22,26 @@ import ( // validator with correct voluntary exit parameters. // // Spec pseudocode definition: -// def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: -// """ -// Initiate the exit of the validator with index ``index``. -// """ -// # Return if validator already initiated exit -// validator = state.validators[index] -// if validator.exit_epoch != FAR_FUTURE_EPOCH: -// return // -// # Compute exit queue epoch -// exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH] -// exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))]) -// exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch]) -// if exit_queue_churn >= get_validator_churn_limit(state): -// exit_queue_epoch += Epoch(1) +// def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: +// """ +// Initiate the exit of the validator with index ``index``. +// """ +// # Return if validator already initiated exit +// validator = state.validators[index] +// if validator.exit_epoch != FAR_FUTURE_EPOCH: +// return // -// # Set validator exit epoch and withdrawable epoch -// validator.exit_epoch = exit_queue_epoch -// validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY) +// # Compute exit queue epoch +// exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH] +// exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))]) +// exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch]) +// if exit_queue_churn >= get_validator_churn_limit(state): +// exit_queue_epoch += Epoch(1) +// +// # Set validator exit epoch and withdrawable epoch +// validator.exit_epoch = exit_queue_epoch +// validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY) func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx types.ValidatorIndex) (state.BeaconState, error) { validator, err := s.ValidatorAtIndex(idx) if err != nil { @@ -114,28 +115,29 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx types.V // the whistleblower's balance. // // Spec pseudocode definition: -// def slash_validator(state: BeaconState, -// slashed_index: ValidatorIndex, -// whistleblower_index: ValidatorIndex=None) -> None: -// """ -// Slash the validator with index ``slashed_index``. 
-// """ -// epoch = get_current_epoch(state) -// initiate_validator_exit(state, slashed_index) -// validator = state.validators[slashed_index] -// validator.slashed = True -// validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR)) -// state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance -// decrease_balance(state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT) // -// # Apply proposer and whistleblower rewards -// proposer_index = get_beacon_proposer_index(state) -// if whistleblower_index is None: -// whistleblower_index = proposer_index -// whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT) -// proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT) -// increase_balance(state, proposer_index, proposer_reward) -// increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward)) +// def slash_validator(state: BeaconState, +// slashed_index: ValidatorIndex, +// whistleblower_index: ValidatorIndex=None) -> None: +// """ +// Slash the validator with index ``slashed_index``. +// """ +// epoch = get_current_epoch(state) +// initiate_validator_exit(state, slashed_index) +// validator = state.validators[slashed_index] +// validator.slashed = True +// validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR)) +// state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance +// decrease_balance(state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT) +// +// # Apply proposer and whistleblower rewards +// proposer_index = get_beacon_proposer_index(state) +// if whistleblower_index is None: +// whistleblower_index = proposer_index +// whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT) +// proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT) +// increase_balance(state, proposer_index, proposer_reward) +// increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward)) func SlashValidator( ctx context.Context, s state.BeaconState, diff --git a/beacon-chain/db/filters/filter.go b/beacon-chain/db/filters/filter.go index 88aa43670..fa8b7d686 100644 --- a/beacon-chain/db/filters/filter.go +++ b/beacon-chain/db/filters/filter.go @@ -3,15 +3,15 @@ // For example, one can specify a filter query for data by start epoch + end epoch + shard // for attestations, build a filter as follows, and respond to it accordingly: // -// f := filters.NewFilter().SetStartEpoch(3).SetEndEpoch(5) -// for k, v := range f.Filters() { -// switch k { -// case filters.StartEpoch: -// // Verify data matches filter criteria... -// case filters.EndEpoch: -// // Verify data matches filter criteria... -// } -// } +// f := filters.NewFilter().SetStartEpoch(3).SetEndEpoch(5) +// for k, v := range f.Filters() { +// switch k { +// case filters.StartEpoch: +// // Verify data matches filter criteria... +// case filters.EndEpoch: +// // Verify data matches filter criteria... 
+// } +// } package filters import types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives" diff --git a/beacon-chain/db/kv/finalized_block_roots_test.go b/beacon-chain/db/kv/finalized_block_roots_test.go index a2db80eaa..85f36153c 100644 --- a/beacon-chain/db/kv/finalized_block_roots_test.go +++ b/beacon-chain/db/kv/finalized_block_roots_test.go @@ -76,7 +76,9 @@ func TestStore_IsFinalizedBlockGenesis(t *testing.T) { // Example: // 0 1 2 3 4 5 6 slot // a <- b <-- d <- e <- f <- g roots -// ^- c +// +// ^- c +// // Imagine that epochs are 2 slots and that epoch 1, 2, and 3 are finalized. Checkpoint roots would // be c, e, and g. In this scenario, c was a finalized checkpoint root but no block built upon it so // it should not be considered "final and canonical" in the view at slot 6. diff --git a/beacon-chain/execution/engine_client.go b/beacon-chain/execution/engine_client.go index 0e3218055..fb2ea1b06 100644 --- a/beacon-chain/execution/engine_client.go +++ b/beacon-chain/execution/engine_client.go @@ -214,15 +214,16 @@ func (s *Service) ExchangeTransitionConfiguration( // // Spec code: // def get_pow_block_at_terminal_total_difficulty(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]: -// # `pow_chain` abstractly represents all blocks in the PoW chain -// for block in pow_chain: -// parent = pow_chain[block.parent_hash] -// block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY -// parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY -// if block_reached_ttd and not parent_reached_ttd: -// return block // -// return None +// # `pow_chain` abstractly represents all blocks in the PoW chain +// for block in pow_chain: +// parent = pow_chain[block.parent_hash] +// block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY +// parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY +// if block_reached_ttd and not parent_reached_ttd: +// return block +// +// return None func (s *Service) GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error) { ttd := new(big.Int) ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10) diff --git a/beacon-chain/forkchoice/doubly-linked-tree/on_tick.go b/beacon-chain/forkchoice/doubly-linked-tree/on_tick.go index 552d8d733..db2b8efb1 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/on_tick.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/on_tick.go @@ -15,20 +15,21 @@ import ( // This should only be called at the start of every slot interval. 
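// Illustrative sketch, not part of this patch: the terminal-block condition from
// get_pow_block_at_terminal_total_difficulty above. A PoW block is terminal when
// it reaches the terminal total difficulty while its parent has not; big.Int is
// used because total difficulty does not fit in a uint64.
package main

import (
	"fmt"
	"math/big"
)

// isTerminalBlock mirrors: block_reached_ttd and not parent_reached_ttd.
func isTerminalBlock(parentTD, blockTD, ttd *big.Int) bool {
	blockReached := blockTD.Cmp(ttd) >= 0
	parentReached := parentTD.Cmp(ttd) >= 0
	return blockReached && !parentReached
}

func main() {
	ttd, _ := new(big.Int).SetString("58750000000000000000000", 10) // mainnet terminal total difficulty
	parent, _ := new(big.Int).SetString("58749999999999999999999", 10)
	block, _ := new(big.Int).SetString("58750000000000000000001", 10)
	fmt.Println(isTerminalBlock(parent, block, ttd)) // true
}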
// // Spec pseudocode definition: -// # Reset store.proposer_boost_root if this is a new slot -// if current_slot > previous_slot: -// store.proposer_boost_root = Root() // -// # Not a new epoch, return -// if not (current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0): -// return +// # Reset store.proposer_boost_root if this is a new slot +// if current_slot > previous_slot: +// store.proposer_boost_root = Root() // -// # Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain -// if store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch: -// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) -// ancestor_at_finalized_slot = get_ancestor(store, store.best_justified_checkpoint.root, finalized_slot) -// if ancestor_at_finalized_slot == store.finalized_checkpoint.root: -// store.justified_checkpoint = store.best_justified_checkpoint +// # Not a new epoch, return +// if not (current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0): +// return +// +// # Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain +// if store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch: +// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) +// ancestor_at_finalized_slot = get_ancestor(store, store.best_justified_checkpoint.root, finalized_slot) +// if ancestor_at_finalized_slot == store.finalized_checkpoint.root: +// store.justified_checkpoint = store.best_justified_checkpoint func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error { // Reset proposer boost root if err := f.ResetBoostedProposerRoot(ctx); err != nil { diff --git a/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync_test.go b/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync_test.go index f6c2fad2d..e3a7a41c4 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/optimistic_sync_test.go @@ -13,16 +13,15 @@ import ( // We test the algorithm to update a node from SYNCING to INVALID // We start with the same diagram as above: // -// E -- F -// / -// C -- D -// / \ -// A -- B G -- H -- I -// \ \ -// J -- K -- L +// E -- F +// / +// C -- D +// / \ +// A -- B G -- H -- I +// \ \ +// J -- K -- L // // And every block in the Fork choice is optimistic. 
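A minimal sketch of the behaviour the tests above exercise: updating a node from SYNCING to INVALID amounts to flagging the node and propagating that status to every descendant. The node type and markInvalid helper below are toy constructs for illustration only, not Prysm's forkchoice structures:

package main

import "fmt"

// node is a toy stand-in for a forkchoice tree node.
type node struct {
	root       [32]byte
	optimistic bool
	invalid    bool
	children   []*node
}

// markInvalid flags n and all of its descendants as INVALID; an invalid block
// is no longer optimistic, and none of its children can be valid either.
func markInvalid(n *node) {
	if n == nil {
		return
	}
	n.invalid = true
	n.optimistic = false
	for _, c := range n.children {
		markInvalid(c)
	}
}

func main() {
	child := &node{root: [32]byte{'c'}, optimistic: true}
	parent := &node{root: [32]byte{'b'}, optimistic: true, children: []*node{child}}
	markInvalid(parent)
	fmt.Println(parent.invalid, child.invalid) // true true
}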
-// func TestPruneInvalid(t *testing.T) { tests := []struct { root [32]byte // the root of the new INVALID block @@ -286,12 +285,12 @@ func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) { // Pow | Pos // -// CA -- A -- B -- C-----D -// \ \--------------E -// \ -// ----------------------F -- G -// B is INVALID +// CA -- A -- B -- C-----D +// \ \--------------E +// \ +// ----------------------F -- G // +// B is INVALID func TestSetOptimisticToInvalid_ForkAtMerge(t *testing.T) { ctx := context.Background() f := setup(1, 1) @@ -339,12 +338,12 @@ func TestSetOptimisticToInvalid_ForkAtMerge(t *testing.T) { // Pow | Pos // -// CA -------- B -- C-----D -// \ \--------------E -// \ -// --A -------------------------F -- G -// B is INVALID +// CA -------- B -- C-----D +// \ \--------------E +// \ +// --A -------------------------F -- G // +// B is INVALID func TestSetOptimisticToInvalid_ForkAtMerge_bis(t *testing.T) { ctx := context.Background() f := setup(1, 1) diff --git a/beacon-chain/forkchoice/doubly-linked-tree/store_test.go b/beacon-chain/forkchoice/doubly-linked-tree/store_test.go index 9d877d968..4ee2fe14f 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/store_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/store_test.go @@ -215,8 +215,9 @@ func TestStore_Prune_ReturnEarly(t *testing.T) { // This unit tests starts with a simple branch like this // -// - 1 +// - 1 // / +// // -- 0 -- 2 // // And we finalize 1. As a result only 1 should survive @@ -237,17 +238,15 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) { } // This test starts with the following branching diagram -/// We start with the following diagram -// -// E -- F -// / -// C -- D -// / \ -// A -- B G -- H -- I -// \ \ -// J -- K -- L -// +// / We start with the following diagram // +// E -- F +// / +// C -- D +// / \ +// A -- B G -- H -- I +// \ \ +// J -- K -- L func TestStore_tips(t *testing.T) { ctx := context.Background() f := setup(1, 1) diff --git a/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification_test.go b/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification_test.go index 8e78f58c3..18c6dacb6 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/unrealized_justification_test.go @@ -53,17 +53,18 @@ func TestStore_UpdateUnrealizedCheckpoints(t *testing.T) { } +// Epoch 2 | Epoch 3 // -// Epoch 2 | Epoch 3 -// | -// C | -// / | -// A <-- B | -// \ | -// ---- D +// | +// C | +// / | // -// B is the first block that justifies A. +// A <-- B | // +// \ | +// ---- D +// +// B is the first block that justifies A. func TestStore_LongFork(t *testing.T) { f := setup(1, 1) ctx := context.Background() @@ -104,18 +105,17 @@ func TestStore_LongFork(t *testing.T) { require.Equal(t, [32]byte{'c'}, headRoot) } +// Epoch 1 Epoch 2 Epoch 3 +// | | +// | | // +// A <-- B <-- C <-- D <-- E <-- F <-- G <-- H | // -// Epoch 1 Epoch 2 Epoch 3 -// | | -// | | -// A <-- B <-- C <-- D <-- E <-- F <-- G <-- H | -// | \ | -// | --------------- I -// | | -// -// E justifies A. G justifies E. +// | \ | +// | --------------- I +// | | // +// E justifies A. G justifies E. 
func TestStore_NoDeadLock(t *testing.T) { f := setup(0, 0) ctx := context.Background() @@ -187,17 +187,16 @@ func TestStore_NoDeadLock(t *testing.T) { require.Equal(t, types.Epoch(1), f.FinalizedCheckpoint().Epoch) } -// Epoch 1 | Epoch 2 -// | -// -- D (late) -// / | -// A <- B <- C | -// \ | -// -- -- -- E <- F <- G <- H -// | +// Epoch 1 | Epoch 2 +// | +// -- D (late) +// / | +// A <- B <- C | +// \ | +// -- -- -- E <- F <- G <- H +// | // // D justifies and comes late. -// func TestStore_ForkNextEpoch(t *testing.T) { resetCfg := features.InitWithReset(&features.Flags{ EnableDefensivePull: true, diff --git a/beacon-chain/rpc/eth/node/node.go b/beacon-chain/rpc/eth/node/node.go index 37a155722..94de9e6b7 100644 --- a/beacon-chain/rpc/eth/node/node.go +++ b/beacon-chain/rpc/eth/node/node.go @@ -283,12 +283,13 @@ func (ns *Server) GetSyncStatus(ctx context.Context, _ *emptypb.Empty) (*ethpb.S // GetHealth returns node health status in http status codes. Useful for load balancers. // Response Usage: -// "200": -// description: Node is ready -// "206": -// description: Node is syncing but can serve incomplete data -// "503": -// description: Node not initialized or having issues +// +// "200": +// description: Node is ready +// "206": +// description: Node is syncing but can serve incomplete data +// "503": +// description: Node not initialized or having issues func (ns *Server) GetHealth(ctx context.Context, _ *emptypb.Empty) (*emptypb.Empty, error) { ctx, span := trace.StartSpan(ctx, "node.GetHealth") defer span.End() diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_eth1data.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_eth1data.go index 97675686c..9caec9744 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_eth1data.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_eth1data.go @@ -20,16 +20,16 @@ import ( // eth1DataMajorityVote determines the appropriate eth1data for a block proposal using // an algorithm called Voting with the Majority. The algorithm works as follows: -// - Determine the timestamp for the start slot for the eth1 voting period. -// - Determine the earliest and latest timestamps that a valid block can have. -// - Determine the first block not before the earliest timestamp. This block is the lower bound. -// - Determine the last block not after the latest timestamp. This block is the upper bound. -// - If the last block is too early, use current eth1data from the beacon state. -// - Filter out votes on unknown blocks and blocks which are outside of the range determined by the lower and upper bounds. -// - If no blocks are left after filtering votes, use eth1data from the latest valid block. -// - Otherwise: -// - Determine the vote with the highest count. Prefer the vote with the highest eth1 block height in the event of a tie. -// - This vote's block is the eth1 block to use for the block proposal. +// - Determine the timestamp for the start slot for the eth1 voting period. +// - Determine the earliest and latest timestamps that a valid block can have. +// - Determine the first block not before the earliest timestamp. This block is the lower bound. +// - Determine the last block not after the latest timestamp. This block is the upper bound. +// - If the last block is too early, use current eth1data from the beacon state. +// - Filter out votes on unknown blocks and blocks which are outside of the range determined by the lower and upper bounds. 
+// - If no blocks are left after filtering votes, use eth1data from the latest valid block. +// - Otherwise: +// - Determine the vote with the highest count. Prefer the vote with the highest eth1 block height in the event of a tie. +// - This vote's block is the eth1 block to use for the block proposal. func (vs *Server) eth1DataMajorityVote(ctx context.Context, beaconState state.BeaconState) (*ethpb.Eth1Data, error) { ctx, cancel := context.WithTimeout(ctx, eth1dataTimeout) defer cancel() diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_execution_payload.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_execution_payload.go index 2630ab9e0..6538b2b89 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_execution_payload.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_execution_payload.go @@ -186,14 +186,15 @@ func warnIfFeeRecipientDiffers(payload *enginev1.ExecutionPayload, feeRecipient // // Spec code: // def get_terminal_pow_block(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]: -// if TERMINAL_BLOCK_HASH != Hash32(): -// # Terminal block hash override takes precedence over terminal total difficulty -// if TERMINAL_BLOCK_HASH in pow_chain: -// return pow_chain[TERMINAL_BLOCK_HASH] -// else: -// return None // -// return get_pow_block_at_terminal_total_difficulty(pow_chain) +// if TERMINAL_BLOCK_HASH != Hash32(): +// # Terminal block hash override takes precedence over terminal total difficulty +// if TERMINAL_BLOCK_HASH in pow_chain: +// return pow_chain[TERMINAL_BLOCK_HASH] +// else: +// return None +// +// return get_pow_block_at_terminal_total_difficulty(pow_chain) func (vs *Server) getTerminalBlockHashIfExists(ctx context.Context, transitionTime uint64) ([]byte, bool, error) { terminalBlockHash := params.BeaconConfig().TerminalBlockHash // Terminal block hash override takes precedence over terminal total difficulty. @@ -214,10 +215,11 @@ func (vs *Server) getTerminalBlockHashIfExists(ctx context.Context, transitionTi // activationEpochNotReached returns true if activation epoch has not been reach. // Which satisfy the following conditions in spec: -// is_terminal_block_hash_set = TERMINAL_BLOCK_HASH != Hash32() -// is_activation_epoch_reached = get_current_epoch(state) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH -// if is_terminal_block_hash_set and not is_activation_epoch_reached: -// return True +// +// is_terminal_block_hash_set = TERMINAL_BLOCK_HASH != Hash32() +// is_activation_epoch_reached = get_current_epoch(state) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH +// if is_terminal_block_hash_set and not is_activation_epoch_reached: +// return True func activationEpochNotReached(slot types.Slot) bool { terminalBlockHashSet := bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) != [32]byte{} if terminalBlockHashSet { diff --git a/beacon-chain/rpc/statefetcher/fetcher.go b/beacon-chain/rpc/statefetcher/fetcher.go index a13cffc1d..65221d9a9 100644 --- a/beacon-chain/rpc/statefetcher/fetcher.go +++ b/beacon-chain/rpc/statefetcher/fetcher.go @@ -87,12 +87,12 @@ type StateProvider struct { } // State returns the BeaconState for a given identifier. 
The identifier can be one of: -// - "head" (canonical head in node's view) -// - "genesis" -// - "finalized" -// - "justified" -// - -// - +// - "head" (canonical head in node's view) +// - "genesis" +// - "finalized" +// - "justified" +// - +// - func (p *StateProvider) State(ctx context.Context, stateId []byte) (state.BeaconState, error) { var ( s state.BeaconState @@ -141,12 +141,12 @@ func (p *StateProvider) State(ctx context.Context, stateId []byte) (state.Beacon } // StateRoot returns a beacon state root for a given identifier. The identifier can be one of: -// - "head" (canonical head in node's view) -// - "genesis" -// - "finalized" -// - "justified" -// - -// - +// - "head" (canonical head in node's view) +// - "genesis" +// - "finalized" +// - "justified" +// - +// - func (p *StateProvider) StateRoot(ctx context.Context, stateId []byte) (root []byte, err error) { stateIdString := strings.ToLower(string(stateId)) switch stateIdString { diff --git a/beacon-chain/slasher/chunks.go b/beacon-chain/slasher/chunks.go index e9b400266..70148a9aa 100644 --- a/beacon-chain/slasher/chunks.go +++ b/beacon-chain/slasher/chunks.go @@ -56,29 +56,29 @@ type Chunker interface { // Under ideal network conditions, where every target epoch immediately follows its source, // min spans for a validator will look as follows: // -// min_spans = [2, 2, 2, ..., 2] +// min_spans = [2, 2, 2, ..., 2] // // Next, we can chunk this list of min spans into chunks of length C. For C = 2, for example: // -// chunk0 chunk1 chunkN -// { } { } { } -// chunked_min_spans = [[2, 2], [2, 2], ..., [2, 2]] +// chunk0 chunk1 chunkN +// { } { } { } +// chunked_min_spans = [[2, 2], [2, 2], ..., [2, 2]] // // Finally, we can store each chunk index for K validators into a single flat slice. For K = 3: // -// val0 val1 val2 -// { } { } { } -// chunk_0_for_validators_0_to_2 = [[2, 2], [2, 2], [2, 2]] +// val0 val1 val2 +// { } { } { } +// chunk_0_for_validators_0_to_2 = [[2, 2], [2, 2], [2, 2]] // -// val0 val1 val2 -// { } { } { } -// chunk_1_for_validators_0_to_2 = [[2, 2], [2, 2], [2, 2]] +// val0 val1 val2 +// { } { } { } +// chunk_1_for_validators_0_to_2 = [[2, 2], [2, 2], [2, 2]] // -// ... +// ... // -// val0 val1 val2 -// { } { } { } -// chunk_N_for_validators_0_to_2 = [[2, 2], [2, 2], [2, 2]] +// val0 val1 val2 +// { } { } { } +// chunk_N_for_validators_0_to_2 = [[2, 2], [2, 2], [2, 2]] // // MinSpanChunksSlice represents the data structure above for a single chunk index. type MinSpanChunksSlice struct { @@ -175,7 +175,7 @@ func (m *MaxSpanChunksSlice) Chunk() []uint16 { // within the min span chunks slice. Recall that for an incoming attestation, B, and an // existing attestation, A: // -// B surrounds A if and only if B.target > min_spans[B.source] +// B surrounds A if and only if B.target > min_spans[B.source] // // That is, this condition is sufficient to check if an incoming attestation // is surrounding a previous one. We also check if we indeed have an existing @@ -222,7 +222,7 @@ func (m *MinSpanChunksSlice) CheckSlashable( // within the max span chunks slice. Recall that for an incoming attestation, B, and an // existing attestation, A: // -// B surrounds A if and only if B.target < max_spans[B.source] +// B surrounds A if and only if B.target < max_spans[B.source] // // That is, this condition is sufficient to check if an incoming attestation // is surrounded by a previous one. 
We also check if we indeed have an existing @@ -278,19 +278,19 @@ func (m *MaxSpanChunksSlice) CheckSlashable( // Recall that a MinSpanChunksSlice struct represents a single slice for a chunk index // from the collection below: // -// val0 val1 val2 -// { } { } { } -// chunk_0_for_validators_0_to_2 = [[2, 2], [2, 2], [2, 2]] +// val0 val1 val2 +// { } { } { } +// chunk_0_for_validators_0_to_2 = [[2, 2], [2, 2], [2, 2]] // -// val0 val1 val2 -// { } { } { } -// chunk_1_for_validators_0_to_2 = [[2, 2], [2, 2], [2, 2]] +// val0 val1 val2 +// { } { } { } +// chunk_1_for_validators_0_to_2 = [[2, 2], [2, 2], [2, 2]] // -// ... +// ... // -// val0 val1 val2 -// { } { } { } -// chunk_N_for_validators_0_to_2 = [[2, 2], [2, 2], [2, 2]] +// val0 val1 val2 +// { } { } { } +// chunk_N_for_validators_0_to_2 = [[2, 2], [2, 2], [2, 2]] // // Let's take a look at how this update will look for a real set of min span chunk: // For the purposes of a simple example, let's set H = 2, meaning a min span @@ -301,12 +301,11 @@ func (m *MaxSpanChunksSlice) CheckSlashable( // 4 down to 3. First, we find out which chunk epoch 4 falls into, which is calculated as: // chunk_idx = (epoch % H) / C = (4 % 2) / 2 = 0 // -// -// val0 val1 val2 -// { } { } { } -// chunk_0_for_validators_0_to_3 = [[2, 2], [2, 2], [2, 2]] -// | -// |-> epoch 4 for validator 0 +// val0 val1 val2 +// { } { } { } +// chunk_0_for_validators_0_to_3 = [[2, 2], [2, 2], [2, 2]] +// | +// |-> epoch 4 for validator 0 // // Next up, we proceed with the update process for validator index 0, starting at epoch 4 // all the way down to epoch 2. We will need to go down the array as far as we can get. If the @@ -452,18 +451,18 @@ func (_ *MaxSpanChunksSlice) StartEpoch( // NextChunkStartEpoch given an epoch, determines the start epoch of the next chunk. For min // span chunks, this will be the last epoch of chunk index = (current chunk - 1). For example: // -// chunk0 chunk1 chunk2 -// | | | -// max_spans_val_i = [[-, -, -], [-, -, -], [-, -, -]] +// chunk0 chunk1 chunk2 +// | | | +// max_spans_val_i = [[-, -, -], [-, -, -], [-, -, -]] // // If C = chunkSize is 3 epochs per chunk, and we input start epoch of chunk 1 which is 3 then the next start // epoch is the last epoch of chunk 0, which is epoch 2. This is computed as: // -// last_epoch(chunkIndex(startEpoch)-1) -// last_epoch(chunkIndex(3) - 1) -// last_epoch(1 - 1) -// last_epoch(0) -// 2 +// last_epoch(chunkIndex(startEpoch)-1) +// last_epoch(chunkIndex(3) - 1) +// last_epoch(1 - 1) +// last_epoch(0) +// 2 func (m *MinSpanChunksSlice) NextChunkStartEpoch(startEpoch types.Epoch) types.Epoch { prevChunkIdx := m.params.chunkIndex(startEpoch) if prevChunkIdx > 0 { @@ -475,18 +474,18 @@ func (m *MinSpanChunksSlice) NextChunkStartEpoch(startEpoch types.Epoch) types.E // NextChunkStartEpoch given an epoch, determines the start epoch of the next chunk. For max // span chunks, this will be the start epoch of chunk index = (current chunk + 1). For example: // -// chunk0 chunk1 chunk2 -// | | | -// max_spans_val_i = [[-, -, -], [-, -, -], [-, -, -]] +// chunk0 chunk1 chunk2 +// | | | +// max_spans_val_i = [[-, -, -], [-, -, -], [-, -, -]] // // If C = chunkSize is 3 epochs per chunk, and we input start epoch of chunk 1 which is 3. The next start // epoch is the start epoch of chunk 2, which is epoch 4. 
This is computed as: // -// first_epoch(chunkIndex(startEpoch)+1) -// first_epoch(chunkIndex(3)+1) -// first_epoch(1 + 1) -// first_epoch(2) -// 4 +// first_epoch(chunkIndex(startEpoch)+1) +// first_epoch(chunkIndex(3)+1) +// first_epoch(1 + 1) +// first_epoch(2) +// 4 func (m *MaxSpanChunksSlice) NextChunkStartEpoch(startEpoch types.Epoch) types.Epoch { return m.params.firstEpoch(m.params.chunkIndex(startEpoch) + 1) } diff --git a/beacon-chain/slasher/detect_attestations.go b/beacon-chain/slasher/detect_attestations.go index b8533c26b..f63dcf4b5 100644 --- a/beacon-chain/slasher/detect_attestations.go +++ b/beacon-chain/slasher/detect_attestations.go @@ -72,11 +72,11 @@ func (s *Service) checkSlashableAttestations( // as the current epoch in time, we perform slashing detection. // The process is as follows given a list of attestations: // -// 1. Check for attester double votes using the list of attestations. -// 2. Group the attestations by chunk index. -// 3. Update the min and max spans for those grouped attestations, check if any slashings are -// found in the process -// 4. Update the latest written epoch for all validators involved to the current epoch. +// 1. Check for attester double votes using the list of attestations. +// 2. Group the attestations by chunk index. +// 3. Update the min and max spans for those grouped attestations, check if any slashings are +// found in the process +// 4. Update the latest written epoch for all validators involved to the current epoch. // // This function performs a lot of critical actions and is split into smaller helpers for cleanliness. func (s *Service) detectAllAttesterSlashings( @@ -239,13 +239,13 @@ func (s *Service) epochUpdateForValidator( } // Updates spans and detects any slashable attester offenses along the way. -// 1. Determine the chunks we need to use for updating for the validator indices -// in a validator chunk index, then retrieve those chunks from the database. -// 2. Using the chunks from step (1): -// for every attestation by chunk index: -// for each validator in the attestation's attesting indices: -// - Check if the attestation is slashable, if so return a slashing object. -// 3. Save the updated chunks to disk. +// 1. Determine the chunks we need to use for updating for the validator indices +// in a validator chunk index, then retrieve those chunks from the database. +// 2. Using the chunks from step (1): +// for every attestation by chunk index: +// for each validator in the attestation's attesting indices: +// - Check if the attestation is slashable, if so return a slashing object. +// 3. Save the updated chunks to disk. func (s *Service) updateSpans( ctx context.Context, updatedChunks map[uint64]Chunker, diff --git a/beacon-chain/slasher/doc.go b/beacon-chain/slasher/doc.go index 35f860459..fa2c9f492 100644 --- a/beacon-chain/slasher/doc.go +++ b/beacon-chain/slasher/doc.go @@ -17,25 +17,25 @@ // with length = H where H is the amount of epochs worth of history // we want to persist for slashing detection. // -// validator_1_min_span = [2, 2, 2, ..., 2] -// validator_1_max_span = [0, 0, 0, ..., 0] +// validator_1_min_span = [2, 2, 2, ..., 2] +// validator_1_max_span = [0, 0, 0, ..., 0] // // Instead of always dealing with length H arrays, which can be prohibitively // expensive to handle in memory, we split these arrays into chunks of length C. 
// For C = 3, for example, the 0th chunk of validator 1's min and max spans would look // as follows: // -// validator_1_min_span_chunk_0 = [2, 2, 2] -// validator_1_max_span_chunk_0 = [2, 2, 2] +// validator_1_min_span_chunk_0 = [2, 2, 2] +// validator_1_max_span_chunk_0 = [2, 2, 2] // // Next, on disk, we take chunks for K validators, and store them as flat slices. // For example, if H = 3, C = 3, and K = 3, then we can store 3 validators' chunks as a flat // slice as follows: // -// val0 val1 val2 -// | | | -// { } { } { } -// [2, 2, 2, 2, 2, 2, 2, 2, 2] +// val0 val1 val2 +// | | | +// { } { } { } +// [2, 2, 2, 2, 2, 2, 2, 2, 2] // // This is known as 2D chunking, pioneered by the Sigma Prime team here: // https://hackmd.io/@sproul/min-max-slasher. The parameters H, C, and K will be diff --git a/beacon-chain/slasher/params.go b/beacon-chain/slasher/params.go index 4fd59defb..d59b15b81 100644 --- a/beacon-chain/slasher/params.go +++ b/beacon-chain/slasher/params.go @@ -44,10 +44,9 @@ func DefaultParams() *Parameters { // if we are keeping 6 epochs worth of data, and we have chunks of size 2, then epoch // 4 will fall into chunk index (4 % 6) / 2 = 2. // -// span = [-, -, -, -, -, -] -// chunked = [[-, -], [-, -], [-, -]] -// |-> epoch 4, chunk idx 2 -// +// span = [-, -, -, -, -, -] +// chunked = [[-, -], [-, -], [-, -]] +// |-> epoch 4, chunk idx 2 func (p *Parameters) chunkIndex(epoch types.Epoch) uint64 { return uint64(epoch.Mod(uint64(p.historyLength)).Div(p.chunkSize)) } @@ -63,12 +62,11 @@ func (p *Parameters) validatorChunkIndex(validatorIndex types.ValidatorIndex) ui // For example, if we have chunks of length 3 and we ask to give us the // first epoch of chunk1, then: // -// chunk0 chunk1 chunk2 -// | | | -// [[-, -, -], [-, -, -], [-, -, -], ...] -// | -// -> first epoch of chunk 1 equals 3 -// +// chunk0 chunk1 chunk2 +// | | | +// [[-, -, -], [-, -, -], [-, -, -], ...] +// | +// -> first epoch of chunk 1 equals 3 func (p *Parameters) firstEpoch(chunkIndex uint64) types.Epoch { return types.Epoch(chunkIndex * p.chunkSize) } @@ -77,12 +75,11 @@ func (p *Parameters) firstEpoch(chunkIndex uint64) types.Epoch { // For example, if we have chunks of length 3 and we ask to give us the // last epoch of chunk1, then: // -// chunk0 chunk1 chunk2 -// | | | -// [[-, -, -], [-, -, -], [-, -, -], ...] -// | -// -> last epoch of chunk 1 equals 5 -// +// chunk0 chunk1 chunk2 +// | | | +// [[-, -, -], [-, -, -], [-, -, -], ...] +// | +// -> last epoch of chunk 1 equals 5 func (p *Parameters) lastEpoch(chunkIndex uint64) types.Epoch { return p.firstEpoch(chunkIndex).Add(p.chunkSize - 1) } @@ -92,24 +89,23 @@ func (p *Parameters) lastEpoch(chunkIndex uint64) types.Epoch { // chunk of size C. 
For example, if C = 3 and K = 3, the data we store // on disk is a flat slice as follows: // -// val0 val1 val2 -// | | | -// { } { } { } -// [-, -, -, -, -, -, -, -, -] +// val0 val1 val2 +// | | | +// { } { } { } +// [-, -, -, -, -, -, -, -, -] // // Then, figuring out the exact cell index for epoch 1 for validator 2 is computed // with (validatorIndex % K)*C + (epoch % C), which gives us: // -// (2 % 3)*3 + (1 % 3) = -// (2*3) + (1) = -// 7 -// -// val0 val1 val2 -// | | | -// { } { } { } -// [-, -, -, -, -, -, -, -, -] -// |-> epoch 1 for val2 +// (2 % 3)*3 + (1 % 3) = +// (2*3) + (1) = +// 7 // +// val0 val1 val2 +// | | | +// { } { } { } +// [-, -, -, -, -, -, -, -, -] +// |-> epoch 1 for val2 func (p *Parameters) cellIndex(validatorIndex types.ValidatorIndex, epoch types.Epoch) uint64 { validatorChunkOffset := p.validatorOffset(validatorIndex) chunkOffset := p.chunkOffset(epoch) @@ -134,17 +130,16 @@ func (p *Parameters) validatorOffset(validatorIndex types.ValidatorIndex) uint64 // If chunkSize C = 3 and validatorChunkSize K = 3, and historyLength H = 12, // if we are looking for epoch 6 and validator 6, then // -// validatorChunkIndex = 6 / 3 = 2 -// chunkIndex = (6 % historyLength) / 3 = (6 % 12) / 3 = 2 +// validatorChunkIndex = 6 / 3 = 2 +// chunkIndex = (6 % historyLength) / 3 = (6 % 12) / 3 = 2 // // Then we compute how many chunks there are per max span, known as the "width" // -// width = H / C = 12 / 3 = 4 +// width = H / C = 12 / 3 = 4 // // So every span has 4 chunks. Then, we have a disk key calculated by // -// validatorChunkIndex * width + chunkIndex = 2*4 + 2 = 10 -// +// validatorChunkIndex * width + chunkIndex = 2*4 + 2 = 10 func (p *Parameters) flatSliceID(validatorChunkIndex, chunkIndex uint64) []byte { width := p.historyLength.Div(p.chunkSize) return ssz.MarshalUint64(make([]byte, 0), uint64(width.Mul(validatorChunkIndex).Add(chunkIndex))) diff --git a/beacon-chain/state/state-native/doc.go b/beacon-chain/state/state-native/doc.go index 53459e610..0f79c5100 100644 --- a/beacon-chain/state/state-native/doc.go +++ b/beacon-chain/state/state-native/doc.go @@ -9,20 +9,20 @@ // allowing it to be used by other package-level functions that already hold a lock. // Hence the functions look something like this: // -// func (b *BeaconState) Foo() uint64 { -// // Read lock. -// b.lock.RLock() -// defer b.lock.RUnlock() +// func (b *BeaconState) Foo() uint64 { +// // Read lock. +// b.lock.RLock() +// defer b.lock.RUnlock() // -// // Internal getter. -// return b.foo() -// } +// // Internal getter. +// return b.foo() +// } // -// func (b *BeaconState) foo() uint64 { -// (...) // Some processing logic. +// func (b *BeaconState) foo() uint64 { +// (...) // Some processing logic. 
// -// return b.foo -// } +// return b.foo +// } // // Although it is technically possible to remove the short-circuit conditions // from the external function, that would require every read to obtain a lock diff --git a/beacon-chain/state/stategen/replay_test.go b/beacon-chain/state/stategen/replay_test.go index 28ebaca18..c4edd03c1 100644 --- a/beacon-chain/state/stategen/replay_test.go +++ b/beacon-chain/state/stategen/replay_test.go @@ -360,8 +360,9 @@ func TestLoadBlocks_BadStart(t *testing.T) { // tree1 constructs the following tree: // B0 - B1 - - B3 -- B5 -// \- B2 -- B4 -- B6 ----- B8 -// \- B7 +// +// \- B2 -- B4 -- B6 ----- B8 +// \- B7 func tree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) { b0 := util.NewBeaconBlock() b0.Block.Slot = 0 @@ -449,10 +450,11 @@ func tree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, // tree2 constructs the following tree: // B0 - B1 -// \- B2 -// \- B2 -// \- B2 -// \- B2 -- B3 +// +// \- B2 +// \- B2 +// \- B2 +// \- B2 -- B3 func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) { b0 := util.NewBeaconBlock() b0.Block.Slot = 0 @@ -531,10 +533,11 @@ func tree2(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, // tree3 constructs the following tree: // B0 - B1 -// \- B2 -// \- B2 -// \- B2 -// \- B2 +// +// \- B2 +// \- B2 +// \- B2 +// \- B2 func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) { b0 := util.NewBeaconBlock() b0.Block.Slot = 0 @@ -607,10 +610,11 @@ func tree3(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, // tree4 constructs the following tree: // B0 -// \- B2 -// \- B2 -// \- B2 -// \- B2 +// +// \- B2 +// \- B2 +// \- B2 +// \- B2 func tree4(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][32]byte, []*ethpb.SignedBeaconBlock, error) { b0 := util.NewBeaconBlock() b0.Block.Slot = 0 diff --git a/beacon-chain/sync/pending_blocks_queue_test.go b/beacon-chain/sync/pending_blocks_queue_test.go index e567a602a..653cc8eb5 100644 --- a/beacon-chain/sync/pending_blocks_queue_test.go +++ b/beacon-chain/sync/pending_blocks_queue_test.go @@ -31,9 +31,12 @@ import ( "github.com/prysmaticlabs/prysm/v3/testing/util" ) -// /- b1 - b2 +// /- b1 - b2 +// // b0 -// \- b3 +// +// \- b3 +// // Test b1 was missing then received and we can process b0 -> b1 -> b2 func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) { db := dbtest.SetupDB(t) @@ -348,9 +351,12 @@ func TestRegularSyncBeaconBlockSubscriber_DoNotReprocessBlock(t *testing.T) { assert.Equal(t, 0, len(r.seenPendingBlocks), "Incorrect size for seen pending block") } -// /- b1 - b2 - b5 +// /- b1 - b2 - b5 +// // b0 -// \- b3 - b4 +// +// \- b3 - b4 +// // Test b2 and b3 were missed, after receiving them we can process 2 chains. func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testing.T) { db := dbtest.SetupDB(t) diff --git a/config/features/config.go b/config/features/config.go index dc0757f80..d9e2245cd 100644 --- a/config/features/config.go +++ b/config/features/config.go @@ -3,19 +3,19 @@ Package features defines which features are enabled for runtime in order to selectively enable certain features to maintain a stable runtime. The process for implementing new features using this package is as follows: - 1. Add a new CMD flag in flags.go, and place it in the proper list(s) var for its client. 
- 2. Add a condition for the flag in the proper Configure function(s) below. - 3. Place any "new" behavior in the `if flagEnabled` statement. - 4. Place any "previous" behavior in the `else` statement. - 5. Ensure any tests using the new feature fail if the flag isn't enabled. - 5a. Use the following to enable your flag for tests: - cfg := &featureconfig.Flags{ - VerifyAttestationSigs: true, - } - resetCfg := featureconfig.InitWithReset(cfg) - defer resetCfg() - 6. Add the string for the flags that should be running within E2E to E2EValidatorFlags - and E2EBeaconChainFlags. + 1. Add a new CMD flag in flags.go, and place it in the proper list(s) var for its client. + 2. Add a condition for the flag in the proper Configure function(s) below. + 3. Place any "new" behavior in the `if flagEnabled` statement. + 4. Place any "previous" behavior in the `else` statement. + 5. Ensure any tests using the new feature fail if the flag isn't enabled. + 5a. Use the following to enable your flag for tests: + cfg := &featureconfig.Flags{ + VerifyAttestationSigs: true, + } + resetCfg := featureconfig.InitWithReset(cfg) + defer resetCfg() + 6. Add the string for the flags that should be running within E2E to E2EValidatorFlags + and E2EBeaconChainFlags. */ package features diff --git a/container/leaky-bucket/leakybucket.go b/container/leaky-bucket/leakybucket.go index d32982181..1ca8ca680 100644 --- a/container/leaky-bucket/leakybucket.go +++ b/container/leaky-bucket/leakybucket.go @@ -18,7 +18,6 @@ This means it is the exact mirror of a token bucket. n := b.Add(1) // n == 0 - A Collector is a convenient way to keep track of multiple LeakyBucket's. Buckets are associated with string keys for fast lookup. It can dynamically add new buckets and automatically remove them as they become empty, freeing diff --git a/container/slice/slice.go b/container/slice/slice.go index d1b30787e..dab8dcfb7 100644 --- a/container/slice/slice.go +++ b/container/slice/slice.go @@ -296,11 +296,12 @@ func SplitCommaSeparated(arr []string) []string { // // Spec pseudocode definition: // def get_split_offset(list_size: int, chunks: int, index: int) -> int: -// """ -// Returns a value such that for a list L, chunk count k and index i, -// split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)] -// """ -// return (list_size * index) // chunks +// +// """ +// Returns a value such that for a list L, chunk count k and index i, +// split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)] +// """ +// return (list_size * index) // chunks func SplitOffset(listSize, chunks, index uint64) uint64 { return (listSize * index) / chunks } diff --git a/container/trie/sparse_merkle.go b/container/trie/sparse_merkle.go index 2f690ca45..ce362c7cc 100644 --- a/container/trie/sparse_merkle.go +++ b/container/trie/sparse_merkle.go @@ -101,8 +101,9 @@ func (m *SparseMerkleTrie) Items() [][]byte { } // HashTreeRoot of the Merkle trie as defined in the deposit contract. 
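A brief illustrative aside on the get_split_offset formula quoted above: plugging in small numbers shows how the offsets partition a list. The splitOffsets helper below is a hypothetical worked example, not Prysm code:

package main

import "fmt"

// splitOffsets returns every offset produced by (list_size * index) // chunks,
// i.e. the boundaries that split a list of listSize items into `chunks` parts.
func splitOffsets(listSize, chunks uint64) []uint64 {
	offsets := make([]uint64, 0, chunks+1)
	for index := uint64(0); index <= chunks; index++ {
		offsets = append(offsets, listSize*index/chunks)
	}
	return offsets
}

func main() {
	// A list of 10 items split into 3 chunks is cut at [0 3 6 10],
	// giving chunk lengths 3, 3 and 4.
	fmt.Println(splitOffsets(10, 3))
}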
-// Spec Definition: -// sha256(concat(node, self.to_little_endian_64(self.deposit_count), slice(zero_bytes32, start=0, len=24))) +// +// Spec Definition: +// sha256(concat(node, self.to_little_endian_64(self.deposit_count), slice(zero_bytes32, start=0, len=24))) func (m *SparseMerkleTrie) HashTreeRoot() ([32]byte, error) { enc := [32]byte{} depositCount := uint64(len(m.originalItems)) diff --git a/contracts/deposit/deposit.go b/contracts/deposit/deposit.go index 117e18033..99e5e43cb 100644 --- a/contracts/deposit/deposit.go +++ b/contracts/deposit/deposit.go @@ -16,13 +16,14 @@ import ( // signed by the deposit key. // // Spec details about general deposit workflow: -// To submit a deposit: // -// - Pack the validator's initialization parameters into deposit_data, a Deposit_Data SSZ object. -// - Let amount be the amount in Gwei to be deposited by the validator where MIN_DEPOSIT_AMOUNT <= amount <= MAX_EFFECTIVE_BALANCE. -// - Set deposit_data.amount = amount. -// - Let signature be the result of bls_sign of the signing_root(deposit_data) with domain=compute_domain(DOMAIN_DEPOSIT). (Deposits are valid regardless of fork version, compute_domain will default to zeroes there). -// - Send a transaction on the Ethereum 1.0 chain to DEPOSIT_CONTRACT_ADDRESS executing `deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])` along with a deposit of amount Gwei. +// To submit a deposit: +// +// - Pack the validator's initialization parameters into deposit_data, a Deposit_Data SSZ object. +// - Let amount be the amount in Gwei to be deposited by the validator where MIN_DEPOSIT_AMOUNT <= amount <= MAX_EFFECTIVE_BALANCE. +// - Set deposit_data.amount = amount. +// - Let signature be the result of bls_sign of the signing_root(deposit_data) with domain=compute_domain(DOMAIN_DEPOSIT). (Deposits are valid regardless of fork version, compute_domain will default to zeroes there). +// - Send a transaction on the Ethereum 1.0 chain to DEPOSIT_CONTRACT_ADDRESS executing `deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])` along with a deposit of amount Gwei. // // See: https://github.com/ethereum/consensus-specs/blob/master/specs/validator/0_beacon-chain-validator.md#submit-deposit func DepositInput(depositKey, withdrawalKey bls.SecretKey, amountInGwei uint64) (*ethpb.Deposit_Data, [32]byte, error) { @@ -68,8 +69,10 @@ func DepositInput(depositKey, withdrawalKey bls.SecretKey, amountInGwei uint64) // address. // // The specification is as follows: -// withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX_BYTE -// withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:] +// +// withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX_BYTE +// withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:] +// // where withdrawal_credentials is of type bytes32. func WithdrawalCredentialsHash(withdrawalKey bls.SecretKey) []byte { h := hash.Hash(withdrawalKey.PublicKey().Marshal()) diff --git a/crypto/bls/blst/doc.go b/crypto/bls/blst/doc.go index 010e3ec56..76a2e9aeb 100644 --- a/crypto/bls/blst/doc.go +++ b/crypto/bls/blst/doc.go @@ -3,5 +3,4 @@ // verifying and aggregating BLS signatures used by Ethereum. // // This implementation uses the library written by Supranational, blst. 
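As a small illustrative sketch, the withdrawal-credentials layout described in the deposit comments above can be written out as below; the sha256 hash and the 0x00 value for BLS_WITHDRAWAL_PREFIX_BYTE are assumptions for this example, and withdrawalCredentials is a hypothetical helper rather than the function in the patch:

package main

import (
	"crypto/sha256"
	"fmt"
)

// blsWithdrawalPrefixByte is assumed to be 0x00 here.
const blsWithdrawalPrefixByte = byte(0x00)

// withdrawalCredentials builds the 32-byte credentials from a withdrawal
// public key: the first byte is the prefix, the rest is hash(pubkey)[1:].
func withdrawalCredentials(withdrawalPubkey []byte) [32]byte {
	h := sha256.Sum256(withdrawalPubkey)
	var creds [32]byte
	creds[0] = blsWithdrawalPrefixByte
	copy(creds[1:], h[1:])
	return creds
}

func main() {
	pubkey := make([]byte, 48) // placeholder 48-byte BLS public key
	fmt.Printf("%x\n", withdrawalCredentials(pubkey))
}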
-// package blst diff --git a/crypto/bls/blst/secret_key.go b/crypto/bls/blst/secret_key.go index 53126a723..da95a67dc 100644 --- a/crypto/bls/blst/secret_key.go +++ b/crypto/bls/blst/secret_key.go @@ -67,7 +67,8 @@ func IsZero(sKey []byte) bool { // // In IETF draft BLS specification: // Sign(SK, message) -> signature: a signing algorithm that generates -// a deterministic signature given a secret key SK and a message. +// +// a deterministic signature given a secret key SK and a message. // // In Ethereum proof of stake specification: // def Sign(SK: int, message: Bytes) -> BLSSignature diff --git a/crypto/bls/blst/signature.go b/crypto/bls/blst/signature.go index dc354d7e2..819a2fbfd 100644 --- a/crypto/bls/blst/signature.go +++ b/crypto/bls/blst/signature.go @@ -85,8 +85,9 @@ func MultipleSignaturesFromBytes(multiSigs [][]byte) ([]common.Signature, error) // // In IETF draft BLS specification: // Verify(PK, message, signature) -> VALID or INVALID: a verification -// algorithm that outputs VALID if signature is a valid signature of -// message under public key PK, and INVALID otherwise. +// +// algorithm that outputs VALID if signature is a valid signature of +// message under public key PK, and INVALID otherwise. // // In the Ethereum proof of stake specification: // def Verify(PK: BLSPubkey, message: Bytes, signature: BLSSignature) -> bool @@ -103,10 +104,11 @@ func (s *Signature) Verify(pubKey common.PublicKey, msg []byte) bool { // // In IETF draft BLS specification: // AggregateVerify((PK_1, message_1), ..., (PK_n, message_n), -// signature) -> VALID or INVALID: an aggregate verification -// algorithm that outputs VALID if signature is a valid aggregated -// signature for a collection of public keys and messages, and -// outputs INVALID otherwise. +// +// signature) -> VALID or INVALID: an aggregate verification +// algorithm that outputs VALID if signature is a valid aggregated +// signature for a collection of public keys and messages, and +// outputs INVALID otherwise. // // In the Ethereum proof of stake specification: // def AggregateVerify(pairs: Sequence[PK: BLSPubkey, message: Bytes], signature: BLSSignature) -> bool @@ -134,9 +136,10 @@ func (s *Signature) AggregateVerify(pubKeys []common.PublicKey, msgs [][32]byte) // // In IETF draft BLS specification: // FastAggregateVerify(PK_1, ..., PK_n, message, signature) -> VALID -// or INVALID: a verification algorithm for the aggregate of multiple -// signatures on the same message. This function is faster than -// AggregateVerify. +// +// or INVALID: a verification algorithm for the aggregate of multiple +// signatures on the same message. This function is faster than +// AggregateVerify. // // In the Ethereum proof of stake specification: // def FastAggregateVerify(PKs: Sequence[BLSPubkey], message: Bytes, signature: BLSSignature) -> bool @@ -156,12 +159,13 @@ func (s *Signature) FastAggregateVerify(pubKeys []common.PublicKey, msg [32]byte // // Spec code: // def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool: -// """ -// Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty. -// """ -// if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY: -// return True -// return bls.FastAggregateVerify(pubkeys, message, signature) +// +// """ +// Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty. 
+// """ +// if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY: +// return True +// return bls.FastAggregateVerify(pubkeys, message, signature) func (s *Signature) Eth2FastAggregateVerify(pubKeys []common.PublicKey, msg [32]byte) bool { if len(pubKeys) == 0 && bytes.Equal(s.Marshal(), common.InfiniteSignature[:]) { return true diff --git a/crypto/rand/rand.go b/crypto/rand/rand.go index 006dc3d56..5e7ecdb73 100644 --- a/crypto/rand/rand.go +++ b/crypto/rand/rand.go @@ -7,26 +7,26 @@ This limits the scope of code that needs to be hardened. There are two modes, one for deterministic and another non-deterministic randomness: 1. If deterministic pseudo-random generator is enough, use: - import "github.com/prysmaticlabs/prysm/v3/crypto/rand" - randGen := rand.NewDeterministicGenerator() - randGen.Intn(32) // or any other func defined in math.rand API + import "github.com/prysmaticlabs/prysm/v3/crypto/rand" + randGen := rand.NewDeterministicGenerator() + randGen.Intn(32) // or any other func defined in math.rand API - In this mode, only seed is generated using cryptographically secure source (crypto/rand). So, - once seed is obtained, and generator is seeded, the next generations are deterministic, thus fast. - However given that we only seed this 63 bits from crypto/rand and use math/rand to generate the outputs, - this method is not cryptographically secure. This is directly stated in the math/rand package, - https://github.com/golang/go/blob/release-branch.go1.17/src/math/rand/rand.go#L15. For any security - sensitive work this particular generator is NOT to be used. + In this mode, only seed is generated using cryptographically secure source (crypto/rand). So, + once seed is obtained, and generator is seeded, the next generations are deterministic, thus fast. + However given that we only seed this 63 bits from crypto/rand and use math/rand to generate the outputs, + this method is not cryptographically secure. This is directly stated in the math/rand package, + https://github.com/golang/go/blob/release-branch.go1.17/src/math/rand/rand.go#L15. For any security + sensitive work this particular generator is NOT to be used. 2. For cryptographically secure non-deterministic mode (CSPRNG), use: - import "github.com/prysmaticlabs/prysm/v3/crypto/rand" - randGen := rand.NewGenerator() - randGen.Intn(32) // or any other func defined in math.rand API + import "github.com/prysmaticlabs/prysm/v3/crypto/rand" + randGen := rand.NewGenerator() + randGen.Intn(32) // or any other func defined in math.rand API - Again, any of the functions from `math/rand` can be used, however, they all use custom source - of randomness (crypto/rand), on every step. This makes randomness non-deterministic. However, - you take a performance hit -- as it is an order of magnitude slower. + Again, any of the functions from `math/rand` can be used, however, they all use custom source + of randomness (crypto/rand), on every step. This makes randomness non-deterministic. However, + you take a performance hit -- as it is an order of magnitude slower. 
*/ package rand diff --git a/encoding/ssz/equality/deep_equal.go b/encoding/ssz/equality/deep_equal.go index 854117e04..7dcbf6ba9 100644 --- a/encoding/ssz/equality/deep_equal.go +++ b/encoding/ssz/equality/deep_equal.go @@ -247,7 +247,7 @@ func deepValueBaseTypeEqual(v1, v2 reflect.Value) bool { } } -// DeepEqual reports whether two SSZ-able values x and y are ``deeply equal,'' defined as follows: +// DeepEqual reports whether two SSZ-able values x and y are “deeply equal,” defined as follows: // Two values of identical type are deeply equal if one of the following cases applies: // // Values of distinct types are never deeply equal. diff --git a/fuzzbuzz.yaml b/fuzzbuzz.yaml index 25a012e06..3a24165c9 100644 --- a/fuzzbuzz.yaml +++ b/fuzzbuzz.yaml @@ -1,6 +1,6 @@ base: language: go - version: 1.18 + version: 1.19 build_tags: - fuzz - develop diff --git a/go.mod b/go.mod index f9fab3bae..88473233a 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/prysmaticlabs/prysm/v3 -go 1.18 +go 1.19 require ( contrib.go.opencensus.io/exporter/jaeger v0.2.1 diff --git a/monitoring/journald/journald_linux.go b/monitoring/journald/journald_linux.go index bbd697c1a..1045e3f52 100644 --- a/monitoring/journald/journald_linux.go +++ b/monitoring/journald/journald_linux.go @@ -6,7 +6,7 @@ import ( "github.com/wercker/journalhook" ) -//Enable enables the journald logrus hook +// Enable enables the journald logrus hook func Enable() error { journalhook.Enable() return nil diff --git a/runtime/interop/generate_genesis_state.go b/runtime/interop/generate_genesis_state.go index fd1a6e959..064e0f526 100644 --- a/runtime/interop/generate_genesis_state.go +++ b/runtime/interop/generate_genesis_state.go @@ -186,8 +186,10 @@ func createDepositData(privKey bls.SecretKey, pubKey bls.PublicKey) (*ethpb.Depo // address. // // The specification is as follows: -// withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX_BYTE -// withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:] +// +// withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX_BYTE +// withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:] +// // where withdrawal_credentials is of type bytes32. func withdrawalCredentialsHash(pubKey []byte) []byte { h := hash.Hash(pubKey) diff --git a/testing/endtoend/endtoend_test.go b/testing/endtoend/endtoend_test.go index 1b3267ca5..f8dfed55e 100644 --- a/testing/endtoend/endtoend_test.go +++ b/testing/endtoend/endtoend_test.go @@ -568,7 +568,6 @@ func (r *testRunner) executeProvidedEvaluators(currentEpoch uint64, conns []*grp // Along with that we will also take a single lighthouse node and its validator offline. // After 1 epoch we will then attempt to bring it online again. // -// // 2) Then we will start testing optimistic sync by engaging our engine proxy. // After the proxy has been sending `SYNCING` responses to the beacon node, we // will test this with our optimistic sync evaluator to ensure everything works diff --git a/time/slots/slottime.go b/time/slots/slottime.go index 03d6673aa..06d085e91 100644 --- a/time/slots/slottime.go +++ b/time/slots/slottime.go @@ -65,11 +65,12 @@ func AbsoluteValueSlotDifference(x, y types.Slot) uint64 { // ToEpoch returns the epoch number of the input slot. // // Spec pseudocode definition: -// def compute_epoch_at_slot(slot: Slot) -> Epoch: -// """ -// Return the epoch number at ``slot``. -// """ -// return Epoch(slot // SLOTS_PER_EPOCH) +// +// def compute_epoch_at_slot(slot: Slot) -> Epoch: +// """ +// Return the epoch number at ``slot``. 
+// """ +// return Epoch(slot // SLOTS_PER_EPOCH) func ToEpoch(slot types.Slot) types.Epoch { return types.Epoch(slot.DivSlot(params.BeaconConfig().SlotsPerEpoch)) } @@ -78,11 +79,12 @@ func ToEpoch(slot types.Slot) types.Epoch { // current epoch. // // Spec pseudocode definition: -// def compute_start_slot_at_epoch(epoch: Epoch) -> Slot: -// """ -// Return the start slot of ``epoch``. -// """ -// return Slot(epoch * SLOTS_PER_EPOCH) +// +// def compute_start_slot_at_epoch(epoch: Epoch) -> Slot: +// """ +// Return the start slot of ``epoch``. +// """ +// return Slot(epoch * SLOTS_PER_EPOCH) func EpochStart(epoch types.Epoch) (types.Slot, error) { slot, err := params.BeaconConfig().SlotsPerEpoch.SafeMul(uint64(epoch)) if err != nil { @@ -213,7 +215,8 @@ func PrevSlot(slot types.Slot) types.Slot { // // Spec code: // def compute_sync_committee_period(epoch: Epoch) -> uint64: -// return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD +// +// return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD func SyncCommitteePeriod(e types.Epoch) uint64 { return uint64(e / params.BeaconConfig().EpochsPerSyncCommitteePeriod) } diff --git a/tools/analyzers/errcheck/analyzer.go b/tools/analyzers/errcheck/analyzer.go index 5f09c0d80..d4be49f82 100644 --- a/tools/analyzers/errcheck/analyzer.go +++ b/tools/analyzers/errcheck/analyzer.go @@ -313,16 +313,16 @@ func namesForExcludeCheck(pass *analysis.Pass, call *ast.CallExpr) []string { // // For example, say we have: // -// type Inner interface {Method()} -// type Middle interface {Inner} -// type Outer interface {Middle} -// type T struct {Outer} -// type U struct {T} -// type V struct {U} +// type Inner interface {Method()} +// type Middle interface {Inner} +// type Outer interface {Middle} +// type T struct {Outer} +// type U struct {T} +// type V struct {U} // // And then the selector: // -// V.Method +// V.Method // // We'll return [Outer, Middle, Inner] by first walking through the embedded structs // until we reach the Outer interface, then descending through the embedded interfaces diff --git a/tools/analyzers/gocognit/analyzer.go b/tools/analyzers/gocognit/analyzer.go index c69bfc0c0..479f092ca 100644 --- a/tools/analyzers/gocognit/analyzer.go +++ b/tools/analyzers/gocognit/analyzer.go @@ -25,7 +25,8 @@ var Analyzer = &analysis.Analyzer{ // Recommended thresholds according to the 2008 presentation titled // "Software Quality Metrics to Identify Risk" by Thomas McCabe Jr. // -// 1 - 10 Simple procedure, little risk +// 1 - 10 Simple procedure, little risk +// // 11 - 20 More complex, moderate risk // 21 - 50 Complex, high risk // > 50 Untestable code, very high risk diff --git a/tools/blocktree/main.go b/tools/blocktree/main.go index ca2724098..2a8796f6c 100644 --- a/tools/blocktree/main.go +++ b/tools/blocktree/main.go @@ -1,11 +1,11 @@ -/** - * Block tree graph viz - * - * Given a DB, start slot and end slot. This tool computes the graphviz data - * needed to construct the block tree in graphviz data format. Then one can paste - * the data in a Graph rendering engine (ie. http://www.webgraphviz.com/) to see the visual format. - - */ +/* +* + - Block tree graph viz + * + - Given a DB, start slot and end slot. This tool computes the graphviz data + - needed to construct the block tree in graphviz data format. Then one can paste + - the data in a Graph rendering engine (ie. http://www.webgraphviz.com/) to see the visual format. 
+*/ package main import ( diff --git a/tools/interop/export-genesis/main.go b/tools/interop/export-genesis/main.go index 96f1b719a..4556eb2d6 100644 --- a/tools/interop/export-genesis/main.go +++ b/tools/interop/export-genesis/main.go @@ -11,7 +11,8 @@ import ( // A basic tool to extract genesis.ssz from existing beaconchain.db. // ex: -// bazel run //tools/interop/export-genesis:export-genesis -- /tmp/data/beaconchaindata /tmp/genesis.ssz +// +// bazel run //tools/interop/export-genesis:export-genesis -- /tmp/data/beaconchaindata /tmp/genesis.ssz func main() { if len(os.Args) < 3 { fmt.Println("Usage: ./main /path/to/datadir /path/to/output/genesis.ssz") diff --git a/tools/replay-http/main.go b/tools/replay-http/main.go index c35d2fffd..517afad2a 100644 --- a/tools/replay-http/main.go +++ b/tools/replay-http/main.go @@ -1,4 +1,5 @@ -/** +/* +* Tool for replaying http requests from a file of base64 encoded, line-delimited Go http raw requests. Credits to https://gist.github.com/kasey/c9e663eae5baebbf8fbe548c2b1d961b. */ diff --git a/validator/client/attest.go b/validator/client/attest.go index aec981d94..de1bb3beb 100644 --- a/validator/client/attest.go +++ b/validator/client/attest.go @@ -247,8 +247,9 @@ func (v *validator) saveAttesterIndexToData(data *ethpb.AttestationData, index t } // waitOneThirdOrValidBlock waits until (a) or (b) whichever comes first: -// (a) the validator has received a valid block that is the same slot as input slot -// (b) one-third of the slot has transpired (SECONDS_PER_SLOT / 3 seconds after the start of slot) +// +// (a) the validator has received a valid block that is the same slot as input slot +// (b) one-third of the slot has transpired (SECONDS_PER_SLOT / 3 seconds after the start of slot) func (v *validator) waitOneThirdOrValidBlock(ctx context.Context, slot types.Slot) { ctx, span := trace.StartSpan(ctx, "validator.waitOneThirdOrValidBlock") defer span.End() diff --git a/validator/keymanager/remote/doc.go b/validator/keymanager/remote/doc.go index ea464c9e7..7970b8c02 100644 --- a/validator/keymanager/remote/doc.go +++ b/validator/keymanager/remote/doc.go @@ -14,46 +14,46 @@ the remote server. Remote sign requests are defined by the following protobuf schema: - // SignRequest is a message type used by a keymanager - // as part of Prysm's accounts implementation. - message SignRequest { - // 48 byte public key corresponding to an associated private key - // being requested to sign data. - bytes public_key = 1; + // SignRequest is a message type used by a keymanager + // as part of Prysm's accounts implementation. + message SignRequest { + // 48 byte public key corresponding to an associated private key + // being requested to sign data. + bytes public_key = 1; - // Raw bytes signing root the client is requesting to sign. The client is - // expected to determine these raw bytes from the appropriate BLS - // signing domain as well as the signing root of the data structure - // the bytes represent. - bytes signing_root = 2; - } + // Raw bytes signing root the client is requesting to sign. The client is + // expected to determine these raw bytes from the appropriate BLS + // signing domain as well as the signing root of the data structure + // the bytes represent. + bytes signing_root = 2; + } Remote signing responses will contain a BLS12-381 signature along with the status of the signing response from the remote server, signifying the request either failed, was denied, or completed successfully. 
- message SignResponse { - enum Status { - UNKNOWN = 0; - SUCCEEDED = 1; - DENIED = 2; - FAILED = 3; - } + message SignResponse { + enum Status { + UNKNOWN = 0; + SUCCEEDED = 1; + DENIED = 2; + FAILED = 3; + } - // BLS12-381 signature for the data specified in the request. - bytes signature = 1; - } + // BLS12-381 signature for the data specified in the request. + bytes signature = 1; + } The remote keymanager can be customized via a keymanageropts.json file which requires the following schema: - { - "remote_address": "remoteserver.com:4000", // Remote gRPC server address. - "remote_cert": { - "crt_path": "/home/eth2/certs/client.crt", // Client certificate path. - "ca_crt_path": "/home/eth2/certs/ca.crt", // Certificate authority cert path. - "key_path": "/home/eth2/certs/client.key", // Client key path. - } - } + { + "remote_address": "remoteserver.com:4000", // Remote gRPC server address. + "remote_cert": { + "crt_path": "/home/eth2/certs/client.crt", // Client certificate path. + "ca_crt_path": "/home/eth2/certs/ca.crt", // Certificate authority cert path. + "key_path": "/home/eth2/certs/client.key", // Client key path. + } + } */ package remote diff --git a/validator/rpc/slashing.go b/validator/rpc/slashing.go index eecf4ab09..ec9552783 100644 --- a/validator/rpc/slashing.go +++ b/validator/rpc/slashing.go @@ -19,9 +19,9 @@ import ( // easy to migrate machines or Ethereum consensus clients. // // Steps: -// 1. Call the function which exports the data from -// the validator's db into an EIP standard slashing protection format. -// 2. Format and send JSON in the response. +// 1. Call the function which exports the data from +// the validator's db into an EIP standard slashing protection format. +// 2. Format and send JSON in the response. func (s *Server) ExportSlashingProtection(ctx context.Context, _ *empty.Empty) (*pb.ExportSlashingProtectionResponse, error) { if s.valDB == nil { return nil, errors.New("err finding validator database at path") diff --git a/validator/slashing-protection-history/import.go b/validator/slashing-protection-history/import.go index 024e74d3e..ab4e49bc5 100644 --- a/validator/slashing-protection-history/import.go +++ b/validator/slashing-protection-history/import.go @@ -174,18 +174,19 @@ func validateMetadata(ctx context.Context, validatorDB db.Database, interchangeJ // We create a map of pubKey -> []*SignedBlock. Then, for each public key we observe, // we append to this map. This allows us to handle valid input JSON data such as: // -// "0x2932232930: { -// SignedBlocks: [Slot: 5, Slot: 6, Slot: 7], -// }, -// "0x2932232930: { -// SignedBlocks: [Slot: 5, Slot: 10, Slot: 11], -// } +// "0x2932232930: { +// SignedBlocks: [Slot: 5, Slot: 6, Slot: 7], +// }, +// +// "0x2932232930: { +// SignedBlocks: [Slot: 5, Slot: 10, Slot: 11], +// } // // Which should be properly parsed as: // -// "0x2932232930: { -// SignedBlocks: [Slot: 5, Slot: 5, Slot: 6, Slot: 7, Slot: 10, Slot: 11], -// } +// "0x2932232930: { +// SignedBlocks: [Slot: 5, Slot: 5, Slot: 6, Slot: 7, Slot: 10, Slot: 11], +// } func parseBlocksForUniquePublicKeys(data []*format.ProtectionData) (map[[fieldparams.BLSPubkeyLength]byte][]*format.SignedBlock, error) { signedBlocksByPubKey := make(map[[fieldparams.BLSPubkeyLength]byte][]*format.SignedBlock) for _, validatorData := range data { @@ -206,18 +207,19 @@ func parseBlocksForUniquePublicKeys(data []*format.ProtectionData) (map[[fieldpa // We create a map of pubKey -> []*SignedAttestation. Then, for each public key we observe, // we append to this map. 
This allows us to handle valid input JSON data such as: // -// "0x2932232930: { -// SignedAttestations: [{Source: 5, Target: 6}, {Source: 6, Target: 7}], -// }, -// "0x2932232930: { -// SignedAttestations: [{Source: 5, Target: 6}], -// } +// "0x2932232930: { +// SignedAttestations: [{Source: 5, Target: 6}, {Source: 6, Target: 7}], +// }, +// +// "0x2932232930: { +// SignedAttestations: [{Source: 5, Target: 6}], +// } // // Which should be properly parsed as: // -// "0x2932232930: { -// SignedAttestations: [{Source: 5, Target: 6}, {Source: 5, Target: 6}, {Source: 6, Target: 7}], -// } +// "0x2932232930: { +// SignedAttestations: [{Source: 5, Target: 6}, {Source: 5, Target: 6}, {Source: 6, Target: 7}], +// } func parseAttestationsForUniquePublicKeys(data []*format.ProtectionData) (map[[fieldparams.BLSPubkeyLength]byte][]*format.SignedAttestation, error) { signedAttestationsByPubKey := make(map[[fieldparams.BLSPubkeyLength]byte][]*format.SignedAttestation) for _, validatorData := range data {