```python def integer_squareroot(n: uint64) -> uint64: """ Return the largest integer ``x`` such that ``x**2 <= n``. """ x = n y = (x + 1) // 2 while y < x: x = y y = (x + n // x) // 2 return x ``` ```python def xor(bytes_1: Bytes32, bytes_2: Bytes32) -> Bytes32: """ Return the exclusive-or of two 32-byte strings. """ return Bytes32(a ^ b for a, b in zip(bytes_1, bytes_2)) ``` ```python def bytes_to_uint64(data: bytes) -> uint64: """ Return the integer deserialization of ``data`` interpreted as ``ENDIANNESS``-endian. """ return uint64(int.from_bytes(data, ENDIANNESS)) ``` ```python def is_active_validator(validator: Validator, epoch: Epoch) -> bool: """ Check if ``validator`` is active. """ return validator.activation_epoch <= epoch < validator.exit_epoch ``` ```python def is_eligible_for_activation_queue(validator: Validator) -> bool: """ Check if ``validator`` is eligible to be placed into the activation queue. """ return ( validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and validator.effective_balance == MAX_EFFECTIVE_BALANCE ) ``` ```python def is_eligible_for_activation(state: BeaconState, validator: Validator) -> bool: """ Check if ``validator`` is eligible for activation. """ return ( # Placement in queue is finalized validator.activation_eligibility_epoch <= state.finalized_checkpoint.epoch # Has not yet been activated and validator.activation_epoch == FAR_FUTURE_EPOCH ) ``` ```python def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool: """ Check if ``validator`` is slashable. """ return (not validator.slashed) and (validator.activation_epoch <= epoch < validator.withdrawable_epoch) ``` ```python def is_slashable_attestation_data(data_1: AttestationData, data_2: AttestationData) -> bool: """ Check if ``data_1`` and ``data_2`` are slashable according to Casper FFG rules. """ return ( # Double vote (data_1 != data_2 and data_1.target.epoch == data_2.target.epoch) or # Surround vote (data_1.source.epoch < data_2.source.epoch and data_2.target.epoch < data_1.target.epoch) ) ``` ```python def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool: """ Check if ``indexed_attestation`` is not empty, has sorted and unique indices and has a valid aggregate signature. """ # Verify indices are sorted and unique indices = indexed_attestation.attesting_indices if len(indices) == 0 or not indices == sorted(set(indices)): return False # Verify aggregate signature pubkeys = [state.validators[i].pubkey for i in indices] domain = get_domain(state, DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch) signing_root = compute_signing_root(indexed_attestation.data, domain) return bls.FastAggregateVerify(pubkeys, signing_root, indexed_attestation.signature) ``` ```python def is_valid_merkle_branch(leaf: Bytes32, branch: Sequence[Bytes32], depth: uint64, index: uint64, root: Root) -> bool: """ Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and ``branch``. """ value = leaf for i in range(depth): if index // (2**i) % 2: value = hash(branch[i] + value) else: value = hash(value + branch[i]) return value == root ``` ```python def compute_shuffled_index(index: uint64, index_count: uint64, seed: Bytes32) -> uint64: """ Return the shuffled index corresponding to ``seed`` (and ``index_count``). """ assert index < index_count # Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf) # See the 'generalized domain' algorithm on page 3 for current_round in range(SHUFFLE_ROUND_COUNT): pivot = bytes_to_uint64(hash(seed + uint_to_bytes(uint8(current_round)))[0:8]) % index_count flip = (pivot + index_count - index) % index_count position = max(index, flip) source = hash( seed + uint_to_bytes(uint8(current_round)) + uint_to_bytes(uint32(position // 256)) ) byte = uint8(source[(position % 256) // 8]) bit = (byte >> (position % 8)) % 2 index = flip if bit else index return index ``` ```python def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex: """ Return from ``indices`` a random index sampled by effective balance. """ assert len(indices) > 0 MAX_RANDOM_BYTE = 2**8 - 1 i = uint64(0) total = uint64(len(indices)) while True: candidate_index = indices[compute_shuffled_index(i % total, total, seed)] random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32] effective_balance = state.validators[candidate_index].effective_balance if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: return candidate_index i += 1 ``` ```python def compute_committee(indices: Sequence[ValidatorIndex], seed: Bytes32, index: uint64, count: uint64) -> Sequence[ValidatorIndex]: """ Return the committee corresponding to ``indices``, ``seed``, ``index``, and committee ``count``. """ start = (len(indices) * index) // count end = (len(indices) * uint64(index + 1)) // count return [indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)] for i in range(start, end)] ``` ```python def compute_epoch_at_slot(slot: Slot) -> Epoch: """ Return the epoch number at ``slot``. """ return Epoch(slot // SLOTS_PER_EPOCH) ``` ```python def compute_start_slot_at_epoch(epoch: Epoch) -> Slot: """ Return the start slot of ``epoch``. """ return Slot(epoch * SLOTS_PER_EPOCH) ``` ```python def compute_activation_exit_epoch(epoch: Epoch) -> Epoch: """ Return the epoch during which validator activations and exits initiated in ``epoch`` take effect. """ return Epoch(epoch + 1 + MAX_SEED_LOOKAHEAD) ``` ```python def compute_fork_data_root(current_version: Version, genesis_validators_root: Root) -> Root: """ Return the 32-byte fork data root for the ``current_version`` and ``genesis_validators_root``. This is used primarily in signature domains to avoid collisions across forks/chains. """ return hash_tree_root(ForkData( current_version=current_version, genesis_validators_root=genesis_validators_root, )) ``` ```python def compute_fork_digest(current_version: Version, genesis_validators_root: Root) -> ForkDigest: """ Return the 4-byte fork digest for the ``current_version`` and ``genesis_validators_root``. This is a digest primarily used for domain separation on the p2p layer. 4-bytes suffices for practical separation of forks/chains. """ return ForkDigest(compute_fork_data_root(current_version, genesis_validators_root)[:4]) ``` ```python def compute_domain(domain_type: DomainType, fork_version: Version=None, genesis_validators_root: Root=None) -> Domain: """ Return the domain for the ``domain_type`` and ``fork_version``. """ if fork_version is None: fork_version = GENESIS_FORK_VERSION if genesis_validators_root is None: genesis_validators_root = Root() # all bytes zero by default fork_data_root = compute_fork_data_root(fork_version, genesis_validators_root) return Domain(domain_type + fork_data_root[:28]) ``` ```python def compute_signing_root(ssz_object: SSZObject, domain: Domain) -> Root: """ Return the signing root for the corresponding signing data. """ return hash_tree_root(SigningData( object_root=hash_tree_root(ssz_object), domain=domain, )) ``` ```python def get_current_epoch(state: BeaconState) -> Epoch: """ Return the current epoch. """ return compute_epoch_at_slot(state.slot) ``` ```python def get_previous_epoch(state: BeaconState) -> Epoch: """` Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``). """ current_epoch = get_current_epoch(state) return GENESIS_EPOCH if current_epoch == GENESIS_EPOCH else Epoch(current_epoch - 1) ``` ```python def get_block_root(state: BeaconState, epoch: Epoch) -> Root: """ Return the block root at the start of a recent ``epoch``. """ return get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch)) ``` ```python def get_block_root_at_slot(state: BeaconState, slot: Slot) -> Root: """ Return the block root at a recent ``slot``. """ assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT return state.block_roots[slot % SLOTS_PER_HISTORICAL_ROOT] ``` ```python def get_randao_mix(state: BeaconState, epoch: Epoch) -> Bytes32: """ Return the randao mix at a recent ``epoch``. """ return state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] ``` ```python def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]: """ Return the sequence of active validator indices at ``epoch``. """ return [ValidatorIndex(i) for i, v in enumerate(state.validators) if is_active_validator(v, epoch)] ``` ```python def get_validator_churn_limit(state: BeaconState) -> uint64: """ Return the validator churn limit for the current epoch. """ active_validator_indices = get_active_validator_indices(state, get_current_epoch(state)) return max(MIN_PER_EPOCH_CHURN_LIMIT, uint64(len(active_validator_indices)) // CHURN_LIMIT_QUOTIENT) ``` ```python def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Bytes32: """ Return the seed at ``epoch``. """ mix = get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1)) # Avoid underflow return hash(domain_type + uint_to_bytes(epoch) + mix) ``` ```python def get_committee_count_per_slot(state: BeaconState, epoch: Epoch) -> uint64: """ Return the number of committees in each slot for the given ``epoch``. """ return max(uint64(1), min( MAX_COMMITTEES_PER_SLOT, uint64(len(get_active_validator_indices(state, epoch))) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, )) ``` ```python def get_beacon_committee(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Sequence[ValidatorIndex]: """ Return the beacon committee at ``slot`` for ``index``. """ epoch = compute_epoch_at_slot(slot) committees_per_slot = get_committee_count_per_slot(state, epoch) return compute_committee( indices=get_active_validator_indices(state, epoch), seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER), index=(slot % SLOTS_PER_EPOCH) * committees_per_slot + index, count=committees_per_slot * SLOTS_PER_EPOCH, ) ``` ```python def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: """ Return the beacon proposer index at the current slot. """ epoch = get_current_epoch(state) seed = hash(get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + uint_to_bytes(state.slot)) indices = get_active_validator_indices(state, epoch) return compute_proposer_index(state, indices, seed) ``` ```python def get_total_balance(state: BeaconState, indices: Set[ValidatorIndex]) -> Gwei: """ Return the combined effective balance of the ``indices``. ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. Math safe up to ~10B ETH, afterwhich this overflows uint64. """ return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices]))) ``` ```python def get_total_active_balance(state: BeaconState) -> Gwei: """ Return the combined effective balance of the active validators. Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. """ return get_total_balance(state, set(get_active_validator_indices(state, get_current_epoch(state)))) ``` ```python def get_domain(state: BeaconState, domain_type: DomainType, epoch: Epoch=None) -> Domain: """ Return the signature domain (fork version concatenated with domain type) of a message. """ epoch = get_current_epoch(state) if epoch is None else epoch fork_version = state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version return compute_domain(domain_type, fork_version, state.genesis_validators_root) ``` ```python def get_indexed_attestation(state: BeaconState, attestation: Attestation) -> IndexedAttestation: """ Return the indexed attestation corresponding to ``attestation``. """ attesting_indices = get_attesting_indices(state, attestation.data, attestation.aggregation_bits) return IndexedAttestation( attesting_indices=sorted(attesting_indices), data=attestation.data, signature=attestation.signature, ) ``` ```python def get_attesting_indices(state: BeaconState, data: AttestationData, bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]) -> Set[ValidatorIndex]: """ Return the set of attesting indices corresponding to ``data`` and ``bits``. """ committee = get_beacon_committee(state, data.slot, data.index) return set(index for i, index in enumerate(committee) if bits[i]) ``` ```python def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: """ Increase the validator balance at index ``index`` by ``delta``. """ state.balances[index] += delta ``` ```python def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None: """ Decrease the validator balance at index ``index`` by ``delta``, with underflow protection. """ state.balances[index] = 0 if delta > state.balances[index] else state.balances[index] - delta ``` ```python def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: """ Initiate the exit of the validator with index ``index``. """ # Return if validator already initiated exit validator = state.validators[index] if validator.exit_epoch != FAR_FUTURE_EPOCH: return # Compute exit queue epoch exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH] exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))]) exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch]) if exit_queue_churn >= get_validator_churn_limit(state): exit_queue_epoch += Epoch(1) # Set validator exit epoch and withdrawable epoch validator.exit_epoch = exit_queue_epoch validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY) ``` ```python def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistleblower_index: ValidatorIndex=None) -> None: """ Slash the validator with index ``slashed_index``. """ epoch = get_current_epoch(state) initiate_validator_exit(state, slashed_index) validator = state.validators[slashed_index] validator.slashed = True validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR)) state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance decrease_balance(state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT) # Apply proposer and whistleblower rewards proposer_index = get_beacon_proposer_index(state) if whistleblower_index is None: whistleblower_index = proposer_index whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT) proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT) increase_balance(state, proposer_index, proposer_reward) increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward)) ``` ```python def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32, eth1_timestamp: uint64, deposits: Sequence[Deposit]) -> BeaconState: fork = Fork( previous_version=GENESIS_FORK_VERSION, current_version=GENESIS_FORK_VERSION, epoch=GENESIS_EPOCH, ) state = BeaconState( genesis_time=eth1_timestamp + GENESIS_DELAY, fork=fork, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy ) # Process deposits leaves = list(map(lambda deposit: deposit.data, deposits)) for index, deposit in enumerate(deposits): deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1]) state.eth1_data.deposit_root = hash_tree_root(deposit_data_list) process_deposit(state, deposit) # Process activations for index, validator in enumerate(state.validators): balance = state.balances[index] validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) if validator.effective_balance == MAX_EFFECTIVE_BALANCE: validator.activation_eligibility_epoch = GENESIS_EPOCH validator.activation_epoch = GENESIS_EPOCH # Set genesis validators root for domain separation and chain versioning state.genesis_validators_root = hash_tree_root(state.validators) return state ``` ```python def is_valid_genesis_state(state: BeaconState) -> bool: if state.genesis_time < MIN_GENESIS_TIME: return False if len(get_active_validator_indices(state, GENESIS_EPOCH)) < MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: return False return True ``` ```python def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> None: block = signed_block.message # Process slots (including those with no blocks) since block process_slots(state, block.slot) # Verify signature if validate_result: assert verify_block_signature(state, signed_block) # Process block process_block(state, block) # Verify state root if validate_result: assert block.state_root == hash_tree_root(state) ``` ```python def verify_block_signature(state: BeaconState, signed_block: SignedBeaconBlock) -> bool: proposer = state.validators[signed_block.message.proposer_index] signing_root = compute_signing_root(signed_block.message, get_domain(state, DOMAIN_BEACON_PROPOSER)) return bls.Verify(proposer.pubkey, signing_root, signed_block.signature) ``` ```python def process_slots(state: BeaconState, slot: Slot) -> None: assert state.slot < slot while state.slot < slot: process_slot(state) # Process epoch on the start slot of the next epoch if (state.slot + 1) % SLOTS_PER_EPOCH == 0: process_epoch(state) state.slot = Slot(state.slot + 1) ``` ```python def process_slot(state: BeaconState) -> None: # Cache state root previous_state_root = hash_tree_root(state) state.state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_state_root # Cache latest block header state root if state.latest_block_header.state_root == Bytes32(): state.latest_block_header.state_root = previous_state_root # Cache block root previous_block_root = hash_tree_root(state.latest_block_header) state.block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_block_root ``` ```python def process_epoch(state: BeaconState) -> None: process_justification_and_finalization(state) process_rewards_and_penalties(state) process_registry_updates(state) process_slashings(state) process_eth1_data_reset(state) process_effective_balance_updates(state) process_slashings_reset(state) process_randao_mixes_reset(state) process_historical_roots_update(state) process_participation_record_updates(state) ``` ```python def get_matching_source_attestations(state: BeaconState, epoch: Epoch) -> Sequence[PendingAttestation]: assert epoch in (get_previous_epoch(state), get_current_epoch(state)) return state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations ``` ```python def get_matching_target_attestations(state: BeaconState, epoch: Epoch) -> Sequence[PendingAttestation]: return [ a for a in get_matching_source_attestations(state, epoch) if a.data.target.root == get_block_root(state, epoch) ] ``` ```python def get_matching_head_attestations(state: BeaconState, epoch: Epoch) -> Sequence[PendingAttestation]: return [ a for a in get_matching_target_attestations(state, epoch) if a.data.beacon_block_root == get_block_root_at_slot(state, a.data.slot) ] ``` ```python def get_unslashed_attesting_indices(state: BeaconState, attestations: Sequence[PendingAttestation]) -> Set[ValidatorIndex]: output = set() # type: Set[ValidatorIndex] for a in attestations: output = output.union(get_attesting_indices(state, a.data, a.aggregation_bits)) return set(filter(lambda index: not state.validators[index].slashed, output)) ``` ```python def get_attesting_balance(state: BeaconState, attestations: Sequence[PendingAttestation]) -> Gwei: """ Return the combined effective balance of the set of unslashed validators participating in ``attestations``. Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. """ return get_total_balance(state, get_unslashed_attesting_indices(state, attestations)) ``` ```python def process_justification_and_finalization(state: BeaconState) -> None: # Initial FFG checkpoint values have a `0x00` stub for `root`. # Skip FFG updates in the first two epochs to avoid corner cases that might result in modifying this stub. if get_current_epoch(state) <= GENESIS_EPOCH + 1: return previous_attestations = get_matching_target_attestations(state, get_previous_epoch(state)) current_attestations = get_matching_target_attestations(state, get_current_epoch(state)) total_active_balance = get_total_active_balance(state) previous_target_balance = get_attesting_balance(state, previous_attestations) current_target_balance = get_attesting_balance(state, current_attestations) weigh_justification_and_finalization(state, total_active_balance, previous_target_balance, current_target_balance) ``` ```python def weigh_justification_and_finalization(state: BeaconState, total_active_balance: Gwei, previous_epoch_target_balance: Gwei, current_epoch_target_balance: Gwei) -> None: previous_epoch = get_previous_epoch(state) current_epoch = get_current_epoch(state) old_previous_justified_checkpoint = state.previous_justified_checkpoint old_current_justified_checkpoint = state.current_justified_checkpoint # Process justifications state.previous_justified_checkpoint = state.current_justified_checkpoint state.justification_bits[1:] = state.justification_bits[:JUSTIFICATION_BITS_LENGTH - 1] state.justification_bits[0] = 0b0 if previous_epoch_target_balance * 3 >= total_active_balance * 2: state.current_justified_checkpoint = Checkpoint(epoch=previous_epoch, root=get_block_root(state, previous_epoch)) state.justification_bits[1] = 0b1 if current_epoch_target_balance * 3 >= total_active_balance * 2: state.current_justified_checkpoint = Checkpoint(epoch=current_epoch, root=get_block_root(state, current_epoch)) state.justification_bits[0] = 0b1 # Process finalizations bits = state.justification_bits # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch: state.finalized_checkpoint = old_previous_justified_checkpoint # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch: state.finalized_checkpoint = old_previous_justified_checkpoint # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch: state.finalized_checkpoint = old_current_justified_checkpoint # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch: state.finalized_checkpoint = old_current_justified_checkpoint ``` ```python def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: total_balance = get_total_active_balance(state) effective_balance = state.validators[index].effective_balance return Gwei(effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH) ``` ```python def get_proposer_reward(state: BeaconState, attesting_index: ValidatorIndex) -> Gwei: return Gwei(get_base_reward(state, attesting_index) // PROPOSER_REWARD_QUOTIENT) ``` ```python def get_finality_delay(state: BeaconState) -> uint64: return get_previous_epoch(state) - state.finalized_checkpoint.epoch ``` ```python def is_in_inactivity_leak(state: BeaconState) -> bool: return get_finality_delay(state) > MIN_EPOCHS_TO_INACTIVITY_PENALTY ``` ```python def get_eligible_validator_indices(state: BeaconState) -> Sequence[ValidatorIndex]: previous_epoch = get_previous_epoch(state) return [ ValidatorIndex(index) for index, v in enumerate(state.validators) if is_active_validator(v, previous_epoch) or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch) ] ``` ```python def get_attestation_component_deltas(state: BeaconState, attestations: Sequence[PendingAttestation] ) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Helper with shared logic for use by get source, target, and head deltas functions """ rewards = [Gwei(0)] * len(state.validators) penalties = [Gwei(0)] * len(state.validators) total_balance = get_total_active_balance(state) unslashed_attesting_indices = get_unslashed_attesting_indices(state, attestations) attesting_balance = get_total_balance(state, unslashed_attesting_indices) for index in get_eligible_validator_indices(state): if index in unslashed_attesting_indices: increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from balance totals to avoid uint64 overflow if is_in_inactivity_leak(state): # Since full base reward will be canceled out by inactivity penalty deltas, # optimal participation receives full base reward compensation here. rewards[index] += get_base_reward(state, index) else: reward_numerator = get_base_reward(state, index) * (attesting_balance // increment) rewards[index] += reward_numerator // (total_balance // increment) else: penalties[index] += get_base_reward(state, index) return rewards, penalties ``` ```python def get_source_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Return attester micro-rewards/penalties for source-vote for each validator. """ matching_source_attestations = get_matching_source_attestations(state, get_previous_epoch(state)) return get_attestation_component_deltas(state, matching_source_attestations) ``` ```python def get_target_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Return attester micro-rewards/penalties for target-vote for each validator. """ matching_target_attestations = get_matching_target_attestations(state, get_previous_epoch(state)) return get_attestation_component_deltas(state, matching_target_attestations) ``` ```python def get_head_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Return attester micro-rewards/penalties for head-vote for each validator. """ matching_head_attestations = get_matching_head_attestations(state, get_previous_epoch(state)) return get_attestation_component_deltas(state, matching_head_attestations) ``` ```python def get_inclusion_delay_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Return proposer and inclusion delay micro-rewards/penalties for each validator. """ rewards = [Gwei(0) for _ in range(len(state.validators))] matching_source_attestations = get_matching_source_attestations(state, get_previous_epoch(state)) for index in get_unslashed_attesting_indices(state, matching_source_attestations): attestation = min([ a for a in matching_source_attestations if index in get_attesting_indices(state, a.data, a.aggregation_bits) ], key=lambda a: a.inclusion_delay) rewards[attestation.proposer_index] += get_proposer_reward(state, index) max_attester_reward = Gwei(get_base_reward(state, index) - get_proposer_reward(state, index)) rewards[index] += Gwei(max_attester_reward // attestation.inclusion_delay) # No penalties associated with inclusion delay penalties = [Gwei(0) for _ in range(len(state.validators))] return rewards, penalties ``` ```python def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Return inactivity reward/penalty deltas for each validator. """ penalties = [Gwei(0) for _ in range(len(state.validators))] if is_in_inactivity_leak(state): matching_target_attestations = get_matching_target_attestations(state, get_previous_epoch(state)) matching_target_attesting_indices = get_unslashed_attesting_indices(state, matching_target_attestations) for index in get_eligible_validator_indices(state): # If validator is performing optimally this cancels all rewards for a neutral balance base_reward = get_base_reward(state, index) penalties[index] += Gwei(BASE_REWARDS_PER_EPOCH * base_reward - get_proposer_reward(state, index)) if index not in matching_target_attesting_indices: effective_balance = state.validators[index].effective_balance penalties[index] += Gwei(effective_balance * get_finality_delay(state) // INACTIVITY_PENALTY_QUOTIENT) # No rewards associated with inactivity penalties rewards = [Gwei(0) for _ in range(len(state.validators))] return rewards, penalties ``` ```python def get_attestation_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]: """ Return attestation reward/penalty deltas for each validator. """ source_rewards, source_penalties = get_source_deltas(state) target_rewards, target_penalties = get_target_deltas(state) head_rewards, head_penalties = get_head_deltas(state) inclusion_delay_rewards, _ = get_inclusion_delay_deltas(state) _, inactivity_penalties = get_inactivity_penalty_deltas(state) rewards = [ source_rewards[i] + target_rewards[i] + head_rewards[i] + inclusion_delay_rewards[i] for i in range(len(state.validators)) ] penalties = [ source_penalties[i] + target_penalties[i] + head_penalties[i] + inactivity_penalties[i] for i in range(len(state.validators)) ] return rewards, penalties ``` ```python def process_rewards_and_penalties(state: BeaconState) -> None: # No rewards are applied at the end of `GENESIS_EPOCH` because rewards are for work done in the previous epoch if get_current_epoch(state) == GENESIS_EPOCH: return rewards, penalties = get_attestation_deltas(state) for index in range(len(state.validators)): increase_balance(state, ValidatorIndex(index), rewards[index]) decrease_balance(state, ValidatorIndex(index), penalties[index]) ``` ```python def process_registry_updates(state: BeaconState) -> None: # Process activation eligibility and ejections for index, validator in enumerate(state.validators): if is_eligible_for_activation_queue(validator): validator.activation_eligibility_epoch = get_current_epoch(state) + 1 if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE: initiate_validator_exit(state, ValidatorIndex(index)) # Queue validators eligible for activation and not yet dequeued for activation activation_queue = sorted([ index for index, validator in enumerate(state.validators) if is_eligible_for_activation(state, validator) # Order by the sequence of activation_eligibility_epoch setting and then index ], key=lambda index: (state.validators[index].activation_eligibility_epoch, index)) # Dequeued validators for activation up to churn limit for index in activation_queue[:get_validator_churn_limit(state)]: validator = state.validators[index] validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state)) ``` ```python def process_slashings(state: BeaconState) -> None: epoch = get_current_epoch(state) total_balance = get_total_active_balance(state) adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER, total_balance) for index, validator in enumerate(state.validators): if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch: increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance penalty = penalty_numerator // total_balance * increment decrease_balance(state, ValidatorIndex(index), penalty) ``` ```python def process_eth1_data_reset(state: BeaconState) -> None: next_epoch = Epoch(get_current_epoch(state) + 1) # Reset eth1 data votes if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0: state.eth1_data_votes = [] ``` ```python def process_effective_balance_updates(state: BeaconState) -> None: # Update effective balances with hysteresis for index, validator in enumerate(state.validators): balance = state.balances[index] HYSTERESIS_INCREMENT = uint64(EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT) DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER if ( balance + DOWNWARD_THRESHOLD < validator.effective_balance or validator.effective_balance + UPWARD_THRESHOLD < balance ): validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) ``` ```python def process_slashings_reset(state: BeaconState) -> None: next_epoch = Epoch(get_current_epoch(state) + 1) # Reset slashings state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0) ``` ```python def process_randao_mixes_reset(state: BeaconState) -> None: current_epoch = get_current_epoch(state) next_epoch = Epoch(current_epoch + 1) # Set randao mix state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix(state, current_epoch) ``` ```python def process_historical_roots_update(state: BeaconState) -> None: # Set historical root accumulator next_epoch = Epoch(get_current_epoch(state) + 1) if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0: historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots) state.historical_roots.append(hash_tree_root(historical_batch)) ``` ```python def process_participation_record_updates(state: BeaconState) -> None: # Rotate current/previous epoch attestations state.previous_epoch_attestations = state.current_epoch_attestations state.current_epoch_attestations = [] ``` ```python def process_block(state: BeaconState, block: BeaconBlock) -> None: process_block_header(state, block) process_randao(state, block.body) process_eth1_data(state, block.body) process_operations(state, block.body) ``` ```python def process_block_header(state: BeaconState, block: BeaconBlock) -> None: # Verify that the slots match assert block.slot == state.slot # Verify that the block is newer than latest block header assert block.slot > state.latest_block_header.slot # Verify that proposer index is the correct index assert block.proposer_index == get_beacon_proposer_index(state) # Verify that the parent matches assert block.parent_root == hash_tree_root(state.latest_block_header) # Cache current block as the new latest block state.latest_block_header = BeaconBlockHeader( slot=block.slot, proposer_index=block.proposer_index, parent_root=block.parent_root, state_root=Bytes32(), # Overwritten in the next process_slot call body_root=hash_tree_root(block.body), ) # Verify proposer is not slashed proposer = state.validators[block.proposer_index] assert not proposer.slashed ``` ```python def process_randao(state: BeaconState, body: BeaconBlockBody) -> None: epoch = get_current_epoch(state) # Verify RANDAO reveal proposer = state.validators[get_beacon_proposer_index(state)] signing_root = compute_signing_root(epoch, get_domain(state, DOMAIN_RANDAO)) assert bls.Verify(proposer.pubkey, signing_root, body.randao_reveal) # Mix in RANDAO reveal mix = xor(get_randao_mix(state, epoch), hash(body.randao_reveal)) state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] = mix ``` ```python def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None: state.eth1_data_votes.append(body.eth1_data) if state.eth1_data_votes.count(body.eth1_data) * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH: state.eth1_data = body.eth1_data ``` ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: # Verify that outstanding deposits are processed up to the maximum number of deposits assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index) def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: for operation in operations: fn(state, operation) for_ops(body.proposer_slashings, process_proposer_slashing) for_ops(body.attester_slashings, process_attester_slashing) for_ops(body.attestations, process_attestation) for_ops(body.deposits, process_deposit) for_ops(body.voluntary_exits, process_voluntary_exit) ``` ```python def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSlashing) -> None: header_1 = proposer_slashing.signed_header_1.message header_2 = proposer_slashing.signed_header_2.message # Verify header slots match assert header_1.slot == header_2.slot # Verify header proposer indices match assert header_1.proposer_index == header_2.proposer_index # Verify the headers are different assert header_1 != header_2 # Verify the proposer is slashable proposer = state.validators[header_1.proposer_index] assert is_slashable_validator(proposer, get_current_epoch(state)) # Verify signatures for signed_header in (proposer_slashing.signed_header_1, proposer_slashing.signed_header_2): domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(signed_header.message.slot)) signing_root = compute_signing_root(signed_header.message, domain) assert bls.Verify(proposer.pubkey, signing_root, signed_header.signature) slash_validator(state, header_1.proposer_index) ``` ```python def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSlashing) -> None: attestation_1 = attester_slashing.attestation_1 attestation_2 = attester_slashing.attestation_2 assert is_slashable_attestation_data(attestation_1.data, attestation_2.data) assert is_valid_indexed_attestation(state, attestation_1) assert is_valid_indexed_attestation(state, attestation_2) slashed_any = False indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices) for index in sorted(indices): if is_slashable_validator(state.validators[index], get_current_epoch(state)): slash_validator(state, index) slashed_any = True assert slashed_any ``` ```python def process_attestation(state: BeaconState, attestation: Attestation) -> None: data = attestation.data assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) assert data.target.epoch == compute_epoch_at_slot(data.slot) assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH assert data.index < get_committee_count_per_slot(state, data.target.epoch) committee = get_beacon_committee(state, data.slot, data.index) assert len(attestation.aggregation_bits) == len(committee) pending_attestation = PendingAttestation( data=data, aggregation_bits=attestation.aggregation_bits, inclusion_delay=state.slot - data.slot, proposer_index=get_beacon_proposer_index(state), ) if data.target.epoch == get_current_epoch(state): assert data.source == state.current_justified_checkpoint state.current_epoch_attestations.append(pending_attestation) else: assert data.source == state.previous_justified_checkpoint state.previous_epoch_attestations.append(pending_attestation) # Verify signature assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation)) ``` ```python def get_validator_from_deposit(state: BeaconState, deposit: Deposit) -> Validator: amount = deposit.data.amount effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) return Validator( pubkey=deposit.data.pubkey, withdrawal_credentials=deposit.data.withdrawal_credentials, activation_eligibility_epoch=FAR_FUTURE_EPOCH, activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, withdrawable_epoch=FAR_FUTURE_EPOCH, effective_balance=effective_balance, ) ``` ```python def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Verify the Merkle branch assert is_valid_merkle_branch( leaf=hash_tree_root(deposit.data), branch=deposit.proof, depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in index=state.eth1_deposit_index, root=state.eth1_data.deposit_root, ) # Deposits must be processed in order state.eth1_deposit_index += 1 pubkey = deposit.data.pubkey amount = deposit.data.amount validator_pubkeys = [v.pubkey for v in state.validators] if pubkey not in validator_pubkeys: # Verify the deposit signature (proof of possession) which is not checked by the deposit contract deposit_message = DepositMessage( pubkey=deposit.data.pubkey, withdrawal_credentials=deposit.data.withdrawal_credentials, amount=deposit.data.amount, ) domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks signing_root = compute_signing_root(deposit_message, domain) if not bls.Verify(pubkey, signing_root, deposit.data.signature): return # Add validator and balance entries state.validators.append(get_validator_from_deposit(state, deposit)) state.balances.append(amount) else: # Increase balance by deposit amount index = ValidatorIndex(validator_pubkeys.index(pubkey)) increase_balance(state, index, amount) ``` ```python def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: voluntary_exit = signed_voluntary_exit.message validator = state.validators[voluntary_exit.validator_index] # Verify the validator is active assert is_active_validator(validator, get_current_epoch(state)) # Verify exit has not been initiated assert validator.exit_epoch == FAR_FUTURE_EPOCH # Exits must specify an epoch when they become valid; they are not valid before then assert get_current_epoch(state) >= voluntary_exit.epoch # Verify the validator has been active long enough assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD # Verify signature domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch) signing_root = compute_signing_root(voluntary_exit, domain) assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature) # Initiate exit initiate_validator_exit(state, voluntary_exit.validator_index) ```