Mirror of https://gitlab.com/pulsechaincom/lighthouse-pulse.git
Synced 2025-01-10 21:11:22 +00:00
6656cb00e4
* Start updating types
* WIP
* Signature hacking
* Existing EF tests passing with fake_crypto
* Updates
* Delete outdated API spec
* The refactor continues
* It compiles
* WIP test fixes
* All release tests passing bar genesis state parsing
* Update and test YamlConfig
* Update to spec v0.10 compatible BLS
* Updates to BLS EF tests
* Add EF test for AggregateVerify, and delete unused hash2curve tests for uncompressed points
* Update EF tests to v0.10.1
* Use optional block root correctly in block proc
* Use genesis fork in deposit domain. All tests pass
* Cargo fmt
* Fast aggregate verify test
* Update REST API docs
* Cargo fmt
* Fix unused import
* Bump spec tags to v0.10.1
* Add `seconds_per_eth1_block` to chainspec
* Update to timestamp based eth1 voting scheme
* Return None from `get_votes_to_consider` if block cache is empty
* Handle overflows in `is_candidate_block`
* Revert to failing tests
* Fix eth1 data sets test
* Choose default vote according to spec
* Fix collect_valid_votes tests
* Fix `get_votes_to_consider` to choose all eligible blocks
* Uncomment winning_vote tests
* Add comments; remove unused code
* Reduce seconds_per_eth1_block for simulation
* Addressed review comments
* Add test for default vote case
* Fix logs
* Remove unused functions
* Meter default eth1 votes
* Fix comments
* Address review comments; remove unused dependency
* Add first attempt at attestation proc. re-write
* Add version 2 of attestation processing
* Minor fixes
* Add validator pubkey cache
* Make get_indexed_attestation take a committee
* Link signature processing into new attn verification
* First working version
* Ensure pubkey cache is updated
* Add more metrics, slight optimizations
* Clone committee cache during attestation processing
* Update shuffling cache during block processing
* Remove old commented-out code
* Fix shuffling cache insert bug
* Used indexed attestation in fork choice
* Restructure attn processing, add metrics
* Add more detailed metrics
* Tidy, fix failing tests
* Fix failing tests, tidy
* Disable/delete two outdated tests
* Tidy
* Add pubkey cache persistence file
* Add more comments
* Integrate persistence file into builder
* Add pubkey cache tests
* Add data_dir to beacon chain builder
* Remove Option in pubkey cache persistence file
* Ensure consistency between datadir/data_dir
* Fix failing network test
* Tidy
* Fix todos
* Add attestation processing tests
* Add another test
* Only run attestation tests in release
* Make attestation tests MainnetEthSpec
* Address Michael's comments
* Remove redundant check
* Fix warning
* Fix failing test

Co-authored-by: Michael Sproul <micsproul@gmail.com>
Co-authored-by: Pawan Dhananjay <pawandhananjay@gmail.com>
146 lines
4.3 KiB
Rust
#![cfg(not(debug_assertions))]

#[macro_use]
extern crate lazy_static;

use beacon_chain::{
    test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy},
    BeaconChain, BeaconChainTypes,
};
use sloggers::{null::NullLoggerBuilder, Build};
use std::sync::Arc;
use store::{DiskStore, StoreConfig};
use tempfile::{tempdir, TempDir};
use types::{EthSpec, Keypair, MinimalEthSpec};

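// The minimal spec keeps epochs short, so finalization can be reached quickly
// within a test run.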
type E = MinimalEthSpec;

// Should ideally be divisible by 3.
pub const VALIDATOR_COUNT: usize = 24;

lazy_static! {
    /// A cached set of keys.
    static ref KEYPAIRS: Vec<Keypair> =
        types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
}

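/// Opens a `DiskStore` under a fresh temporary directory, using separate
/// `hot_db` and `cold_db` paths for the split-store layout.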
fn get_store(db_path: &TempDir) -> Arc<DiskStore<E>> {
    let spec = E::default_spec();
    let hot_path = db_path.path().join("hot_db");
    let cold_path = db_path.path().join("cold_db");
    let config = StoreConfig::default();
    let log = NullLoggerBuilder.build().expect("logger should build");
    Arc::new(
        DiskStore::open(&hot_path, &cold_path, config, spec, log)
            .expect("disk store should initialize"),
    )
}

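/// Runs a chain long enough to finalize, persists it, resumes it from the same
/// on-disk store, and checks that the resumed chain picks up where the original
/// left off and keeps finalizing.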
#[test]
fn finalizes_after_resuming_from_db() {
    let validator_count = 16;
    let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 8;
    let first_half = num_blocks_produced / 2;

    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);

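    // Build a harness whose beacon chain is backed by the on-disk store above,
    // using a subset of the cached deterministic keypairs as validators.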
    let harness = BeaconChainHarness::new_with_disk_store(
        MinimalEthSpec,
        store.clone(),
        KEYPAIRS[0..validator_count].to_vec(),
    );

    harness.advance_slot();

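    // Produce blocks and attestations (from every validator) for the first
    // half of the schedule: four epochs' worth of slots.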
    harness.extend_chain(
        first_half as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

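    // With full participation over four epochs, at least one epoch must have
    // been finalized before the chain is persisted.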
    assert!(
        harness
            .chain
            .head()
            .expect("should read head")
            .beacon_state
            .finalized_checkpoint
            .epoch
            > 0,
        "the chain should have already finalized"
    );

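    // Record the current slot and write the chain to disk so it can be resumed.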
    let latest_slot = harness.chain.slot().expect("should have a slot");

    harness.chain.persist().expect("should persist the chain");

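    // Keep the original chain (and its data dir) around so the resumed chain
    // can be checked against it.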
    let data_dir = harness.data_dir;
    let original_chain = harness.chain;

    let resumed_harness = BeaconChainHarness::resume_from_disk_store(
        MinimalEthSpec,
        store,
        KEYPAIRS[0..validator_count].to_vec(),
        data_dir,
    );

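    // A chain resumed from the same store should be indistinguishable from the
    // original in every field checked by the helper at the bottom of this file.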
    assert_chains_pretty_much_the_same(&original_chain, &resumed_harness.chain);

    // Set the slot clock of the resumed harness to the slot following the one
    // at which the previous harness stopped.
    //
    // This allows us to produce the block at the next slot.
    resumed_harness
        .chain
        .slot_clock
        .set_slot(latest_slot.as_u64() + 1);

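    // Finish the remaining four epochs on the resumed chain; if persistence
    // worked, it should continue building and finalizing as if never restarted.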
    resumed_harness.extend_chain(
        (num_blocks_produced - first_half) as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    let state = &resumed_harness
        .chain
        .head()
        .expect("should read head")
        .beacon_state;
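    // With every validator attesting each slot, justification should trail the
    // head by one epoch and finalization by two.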
    assert_eq!(
        state.slot, num_blocks_produced,
        "head should be at the current slot"
    );
    assert_eq!(
        state.current_epoch(),
        num_blocks_produced / MinimalEthSpec::slots_per_epoch(),
        "head should be at the expected epoch"
    );
    assert_eq!(
        state.current_justified_checkpoint.epoch,
        state.current_epoch() - 1,
        "the head should be justified one behind the current epoch"
    );
    assert_eq!(
        state.finalized_checkpoint.epoch,
        state.current_epoch() - 2,
        "the head should be finalized two behind the current epoch"
    );
}

/// Checks that two chains are the same, for the purposes of this test.
///
/// Several fields that are hard/impossible to check are ignored (e.g., the store).
fn assert_chains_pretty_much_the_same<T: BeaconChainTypes>(a: &BeaconChain<T>, b: &BeaconChain<T>) {
    assert_eq!(a.spec, b.spec, "spec should be equal");
    assert_eq!(a.op_pool, b.op_pool, "op_pool should be equal");
    assert_eq!(a.head(), b.head(), "head() should be equal");
    assert_eq!(a.heads(), b.heads(), "heads() should be equal");
    assert_eq!(
        a.genesis_block_root, b.genesis_block_root,
        "genesis_block_root should be equal"
    );
    assert!(
        a.fork_choice == b.fork_choice,
        "fork_choice should be equal"
    );
}