Mirror of https://gitlab.com/pulsechaincom/lighthouse-pulse.git (synced 2024-12-24 20:47:17 +00:00)

Commit 775d222299
## Proposed Changes

With proposer boosting implemented (#2822) we have an opportunity to re-org out late blocks.

This PR adds three flags to the BN to control this behaviour:

* `--disable-proposer-reorgs`: turn aggressive re-orging off (it's on by default).
* `--proposer-reorg-threshold N`: attempt to orphan blocks with less than N% of the committee vote. If this parameter isn't set then N defaults to 20% when the feature is enabled.
* `--proposer-reorg-epochs-since-finalization N`: only attempt to re-org late blocks when the number of epochs since finalization is less than or equal to N. The default is 2 epochs, meaning re-orgs will only be attempted when the chain is finalizing optimally.

For safety Lighthouse will only attempt a re-org under very specific conditions:

1. The block being proposed is 1 slot after the canonical head, and the canonical head is 1 slot after its parent. I.e. at slot `n + 1` rather than building on the block from slot `n` we build on the block from slot `n - 1`.
2. The current canonical head received less than N% of the committee vote. N should be set depending on the proposer boost fraction itself, the fraction of the network that is believed to be applying it, and the size of the largest entity that could be hoarding votes.
3. The current canonical head arrived after the attestation deadline from our perspective. This condition was only added to support suppression of `forkchoiceUpdated` messages, but makes intuitive sense.
4. The block is being proposed in the first 2 seconds of the slot. This gives it time to propagate and receive the proposer boost.

## Additional Info

For the initial idea and background, see: https://github.com/ethereum/consensus-specs/pull/2353#issuecomment-950238004

There is also a specification for this feature here: https://github.com/ethereum/consensus-specs/pull/3034

Co-authored-by: Michael Sproul <micsproul@gmail.com>
Co-authored-by: pawan <pawandhananjay@gmail.com>
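For orientation, the safety conditions above can be read as a single predicate over the proposal slot and the current head. The snippet below is a minimal, hypothetical sketch of that predicate, not the fork choice code added by this PR; all names (`ReorgConfig`, `should_attempt_reorg`, the percentage-based vote accounting) are illustrative assumptions.

```rust
/// Illustrative configuration mirroring the two tunable flags described above.
pub struct ReorgConfig {
    /// `--proposer-reorg-threshold N`: orphan heads with less than N% of the committee vote.
    pub threshold_pct: u64,
    /// `--proposer-reorg-epochs-since-finalization N`.
    pub max_epochs_since_finalization: u64,
}

impl Default for ReorgConfig {
    fn default() -> Self {
        Self {
            threshold_pct: 20,
            max_epochs_since_finalization: 2,
        }
    }
}

/// Returns `true` only if every safety condition listed above holds (hypothetical sketch).
pub fn should_attempt_reorg(
    cfg: &ReorgConfig,
    proposal_slot: u64,
    head_slot: u64,
    head_parent_slot: u64,
    head_vote_pct: u64,
    head_arrived_after_attestation_deadline: bool,
    seconds_into_slot: u64,
    epochs_since_finalization: u64,
) -> bool {
    // 1. Single-slot re-org only: the head is at slot `n`, its parent at `n - 1`,
    //    and we propose at `n + 1` building on the block from `n - 1`.
    let single_slot_reorg = proposal_slot == head_slot + 1 && head_slot == head_parent_slot + 1;
    // 2. The head is weak: it attracted less than the threshold share of committee votes.
    let head_is_weak = head_vote_pct < cfg.threshold_pct;
    // 4. We are in the first 2 seconds of the slot, so our block can still earn proposer boost.
    let early_in_slot = seconds_into_slot < 2;
    // Only attempt re-orgs while the chain is finalizing optimally.
    let finalizing_well = epochs_since_finalization <= cfg.max_epochs_since_finalization;

    single_slot_reorg
        && head_is_weak
        // 3. The head arrived after the attestation deadline from our perspective.
        && head_arrived_after_attestation_deadline
        && early_in_slot
        && finalizing_well
}
```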
191 lines · 6.1 KiB · Rust
use beacon_chain::{
    test_utils::{BeaconChainHarness, BoxedMutator, EphemeralHarnessType},
    BeaconChain, BeaconChainTypes,
};
use directory::DEFAULT_ROOT_DIR;
use eth2::{BeaconNodeHttpClient, Timeouts};
use http_api::{Config, Context};
use lighthouse_network::{
    discv5::enr::{CombinedKey, EnrBuilder},
    libp2p::{core::connection::ConnectionId, swarm::NetworkBehaviour},
    rpc::methods::{MetaData, MetaDataV2},
    types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState},
    ConnectedPoint, Enr, NetworkGlobals, PeerId, PeerManager,
};
use logging::test_logger;
use network::{NetworkReceivers, NetworkSenders};
use sensitive_url::SensitiveUrl;
use slog::Logger;
use std::future::Future;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;
use store::MemoryStore;
use tokio::sync::oneshot;
use types::{ChainSpec, EthSpec};

pub const TCP_PORT: u16 = 42;
pub const UDP_PORT: u16 = 42;
pub const SEQ_NUMBER: u64 = 0;
pub const EXTERNAL_ADDR: &str = "/ip4/0.0.0.0/tcp/9000";

/// HTTP API tester that allows interaction with the underlying beacon chain harness.
pub struct InteractiveTester<E: EthSpec> {
    pub harness: BeaconChainHarness<EphemeralHarnessType<E>>,
    pub client: BeaconNodeHttpClient,
    pub network_rx: NetworkReceivers<E>,
    _server_shutdown: oneshot::Sender<()>,
}

/// The result of calling `create_api_server`.
///
/// Glue-type between `tests::ApiTester` and `InteractiveTester`.
pub struct ApiServer<E: EthSpec, SFut: Future<Output = ()>> {
    pub server: SFut,
    pub listening_socket: SocketAddr,
    pub shutdown_tx: oneshot::Sender<()>,
    pub network_rx: NetworkReceivers<E>,
    pub local_enr: Enr,
    pub external_peer_id: PeerId,
}

type Mutator<E> = BoxedMutator<E, MemoryStore<E>, MemoryStore<E>>;

impl<E: EthSpec> InteractiveTester<E> {
    pub async fn new(spec: Option<ChainSpec>, validator_count: usize) -> Self {
        Self::new_with_mutator(spec, validator_count, None).await
    }

    pub async fn new_with_mutator(
        spec: Option<ChainSpec>,
        validator_count: usize,
        mutator: Option<Mutator<E>>,
    ) -> Self {
        let mut harness_builder = BeaconChainHarness::builder(E::default())
            .spec_or_default(spec)
            .deterministic_keypairs(validator_count)
            .logger(test_logger())
            .mock_execution_layer()
            .fresh_ephemeral_store();

        if let Some(mutator) = mutator {
            harness_builder = harness_builder.initial_mutator(mutator);
        }

        let harness = harness_builder.build();

        let ApiServer {
            server,
            listening_socket,
            shutdown_tx: _server_shutdown,
            network_rx,
            ..
        } = create_api_server(harness.chain.clone(), harness.logger().clone()).await;

        tokio::spawn(server);

        let client = BeaconNodeHttpClient::new(
            SensitiveUrl::parse(&format!(
                "http://{}:{}",
                listening_socket.ip(),
                listening_socket.port()
            ))
            .unwrap(),
            Timeouts::set_all(Duration::from_secs(1)),
        );

        Self {
            harness,
            client,
            network_rx,
            _server_shutdown,
        }
    }
}

pub async fn create_api_server<T: BeaconChainTypes>(
    chain: Arc<BeaconChain<T>>,
    log: Logger,
) -> ApiServer<T::EthSpec, impl Future<Output = ()>> {
    // Get a random unused port.
    let port = unused_port::unused_tcp_port().unwrap();
    create_api_server_on_port(chain, log, port).await
}

pub async fn create_api_server_on_port<T: BeaconChainTypes>(
    chain: Arc<BeaconChain<T>>,
    log: Logger,
    port: u16,
) -> ApiServer<T::EthSpec, impl Future<Output = ()>> {
    let (network_senders, network_receivers) = NetworkSenders::new();

    // Default metadata
    let meta_data = MetaData::V2(MetaDataV2 {
        seq_number: SEQ_NUMBER,
        attnets: EnrAttestationBitfield::<T::EthSpec>::default(),
        syncnets: EnrSyncCommitteeBitfield::<T::EthSpec>::default(),
    });
    let enr_key = CombinedKey::generate_secp256k1();
    let enr = EnrBuilder::new("v4").build(&enr_key).unwrap();
    let network_globals = Arc::new(NetworkGlobals::new(
        enr.clone(),
        TCP_PORT,
        UDP_PORT,
        meta_data,
        vec![],
        &log,
    ));

    // Only a peer manager can add peers, so we create a dummy manager.
    let config = lighthouse_network::peer_manager::config::Config::default();
    let mut pm = PeerManager::new(config, network_globals.clone(), &log).unwrap();

    // Add a peer.
    let peer_id = PeerId::random();

    let connected_point = ConnectedPoint::Listener {
        local_addr: EXTERNAL_ADDR.parse().unwrap(),
        send_back_addr: EXTERNAL_ADDR.parse().unwrap(),
    };
    let con_id = ConnectionId::new(1);
    pm.inject_connection_established(&peer_id, &con_id, &connected_point, None, 0);
    *network_globals.sync_state.write() = SyncState::Synced;

    let eth1_service =
        eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap();

    let context = Arc::new(Context {
        config: Config {
            enabled: true,
            listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
            listen_port: port,
            allow_origin: None,
            tls_config: None,
            allow_sync_stalled: false,
            data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR),
            spec_fork_name: None,
        },
        chain: Some(chain.clone()),
        network_senders: Some(network_senders),
        network_globals: Some(network_globals),
        eth1_service: Some(eth1_service),
        log,
    });
    let ctx = context.clone();
    let (shutdown_tx, shutdown_rx) = oneshot::channel();
    let server_shutdown = async {
        // It's not really interesting why this triggered, just that it happened.
        let _ = shutdown_rx.await;
    };
    let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap();

    ApiServer {
        server,
        listening_socket,
        shutdown_tx,
        network_rx: network_receivers,
        local_enr: enr,
        external_peer_id: peer_id,
    }
}
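As a usage note: a test built on this helper would typically construct an `InteractiveTester`, then drive the node through `client` while inspecting `harness` and `network_rx`. The following is a rough sketch under the assumption that `BeaconNodeHttpClient::get_node_version` is available (as in the `eth2` crate); the test name and validator count are arbitrary, and this test is not part of the file above.

```rust
// Hypothetical example test, not part of this file.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn interactive_tester_serves_http_api() {
    type E = types::MinimalEthSpec;

    // 24 deterministic validators is an arbitrary choice for illustration.
    let tester = InteractiveTester::<E>::new(None, 24).await;

    // The HTTP API should be reachable via the pre-configured client.
    let version = tester
        .client
        .get_node_version()
        .await
        .expect("version endpoint should respond");
    assert!(!version.data.version.is_empty());
}
```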