Activate clippy::manual_let_else lint (#4889)
## Issue Addressed

#4888

## Proposed Changes

Enabled the `clippy::manual_let_else` lint and resolved the resulting warnings.
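For context, a minimal sketch of the rewrite this lint enforces is shown below. The `lookup` function and the map are hypothetical illustrations, not code from this diff:

```rust
use std::collections::HashMap;

// Hypothetical example illustrating what `clippy::manual_let_else` flags.
fn lookup(map: &HashMap<u32, String>, key: u32) -> Option<usize> {
    // Before: a manual `match` whose `None` arm only diverges.
    // `clippy::manual_let_else` warns on this pattern.
    // let value = match map.get(&key) {
    //     Some(value) => value,
    //     None => return None,
    // };

    // After: the equivalent `let ... else` form the lint suggests.
    let Some(value) = map.get(&key) else {
        return None;
    };
    Some(value.len())
}
```

The diff below applies this same mechanical transformation throughout the codebase.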
This commit is contained in:
parent a9f9dc241d
commit 4ce01ddd11
Makefile

@@ -208,6 +208,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine
 lint:
 	cargo clippy --workspace --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \
 		-D clippy::fn_to_numeric_cast_any \
+		-D clippy::manual_let_else \
 		-D warnings \
 		-A clippy::derive_partial_eq_without_eq \
 		-A clippy::from-over-into \
@@ -601,11 +601,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         spec: &ChainSpec,
         log: &Logger,
     ) -> Result<Option<BeaconForkChoice<T>>, Error> {
-        let persisted_fork_choice =
-            match store.get_item::<PersistedForkChoice>(&FORK_CHOICE_DB_KEY)? {
-                Some(fc) => fc,
-                None => return Ok(None),
-            };
+        let Some(persisted_fork_choice) =
+            store.get_item::<PersistedForkChoice>(&FORK_CHOICE_DB_KEY)?
+        else {
+            return Ok(None);
+        };
 
         let fc_store =
             BeaconForkChoiceStore::from_persisted(persisted_fork_choice.fork_choice_store, store)?;
@@ -3485,9 +3485,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         state: &BeaconState<T::EthSpec>,
     ) -> Result<(), BlockError<T::EthSpec>> {
         // Only perform the weak subjectivity check if it was configured.
-        let wss_checkpoint = if let Some(checkpoint) = self.config.weak_subjectivity_checkpoint {
-            checkpoint
-        } else {
+        let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint else {
             return Ok(());
         };
         // Note: we're using the finalized checkpoint from the head state, rather than fork
@@ -5336,14 +5334,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             )
             .await??;
 
-        let (forkchoice_update_params, pre_payload_attributes) =
-            if let Some((fcu, Some(pre_payload))) = maybe_prep_data {
-                (fcu, pre_payload)
-            } else {
-                // Appropriate log messages have already been logged above and in
-                // `get_pre_payload_attributes`.
-                return Ok(());
-            };
+        let Some((forkchoice_update_params, Some(pre_payload_attributes))) = maybe_prep_data else {
+            // Appropriate log messages have already been logged above and in
+            // `get_pre_payload_attributes`.
+            return Ok(());
+        };
 
         // If the execution layer doesn't have any proposer data for this validator then we assume
         // it's not connected to this BN and no action is required.
@@ -5436,23 +5431,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             }
         }
 
-        let till_prepare_slot =
-            if let Some(duration) = self.slot_clock.duration_to_slot(prepare_slot) {
-                duration
-            } else {
-                // `SlotClock::duration_to_slot` will return `None` when we are past the start
-                // of `prepare_slot`. Don't bother sending a `forkchoiceUpdated` in that case,
-                // it's too late.
-                //
-                // This scenario might occur on an overloaded/under-resourced node.
-                warn!(
-                    self.log,
-                    "Delayed proposer preparation";
-                    "prepare_slot" => prepare_slot,
-                    "validator" => proposer,
-                );
-                return Ok(());
-            };
+        let Some(till_prepare_slot) = self.slot_clock.duration_to_slot(prepare_slot) else {
+            // `SlotClock::duration_to_slot` will return `None` when we are past the start
+            // of `prepare_slot`. Don't bother sending a `forkchoiceUpdated` in that case,
+            // it's too late.
+            //
+            // This scenario might occur on an overloaded/under-resourced node.
+            warn!(
+                self.log,
+                "Delayed proposer preparation";
+                "prepare_slot" => prepare_slot,
+                "validator" => proposer,
+            );
+            return Ok(());
+        };
 
         // If we are close enough to the proposal slot, send an fcU, which will have payload
         // attributes filled in by the execution layer cache we just primed.
@@ -451,23 +451,21 @@ async fn availability_cache_maintenance_service<T: BeaconChainTypes>(
             let additional_delay = (epoch_duration * 3) / 4;
             tokio::time::sleep(duration + additional_delay).await;
 
-            let deneb_fork_epoch = match chain.spec.deneb_fork_epoch {
-                Some(epoch) => epoch,
-                None => break, // shutdown service if deneb fork epoch not set
+            let Some(deneb_fork_epoch) = chain.spec.deneb_fork_epoch else {
+                // shutdown service if deneb fork epoch not set
+                break;
             };
 
             debug!(
                 chain.log,
                 "Availability cache maintenance service firing";
             );
 
-            let current_epoch = match chain
+            let Some(current_epoch) = chain
                 .slot_clock
                 .now()
                 .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch()))
-            {
-                Some(epoch) => epoch,
-                None => continue, // we'll have to try again next time I suppose..
+            else {
+                continue;
             };
 
             if current_epoch < deneb_fork_epoch {
@@ -547,9 +547,8 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
             .peek_lru()
             .map(|(key, value)| (*key, value.clone()));
 
-        let (lru_root, lru_pending_components) = match lru_entry {
-            Some((r, p)) => (r, p),
-            None => break,
+        let Some((lru_root, lru_pending_components)) = lru_entry else {
+            break;
         };
 
         if lru_pending_components
@@ -605,9 +604,8 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
         let delete_if_outdated = |cache: &OverflowLRUCache<T>,
                                   block_data: Option<BlockData>|
         -> Result<(), AvailabilityCheckError> {
-            let block_data = match block_data {
-                Some(block_data) => block_data,
-                None => return Ok(()),
+            let Some(block_data) = block_data else {
+                return Ok(());
             };
             let not_in_store_keys = !cache.critical.read().store_keys.contains(&block_data.root);
             if not_in_store_keys {
@@ -99,9 +99,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
         spec: &ChainSpec,
     ) -> Result<Option<Attestation<E>>, Error> {
         let lock = self.item.read();
-        let item = if let Some(item) = lock.as_ref() {
-            item
-        } else {
+        let Some(item) = lock.as_ref() else {
            return Ok(None);
        };
 
@@ -16,15 +16,14 @@ pub fn upgrade_to_v12<T: BeaconChainTypes>(
     let spec = db.get_chain_spec();
 
     // Load a V5 op pool and transform it to V12.
-    let PersistedOperationPoolV5 {
+    let Some(PersistedOperationPoolV5 {
         attestations_v5,
         sync_contributions,
         attester_slashings_v5,
         proposer_slashings_v5,
         voluntary_exits_v5,
-    } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? {
-        op_pool
-    } else {
+    }) = db.get_item(&OP_POOL_DB_KEY)?
+    else {
         debug!(log, "Nothing to do, no operation pool stored");
         return Ok(vec![]);
     };
@@ -168,15 +167,14 @@ pub fn downgrade_from_v12<T: BeaconChainTypes>(
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
     // Load a V12 op pool and transform it to V5.
-    let PersistedOperationPoolV12::<T::EthSpec> {
+    let Some(PersistedOperationPoolV12::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
         proposer_slashings,
         voluntary_exits,
-    } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? {
-        op_pool_v12
-    } else {
+    }) = db.get_item(&OP_POOL_DB_KEY)?
+    else {
         debug!(log, "Nothing to do, no operation pool stored");
         return Ok(vec![]);
     };
@@ -18,19 +18,14 @@ fn get_slot_clock<T: BeaconChainTypes>(
     log: &Logger,
 ) -> Result<Option<T::SlotClock>, Error> {
     let spec = db.get_chain_spec();
-    let genesis_block = if let Some(block) = db.get_blinded_block(&Hash256::zero())? {
-        block
-    } else {
+    let Some(genesis_block) = db.get_blinded_block(&Hash256::zero())? else {
        error!(log, "Missing genesis block");
        return Ok(None);
    };
-    let genesis_state =
-        if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? {
-            state
-        } else {
-            error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root());
-            return Ok(None);
-        };
+    let Some(genesis_state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? else {
+        error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root());
+        return Ok(None);
+    };
     Ok(Some(T::SlotClock::new(
         spec.genesis_slot,
         Duration::from_secs(genesis_state.genesis_time()),
@@ -43,15 +38,14 @@ pub fn upgrade_to_v14<T: BeaconChainTypes>(
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
     // Load a V12 op pool and transform it to V14.
-    let PersistedOperationPoolV12::<T::EthSpec> {
+    let Some(PersistedOperationPoolV12::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
         proposer_slashings,
         voluntary_exits,
-    } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? {
-        op_pool_v12
-    } else {
+    }) = db.get_item(&OP_POOL_DB_KEY)?
+    else {
         debug!(log, "Nothing to do, no operation pool stored");
         return Ok(vec![]);
     };
@@ -94,16 +88,15 @@ pub fn downgrade_from_v14<T: BeaconChainTypes>(
     }
 
     // Load a V14 op pool and transform it to V12.
-    let PersistedOperationPoolV14::<T::EthSpec> {
+    let Some(PersistedOperationPoolV14::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
         proposer_slashings,
         voluntary_exits,
         bls_to_execution_changes,
-    } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? {
-        op_pool
-    } else {
+    }) = db.get_item(&OP_POOL_DB_KEY)?
+    else {
         debug!(log, "Nothing to do, no operation pool stored");
         return Ok(vec![]);
     };
@@ -11,16 +11,15 @@ pub fn upgrade_to_v15<T: BeaconChainTypes>(
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
     // Load a V14 op pool and transform it to V15.
-    let PersistedOperationPoolV14::<T::EthSpec> {
+    let Some(PersistedOperationPoolV14::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
         proposer_slashings,
         voluntary_exits,
         bls_to_execution_changes,
-    } = if let Some(op_pool_v14) = db.get_item(&OP_POOL_DB_KEY)? {
-        op_pool_v14
-    } else {
+    }) = db.get_item(&OP_POOL_DB_KEY)?
+    else {
         debug!(log, "Nothing to do, no operation pool stored");
         return Ok(vec![]);
     };
@@ -43,7 +42,7 @@ pub fn downgrade_from_v15<T: BeaconChainTypes>(
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
     // Load a V15 op pool and transform it to V14.
-    let PersistedOperationPoolV15::<T::EthSpec> {
+    let Some(PersistedOperationPoolV15::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
@@ -51,9 +50,8 @@ pub fn downgrade_from_v15<T: BeaconChainTypes>(
         voluntary_exits,
         bls_to_execution_changes,
         capella_bls_change_broadcast_indices,
-    } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? {
-        op_pool
-    } else {
+    }) = db.get_item(&OP_POOL_DB_KEY)?
+    else {
         debug!(log, "Nothing to do, no operation pool stored");
         return Ok(vec![]);
     };
@@ -17,19 +17,14 @@ fn get_slot_clock<T: BeaconChainTypes>(
     log: &Logger,
 ) -> Result<Option<T::SlotClock>, Error> {
     let spec = db.get_chain_spec();
-    let genesis_block = if let Some(block) = db.get_blinded_block(&Hash256::zero())? {
-        block
-    } else {
+    let Some(genesis_block) = db.get_blinded_block(&Hash256::zero())? else {
        error!(log, "Missing genesis block");
        return Ok(None);
    };
-    let genesis_state =
-        if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? {
-            state
-        } else {
-            error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root());
-            return Ok(None);
-        };
+    let Some(genesis_state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? else {
+        error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root());
+        return Ok(None);
+    };
     Ok(Some(T::SlotClock::new(
         spec.genesis_slot,
         Duration::from_secs(genesis_state.genesis_time()),
@@ -113,14 +113,11 @@ async fn state_advance_timer<T: BeaconChainTypes>(
     let slot_duration = slot_clock.slot_duration();
 
     loop {
-        let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() {
-            Some(duration) => duration,
-            None => {
-                error!(log, "Failed to read slot clock");
-                // If we can't read the slot clock, just wait another slot.
-                sleep(slot_duration).await;
-                continue;
-            }
+        let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() else {
+            error!(log, "Failed to read slot clock");
+            // If we can't read the slot clock, just wait another slot.
+            sleep(slot_duration).await;
+            continue;
         };
 
         // Run the state advance 3/4 of the way through the slot (9s on mainnet).
@@ -1799,13 +1799,11 @@ impl<T: EthSpec> ExecutionLayer<T> {
             };
         }
 
-        let block = if let Some(block) = engine
+        let Some(block) = engine
             .api
             .get_block_by_hash_with_txns::<T>(hash, fork)
             .await?
-        {
-            block
-        } else {
+        else {
             return Ok(None);
         };
 
@@ -426,9 +426,7 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
     }
 
     pub fn new_payload(&mut self, payload: ExecutionPayload<T>) -> PayloadStatusV1 {
-        let parent = if let Some(parent) = self.blocks.get(&payload.parent_hash()) {
-            parent
-        } else {
+        let Some(parent) = self.blocks.get(&payload.parent_hash()) else {
            return PayloadStatusV1 {
                status: PayloadStatusV1Status::Syncing,
                latest_valid_hash: None,
@@ -30,9 +30,7 @@ pub fn sync_committee_duties<T: BeaconChainTypes>(
     request_indices: &[u64],
     chain: &BeaconChain<T>,
 ) -> Result<SyncDuties, warp::reject::Rejection> {
-    let altair_fork_epoch = if let Some(altair_fork_epoch) = chain.spec.altair_fork_epoch {
-        altair_fork_epoch
-    } else {
+    let Some(altair_fork_epoch) = chain.spec.altair_fork_epoch else {
        // Empty response for networks with Altair disabled.
        return Ok(convert_to_response(vec![], false));
    };
@@ -135,9 +135,8 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyInboundCodec<TSpec> {
         if self.protocol.versioned_protocol == SupportedProtocol::MetaDataV2 {
             return Ok(Some(InboundRequest::MetaData(MetadataRequest::new_v2())));
         }
-        let length = match handle_length(&mut self.inner, &mut self.len, src)? {
-            Some(len) => len,
-            None => return Ok(None),
+        let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else {
+            return Ok(None);
        };
 
        // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of
@@ -277,9 +276,8 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyOutboundCodec<TSpec> {
                 return Ok(None);
             }
         }
-        let length = match handle_length(&mut self.inner, &mut self.len, src)? {
-            Some(len) => len,
-            None => return Ok(None),
+        let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else {
+            return Ok(None);
        };
 
        // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of
@@ -324,9 +322,8 @@ impl<TSpec: EthSpec> OutboundCodec<OutboundRequest<TSpec>> for SSZSnappyOutbound
         &mut self,
         src: &mut BytesMut,
     ) -> Result<Option<Self::CodecErrorType>, RPCError> {
-        let length = match handle_length(&mut self.inner, &mut self.len, src)? {
-            Some(len) => len,
-            None => return Ok(None),
+        let Some(length) = handle_length(&mut self.inner, &mut self.len, src)? else {
+            return Ok(None);
        };
 
        // Should not attempt to decode rpc chunks with `length > max_packet_size` or not within bounds of
@@ -286,9 +286,7 @@ where
     // wrong state a response will fail silently.
     fn send_response(&mut self, inbound_id: SubstreamId, response: RPCCodedResponse<TSpec>) {
         // check if the stream matching the response still exists
-        let inbound_info = if let Some(info) = self.inbound_substreams.get_mut(&inbound_id) {
-            info
-        } else {
+        let Some(inbound_info) = self.inbound_substreams.get_mut(&inbound_id) else {
            if !matches!(response, RPCCodedResponse::StreamTermination(..)) {
                // the stream is closed after sending the expected number of responses
                trace!(self.log, "Inbound stream has expired. Response not sent";
@@ -296,7 +294,6 @@ where
             }
             return;
         };
-
         // If the response we are sending is an error, report back for handling
         if let RPCCodedResponse::Error(ref code, ref reason) = response {
             self.events_out.push(Err(HandlerErr::Inbound {
@@ -205,9 +205,8 @@ impl GossipCache {
             GossipKind::LightClientFinalityUpdate => self.light_client_finality_update,
             GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update,
         };
-        let expire_timeout = match expire_timeout {
-            Some(expire_timeout) => expire_timeout,
-            None => return,
+        let Some(expire_timeout) = expire_timeout else {
+            return;
        };
        match self
            .topic_msgs
@@ -350,17 +350,14 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
                 return;
             }
         };
-        let bootstrap = match LightClientBootstrap::from_beacon_state(&mut beacon_state) {
-            Ok(bootstrap) => bootstrap,
-            Err(_) => {
-                self.send_error_response(
-                    peer_id,
-                    RPCResponseErrorCode::ResourceUnavailable,
-                    "Bootstrap not available".into(),
-                    request_id,
-                );
-                return;
-            }
+        let Ok(bootstrap) = LightClientBootstrap::from_beacon_state(&mut beacon_state) else {
+            self.send_error_response(
+                peer_id,
+                RPCResponseErrorCode::ResourceUnavailable,
+                "Bootstrap not available".into(),
+                request_id,
+            );
+            return;
        };
        self.send_response(
            peer_id,
@@ -115,34 +115,31 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
         duplicate_cache: DuplicateCache,
     ) {
         // Check if the block is already being imported through another source
-        let handle = match duplicate_cache.check_and_insert(block_root) {
-            Some(handle) => handle,
-            None => {
-                debug!(
-                    self.log,
-                    "Gossip block is being processed";
-                    "action" => "sending rpc block to reprocessing queue",
-                    "block_root" => %block_root,
-                );
+        let Some(handle) = duplicate_cache.check_and_insert(block_root) else {
+            debug!(
+                self.log,
+                "Gossip block is being processed";
+                "action" => "sending rpc block to reprocessing queue",
+                "block_root" => %block_root,
+            );
 
-                // Send message to work reprocess queue to retry the block
-                let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns(
-                    block_root,
-                    block,
-                    seen_timestamp,
-                    process_type,
-                );
-                let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock {
-                    beacon_block_root: block_root,
-                    process_fn,
-                    ignore_fn,
-                });
+            // Send message to work reprocess queue to retry the block
+            let (process_fn, ignore_fn) = self.clone().generate_rpc_beacon_block_fns(
+                block_root,
+                block,
+                seen_timestamp,
+                process_type,
+            );
+            let reprocess_msg = ReprocessQueueMessage::RpcBlock(QueuedRpcBlock {
+                beacon_block_root: block_root,
+                process_fn,
+                ignore_fn,
+            });
 
-                if reprocess_tx.try_send(reprocess_msg).is_err() {
-                    error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block_root)
-                };
-                return;
-            }
+            if reprocess_tx.try_send(reprocess_msg).is_err() {
+                error!(self.log, "Failed to inform block import"; "source" => "rpc", "block_root" => %block_root)
+            };
+            return;
         };
 
         // Returns `true` if the time now is after the 4s attestation deadline.
@@ -509,16 +509,13 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
             return Ok(ProcessResult::Successful);
         }
 
-        let batch = match self.batches.get_mut(&batch_id) {
-            Some(batch) => batch,
-            None => {
-                return self
-                    .fail_sync(BackFillError::InvalidSyncState(format!(
-                        "Trying to process a batch that does not exist: {}",
-                        batch_id
-                    )))
-                    .map(|_| ProcessResult::Successful);
-            }
+        let Some(batch) = self.batches.get_mut(&batch_id) else {
+            return self
+                .fail_sync(BackFillError::InvalidSyncState(format!(
+                    "Trying to process a batch that does not exist: {}",
+                    batch_id
+                )))
+                .map(|_| ProcessResult::Successful);
        };
 
        // NOTE: We send empty batches to the processor in order to trigger the block processor
@@ -909,9 +906,8 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
         network: &mut SyncNetworkContext<T>,
         batch_id: BatchId,
     ) -> Result<(), BackFillError> {
-        let batch = match self.batches.get_mut(&batch_id) {
-            Some(batch) => batch,
-            None => return Ok(()),
+        let Some(batch) = self.batches.get_mut(&batch_id) else {
+            return Ok(());
        };
 
        // Find a peer to request the batch
@@ -1015,15 +1015,12 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
             BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_))
             | BlockProcessingResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => {
                 // Check if the beacon processor is available
-                let beacon_processor = match cx.beacon_processor_if_enabled() {
-                    Some(beacon_processor) => beacon_processor,
-                    None => {
-                        return trace!(
-                            self.log,
-                            "Dropping parent chain segment that was ready for processing.";
-                            parent_lookup
-                        );
-                    }
+                let Some(beacon_processor) = cx.beacon_processor_if_enabled() else {
+                    return trace!(
+                        self.log,
+                        "Dropping parent chain segment that was ready for processing.";
+                        parent_lookup
+                    );
                };
                let (chain_hash, blocks, hashes, block_request) =
                    parent_lookup.parts_for_processing();
@@ -1195,11 +1192,8 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
         result: BatchProcessResult,
         cx: &SyncNetworkContext<T>,
     ) {
-        let request = match self.processing_parent_lookups.remove(&chain_hash) {
-            Some((_hashes, request)) => request,
-            None => {
-                return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash, "result" => ?result)
-            }
+        let Some((_hashes, request)) = self.processing_parent_lookups.remove(&chain_hash) else {
+            return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash, "result" => ?result);
        };
 
        debug!(self.log, "Parent chain processed"; "chain_hash" => %chain_hash, "result" => ?result);
@@ -294,19 +294,15 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
             return Ok(KeepChain);
         }
 
-        let beacon_processor = match network.beacon_processor_if_enabled() {
-            Some(beacon_processor) => beacon_processor,
-            None => return Ok(KeepChain),
+        let Some(beacon_processor) = network.beacon_processor_if_enabled() else {
+            return Ok(KeepChain);
        };
 
-        let batch = match self.batches.get_mut(&batch_id) {
-            Some(batch) => batch,
-            None => {
-                return Err(RemoveChain::WrongChainState(format!(
-                    "Trying to process a batch that does not exist: {}",
-                    batch_id
-                )));
-            }
+        let Some(batch) = self.batches.get_mut(&batch_id) else {
+            return Err(RemoveChain::WrongChainState(format!(
+                "Trying to process a batch that does not exist: {}",
+                batch_id
+            )));
        };
 
        // NOTE: We send empty batches to the processor in order to trigger the block processor
@@ -874,9 +870,8 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         network: &mut SyncNetworkContext<T>,
         batch_id: BatchId,
     ) -> ProcessingResult {
-        let batch = match self.batches.get_mut(&batch_id) {
-            Some(batch) => batch,
-            None => return Ok(KeepChain),
+        let Some(batch) = self.batches.get_mut(&batch_id) else {
+            return Ok(KeepChain);
        };
 
        // Find a peer to request the batch
@@ -432,9 +432,8 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     }
 
     // Load the blinded block.
-        let blinded_block = match self.get_blinded_block(block_root)? {
-            Some(block) => block,
-            None => return Ok(None),
+        let Some(blinded_block) = self.get_blinded_block(block_root)? else {
+            return Ok(None);
        };
 
        // If the block is after the split point then we should have the full execution payload
@@ -2053,12 +2052,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
 
     /// Try to prune blobs, approximating the current epoch from the split slot.
     pub fn try_prune_most_blobs(&self, force: bool) -> Result<(), Error> {
-        let deneb_fork_epoch = match self.spec.deneb_fork_epoch {
-            Some(epoch) => epoch,
-            None => {
-                debug!(self.log, "Deneb fork is disabled");
-                return Ok(());
-            }
+        let Some(deneb_fork_epoch) = self.spec.deneb_fork_epoch else {
+            debug!(self.log, "Deneb fork is disabled");
+            return Ok(());
        };
        // The current epoch is >= split_epoch + 2. It could be greater if the database is
        // configured to delay updating the split or finalization has ceased. In this instance we
@@ -17,9 +17,7 @@ where
     Cold: ItemStore<E>,
 {
     pub fn reconstruct_historic_states(self: &Arc<Self>) -> Result<(), Error> {
-        let mut anchor = if let Some(anchor) = self.get_anchor_info() {
-            anchor
-        } else {
+        let Some(mut anchor) = self.get_anchor_info() else {
            // Nothing to do, history is complete.
            return Ok(());
        };
@@ -16,12 +16,10 @@ pub fn spawn_timer<T: BeaconChainTypes>(
     let log = executor.log().clone();
     let timer_future = async move {
         loop {
-            let duration_to_next_slot = match beacon_chain.slot_clock.duration_to_next_slot() {
-                Some(duration) => duration,
-                None => {
-                    warn!(log, "Unable to determine duration to next slot");
-                    return;
-                }
+            let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot()
+            else {
+                warn!(log, "Unable to determine duration to next slot");
+                return;
            };
 
            sleep(duration_to_next_slot).await;
@@ -19,19 +19,16 @@ pub fn compare_fields_derive(input: TokenStream) -> TokenStream {
     let name = &item.ident;
     let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl();
 
-    let struct_data = match &item.data {
-        syn::Data::Struct(s) => s,
-        _ => panic!("compare_fields_derive only supports structs."),
+    let syn::Data::Struct(struct_data) = &item.data else {
+        panic!("compare_fields_derive only supports structs.");
    };
 
    let mut quotes = vec![];
 
    for field in struct_data.fields.iter() {
-        let ident_a = match &field.ident {
-            Some(ref ident) => ident,
-            _ => panic!("compare_fields_derive only supports named struct fields."),
+        let Some(ident_a) = &field.ident else {
+            panic!("compare_fields_derive only supports named struct fields.");
        };
 
        let field_name = ident_a.to_string();
        let ident_b = ident_a.clone();
 
@@ -916,9 +916,8 @@ impl BeaconNodeHttpClient {
         Error,
     > {
         let path = self.get_beacon_blocks_path(block_id)?;
-        let response = match self.get_response(path, |b| b).await.optional()? {
-            Some(res) => res,
-            None => return Ok(None),
+        let Some(response) = self.get_response(path, |b| b).await.optional()? else {
+            return Ok(None);
        };
 
        Ok(Some(response.json().await?))
@@ -932,9 +931,8 @@
         block_id: BlockId,
     ) -> Result<Option<GenericResponse<BlobSidecarList<T>>>, Error> {
         let path = self.get_blobs_path(block_id)?;
-        let response = match self.get_response(path, |b| b).await.optional()? {
-            Some(res) => res,
-            None => return Ok(None),
+        let Some(response) = self.get_response(path, |b| b).await.optional()? else {
+            return Ok(None);
        };
 
        Ok(Some(response.json().await?))
@@ -951,9 +949,8 @@
         Error,
     > {
         let path = self.get_beacon_blinded_blocks_path(block_id)?;
-        let response = match self.get_response(path, |b| b).await.optional()? {
-            Some(res) => res,
-            None => return Ok(None),
+        let Some(response) = self.get_response(path, |b| b).await.optional()? else {
+            return Ok(None);
        };
 
        Ok(Some(response.json().await?))
@@ -20,9 +20,8 @@ pub fn test_random_derive(input: TokenStream) -> TokenStream {
     let name = &derived_input.ident;
     let (impl_generics, ty_generics, where_clause) = &derived_input.generics.split_for_impl();
 
-    let struct_data = match &derived_input.data {
-        syn::Data::Struct(s) => s,
-        _ => panic!("test_random_derive only supports structs."),
+    let syn::Data::Struct(struct_data) = &derived_input.data else {
+        panic!("test_random_derive only supports structs.");
    };
 
    // Build quotes for fields that should be generated and those that should be built from
@@ -1035,13 +1035,11 @@ impl ProtoArray {
             .epoch
             .start_slot(E::slots_per_epoch());
 
-        let mut node = if let Some(node) = self
+        let Some(mut node) = self
             .indices
             .get(&root)
             .and_then(|index| self.nodes.get(*index))
-        {
-            node
-        } else {
+        else {
            // An unknown root is not a finalized descendant. This line can only
            // be reached if the user supplies a root that is not known to fork
            // choice.
@@ -99,9 +99,8 @@ pub fn verify_signature_sets<'a>(
 
         // Aggregate all the public keys.
         // Public keys have already been checked for subgroup and infinity
-        let agg_pk = match blst_core::AggregatePublicKey::aggregate(&signing_keys, false) {
-            Ok(agg_pk) => agg_pk,
-            Err(_) => return false,
+        let Ok(agg_pk) = blst_core::AggregatePublicKey::aggregate(&signing_keys, false) else {
+            return false;
        };
        pks.push(agg_pk.to_public_key());
    }
@@ -254,12 +254,9 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
     }
 
     // Disable file logging if no path is specified.
-        let path = match config.path {
-            Some(path) => path,
-            None => {
-                self.log = Some(stdout_logger);
-                return Ok(self);
-            }
+        let Some(path) = config.path else {
+            self.log = Some(stdout_logger);
+            return Ok(self);
        };
 
        // Ensure directories are created becfore the logfile.
@@ -159,9 +159,8 @@ pub trait TargetArrayChunk: Sized + serde::Serialize + serde::de::DeserializeOwn
         config: &Config,
     ) -> Result<Option<Self>, Error> {
         let disk_key = config.disk_key(validator_chunk_index, chunk_index);
-        let chunk_bytes = match txn.get(Self::select_db(db), &disk_key.to_be_bytes())? {
-            Some(chunk_bytes) => chunk_bytes,
-            None => return Ok(None),
+        let Some(chunk_bytes) = txn.get(Self::select_db(db), &disk_key.to_be_bytes())? else {
+            return Ok(None);
        };
 
        let chunk = bincode::deserialize_from(ZlibDecoder::new(chunk_bytes.borrow()))?;
@@ -448,11 +447,9 @@ pub fn apply_attestation_for_validator<E: EthSpec, T: TargetArrayChunk>(
         return Ok(slashing_status);
     }
 
-    let mut start_epoch = if let Some(start_epoch) =
+    let Some(mut start_epoch) =
         T::first_start_epoch(attestation.data.source.epoch, current_epoch, config)
-    {
-        start_epoch
-    } else {
+    else {
        return Ok(slashing_status);
    };
 
@@ -536,12 +533,10 @@ pub fn epoch_update_for_validator<E: EthSpec, T: TargetArrayChunk>(
     current_epoch: Epoch,
     config: &Config,
 ) -> Result<(), Error> {
-    let previous_current_epoch =
-        if let Some(epoch) = db.get_current_epoch_for_validator(validator_index, txn)? {
-            epoch
-        } else {
-            return Ok(());
-        };
+    let Some(previous_current_epoch) = db.get_current_epoch_for_validator(validator_index, txn)?
+    else {
+        return Ok(());
+    };
 
    let mut epoch = previous_current_epoch;
 
@@ -51,13 +51,10 @@ impl<E: EthSpec> Case for MerkleProofValidity<E> {
     fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> {
         let mut state = self.state.clone();
         state.initialize_tree_hash_cache();
-        let proof = match state.compute_merkle_proof(self.merkle_proof.leaf_index) {
-            Ok(proof) => proof,
-            Err(_) => {
-                return Err(Error::FailedToParseTest(
-                    "Could not retrieve merkle proof".to_string(),
-                ))
-            }
+        let Ok(proof) = state.compute_merkle_proof(self.merkle_proof.leaf_index) else {
+            return Err(Error::FailedToParseTest(
+                "Could not retrieve merkle proof".to_string(),
+            ));
        };
        let proof_len = proof.len();
        let branch_len = self.merkle_proof.branch.len();
@@ -525,9 +525,7 @@ impl DoppelgangerService {
         }
 
         // Resolve the index from the server response back to a public key.
-        let pubkey = if let Some(pubkey) = indices_map.get(&response.index) {
-            pubkey
-        } else {
+        let Some(pubkey) = indices_map.get(&response.index) else {
            crit!(
                self.log,
                "Inconsistent indices map";
@@ -607,9 +607,7 @@ pub async fn fill_in_aggregation_proofs<T: SlotClock + 'static, E: EthSpec>(
 
     // Add to global storage (we add regularly so the proofs can be used ASAP).
     let sync_map = duties_service.sync_duties.committees.read();
-    let committee_duties = if let Some(duties) = sync_map.get(&sync_committee_period) {
-        duties
-    } else {
+    let Some(committee_duties) = sync_map.get(&sync_committee_period) else {
        debug!(
            log,
            "Missing sync duties";
@@ -158,13 +158,11 @@ impl<T: SlotClock + 'static, E: EthSpec> SyncCommitteeService<T, E> {
             .checked_sub(slot_duration / 3)
             .unwrap_or_else(|| Duration::from_secs(0));
 
-        let slot_duties = if let Some(duties) = self
+        let Some(slot_duties) = self
             .duties_service
             .sync_duties
             .get_duties_for_slot::<E>(slot, &self.duties_service.spec)
-        {
-            duties
-        } else {
+        else {
            debug!(log, "No duties known for slot {}", slot);
            return Ok(());
        };