diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index 4c73da695..5c6de7b18 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -2948,6 +2948,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         ops.push(StoreOp::PutState(block.state_root(), &state));
 
         if let Some(blobs) = blobs {
+            //FIXME(sean) using this for debugging for now
+            info!(self.log, "Writing blobs to store"; "block_root" => ?block_root);
             ops.push(StoreOp::PutBlobs(block_root, blobs));
         };
         let txn_lock = self.store.hot_db.begin_rw_transaction();
diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs
index ea4ed6e14..1b05c7d39 100644
--- a/beacon_node/beacon_chain/src/blob_verification.rs
+++ b/beacon_node/beacon_chain/src/blob_verification.rs
@@ -87,6 +87,8 @@ pub enum BlobError {
     /// We were unable to process this sync committee message due to an internal error. It's unclear if the
     /// sync committee message is valid.
     BeaconChainError(BeaconChainError),
+    /// No blobs for the specified block where we would expect blobs.
+    MissingBlobs,
 }
 
 impl From<BeaconChainError> for BlobError {
diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs
index 589c0656d..b9e65bc0a 100644
--- a/beacon_node/beacon_chain/src/block_verification.rs
+++ b/beacon_node/beacon_chain/src/block_verification.rs
@@ -579,10 +579,8 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
     let mut signature_verified_blocks = Vec::with_capacity(chain_segment.len());
 
     for (block_root, block) in &chain_segment {
-        let mut consensus_context = ConsensusContext::new(block.slot())
-            .set_current_block_root(*block_root)
-            //FIXME(sean) Consider removing this is we pass the blob wrapper everywhere
-            .set_blobs_sidecar(block.blobs_sidecar());
+        let mut consensus_context =
+            ConsensusContext::new(block.slot()).set_current_block_root(*block_root);
 
         signature_verifier.include_all_signatures(block.block(), &mut consensus_context)?;
 
@@ -936,8 +934,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
             .set_current_block_root(block_root)
             .set_proposer_index(block.message().proposer_index())
             .set_blobs_sidecar_validated(true) // Validated in `validate_blob_for_gossip`
-            .set_blobs_verified_vs_txs(true) // Validated in `validate_blob_for_gossip`
-            .set_blobs_sidecar(block.blobs_sidecar()); // TODO: potentially remove
+            .set_blobs_verified_vs_txs(true);
 
         Ok(Self {
             block,
@@ -1009,9 +1006,8 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
 
         let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec);
 
-        let mut consensus_context = ConsensusContext::new(block.slot())
-            .set_current_block_root(block_root)
-            .set_blobs_sidecar(block.blobs_sidecar());
+        let mut consensus_context =
+            ConsensusContext::new(block.slot()).set_current_block_root(block_root);
 
         signature_verifier.include_all_signatures(block.block(), &mut consensus_context)?;
 
@@ -1564,49 +1560,51 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
          * Verify kzg proofs and kzg commitments against transactions if required
          */
         //FIXME(sean) should this be prior to applying attestions to fork choice above? done in parallel?
-        if let Some(ref sidecar) = consensus_context.blobs_sidecar() {
-            if let Some(data_availability_boundary) = chain.data_availability_boundary() {
-                if block_slot.epoch(T::EthSpec::slots_per_epoch()) > data_availability_boundary {
-                    let kzg = chain.kzg.as_ref().ok_or(BlockError::BlobValidation(
-                        BlobError::TrustedSetupNotInitialized,
-                    ))?;
-                    let transactions = block
-                        .message()
-                        .body()
-                        .execution_payload_eip4844()
-                        .map(|payload| payload.transactions())
-                        .map_err(|_| BlockError::BlobValidation(BlobError::TransactionsMissing))?
-                        .ok_or(BlockError::BlobValidation(BlobError::TransactionsMissing))?;
-                    let kzg_commitments =
-                        block.message().body().blob_kzg_commitments().map_err(|_| {
-                            BlockError::BlobValidation(BlobError::KzgCommitmentMissing)
-                        })?;
-                    if !consensus_context.blobs_sidecar_validated() {
-                        if !kzg_utils::validate_blobs_sidecar(
-                            &kzg,
-                            block.slot(),
-                            block_root,
-                            kzg_commitments,
-                            sidecar,
-                        )
-                        .map_err(|e| BlockError::BlobValidation(BlobError::KzgError(e)))?
-                        {
-                            return Err(BlockError::BlobValidation(BlobError::InvalidKzgProof));
-                        }
-                    }
-                    if !consensus_context.blobs_verified_vs_txs()
-                        && verify_kzg_commitments_against_transactions::<T::EthSpec>(
-                            transactions,
-                            kzg_commitments,
-                        )
-                        //FIXME(sean) we should maybe just map this error so we have more info about the mismatch
-                        .is_err()
+        if let Some(data_availability_boundary) = chain.data_availability_boundary() {
+            if block_slot.epoch(T::EthSpec::slots_per_epoch()) >= data_availability_boundary {
+                let sidecar = block
+                    .blobs()
+                    .ok_or(BlockError::BlobValidation(BlobError::MissingBlobs))?;
+                let kzg = chain.kzg.as_ref().ok_or(BlockError::BlobValidation(
+                    BlobError::TrustedSetupNotInitialized,
+                ))?;
+                let transactions = block
+                    .message()
+                    .body()
+                    .execution_payload_eip4844()
+                    .map(|payload| payload.transactions())
+                    .map_err(|_| BlockError::BlobValidation(BlobError::TransactionsMissing))?
+                    .ok_or(BlockError::BlobValidation(BlobError::TransactionsMissing))?;
+                let kzg_commitments = block
+                    .message()
+                    .body()
+                    .blob_kzg_commitments()
+                    .map_err(|_| BlockError::BlobValidation(BlobError::KzgCommitmentMissing))?;
+                if !consensus_context.blobs_sidecar_validated() {
+                    if !kzg_utils::validate_blobs_sidecar(
+                        &kzg,
+                        block.slot(),
+                        block_root,
+                        kzg_commitments,
+                        sidecar,
+                    )
+                    .map_err(|e| BlockError::BlobValidation(BlobError::KzgError(e)))?
                     {
-                        return Err(BlockError::BlobValidation(
-                            BlobError::TransactionCommitmentMismatch,
-                        ));
+                        return Err(BlockError::BlobValidation(BlobError::InvalidKzgProof));
                     }
                 }
+                if !consensus_context.blobs_verified_vs_txs()
+                    && verify_kzg_commitments_against_transactions::<T::EthSpec>(
+                        transactions,
+                        kzg_commitments,
+                    )
+                    //FIXME(sean) we should maybe just map this error so we have more info about the mismatch
+                    .is_err()
+                {
+                    return Err(BlockError::BlobValidation(
+                        BlobError::TransactionCommitmentMismatch,
+                    ));
+                }
             }
         }
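
The rewritten hunk above makes two behavioural changes at once: the sidecar is now taken from the block wrapper itself (`block.blobs()`, failing with the new `BlobError::MissingBlobs`) rather than from the `ConsensusContext`, and the boundary comparison becomes inclusive (`>=` where the old code used `>`). A minimal, self-contained sketch of the inclusive epoch check, with a hypothetical `SLOTS_PER_EPOCH` constant standing in for the real chain spec:

    // Illustrative only: blocks whose epoch *equals* the data availability
    // boundary are now subject to blob validation as well.
    const SLOTS_PER_EPOCH: u64 = 32;

    fn requires_blob_checks(block_slot: u64, boundary_epoch: u64) -> bool {
        block_slot / SLOTS_PER_EPOCH >= boundary_epoch
    }

    fn main() {
        let boundary = 100;
        assert!(requires_blob_checks(100 * SLOTS_PER_EPOCH, boundary)); // at the boundary
        assert!(!requires_blob_checks(100 * SLOTS_PER_EPOCH - 1, boundary)); // before it
    }
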
diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs
index f1c9b5331..085f5036f 100644
--- a/beacon_node/http_api/src/publish_blocks.rs
+++ b/beacon_node/http_api/src/publish_blocks.rs
@@ -9,6 +9,7 @@ use slot_clock::SlotClock;
 use std::sync::Arc;
 use tokio::sync::mpsc::UnboundedSender;
 use tree_hash::TreeHash;
+use types::signed_block_and_blobs::BlockWrapper;
 use types::{
     AbstractExecPayload, BlindedPayload, BlobsSidecar, EthSpec, ExecPayload, ExecutionBlockHash,
     FullPayload, Hash256, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar,
@@ -32,12 +33,19 @@ pub async fn publish_block<T: BeaconChainTypes>(
 
     // Send the block, regardless of whether or not it is valid. The API
     // specification is very clear that this is the desired behaviour.
-    let message = if matches!(block.as_ref(), &SignedBeaconBlock::Eip4844(_)) {
+    let wrapped_block = if matches!(block.as_ref(), &SignedBeaconBlock::Eip4844(_)) {
         if let Some(sidecar) = chain.blob_cache.pop(&block_root) {
-            PubsubMessage::BeaconBlockAndBlobsSidecars(SignedBeaconBlockAndBlobsSidecar {
-                beacon_block: block.clone(),
+            let block_and_blobs = SignedBeaconBlockAndBlobsSidecar {
+                beacon_block: block,
                 blobs_sidecar: Arc::new(sidecar),
-            })
+            };
+            crate::publish_pubsub_message(
+                network_tx,
+                PubsubMessage::BeaconBlockAndBlobsSidecars(block_and_blobs.clone()),
+            )?;
+            BlockWrapper::BlockAndBlob {
+                block_sidecar_pair: block_and_blobs,
+            }
         } else {
             //FIXME(sean): This should probably return a specific no-blob-cached error code, beacon API coordination required
             return Err(warp_utils::reject::broadcast_without_import(format!(
@@ -45,18 +53,19 @@
             )));
         }
     } else {
-        PubsubMessage::BeaconBlock(block.clone())
+        crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?;
+        BlockWrapper::Block { block }
     };
-    crate::publish_pubsub_message(network_tx, message)?;
 
     // Determine the delay after the start of the slot, register it with metrics.
+    let block = wrapped_block.block();
     let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock);
     metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay);
 
     match chain
         .process_block(
             block_root,
-            block.clone(),
+            wrapped_block.clone(),
             CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs
index f85ec980d..64401308c 100644
--- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs
+++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs
@@ -473,6 +473,11 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
             RPCError::ErrorResponse(code, _) => match code {
                 RPCResponseErrorCode::Unknown => PeerAction::HighToleranceError,
                 RPCResponseErrorCode::ResourceUnavailable => {
+                    // Don't ban on this because we want to retry with a block by root request.
+                    if matches!(protocol, Protocol::BlobsByRoot) {
+                        return;
+                    }
+
                     // NOTE: This error only makes sense for the `BlocksByRange` and `BlocksByRoot`
                     // protocols.
                     //
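
The `publish_blocks.rs` change reorders the handler: the pubsub message is now published inside each branch, and the branch result is a `BlockWrapper` handed to `process_block`, so the broadcast always happens before local import. A reduced sketch of that shape (stand-in types, not the real Lighthouse API; the real code also broadcasts the sidecar together with the block):

    // Stand-in types; the real code wraps a SignedBeaconBlock and a blobs sidecar.
    enum BlockWrapper<B, S> {
        Block { block: B },
        BlockAndBlob { block: B, sidecar: S },
    }

    fn publish_then_wrap<B, S>(
        block: B,
        sidecar: Option<S>,
        broadcast: impl Fn(&B),
    ) -> BlockWrapper<B, S> {
        broadcast(&block); // the network send happens first, as in the diff
        match sidecar {
            Some(sidecar) => BlockWrapper::BlockAndBlob { block, sidecar },
            None => BlockWrapper::Block { block },
        }
    }
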
diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs
index a70aac349..ce6e30ebf 100644
--- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs
+++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs
@@ -531,9 +531,6 @@ fn handle_v2_request<T: EthSpec>(
         Protocol::BlocksByRoot => Ok(Some(InboundRequest::BlocksByRoot(BlocksByRootRequest {
             block_roots: VariableList::from_ssz_bytes(decoded_buffer)?,
         }))),
-        Protocol::BlobsByRange => Ok(Some(InboundRequest::BlobsByRange(
-            BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?,
-        ))),
         // MetaData requests return early from InboundUpgrade and do not reach the decoder.
         // Handle this case just for completeness.
         Protocol::MetaData => {
@@ -826,12 +823,25 @@ mod tests {
         }
     }
 
+    fn blbrange_request() -> BlobsByRangeRequest {
+        BlobsByRangeRequest {
+            start_slot: 0,
+            count: 10,
+        }
+    }
+
     fn bbroot_request() -> BlocksByRootRequest {
         BlocksByRootRequest {
             block_roots: VariableList::from(vec![Hash256::zero()]),
         }
     }
 
+    fn blbroot_request() -> BlobsByRootRequest {
+        BlobsByRootRequest {
+            block_roots: VariableList::from(vec![Hash256::zero()]),
+        }
+    }
+
     fn ping_message() -> Ping {
         Ping { data: 1 }
     }
@@ -1454,6 +1464,8 @@ mod tests {
             OutboundRequest::Goodbye(GoodbyeReason::Fault),
             OutboundRequest::BlocksByRange(bbrange_request()),
             OutboundRequest::BlocksByRoot(bbroot_request()),
+            OutboundRequest::BlobsByRange(blbrange_request()),
+            OutboundRequest::BlobsByRoot(blbroot_request()),
             OutboundRequest::MetaData(PhantomData::<Spec>),
         ];
         for req in requests.iter() {
diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs
index 549fb2209..9adf7699b 100644
--- a/beacon_node/lighthouse_network/src/service/mod.rs
+++ b/beacon_node/lighthouse_network/src/service/mod.rs
@@ -75,6 +75,8 @@ pub enum NetworkEvent<AppReqId: ReqId, TSpec: EthSpec> {
         id: AppReqId,
         /// The peer to which this request was sent.
         peer_id: PeerId,
+        /// The error of the failed request.
+        error: RPCError,
     },
     RequestReceived {
         /// The peer that sent the request.
@@ -1177,9 +1179,9 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                     &error,
                     ConnectionDirection::Outgoing,
                 );
-                // inform failures of requests comming outside the behaviour
+                // inform failures of requests coming outside the behaviour
                 if let RequestId::Application(id) = id {
-                    Some(NetworkEvent::RPCFailed { peer_id, id })
+                    Some(NetworkEvent::RPCFailed { peer_id, id, error })
                 } else {
                    None
                 }
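
With the `ssz_snappy.rs` change, the `BlobsByRange` arm is dropped from `handle_v2_request`, and the codec tests gain round-trip coverage for both blob request types. The `service/mod.rs` change starts a thread of plumbing that continues through the rest of this patch: `NetworkEvent::RPCFailed` now carries the `RPCError`, so layers further down can distinguish failure causes. A toy mirror of the pattern (all names here are stand-ins):

    #[derive(Debug)]
    enum RpcError {
        Timeout,
        ErrorResponse(u8, String), // (response code, message)
    }

    enum NetworkEvent<Id> {
        RpcFailed { id: Id, peer_id: u64, error: RpcError },
    }

    fn route(ev: NetworkEvent<u32>) {
        // Downstream layers can now match on the error instead of a bare failure.
        let NetworkEvent::RpcFailed { error, .. } = ev;
        println!("rpc failed: {error:?}");
    }
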
diff --git a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs
index 87e8a3fc4..6eae7eed5 100644
--- a/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs
+++ b/beacon_node/network/src/beacon_processor/worker/rpc_methods.rs
@@ -254,6 +254,14 @@ impl<T: BeaconChainTypes> Worker<T> {
                         "peer" => %peer_id,
                         "request_root" => ?root
                     );
+                    self.send_error_response(
+                        peer_id,
+                        RPCResponseErrorCode::ResourceUnavailable,
+                        "No blob for requested block".into(),
+                        request_id,
+                    );
+                    send_response = false;
+                    break;
                 }
                 Ok((None, Some(_))) => {
                     debug!(
diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs
index 5675cb0ad..31f209204 100644
--- a/beacon_node/network/src/router/mod.rs
+++ b/beacon_node/network/src/router/mod.rs
@@ -11,6 +11,7 @@ use crate::error;
 use crate::service::{NetworkMessage, RequestId};
 use beacon_chain::{BeaconChain, BeaconChainTypes};
 use futures::prelude::*;
+use lighthouse_network::rpc::RPCError;
 use lighthouse_network::{
     MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response,
 };
@@ -58,6 +59,7 @@ pub enum RouterMessage<T: EthSpec> {
     RPCFailed {
         peer_id: PeerId,
         request_id: RequestId,
+        error: RPCError,
     },
     /// A gossip message has been received. The fields are: message id, the peer that sent us this
     /// message, the message itself and a bool which indicates if the message should be processed
@@ -140,8 +142,9 @@ impl<T: BeaconChainTypes> Router<T> {
             RouterMessage::RPCFailed {
                 peer_id,
                 request_id,
+                error,
             } => {
-                self.processor.on_rpc_error(peer_id, request_id);
+                self.processor.on_rpc_error(peer_id, request_id, error);
             }
             RouterMessage::PubsubMessage(id, peer_id, gossip, should_process) => {
                 self.handle_gossip(id, peer_id, gossip, should_process);
diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs
index 97c2b2264..5ee0e367b 100644
--- a/beacon_node/network/src/router/processor.rs
+++ b/beacon_node/network/src/router/processor.rs
@@ -103,12 +103,13 @@ impl<T: BeaconChainTypes> Processor<T> {
 
     /// An error occurred during an RPC request. The state is maintained by the sync manager, so
     /// this function notifies the sync manager of the error.
-    pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId) {
+    pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) {
         // Check if the failed RPC belongs to sync
         if let RequestId::Sync(request_id) = request_id {
             self.send_to_sync(SyncMessage::RpcError {
                 peer_id,
                 request_id,
+                error,
             });
         }
     }
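
On the serving side, the worker now appears to answer a blobs request whose sidecar is missing with an explicit `ResourceUnavailable` error response and then stops streaming, rather than skipping silently; together with the peer-manager exemption earlier in this patch, this is what lets the requesting node fall back to a block-only retry. A self-contained sketch of that responder shape (simplified types):

    enum Response {
        Blob(&'static str),
        Error(&'static str),
    }

    fn serve(roots: &[u64], lookup: impl Fn(u64) -> Option<&'static str>) -> Vec<Response> {
        let mut responses = Vec::new();
        for &root in roots {
            match lookup(root) {
                Some(blob) => responses.push(Response::Blob(blob)),
                None => {
                    // Answer with an explicit error and stop, as in the diff.
                    responses.push(Response::Error("No blob for requested block"));
                    break;
                }
            }
        }
        responses
    }
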
diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs
index 4568ed1a2..201494a34 100644
--- a/beacon_node/network/src/service.rs
+++ b/beacon_node/network/src/service.rs
@@ -499,10 +499,11 @@ impl<T: BeaconChainTypes> NetworkService<T> {
                         response,
                     });
                 }
-                NetworkEvent::RPCFailed { id, peer_id } => {
+                NetworkEvent::RPCFailed { id, peer_id, error } => {
                     self.send_to_router(RouterMessage::RPCFailed {
                         peer_id,
                         request_id: id,
+                        error,
                     });
                 }
                 NetworkEvent::StatusPeer(peer_id) => {
diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs
index 3b0600775..ca633ba76 100644
--- a/beacon_node/network/src/sync/block_lookups/mod.rs
+++ b/beacon_node/network/src/sync/block_lookups/mod.rs
@@ -6,6 +6,7 @@ use beacon_chain::{BeaconChainTypes, BlockError};
 use fnv::FnvHashMap;
 use futures::StreamExt;
 use itertools::{Either, Itertools};
+use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode};
 use lighthouse_network::{PeerAction, PeerId};
 use lru_cache::LRUTimeCache;
 use slog::{debug, error, trace, warn, Logger};
@@ -40,6 +41,13 @@ pub type RootBlockTuple<T> = (Hash256, BlockWrapper<T>);
 const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60;
 const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3;
 
+/// This is used to resolve the scenario where we request a parent from before the data availability
+/// boundary and need to retry with a request for only the block.
+pub enum ForceBlockRequest {
+    True,
+    False,
+}
+
 pub(crate) struct BlockLookups<T: BeaconChainTypes> {
     /// Parent chain lookups being downloaded.
     parent_lookups: SmallVec<[ParentLookup<T>; 3]>,
@@ -165,7 +173,7 @@
         }
 
         let parent_lookup = ParentLookup::new(block_root, block, peer_id);
-        self.request_parent(parent_lookup, cx);
+        self.request_parent(parent_lookup, cx, ForceBlockRequest::False);
     }
 
     /* Lookup responses */
@@ -291,7 +299,7 @@
                 cx.report_peer(peer_id, PeerAction::LowToleranceError, e);
 
                 // We try again if possible.
-                self.request_parent(parent_lookup, cx);
+                self.request_parent(parent_lookup, cx, ForceBlockRequest::False);
             }
             VerifyError::PreviousFailure { parent_root } => {
                 debug!(
@@ -367,7 +375,7 @@
         {
             let parent_lookup = self.parent_lookups.remove(pos);
             trace!(self.log, "Parent lookup's peer disconnected"; &parent_lookup);
-            self.request_parent(parent_lookup, cx);
+            self.request_parent(parent_lookup, cx, ForceBlockRequest::False);
         }
     }
 
@@ -377,6 +385,7 @@
         id: Id,
         peer_id: PeerId,
         cx: &mut SyncNetworkContext<T>,
+        error: RPCError,
     ) {
         if let Some(pos) = self
             .parent_lookups
@@ -386,7 +395,19 @@
             let mut parent_lookup = self.parent_lookups.remove(pos);
             parent_lookup.download_failed();
             trace!(self.log, "Parent lookup request failed"; &parent_lookup);
-            self.request_parent(parent_lookup, cx);
+
+            // `ResourceUnavailable` indicates we requested a parent block from prior to the 4844 fork epoch.
+            let force_block_request = if let RPCError::ErrorResponse(
+                RPCResponseErrorCode::ResourceUnavailable,
+                _,
+            ) = error
+            {
+                debug!(self.log, "RPC parent lookup for block and blobs failed. Retrying the request for just a block"; "peer_id" => %peer_id);
+                ForceBlockRequest::True
+            } else {
+                ForceBlockRequest::False
+            };
+            self.request_parent(parent_lookup, cx, force_block_request);
         } else {
             return debug!(self.log, "RPC failure for a parent lookup request that was not found"; "peer_id" => %peer_id);
         };
@@ -542,7 +563,7 @@
                 // need to keep looking for parents
                 // add the block back to the queue and continue the search
                 parent_lookup.add_block(block);
-                self.request_parent(parent_lookup, cx);
+                self.request_parent(parent_lookup, cx, ForceBlockRequest::False);
             }
             BlockProcessResult::Ok
             | BlockProcessResult::Err(BlockError::BlockIsAlreadyKnown { .. }) => {
@@ -604,7 +625,7 @@
 
                 // Try again if possible
                 parent_lookup.processing_failed();
-                self.request_parent(parent_lookup, cx);
+                self.request_parent(parent_lookup, cx, ForceBlockRequest::False);
             }
             BlockProcessResult::Ignored => {
                 // Beacon processor signalled to ignore the block processing result.
@@ -697,8 +718,9 @@
         &mut self,
         mut parent_lookup: ParentLookup<T>,
         cx: &mut SyncNetworkContext<T>,
+        force_block_request: ForceBlockRequest,
     ) {
-        match parent_lookup.request_parent(cx) {
+        match parent_lookup.request_parent(cx, force_block_request) {
             Err(e) => {
                 debug!(self.log, "Failed to request parent"; &parent_lookup, "error" => e.as_static());
                 match e {
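
The heart of the retry logic sits in `parent_lookup_failed` above: a failed parent lookup inspects the error, and a `ResourceUnavailable` response flips the next attempt to a block-only request. A self-contained reduction of that decision (simplified error type):

    enum ForceBlockRequest {
        True,
        False,
    }

    enum Code {
        ResourceUnavailable,
        Unknown,
    }

    enum RpcError {
        ErrorResponse(Code, String),
        Other,
    }

    fn retry_mode(error: &RpcError) -> ForceBlockRequest {
        match error {
            // The peer told us it cannot serve blobs for this root, so retry
            // asking for the block alone.
            RpcError::ErrorResponse(Code::ResourceUnavailable, _) => ForceBlockRequest::True,
            _ => ForceBlockRequest::False,
        }
    }
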
diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs
index 0e9036cee..fd17e18db 100644
--- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs
+++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs
@@ -6,6 +6,7 @@ use store::{Hash256, SignedBeaconBlock};
 use strum::IntoStaticStr;
 use types::signed_block_and_blobs::BlockWrapper;
 
+use crate::sync::block_lookups::ForceBlockRequest;
 use crate::sync::{
     manager::{Id, SLOT_IMPORT_TOLERANCE},
     network_context::SyncNetworkContext,
@@ -72,14 +73,18 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
     }
 
     /// Attempts to request the next unknown parent. If the request fails, it should be removed.
-    pub fn request_parent(&mut self, cx: &mut SyncNetworkContext<T>) -> Result<(), RequestError> {
+    pub fn request_parent(
+        &mut self,
+        cx: &mut SyncNetworkContext<T>,
+        force_block_request: ForceBlockRequest,
+    ) -> Result<(), RequestError> {
         // check to make sure this request hasn't failed
         if self.downloaded_blocks.len() >= PARENT_DEPTH_TOLERANCE {
             return Err(RequestError::ChainTooLong);
         }
 
         let (peer_id, request) = self.current_parent_request.request_block()?;
-        match cx.parent_lookup_request(peer_id, request) {
+        match cx.parent_lookup_request(peer_id, request, force_block_request) {
             Ok(request_id) => {
                 self.current_parent_request_id = Some(request_id);
                 Ok(())
diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs
index c55e90cf4..60105d422 100644
--- a/beacon_node/network/src/sync/manager.rs
+++ b/beacon_node/network/src/sync/manager.rs
@@ -45,6 +45,7 @@ use crate::sync::range_sync::ExpectedBatchTy;
 use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState};
 use futures::StreamExt;
 use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS;
+use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode};
 use lighthouse_network::types::{NetworkGlobals, SyncState};
 use lighthouse_network::SyncInfo;
 use lighthouse_network::{PeerAction, PeerId};
@@ -131,6 +132,7 @@ pub enum SyncMessage<T: EthSpec> {
     RpcError {
         peer_id: PeerId,
         request_id: RequestId,
+        error: RPCError,
     },
 
     /// A batch has been processed by the block processor thread.
@@ -282,7 +284,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
     }
 
     /// Handles RPC errors related to requests that were emitted from the sync manager.
-    fn inject_error(&mut self, peer_id: PeerId, request_id: RequestId) {
+    fn inject_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) {
         trace!(self.log, "Sync manager received a failed RPC");
         match request_id {
             RequestId::SingleBlock { id } => {
@@ -291,7 +293,7 @@
             }
             RequestId::ParentLookup { id } => {
                 self.block_lookups
-                    .parent_lookup_failed(id, peer_id, &mut self.network);
+                    .parent_lookup_failed(id, peer_id, &mut self.network, error);
             }
             RequestId::BackFillSync { id } => {
                 if let Some(batch_id) = self
@@ -603,7 +605,8 @@
             SyncMessage::RpcError {
                 peer_id,
                 request_id,
-            } => self.inject_error(peer_id, request_id),
+                error,
+            } => self.inject_error(peer_id, request_id, error),
             SyncMessage::BlockProcessed {
                 process_type,
                 result,
diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs
index 5a96e1924..5917c7ecc 100644
--- a/beacon_node/network/src/sync/network_context.rs
+++ b/beacon_node/network/src/sync/network_context.rs
@@ -6,6 +6,7 @@ use super::range_sync::{BatchId, ChainId, ExpectedBatchTy};
 use crate::beacon_processor::WorkEvent;
 use crate::service::{NetworkMessage, RequestId};
 use crate::status::ToStatusMessage;
+use crate::sync::block_lookups::ForceBlockRequest;
 use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState};
 use fnv::FnvHashMap;
 use lighthouse_network::rpc::methods::BlobsByRangeRequest;
@@ -504,11 +505,13 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         &mut self,
         peer_id: PeerId,
         request: BlocksByRootRequest,
+        force_block_request: ForceBlockRequest,
     ) -> Result<Id, &'static str> {
         let request = if self
             .chain
             .is_data_availability_check_required()
             .map_err(|_| "Unable to read slot clock")?
+            && matches!(force_block_request, ForceBlockRequest::False)
         {
             trace!(
                 self.log,
diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs
index d9453d364..f5585426c 100644
--- a/consensus/state_processing/src/consensus_context.rs
+++ b/consensus/state_processing/src/consensus_context.rs
@@ -185,13 +185,4 @@ impl<T: EthSpec> ConsensusContext<T> {
     pub fn blobs_verified_vs_txs(&self) -> bool {
         self.blobs_verified_vs_txs
     }
-
-    pub fn set_blobs_sidecar(mut self, blobs_sidecar: Option<Arc<BlobsSidecar<T>>>) -> Self {
-        self.blobs_sidecar = blobs_sidecar;
-        self
-    }
-
-    pub fn blobs_sidecar(&self) -> Option<Arc<BlobsSidecar<T>>> {
-        self.blobs_sidecar.clone()
-    }
 }
diff --git a/consensus/types/src/signed_block_and_blobs.rs b/consensus/types/src/signed_block_and_blobs.rs
index 9b4517eb4..09ff89e7b 100644
--- a/consensus/types/src/signed_block_and_blobs.rs
+++ b/consensus/types/src/signed_block_and_blobs.rs
@@ -66,14 +66,6 @@ impl<T: EthSpec> BlockWrapper<T> {
             }
         }
     }
-    pub fn blobs_sidecar(&self) -> Option<Arc<BlobsSidecar<T>>> {
-        match self {
-            BlockWrapper::Block { block: _ } => None,
-            BlockWrapper::BlockAndBlob { block_sidecar_pair } => {
-                Some(block_sidecar_pair.blobs_sidecar.clone())
-            }
-        }
-    }
 
     pub fn blobs(&self) -> Option<&BlobsSidecar<T>> {
         match self {
@@ -84,6 +76,15 @@ impl<T: EthSpec> BlockWrapper<T> {
         }
     }
 
+    pub fn blobs_cloned(&self) -> Option<Arc<BlobsSidecar<T>>> {
+        match self {
+            BlockWrapper::Block { block: _ } => None,
+            BlockWrapper::BlockAndBlob { block_sidecar_pair } => {
+                Some(block_sidecar_pair.blobs_sidecar.clone())
+            }
+        }
+    }
+
     pub fn message(&self) -> crate::BeaconBlockRef<T> {
         match self {
             BlockWrapper::Block { block } => block.message(),
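
Finally, the `BlockWrapper` accessor that returned an owned `Arc` is renamed from `blobs_sidecar` to `blobs_cloned` and placed after the borrowing `blobs` accessor, keeping the cheap borrow as the default and making the clone explicit at call sites. A compilable miniature of the pair (stand-in types):

    use std::sync::Arc;

    struct Sidecar;

    enum BlockWrapper {
        Block,
        BlockAndBlob { sidecar: Arc<Sidecar> },
    }

    impl BlockWrapper {
        // Borrow the sidecar when the caller only needs to inspect it.
        fn blobs(&self) -> Option<&Sidecar> {
            match self {
                BlockWrapper::Block => None,
                BlockWrapper::BlockAndBlob { sidecar } => Some(sidecar),
            }
        }

        // Hand out a cheap Arc clone when the caller needs ownership.
        fn blobs_cloned(&self) -> Option<Arc<Sidecar>> {
            match self {
                BlockWrapper::Block => None,
                BlockWrapper::BlockAndBlob { sidecar } => Some(Arc::clone(sidecar)),
            }
        }
    }
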