mirror of
https://gitlab.com/pulsechaincom/lighthouse-pulse.git
synced 2025-01-08 20:11:22 +00:00
9667dc2f03
## Issue Addressed Closes #1891 Closes #1784 ## Proposed Changes Implement checkpoint sync for Lighthouse, enabling it to start from a weak subjectivity checkpoint. ## Additional Info - [x] Return unavailable status for out-of-range blocks requested by peers (#2561) - [x] Implement sync daemon for fetching historical blocks (#2561) - [x] Verify chain hashes (either in `historical_blocks.rs` or the calling module) - [x] Consistency check for initial block + state - [x] Fetch the initial state and block from a beacon node HTTP endpoint - [x] Don't crash fetching beacon states by slot from the API - [x] Background service for state reconstruction, triggered by CLI flag or API call. Considered out of scope for this PR: - Drop the requirement to provide the `--checkpoint-block` (this would require some pretty heavy refactoring of block verification) Co-authored-by: Diva M <divma@protonmail.com>
76 lines
2.2 KiB
Rust
76 lines
2.2 KiB
Rust
use crate::chunked_vector::{chunk_key, Chunk, ChunkError, Field};
|
|
use crate::{Error, KeyValueStore, KeyValueStoreOp};
|
|
use types::EthSpec;
|
|
|
|
/// Buffered writer for chunked vectors (block roots mainly).
///
/// Holds exactly one in-memory `Chunk` at a time; `set` flushes it to a batch and
/// reloads when a write crosses a chunk boundary.
pub struct ChunkWriter<'a, F, E, S>
where
    F: Field<E>,
    E: EthSpec,
    S: KeyValueStore<E>,
{
    /// Buffered chunk awaiting writing to disk (always dirty).
    chunk: Chunk<F::Value>,
    /// Chunk index of `chunk`.
    index: usize,
    /// Backing store used to load chunks; writes go through the caller's op batch.
    store: &'a S,
}
|
|
|
|
impl<'a, F, E, S> ChunkWriter<'a, F, E, S>
|
|
where
|
|
F: Field<E>,
|
|
E: EthSpec,
|
|
S: KeyValueStore<E>,
|
|
{
|
|
pub fn new(store: &'a S, vindex: usize) -> Result<Self, Error> {
|
|
let chunk_index = F::chunk_index(vindex);
|
|
let chunk = Chunk::load(store, F::column(), &chunk_key(chunk_index))?
|
|
.unwrap_or_else(|| Chunk::new(vec![F::Value::default(); F::chunk_size()]));
|
|
|
|
Ok(Self {
|
|
chunk,
|
|
index: chunk_index,
|
|
store,
|
|
})
|
|
}
|
|
|
|
/// Set the value at a given vector index, writing the current chunk and moving on if necessary.
|
|
pub fn set(
|
|
&mut self,
|
|
vindex: usize,
|
|
value: F::Value,
|
|
batch: &mut Vec<KeyValueStoreOp>,
|
|
) -> Result<(), Error> {
|
|
let chunk_index = F::chunk_index(vindex);
|
|
|
|
// Advance to the next chunk.
|
|
if chunk_index != self.index {
|
|
self.write(batch)?;
|
|
*self = Self::new(self.store, vindex)?;
|
|
}
|
|
|
|
let i = vindex % F::chunk_size();
|
|
let existing_value = &self.chunk.values[i];
|
|
|
|
if existing_value == &value || existing_value == &F::Value::default() {
|
|
self.chunk.values[i] = value;
|
|
Ok(())
|
|
} else {
|
|
Err(ChunkError::Inconsistent {
|
|
field: F::column(),
|
|
chunk_index,
|
|
existing_value: format!("{:?}", existing_value),
|
|
new_value: format!("{:?}", value),
|
|
}
|
|
.into())
|
|
}
|
|
}
|
|
|
|
/// Write the current chunk to disk.
|
|
///
|
|
/// Should be called before the writer is dropped, in order to write the final chunk to disk.
|
|
pub fn write(&self, batch: &mut Vec<KeyValueStoreOp>) -> Result<(), Error> {
|
|
self.chunk.store(F::column(), &chunk_key(self.index), batch)
|
|
}
|
|
}
|