Skip to content

Commit

Permalink
Merge branch 'unstable' into subnet-sampling
Browse files Browse the repository at this point in the history
# Conflicts:
#	beacon_node/beacon_chain/src/data_availability_checker.rs
#	beacon_node/http_api/src/publish_blocks.rs
#	beacon_node/lighthouse_network/src/types/globals.rs
#	beacon_node/network/src/sync/manager.rs
  • Loading branch information
jimmygchen committed Sep 26, 2024
2 parents 35f683c + 50d8375 commit 07afa6c
Show file tree
Hide file tree
Showing 111 changed files with 1,874 additions and 1,111 deletions.
10 changes: 9 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ as the canonical staking deposit contract address.
The [Lighthouse Book](https://lighthouse-book.sigmaprime.io) contains information for users and
developers.

The Lighthouse team maintains a blog at [lighthouse-blog.sigmaprime.io][blog] which contains periodic
The Lighthouse team maintains a blog at [https://blog.sigmaprime.io/tag/lighthouse][blog] which contains periodic
progress updates, roadmap insights and interesting findings.

## Branches
Expand Down
10 changes: 3 additions & 7 deletions beacon_node/beacon_chain/benches/benches.rs
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
use std::sync::Arc;

use beacon_chain::kzg_utils::{blobs_to_data_column_sidecars, reconstruct_data_columns};
use beacon_chain::test_utils::get_kzg;
use criterion::{black_box, criterion_group, criterion_main, Criterion};

use bls::Signature;
use eth2_network_config::TRUSTED_SETUP_BYTES;
use kzg::{Kzg, KzgCommitment, TrustedSetup};
use kzg::KzgCommitment;
use types::{
beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, Blob, BlobsList, ChainSpec,
EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock,
Expand Down Expand Up @@ -35,11 +35,7 @@ fn all_benches(c: &mut Criterion) {
type E = MainnetEthSpec;
let spec = Arc::new(E::default_spec());

let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES)
.map_err(|e| format!("Unable to read trusted setup file: {}", e))
.expect("should have trusted setup");
let kzg = Arc::new(Kzg::new_from_trusted_setup(trusted_setup).expect("should create kzg"));

let kzg = get_kzg(&spec);
for blob_count in [1, 2, 3, 6] {
let kzg = kzg.clone();
let (signed_block, blob_sidecars) = create_test_block_and_blobs::<E>(blob_count, &spec);
Expand Down
4 changes: 3 additions & 1 deletion beacon_node/beacon_chain/src/beacon_block_streamer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -711,6 +711,7 @@ mod tests {
use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches};
use crate::test_utils::{test_spec, BeaconChainHarness, EphemeralHarnessType};
use execution_layer::test_utils::Block;
use std::sync::Arc;
use std::sync::LazyLock;
use tokio::sync::mpsc;
use types::{
Expand All @@ -725,7 +726,7 @@ mod tests {

fn get_harness(
validator_count: usize,
spec: ChainSpec,
spec: Arc<ChainSpec>,
) -> BeaconChainHarness<EphemeralHarnessType<MinimalEthSpec>> {
let harness = BeaconChainHarness::builder(MinimalEthSpec)
.spec(spec)
Expand Down Expand Up @@ -756,6 +757,7 @@ mod tests {
spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64));
spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64));
spec.electra_fork_epoch = Some(Epoch::new(electra_fork_epoch as u64));
let spec = Arc::new(spec);

let harness = get_harness(VALIDATOR_COUNT, spec.clone());
// go to bellatrix fork
Expand Down
57 changes: 37 additions & 20 deletions beacon_node/beacon_chain/src/beacon_chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -371,7 +371,7 @@ type ReqRespPreImportCache<E> = HashMap<Hash256, Arc<SignedBeaconBlock<E>>>;
/// Represents the "Beacon Chain" component of Ethereum 2.0. Allows import of blocks and block
/// operations and chooses a canonical head.
pub struct BeaconChain<T: BeaconChainTypes> {
pub spec: ChainSpec,
pub spec: Arc<ChainSpec>,
/// Configuration for `BeaconChain` runtime behaviour.
pub config: ChainConfig,
/// Persistent storage for blocks, states, etc. Typically an on-disk store, such as LevelDB.
Expand Down Expand Up @@ -497,7 +497,7 @@ pub struct BeaconChain<T: BeaconChainTypes> {
/// they are collected and combined.
pub data_availability_checker: Arc<DataAvailabilityChecker<T>>,
/// The KZG trusted setup used by this chain.
pub kzg: Option<Arc<Kzg>>,
pub kzg: Arc<Kzg>,
}

pub enum BeaconBlockResponseWrapper<E: EthSpec> {
Expand Down Expand Up @@ -2740,7 +2740,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// If the block is relevant, add it to the filtered chain segment.
Ok(_) => filtered_chain_segment.push((block_root, block)),
// If the block is already known, simply ignore this block.
Err(BlockError::BlockIsAlreadyKnown(_)) => continue,
//
// Note that `check_block_relevancy` is incapable of returning
// `DuplicateImportStatusUnknown` so we don't need to handle that case here.
Err(BlockError::DuplicateFullyImported(_)) => continue,
// If the block is the genesis block, simply ignore this block.
Err(BlockError::GenesisBlock) => continue,
// If the block is for a finalized slot, simply ignore this block.
Expand Down Expand Up @@ -2886,7 +2889,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}
}
}
Err(BlockError::BlockIsAlreadyKnown(block_root)) => {
Err(BlockError::DuplicateFullyImported(block_root)) => {
debug!(self.log,
"Ignoring already known blocks while processing chain segment";
"block_root" => ?block_root);
Expand Down Expand Up @@ -2977,6 +2980,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
pub async fn process_gossip_blob(
self: &Arc<Self>,
blob: GossipVerifiedBlob<T>,
publish_fn: impl FnOnce() -> Result<(), BlockError>,
) -> Result<AvailabilityProcessingStatus, BlockError> {
let block_root = blob.block_root();

Expand All @@ -2987,7 +2991,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.fork_choice_read_lock()
.contains_block(&block_root)
{
return Err(BlockError::BlockIsAlreadyKnown(blob.block_root()));
return Err(BlockError::DuplicateFullyImported(blob.block_root()));
}

// No need to process and import blobs beyond the PeerDAS epoch.
Expand All @@ -3003,7 +3007,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}
}

let r = self.check_gossip_blob_availability_and_import(blob).await;
let r = self
.check_gossip_blob_availability_and_import(blob, publish_fn)
.await;
self.remove_notified(&block_root, r)
}

Expand All @@ -3012,6 +3018,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
pub async fn process_gossip_data_columns(
self: &Arc<Self>,
data_columns: Vec<GossipVerifiedDataColumn<T>>,
publish_fn: impl FnOnce() -> Result<(), BlockError>,
) -> Result<
(
AvailabilityProcessingStatus,
Expand All @@ -3037,11 +3044,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.fork_choice_read_lock()
.contains_block(&block_root)
{
return Err(BlockError::BlockIsAlreadyKnown(block_root));
return Err(BlockError::DuplicateFullyImported(block_root));
}

let r = self
.check_gossip_data_columns_availability_and_import(slot, block_root, data_columns)
.check_gossip_data_columns_availability_and_import(
slot,
block_root,
data_columns,
publish_fn,
)
.await;
self.remove_notified_custody_columns(&block_root, r)
}
Expand All @@ -3061,7 +3073,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.fork_choice_read_lock()
.contains_block(&block_root)
{
return Err(BlockError::BlockIsAlreadyKnown(block_root));
return Err(BlockError::DuplicateFullyImported(block_root));
}

// Reject RPC blobs referencing unknown parents. Otherwise we allow potentially invalid data
Expand Down Expand Up @@ -3127,7 +3139,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.fork_choice_read_lock()
.contains_block(&block_root)
{
return Err(BlockError::BlockIsAlreadyKnown(block_root));
return Err(BlockError::DuplicateFullyImported(block_root));
}

// Reject RPC columns referencing unknown parents. Otherwise we allow potentially invalid data
Expand Down Expand Up @@ -3225,7 +3237,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
unverified_block: B,
notify_execution_layer: NotifyExecutionLayer,
block_source: BlockImportSource,
publish_fn: impl FnOnce() -> Result<(), BlockError> + Send + 'static,
publish_fn: impl FnOnce() -> Result<(), BlockError>,
) -> Result<AvailabilityProcessingStatus, BlockError> {
// Start the Prometheus timer.
let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES);
Expand Down Expand Up @@ -3407,22 +3419,25 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let availability = self
.data_availability_checker
.put_pending_executed_block(block)?;
self.process_availability(slot, availability).await
self.process_availability(slot, availability, || Ok(()))
.await
}

/// Checks if the provided blob can make any cached blocks available, and imports immediately
/// if so, otherwise caches the blob in the data availability checker.
async fn check_gossip_blob_availability_and_import(
self: &Arc<Self>,
blob: GossipVerifiedBlob<T>,
publish_fn: impl FnOnce() -> Result<(), BlockError>,
) -> Result<AvailabilityProcessingStatus, BlockError> {
let slot = blob.slot();
if let Some(slasher) = self.slasher.as_ref() {
slasher.accept_block_header(blob.signed_block_header());
}
let availability = self.data_availability_checker.put_gossip_blob(blob)?;

self.process_availability(slot, availability).await
self.process_availability(slot, availability, publish_fn)
.await
}

/// Checks if the provided data column can make any cached blocks available, and imports immediately
Expand All @@ -3432,6 +3447,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
slot: Slot,
block_root: Hash256,
data_columns: Vec<GossipVerifiedDataColumn<T>>,
publish_fn: impl FnOnce() -> Result<(), BlockError>,
) -> Result<
(
AvailabilityProcessingStatus,
Expand All @@ -3449,7 +3465,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.data_availability_checker
.put_gossip_data_columns(slot, block_root, data_columns)?;

self.process_availability(slot, availability)
self.process_availability(slot, availability, publish_fn)
.await
.map(|result| (result, data_columns_to_publish))
}
Expand Down Expand Up @@ -3490,7 +3506,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.data_availability_checker
.put_rpc_blobs(block_root, epoch, blobs)?;

self.process_availability(slot, availability).await
self.process_availability(slot, availability, || Ok(()))
.await
}

/// Checks if the provided columns can make any cached blocks available, and imports immediately
Expand Down Expand Up @@ -3538,7 +3555,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
custody_columns,
)?;

self.process_availability(slot, availability)
self.process_availability(slot, availability, || Ok(()))
.await
.map(|result| (result, data_columns_to_publish))
}
Expand All @@ -3551,9 +3568,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self: &Arc<Self>,
slot: Slot,
availability: Availability<T::EthSpec>,
publish_fn: impl FnOnce() -> Result<(), BlockError>,
) -> Result<AvailabilityProcessingStatus, BlockError> {
match availability {
Availability::Available(block) => {
publish_fn()?;
// Block is fully available, import into fork choice
self.import_available_block(block).await
}
Expand Down Expand Up @@ -5684,10 +5703,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

let kzg_proofs = Vec::from(proofs);

let kzg = self
.kzg
.as_ref()
.ok_or(BlockProductionError::TrustedSetupNotInitialized)?;
let kzg = self.kzg.as_ref();

kzg_utils::validate_blobs::<T::EthSpec>(
kzg,
expected_kzg_commitments,
Expand Down
15 changes: 3 additions & 12 deletions beacon_node/beacon_chain/src/blob_verification.rs
Original file line number Diff line number Diff line change
Expand Up @@ -115,13 +115,6 @@ pub enum GossipBlobError {
index: u64,
},

/// `Kzg` struct hasn't been initialized. This is an internal error.
///
/// ## Peer scoring
///
/// The peer isn't faulty, This is an internal error.
KzgNotInitialized,

/// The kzg verification failed.
///
/// ## Peer scoring
Expand Down Expand Up @@ -559,11 +552,9 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
}

// Kzg verification for gossip blob sidecar
let kzg = chain
.kzg
.as_ref()
.ok_or(GossipBlobError::KzgNotInitialized)?;
let kzg_verified_blob = KzgVerifiedBlob::new(blob_sidecar, kzg, seen_timestamp)
let kzg = chain.kzg.as_ref();

let kzg_verified_blob = KzgVerifiedBlob::new(blob_sidecar.clone(), kzg, seen_timestamp)
.map_err(GossipBlobError::KzgError)?;
let blob_sidecar = &kzg_verified_blob.blob;

Expand Down
Loading

0 comments on commit 07afa6c

Please sign in to comment.