Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 5 additions & 8 deletions beacon_node/lighthouse_network/src/service/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ use crate::rpc::{
use crate::types::{
attestation_sync_committee_topics, fork_core_topics, subnet_from_topic_hash, GossipEncoding,
GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, ALTAIR_CORE_TOPICS,
BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS,
BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS,
};
use crate::EnrExt;
use crate::Eth2Enr;
Expand Down Expand Up @@ -285,26 +285,23 @@ impl<E: EthSpec> Network<E> {

let max_topics = ctx.chain_spec.attestation_subnet_count as usize
+ SYNC_COMMITTEE_SUBNET_COUNT as usize
+ ctx.chain_spec.blob_sidecar_subnet_count as usize
+ ctx.chain_spec.blob_sidecar_subnet_count_electra as usize
+ ctx.chain_spec.data_column_sidecar_subnet_count as usize
+ BASE_CORE_TOPICS.len()
+ ALTAIR_CORE_TOPICS.len()
+ CAPELLA_CORE_TOPICS.len()
+ DENEB_CORE_TOPICS.len()
+ CAPELLA_CORE_TOPICS.len() // 0 core deneb and electra topics
+ LIGHT_CLIENT_GOSSIP_TOPICS.len();

let possible_fork_digests = ctx.fork_context.all_fork_digests();
let filter = gossipsub::MaxCountSubscriptionFilter {
filter: utils::create_whitelist_filter(
possible_fork_digests,
ctx.chain_spec.attestation_subnet_count,
&ctx.chain_spec,
SYNC_COMMITTEE_SUBNET_COUNT,
ctx.chain_spec.blob_sidecar_subnet_count,
ctx.chain_spec.data_column_sidecar_subnet_count,
),
// during a fork we subscribe to both the old and new topics
max_subscribed_topics: max_topics * 4,
// 418 in theory = (64 attestation + 4 sync committee + 7 core topics + 6 blob topics + 128 column topics) * 2
// 424 in theory = (64 attestation + 4 sync committee + 7 core topics + 9 blob topics + 128 column topics) * 2
max_subscriptions_per_request: max_topics * 2,
};

Expand Down
15 changes: 9 additions & 6 deletions beacon_node/lighthouse_network/src/service/utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -236,10 +236,8 @@ pub fn load_or_build_metadata<E: EthSpec>(
/// possible fork digests.
pub(crate) fn create_whitelist_filter(
possible_fork_digests: Vec<[u8; 4]>,
attestation_subnet_count: u64,
spec: &ChainSpec,
sync_committee_subnet_count: u64,
blob_sidecar_subnet_count: u64,
data_column_sidecar_subnet_count: u64,
) -> gossipsub::WhitelistSubscriptionFilter {
let mut possible_hashes = HashSet::new();
for fork_digest in possible_fork_digests {
Expand All @@ -259,16 +257,21 @@ pub(crate) fn create_whitelist_filter(
add(BlsToExecutionChange);
add(LightClientFinalityUpdate);
add(LightClientOptimisticUpdate);
for id in 0..attestation_subnet_count {
for id in 0..spec.attestation_subnet_count {
add(Attestation(SubnetId::new(id)));
}
for id in 0..sync_committee_subnet_count {
add(SyncCommitteeMessage(SyncSubnetId::new(id)));
}
for id in 0..blob_sidecar_subnet_count {
let blob_subnet_count = if spec.electra_fork_epoch.is_some() {
spec.blob_sidecar_subnet_count_electra
} else {
spec.blob_sidecar_subnet_count
};
for id in 0..blob_subnet_count {
add(BlobSidecar(id));
}
for id in 0..data_column_sidecar_subnet_count {
for id in 0..spec.data_column_sidecar_subnet_count {
add(DataColumnSidecar(DataColumnSubnetId::new(id)));
}
}
Expand Down
2 changes: 1 addition & 1 deletion beacon_node/lighthouse_network/src/types/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,5 +18,5 @@ pub use sync_state::{BackFillState, SyncState};
pub use topics::{
attestation_sync_committee_topics, core_topics_to_subscribe, fork_core_topics,
subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, ALTAIR_CORE_TOPICS,
BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS,
BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS,
};
31 changes: 21 additions & 10 deletions beacon_node/lighthouse_network/src/types/topics.rs
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,6 @@ pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [
GossipKind::LightClientOptimisticUpdate,
];

pub const DENEB_CORE_TOPICS: [GossipKind; 0] = [];

/// Returns the core topics associated with each fork that are new to the previous fork
pub fn fork_core_topics<E: EthSpec>(fork_name: &ForkName, spec: &ChainSpec) -> Vec<GossipKind> {
match fork_name {
Expand All @@ -56,11 +54,16 @@ pub fn fork_core_topics<E: EthSpec>(fork_name: &ForkName, spec: &ChainSpec) -> V
for i in 0..spec.blob_sidecar_subnet_count {
deneb_blob_topics.push(GossipKind::BlobSidecar(i));
}
let mut deneb_topics = DENEB_CORE_TOPICS.to_vec();
deneb_topics.append(&mut deneb_blob_topics);
deneb_topics
deneb_blob_topics
}
ForkName::Electra => {
// All of electra blob topics are core topics
let mut electra_blob_topics = Vec::new();
for i in 0..spec.blob_sidecar_subnet_count_electra {
electra_blob_topics.push(GossipKind::BlobSidecar(i));
}
electra_blob_topics
}
ForkName::Electra => vec![],
ForkName::Fulu => vec![],
}
}
Expand Down Expand Up @@ -88,7 +91,12 @@ pub fn core_topics_to_subscribe<E: EthSpec>(
topics.extend(previous_fork_topics);
current_fork = previous_fork;
}
// Remove duplicates
topics
.into_iter()
.collect::<std::collections::HashSet<_>>()
.into_iter()
.collect()
}

/// A gossipsub topic which encapsulates the type of messages that should be sent and received over
Expand Down Expand Up @@ -467,16 +475,19 @@ mod tests {
type E = MainnetEthSpec;
let spec = E::default_spec();
let mut all_topics = Vec::new();
let mut electra_core_topics = fork_core_topics::<E>(&ForkName::Electra, &spec);
let mut deneb_core_topics = fork_core_topics::<E>(&ForkName::Deneb, &spec);
all_topics.append(&mut electra_core_topics);
all_topics.append(&mut deneb_core_topics);
all_topics.extend(CAPELLA_CORE_TOPICS);
all_topics.extend(ALTAIR_CORE_TOPICS);
all_topics.extend(BASE_CORE_TOPICS);

let latest_fork = *ForkName::list_all().last().unwrap();
assert_eq!(
core_topics_to_subscribe::<E>(latest_fork, &spec),
all_topics
);
let core_topics = core_topics_to_subscribe::<E>(latest_fork, &spec);
// Need to check all the topics exist in an order independent manner
for topic in all_topics {
assert!(core_topics.contains(&topic));
}
}
}
52 changes: 49 additions & 3 deletions consensus/types/src/chain_spec.rs
Original file line number Diff line number Diff line change
Expand Up @@ -191,7 +191,6 @@ pub struct ChainSpec {
pub max_pending_partials_per_withdrawals_sweep: u64,
pub min_per_epoch_churn_limit_electra: u64,
pub max_per_epoch_activation_exit_churn_limit: u64,
pub max_blobs_per_block_electra: u64,

/*
* Fulu hard fork params
Expand Down Expand Up @@ -240,6 +239,13 @@ pub struct ChainSpec {
pub blob_sidecar_subnet_count: u64,
max_blobs_per_block: u64,

/*
* Networking Electra
*/
max_blobs_per_block_electra: u64,
pub blob_sidecar_subnet_count_electra: u64,
pub max_request_blob_sidecars_electra: u64,

/*
* Networking Derived
*
Expand Down Expand Up @@ -618,6 +624,14 @@ impl ChainSpec {
}
}

/// Returns the value of `MAX_REQUEST_BLOB_SIDECARS` appropriate for `fork_name`.
///
/// Electra raised the blobs-by-range/by-root request cap, so forks with
/// Electra enabled use the `_electra` configuration value; earlier forks
/// keep the original Deneb limit.
pub fn max_request_blob_sidecars(&self, fork_name: ForkName) -> usize {
    let limit = match fork_name.electra_enabled() {
        true => self.max_request_blob_sidecars_electra,
        false => self.max_request_blob_sidecars,
    };
    limit as usize
}

/// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for the fork at `epoch`.
pub fn max_blobs_per_block(&self, epoch: Epoch) -> u64 {
self.max_blobs_per_block_by_fork(self.fork_name_at_epoch(epoch))
Expand Down Expand Up @@ -830,7 +844,6 @@ impl ChainSpec {
u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?)
})
.expect("calculation does not overflow"),
max_blobs_per_block_electra: default_max_blobs_per_block_electra(),

/*
* Fulu hard fork params
Expand Down Expand Up @@ -886,6 +899,13 @@ impl ChainSpec {
max_blobs_by_root_request: default_max_blobs_by_root_request(),
max_data_columns_by_root_request: default_data_columns_by_root_request(),

/*
* Networking Electra specific
*/
max_blobs_per_block_electra: default_max_blobs_per_block_electra(),
blob_sidecar_subnet_count_electra: default_blob_sidecar_subnet_count_electra(),
max_request_blob_sidecars_electra: default_max_request_blob_sidecars_electra(),

/*
* Application specific
*/
Expand Down Expand Up @@ -1161,7 +1181,6 @@ impl ChainSpec {
u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?)
})
.expect("calculation does not overflow"),
max_blobs_per_block_electra: default_max_blobs_per_block_electra(),

/*
* Fulu hard fork params
Expand Down Expand Up @@ -1216,6 +1235,13 @@ impl ChainSpec {
max_blobs_by_root_request: default_max_blobs_by_root_request(),
max_data_columns_by_root_request: default_data_columns_by_root_request(),

/*
* Networking Electra specific
*/
max_blobs_per_block_electra: default_max_blobs_per_block_electra(),
blob_sidecar_subnet_count_electra: default_blob_sidecar_subnet_count_electra(),
max_request_blob_sidecars_electra: default_max_request_blob_sidecars_electra(),

/*
* Application specific
*/
Expand Down Expand Up @@ -1421,6 +1447,12 @@ pub struct Config {
#[serde(default = "default_max_blobs_per_block_electra")]
#[serde(with = "serde_utils::quoted_u64")]
max_blobs_per_block_electra: u64,
#[serde(default = "default_blob_sidecar_subnet_count_electra")]
#[serde(with = "serde_utils::quoted_u64")]
pub blob_sidecar_subnet_count_electra: u64,
#[serde(default = "default_max_request_blob_sidecars_electra")]
#[serde(with = "serde_utils::quoted_u64")]
max_request_blob_sidecars_electra: u64,

#[serde(default = "default_custody_requirement")]
#[serde(with = "serde_utils::quoted_u64")]
Expand Down Expand Up @@ -1555,6 +1587,14 @@ const fn default_max_blobs_per_block() -> u64 {
6
}

/// Default for `BLOB_SIDECAR_SUBNET_COUNT_ELECTRA` — the number of blob
/// sidecar gossip subnets after the Electra fork.
// NOTE(review): presumably mirrors the consensus-spec Electra networking
// preset (9 subnets for 9 max blobs) — confirm against the spec config.
const fn default_blob_sidecar_subnet_count_electra() -> u64 {
    9
}

/// Default for `MAX_REQUEST_BLOB_SIDECARS_ELECTRA` — the maximum number of
/// blob sidecars a peer may request in a single by-range/by-root request
/// after the Electra fork.
// NOTE(review): 1152 = 128 blocks * 9 blobs per block, matching the Electra
// networking preset — confirm against the consensus-spec config.
const fn default_max_request_blob_sidecars_electra() -> u64 {
    1152
}

/// Default for `MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA`, denominated in Gwei
/// (128 ETH = 128_000_000_000 Gwei).
const fn default_min_per_epoch_churn_limit_electra() -> u64 {
    // 128 ETH expressed in Gwei.
    128_000_000_000
}
Expand Down Expand Up @@ -1787,6 +1827,8 @@ impl Config {
max_per_epoch_activation_exit_churn_limit: spec
.max_per_epoch_activation_exit_churn_limit,
max_blobs_per_block_electra: spec.max_blobs_per_block_electra,
blob_sidecar_subnet_count_electra: spec.blob_sidecar_subnet_count_electra,
max_request_blob_sidecars_electra: spec.max_request_blob_sidecars_electra,

custody_requirement: spec.custody_requirement,
data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count,
Expand Down Expand Up @@ -1865,6 +1907,8 @@ impl Config {
min_per_epoch_churn_limit_electra,
max_per_epoch_activation_exit_churn_limit,
max_blobs_per_block_electra,
blob_sidecar_subnet_count_electra,
max_request_blob_sidecars_electra,
custody_requirement,
data_column_sidecar_subnet_count,
number_of_columns,
Expand Down Expand Up @@ -1935,6 +1979,8 @@ impl Config {
min_per_epoch_churn_limit_electra,
max_per_epoch_activation_exit_churn_limit,
max_blobs_per_block_electra,
max_request_blob_sidecars_electra,
blob_sidecar_subnet_count_electra,

// We need to re-derive any values that might have changed in the config.
max_blocks_by_root_request: max_blocks_by_root_request_common(max_request_blocks),
Expand Down
Loading