From 1f6850fae2807c1d3f0e281524e0b1b9ab230e67 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Fri, 10 Jan 2025 06:43:29 +0530 Subject: [PATCH 01/26] Rust 1.84 lints (#6781) * Fix few lints * Fix remaining lints * Use fully qualified syntax --- beacon_node/beacon_chain/src/beacon_chain.rs | 10 +++++----- beacon_node/beacon_chain/src/canonical_head.rs | 6 +----- .../beacon_chain/src/data_availability_checker.rs | 6 +++--- .../overflow_lru_cache.rs | 9 +++------ .../beacon_chain/src/early_attester_cache.rs | 2 +- beacon_node/beacon_chain/src/eth1_chain.rs | 2 +- beacon_node/beacon_chain/src/execution_payload.rs | 8 ++++---- .../beacon_chain/src/graffiti_calculator.rs | 7 ++----- .../beacon_chain/src/observed_aggregates.rs | 2 +- beacon_node/beacon_chain/src/observed_attesters.rs | 4 ++-- .../beacon_chain/src/observed_data_sidecars.rs | 2 +- beacon_node/beacon_chain/src/shuffling_cache.rs | 2 +- beacon_node/beacon_processor/src/lib.rs | 2 +- beacon_node/client/src/builder.rs | 2 +- beacon_node/client/src/notifier.rs | 8 +++----- beacon_node/execution_layer/src/engine_api/http.rs | 10 ++++------ beacon_node/execution_layer/src/lib.rs | 2 +- beacon_node/execution_layer/src/payload_status.rs | 2 +- .../src/test_utils/mock_execution_layer.rs | 2 +- beacon_node/genesis/src/eth1_genesis_service.rs | 2 +- beacon_node/http_api/src/lib.rs | 4 ++-- beacon_node/http_api/src/validator.rs | 2 +- beacon_node/http_api/tests/interactive_tests.rs | 2 +- beacon_node/http_api/tests/tests.rs | 2 +- .../lighthouse_network/gossipsub/src/backoff.rs | 2 +- .../lighthouse_network/gossipsub/src/behaviour.rs | 3 +-- beacon_node/lighthouse_network/src/config.rs | 6 +++--- .../src/discovery/subnet_predicate.rs | 4 ++-- .../lighthouse_network/src/peer_manager/peerdb.rs | 2 +- .../src/peer_manager/peerdb/peer_info.rs | 4 ++-- .../src/network_beacon_processor/gossip_methods.rs | 2 +- beacon_node/operation_pool/src/lib.rs | 14 +++++--------- beacon_node/store/src/forwards_iter.rs | 4 ++-- 
beacon_node/store/src/reconstruct.rs | 2 +- beacon_node/store/src/state_cache.rs | 8 ++------ common/account_utils/src/validator_definitions.rs | 2 +- common/logging/src/lib.rs | 2 +- consensus/proto_array/src/proto_array.rs | 10 +++++----- consensus/state_processing/src/genesis.rs | 12 ++++++------ .../per_block_processing/altair/sync_committee.rs | 2 +- .../epoch_processing_summary.rs | 12 ++++++------ consensus/types/src/beacon_block_body.rs | 2 +- consensus/types/src/beacon_state.rs | 2 +- .../src/beacon_state/progressive_balances_cache.rs | 2 +- consensus/types/src/chain_spec.rs | 10 ++++------ consensus/types/src/deposit_tree_snapshot.rs | 3 +-- consensus/types/src/graffiti.rs | 2 +- lcli/src/transition_blocks.rs | 4 ++-- slasher/src/database.rs | 2 +- testing/ef_tests/src/cases/fork_choice.rs | 2 +- testing/ef_tests/src/cases/operations.rs | 4 ++-- testing/ef_tests/src/decode.rs | 2 +- validator_client/doppelganger_service/src/lib.rs | 2 +- .../slashing_protection/src/slashing_database.rs | 4 +--- .../validator_services/src/preparation_service.rs | 2 +- validator_client/validator_services/src/sync.rs | 2 +- validator_manager/src/create_validators.rs | 4 ++-- validator_manager/src/delete_validators.rs | 2 +- validator_manager/src/import_validators.rs | 2 +- validator_manager/src/list_validators.rs | 2 +- validator_manager/src/move_validators.rs | 2 +- 61 files changed, 110 insertions(+), 138 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 80766d57b33..7bbb9ff74d6 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -573,7 +573,7 @@ impl BeaconChain { .start_slot(T::EthSpec::slots_per_epoch()); let is_canonical = self .block_root_at_slot(block_slot, WhenSlotSkipped::None)? 
- .map_or(false, |canonical_root| block_root == &canonical_root); + .is_some_and(|canonical_root| block_root == &canonical_root); Ok(block_slot <= finalized_slot && is_canonical) } @@ -604,7 +604,7 @@ impl BeaconChain { let slot_is_finalized = state_slot <= finalized_slot; let canonical = self .state_root_at_slot(state_slot)? - .map_or(false, |canonical_root| state_root == &canonical_root); + .is_some_and(|canonical_root| state_root == &canonical_root); Ok(FinalizationAndCanonicity { slot_is_finalized, canonical, @@ -5118,9 +5118,9 @@ impl BeaconChain { .start_of(slot) .unwrap_or_else(|| Duration::from_secs(0)), ); - block_delays.observed.map_or(false, |delay| { - delay >= self.slot_clock.unagg_attestation_production_delay() - }) + block_delays + .observed + .is_some_and(|delay| delay >= self.slot_clock.unagg_attestation_production_delay()) } /// Produce a block for some `slot` upon the given `state`. diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 4f92f5ec8f9..4e21372efb7 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -1254,11 +1254,7 @@ pub fn find_reorg_slot( ($state: ident, $block_root: ident) => { std::iter::once(Ok(($state.slot(), $block_root))) .chain($state.rev_iter_block_roots(spec)) - .skip_while(|result| { - result - .as_ref() - .map_or(false, |(slot, _)| *slot > lowest_slot) - }) + .skip_while(|result| result.as_ref().is_ok_and(|(slot, _)| *slot > lowest_slot)) }; } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 72806a74d27..f6002ea0ac9 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -519,13 +519,13 @@ impl DataAvailabilityChecker { /// Returns true if the given epoch lies within the da boundary and false otherwise. 
pub fn da_check_required_for_epoch(&self, block_epoch: Epoch) -> bool { self.data_availability_boundary() - .map_or(false, |da_epoch| block_epoch >= da_epoch) + .is_some_and(|da_epoch| block_epoch >= da_epoch) } /// Returns `true` if the current epoch is greater than or equal to the `Deneb` epoch. pub fn is_deneb(&self) -> bool { - self.slot_clock.now().map_or(false, |slot| { - self.spec.deneb_fork_epoch.map_or(false, |deneb_epoch| { + self.slot_clock.now().is_some_and(|slot| { + self.spec.deneb_fork_epoch.is_some_and(|deneb_epoch| { let now_epoch = slot.epoch(T::EthSpec::slots_per_epoch()); now_epoch >= deneb_epoch }) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 40361574aff..5ce023038d2 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -228,13 +228,10 @@ impl PendingComponents { ); let all_blobs_received = block_kzg_commitments_count_opt - .map_or(false, |num_expected_blobs| { - num_expected_blobs == num_received_blobs - }); + .is_some_and(|num_expected_blobs| num_expected_blobs == num_received_blobs); - let all_columns_received = expected_columns_opt.map_or(false, |num_expected_columns| { - num_expected_columns == num_received_columns - }); + let all_columns_received = expected_columns_opt + .is_some_and(|num_expected_columns| num_expected_columns == num_received_columns); all_blobs_received || all_columns_received } diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index 606610a7483..c94ea0e9414 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -145,7 +145,7 @@ impl EarlyAttesterCache { self.item .read() .as_ref() - .map_or(false, |item| item.beacon_block_root == 
block_root) + .is_some_and(|item| item.beacon_block_root == block_root) } /// Returns the block, if `block_root` matches the cached item. diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index cb6e4c34f3e..ad4f106517f 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -153,7 +153,7 @@ fn get_sync_status( // Lighthouse is "cached and ready" when it has cached enough blocks to cover the start of the // current voting period. let lighthouse_is_cached_and_ready = - latest_cached_block_timestamp.map_or(false, |t| t >= voting_target_timestamp); + latest_cached_block_timestamp.is_some_and(|t| t >= voting_target_timestamp); Some(Eth1SyncStatusData { head_block_number, diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 92d24c53c00..502a7918a11 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -127,9 +127,9 @@ impl PayloadNotifier { /// contains a few extra checks by running `partially_verify_execution_payload` first: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#notify_new_payload -async fn notify_new_payload<'a, T: BeaconChainTypes>( +async fn notify_new_payload( chain: &Arc>, - block: BeaconBlockRef<'a, T::EthSpec>, + block: BeaconBlockRef<'_, T::EthSpec>, ) -> Result { let execution_layer = chain .execution_layer @@ -230,9 +230,9 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( /// Equivalent to the `validate_merge_block` function in the merge Fork Choice Changes: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/fork-choice.md#validate_merge_block -pub async fn validate_merge_block<'a, T: BeaconChainTypes>( +pub async fn validate_merge_block( chain: &Arc>, - block: BeaconBlockRef<'a, T::EthSpec>, + block: BeaconBlockRef<'_, T::EthSpec>, 
allow_optimistic_import: AllowOptimisticImport, ) -> Result<(), BlockError> { let spec = &chain.spec; diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs index 4373164d62f..8692d374ed5 100644 --- a/beacon_node/beacon_chain/src/graffiti_calculator.rs +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -293,10 +293,7 @@ mod tests { .await .unwrap(); - let version_bytes = std::cmp::min( - lighthouse_version::VERSION.as_bytes().len(), - GRAFFITI_BYTES_LEN, - ); + let version_bytes = std::cmp::min(lighthouse_version::VERSION.len(), GRAFFITI_BYTES_LEN); // grab the slice of the graffiti that corresponds to the lighthouse version let graffiti_slice = &harness.chain.graffiti_calculator.get_graffiti(None).await.0[..version_bytes]; @@ -361,7 +358,7 @@ mod tests { let graffiti_str = "nice graffiti bro"; let mut graffiti_bytes = [0u8; GRAFFITI_BYTES_LEN]; - graffiti_bytes[..graffiti_str.as_bytes().len()].copy_from_slice(graffiti_str.as_bytes()); + graffiti_bytes[..graffiti_str.len()].copy_from_slice(graffiti_str.as_bytes()); let found_graffiti = harness .chain diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs index dec012fb929..20ed36ace75 100644 --- a/beacon_node/beacon_chain/src/observed_aggregates.rs +++ b/beacon_node/beacon_chain/src/observed_aggregates.rs @@ -293,7 +293,7 @@ impl SlotHashSet { Ok(self .map .get(&root) - .map_or(false, |agg| agg.iter().any(|val| item.is_subset(val)))) + .is_some_and(|agg| agg.iter().any(|val| item.is_subset(val)))) } /// The number of observed items in `self`. 
diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs index efb95f57a96..5bba8e4d8e3 100644 --- a/beacon_node/beacon_chain/src/observed_attesters.rs +++ b/beacon_node/beacon_chain/src/observed_attesters.rs @@ -130,7 +130,7 @@ impl Item<()> for EpochBitfield { fn get(&self, validator_index: usize) -> Option<()> { self.bitfield .get(validator_index) - .map_or(false, |bit| *bit) + .is_some_and(|bit| *bit) .then_some(()) } } @@ -336,7 +336,7 @@ impl, E: EthSpec> AutoPruningEpochContainer { let exists = self .items .get(&epoch) - .map_or(false, |item| item.get(validator_index).is_some()); + .is_some_and(|item| item.get(validator_index).is_some()); Ok(exists) } diff --git a/beacon_node/beacon_chain/src/observed_data_sidecars.rs b/beacon_node/beacon_chain/src/observed_data_sidecars.rs index 53f8c71f54e..a9f46640645 100644 --- a/beacon_node/beacon_chain/src/observed_data_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_data_sidecars.rs @@ -118,7 +118,7 @@ impl ObservedDataSidecars { slot: data_sidecar.slot(), proposer: data_sidecar.block_proposer_index(), }) - .map_or(false, |indices| indices.contains(&data_sidecar.index())); + .is_some_and(|indices| indices.contains(&data_sidecar.index())); Ok(is_known) } diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index da1d60db17d..67ca72254b6 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -253,7 +253,7 @@ impl BlockShufflingIds { } else if self .previous .as_ref() - .map_or(false, |id| id.shuffling_epoch == epoch) + .is_some_and(|id| id.shuffling_epoch == epoch) { self.previous.clone() } else if epoch == self.next.shuffling_epoch { diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 2a69b04c916..0edda2f95b8 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ 
b/beacon_node/beacon_processor/src/lib.rs @@ -1022,7 +1022,7 @@ impl BeaconProcessor { let can_spawn = self.current_workers < self.config.max_workers; let drop_during_sync = work_event .as_ref() - .map_or(false, |event| event.drop_during_sync); + .is_some_and(|event| event.drop_during_sync); let idle_tx = idle_tx.clone(); let modified_queue_id = match work_event { diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 7c6a253aca4..24c66158225 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -910,7 +910,7 @@ where .forkchoice_update_parameters(); if params .head_hash - .map_or(false, |hash| hash != ExecutionBlockHash::zero()) + .is_some_and(|hash| hash != ExecutionBlockHash::zero()) { // Spawn a new task to update the EE without waiting for it to complete. let inner_chain = beacon_chain.clone(); diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index f686c2c650b..e88803e94f3 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -197,7 +197,7 @@ pub fn spawn_notifier( ); let speed = speedo.slots_per_second(); - let display_speed = speed.map_or(false, |speed| speed != 0.0); + let display_speed = speed.is_some_and(|speed| speed != 0.0); if display_speed { info!( @@ -233,7 +233,7 @@ pub fn spawn_notifier( ); let speed = speedo.slots_per_second(); - let display_speed = speed.map_or(false, |speed| speed != 0.0); + let display_speed = speed.is_some_and(|speed| speed != 0.0); if display_speed { info!( @@ -339,9 +339,7 @@ async fn bellatrix_readiness_logging( .message() .body() .execution_payload() - .map_or(false, |payload| { - payload.parent_hash() != ExecutionBlockHash::zero() - }); + .is_ok_and(|payload| payload.parent_hash() != ExecutionBlockHash::zero()); let has_execution_layer = beacon_chain.execution_layer.is_some(); diff --git a/beacon_node/execution_layer/src/engine_api/http.rs 
b/beacon_node/execution_layer/src/engine_api/http.rs index 33dc60d0378..e2a81c072c4 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -158,9 +158,7 @@ pub mod deposit_log { }; let signature_is_valid = deposit_pubkey_signature_message(&deposit_data, spec) - .map_or(false, |(public_key, signature, msg)| { - signature.verify(&public_key, msg) - }); + .is_some_and(|(public_key, signature, msg)| signature.verify(&public_key, msg)); Ok(DepositLog { deposit_data, @@ -592,7 +590,7 @@ impl CachedResponse { /// returns `true` if the entry's age is >= age_limit pub fn older_than(&self, age_limit: Option) -> bool { - age_limit.map_or(false, |limit| self.age() >= limit) + age_limit.is_some_and(|limit| self.age() >= limit) } } @@ -720,9 +718,9 @@ impl HttpJsonRpc { .await } - pub async fn get_block_by_number<'a>( + pub async fn get_block_by_number( &self, - query: BlockByNumberQuery<'a>, + query: BlockByNumberQuery<'_>, ) -> Result, Error> { let params = json!([query, RETURN_FULL_TRANSACTION_OBJECTS]); diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index ae0dca9833f..f3b12b21d17 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -2095,7 +2095,7 @@ fn verify_builder_bid( payload: header.timestamp(), expected: payload_attributes.timestamp(), })) - } else if block_number.map_or(false, |n| n != header.block_number()) { + } else if block_number.is_some_and(|n| n != header.block_number()) { Err(Box::new(InvalidBuilderPayload::BlockNumber { payload: header.block_number(), expected: block_number, diff --git a/beacon_node/execution_layer/src/payload_status.rs b/beacon_node/execution_layer/src/payload_status.rs index 5405fd70099..cf0be8ed0d8 100644 --- a/beacon_node/execution_layer/src/payload_status.rs +++ b/beacon_node/execution_layer/src/payload_status.rs @@ -41,7 +41,7 @@ pub fn process_payload_status( 
PayloadStatusV1Status::Valid => { if response .latest_valid_hash - .map_or(false, |h| h == head_block_hash) + .is_some_and(|h| h == head_block_hash) { // The response is only valid if `latest_valid_hash` is not `null` and // equal to the provided `block_hash`. diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 48372a39be1..dc90d91c0f2 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -318,7 +318,7 @@ impl MockExecutionLayer { (self, block_hash) } - pub async fn with_terminal_block<'a, U, V>(self, func: U) -> Self + pub async fn with_terminal_block(self, func: U) -> Self where U: Fn(ChainSpec, ExecutionLayer, Option) -> V, V: Future, diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 3981833a5cb..b5f4bd50ee2 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -270,7 +270,7 @@ impl Eth1GenesisService { // Ignore any block that has already been processed or update the highest processed // block. - if highest_processed_block.map_or(false, |highest| highest >= block.number) { + if highest_processed_block.is_some_and(|highest| highest >= block.number) { continue; } else { self.stats diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 23d177da785..febdf692590 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1164,7 +1164,7 @@ pub fn serve( .map_err(warp_utils::reject::beacon_chain_error)? // Ignore any skip-slots immediately following the parent. .find(|res| { - res.as_ref().map_or(false, |(root, _)| *root != parent_root) + res.as_ref().is_ok_and(|(root, _)| *root != parent_root) }) .transpose() .map_err(warp_utils::reject::beacon_chain_error)? 
@@ -1249,7 +1249,7 @@ pub fn serve( let canonical = chain .block_root_at_slot(block.slot(), WhenSlotSkipped::None) .map_err(warp_utils::reject::beacon_chain_error)? - .map_or(false, |canonical| root == canonical); + .is_some_and(|canonical| root == canonical); let data = api_types::BlockHeaderData { root, diff --git a/beacon_node/http_api/src/validator.rs b/beacon_node/http_api/src/validator.rs index 7f11ddd8f43..baa41e33ed5 100644 --- a/beacon_node/http_api/src/validator.rs +++ b/beacon_node/http_api/src/validator.rs @@ -14,7 +14,7 @@ pub fn pubkey_to_validator_index( state .validators() .get(index) - .map_or(false, |v| v.pubkey == *pubkey) + .is_some_and(|v| v.pubkey == *pubkey) }) .map(Result::Ok) .transpose() diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index e45dcf221cc..8cfcf5d93e9 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -161,7 +161,7 @@ impl ForkChoiceUpdates { update .payload_attributes .as_ref() - .map_or(false, |payload_attributes| { + .is_some_and(|payload_attributes| { payload_attributes.timestamp() == proposal_timestamp }) }) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 7007a14466c..1efe44a613c 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1278,7 +1278,7 @@ impl ApiTester { .chain .block_root_at_slot(block.slot(), WhenSlotSkipped::None) .unwrap() - .map_or(false, |canonical| block_root == canonical); + .is_some_and(|canonical| block_root == canonical); assert_eq!(result.canonical, canonical, "{:?}", block_id); assert_eq!(result.root, block_root, "{:?}", block_id); diff --git a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs index 537d2319c29..0d77e2cd0f9 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs +++ 
b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs @@ -124,7 +124,7 @@ impl BackoffStorage { pub(crate) fn is_backoff_with_slack(&self, topic: &TopicHash, peer: &PeerId) -> bool { self.backoffs .get(topic) - .map_or(false, |m| m.contains_key(peer)) + .is_some_and(|m| m.contains_key(peer)) } pub(crate) fn get_backoff_time(&self, topic: &TopicHash, peer: &PeerId) -> Option { diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index c4e20e43972..6528e737a33 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -1770,8 +1770,7 @@ where // reject messages claiming to be from ourselves but not locally published let self_published = !self.config.allow_self_origin() && if let Some(own_id) = self.publish_config.get_own_id() { - own_id != propagation_source - && raw_message.source.as_ref().map_or(false, |s| s == own_id) + own_id != propagation_source && raw_message.source.as_ref() == Some(own_id) } else { self.published_message_ids.contains(msg_id) }; diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 21f3dc830f4..8a93b1185db 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -166,7 +166,7 @@ impl Config { tcp_port, }); self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), disc_port); - self.discv5_config.table_filter = |enr| enr.ip4().as_ref().map_or(false, is_global_ipv4) + self.discv5_config.table_filter = |enr| enr.ip4().as_ref().is_some_and(is_global_ipv4) } /// Sets the listening address to use an ipv6 address. 
The discv5 ip_mode and table filter is @@ -187,7 +187,7 @@ impl Config { }); self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), disc_port); - self.discv5_config.table_filter = |enr| enr.ip6().as_ref().map_or(false, is_global_ipv6) + self.discv5_config.table_filter = |enr| enr.ip6().as_ref().is_some_and(is_global_ipv6) } /// Sets the listening address to use both an ipv4 and ipv6 address. The discv5 ip_mode and @@ -317,7 +317,7 @@ impl Default for Config { .filter_rate_limiter(filter_rate_limiter) .filter_max_bans_per_ip(Some(5)) .filter_max_nodes_per_ip(Some(10)) - .table_filter(|enr| enr.ip4().map_or(false, |ip| is_global_ipv4(&ip))) // Filter non-global IPs + .table_filter(|enr| enr.ip4().is_some_and(|ip| is_global_ipv4(&ip))) // Filter non-global IPs .ban_duration(Some(Duration::from_secs(3600))) .ping_interval(Duration::from_secs(300)) .build(); diff --git a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs index 02ff0cc3ca4..751f8dbb83a 100644 --- a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs +++ b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs @@ -35,7 +35,7 @@ where .unwrap_or(false), Subnet::SyncCommittee(s) => sync_committee_bitfield .as_ref() - .map_or(false, |b| b.get(*s.deref() as usize).unwrap_or(false)), + .is_ok_and(|b| b.get(*s.deref() as usize).unwrap_or(false)), Subnet::DataColumn(s) => { if let Ok(custody_subnet_count) = enr.custody_subnet_count::(&spec) { DataColumnSubnetId::compute_custody_subnets::( @@ -43,7 +43,7 @@ where custody_subnet_count, &spec, ) - .map_or(false, |mut subnets| subnets.contains(s)) + .is_ok_and(|mut subnets| subnets.contains(s)) } else { false } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index d2effd4d037..22a3df1ae85 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs 
+++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1305,7 +1305,7 @@ impl BannedPeersCount { pub fn ip_is_banned(&self, ip: &IpAddr) -> bool { self.banned_peers_per_ip .get(ip) - .map_or(false, |count| *count > BANNED_PEERS_PER_IP_THRESHOLD) + .is_some_and(|count| *count > BANNED_PEERS_PER_IP_THRESHOLD) } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index ee8c27f474c..27c8463a55e 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -99,7 +99,7 @@ impl PeerInfo { Subnet::SyncCommittee(id) => { return meta_data .syncnets() - .map_or(false, |s| s.get(**id as usize).unwrap_or(false)) + .is_ok_and(|s| s.get(**id as usize).unwrap_or(false)) } Subnet::DataColumn(column) => return self.custody_subnets.contains(column), } @@ -264,7 +264,7 @@ impl PeerInfo { /// Reports if this peer has some future validator duty in which case it is valuable to keep it. pub fn has_future_duty(&self) -> bool { - self.min_ttl.map_or(false, |i| i >= Instant::now()) + self.min_ttl.is_some_and(|i| i >= Instant::now()) } /// Returns score of the peer. 
diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index f3c48e42f0b..6b5753e96a9 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -3129,7 +3129,7 @@ impl NetworkBeaconProcessor { .chain .slot_clock .now() - .map_or(false, |current_slot| sync_message_slot == current_slot); + .is_some_and(|current_slot| sync_message_slot == current_slot); self.propagate_if_timely(is_timely, message_id, peer_id) } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index d01c73118c6..d8183de7529 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -186,7 +186,7 @@ impl OperationPool { self.sync_contributions.write().retain(|_, contributions| { // All the contributions in this bucket have the same data, so we only need to // check the first one. 
- contributions.first().map_or(false, |contribution| { + contributions.first().is_some_and(|contribution| { current_slot <= contribution.slot.saturating_add(Slot::new(1)) }) }); @@ -401,7 +401,7 @@ impl OperationPool { && state .validators() .get(slashing.as_inner().signed_header_1.message.proposer_index as usize) - .map_or(false, |validator| !validator.slashed) + .is_some_and(|validator| !validator.slashed) }, |slashing| slashing.as_inner().clone(), E::MaxProposerSlashings::to_usize(), @@ -484,7 +484,7 @@ impl OperationPool { validator.exit_epoch > head_state.finalized_checkpoint().epoch }, ) - .map_or(false, |indices| !indices.is_empty()); + .is_ok_and(|indices| !indices.is_empty()); signature_ok && slashing_ok }); @@ -583,9 +583,7 @@ impl OperationPool { address_change.signature_is_still_valid(&state.fork()) && state .get_validator(address_change.as_inner().message.validator_index as usize) - .map_or(false, |validator| { - !validator.has_execution_withdrawal_credential(spec) - }) + .is_ok_and(|validator| !validator.has_execution_withdrawal_credential(spec)) }, |address_change| address_change.as_inner().clone(), E::MaxBlsToExecutionChanges::to_usize(), @@ -609,9 +607,7 @@ impl OperationPool { address_change.signature_is_still_valid(&state.fork()) && state .get_validator(address_change.as_inner().message.validator_index as usize) - .map_or(false, |validator| { - !validator.has_eth1_withdrawal_credential(spec) - }) + .is_ok_and(|validator| !validator.has_eth1_withdrawal_credential(spec)) }, |address_change| address_change.as_inner().clone(), usize::MAX, diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index 27769a310ac..955bd33b30f 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -265,7 +265,7 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> // `end_slot`. If it tries to continue further a `NoContinuationData` error will be // returned. 
let continuation_data = - if end_slot.map_or(false, |end_slot| end_slot < freezer_upper_bound) { + if end_slot.is_some_and(|end_slot| end_slot < freezer_upper_bound) { None } else { Some(Box::new(get_state()?)) @@ -306,7 +306,7 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> None => { // If the iterator has an end slot (inclusive) which has already been // covered by the (exclusive) frozen forwards iterator, then we're done! - if end_slot.map_or(false, |end_slot| iter.end_slot == end_slot + 1) { + if end_slot.is_some_and(|end_slot| iter.end_slot == end_slot + 1) { *self = Finished; return Ok(None); } diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index 9bec83a35ca..2a3b208aae8 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -111,7 +111,7 @@ where self.store_cold_state(&state_root, &state, &mut io_batch)?; let batch_complete = - num_blocks.map_or(false, |n_blocks| slot == lower_limit_slot + n_blocks as u64); + num_blocks.is_some_and(|n_blocks| slot == lower_limit_slot + n_blocks as u64); let reconstruction_complete = slot + 1 == upper_limit_slot; // Commit the I/O batch if: diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index 5c1faa7f2fd..96e4de46393 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -77,9 +77,7 @@ impl StateCache { if self .finalized_state .as_ref() - .map_or(false, |finalized_state| { - state.slot() < finalized_state.state.slot() - }) + .is_some_and(|finalized_state| state.slot() < finalized_state.state.slot()) { return Err(Error::FinalizedStateDecreasingSlot); } @@ -127,9 +125,7 @@ impl StateCache { if self .finalized_state .as_ref() - .map_or(false, |finalized_state| { - finalized_state.state_root == state_root - }) + .is_some_and(|finalized_state| finalized_state.state_root == state_root) { return Ok(PutStateOutcome::Finalized); } diff --git 
a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index a4850fc1c63..24f6861daa2 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -435,7 +435,7 @@ pub fn recursively_find_voting_keystores>( && dir_entry .file_name() .to_str() - .map_or(false, is_voting_keystore) + .is_some_and(is_voting_keystore) { matches.push(dir_entry.path()) } diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 7fe7f79506c..0ddd867c2f8 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -204,7 +204,7 @@ impl TimeLatch { pub fn elapsed(&mut self) -> bool { let now = Instant::now(); - let is_elapsed = self.0.map_or(false, |elapse_time| now > elapse_time); + let is_elapsed = self.0.is_some_and(|elapse_time| now > elapse_time); if is_elapsed || self.0.is_none() { self.0 = Some(now + LOG_DEBOUNCE_INTERVAL); diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 38ea1411994..5d0bee4c853 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -468,7 +468,7 @@ impl ProtoArray { // 1. The `head_block_root` is a descendant of `latest_valid_ancestor_hash` // 2. The `latest_valid_ancestor_hash` is equal to or a descendant of the finalized block. let latest_valid_ancestor_is_descendant = - latest_valid_ancestor_root.map_or(false, |ancestor_root| { + latest_valid_ancestor_root.is_some_and(|ancestor_root| { self.is_descendant(ancestor_root, head_block_root) && self.is_finalized_checkpoint_or_descendant::(ancestor_root) }); @@ -505,13 +505,13 @@ impl ProtoArray { // head. 
if node .best_child - .map_or(false, |i| invalidated_indices.contains(&i)) + .is_some_and(|i| invalidated_indices.contains(&i)) { node.best_child = None } if node .best_descendant - .map_or(false, |i| invalidated_indices.contains(&i)) + .is_some_and(|i| invalidated_indices.contains(&i)) { node.best_descendant = None } @@ -999,7 +999,7 @@ impl ProtoArray { node.unrealized_finalized_checkpoint, node.unrealized_justified_checkpoint, ] { - if checkpoint.map_or(false, |cp| cp == self.finalized_checkpoint) { + if checkpoint.is_some_and(|cp| cp == self.finalized_checkpoint) { return true; } } @@ -1037,7 +1037,7 @@ impl ProtoArray { .find(|node| { node.execution_status .block_hash() - .map_or(false, |node_block_hash| node_block_hash == *block_hash) + .is_some_and(|node_block_hash| node_block_hash == *block_hash) }) .map(|node| node.root) } diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 00697def5d2..ccff3d80c03 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -53,7 +53,7 @@ pub fn initialize_beacon_state_from_eth1( // https://github.com/ethereum/eth2.0-specs/pull/2323 if spec .altair_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) { upgrade_to_altair(&mut state, spec)?; @@ -63,7 +63,7 @@ pub fn initialize_beacon_state_from_eth1( // Similarly, perform an upgrade to the merge if configured from genesis. 
if spec .bellatrix_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) { // this will set state.latest_execution_payload_header = ExecutionPayloadHeaderBellatrix::default() upgrade_to_bellatrix(&mut state, spec)?; @@ -81,7 +81,7 @@ pub fn initialize_beacon_state_from_eth1( // Upgrade to capella if configured from genesis if spec .capella_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) { upgrade_to_capella(&mut state, spec)?; @@ -98,7 +98,7 @@ pub fn initialize_beacon_state_from_eth1( // Upgrade to deneb if configured from genesis if spec .deneb_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) { upgrade_to_deneb(&mut state, spec)?; @@ -115,7 +115,7 @@ pub fn initialize_beacon_state_from_eth1( // Upgrade to electra if configured from genesis. 
if spec .electra_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) { let post = upgrade_state_to_electra(&mut state, Epoch::new(0), Epoch::new(0), spec)?; state = post; @@ -153,7 +153,7 @@ pub fn initialize_beacon_state_from_eth1( pub fn is_valid_genesis_state(state: &BeaconState, spec: &ChainSpec) -> bool { state .get_active_validator_indices(E::genesis_epoch(), spec) - .map_or(false, |active_validators| { + .is_ok_and(|active_validators| { state.genesis_time() >= spec.min_genesis_time && active_validators.len() as u64 >= spec.min_genesis_active_validator_count }) diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 210db4c9c15..08cfd9cba84 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -38,7 +38,7 @@ pub fn process_sync_aggregate( )?; // If signature set is `None` then the signature is valid (infinity). - if signature_set.map_or(false, |signature| !signature.verify()) { + if signature_set.is_some_and(|signature| !signature.verify()) { return Err(SyncAggregateInvalid::SignatureInvalid.into()); } } diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index 952ab3f6498..5508b80807f 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -151,7 +151,7 @@ impl EpochProcessingSummary { match self { EpochProcessingSummary::Base { statuses, .. 
} => statuses .get(val_index) - .map_or(false, |s| s.is_active_in_current_epoch && !s.is_slashed), + .is_some_and(|s| s.is_active_in_current_epoch && !s.is_slashed), EpochProcessingSummary::Altair { participation, .. } => { participation.is_active_and_unslashed(val_index, participation.current_epoch) } @@ -176,7 +176,7 @@ impl EpochProcessingSummary { match self { EpochProcessingSummary::Base { statuses, .. } => Ok(statuses .get(val_index) - .map_or(false, |s| s.is_current_epoch_target_attester)), + .is_some_and(|s| s.is_current_epoch_target_attester)), EpochProcessingSummary::Altair { participation, .. } => participation .is_current_epoch_unslashed_participating_index( val_index, @@ -247,7 +247,7 @@ impl EpochProcessingSummary { match self { EpochProcessingSummary::Base { statuses, .. } => statuses .get(val_index) - .map_or(false, |s| s.is_active_in_previous_epoch && !s.is_slashed), + .is_some_and(|s| s.is_active_in_previous_epoch && !s.is_slashed), EpochProcessingSummary::Altair { participation, .. } => { participation.is_active_and_unslashed(val_index, participation.previous_epoch) } @@ -267,7 +267,7 @@ impl EpochProcessingSummary { match self { EpochProcessingSummary::Base { statuses, .. } => Ok(statuses .get(val_index) - .map_or(false, |s| s.is_previous_epoch_target_attester)), + .is_some_and(|s| s.is_previous_epoch_target_attester)), EpochProcessingSummary::Altair { participation, .. } => participation .is_previous_epoch_unslashed_participating_index( val_index, @@ -294,7 +294,7 @@ impl EpochProcessingSummary { match self { EpochProcessingSummary::Base { statuses, .. } => Ok(statuses .get(val_index) - .map_or(false, |s| s.is_previous_epoch_head_attester)), + .is_some_and(|s| s.is_previous_epoch_head_attester)), EpochProcessingSummary::Altair { participation, .. 
} => participation .is_previous_epoch_unslashed_participating_index(val_index, TIMELY_HEAD_FLAG_INDEX), } @@ -318,7 +318,7 @@ impl EpochProcessingSummary { match self { EpochProcessingSummary::Base { statuses, .. } => Ok(statuses .get(val_index) - .map_or(false, |s| s.is_previous_epoch_attester)), + .is_some_and(|s| s.is_previous_epoch_attester)), EpochProcessingSummary::Altair { participation, .. } => participation .is_previous_epoch_unslashed_participating_index( val_index, diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index b896dc46932..f7a701fed6d 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -283,7 +283,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, /// Return `true` if this block body has a non-zero number of blobs. pub fn has_blobs(self) -> bool { self.blob_kzg_commitments() - .map_or(false, |blobs| !blobs.is_empty()) + .is_ok_and(|blobs| !blobs.is_empty()) } pub fn attestations_len(&self) -> usize { diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index ad4484b86ae..05f28744faf 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1856,7 +1856,7 @@ impl BeaconState { pub fn committee_cache_is_initialized(&self, relative_epoch: RelativeEpoch) -> bool { let i = Self::committee_cache_index(relative_epoch); - self.committee_cache_at_index(i).map_or(false, |cache| { + self.committee_cache_at_index(i).is_ok_and(|cache| { cache.is_initialized_at(relative_epoch.into_epoch(self.current_epoch())) }) } diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/beacon_state/progressive_balances_cache.rs index fd5e51313f7..bc258ef68de 100644 --- a/consensus/types/src/beacon_state/progressive_balances_cache.rs +++ b/consensus/types/src/beacon_state/progressive_balances_cache.rs @@ -145,7 +145,7 @@ impl 
ProgressiveBalancesCache { pub fn is_initialized_at(&self, epoch: Epoch) -> bool { self.inner .as_ref() - .map_or(false, |inner| inner.current_epoch == epoch) + .is_some_and(|inner| inner.current_epoch == epoch) } /// When a new target attestation has been processed, we update the cached diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 0b33a76ff19..9d3308cf234 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -420,16 +420,14 @@ impl ChainSpec { /// Returns true if the given epoch is greater than or equal to the `EIP7594_FORK_EPOCH`. pub fn is_peer_das_enabled_for_epoch(&self, block_epoch: Epoch) -> bool { - self.eip7594_fork_epoch.map_or(false, |eip7594_fork_epoch| { - block_epoch >= eip7594_fork_epoch - }) + self.eip7594_fork_epoch + .is_some_and(|eip7594_fork_epoch| block_epoch >= eip7594_fork_epoch) } /// Returns true if `EIP7594_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`. pub fn is_peer_das_scheduled(&self) -> bool { - self.eip7594_fork_epoch.map_or(false, |eip7594_fork_epoch| { - eip7594_fork_epoch != self.far_future_epoch - }) + self.eip7594_fork_epoch + .is_some_and(|eip7594_fork_epoch| eip7594_fork_epoch != self.far_future_epoch) } /// Returns a full `Fork` struct for a given epoch. 
diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit_tree_snapshot.rs index df1064daba0..2f9df8758b5 100644 --- a/consensus/types/src/deposit_tree_snapshot.rs +++ b/consensus/types/src/deposit_tree_snapshot.rs @@ -72,8 +72,7 @@ impl DepositTreeSnapshot { Some(Hash256::from_slice(&deposit_root)) } pub fn is_valid(&self) -> bool { - self.calculate_root() - .map_or(false, |calculated| self.deposit_root == calculated) + self.calculate_root() == Some(self.deposit_root) } } diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index 08f8573c6d1..f781aacabdf 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -57,7 +57,7 @@ impl FromStr for GraffitiString { type Err = String; fn from_str(s: &str) -> Result { - if s.as_bytes().len() > GRAFFITI_BYTES_LEN { + if s.len() > GRAFFITI_BYTES_LEN { return Err(format!( "Graffiti exceeds max length {}", GRAFFITI_BYTES_LEN diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 94d95a0d1c4..ecfa04fc816 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -223,7 +223,7 @@ pub fn run( .update_tree_hash_cache() .map_err(|e| format!("Unable to build THC: {:?}", e))?; - if state_root_opt.map_or(false, |expected| expected != state_root) { + if state_root_opt.is_some_and(|expected| expected != state_root) { return Err(format!( "State root mismatch! Expected {}, computed {}", state_root_opt.unwrap(), @@ -331,7 +331,7 @@ fn do_transition( .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?; debug!("Initial tree hash: {:?}", t.elapsed()); - if state_root_opt.map_or(false, |expected| expected != state_root) { + if state_root_opt.is_some_and(|expected| expected != state_root) { return Err(format!( "State root mismatch! 
Expected {}, computed {}", state_root_opt.unwrap(), diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 20b4a337711..e2b49dca297 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -406,7 +406,7 @@ impl SlasherDB { ) -> Result<(), Error> { // Don't update maximum if new target is less than or equal to previous. In the case of // no previous we *do* want to update. - if previous_max_target.map_or(false, |prev_max| max_target <= prev_max) { + if previous_max_target.is_some_and(|prev_max| max_target <= prev_max) { return Ok(()); } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 427bcf5e9c5..a1c74389a78 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -523,7 +523,7 @@ impl Tester { || Ok(()), ))? .map(|avail: AvailabilityProcessingStatus| avail.try_into()); - let success = blob_success && result.as_ref().map_or(false, |inner| inner.is_ok()); + let success = blob_success && result.as_ref().is_ok_and(|inner| inner.is_ok()); if success != valid { return Err(Error::DidntFail(format!( "block with root {} was valid={} whilst test expects valid={}. 
result: {:?}", diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 54ca52447f4..d8cade296bc 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -322,7 +322,7 @@ impl Operation for BeaconBlockBody> { let valid = extra .execution_metadata .as_ref() - .map_or(false, |e| e.execution_valid); + .is_some_and(|e| e.execution_valid); if valid { process_execution_payload::>(state, self.to_ref(), spec) } else { @@ -377,7 +377,7 @@ impl Operation for BeaconBlockBody> { let valid = extra .execution_metadata .as_ref() - .map_or(false, |e| e.execution_valid); + .is_some_and(|e| e.execution_valid); if valid { process_execution_payload::>(state, self.to_ref(), spec) } else { diff --git a/testing/ef_tests/src/decode.rs b/testing/ef_tests/src/decode.rs index 757b9bf3c43..eb88ac6af1f 100644 --- a/testing/ef_tests/src/decode.rs +++ b/testing/ef_tests/src/decode.rs @@ -28,7 +28,7 @@ pub fn log_file_access>(file_accessed: P) { writeln!(&mut file, "{:?}", file_accessed.as_ref()).expect("should write to file"); - file.unlock().expect("unable to unlock file"); + fs2::FileExt::unlock(&file).expect("unable to unlock file"); } pub fn yaml_decode(string: &str) -> Result { diff --git a/validator_client/doppelganger_service/src/lib.rs b/validator_client/doppelganger_service/src/lib.rs index 35228fe3546..4a593c2700e 100644 --- a/validator_client/doppelganger_service/src/lib.rs +++ b/validator_client/doppelganger_service/src/lib.rs @@ -162,7 +162,7 @@ impl DoppelgangerState { /// If the BN fails to respond to either of these requests, simply return an empty response. /// This behaviour is to help prevent spurious failures on the BN from needlessly preventing /// doppelganger progression. 
-async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( +async fn beacon_node_liveness( beacon_nodes: Arc>, log: Logger, current_epoch: Epoch, diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index baaf930c68b..71611339f94 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -1113,9 +1113,7 @@ fn max_or(opt_x: Option, y: T) -> T { /// /// If prev is `None` and `new` is `Some` then `true` is returned. fn monotonic(new: Option, prev: Option) -> bool { - new.map_or(false, |new_val| { - prev.map_or(true, |prev_val| new_val >= prev_val) - }) + new.is_some_and(|new_val| prev.map_or(true, |prev_val| new_val >= prev_val)) } /// The result of importing a single entry from an interchange file. diff --git a/validator_client/validator_services/src/preparation_service.rs b/validator_client/validator_services/src/preparation_service.rs index 480f4af2b3c..fe6eab3a8a7 100644 --- a/validator_client/validator_services/src/preparation_service.rs +++ b/validator_client/validator_services/src/preparation_service.rs @@ -258,7 +258,7 @@ impl PreparationService { .slot_clock .now() .map_or(E::genesis_epoch(), |slot| slot.epoch(E::slots_per_epoch())); - spec.bellatrix_fork_epoch.map_or(false, |fork_epoch| { + spec.bellatrix_fork_epoch.is_some_and(|fork_epoch| { current_epoch + PROPOSER_PREPARATION_LOOKAHEAD_EPOCHS >= fork_epoch }) } diff --git a/validator_client/validator_services/src/sync.rs b/validator_client/validator_services/src/sync.rs index af501326f42..dd3e05088e5 100644 --- a/validator_client/validator_services/src/sync.rs +++ b/validator_client/validator_services/src/sync.rs @@ -94,7 +94,7 @@ impl SyncDutiesMap { self.committees .read() .get(&committee_period) - .map_or(false, |committee_duties| { + .is_some_and(|committee_duties| { let validator_duties = 
committee_duties.validators.read(); validator_indices .iter() diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index d4403b46131..b40fe61a820 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -286,7 +286,7 @@ struct ValidatorsAndDeposits { } impl ValidatorsAndDeposits { - async fn new<'a, E: EthSpec>(config: CreateConfig, spec: &ChainSpec) -> Result { + async fn new(config: CreateConfig, spec: &ChainSpec) -> Result { let CreateConfig { // The output path is handled upstream. output_path: _, @@ -545,7 +545,7 @@ pub async fn cli_run( } } -async fn run<'a, E: EthSpec>(config: CreateConfig, spec: &ChainSpec) -> Result<(), String> { +async fn run(config: CreateConfig, spec: &ChainSpec) -> Result<(), String> { let output_path = config.output_path.clone(); if !output_path.exists() { diff --git a/validator_manager/src/delete_validators.rs b/validator_manager/src/delete_validators.rs index a2d6c062fa2..5ef647c5af1 100644 --- a/validator_manager/src/delete_validators.rs +++ b/validator_manager/src/delete_validators.rs @@ -86,7 +86,7 @@ pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<() } } -async fn run<'a>(config: DeleteConfig) -> Result<(), String> { +async fn run(config: DeleteConfig) -> Result<(), String> { let DeleteConfig { vc_url, vc_token_path, diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index 3cebc10bb38..63c7ca45965 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -209,7 +209,7 @@ pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<() } } -async fn run<'a>(config: ImportConfig) -> Result<(), String> { +async fn run(config: ImportConfig) -> Result<(), String> { let ImportConfig { validators_file_path, keystore_file_path, diff --git a/validator_manager/src/list_validators.rs 
b/validator_manager/src/list_validators.rs index e3deb0b21af..a0a1c5fb400 100644 --- a/validator_manager/src/list_validators.rs +++ b/validator_manager/src/list_validators.rs @@ -58,7 +58,7 @@ pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<() } } -async fn run<'a>(config: ListConfig) -> Result, String> { +async fn run(config: ListConfig) -> Result, String> { let ListConfig { vc_url, vc_token_path, diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index 4d0820f5a8b..abac0716738 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -277,7 +277,7 @@ pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<() } } -async fn run<'a>(config: MoveConfig) -> Result<(), String> { +async fn run(config: MoveConfig) -> Result<(), String> { let MoveConfig { src_vc_url, src_vc_token_path, From a244aa3a6971572c65dd1c68c726547a1d38c033 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Fri, 10 Jan 2025 08:13:32 +0700 Subject: [PATCH 02/26] Add libssl install to udeps task (#6777) * Add libssl install to udeps task * Use HTTPS --- .github/workflows/test-suite.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 45f3b757e74..0ee9dbb622f 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -392,6 +392,10 @@ jobs: cache: false env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Fetch libssl1.1 + run: wget https://nz2.archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb + - name: Install libssl1.1 + run: sudo dpkg -i libssl1.1_1.1.1f-1ubuntu2_amd64.deb - name: Create Cargo config dir run: mkdir -p .cargo - name: Install custom Cargo config From 722573f7ed102bda56f2778984989151980a50ff Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Fri, 10 Jan 
2025 13:25:20 +0800 Subject: [PATCH 03/26] Use oldest_block_slot to break off pruning payloads (#6745) * Use oldest_block_slot to break of pruning payloads * Update beacon_node/store/src/hot_cold_store.rs Co-authored-by: Michael Sproul * Merge remote-tracking branch 'origin/unstable' into anchor_slot_pruning --- beacon_node/store/src/hot_cold_store.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index da3e6d4ebcb..c6148e53144 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -2629,7 +2629,7 @@ impl, Cold: ItemStore> HotColdDB "Pruning finalized payloads"; "info" => "you may notice degraded I/O performance while this runs" ); - let anchor_slot = self.get_anchor_info().anchor_slot; + let anchor_info = self.get_anchor_info(); let mut ops = vec![]; let mut last_pruned_block_root = None; @@ -2670,10 +2670,10 @@ impl, Cold: ItemStore> HotColdDB ops.push(StoreOp::DeleteExecutionPayload(block_root)); } - if slot == anchor_slot { + if slot <= anchor_info.oldest_block_slot { info!( self.log, - "Payload pruning reached anchor state"; + "Payload pruning reached anchor oldest block slot"; "slot" => slot ); break; From ecdf2d891fb78d84e500ca8cf3a9deb9706263cf Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 10 Jan 2025 09:25:23 +0400 Subject: [PATCH 04/26] Add Fulu boilerplate (#6695) * Add Fulu boilerplate * Add more boilerplate * Change fulu_time to osaka_time * Merge branch 'unstable' into fulu-boilerplate * Fix tests * Merge branch 'unstable' into fulu-boilerplate * More test fixes * Apply suggestions * Remove `get_payload` boilerplate * Add lightclient fulu types and fix beacon-chain-tests * Disable Fulu in ef-tests * Reduce boilerplate for future forks * Small fixes * One more fix * Apply suggestions * Merge branch 'unstable' into fulu-boilerplate * Fix lints --- Makefile | 2 +- 
.../beacon_chain/src/attestation_rewards.rs | 11 +- .../beacon_chain/src/beacon_block_streamer.rs | 9 +- beacon_node/beacon_chain/src/beacon_chain.rs | 72 ++++++-- .../beacon_chain/src/execution_payload.rs | 20 +-- .../beacon_chain/src/fulu_readiness.rs | 114 ++++++++++++ beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/test_utils.rs | 52 ++++-- .../beacon_chain/tests/block_verification.rs | 9 + beacon_node/beacon_chain/tests/store_tests.rs | 1 + .../beacon_chain/tests/validator_monitor.rs | 1 + beacon_node/client/src/notifier.rs | 58 ++++++ beacon_node/execution_layer/src/engine_api.rs | 60 ++++++- .../execution_layer/src/engine_api/http.rs | 90 ++++++++-- .../src/engine_api/json_structures.rs | 87 ++++++++- .../src/engine_api/new_payload_request.rs | 29 ++- beacon_node/execution_layer/src/lib.rs | 13 +- .../test_utils/execution_block_generator.rs | 49 ++++- .../src/test_utils/handle_rpc.rs | 99 ++++++++++- .../src/test_utils/mock_builder.rs | 54 +++++- .../src/test_utils/mock_execution_layer.rs | 3 + .../execution_layer/src/test_utils/mod.rs | 9 + .../tests/broadcast_validation_tests.rs | 36 ++-- beacon_node/http_api/tests/tests.rs | 4 +- .../lighthouse_network/src/rpc/codec.rs | 106 ++++++++--- .../lighthouse_network/src/rpc/protocol.rs | 38 +++- .../lighthouse_network/src/types/pubsub.rs | 9 +- .../lighthouse_network/src/types/topics.rs | 1 + .../lighthouse_network/tests/common.rs | 3 + beacon_node/network/src/sync/tests/lookups.rs | 2 +- beacon_node/operation_pool/src/lib.rs | 71 +++----- beacon_node/src/lib.rs | 1 + .../store/src/impls/execution_payload.rs | 23 ++- beacon_node/store/src/partial_beacon_state.rs | 66 +++++-- common/eth2/src/types.rs | 33 ++-- .../chiado/config.yaml | 9 +- .../gnosis/config.yaml | 9 +- .../holesky/config.yaml | 7 +- .../mainnet/config.yaml | 20 +-- .../sepolia/config.yaml | 2 +- consensus/fork_choice/src/fork_choice.rs | 14 +- consensus/fork_choice/tests/tests.rs | 2 +- 
.../common/get_attestation_participation.rs | 21 +-- .../src/common/slash_validator.rs | 13 +- consensus/state_processing/src/genesis.rs | 20 ++- .../src/per_block_processing.rs | 119 +++++++------ .../process_operations.rs | 39 ++-- .../per_block_processing/signature_sets.rs | 22 ++- .../verify_attestation.rs | 23 +-- .../src/per_epoch_processing.rs | 11 +- .../src/per_slot_processing.rs | 7 +- consensus/state_processing/src/upgrade.rs | 2 + .../state_processing/src/upgrade/fulu.rs | 94 ++++++++++ consensus/types/presets/gnosis/fulu.yaml | 3 + consensus/types/presets/mainnet/fulu.yaml | 3 + consensus/types/presets/minimal/fulu.yaml | 3 + consensus/types/src/beacon_block.rs | 163 ++++++++++++++++- consensus/types/src/beacon_block_body.rs | 124 ++++++++++++- consensus/types/src/beacon_state.rs | 167 ++++++++++++++---- .../progressive_balances_cache.rs | 9 +- consensus/types/src/builder_bid.rs | 12 +- consensus/types/src/chain_spec.rs | 74 ++++++-- consensus/types/src/config_and_preset.rs | 37 ++-- consensus/types/src/execution_payload.rs | 24 ++- .../types/src/execution_payload_header.rs | 91 ++++++++-- consensus/types/src/fork_context.rs | 7 + consensus/types/src/fork_name.rs | 32 +++- consensus/types/src/lib.rs | 36 ++-- consensus/types/src/light_client_bootstrap.rs | 27 ++- .../types/src/light_client_finality_update.rs | 33 +++- consensus/types/src/light_client_header.rs | 60 ++++++- .../src/light_client_optimistic_update.rs | 23 ++- consensus/types/src/light_client_update.rs | 47 ++++- consensus/types/src/payload.rs | 44 ++++- consensus/types/src/preset.rs | 18 ++ consensus/types/src/signed_beacon_block.rs | 75 +++++++- lcli/src/mock_el.rs | 2 + testing/ef_tests/src/cases/common.rs | 1 + .../ef_tests/src/cases/epoch_processing.rs | 129 ++++++-------- testing/ef_tests/src/cases/fork.rs | 3 +- .../src/cases/merkle_proof_validity.rs | 8 +- testing/ef_tests/src/cases/operations.rs | 49 +++-- testing/ef_tests/src/cases/transition.rs | 8 + 
testing/ef_tests/src/handler.rs | 10 +- testing/simulator/src/basic_sim.rs | 4 +- testing/simulator/src/fallback_sim.rs | 4 +- testing/simulator/src/local_network.rs | 5 + .../beacon_node_fallback/src/lib.rs | 8 + validator_client/http_api/src/test_utils.rs | 4 +- validator_client/http_api/src/tests.rs | 4 +- .../signing_method/src/web3signer.rs | 6 + 91 files changed, 2359 insertions(+), 668 deletions(-) create mode 100644 beacon_node/beacon_chain/src/fulu_readiness.rs create mode 100644 consensus/state_processing/src/upgrade/fulu.rs create mode 100644 consensus/types/presets/gnosis/fulu.yaml create mode 100644 consensus/types/presets/mainnet/fulu.yaml create mode 100644 consensus/types/presets/minimal/fulu.yaml diff --git a/Makefile b/Makefile index 8faf8a2e54b..4d95f50c5ce 100644 --- a/Makefile +++ b/Makefile @@ -30,7 +30,7 @@ PROFILE ?= release # List of all hard forks. This list is used to set env variables for several tests so that # they run for different forks. -FORKS=phase0 altair bellatrix capella deneb electra +FORKS=phase0 altair bellatrix capella deneb electra fulu # Extra flags for Cargo CARGO_INSTALL_EXTRA_FLAGS?= diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 87b7384ea68..3b37b09e402 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -51,13 +51,10 @@ impl BeaconChain { .get_state(&state_root, Some(state_slot))? 
.ok_or(BeaconChainError::MissingBeaconState(state_root))?; - match state { - BeaconState::Base(_) => self.compute_attestation_rewards_base(state, validators), - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => self.compute_attestation_rewards_altair(state, validators), + if state.fork_name_unchecked().altair_enabled() { + self.compute_attestation_rewards_altair(state, validators) + } else { + self.compute_attestation_rewards_base(state, validators) } } diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index b76dba88fd0..32ec7768680 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -15,7 +15,7 @@ use types::{ }; use types::{ ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadElectra, - ExecutionPayloadHeader, + ExecutionPayloadFulu, ExecutionPayloadHeader, }; #[derive(PartialEq)] @@ -99,6 +99,7 @@ fn reconstruct_default_header_block( ForkName::Capella => ExecutionPayloadCapella::default().into(), ForkName::Deneb => ExecutionPayloadDeneb::default().into(), ForkName::Electra => ExecutionPayloadElectra::default().into(), + ForkName::Fulu => ExecutionPayloadFulu::default().into(), ForkName::Base | ForkName::Altair => { return Err(Error::PayloadReconstruction(format!( "Block with fork variant {} has execution payload", @@ -742,13 +743,14 @@ mod tests { } #[tokio::test] - async fn check_all_blocks_from_altair_to_electra() { + async fn check_all_blocks_from_altair_to_fulu() { let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize; - let num_epochs = 10; + let num_epochs = 12; let bellatrix_fork_epoch = 2usize; let capella_fork_epoch = 4usize; let deneb_fork_epoch = 6usize; let electra_fork_epoch = 8usize; + let fulu_fork_epoch = 10usize; let num_blocks_produced = num_epochs * 
slots_per_epoch; let mut spec = test_spec::(); @@ -757,6 +759,7 @@ mod tests { spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64)); spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64)); spec.electra_fork_epoch = Some(Epoch::new(electra_fork_epoch as u64)); + spec.fulu_fork_epoch = Some(Epoch::new(fulu_fork_epoch as u64)); let spec = Arc::new(spec); let harness = get_harness(VALIDATOR_COUNT, spec.clone()); diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 7bbb9ff74d6..d84cd9615aa 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -5317,23 +5317,19 @@ impl BeaconChain { // If required, start the process of loading an execution payload from the EL early. This // allows it to run concurrently with things like attestation packing. - let prepare_payload_handle = match &state { - BeaconState::Base(_) | BeaconState::Altair(_) => None, - BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => { - let prepare_payload_handle = get_execution_payload( - self.clone(), - &state, - parent_root, - proposer_index, - builder_params, - builder_boost_factor, - block_production_version, - )?; - Some(prepare_payload_handle) - } + let prepare_payload_handle = if state.fork_name_unchecked().bellatrix_enabled() { + let prepare_payload_handle = get_execution_payload( + self.clone(), + &state, + parent_root, + proposer_index, + builder_params, + builder_boost_factor, + block_production_version, + )?; + Some(prepare_payload_handle) + } else { + None }; let (mut proposer_slashings, mut attester_slashings, mut voluntary_exits) = @@ -5751,6 +5747,48 @@ impl BeaconChain { execution_payload_value, ) } + BeaconState::Fulu(_) => { + let ( + payload, + kzg_commitments, + maybe_blobs_and_proofs, + maybe_requests, + execution_payload_value, + ) = block_contents + 
.ok_or(BlockProductionError::MissingExecutionPayload)? + .deconstruct(); + + ( + BeaconBlock::Fulu(BeaconBlockFulu { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyFulu { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings_electra.into(), + attestations: attestations_electra.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: payload + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + bls_to_execution_changes: bls_to_execution_changes.into(), + blob_kzg_commitments: kzg_commitments + .ok_or(BlockProductionError::InvalidPayloadFork)?, + execution_requests: maybe_requests + .ok_or(BlockProductionError::MissingExecutionRequests)?, + }, + }), + maybe_blobs_and_proofs, + execution_payload_value, + ) + } }; let block = SignedBeaconBlock::from_block( diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 502a7918a11..720f98e2986 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -374,19 +374,15 @@ pub fn get_execution_payload( let latest_execution_payload_header = state.latest_execution_payload_header()?; let latest_execution_payload_header_block_hash = latest_execution_payload_header.block_hash(); let latest_execution_payload_header_gas_limit = latest_execution_payload_header.gas_limit(); - let withdrawals = match state { - &BeaconState::Capella(_) | &BeaconState::Deneb(_) | &BeaconState::Electra(_) => { - Some(get_expected_withdrawals(state, spec)?.0.into()) - } - &BeaconState::Bellatrix(_) => None, - // These shouldn't happen but they're here to make the pattern irrefutable - &BeaconState::Base(_) | &BeaconState::Altair(_) => None, + let withdrawals = 
if state.fork_name_unchecked().capella_enabled() { + Some(get_expected_withdrawals(state, spec)?.0.into()) + } else { + None }; - let parent_beacon_block_root = match state { - BeaconState::Deneb(_) | BeaconState::Electra(_) => Some(parent_block_root), - BeaconState::Bellatrix(_) | BeaconState::Capella(_) => None, - // These shouldn't happen but they're here to make the pattern irrefutable - BeaconState::Base(_) | BeaconState::Altair(_) => None, + let parent_beacon_block_root = if state.fork_name_unchecked().deneb_enabled() { + Some(parent_block_root) + } else { + None }; // Spawn a task to obtain the execution payload from the EL via a series of async calls. The diff --git a/beacon_node/beacon_chain/src/fulu_readiness.rs b/beacon_node/beacon_chain/src/fulu_readiness.rs new file mode 100644 index 00000000000..71494623f83 --- /dev/null +++ b/beacon_node/beacon_chain/src/fulu_readiness.rs @@ -0,0 +1,114 @@ +//! Provides tools for checking if a node is ready for the Fulu upgrade. + +use crate::{BeaconChain, BeaconChainTypes}; +use execution_layer::http::{ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V5}; +use serde::{Deserialize, Serialize}; +use std::fmt; +use std::time::Duration; +use types::*; + +/// The time before the Fulu fork when we will start issuing warnings about preparation. +use super::bellatrix_readiness::SECONDS_IN_A_WEEK; +pub const FULU_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; +pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300; + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +#[serde(tag = "type")] +pub enum FuluReadiness { + /// The execution engine is fulu-enabled (as far as we can tell) + Ready, + /// We are connected to an execution engine which doesn't support the V5 engine api methods + V5MethodsNotSupported { error: String }, + /// The capabilities exchange with the EL failed, there might be a problem with + /// connectivity, authentication or a difference in configuration.
+ ExchangeCapabilitiesFailed { error: String }, + /// The user has not configured an execution endpoint + NoExecutionEndpoint, +} + +impl fmt::Display for FuluReadiness { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + FuluReadiness::Ready => { + write!(f, "This node appears ready for Fulu.") + } + FuluReadiness::ExchangeCapabilitiesFailed { error } => write!( + f, + "Could not exchange capabilities with the \ + execution endpoint: {}", + error + ), + FuluReadiness::NoExecutionEndpoint => write!( + f, + "The --execution-endpoint flag is not specified, this is a \ + requirement post-merge" + ), + FuluReadiness::V5MethodsNotSupported { error } => write!( + f, + "Execution endpoint does not support Fulu methods: {}", + error + ), + } + } +} + +impl BeaconChain { + /// Returns `true` if fulu epoch is set and Fulu fork has occurred or will + /// occur within `FULU_READINESS_PREPARATION_SECONDS` + pub fn is_time_to_prepare_for_fulu(&self, current_slot: Slot) -> bool { + if let Some(fulu_epoch) = self.spec.fulu_fork_epoch { + let fulu_slot = fulu_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let fulu_readiness_preparation_slots = + FULU_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot; + // Return `true` if Fulu has happened or is within the preparation time. + current_slot + fulu_readiness_preparation_slots > fulu_slot + } else { + // The Fulu fork epoch has not been defined yet, no need to prepare. + false + } + } + + /// Attempts to connect to the EL and confirm that it is ready for fulu. 
+ pub async fn check_fulu_readiness(&self) -> FuluReadiness { + if let Some(el) = self.execution_layer.as_ref() { + match el + .get_engine_capabilities(Some(Duration::from_secs( + ENGINE_CAPABILITIES_REFRESH_INTERVAL, + ))) + .await + { + Err(e) => { + // The EL was either unreachable or responded with an error + FuluReadiness::ExchangeCapabilitiesFailed { + error: format!("{:?}", e), + } + } + Ok(capabilities) => { + let mut missing_methods = String::from("Required Methods Unsupported:"); + let mut all_good = true; + if !capabilities.get_payload_v5 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_GET_PAYLOAD_V5); + all_good = false; + } + if !capabilities.new_payload_v5 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_NEW_PAYLOAD_V5); + all_good = false; + } + + if all_good { + FuluReadiness::Ready + } else { + FuluReadiness::V5MethodsNotSupported { + error: missing_methods, + } + } + } + } + } else { + FuluReadiness::NoExecutionEndpoint + } + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index d9728b9fd41..4783945eb15 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -31,6 +31,7 @@ pub mod execution_payload; pub mod fetch_blobs; pub mod fork_choice_signal; pub mod fork_revert; +pub mod fulu_readiness; pub mod graffiti_calculator; mod head_tracker; pub mod historical_blocks; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 093ee0c44b4..d37398e4e02 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -501,6 +501,9 @@ where spec.electra_fork_epoch.map(|epoch| { genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() }); + mock.server.execution_block_generator().osaka_time = spec.fulu_fork_epoch.map(|epoch| { + genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); self } @@ -623,6 +626,9 
@@ pub fn mock_execution_layer_from_parts( let prague_time = spec.electra_fork_epoch.map(|epoch| { HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() }); + let osaka_time = spec.fulu_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); let kzg = get_kzg(spec); @@ -632,6 +638,7 @@ pub fn mock_execution_layer_from_parts( shanghai_time, cancun_time, prague_time, + osaka_time, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec.clone(), Some(kzg), @@ -913,15 +920,12 @@ where &self.spec, )); - let block_contents: SignedBlockContentsTuple = match *signed_block { - SignedBeaconBlock::Base(_) - | SignedBeaconBlock::Altair(_) - | SignedBeaconBlock::Bellatrix(_) - | SignedBeaconBlock::Capella(_) => (signed_block, None), - SignedBeaconBlock::Deneb(_) | SignedBeaconBlock::Electra(_) => { + let block_contents: SignedBlockContentsTuple = + if signed_block.fork_name_unchecked().deneb_enabled() { (signed_block, block_response.blob_items) - } - }; + } else { + (signed_block, None) + }; (block_contents, block_response.state) } @@ -977,15 +981,12 @@ where &self.spec, )); - let block_contents: SignedBlockContentsTuple = match *signed_block { - SignedBeaconBlock::Base(_) - | SignedBeaconBlock::Altair(_) - | SignedBeaconBlock::Bellatrix(_) - | SignedBeaconBlock::Capella(_) => (signed_block, None), - SignedBeaconBlock::Deneb(_) | SignedBeaconBlock::Electra(_) => { + let block_contents: SignedBlockContentsTuple = + if signed_block.fork_name_unchecked().deneb_enabled() { (signed_block, block_response.blob_items) - } - }; + } else { + (signed_block, None) + }; (block_contents, pre_state) } @@ -2863,6 +2864,25 @@ pub fn generate_rand_block_and_blobs( message.body.blob_kzg_commitments = bundle.commitments.clone(); bundle } + SignedBeaconBlock::Fulu(SignedBeaconBlockFulu { + ref mut message, .. + }) => { + // Get either zero blobs or a random number of blobs between 1 and Max Blobs. 
+ let payload: &mut FullPayloadFulu = &mut message.body.execution_payload; + let num_blobs = match num_blobs { + NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()), + NumBlobs::Number(n) => n, + NumBlobs::None => 0, + }; + let (bundle, transactions) = + execution_layer::test_utils::generate_blobs::(num_blobs).unwrap(); + payload.execution_payload.transactions = <_>::default(); + for tx in Vec::from(transactions) { + payload.execution_payload.transactions.push(tx).unwrap(); + } + message.body.blob_kzg_commitments = bundle.commitments.clone(); + bundle + } _ => return (block, blob_sidecars), }; diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index f094a173eec..103734b2247 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -754,6 +754,11 @@ async fn invalid_signature_attester_slashing() { .push(attester_slashing.as_electra().unwrap().clone()) .expect("should update attester slashing"); } + BeaconBlockBodyRefMut::Fulu(ref mut blk) => { + blk.attester_slashings + .push(attester_slashing.as_electra().unwrap().clone()) + .expect("should update attester slashing"); + } } snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); @@ -809,6 +814,10 @@ async fn invalid_signature_attestation() { .attestations .get_mut(0) .map(|att| att.signature = junk_aggregate_signature()), + BeaconBlockBodyRefMut::Fulu(ref mut blk) => blk + .attestations + .get_mut(0) + .map(|att| att.signature = junk_aggregate_signature()), }; if block.body().attestations_len() > 0 { diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index e1258ccdea7..ed97b8d6345 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -147,6 +147,7 @@ async fn light_client_bootstrap_test() { 
LightClientBootstrap::Capella(lc_bootstrap) => lc_bootstrap.header.beacon.slot, LightClientBootstrap::Deneb(lc_bootstrap) => lc_bootstrap.header.beacon.slot, LightClientBootstrap::Electra(lc_bootstrap) => lc_bootstrap.header.beacon.slot, + LightClientBootstrap::Fulu(lc_bootstrap) => lc_bootstrap.header.beacon.slot, }; assert_eq!( diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index b4a54d26676..91de4fe2702 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -214,6 +214,7 @@ async fn produces_missed_blocks() { ForkName::Capella => 11, ForkName::Deneb => 3, ForkName::Electra => 1, + ForkName::Fulu => 6, }; let harness2 = get_harness(validator_count, vec![validator_index_to_monitor]); diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index e88803e94f3..0c3b1578d6e 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -4,6 +4,7 @@ use beacon_chain::{ capella_readiness::CapellaReadiness, deneb_readiness::DenebReadiness, electra_readiness::ElectraReadiness, + fulu_readiness::FuluReadiness, BeaconChain, BeaconChainTypes, ExecutionStatus, }; use lighthouse_network::{types::SyncState, NetworkGlobals}; @@ -315,6 +316,7 @@ pub fn spawn_notifier( capella_readiness_logging(current_slot, &beacon_chain, &log).await; deneb_readiness_logging(current_slot, &beacon_chain, &log).await; electra_readiness_logging(current_slot, &beacon_chain, &log).await; + fulu_readiness_logging(current_slot, &beacon_chain, &log).await; } }; @@ -586,6 +588,62 @@ async fn electra_readiness_logging( } } +/// Provides some helpful logging to users to indicate if their node is ready for Fulu. 
+async fn fulu_readiness_logging( + current_slot: Slot, + beacon_chain: &BeaconChain, + log: &Logger, +) { + let fulu_completed = beacon_chain + .canonical_head + .cached_head() + .snapshot + .beacon_state + .fork_name_unchecked() + .fulu_enabled(); + + let has_execution_layer = beacon_chain.execution_layer.is_some(); + + if fulu_completed && has_execution_layer + || !beacon_chain.is_time_to_prepare_for_fulu(current_slot) + { + return; + } + + if fulu_completed && !has_execution_layer { + error!( + log, + "Execution endpoint required"; + "info" => "you need a Fulu enabled execution engine to validate blocks." + ); + return; + } + + match beacon_chain.check_fulu_readiness().await { + FuluReadiness::Ready => { + info!( + log, + "Ready for Fulu"; + "info" => "ensure the execution endpoint is updated to the latest Fulu release" + ) + } + readiness @ FuluReadiness::ExchangeCapabilitiesFailed { error: _ } => { + error!( + log, + "Not ready for Fulu"; + "hint" => "the execution endpoint may be offline", + "info" => %readiness, + ) + } + readiness => warn!( + log, + "Not ready for Fulu"; + "hint" => "try updating the execution endpoint", + "info" => %readiness, + ), + } +} + async fn genesis_execution_payload_logging( beacon_chain: &BeaconChain, log: &Logger, diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 083aaf2e258..b9d878b1f86 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -3,8 +3,8 @@ use crate::http::{ ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_BLOBS_V1, ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, - ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, - ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, + ENGINE_GET_PAYLOAD_V3, 
ENGINE_GET_PAYLOAD_V4, ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V1, + ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V5, }; use eth2::types::{ BlobsBundle, SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2, @@ -24,7 +24,7 @@ pub use types::{ }; use types::{ ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, ExecutionRequests, KzgProofs, + ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionRequests, KzgProofs, }; use types::{Graffiti, GRAFFITI_BYTES_LEN}; @@ -35,7 +35,7 @@ mod new_payload_request; pub use new_payload_request::{ NewPayloadRequest, NewPayloadRequestBellatrix, NewPayloadRequestCapella, - NewPayloadRequestDeneb, NewPayloadRequestElectra, + NewPayloadRequestDeneb, NewPayloadRequestElectra, NewPayloadRequestFulu, }; pub const LATEST_TAG: &str = "latest"; @@ -261,7 +261,7 @@ pub struct ProposeBlindedBlockResponse { } #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes(derive(Clone, Debug, PartialEq),), map_into(ExecutionPayload), map_ref_into(ExecutionPayloadRef), @@ -281,12 +281,14 @@ pub struct GetPayloadResponse { pub execution_payload: ExecutionPayloadDeneb, #[superstruct(only(Electra), partial_getter(rename = "execution_payload_electra"))] pub execution_payload: ExecutionPayloadElectra, + #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_fulu"))] + pub execution_payload: ExecutionPayloadFulu, pub block_value: Uint256, - #[superstruct(only(Deneb, Electra))] + #[superstruct(only(Deneb, Electra, Fulu))] pub blobs_bundle: BlobsBundle, - #[superstruct(only(Deneb, Electra), partial_getter(copy))] + #[superstruct(only(Deneb, Electra, Fulu), partial_getter(copy))] pub should_override_builder: bool, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub requests: ExecutionRequests, } @@ -354,6 +356,12 @@ impl From> 
Some(inner.blobs_bundle), Some(inner.requests), ), + GetPayloadResponse::Fulu(inner) => ( + ExecutionPayload::Fulu(inner.execution_payload), + inner.block_value, + Some(inner.blobs_bundle), + Some(inner.requests), + ), } } } @@ -487,6 +495,34 @@ impl ExecutionPayloadBodyV1 { )) } } + ExecutionPayloadHeader::Fulu(header) => { + if let Some(withdrawals) = self.withdrawals { + Ok(ExecutionPayload::Fulu(ExecutionPayloadFulu { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + withdrawals, + blob_gas_used: header.blob_gas_used, + excess_blob_gas: header.excess_blob_gas, + })) + } else { + Err(format!( + "block {} is post capella but payload body doesn't have withdrawals", + header.block_hash + )) + } + } } } } @@ -497,6 +533,7 @@ pub struct EngineCapabilities { pub new_payload_v2: bool, pub new_payload_v3: bool, pub new_payload_v4: bool, + pub new_payload_v5: bool, pub forkchoice_updated_v1: bool, pub forkchoice_updated_v2: bool, pub forkchoice_updated_v3: bool, @@ -506,6 +543,7 @@ pub struct EngineCapabilities { pub get_payload_v2: bool, pub get_payload_v3: bool, pub get_payload_v4: bool, + pub get_payload_v5: bool, pub get_client_version_v1: bool, pub get_blobs_v1: bool, } @@ -525,6 +563,9 @@ impl EngineCapabilities { if self.new_payload_v4 { response.push(ENGINE_NEW_PAYLOAD_V4); } + if self.new_payload_v5 { + response.push(ENGINE_NEW_PAYLOAD_V5); + } if self.forkchoice_updated_v1 { response.push(ENGINE_FORKCHOICE_UPDATED_V1); } @@ -552,6 +593,9 @@ impl EngineCapabilities { if self.get_payload_v4 { response.push(ENGINE_GET_PAYLOAD_V4); } 
+ if self.get_payload_v5 { + response.push(ENGINE_GET_PAYLOAD_V5); + } if self.get_client_version_v1 { response.push(ENGINE_GET_CLIENT_VERSION_V1); } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index e2a81c072c4..1fd9f81d46f 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -35,12 +35,14 @@ pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1"; pub const ENGINE_NEW_PAYLOAD_V2: &str = "engine_newPayloadV2"; pub const ENGINE_NEW_PAYLOAD_V3: &str = "engine_newPayloadV3"; pub const ENGINE_NEW_PAYLOAD_V4: &str = "engine_newPayloadV4"; +pub const ENGINE_NEW_PAYLOAD_V5: &str = "engine_newPayloadV5"; pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2"; pub const ENGINE_GET_PAYLOAD_V3: &str = "engine_getPayloadV3"; pub const ENGINE_GET_PAYLOAD_V4: &str = "engine_getPayloadV4"; +pub const ENGINE_GET_PAYLOAD_V5: &str = "engine_getPayloadV5"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; @@ -72,10 +74,12 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, + ENGINE_NEW_PAYLOAD_V5, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, + ENGINE_GET_PAYLOAD_V5, ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, @@ -825,6 +829,30 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn new_payload_v5_fulu( + &self, + new_payload_request_fulu: NewPayloadRequestFulu<'_, E>, + ) -> Result { + let params = json!([ + JsonExecutionPayload::V5(new_payload_request_fulu.execution_payload.clone().into()), + new_payload_request_fulu.versioned_hashes, 
+ new_payload_request_fulu.parent_beacon_block_root, + new_payload_request_fulu + .execution_requests + .get_execution_requests_list(), + ]); + + let response: JsonPayloadStatusV1 = self + .rpc_request( + ENGINE_NEW_PAYLOAD_V5, + params, + ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + pub async fn get_payload_v1( &self, payload_id: PayloadId, @@ -880,9 +908,10 @@ impl HttpJsonRpc { .try_into() .map_err(Error::BadResponse) } - ForkName::Base | ForkName::Altair | ForkName::Deneb | ForkName::Electra => Err( - Error::UnsupportedForkVariant(format!("called get_payload_v2 with {}", fork_name)), - ), + _ => Err(Error::UnsupportedForkVariant(format!( + "called get_payload_v2 with {}", + fork_name + ))), } } @@ -906,11 +935,7 @@ impl HttpJsonRpc { .try_into() .map_err(Error::BadResponse) } - ForkName::Base - | ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella - | ForkName::Electra => Err(Error::UnsupportedForkVariant(format!( + _ => Err(Error::UnsupportedForkVariant(format!( "called get_payload_v3 with {}", fork_name ))), @@ -937,17 +962,40 @@ impl HttpJsonRpc { .try_into() .map_err(Error::BadResponse) } - ForkName::Base - | ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella - | ForkName::Deneb => Err(Error::UnsupportedForkVariant(format!( + _ => Err(Error::UnsupportedForkVariant(format!( "called get_payload_v4 with {}", fork_name ))), } } + pub async fn get_payload_v5( + &self, + fork_name: ForkName, + payload_id: PayloadId, + ) -> Result, Error> { + let params = json!([JsonPayloadIdRequest::from(payload_id)]); + + match fork_name { + ForkName::Fulu => { + let response: JsonGetPayloadResponseV5 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V5, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + JsonGetPayloadResponse::V5(response) + .try_into() + .map_err(Error::BadResponse) + } + _ => Err(Error::UnsupportedForkVariant(format!( + "called 
get_payload_v5 with {}", + fork_name + ))), + } + } + pub async fn forkchoice_updated_v1( &self, forkchoice_state: ForkchoiceState, @@ -1071,6 +1119,7 @@ impl HttpJsonRpc { new_payload_v2: capabilities.contains(ENGINE_NEW_PAYLOAD_V2), new_payload_v3: capabilities.contains(ENGINE_NEW_PAYLOAD_V3), new_payload_v4: capabilities.contains(ENGINE_NEW_PAYLOAD_V4), + new_payload_v5: capabilities.contains(ENGINE_NEW_PAYLOAD_V5), forkchoice_updated_v1: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V1), forkchoice_updated_v2: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V2), forkchoice_updated_v3: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V3), @@ -1082,6 +1131,7 @@ impl HttpJsonRpc { get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3), get_payload_v4: capabilities.contains(ENGINE_GET_PAYLOAD_V4), + get_payload_v5: capabilities.contains(ENGINE_GET_PAYLOAD_V5), get_client_version_v1: capabilities.contains(ENGINE_GET_CLIENT_VERSION_V1), get_blobs_v1: capabilities.contains(ENGINE_GET_BLOBS_V1), }) @@ -1212,6 +1262,13 @@ impl HttpJsonRpc { Err(Error::RequiredMethodUnsupported("engine_newPayloadV4")) } } + NewPayloadRequest::Fulu(new_payload_request_fulu) => { + if engine_capabilities.new_payload_v5 { + self.new_payload_v5_fulu(new_payload_request_fulu).await + } else { + Err(Error::RequiredMethodUnsupported("engine_newPayloadV5")) + } + } } } @@ -1247,6 +1304,13 @@ impl HttpJsonRpc { Err(Error::RequiredMethodUnsupported("engine_getPayloadv4")) } } + ForkName::Fulu => { + if engine_capabilities.get_payload_v5 { + self.get_payload_v5(fork_name, payload_id).await + } else { + Err(Error::RequiredMethodUnsupported("engine_getPayloadv5")) + } + } ForkName::Base | ForkName::Altair => Err(Error::UnsupportedForkVariant(format!( "called get_payload with {}", fork_name diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 
1c6639804e3..86acaaaf3bd 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -65,7 +65,7 @@ pub struct JsonPayloadIdResponse { } #[superstruct( - variants(V1, V2, V3, V4), + variants(V1, V2, V3, V4, V5), variant_attributes( derive(Debug, PartialEq, Default, Serialize, Deserialize,), serde(bound = "E: EthSpec", rename_all = "camelCase"), @@ -100,12 +100,12 @@ pub struct JsonExecutionPayload { pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, - #[superstruct(only(V2, V3, V4))] + #[superstruct(only(V2, V3, V4, V5))] pub withdrawals: VariableList, - #[superstruct(only(V3, V4))] + #[superstruct(only(V3, V4, V5))] #[serde(with = "serde_utils::u64_hex_be")] pub blob_gas_used: u64, - #[superstruct(only(V3, V4))] + #[superstruct(only(V3, V4, V5))] #[serde(with = "serde_utils::u64_hex_be")] pub excess_blob_gas: u64, } @@ -214,6 +214,35 @@ impl From> for JsonExecutionPayloadV4 } } +impl From> for JsonExecutionPayloadV5 { + fn from(payload: ExecutionPayloadFulu) -> Self { + JsonExecutionPayloadV5 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .map(Into::into) + .collect::>() + .into(), + blob_gas_used: payload.blob_gas_used, + excess_blob_gas: payload.excess_blob_gas, + } + } +} + impl From> for JsonExecutionPayload { fn from(execution_payload: ExecutionPayload) -> Self { match execution_payload { @@ 
-221,6 +250,7 @@ impl From> for JsonExecutionPayload { ExecutionPayload::Capella(payload) => JsonExecutionPayload::V2(payload.into()), ExecutionPayload::Deneb(payload) => JsonExecutionPayload::V3(payload.into()), ExecutionPayload::Electra(payload) => JsonExecutionPayload::V4(payload.into()), + ExecutionPayload::Fulu(payload) => JsonExecutionPayload::V5(payload.into()), } } } @@ -330,6 +360,35 @@ impl From> for ExecutionPayloadElectra } } +impl From> for ExecutionPayloadFulu { + fn from(payload: JsonExecutionPayloadV5) -> Self { + ExecutionPayloadFulu { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .map(Into::into) + .collect::>() + .into(), + blob_gas_used: payload.blob_gas_used, + excess_blob_gas: payload.excess_blob_gas, + } + } +} + impl From> for ExecutionPayload { fn from(json_execution_payload: JsonExecutionPayload) -> Self { match json_execution_payload { @@ -337,6 +396,7 @@ impl From> for ExecutionPayload { JsonExecutionPayload::V2(payload) => ExecutionPayload::Capella(payload.into()), JsonExecutionPayload::V3(payload) => ExecutionPayload::Deneb(payload.into()), JsonExecutionPayload::V4(payload) => ExecutionPayload::Electra(payload.into()), + JsonExecutionPayload::V5(payload) => ExecutionPayload::Fulu(payload.into()), } } } @@ -389,7 +449,7 @@ impl TryFrom for ExecutionRequests { } #[superstruct( - variants(V1, V2, V3, V4), + variants(V1, V2, V3, V4, V5), variant_attributes( derive(Debug, PartialEq, Serialize, Deserialize), serde(bound = "E: EthSpec", 
rename_all = "camelCase") @@ -408,13 +468,15 @@ pub struct JsonGetPayloadResponse { pub execution_payload: JsonExecutionPayloadV3, #[superstruct(only(V4), partial_getter(rename = "execution_payload_v4"))] pub execution_payload: JsonExecutionPayloadV4, + #[superstruct(only(V5), partial_getter(rename = "execution_payload_v5"))] + pub execution_payload: JsonExecutionPayloadV5, #[serde(with = "serde_utils::u256_hex_be")] pub block_value: Uint256, - #[superstruct(only(V3, V4))] + #[superstruct(only(V3, V4, V5))] pub blobs_bundle: JsonBlobsBundleV1, - #[superstruct(only(V3, V4))] + #[superstruct(only(V3, V4, V5))] pub should_override_builder: bool, - #[superstruct(only(V4))] + #[superstruct(only(V4, V5))] pub execution_requests: JsonExecutionRequests, } @@ -451,6 +513,15 @@ impl TryFrom> for GetPayloadResponse { requests: response.execution_requests.try_into()?, })) } + JsonGetPayloadResponse::V5(response) => { + Ok(GetPayloadResponse::Fulu(GetPayloadResponseFulu { + execution_payload: response.execution_payload.into(), + block_value: response.block_value, + blobs_bundle: response.blobs_bundle.into(), + should_override_builder: response.should_override_builder, + requests: response.execution_requests.try_into()?, + })) + } } } } diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index 60bc8489744..a86b2fd9bbf 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -9,11 +9,11 @@ use types::{ }; use types::{ ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, ExecutionRequests, + ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionRequests, }; #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes(derive(Clone, Debug, PartialEq),), 
map_into(ExecutionPayload), map_ref_into(ExecutionPayloadRef), @@ -39,11 +39,13 @@ pub struct NewPayloadRequest<'block, E: EthSpec> { pub execution_payload: &'block ExecutionPayloadDeneb, #[superstruct(only(Electra), partial_getter(rename = "execution_payload_electra"))] pub execution_payload: &'block ExecutionPayloadElectra, - #[superstruct(only(Deneb, Electra))] + #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_fulu"))] + pub execution_payload: &'block ExecutionPayloadFulu, + #[superstruct(only(Deneb, Electra, Fulu))] pub versioned_hashes: Vec, - #[superstruct(only(Deneb, Electra))] + #[superstruct(only(Deneb, Electra, Fulu))] pub parent_beacon_block_root: Hash256, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub execution_requests: &'block ExecutionRequests, } @@ -54,6 +56,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { Self::Capella(payload) => payload.execution_payload.parent_hash, Self::Deneb(payload) => payload.execution_payload.parent_hash, Self::Electra(payload) => payload.execution_payload.parent_hash, + Self::Fulu(payload) => payload.execution_payload.parent_hash, } } @@ -63,6 +66,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { Self::Capella(payload) => payload.execution_payload.block_hash, Self::Deneb(payload) => payload.execution_payload.block_hash, Self::Electra(payload) => payload.execution_payload.block_hash, + Self::Fulu(payload) => payload.execution_payload.block_hash, } } @@ -72,6 +76,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { Self::Capella(payload) => payload.execution_payload.block_number, Self::Deneb(payload) => payload.execution_payload.block_number, Self::Electra(payload) => payload.execution_payload.block_number, + Self::Fulu(payload) => payload.execution_payload.block_number, } } @@ -81,6 +86,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { Self::Capella(request) => ExecutionPayloadRef::Capella(request.execution_payload), 
Self::Deneb(request) => ExecutionPayloadRef::Deneb(request.execution_payload), Self::Electra(request) => ExecutionPayloadRef::Electra(request.execution_payload), + Self::Fulu(request) => ExecutionPayloadRef::Fulu(request.execution_payload), } } @@ -92,6 +98,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { Self::Capella(request) => ExecutionPayload::Capella(request.execution_payload.clone()), Self::Deneb(request) => ExecutionPayload::Deneb(request.execution_payload.clone()), Self::Electra(request) => ExecutionPayload::Electra(request.execution_payload.clone()), + Self::Fulu(request) => ExecutionPayload::Fulu(request.execution_payload.clone()), } } @@ -190,6 +197,17 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> parent_beacon_block_root: block_ref.parent_root, execution_requests: &block_ref.body.execution_requests, })), + BeaconBlockRef::Fulu(block_ref) => Ok(Self::Fulu(NewPayloadRequestFulu { + execution_payload: &block_ref.body.execution_payload.execution_payload, + versioned_hashes: block_ref + .body + .blob_kzg_commitments + .iter() + .map(kzg_commitment_to_versioned_hash) + .collect(), + parent_beacon_block_root: block_ref.parent_root, + execution_requests: &block_ref.body.execution_requests, + })), } } } @@ -209,6 +227,7 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<' })), ExecutionPayloadRef::Deneb(_) => Err(Self::Error::IncorrectStateVariant), ExecutionPayloadRef::Electra(_) => Err(Self::Error::IncorrectStateVariant), + ExecutionPayloadRef::Fulu(_) => Err(Self::Error::IncorrectStateVariant), } } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index f3b12b21d17..118d7adfcaa 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -54,8 +54,8 @@ use types::{ }; use types::{ BeaconStateError, BlindedPayload, ChainSpec, Epoch, ExecPayload, ExecutionPayloadBellatrix, - ExecutionPayloadCapella, ExecutionPayloadElectra, FullPayload, 
ProposerPreparationData, - PublicKeyBytes, Signature, Slot, + ExecutionPayloadCapella, ExecutionPayloadElectra, ExecutionPayloadFulu, FullPayload, + ProposerPreparationData, PublicKeyBytes, Signature, Slot, }; mod block_hash; @@ -124,6 +124,14 @@ impl TryFrom> for ProvenancedPayload BlockProposalContents::PayloadAndBlobs { + payload: ExecutionPayloadHeader::Fulu(builder_bid.header).into(), + block_value: builder_bid.value, + kzg_commitments: builder_bid.blob_kzg_commitments, + blobs_and_proofs: None, + // TODO(fulu): update this with builder api returning the requests + requests: None, + }, }; Ok(ProvenancedPayload::Builder( BlockProposalContentsType::Blinded(block_proposal_contents), @@ -1821,6 +1829,7 @@ impl ExecutionLayer { ForkName::Capella => ExecutionPayloadCapella::default().into(), ForkName::Deneb => ExecutionPayloadDeneb::default().into(), ForkName::Electra => ExecutionPayloadElectra::default().into(), + ForkName::Fulu => ExecutionPayloadFulu::default().into(), ForkName::Base | ForkName::Altair => { return Err(Error::InvalidForkForPayload); } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 4fab7150ce3..2a39796707b 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -19,7 +19,7 @@ use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use types::{ Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, - ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, + ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadHeader, FixedBytesExtended, ForkName, Hash256, Transaction, Transactions, Uint256, }; @@ -147,6 +147,7 @@ pub struct ExecutionBlockGenerator { pub shanghai_time: Option, // capella pub cancun_time: Option, // 
deneb pub prague_time: Option, // electra + pub osaka_time: Option, // fulu /* * deneb stuff */ @@ -162,6 +163,7 @@ fn make_rng() -> Arc> { } impl ExecutionBlockGenerator { + #[allow(clippy::too_many_arguments)] pub fn new( terminal_total_difficulty: Uint256, terminal_block_number: u64, @@ -169,6 +171,7 @@ impl ExecutionBlockGenerator { shanghai_time: Option, cancun_time: Option, prague_time: Option, + osaka_time: Option, kzg: Option>, ) -> Self { let mut gen = Self { @@ -185,6 +188,7 @@ impl ExecutionBlockGenerator { shanghai_time, cancun_time, prague_time, + osaka_time, blobs_bundles: <_>::default(), kzg, rng: make_rng(), @@ -233,13 +237,16 @@ impl ExecutionBlockGenerator { } pub fn get_fork_at_timestamp(&self, timestamp: u64) -> ForkName { - match self.prague_time { - Some(fork_time) if timestamp >= fork_time => ForkName::Electra, - _ => match self.cancun_time { - Some(fork_time) if timestamp >= fork_time => ForkName::Deneb, - _ => match self.shanghai_time { - Some(fork_time) if timestamp >= fork_time => ForkName::Capella, - _ => ForkName::Bellatrix, + match self.osaka_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Fulu, + _ => match self.prague_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Electra, + _ => match self.cancun_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Deneb, + _ => match self.shanghai_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Capella, + _ => ForkName::Bellatrix, + }, }, }, } @@ -664,6 +671,25 @@ impl ExecutionBlockGenerator { blob_gas_used: 0, excess_blob_gas: 0, }), + ForkName::Fulu => ExecutionPayload::Fulu(ExecutionPayloadFulu { + parent_hash: head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: DEFAULT_GAS_LIMIT, + gas_used: GAS_USED, + timestamp: 
pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::from(1u64), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + withdrawals: pa.withdrawals.clone().into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), _ => unreachable!(), }, }; @@ -811,6 +837,12 @@ pub fn generate_genesis_header( *header.transactions_root_mut() = empty_transactions_root; Some(header) } + ForkName::Fulu => { + let mut header = ExecutionPayloadHeader::Fulu(<_>::default()); + *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); + *header.transactions_root_mut() = empty_transactions_root; + Some(header) + } } } @@ -883,6 +915,7 @@ mod test { None, None, None, + None, ); for i in 0..=TERMINAL_BLOCK { diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 9365024ffb7..0babb9d1a3e 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -99,7 +99,8 @@ pub async fn handle_rpc( ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 | ENGINE_NEW_PAYLOAD_V3 - | ENGINE_NEW_PAYLOAD_V4 => { + | ENGINE_NEW_PAYLOAD_V4 + | ENGINE_NEW_PAYLOAD_V5 => { let request = match method { ENGINE_NEW_PAYLOAD_V1 => JsonExecutionPayload::V1( get_param::>(params, 0) @@ -121,6 +122,9 @@ pub async fn handle_rpc( ENGINE_NEW_PAYLOAD_V4 => get_param::>(params, 0) .map(|jep| JsonExecutionPayload::V4(jep)) .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, + ENGINE_NEW_PAYLOAD_V5 => get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V5(jep)) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, _ => unreachable!(), }; @@ -222,6 +226,54 @@ pub async fn handle_rpc( )); } } + ForkName::Fulu => { + if method == ENGINE_NEW_PAYLOAD_V1 + || method == ENGINE_NEW_PAYLOAD_V2 + || method == ENGINE_NEW_PAYLOAD_V3 + || method == ENGINE_NEW_PAYLOAD_V4 + { + return Err(( + format!("{} called after Fulu fork!", 
method), + GENERIC_ERROR_CODE, + )); + } + if matches!(request, JsonExecutionPayload::V1(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV1` after Fulu fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + if matches!(request, JsonExecutionPayload::V2(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV2` after Fulu fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + if matches!(request, JsonExecutionPayload::V3(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV3` after Fulu fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + if matches!(request, JsonExecutionPayload::V4(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV4` after Fulu fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + } _ => unreachable!(), }; @@ -260,7 +312,8 @@ pub async fn handle_rpc( ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 | ENGINE_GET_PAYLOAD_V3 - | ENGINE_GET_PAYLOAD_V4 => { + | ENGINE_GET_PAYLOAD_V4 + | ENGINE_GET_PAYLOAD_V5 => { let request: JsonPayloadIdRequest = get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let id = request.into(); @@ -320,6 +373,23 @@ pub async fn handle_rpc( )); } + // validate method called correctly according to fulu fork time + if ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(response.timestamp()) + == ForkName::Fulu + && (method == ENGINE_GET_PAYLOAD_V1 + || method == ENGINE_GET_PAYLOAD_V2 + || method == ENGINE_GET_PAYLOAD_V3 + || method == ENGINE_GET_PAYLOAD_V4) + { + return Err(( + format!("{} called after Fulu fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } + match method { ENGINE_GET_PAYLOAD_V1 => { Ok(serde_json::to_value(JsonExecutionPayload::from(response)).unwrap()) @@ -380,6 +450,24 @@ pub async fn handle_rpc( } _ => unreachable!(), }), + ENGINE_GET_PAYLOAD_V5 => Ok(match JsonExecutionPayload::from(response) { + JsonExecutionPayload::V5(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseV5 { + 
execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + blobs_bundle: maybe_blobs + .ok_or(( + "No blobs returned despite V5 Payload".to_string(), + GENERIC_ERROR_CODE, + ))? + .into(), + should_override_builder: false, + execution_requests: Default::default(), + }) + .unwrap() + } + _ => unreachable!(), + }), _ => unreachable!(), } } @@ -411,7 +499,10 @@ pub async fn handle_rpc( .map(|opt| opt.map(JsonPayloadAttributes::V1)) .transpose() } - ForkName::Capella | ForkName::Deneb | ForkName::Electra => { + ForkName::Capella + | ForkName::Deneb + | ForkName::Electra + | ForkName::Fulu => { get_param::>(params, 1) .map(|opt| opt.map(JsonPayloadAttributes::V2)) .transpose() @@ -475,7 +566,7 @@ pub async fn handle_rpc( )); } } - ForkName::Deneb | ForkName::Electra => { + ForkName::Deneb | ForkName::Electra | ForkName::Fulu => { if method == ENGINE_FORKCHOICE_UPDATED_V1 { return Err(( format!("{} called after Deneb fork!", method), diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 879b54eb075..65181dcf4f2 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -16,7 +16,7 @@ use tempfile::NamedTempFile; use tree_hash::TreeHash; use types::builder_bid::{ BuilderBid, BuilderBidBellatrix, BuilderBidCapella, BuilderBidDeneb, BuilderBidElectra, - SignedBuilderBid, + BuilderBidFulu, SignedBuilderBid, }; use types::{ Address, BeaconState, ChainSpec, EthSpec, ExecPayload, ExecutionPayload, @@ -95,6 +95,9 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Electra(header) => { header.fee_recipient = fee_recipient; } + ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.fee_recipient = fee_recipient; + } } } @@ -112,6 +115,9 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Electra(header) => { header.gas_limit = gas_limit; } + 
ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.gas_limit = gas_limit; + } } } @@ -133,6 +139,9 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Electra(header) => { header.parent_hash = ExecutionBlockHash::from_root(parent_hash); } + ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.parent_hash = ExecutionBlockHash::from_root(parent_hash); + } } } @@ -150,6 +159,9 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Electra(header) => { header.prev_randao = prev_randao; } + ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.prev_randao = prev_randao; + } } } @@ -167,6 +179,9 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Electra(header) => { header.block_number = block_number; } + ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.block_number = block_number; + } } } @@ -184,6 +199,9 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Electra(header) => { header.timestamp = timestamp; } + ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.timestamp = timestamp; + } } } @@ -201,6 +219,9 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Electra(header) => { header.withdrawals_root = withdrawals_root; } + ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.withdrawals_root = withdrawals_root; + } } } @@ -230,6 +251,10 @@ impl BidStuff for BuilderBid { header.extra_data = extra_data; header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); } + ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.extra_data = extra_data; + header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); + } } } } @@ -378,6 +403,9 @@ pub fn serve( SignedBlindedBeaconBlock::Electra(block) => { block.message.body.execution_payload.tree_hash_root() } + SignedBlindedBeaconBlock::Fulu(block) => { + block.message.body.execution_payload.tree_hash_root() + } }; let payload = builder .el @@ -536,7 +564,7 @@ pub fn serve( expected_withdrawals, None, ), - 
ForkName::Deneb | ForkName::Electra => PayloadAttributes::new( + ForkName::Deneb | ForkName::Electra | ForkName::Fulu => PayloadAttributes::new( timestamp, *prev_randao, fee_recipient, @@ -592,6 +620,17 @@ pub fn serve( ) = payload_response.into(); match fork { + ForkName::Fulu => BuilderBid::Fulu(BuilderBidFulu { + header: payload + .as_fulu() + .map_err(|_| reject("incorrect payload variant"))? + .into(), + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) + .unwrap_or_default(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), ForkName::Electra => BuilderBid::Electra(BuilderBidElectra { header: payload .as_electra() @@ -644,6 +683,17 @@ pub fn serve( Option>, ) = payload_response.into(); match fork { + ForkName::Fulu => BuilderBid::Fulu(BuilderBidFulu { + header: payload + .as_fulu() + .map_err(|_| reject("incorrect payload variant"))? + .into(), + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) + .unwrap_or_default(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), ForkName::Electra => BuilderBid::Electra(BuilderBidElectra { header: payload .as_electra() diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index dc90d91c0f2..9df8d9cc5cf 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -28,6 +28,7 @@ impl MockExecutionLayer { None, None, None, + None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec, None, @@ -41,6 +42,7 @@ impl MockExecutionLayer { shanghai_time: Option, cancun_time: Option, prague_time: Option, + osaka_time: Option, jwt_key: Option, spec: ChainSpec, kzg: Option>, @@ -57,6 +59,7 @@ impl MockExecutionLayer { shanghai_time, cancun_time, prague_time, + 
osaka_time, kzg, ); diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index faf6d4ef0b6..5934c069a2f 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -44,6 +44,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { new_payload_v2: true, new_payload_v3: true, new_payload_v4: true, + new_payload_v5: true, forkchoice_updated_v1: true, forkchoice_updated_v2: true, forkchoice_updated_v3: true, @@ -53,6 +54,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { get_payload_v2: true, get_payload_v3: true, get_payload_v4: true, + get_payload_v5: true, get_client_version_v1: true, get_blobs_v1: true, }; @@ -82,6 +84,7 @@ pub struct MockExecutionConfig { pub shanghai_time: Option, pub cancun_time: Option, pub prague_time: Option, + pub osaka_time: Option, } impl Default for MockExecutionConfig { @@ -95,6 +98,7 @@ impl Default for MockExecutionConfig { shanghai_time: None, cancun_time: None, prague_time: None, + osaka_time: None, } } } @@ -117,6 +121,7 @@ impl MockServer { None, // FIXME(capella): should this be the default? None, // FIXME(deneb): should this be the default? None, // FIXME(electra): should this be the default? + None, // FIXME(fulu): should this be the default? 
None, ) } @@ -135,6 +140,7 @@ impl MockServer { shanghai_time, cancun_time, prague_time, + osaka_time, } = config; let last_echo_request = Arc::new(RwLock::new(None)); let preloaded_responses = Arc::new(Mutex::new(vec![])); @@ -145,6 +151,7 @@ impl MockServer { shanghai_time, cancun_time, prague_time, + osaka_time, kzg, ); @@ -208,6 +215,7 @@ impl MockServer { shanghai_time: Option, cancun_time: Option, prague_time: Option, + osaka_time: Option, kzg: Option>, ) -> Self { Self::new_with_config( @@ -221,6 +229,7 @@ impl MockServer { shanghai_time, cancun_time, prague_time, + osaka_time, }, kzg, ) diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index e1ecf2d4fc3..8e0a51a32a2 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -75,7 +75,7 @@ pub async fn gossip_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) + .post_beacon_blocks_v2_ssz(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -124,7 +124,7 @@ pub async fn gossip_partial_pass() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) + .post_beacon_blocks_v2_ssz(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -165,7 +165,7 @@ pub async fn gossip_full_pass() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block.clone(), blobs), validation_level, ) @@ -261,7 +261,7 @@ pub async fn consensus_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) + 
.post_beacon_blocks_v2_ssz(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -307,7 +307,7 @@ pub async fn consensus_gossip() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) + .post_beacon_blocks_v2_ssz(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -423,7 +423,7 @@ pub async fn consensus_full_pass() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block.clone(), blobs), validation_level, ) @@ -475,7 +475,7 @@ pub async fn equivocation_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) + .post_beacon_blocks_v2_ssz(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -533,7 +533,7 @@ pub async fn equivocation_consensus_early_equivocation() { /* submit `block_a` as valid */ assert!(tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block_a.clone(), blobs_a), validation_level ) @@ -547,7 +547,7 @@ pub async fn equivocation_consensus_early_equivocation() { /* submit `block_b` which should induce equivocation */ let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block_b.clone(), blobs_b), validation_level, ) @@ -596,7 +596,7 @@ pub async fn equivocation_gossip() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) + .post_beacon_blocks_v2_ssz(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -721,7 +721,7 @@ pub async fn equivocation_full_pass() { let response: Result<(), eth2::Error> = 
tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block.clone(), blobs), validation_level, ) @@ -1413,7 +1413,7 @@ pub async fn block_seen_on_gossip_without_blobs() { // Post the block *and* blobs to the HTTP API. let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block.clone(), Some(blobs)), validation_level, ) @@ -1498,7 +1498,7 @@ pub async fn block_seen_on_gossip_with_some_blobs() { // Post the block *and* all blobs to the HTTP API. let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block.clone(), Some(blobs)), validation_level, ) @@ -1571,7 +1571,7 @@ pub async fn blobs_seen_on_gossip_without_block() { // Post the block *and* all blobs to the HTTP API. let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block.clone(), Some((kzg_proofs, blobs))), validation_level, ) @@ -1645,7 +1645,7 @@ pub async fn blobs_seen_on_gossip_without_block_and_no_http_blobs() { // Post just the block to the HTTP API (blob lists are empty). let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new( block.clone(), Some((Default::default(), Default::default())), @@ -1717,7 +1717,7 @@ pub async fn slashable_blobs_seen_on_gossip_cause_failure() { // Post block A *and* all its blobs to the HTTP API. 
let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block_a.clone(), Some((kzg_proofs_a, blobs_a))), validation_level, ) @@ -1778,7 +1778,7 @@ pub async fn duplicate_block_status_code() { let block_request = PublishBlockRequest::new(block.clone(), Some((kzg_proofs, blobs))); let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block_request, validation_level) + .post_beacon_blocks_v2_ssz(&block_request, validation_level) .await; // This should result in the block being fully imported. @@ -1791,7 +1791,7 @@ pub async fn duplicate_block_status_code() { // Post again. let duplicate_response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block_request, validation_level) + .post_beacon_blocks_v2_ssz(&block_request, validation_level) .await; let err = duplicate_response.unwrap_err(); assert_eq!(err.status().unwrap(), duplicate_block_status_code); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 1efe44a613c..85d3b4e9bae 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -2232,9 +2232,9 @@ impl ApiTester { pub async fn test_get_config_spec(self) -> Self { let result = self .client - .get_config_spec::() + .get_config_spec::() .await - .map(|res| ConfigAndPreset::Electra(res.data)) + .map(|res| ConfigAndPreset::Fulu(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&self.chain.spec, None); diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 5d86936d41d..c3d20bbfb17 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -20,7 +20,7 @@ use types::{ LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, 
SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, - SignedBeaconBlockDeneb, SignedBeaconBlockElectra, + SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, }; use unsigned_varint::codec::Uvi; @@ -458,6 +458,9 @@ fn context_bytes( return match **ref_box_block { // NOTE: If you are adding another fork type here, be sure to modify the // `fork_context.to_context_bytes()` function to support it as well! + SignedBeaconBlock::Fulu { .. } => { + fork_context.to_context_bytes(ForkName::Fulu) + } SignedBeaconBlock::Electra { .. } => { fork_context.to_context_bytes(ForkName::Electra) } @@ -682,18 +685,18 @@ fn handle_rpc_response( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), SupportedProtocol::BlobsByRangeV1 => match fork_name { - Some(ForkName::Deneb) | Some(ForkName::Electra) => { - Ok(Some(RpcSuccessResponse::BlobsByRange(Arc::new( - BlobSidecar::from_ssz_bytes(decoded_buffer)?, - )))) + Some(fork_name) => { + if fork_name.deneb_enabled() { + Ok(Some(RpcSuccessResponse::BlobsByRange(Arc::new( + BlobSidecar::from_ssz_bytes(decoded_buffer)?, + )))) + } else { + Err(RPCError::ErrorResponse( + RpcErrorResponse::InvalidRequest, + "Invalid fork name for blobs by range".to_string(), + )) + } } - Some(ForkName::Base) - | Some(ForkName::Altair) - | Some(ForkName::Bellatrix) - | Some(ForkName::Capella) => Err(RPCError::ErrorResponse( - RpcErrorResponse::InvalidRequest, - "Invalid fork name for blobs by range".to_string(), - )), None => Err(RPCError::ErrorResponse( RpcErrorResponse::InvalidRequest, format!( @@ -703,18 +706,18 @@ fn handle_rpc_response( )), }, SupportedProtocol::BlobsByRootV1 => match fork_name { - Some(ForkName::Deneb) | Some(ForkName::Electra) => { - Ok(Some(RpcSuccessResponse::BlobsByRoot(Arc::new( - BlobSidecar::from_ssz_bytes(decoded_buffer)?, - )))) + Some(fork_name) => { + if fork_name.deneb_enabled() { + Ok(Some(RpcSuccessResponse::BlobsByRoot(Arc::new( + 
BlobSidecar::from_ssz_bytes(decoded_buffer)?, + )))) + } else { + Err(RPCError::ErrorResponse( + RpcErrorResponse::InvalidRequest, + "Invalid fork name for blobs by root".to_string(), + )) + } } - Some(ForkName::Base) - | Some(ForkName::Altair) - | Some(ForkName::Bellatrix) - | Some(ForkName::Capella) => Err(RPCError::ErrorResponse( - RpcErrorResponse::InvalidRequest, - "Invalid fork name for blobs by root".to_string(), - )), None => Err(RPCError::ErrorResponse( RpcErrorResponse::InvalidRequest, format!( @@ -864,6 +867,9 @@ fn handle_rpc_response( decoded_buffer, )?), )))), + Some(ForkName::Fulu) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Fulu(SignedBeaconBlockFulu::from_ssz_bytes(decoded_buffer)?), + )))), None => Err(RPCError::ErrorResponse( RpcErrorResponse::InvalidRequest, format!( @@ -897,6 +903,9 @@ fn handle_rpc_response( decoded_buffer, )?), )))), + Some(ForkName::Fulu) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( + SignedBeaconBlock::Fulu(SignedBeaconBlockFulu::from_ssz_bytes(decoded_buffer)?), + )))), None => Err(RPCError::ErrorResponse( RpcErrorResponse::InvalidRequest, format!( @@ -948,12 +957,14 @@ mod tests { let capella_fork_epoch = Epoch::new(3); let deneb_fork_epoch = Epoch::new(4); let electra_fork_epoch = Epoch::new(5); + let fulu_fork_epoch = Epoch::new(6); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); chain_spec.capella_fork_epoch = Some(capella_fork_epoch); chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); chain_spec.electra_fork_epoch = Some(electra_fork_epoch); + chain_spec.fulu_fork_epoch = Some(fulu_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), @@ -962,6 +973,7 @@ mod tests { ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Deneb => deneb_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Electra => 
electra_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Fulu => fulu_fork_epoch.start_slot(Spec::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } @@ -1396,6 +1408,16 @@ mod tests { Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))), ); + assert_eq!( + encode_then_decode_response( + SupportedProtocol::BlobsByRangeV1, + RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar())), + ForkName::Fulu, + &chain_spec + ), + Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))), + ); + assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRootV1, @@ -1416,6 +1438,16 @@ mod tests { Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))), ); + assert_eq!( + encode_then_decode_response( + SupportedProtocol::BlobsByRootV1, + RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar())), + ForkName::Fulu, + &chain_spec + ), + Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))), + ); + assert_eq!( encode_then_decode_response( SupportedProtocol::DataColumnsByRangeV1, @@ -1444,6 +1476,20 @@ mod tests { ))), ); + assert_eq!( + encode_then_decode_response( + SupportedProtocol::DataColumnsByRangeV1, + RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange( + empty_data_column_sidecar() + )), + ForkName::Fulu, + &chain_spec + ), + Ok(Some(RpcSuccessResponse::DataColumnsByRange( + empty_data_column_sidecar() + ))), + ); + assert_eq!( encode_then_decode_response( SupportedProtocol::DataColumnsByRootV1, @@ -1471,6 +1517,20 @@ mod tests { empty_data_column_sidecar() ))), ); + + assert_eq!( + encode_then_decode_response( + SupportedProtocol::DataColumnsByRootV1, + RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot( + empty_data_column_sidecar() + )), + ForkName::Fulu, + &chain_spec + ), + Ok(Some(RpcSuccessResponse::DataColumnsByRoot( + empty_data_column_sidecar() + ))), + ); } // Test RPCResponse encoding/decoding for V1 messages diff 
--git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 57c2795b04f..87bde58292b 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -18,9 +18,9 @@ use tokio_util::{ }; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockElectra, - BlobSidecar, ChainSpec, DataColumnSidecar, EmptyBlock, EthSpec, EthSpecId, ForkContext, - ForkName, LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, - LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, + BeaconBlockFulu, BlobSidecar, ChainSpec, DataColumnSidecar, EmptyBlock, EthSpec, EthSpecId, + ForkContext, ForkName, LightClientBootstrap, LightClientBootstrapAltair, + LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, }; @@ -73,6 +73,15 @@ pub static SIGNED_BEACON_BLOCK_ELECTRA_MAX_WITHOUT_PAYLOAD: LazyLock = La .len() }); +pub static SIGNED_BEACON_BLOCK_FULU_MAX_WITHOUT_PAYLOAD: LazyLock = LazyLock::new(|| { + SignedBeaconBlock::::from_block( + BeaconBlock::Fulu(BeaconBlockFulu::full(&MainnetEthSpec::default_spec())), + Signature::empty(), + ) + .as_ssz_bytes() + .len() +}); + /// The `BeaconBlockBellatrix` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. /// We calculate the value from its fields instead of constructing the block and checking the length. /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network @@ -105,6 +114,15 @@ pub static SIGNED_BEACON_BLOCK_ELECTRA_MAX: LazyLock = LazyLock::new(|| { + ssz::BYTES_PER_LENGTH_OFFSET }); // Length offset for the blob commitments field. 
+pub static SIGNED_BEACON_BLOCK_FULU_MAX: LazyLock = LazyLock::new(|| { + *SIGNED_BEACON_BLOCK_FULU_MAX_WITHOUT_PAYLOAD + + types::ExecutionPayload::::max_execution_payload_fulu_size() + + ssz::BYTES_PER_LENGTH_OFFSET + + (::ssz_fixed_len() + * ::max_blobs_per_block()) + + ssz::BYTES_PER_LENGTH_OFFSET +}); + pub static BLOB_SIDECAR_SIZE: LazyLock = LazyLock::new(BlobSidecar::::max_size); @@ -209,6 +227,10 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and bellatrix blocks *SIGNED_BEACON_BLOCK_ELECTRA_MAX, // Electra block is larger than Deneb block ), + ForkName::Fulu => RpcLimits::new( + *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than all other blocks + *SIGNED_BEACON_BLOCK_FULU_MAX, // Fulu block is largest + ), } } @@ -226,7 +248,7 @@ fn rpc_light_client_updates_by_range_limits_by_fork(current_fork: ForkName) -> R ForkName::Deneb => { RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_DENEB_MAX) } - ForkName::Electra => { + ForkName::Electra | ForkName::Fulu => { RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_ELECTRA_MAX) } } @@ -246,7 +268,7 @@ fn rpc_light_client_finality_update_limits_by_fork(current_fork: ForkName) -> Rp ForkName::Deneb => { RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_FINALITY_UPDATE_DENEB_MAX) } - ForkName::Electra => { + ForkName::Electra | ForkName::Fulu => { RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_FINALITY_UPDATE_ELECTRA_MAX) } } @@ -267,7 +289,7 @@ fn rpc_light_client_optimistic_update_limits_by_fork(current_fork: ForkName) -> ForkName::Deneb => { RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_OPTIMISTIC_UPDATE_DENEB_MAX) } - ForkName::Electra => RpcLimits::new( + ForkName::Electra | ForkName::Fulu => RpcLimits::new( altair_fixed_len, *LIGHT_CLIENT_OPTIMISTIC_UPDATE_ELECTRA_MAX, ), @@ -284,7 +306,9 @@ fn rpc_light_client_bootstrap_limits_by_fork(current_fork: ForkName) -> RpcLimit } 
ForkName::Capella => RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_BOOTSTRAP_CAPELLA_MAX), ForkName::Deneb => RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_BOOTSTRAP_DENEB_MAX), - ForkName::Electra => RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_BOOTSTRAP_ELECTRA_MAX), + ForkName::Electra | ForkName::Fulu => { + RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_BOOTSTRAP_ELECTRA_MAX) + } } } diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 9f68278e284..c9769594708 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -13,8 +13,9 @@ use types::{ ProposerSlashing, SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, - SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, + SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -242,6 +243,10 @@ impl PubsubMessage { SignedBeaconBlockElectra::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), + Some(ForkName::Fulu) => SignedBeaconBlock::::Fulu( + SignedBeaconBlockFulu::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), None => { return Err(format!( "Unknown gossipsub fork digest: {:?}", diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 174787f999c..8cdecc6bfa2 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -61,6 +61,7 @@ pub fn fork_core_topics(fork_name: 
&ForkName, spec: &ChainSpec) -> V deneb_topics } ForkName::Electra => vec![], + ForkName::Fulu => vec![], } } diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 84e19c81d04..6a3ec6dd322 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -25,12 +25,14 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext { let capella_fork_epoch = Epoch::new(3); let deneb_fork_epoch = Epoch::new(4); let electra_fork_epoch = Epoch::new(5); + let fulu_fork_epoch = Epoch::new(6); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); chain_spec.capella_fork_epoch = Some(capella_fork_epoch); chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); chain_spec.electra_fork_epoch = Some(electra_fork_epoch); + chain_spec.fulu_fork_epoch = Some(fulu_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), @@ -39,6 +41,7 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext { ForkName::Capella => capella_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Deneb => deneb_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Electra => electra_fork_epoch.start_slot(E::slots_per_epoch()), + ForkName::Fulu => fulu_fork_epoch.start_slot(E::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 94aacad3e81..a43b3bd022f 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -174,7 +174,7 @@ impl TestRig { } pub fn after_deneb(&self) -> bool { - matches!(self.fork_name, ForkName::Deneb | ForkName::Electra) + self.fork_name.deneb_enabled() } fn trigger_unknown_parent_block(&mut self, peer_id: PeerId, block: Arc>) { diff --git a/beacon_node/operation_pool/src/lib.rs 
b/beacon_node/operation_pool/src/lib.rs index d8183de7529..835133a0598 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1239,14 +1239,11 @@ mod release_tests { let stats = op_pool.attestation_stats(); let fork_name = state.fork_name_unchecked(); - match fork_name { - ForkName::Electra => { - assert_eq!(stats.num_attestation_data, 1); - } - _ => { - assert_eq!(stats.num_attestation_data, committees.len()); - } - }; + if fork_name.electra_enabled() { + assert_eq!(stats.num_attestation_data, 1); + } else { + assert_eq!(stats.num_attestation_data, committees.len()); + } assert_eq!( stats.num_attestations, @@ -1258,25 +1255,19 @@ mod release_tests { let best_attestations = op_pool .get_attestations(&state, |_| true, |_| true, spec) .expect("should have best attestations"); - match fork_name { - ForkName::Electra => { - assert_eq!(best_attestations.len(), 8); - } - _ => { - assert_eq!(best_attestations.len(), max_attestations); - } - }; + if fork_name.electra_enabled() { + assert_eq!(best_attestations.len(), 8); + } else { + assert_eq!(best_attestations.len(), max_attestations); + } // All the best attestations should be signed by at least `big_step_size` (4) validators. 
for att in &best_attestations { - match fork_name { - ForkName::Electra => { - assert!(att.num_set_aggregation_bits() >= small_step_size); - } - _ => { - assert!(att.num_set_aggregation_bits() >= big_step_size); - } - }; + if fork_name.electra_enabled() { + assert!(att.num_set_aggregation_bits() >= small_step_size); + } else { + assert!(att.num_set_aggregation_bits() >= big_step_size); + } } } @@ -1357,17 +1348,14 @@ mod release_tests { let num_big = target_committee_size / big_step_size; let fork_name = state.fork_name_unchecked(); - match fork_name { - ForkName::Electra => { - assert_eq!(op_pool.attestation_stats().num_attestation_data, 1); - } - _ => { - assert_eq!( - op_pool.attestation_stats().num_attestation_data, - committees.len() - ); - } - }; + if fork_name.electra_enabled() { + assert_eq!(op_pool.attestation_stats().num_attestation_data, 1); + } else { + assert_eq!( + op_pool.attestation_stats().num_attestation_data, + committees.len() + ); + } assert_eq!( op_pool.num_attestations(), @@ -1380,14 +1368,11 @@ mod release_tests { .get_attestations(&state, |_| true, |_| true, spec) .expect("should have valid best attestations"); - match fork_name { - ForkName::Electra => { - assert_eq!(best_attestations.len(), 8); - } - _ => { - assert_eq!(best_attestations.len(), max_attestations); - } - }; + if fork_name.electra_enabled() { + assert_eq!(best_attestations.len(), 8); + } else { + assert_eq!(best_attestations.len(), max_attestations); + } let total_active_balance = state.get_total_active_balance().unwrap(); diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index cca617d8c61..0c4cbf0f579 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -238,6 +238,7 @@ mod test { spec.bellatrix_fork_epoch = Some(Epoch::new(256)); spec.deneb_fork_epoch = Some(Epoch::new(257)); spec.electra_fork_epoch = None; + spec.fulu_fork_epoch = None; let result = validator_fork_epochs(&spec); assert_eq!( result, diff --git 
a/beacon_node/store/src/impls/execution_payload.rs b/beacon_node/store/src/impls/execution_payload.rs index 14fc10ad6de..5c60aa8d7e3 100644 --- a/beacon_node/store/src/impls/execution_payload.rs +++ b/beacon_node/store/src/impls/execution_payload.rs @@ -2,7 +2,7 @@ use crate::{DBColumn, Error, StoreItem}; use ssz::{Decode, Encode}; use types::{ BlobSidecarList, EthSpec, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, - ExecutionPayloadDeneb, ExecutionPayloadElectra, + ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, }; macro_rules! impl_store_item { @@ -26,6 +26,7 @@ impl_store_item!(ExecutionPayloadBellatrix); impl_store_item!(ExecutionPayloadCapella); impl_store_item!(ExecutionPayloadDeneb); impl_store_item!(ExecutionPayloadElectra); +impl_store_item!(ExecutionPayloadFulu); impl_store_item!(BlobSidecarList); /// This fork-agnostic implementation should be only used for writing. @@ -42,17 +43,21 @@ impl StoreItem for ExecutionPayload { } fn from_store_bytes(bytes: &[u8]) -> Result { - ExecutionPayloadElectra::from_ssz_bytes(bytes) - .map(Self::Electra) + ExecutionPayloadFulu::from_ssz_bytes(bytes) + .map(Self::Fulu) .or_else(|_| { - ExecutionPayloadDeneb::from_ssz_bytes(bytes) - .map(Self::Deneb) + ExecutionPayloadElectra::from_ssz_bytes(bytes) + .map(Self::Electra) .or_else(|_| { - ExecutionPayloadCapella::from_ssz_bytes(bytes) - .map(Self::Capella) + ExecutionPayloadDeneb::from_ssz_bytes(bytes) + .map(Self::Deneb) .or_else(|_| { - ExecutionPayloadBellatrix::from_ssz_bytes(bytes) - .map(Self::Bellatrix) + ExecutionPayloadCapella::from_ssz_bytes(bytes) + .map(Self::Capella) + .or_else(|_| { + ExecutionPayloadBellatrix::from_ssz_bytes(bytes) + .map(Self::Bellatrix) + }) }) }) }) diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 22eecdcc605..0b8bc2e0d4b 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs 
@@ -16,7 +16,7 @@ use types::*; /// /// This can be deleted once schema versions prior to V22 are no longer supported. #[superstruct( - variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), + variants(Base, Altair, Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) )] #[derive(Debug, PartialEq, Clone, Encode)] @@ -68,9 +68,9 @@ where pub current_epoch_attestations: List, E::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] pub previous_epoch_participation: List, - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] pub current_epoch_participation: List, // Finality @@ -80,13 +80,13 @@ where pub finalized_checkpoint: Checkpoint, // Inactivity - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] pub inactivity_scores: List, // Light-client sync committees - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] pub next_sync_committee: Arc>, // Execution @@ -110,37 +110,42 @@ where partial_getter(rename = "latest_execution_payload_header_electra") )] pub latest_execution_payload_header: ExecutionPayloadHeaderElectra, + #[superstruct( + only(Fulu), + partial_getter(rename = "latest_execution_payload_header_fulu") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderFulu, // Capella - #[superstruct(only(Capella, Deneb, Electra))] + #[superstruct(only(Capella, Deneb, Electra, Fulu))] pub next_withdrawal_index: u64, - 
#[superstruct(only(Capella, Deneb, Electra))] + #[superstruct(only(Capella, Deneb, Electra, Fulu))] pub next_withdrawal_validator_index: u64, #[ssz(skip_serializing, skip_deserializing)] - #[superstruct(only(Capella, Deneb, Electra))] + #[superstruct(only(Capella, Deneb, Electra, Fulu))] pub historical_summaries: Option>, // Electra - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub deposit_requests_start_index: u64, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub deposit_balance_to_consume: u64, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub exit_balance_to_consume: u64, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub earliest_exit_epoch: Epoch, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub consolidation_balance_to_consume: u64, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub earliest_consolidation_epoch: Epoch, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub pending_deposits: List, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub pending_partial_withdrawals: List, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub pending_consolidations: List, } @@ -409,6 +414,31 @@ impl TryInto> for PartialBeaconState { ], [historical_summaries] ), + PartialBeaconState::Fulu(inner) => impl_try_into_beacon_state!( + inner, + Fulu, + BeaconStateFulu, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + next_withdrawal_index, + next_withdrawal_validator_index, + deposit_requests_start_index, + deposit_balance_to_consume, + exit_balance_to_consume, + earliest_exit_epoch, + consolidation_balance_to_consume, + earliest_consolidation_epoch, + pending_deposits, + pending_partial_withdrawals, + pending_consolidations + ], + 
[historical_summaries] + ), }; Ok(state) } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index a303953a863..695d536944b 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1078,6 +1078,9 @@ impl ForkVersionDeserialize for SsePayloadAttributes { ForkName::Electra => serde_json::from_value(value) .map(Self::V3) .map_err(serde::de::Error::custom), + ForkName::Fulu => serde_json::from_value(value) + .map(Self::V3) + .map_err(serde::de::Error::custom), ForkName::Base | ForkName::Altair => Err(serde::de::Error::custom(format!( "SsePayloadAttributes deserialization for {fork_name} not implemented" ))), @@ -1861,14 +1864,10 @@ impl PublishBlockRequest { impl TryFrom>> for PublishBlockRequest { type Error = &'static str; fn try_from(block: Arc>) -> Result { - match *block { - SignedBeaconBlock::Base(_) - | SignedBeaconBlock::Altair(_) - | SignedBeaconBlock::Bellatrix(_) - | SignedBeaconBlock::Capella(_) => Ok(PublishBlockRequest::Block(block)), - SignedBeaconBlock::Deneb(_) | SignedBeaconBlock::Electra(_) => Err( - "post-Deneb block contents cannot be fully constructed from just the signed block", - ), + if block.message().fork_name_unchecked().deneb_enabled() { + Err("post-Deneb block contents cannot be fully constructed from just the signed block") + } else { + Ok(PublishBlockRequest::Block(block)) } } } @@ -1972,16 +1971,18 @@ impl ForkVersionDeserialize for FullPayloadContents { value: Value, fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Bellatrix | ForkName::Capella => serde_json::from_value(value) - .map(Self::Payload) - .map_err(serde::de::Error::custom), - ForkName::Deneb | ForkName::Electra => serde_json::from_value(value) + if fork_name.deneb_enabled() { + serde_json::from_value(value) .map(Self::PayloadAndBlobs) - .map_err(serde::de::Error::custom), - ForkName::Base | ForkName::Altair => Err(serde::de::Error::custom(format!( + .map_err(serde::de::Error::custom) + } else if fork_name.bellatrix_enabled() 
{ + serde_json::from_value(value) + .map(Self::Payload) + .map_err(serde::de::Error::custom) + } else { + Err(serde::de::Error::custom(format!( "FullPayloadContents deserialization for {fork_name} not implemented" - ))), + ))) } } } diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index 1eca01bbeef..a107f6147ae 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -15,7 +15,6 @@ TERMINAL_TOTAL_DIFFICULTY: 231707791542740786049188744689299064356246512 TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 - # Genesis # --------------------------------------------------------------- # *CUSTOM @@ -27,7 +26,6 @@ GENESIS_FORK_VERSION: 0x0000006f # *CUSTOM GENESIS_DELAY: 300 - # Forking # --------------------------------------------------------------- # Some forks are disabled for now: @@ -49,6 +47,9 @@ DENEB_FORK_EPOCH: 516608 # Wed Jan 31 2024 18:15:40 GMT+0000 # Electra ELECTRA_FORK_VERSION: 0x0500006f ELECTRA_FORK_EPOCH: 18446744073709551615 +# Fulu +FULU_FORK_VERSION: 0x0600006f +FULU_FORK_EPOCH: 18446744073709551615 # Time parameters # --------------------------------------------------------------- @@ -63,7 +64,6 @@ SHARD_COMMITTEE_PERIOD: 256 # 2**10 (= 1024) ~1.4 hour ETH1_FOLLOW_DISTANCE: 1024 - # Validator cycle # --------------------------------------------------------------- # 2**2 (= 4) @@ -90,7 +90,6 @@ REORG_PARENT_WEIGHT_THRESHOLD: 160 # `2` epochs REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 - # Deposit contract # --------------------------------------------------------------- # xDai Mainnet @@ -141,4 +140,4 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 -SAMPLES_PER_SLOT: 8 \ No newline 
at end of file +SAMPLES_PER_SLOT: 8 diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 500555a2694..f71984059aa 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -23,7 +23,6 @@ GENESIS_FORK_VERSION: 0x00000064 # 6000 seconds (100 minutes) GENESIS_DELAY: 6000 - # Forking # --------------------------------------------------------------- # Some forks are disabled for now: @@ -45,7 +44,9 @@ DENEB_FORK_EPOCH: 889856 # 2024-03-11T18:30:20.000Z # Electra ELECTRA_FORK_VERSION: 0x05000064 ELECTRA_FORK_EPOCH: 18446744073709551615 - +# Fulu +FULU_FORK_VERSION: 0x06000064 +FULU_FORK_EPOCH: 18446744073709551615 # Time parameters # --------------------------------------------------------------- @@ -60,7 +61,6 @@ SHARD_COMMITTEE_PERIOD: 256 # 2**10 (= 1024) ~1.4 hour ETH1_FOLLOW_DISTANCE: 1024 - # Validator cycle # --------------------------------------------------------------- # 2**2 (= 4) @@ -76,7 +76,6 @@ MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 2 # 2**12 (= 4096) CHURN_LIMIT_QUOTIENT: 4096 - # Fork choice # --------------------------------------------------------------- # 40% @@ -124,4 +123,4 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 -SAMPLES_PER_SLOT: 8 \ No newline at end of file +SAMPLES_PER_SLOT: 8 diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index d67d77d3bea..6d344b5b524 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -12,7 +12,6 @@ GENESIS_FORK_VERSION: 0x01017000 # Genesis delay 5 mins GENESIS_DELAY: 300 - # Forking # 
--------------------------------------------------------------- # Some forks are disabled for now: @@ -37,6 +36,9 @@ DENEB_FORK_EPOCH: 29696 # Electra ELECTRA_FORK_VERSION: 0x06017000 ELECTRA_FORK_EPOCH: 18446744073709551615 +# Fulu +FULU_FORK_VERSION: 0x07017000 +FULU_FORK_EPOCH: 18446744073709551615 # Time parameters # --------------------------------------------------------------- @@ -51,7 +53,6 @@ SHARD_COMMITTEE_PERIOD: 256 # 2**11 (= 2,048) Eth1 blocks ~8 hours ETH1_FOLLOW_DISTANCE: 2048 - # Validator cycle # --------------------------------------------------------------- # 2**2 (= 4) @@ -128,4 +129,4 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 -SAMPLES_PER_SLOT: 8 \ No newline at end of file +SAMPLES_PER_SLOT: 8 diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 18591fecdcd..244ddd564d2 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -18,8 +18,6 @@ TERMINAL_TOTAL_DIFFICULTY: 58750000000000000000000 TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 - - # Genesis # --------------------------------------------------------------- # `2**14` (= 16,384) @@ -31,7 +29,6 @@ GENESIS_FORK_VERSION: 0x00000000 # 604800 seconds (7 days) GENESIS_DELAY: 604800 - # Forking # --------------------------------------------------------------- # Some forks are disabled for now: @@ -40,23 +37,25 @@ GENESIS_DELAY: 604800 # Altair ALTAIR_FORK_VERSION: 0x01000000 -ALTAIR_FORK_EPOCH: 74240 # Oct 27, 2021, 10:56:23am UTC +ALTAIR_FORK_EPOCH: 74240 # Oct 27, 2021, 10:56:23am UTC # Bellatrix BELLATRIX_FORK_VERSION: 0x02000000 -BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC 
+BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC # Capella CAPELLA_FORK_VERSION: 0x03000000 -CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC +CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC # Deneb DENEB_FORK_VERSION: 0x04000000 -DENEB_FORK_EPOCH: 269568 # March 13, 2024, 01:55:35pm UTC +DENEB_FORK_EPOCH: 269568 # March 13, 2024, 01:55:35pm UTC # Electra ELECTRA_FORK_VERSION: 0x05000000 ELECTRA_FORK_EPOCH: 18446744073709551615 +# Fulu +FULU_FORK_VERSION: 0x06000000 +FULU_FORK_EPOCH: 18446744073709551615 # PeerDAS EIP7594_FORK_EPOCH: 18446744073709551615 - # Time parameters # --------------------------------------------------------------- # 12 seconds @@ -70,7 +69,6 @@ SHARD_COMMITTEE_PERIOD: 256 # 2**11 (= 2,048) Eth1 blocks ~8 hours ETH1_FOLLOW_DISTANCE: 2048 - # Validator cycle # --------------------------------------------------------------- # 2**2 (= 4) @@ -97,7 +95,6 @@ REORG_PARENT_WEIGHT_THRESHOLD: 160 # `2` epochs REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 - # Deposit contract # --------------------------------------------------------------- # Ethereum PoW Mainnet @@ -105,7 +102,6 @@ DEPOSIT_CHAIN_ID: 1 DEPOSIT_NETWORK_ID: 1 DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa - # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) @@ -150,4 +146,4 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 -SAMPLES_PER_SLOT: 8 \ No newline at end of file +SAMPLES_PER_SLOT: 8 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index b08a6180bf0..88f8359bd13 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -124,4 +124,4 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 CUSTODY_REQUIREMENT: 4 
DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 -SAMPLES_PER_SLOT: 8 \ No newline at end of file +SAMPLES_PER_SLOT: 8 diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 85704042df4..4c25be950b0 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -755,20 +755,15 @@ where if let Some((parent_justified, parent_finalized)) = parent_checkpoints { (parent_justified, parent_finalized) } else { - let justification_and_finalization_state = match block { - BeaconBlockRef::Electra(_) - | BeaconBlockRef::Deneb(_) - | BeaconBlockRef::Capella(_) - | BeaconBlockRef::Bellatrix(_) - | BeaconBlockRef::Altair(_) => { + let justification_and_finalization_state = + if block.fork_name_unchecked().altair_enabled() { // NOTE: Processing justification & finalization requires the progressive // balances cache, but we cannot initialize it here as we only have an // immutable reference. The state *should* have come straight from block // processing, which initialises the cache, but if we add other `on_block` // calls in future it could be worth passing a mutable reference. per_epoch_processing::altair::process_justification_and_finalization(state)? - } - BeaconBlockRef::Base(_) => { + } else { let mut validator_statuses = per_epoch_processing::base::ValidatorStatuses::new(state, spec) .map_err(Error::ValidatorStatuses)?; @@ -780,8 +775,7 @@ where &validator_statuses.total_balances, spec, )? - } - }; + }; ( justification_and_finalization_state.current_justified_checkpoint(), diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index ef017159a02..001b80fe113 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -1263,7 +1263,7 @@ async fn progressive_balances_cache_proposer_slashing() { // (`HeaderInvalid::ProposerSlashed`). 
The harness should be re-worked to successfully skip // the slot in this scenario rather than panic-ing. The same applies to // `progressive_balances_cache_attester_slashing`. - .apply_blocks(2) + .apply_blocks(1) .await .add_previous_epoch_proposer_slashing(MainnetEthSpec::slots_per_epoch()) .await diff --git a/consensus/state_processing/src/common/get_attestation_participation.rs b/consensus/state_processing/src/common/get_attestation_participation.rs index fc09dad1f4e..2c6fd3b215f 100644 --- a/consensus/state_processing/src/common/get_attestation_participation.rs +++ b/consensus/state_processing/src/common/get_attestation_participation.rs @@ -44,22 +44,15 @@ pub fn get_attestation_participation_flag_indices( if is_matching_source && inclusion_delay <= E::slots_per_epoch().integer_sqrt() { participation_flag_indices.push(TIMELY_SOURCE_FLAG_INDEX); } - match state { - &BeaconState::Base(_) - | &BeaconState::Altair(_) - | &BeaconState::Bellatrix(_) - | &BeaconState::Capella(_) => { - if is_matching_target && inclusion_delay <= E::slots_per_epoch() { - participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); - } - } - &BeaconState::Deneb(_) | &BeaconState::Electra(_) => { - if is_matching_target { - // [Modified in Deneb:EIP7045] - participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); - } + if state.fork_name_unchecked().deneb_enabled() { + if is_matching_target { + // [Modified in Deneb:EIP7045] + participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); } + } else if is_matching_target && inclusion_delay <= E::slots_per_epoch() { + participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); } + if is_matching_head && inclusion_delay == spec.min_attestation_inclusion_delay { participation_flag_indices.push(TIMELY_HEAD_FLAG_INDEX); } diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index 80d857cc009..bd60f16014c 100644 --- a/consensus/state_processing/src/common/slash_validator.rs 
+++ b/consensus/state_processing/src/common/slash_validator.rs @@ -55,15 +55,12 @@ pub fn slash_validator( let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index); let whistleblower_reward = validator_effective_balance .safe_div(spec.whistleblower_reward_quotient_for_state(state))?; - let proposer_reward = match state { - BeaconState::Base(_) => whistleblower_reward.safe_div(spec.proposer_reward_quotient)?, - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => whistleblower_reward + let proposer_reward = if state.fork_name_unchecked().altair_enabled() { + whistleblower_reward .safe_mul(PROPOSER_WEIGHT)? - .safe_div(WEIGHT_DENOMINATOR)?, + .safe_div(WEIGHT_DENOMINATOR)? + } else { + whistleblower_reward.safe_div(spec.proposer_reward_quotient)? }; // Ensure the whistleblower index is in the validator registry. diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index ccff3d80c03..10723ecc519 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -4,7 +4,7 @@ use super::per_block_processing::{ use crate::common::DepositDataTree; use crate::upgrade::electra::upgrade_state_to_electra; use crate::upgrade::{ - upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, + upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, upgrade_to_fulu, }; use safe_arith::{ArithError, SafeArith}; use std::sync::Arc; @@ -135,11 +135,27 @@ pub fn initialize_beacon_state_from_eth1( // Override latest execution payload header. // See https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#testing - if let Some(ExecutionPayloadHeader::Electra(header)) = execution_payload_header { + if let Some(ExecutionPayloadHeader::Electra(ref header)) = execution_payload_header { *state.latest_execution_payload_header_electra_mut()? 
= header.clone(); } } + // Upgrade to fulu if configured from genesis. + if spec + .fulu_fork_epoch + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) + { + upgrade_to_fulu(&mut state, spec)?; + + // Remove intermediate Electra fork from `state.fork`. + state.fork_mut().previous_version = spec.fulu_fork_version; + + // Override latest execution payload header. + if let Some(ExecutionPayloadHeader::Fulu(header)) = execution_payload_header { + *state.latest_execution_payload_header_fulu_mut()? = header.clone(); + } + } + // Now that we have our validators, initialize the caches (including the committees) state.build_caches(spec)?; diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 436f4934b90..22e0a5eab30 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -442,6 +442,12 @@ pub fn process_execution_payload>( _ => return Err(BlockProcessingError::IncorrectStateType), } } + ExecutionPayloadHeaderRefMut::Fulu(header_mut) => { + match payload.to_execution_payload_header() { + ExecutionPayloadHeader::Fulu(header) => *header_mut = header, + _ => return Err(BlockProcessingError::IncorrectStateType), + } + } } Ok(()) @@ -453,15 +459,17 @@ pub fn process_execution_payload>( /// repeatedly write code to treat these errors as false. /// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_complete pub fn is_merge_transition_complete(state: &BeaconState) -> bool { - match state { + if state.fork_name_unchecked().capella_enabled() { + true + } else if state.fork_name_unchecked().bellatrix_enabled() { // We must check defaultness against the payload header with 0x0 roots, as that's what's meant // by `ExecutionPayloadHeader()` in the spec. 
- BeaconState::Bellatrix(_) => state + state .latest_execution_payload_header() .map(|header| !header.is_default_with_zero_roots()) - .unwrap_or(false), - BeaconState::Electra(_) | BeaconState::Deneb(_) | BeaconState::Capella(_) => true, - BeaconState::Base(_) | BeaconState::Altair(_) => false, + .unwrap_or(false) + } else { + false } } /// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_block @@ -603,66 +611,65 @@ pub fn process_withdrawals>( payload: Payload::Ref<'_>, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - match state { - BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => { - let (expected_withdrawals, partial_withdrawals_count) = - get_expected_withdrawals(state, spec)?; - let expected_root = expected_withdrawals.tree_hash_root(); - let withdrawals_root = payload.withdrawals_root()?; - - if expected_root != withdrawals_root { - return Err(BlockProcessingError::WithdrawalsRootMismatch { - expected: expected_root, - found: withdrawals_root, - }); - } + if state.fork_name_unchecked().capella_enabled() { + let (expected_withdrawals, partial_withdrawals_count) = + get_expected_withdrawals(state, spec)?; + let expected_root = expected_withdrawals.tree_hash_root(); + let withdrawals_root = payload.withdrawals_root()?; + + if expected_root != withdrawals_root { + return Err(BlockProcessingError::WithdrawalsRootMismatch { + expected: expected_root, + found: withdrawals_root, + }); + } - for withdrawal in expected_withdrawals.iter() { - decrease_balance( - state, - withdrawal.validator_index as usize, - withdrawal.amount, - )?; - } + for withdrawal in expected_withdrawals.iter() { + decrease_balance( + state, + withdrawal.validator_index as usize, + withdrawal.amount, + )?; + } - // Update pending partial withdrawals [New in Electra:EIP7251] - if let Some(partial_withdrawals_count) = partial_withdrawals_count { - // TODO(electra): Use efficient pop_front after milhouse 
release https://github.com/sigp/milhouse/pull/38 - let new_partial_withdrawals = state - .pending_partial_withdrawals()? - .iter_from(partial_withdrawals_count)? - .cloned() - .collect::>(); - *state.pending_partial_withdrawals_mut()? = List::new(new_partial_withdrawals)?; - } + // Update pending partial withdrawals [New in Electra:EIP7251] + if let Some(partial_withdrawals_count) = partial_withdrawals_count { + // TODO(electra): Use efficient pop_front after milhouse release https://github.com/sigp/milhouse/pull/38 + let new_partial_withdrawals = state + .pending_partial_withdrawals()? + .iter_from(partial_withdrawals_count)? + .cloned() + .collect::>(); + *state.pending_partial_withdrawals_mut()? = List::new(new_partial_withdrawals)?; + } - // Update the next withdrawal index if this block contained withdrawals - if let Some(latest_withdrawal) = expected_withdrawals.last() { - *state.next_withdrawal_index_mut()? = latest_withdrawal.index.safe_add(1)?; - - // Update the next validator index to start the next withdrawal sweep - if expected_withdrawals.len() == E::max_withdrawals_per_payload() { - // Next sweep starts after the latest withdrawal's validator index - let next_validator_index = latest_withdrawal - .validator_index - .safe_add(1)? - .safe_rem(state.validators().len() as u64)?; - *state.next_withdrawal_validator_index_mut()? = next_validator_index; - } - } + // Update the next withdrawal index if this block contained withdrawals + if let Some(latest_withdrawal) = expected_withdrawals.last() { + *state.next_withdrawal_index_mut()? = latest_withdrawal.index.safe_add(1)?; - // Advance sweep by the max length of the sweep if there was not a full set of withdrawals - if expected_withdrawals.len() != E::max_withdrawals_per_payload() { - let next_validator_index = state - .next_withdrawal_validator_index()? - .safe_add(spec.max_validators_per_withdrawals_sweep)? 
+ // Update the next validator index to start the next withdrawal sweep + if expected_withdrawals.len() == E::max_withdrawals_per_payload() { + // Next sweep starts after the latest withdrawal's validator index + let next_validator_index = latest_withdrawal + .validator_index + .safe_add(1)? .safe_rem(state.validators().len() as u64)?; *state.next_withdrawal_validator_index_mut()? = next_validator_index; } + } - Ok(()) + // Advance sweep by the max length of the sweep if there was not a full set of withdrawals + if expected_withdrawals.len() != E::max_withdrawals_per_payload() { + let next_validator_index = state + .next_withdrawal_validator_index()? + .safe_add(spec.max_validators_per_withdrawals_sweep)? + .safe_rem(state.validators().len() as u64)?; + *state.next_withdrawal_validator_index_mut()? = next_validator_index; } + + Ok(()) + } else { // these shouldn't even be encountered but they're here for completeness - BeaconState::Base(_) | BeaconState::Altair(_) | BeaconState::Bellatrix(_) => Ok(()), + Ok(()) } } diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 22d8592364c..4977f7c7e9d 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -284,29 +284,22 @@ pub fn process_attestations>( ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - match block_body { - BeaconBlockBodyRef::Base(_) => { - base::process_attestations( - state, - block_body.attestations(), - verify_signatures, - ctxt, - spec, - )?; - } - BeaconBlockBodyRef::Altair(_) - | BeaconBlockBodyRef::Bellatrix(_) - | BeaconBlockBodyRef::Capella(_) - | BeaconBlockBodyRef::Deneb(_) - | BeaconBlockBodyRef::Electra(_) => { - altair_deneb::process_attestations( - state, - block_body.attestations(), - verify_signatures, - ctxt, - spec, - )?; - 
} + if state.fork_name_unchecked().altair_enabled() { + altair_deneb::process_attestations( + state, + block_body.attestations(), + verify_signatures, + ctxt, + spec, + )?; + } else { + base::process_attestations( + state, + block_body.attestations(), + verify_signatures, + ctxt, + spec, + )?; } Ok(()) } diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 2e00ee03418..39f438f97f6 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -387,22 +387,20 @@ where let exit = &signed_exit.message; let proposer_index = exit.validator_index as usize; - let domain = match state { - BeaconState::Base(_) - | BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) => spec.get_domain( - exit.epoch, - Domain::VoluntaryExit, - &state.fork(), - state.genesis_validators_root(), - ), + let domain = if state.fork_name_unchecked().deneb_enabled() { // EIP-7044 - BeaconState::Deneb(_) | BeaconState::Electra(_) => spec.compute_domain( + spec.compute_domain( Domain::VoluntaryExit, spec.capella_fork_version, state.genesis_validators_root(), - ), + ) + } else { + spec.get_domain( + exit.epoch, + Domain::VoluntaryExit, + &state.fork(), + state.genesis_validators_root(), + ) }; let message = exit.signing_root(domain); diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs index 6bfb51d475b..0b399bea6cf 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs @@ -32,21 +32,16 @@ pub fn verify_attestation_for_block_inclusion<'ctxt, E: EthSpec>( attestation: data.slot, } ); - match state { - BeaconState::Base(_) - | BeaconState::Altair(_) - | 
BeaconState::Bellatrix(_) - | BeaconState::Capella(_) => { - verify!( - state.slot() <= data.slot.safe_add(E::slots_per_epoch())?, - Invalid::IncludedTooLate { - state: state.slot(), - attestation: data.slot, - } - ); - } + if state.fork_name_unchecked().deneb_enabled() { // [Modified in Deneb:EIP7045] - BeaconState::Deneb(_) | BeaconState::Electra(_) => {} + } else { + verify!( + state.slot() <= data.slot.safe_add(E::slots_per_epoch())?, + Invalid::IncludedTooLate { + state: state.slot(), + attestation: data.slot, + } + ); } verify_attestation_for_state(state, attestation, ctxt, verify_signatures, spec) diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index 55e8853f3f8..41c30c49318 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -41,13 +41,10 @@ pub fn process_epoch( .fork_name(spec) .map_err(Error::InconsistentStateFork)?; - match state { - BeaconState::Base(_) => base::process_epoch(state, spec), - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => altair::process_epoch(state, spec), + if state.fork_name_unchecked().altair_enabled() { + altair::process_epoch(state, spec) + } else { + base::process_epoch(state, spec) } } diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 6554423199f..af1cce602c3 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -1,6 +1,6 @@ use crate::upgrade::{ upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, - upgrade_to_electra, + upgrade_to_electra, upgrade_to_fulu, }; use crate::{per_epoch_processing::EpochProcessingSummary, *}; use safe_arith::{ArithError, SafeArith}; @@ -71,6 +71,11 @@ pub fn per_slot_processing( 
upgrade_to_electra(state, spec)?; } + // Fulu. + if spec.fulu_fork_epoch == Some(state.current_epoch()) { + upgrade_to_fulu(state, spec)?; + } + // Additionally build all caches so that all valid states that are advanced always have // committee caches built, and we don't have to worry about initialising them at higher // layers. diff --git a/consensus/state_processing/src/upgrade.rs b/consensus/state_processing/src/upgrade.rs index 93cafa73d03..88bc87849f3 100644 --- a/consensus/state_processing/src/upgrade.rs +++ b/consensus/state_processing/src/upgrade.rs @@ -3,9 +3,11 @@ pub mod bellatrix; pub mod capella; pub mod deneb; pub mod electra; +pub mod fulu; pub use altair::upgrade_to_altair; pub use bellatrix::upgrade_to_bellatrix; pub use capella::upgrade_to_capella; pub use deneb::upgrade_to_deneb; pub use electra::upgrade_to_electra; +pub use fulu::upgrade_to_fulu; diff --git a/consensus/state_processing/src/upgrade/fulu.rs b/consensus/state_processing/src/upgrade/fulu.rs new file mode 100644 index 00000000000..6e0cd3fa9de --- /dev/null +++ b/consensus/state_processing/src/upgrade/fulu.rs @@ -0,0 +1,94 @@ +use std::mem; +use types::{BeaconState, BeaconStateError as Error, BeaconStateFulu, ChainSpec, EthSpec, Fork}; + +/// Transform a `Electra` state into an `Fulu` state. +pub fn upgrade_to_fulu( + pre_state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let _epoch = pre_state.current_epoch(); + + let post = upgrade_state_to_fulu(pre_state, spec)?; + + *pre_state = post; + + Ok(()) +} + +pub fn upgrade_state_to_fulu( + pre_state: &mut BeaconState, + spec: &ChainSpec, +) -> Result, Error> { + let epoch = pre_state.current_epoch(); + let pre = pre_state.as_electra_mut()?; + // Where possible, use something like `mem::take` to move fields from behind the &mut + // reference. For other fields that don't have a good default value, use `clone`. 
+ // + // Fixed size vectors get cloned because replacing them would require the same size + // allocation as cloning. + let post = BeaconState::Fulu(BeaconStateFulu { + // Versioning + genesis_time: pre.genesis_time, + genesis_validators_root: pre.genesis_validators_root, + slot: pre.slot, + fork: Fork { + previous_version: pre.fork.current_version, + current_version: spec.fulu_fork_version, + epoch, + }, + // History + latest_block_header: pre.latest_block_header.clone(), + block_roots: pre.block_roots.clone(), + state_roots: pre.state_roots.clone(), + historical_roots: mem::take(&mut pre.historical_roots), + // Eth1 + eth1_data: pre.eth1_data.clone(), + eth1_data_votes: mem::take(&mut pre.eth1_data_votes), + eth1_deposit_index: pre.eth1_deposit_index, + // Registry + validators: mem::take(&mut pre.validators), + balances: mem::take(&mut pre.balances), + // Randomness + randao_mixes: pre.randao_mixes.clone(), + // Slashings + slashings: pre.slashings.clone(), + // `Participation + previous_epoch_participation: mem::take(&mut pre.previous_epoch_participation), + current_epoch_participation: mem::take(&mut pre.current_epoch_participation), + // Finality + justification_bits: pre.justification_bits.clone(), + previous_justified_checkpoint: pre.previous_justified_checkpoint, + current_justified_checkpoint: pre.current_justified_checkpoint, + finalized_checkpoint: pre.finalized_checkpoint, + // Inactivity + inactivity_scores: mem::take(&mut pre.inactivity_scores), + // Sync committees + current_sync_committee: pre.current_sync_committee.clone(), + next_sync_committee: pre.next_sync_committee.clone(), + // Execution + latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_fulu(), + // Capella + next_withdrawal_index: pre.next_withdrawal_index, + next_withdrawal_validator_index: pre.next_withdrawal_validator_index, + historical_summaries: pre.historical_summaries.clone(), + // Electra + deposit_requests_start_index: 
pre.deposit_requests_start_index, + deposit_balance_to_consume: pre.deposit_balance_to_consume, + exit_balance_to_consume: pre.exit_balance_to_consume, + earliest_exit_epoch: pre.earliest_exit_epoch, + consolidation_balance_to_consume: pre.consolidation_balance_to_consume, + earliest_consolidation_epoch: pre.earliest_consolidation_epoch, + pending_deposits: pre.pending_deposits.clone(), + pending_partial_withdrawals: pre.pending_partial_withdrawals.clone(), + pending_consolidations: pre.pending_consolidations.clone(), + // Caches + total_active_balance: pre.total_active_balance, + progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), + committee_caches: mem::take(&mut pre.committee_caches), + pubkey_cache: mem::take(&mut pre.pubkey_cache), + exit_cache: mem::take(&mut pre.exit_cache), + slashings_cache: mem::take(&mut pre.slashings_cache), + epoch_cache: mem::take(&mut pre.epoch_cache), + }); + Ok(post) +} diff --git a/consensus/types/presets/gnosis/fulu.yaml b/consensus/types/presets/gnosis/fulu.yaml new file mode 100644 index 00000000000..35a7c98fbf3 --- /dev/null +++ b/consensus/types/presets/gnosis/fulu.yaml @@ -0,0 +1,3 @@ +# Gnosis preset - Fulu + +FULU_PLACEHOLDER: 0 diff --git a/consensus/types/presets/mainnet/fulu.yaml b/consensus/types/presets/mainnet/fulu.yaml new file mode 100644 index 00000000000..8aa9ccdcc37 --- /dev/null +++ b/consensus/types/presets/mainnet/fulu.yaml @@ -0,0 +1,3 @@ +# Mainnet preset - Fulu + +FULU_PLACEHOLDER: 0 diff --git a/consensus/types/presets/minimal/fulu.yaml b/consensus/types/presets/minimal/fulu.yaml new file mode 100644 index 00000000000..121c9858f41 --- /dev/null +++ b/consensus/types/presets/minimal/fulu.yaml @@ -0,0 +1,3 @@ +# Minimal preset - Fulu + +FULU_PLACEHOLDER: 0 diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 801b7dd1c78..d72550aa121 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -16,7 +16,7 @@ use 
self::indexed_attestation::{IndexedAttestationBase, IndexedAttestationElectr /// A block of the `BeaconChain`. #[superstruct( - variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), + variants(Base, Altair, Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -75,6 +75,8 @@ pub struct BeaconBlock = FullPayload pub body: BeaconBlockBodyDeneb, #[superstruct(only(Electra), partial_getter(rename = "body_electra"))] pub body: BeaconBlockBodyElectra, + #[superstruct(only(Fulu), partial_getter(rename = "body_fulu"))] + pub body: BeaconBlockBodyFulu, } pub type BlindedBeaconBlock = BeaconBlock>; @@ -127,8 +129,9 @@ impl> BeaconBlock { /// Usually it's better to prefer `from_ssz_bytes` which will decode the correct variant based /// on the fork slot. pub fn any_from_ssz_bytes(bytes: &[u8]) -> Result { - BeaconBlockElectra::from_ssz_bytes(bytes) - .map(BeaconBlock::Electra) + BeaconBlockFulu::from_ssz_bytes(bytes) + .map(BeaconBlock::Fulu) + .or_else(|_| BeaconBlockElectra::from_ssz_bytes(bytes).map(BeaconBlock::Electra)) .or_else(|_| BeaconBlockDeneb::from_ssz_bytes(bytes).map(BeaconBlock::Deneb)) .or_else(|_| BeaconBlockCapella::from_ssz_bytes(bytes).map(BeaconBlock::Capella)) .or_else(|_| BeaconBlockBellatrix::from_ssz_bytes(bytes).map(BeaconBlock::Bellatrix)) @@ -226,6 +229,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockRef<'a, E, Payl BeaconBlockRef::Capella { .. } => ForkName::Capella, BeaconBlockRef::Deneb { .. } => ForkName::Deneb, BeaconBlockRef::Electra { .. } => ForkName::Electra, + BeaconBlockRef::Fulu { .. } => ForkName::Fulu, } } @@ -704,6 +708,110 @@ impl> EmptyBlock for BeaconBlockElec } } +impl> BeaconBlockFulu { + /// Return a Fulu block where the block has maximum size. 
+ pub fn full(spec: &ChainSpec) -> Self { + let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); + let indexed_attestation: IndexedAttestationElectra = IndexedAttestationElectra { + attesting_indices: VariableList::new(vec![0_u64; E::MaxValidatorsPerSlot::to_usize()]) + .unwrap(), + data: AttestationData::default(), + signature: AggregateSignature::empty(), + }; + let attester_slashings = vec![ + AttesterSlashingElectra { + attestation_1: indexed_attestation.clone(), + attestation_2: indexed_attestation, + }; + E::max_attester_slashings_electra() + ] + .into(); + let attestation = AttestationElectra { + aggregation_bits: BitList::with_capacity(E::MaxValidatorsPerSlot::to_usize()).unwrap(), + data: AttestationData::default(), + signature: AggregateSignature::empty(), + committee_bits: BitVector::new(), + }; + let mut attestations_electra = vec![]; + for _ in 0..E::MaxAttestationsElectra::to_usize() { + attestations_electra.push(attestation.clone()); + } + + let bls_to_execution_changes = vec![ + SignedBlsToExecutionChange { + message: BlsToExecutionChange { + validator_index: 0, + from_bls_pubkey: PublicKeyBytes::empty(), + to_execution_address: Address::ZERO, + }, + signature: Signature::empty() + }; + E::max_bls_to_execution_changes() + ] + .into(); + let sync_aggregate = SyncAggregate { + sync_committee_signature: AggregateSignature::empty(), + sync_committee_bits: BitVector::default(), + }; + BeaconBlockFulu { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyFulu { + proposer_slashings: base_block.body.proposer_slashings, + attester_slashings, + attestations: attestations_electra.into(), + deposits: base_block.body.deposits, + voluntary_exits: base_block.body.voluntary_exits, + bls_to_execution_changes, + sync_aggregate, + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + 
deposit_count: 0, + }, + graffiti: Graffiti::default(), + execution_payload: Payload::Fulu::default(), + blob_kzg_commitments: VariableList::empty(), + execution_requests: ExecutionRequests::default(), + }, + } + } +} + +impl> EmptyBlock for BeaconBlockFulu { + /// Returns an empty Fulu block to be used during genesis. + fn empty(spec: &ChainSpec) -> Self { + BeaconBlockFulu { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyFulu { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + execution_payload: Payload::Fulu::default(), + bls_to_execution_changes: VariableList::empty(), + blob_kzg_commitments: VariableList::empty(), + execution_requests: ExecutionRequests::default(), + }, + } + } +} + // We can convert pre-Bellatrix blocks without payloads into blocks "with" payloads. impl From>> for BeaconBlockBase> @@ -785,6 +893,7 @@ impl_from!(BeaconBlockBellatrix, >, >, |b impl_from!(BeaconBlockCapella, >, >, |body: BeaconBlockBodyCapella<_, _>| body.into()); impl_from!(BeaconBlockDeneb, >, >, |body: BeaconBlockBodyDeneb<_, _>| body.into()); impl_from!(BeaconBlockElectra, >, >, |body: BeaconBlockBodyElectra<_, _>| body.into()); +impl_from!(BeaconBlockFulu, >, >, |body: BeaconBlockBodyFulu<_, _>| body.into()); // We can clone blocks with payloads to blocks without payloads, without cloning the payload. macro_rules! 
impl_clone_as_blinded { @@ -818,6 +927,7 @@ impl_clone_as_blinded!(BeaconBlockBellatrix, >, >, >); impl_clone_as_blinded!(BeaconBlockDeneb, >, >); impl_clone_as_blinded!(BeaconBlockElectra, >, >); +impl_clone_as_blinded!(BeaconBlockFulu, >, >); // A reference to a full beacon block can be cloned into a blinded beacon block, without cloning the // execution payload. @@ -988,6 +1098,26 @@ mod tests { }); } + #[test] + fn roundtrip_fulu_block() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + let spec = &ForkName::Fulu.make_genesis_spec(MainnetEthSpec::default_spec()); + + let inner_block = BeaconBlockFulu { + slot: Slot::random_for_test(rng), + proposer_index: u64::random_for_test(rng), + parent_root: Hash256::random_for_test(rng), + state_root: Hash256::random_for_test(rng), + body: BeaconBlockBodyFulu::random_for_test(rng), + }; + + let block = BeaconBlock::Fulu(inner_block.clone()); + + test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| { + BeaconBlock::from_ssz_bytes(bytes, spec) + }); + } + #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; @@ -1007,11 +1137,14 @@ mod tests { let deneb_slot = deneb_epoch.start_slot(E::slots_per_epoch()); let electra_epoch = deneb_epoch + 1; let electra_slot = electra_epoch.start_slot(E::slots_per_epoch()); + let fulu_epoch = electra_epoch + 1; + let fulu_slot = fulu_epoch.start_slot(E::slots_per_epoch()); spec.altair_fork_epoch = Some(altair_epoch); spec.capella_fork_epoch = Some(capella_epoch); spec.deneb_fork_epoch = Some(deneb_epoch); spec.electra_fork_epoch = Some(electra_epoch); + spec.fulu_fork_epoch = Some(fulu_epoch); // BeaconBlockBase { @@ -1122,5 +1255,29 @@ mod tests { BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) .expect_err("bad electra block cannot be decoded"); } + + // BeaconBlockFulu + { + let good_block = BeaconBlock::Fulu(BeaconBlockFulu { + slot: fulu_slot, + ..<_>::random_for_test(rng) + }); + // It's invalid to have a Fulu block with a epoch lower than the fork 
epoch. + let _bad_block = { + let mut bad = good_block.clone(); + *bad.slot_mut() = electra_slot; + bad + }; + + assert_eq!( + BeaconBlock::from_ssz_bytes(&good_block.as_ssz_bytes(), &spec) + .expect("good fulu block can be decoded"), + good_block + ); + // TODO(fulu): Uncomment once Fulu has features since without features + // and with an Electra slot it decodes successfully to Electra. + //BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) + // .expect_err("bad fulu block cannot be decoded"); + } } } diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index f7a701fed6d..a198cdf28f4 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -30,7 +30,7 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; /// /// This *superstruct* abstracts over the hard-fork. #[superstruct( - variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), + variants(Base, Altair, Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -58,6 +58,7 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; Capella(metastruct(mappings(beacon_block_body_capella_fields(groups(fields))))), Deneb(metastruct(mappings(beacon_block_body_deneb_fields(groups(fields))))), Electra(metastruct(mappings(beacon_block_body_electra_fields(groups(fields))))), + Fulu(metastruct(mappings(beacon_block_body_fulu_fields(groups(fields))))) ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") @@ -77,7 +78,10 @@ pub struct BeaconBlockBody = FullPay partial_getter(rename = "attester_slashings_base") )] pub attester_slashings: VariableList, E::MaxAttesterSlashings>, - #[superstruct(only(Electra), partial_getter(rename = "attester_slashings_electra"))] + #[superstruct( + only(Electra, Fulu), + partial_getter(rename = "attester_slashings_electra") + )] pub attester_slashings: VariableList, 
E::MaxAttesterSlashingsElectra>, #[superstruct( @@ -85,11 +89,11 @@ pub struct BeaconBlockBody = FullPay partial_getter(rename = "attestations_base") )] pub attestations: VariableList, E::MaxAttestations>, - #[superstruct(only(Electra), partial_getter(rename = "attestations_electra"))] + #[superstruct(only(Electra, Fulu), partial_getter(rename = "attestations_electra"))] pub attestations: VariableList, E::MaxAttestationsElectra>, pub deposits: VariableList, pub voluntary_exits: VariableList, - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] pub sync_aggregate: SyncAggregate, // We flatten the execution payload so that serde can use the name of the inner type, // either `execution_payload` for full payloads, or `execution_payload_header` for blinded @@ -109,12 +113,15 @@ pub struct BeaconBlockBody = FullPay #[superstruct(only(Electra), partial_getter(rename = "execution_payload_electra"))] #[serde(flatten)] pub execution_payload: Payload::Electra, - #[superstruct(only(Capella, Deneb, Electra))] + #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_fulu"))] + #[serde(flatten)] + pub execution_payload: Payload::Fulu, + #[superstruct(only(Capella, Deneb, Electra, Fulu))] pub bls_to_execution_changes: VariableList, - #[superstruct(only(Deneb, Electra))] + #[superstruct(only(Deneb, Electra, Fulu))] pub blob_kzg_commitments: KzgCommitments, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub execution_requests: ExecutionRequests, #[superstruct(only(Base, Altair))] #[metastruct(exclude_from(fields))] @@ -144,6 +151,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Self::Capella(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Deneb(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Electra(body) => Ok(Payload::Ref::from(&body.execution_payload)), + Self::Fulu(body) => 
Ok(Payload::Ref::from(&body.execution_payload)), } } @@ -174,6 +182,10 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, beacon_block_body_electra_fields!(body, |_, field| leaves .push(field.tree_hash_root())); } + Self::Fulu(body) => { + beacon_block_body_fulu_fields!(body, |_, field| leaves + .push(field.tree_hash_root())); + } } leaves } @@ -202,7 +214,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Self::Base(_) | Self::Altair(_) | Self::Bellatrix(_) | Self::Capella(_) => { Err(Error::IncorrectStateVariant) } - Self::Deneb(_) | Self::Electra(_) => { + Self::Deneb(_) | Self::Electra(_) | Self::Fulu(_) => { // We compute the branches by generating 2 merkle trees: // 1. Merkle tree for the `blob_kzg_commitments` List object // 2. Merkle tree for the `BeaconBlockBody` container @@ -294,6 +306,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Self::Capella(body) => body.attestations.len(), Self::Deneb(body) => body.attestations.len(), Self::Electra(body) => body.attestations.len(), + Self::Fulu(body) => body.attestations.len(), } } @@ -305,6 +318,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Self::Capella(body) => body.attester_slashings.len(), Self::Deneb(body) => body.attester_slashings.len(), Self::Electra(body) => body.attester_slashings.len(), + Self::Fulu(body) => body.attester_slashings.len(), } } @@ -316,6 +330,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Self::Capella(body) => Box::new(body.attestations.iter().map(AttestationRef::Base)), Self::Deneb(body) => Box::new(body.attestations.iter().map(AttestationRef::Base)), Self::Electra(body) => Box::new(body.attestations.iter().map(AttestationRef::Electra)), + Self::Fulu(body) => Box::new(body.attestations.iter().map(AttestationRef::Electra)), } } @@ -351,6 +366,11 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, 
.iter() .map(AttesterSlashingRef::Electra), ), + Self::Fulu(body) => Box::new( + body.attester_slashings + .iter() + .map(AttesterSlashingRef::Electra), + ), } } } @@ -376,6 +396,9 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRefMut<'a, Self::Electra(body) => { Box::new(body.attestations.iter_mut().map(AttestationRefMut::Electra)) } + Self::Fulu(body) => { + Box::new(body.attestations.iter_mut().map(AttestationRefMut::Electra)) + } } } } @@ -390,6 +413,7 @@ impl> BeaconBlockBodyRef<'_, E, Payl BeaconBlockBodyRef::Capella { .. } => ForkName::Capella, BeaconBlockBodyRef::Deneb { .. } => ForkName::Deneb, BeaconBlockBodyRef::Electra { .. } => ForkName::Electra, + BeaconBlockBodyRef::Fulu { .. } => ForkName::Fulu, } } } @@ -704,6 +728,52 @@ impl From>> } } +impl From>> + for ( + BeaconBlockBodyFulu>, + Option>, + ) +{ + fn from(body: BeaconBlockBodyFulu>) -> Self { + let BeaconBlockBodyFulu { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadFulu { execution_payload }, + bls_to_execution_changes, + blob_kzg_commitments, + execution_requests, + } = body; + + ( + BeaconBlockBodyFulu { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadFulu { + execution_payload_header: From::from(&execution_payload), + }, + bls_to_execution_changes, + blob_kzg_commitments: blob_kzg_commitments.clone(), + execution_requests, + }, + Some(execution_payload), + ) + } +} + // We can clone a full block into a blinded block, without cloning the payload. 
impl BeaconBlockBodyBase> { pub fn clone_as_blinded(&self) -> BeaconBlockBodyBase> { @@ -859,6 +929,44 @@ impl BeaconBlockBodyElectra> { } } +impl BeaconBlockBodyFulu> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyFulu> { + let BeaconBlockBodyFulu { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadFulu { execution_payload }, + bls_to_execution_changes, + blob_kzg_commitments, + execution_requests, + } = self; + + BeaconBlockBodyFulu { + randao_reveal: randao_reveal.clone(), + eth1_data: eth1_data.clone(), + graffiti: *graffiti, + proposer_slashings: proposer_slashings.clone(), + attester_slashings: attester_slashings.clone(), + attestations: attestations.clone(), + deposits: deposits.clone(), + voluntary_exits: voluntary_exits.clone(), + sync_aggregate: sync_aggregate.clone(), + execution_payload: BlindedPayloadFulu { + execution_payload_header: execution_payload.into(), + }, + bls_to_execution_changes: bls_to_execution_changes.clone(), + blob_kzg_commitments: blob_kzg_commitments.clone(), + execution_requests: execution_requests.clone(), + } + } +} + impl From>> for ( BeaconBlockBody>, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 05f28744faf..de6077bf940 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -223,7 +223,7 @@ impl From for Hash256 { /// /// https://github.com/sigp/milhouse/issues/43 #[superstruct( - variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), + variants(Base, Altair, Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Derivative, @@ -326,6 +326,20 @@ impl From for Hash256 { groups(tree_lists) )), num_fields(all()), + )), + Fulu(metastruct( + mappings( + map_beacon_state_fulu_fields(), + map_beacon_state_fulu_tree_list_fields(mutable, fallible, groups(tree_lists)), + 
map_beacon_state_fulu_tree_list_fields_immutable(groups(tree_lists)), + ), + bimappings(bimap_beacon_state_fulu_tree_list_fields( + other_type = "BeaconStateFulu", + self_mutable, + fallible, + groups(tree_lists) + )), + num_fields(all()), )) ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), @@ -408,11 +422,11 @@ where // Participation (Altair and later) #[compare_fields(as_iter)] - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] #[test_random(default)] #[compare_fields(as_iter)] pub previous_epoch_participation: List, - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] #[test_random(default)] pub current_epoch_participation: List, @@ -432,15 +446,15 @@ where // Inactivity #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] #[test_random(default)] pub inactivity_scores: List, // Light-client sync committees - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] #[metastruct(exclude_from(tree_lists))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] #[metastruct(exclude_from(tree_lists))] pub next_sync_committee: Arc>, @@ -469,56 +483,62 @@ where )] #[metastruct(exclude_from(tree_lists))] pub latest_execution_payload_header: ExecutionPayloadHeaderElectra, + #[superstruct( + only(Fulu), + partial_getter(rename = "latest_execution_payload_header_fulu") + )] + #[metastruct(exclude_from(tree_lists))] + pub latest_execution_payload_header: ExecutionPayloadHeaderFulu, // Capella - #[superstruct(only(Capella, Deneb, Electra), 
partial_getter(copy))] + #[superstruct(only(Capella, Deneb, Electra, Fulu), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] #[metastruct(exclude_from(tree_lists))] pub next_withdrawal_index: u64, - #[superstruct(only(Capella, Deneb, Electra), partial_getter(copy))] + #[superstruct(only(Capella, Deneb, Electra, Fulu), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] #[metastruct(exclude_from(tree_lists))] pub next_withdrawal_validator_index: u64, // Deep history valid from Capella onwards. - #[superstruct(only(Capella, Deneb, Electra))] + #[superstruct(only(Capella, Deneb, Electra, Fulu))] #[test_random(default)] pub historical_summaries: List, // Electra - #[superstruct(only(Electra), partial_getter(copy))] + #[superstruct(only(Electra, Fulu), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] #[serde(with = "serde_utils::quoted_u64")] pub deposit_requests_start_index: u64, - #[superstruct(only(Electra), partial_getter(copy))] + #[superstruct(only(Electra, Fulu), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] #[serde(with = "serde_utils::quoted_u64")] pub deposit_balance_to_consume: u64, - #[superstruct(only(Electra), partial_getter(copy))] + #[superstruct(only(Electra, Fulu), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] #[serde(with = "serde_utils::quoted_u64")] pub exit_balance_to_consume: u64, - #[superstruct(only(Electra), partial_getter(copy))] + #[superstruct(only(Electra, Fulu), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] pub earliest_exit_epoch: Epoch, - #[superstruct(only(Electra), partial_getter(copy))] + #[superstruct(only(Electra, Fulu), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] #[serde(with = "serde_utils::quoted_u64")] pub consolidation_balance_to_consume: u64, - #[superstruct(only(Electra), partial_getter(copy))] + #[superstruct(only(Electra, Fulu), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] pub 
earliest_consolidation_epoch: Epoch, #[compare_fields(as_iter)] #[test_random(default)] - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub pending_deposits: List, #[compare_fields(as_iter)] #[test_random(default)] - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub pending_partial_withdrawals: List, #[compare_fields(as_iter)] #[test_random(default)] - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub pending_consolidations: List, // Caching (not in the spec) @@ -659,6 +679,7 @@ impl BeaconState { BeaconState::Capella { .. } => ForkName::Capella, BeaconState::Deneb { .. } => ForkName::Deneb, BeaconState::Electra { .. } => ForkName::Electra, + BeaconState::Fulu { .. } => ForkName::Fulu, } } @@ -948,6 +969,9 @@ impl BeaconState { BeaconState::Electra(state) => Ok(ExecutionPayloadHeaderRef::Electra( &state.latest_execution_payload_header, )), + BeaconState::Fulu(state) => Ok(ExecutionPayloadHeaderRef::Fulu( + &state.latest_execution_payload_header, + )), } } @@ -968,6 +992,9 @@ impl BeaconState { BeaconState::Electra(state) => Ok(ExecutionPayloadHeaderRefMut::Electra( &mut state.latest_execution_payload_header, )), + BeaconState::Fulu(state) => Ok(ExecutionPayloadHeaderRefMut::Fulu( + &mut state.latest_execution_payload_header, + )), } } @@ -1481,6 +1508,16 @@ impl BeaconState { &mut state.exit_cache, &mut state.epoch_cache, )), + BeaconState::Fulu(state) => Ok(( + &mut state.validators, + &mut state.balances, + &state.previous_epoch_participation, + &state.current_epoch_participation, + &mut state.inactivity_scores, + &mut state.progressive_balances_cache, + &mut state.exit_cache, + &mut state.epoch_cache, + )), } } @@ -1662,10 +1699,12 @@ impl BeaconState { | BeaconState::Altair(_) | BeaconState::Bellatrix(_) | BeaconState::Capella(_) => self.get_validator_churn_limit(spec)?, - BeaconState::Deneb(_) | BeaconState::Electra(_) => std::cmp::min( - spec.max_per_epoch_activation_churn_limit, - 
self.get_validator_churn_limit(spec)?, - ), + BeaconState::Deneb(_) | BeaconState::Electra(_) | BeaconState::Fulu(_) => { + std::cmp::min( + spec.max_per_epoch_activation_churn_limit, + self.get_validator_churn_limit(spec)?, + ) + } }) } @@ -1783,6 +1822,7 @@ impl BeaconState { BeaconState::Capella(state) => Ok(&mut state.current_epoch_participation), BeaconState::Deneb(state) => Ok(&mut state.current_epoch_participation), BeaconState::Electra(state) => Ok(&mut state.current_epoch_participation), + BeaconState::Fulu(state) => Ok(&mut state.current_epoch_participation), } } else if epoch == previous_epoch { match self { @@ -1792,6 +1832,7 @@ impl BeaconState { BeaconState::Capella(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Deneb(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Electra(state) => Ok(&mut state.previous_epoch_participation), + BeaconState::Fulu(state) => Ok(&mut state.previous_epoch_participation), } } else { Err(BeaconStateError::EpochOutOfBounds) @@ -2045,6 +2086,11 @@ impl BeaconState { } ); } + Self::Fulu(self_inner) => { + map_beacon_state_fulu_tree_list_fields_immutable!(self_inner, |_, self_field| { + any_pending_mutations |= self_field.has_pending_updates(); + }); + } }; any_pending_mutations } @@ -2238,12 +2284,29 @@ impl BeaconState { exit_balance_to_consume .safe_add_assign(additional_epochs.safe_mul(per_epoch_churn)?)?; } - let state = self.as_electra_mut()?; - // Consume the balance and update state variables - state.exit_balance_to_consume = exit_balance_to_consume.safe_sub(exit_balance)?; - state.earliest_exit_epoch = earliest_exit_epoch; + match self { + BeaconState::Base(_) + | BeaconState::Altair(_) + | BeaconState::Bellatrix(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), + BeaconState::Electra(_) => { + let state = self.as_electra_mut()?; + + // Consume the balance and update state variables + state.exit_balance_to_consume = 
exit_balance_to_consume.safe_sub(exit_balance)?; + state.earliest_exit_epoch = earliest_exit_epoch; + Ok(state.earliest_exit_epoch) + } + BeaconState::Fulu(_) => { + let state = self.as_fulu_mut()?; - Ok(state.earliest_exit_epoch) + // Consume the balance and update state variables + state.exit_balance_to_consume = exit_balance_to_consume.safe_sub(exit_balance)?; + state.earliest_exit_epoch = earliest_exit_epoch; + Ok(state.earliest_exit_epoch) + } + } } pub fn compute_consolidation_epoch_and_update_churn( @@ -2277,13 +2340,31 @@ impl BeaconState { consolidation_balance_to_consume .safe_add_assign(additional_epochs.safe_mul(per_epoch_consolidation_churn)?)?; } - // Consume the balance and update state variables - let state = self.as_electra_mut()?; - state.consolidation_balance_to_consume = - consolidation_balance_to_consume.safe_sub(consolidation_balance)?; - state.earliest_consolidation_epoch = earliest_consolidation_epoch; - - Ok(state.earliest_consolidation_epoch) + match self { + BeaconState::Base(_) + | BeaconState::Altair(_) + | BeaconState::Bellatrix(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), + BeaconState::Electra(_) => { + let state = self.as_electra_mut()?; + + // Consume the balance and update state variables. + state.consolidation_balance_to_consume = + consolidation_balance_to_consume.safe_sub(consolidation_balance)?; + state.earliest_consolidation_epoch = earliest_consolidation_epoch; + Ok(state.earliest_consolidation_epoch) + } + BeaconState::Fulu(_) => { + let state = self.as_fulu_mut()?; + + // Consume the balance and update state variables. 
+ state.consolidation_balance_to_consume = + consolidation_balance_to_consume.safe_sub(consolidation_balance)?; + state.earliest_consolidation_epoch = earliest_consolidation_epoch; + Ok(state.earliest_consolidation_epoch) + } + } } #[allow(clippy::arithmetic_side_effects)] @@ -2339,6 +2420,14 @@ impl BeaconState { ); } (Self::Electra(_), _) => (), + (Self::Fulu(self_inner), Self::Fulu(base_inner)) => { + bimap_beacon_state_fulu_tree_list_fields!( + self_inner, + base_inner, + |_, self_field, base_field| { self_field.rebase_on(base_field) } + ); + } + (Self::Fulu(_), _) => (), } // Use sync committees from `base` if they are equal. @@ -2411,6 +2500,7 @@ impl BeaconState { ForkName::Capella => BeaconStateCapella::::NUM_FIELDS.next_power_of_two(), ForkName::Deneb => BeaconStateDeneb::::NUM_FIELDS.next_power_of_two(), ForkName::Electra => BeaconStateElectra::::NUM_FIELDS.next_power_of_two(), + ForkName::Fulu => BeaconStateFulu::::NUM_FIELDS.next_power_of_two(), } } @@ -2459,6 +2549,9 @@ impl BeaconState { Self::Electra(inner) => { map_beacon_state_electra_tree_list_fields!(inner, |_, x| { x.apply_updates() }) } + Self::Fulu(inner) => { + map_beacon_state_fulu_tree_list_fields!(inner, |_, x| { x.apply_updates() }) + } } Ok(()) } @@ -2554,6 +2647,11 @@ impl BeaconState { leaves.push(field.tree_hash_root()); }); } + BeaconState::Fulu(state) => { + map_beacon_state_fulu_fields!(state, |_, field| { + leaves.push(field.tree_hash_root()); + }); + } }; leaves @@ -2611,6 +2709,7 @@ impl CompareFields for BeaconState { (BeaconState::Capella(x), BeaconState::Capella(y)) => x.compare_fields(y), (BeaconState::Deneb(x), BeaconState::Deneb(y)) => x.compare_fields(y), (BeaconState::Electra(x), BeaconState::Electra(y)) => x.compare_fields(y), + (BeaconState::Fulu(x), BeaconState::Fulu(y)) => x.compare_fields(y), _ => panic!("compare_fields: mismatched state variants",), } } diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs 
b/consensus/types/src/beacon_state/progressive_balances_cache.rs index bc258ef68de..8e8a1a6aa94 100644 --- a/consensus/types/src/beacon_state/progressive_balances_cache.rs +++ b/consensus/types/src/beacon_state/progressive_balances_cache.rs @@ -285,12 +285,5 @@ impl ProgressiveBalancesCache { /// `ProgressiveBalancesCache` is only enabled from `Altair` as it uses Altair-specific logic. pub fn is_progressive_balances_enabled(state: &BeaconState) -> bool { - match state { - BeaconState::Base(_) => false, - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => true, - } + state.fork_name_unchecked().altair_enabled() } diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 9885f78474f..2ce46ca704b 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,8 +1,9 @@ use crate::beacon_block_body::KzgCommitments; use crate::{ ChainSpec, EthSpec, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, - ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderRef, - ExecutionPayloadHeaderRefMut, ForkName, ForkVersionDeserialize, SignedRoot, Uint256, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ForkName, ForkVersionDeserialize, + SignedRoot, Uint256, }; use bls::PublicKeyBytes; use bls::Signature; @@ -11,7 +12,7 @@ use superstruct::superstruct; use tree_hash_derive::TreeHash; #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone), serde(bound = "E: EthSpec", deny_unknown_fields) @@ -31,7 +32,9 @@ pub struct BuilderBid { pub header: ExecutionPayloadHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "header_electra"))] pub 
header: ExecutionPayloadHeaderElectra, - #[superstruct(only(Deneb, Electra))] + #[superstruct(only(Fulu), partial_getter(rename = "header_fulu"))] + pub header: ExecutionPayloadHeaderFulu, + #[superstruct(only(Deneb, Electra, Fulu))] pub blob_kzg_commitments: KzgCommitments, #[serde(with = "serde_utils::quoted_u256")] pub value: Uint256, @@ -85,6 +88,7 @@ impl ForkVersionDeserialize for BuilderBid { ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), ForkName::Electra => Self::Electra(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Fulu => Self::Fulu(serde_json::from_value(value).map_err(convert_err)?), ForkName::Base | ForkName::Altair => { return Err(serde::de::Error::custom(format!( "BuilderBid failed to deserialize: unsupported fork '{}'", diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 9d3308cf234..f0bfeba680b 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -192,6 +192,14 @@ pub struct ChainSpec { pub min_per_epoch_churn_limit_electra: u64, pub max_per_epoch_activation_exit_churn_limit: u64, + /* + * Fulu hard fork params + */ + pub fulu_fork_version: [u8; 4], + /// The Fulu fork epoch is optional, with `None` representing "Fulu never happens". + pub fulu_fork_epoch: Option, + pub fulu_placeholder: u64, + /* * DAS params */ @@ -313,17 +321,20 @@ impl ChainSpec { /// Returns the name of the fork which is active at `epoch`. 
pub fn fork_name_at_epoch(&self, epoch: Epoch) -> ForkName { - match self.electra_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Electra, - _ => match self.deneb_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Deneb, - _ => match self.capella_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Capella, - _ => match self.bellatrix_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Bellatrix, - _ => match self.altair_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, - _ => ForkName::Base, + match self.fulu_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Fulu, + _ => match self.electra_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Electra, + _ => match self.deneb_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Deneb, + _ => match self.capella_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Capella, + _ => match self.bellatrix_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Bellatrix, + _ => match self.altair_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, + _ => ForkName::Base, + }, }, }, }, @@ -340,6 +351,7 @@ impl ChainSpec { ForkName::Capella => self.capella_fork_version, ForkName::Deneb => self.deneb_fork_version, ForkName::Electra => self.electra_fork_version, + ForkName::Fulu => self.fulu_fork_version, } } @@ -352,6 +364,7 @@ impl ChainSpec { ForkName::Capella => self.capella_fork_epoch, ForkName::Deneb => self.deneb_fork_epoch, ForkName::Electra => self.electra_fork_epoch, + ForkName::Fulu => self.fulu_fork_epoch, } } @@ -802,6 +815,13 @@ impl ChainSpec { }) .expect("calculation does not overflow"), + /* + * Fulu hard fork params + */ + fulu_fork_version: [0x06, 0x00, 0x00, 0x00], + fulu_fork_epoch: None, + fulu_placeholder: 0, + /* * DAS params */ @@ -917,6 +937,9 @@ impl ChainSpec { u64::checked_pow(2, 
7)?.checked_mul(u64::checked_pow(10, 9)?) }) .expect("calculation does not overflow"), + // Fulu + fulu_fork_version: [0x06, 0x00, 0x00, 0x01], + fulu_fork_epoch: None, // PeerDAS eip7594_fork_epoch: None, // Other @@ -1121,6 +1144,13 @@ impl ChainSpec { }) .expect("calculation does not overflow"), + /* + * Fulu hard fork params + */ + fulu_fork_version: [0x06, 0x00, 0x00, 0x64], + fulu_fork_epoch: None, + fulu_placeholder: 0, + /* * DAS params */ @@ -1255,6 +1285,14 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub electra_fork_epoch: Option>, + #[serde(default = "default_fulu_fork_version")] + #[serde(with = "serde_utils::bytes_4_hex")] + fulu_fork_version: [u8; 4], + #[serde(default)] + #[serde(serialize_with = "serialize_fork_epoch")] + #[serde(deserialize_with = "deserialize_fork_epoch")] + pub fulu_fork_epoch: Option>, + #[serde(default)] #[serde(serialize_with = "serialize_fork_epoch")] #[serde(deserialize_with = "deserialize_fork_epoch")] @@ -1392,6 +1430,11 @@ fn default_electra_fork_version() -> [u8; 4] { [0xff, 0xff, 0xff, 0xff] } +fn default_fulu_fork_version() -> [u8; 4] { + // This value shouldn't be used. + [0xff, 0xff, 0xff, 0xff] +} + /// Placeholder value: 2^256-2^10 (115792089237316195423570985008687907853269984665640564039457584007913129638912). 
/// /// Taken from https://github.com/ethereum/consensus-specs/blob/d5e4828aecafaf1c57ef67a5f23c4ae7b08c5137/configs/mainnet.yaml#L15-L16 @@ -1655,6 +1698,11 @@ impl Config { .electra_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), + fulu_fork_version: spec.fulu_fork_version, + fulu_fork_epoch: spec + .fulu_fork_epoch + .map(|epoch| MaybeQuoted { value: epoch }), + eip7594_fork_epoch: spec .eip7594_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), @@ -1738,6 +1786,8 @@ impl Config { deneb_fork_version, electra_fork_epoch, electra_fork_version, + fulu_fork_epoch, + fulu_fork_version, eip7594_fork_epoch, seconds_per_slot, seconds_per_eth1_block, @@ -1801,6 +1851,8 @@ impl Config { deneb_fork_version, electra_fork_epoch: electra_fork_epoch.map(|q| q.value), electra_fork_version, + fulu_fork_epoch: fulu_fork_epoch.map(|q| q.value), + fulu_fork_version, eip7594_fork_epoch: eip7594_fork_epoch.map(|q| q.value), seconds_per_slot, seconds_per_eth1_block, diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index c80d678b2a3..235bf202382 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,6 +1,6 @@ use crate::{ consts::altair, consts::deneb, AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, - ChainSpec, Config, DenebPreset, ElectraPreset, EthSpec, ForkName, + ChainSpec, Config, DenebPreset, ElectraPreset, EthSpec, ForkName, FuluPreset, }; use maplit::hashmap; use serde::{Deserialize, Serialize}; @@ -12,7 +12,7 @@ use superstruct::superstruct; /// /// Mostly useful for the API. 
#[superstruct( - variants(Capella, Deneb, Electra), + variants(Deneb, Electra, Fulu), variant_attributes(derive(Serialize, Deserialize, Debug, PartialEq, Clone)) )] #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] @@ -29,12 +29,14 @@ pub struct ConfigAndPreset { pub bellatrix_preset: BellatrixPreset, #[serde(flatten)] pub capella_preset: CapellaPreset, - #[superstruct(only(Deneb, Electra))] #[serde(flatten)] pub deneb_preset: DenebPreset, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] #[serde(flatten)] pub electra_preset: ElectraPreset, + #[superstruct(only(Fulu))] + #[serde(flatten)] + pub fulu_preset: FuluPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. #[serde(flatten)] pub extra_fields: HashMap, @@ -48,16 +50,17 @@ impl ConfigAndPreset { let altair_preset = AltairPreset::from_chain_spec::(spec); let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); let capella_preset = CapellaPreset::from_chain_spec::(spec); + let deneb_preset = DenebPreset::from_chain_spec::(spec); let extra_fields = get_extra_fields(spec); - if spec.electra_fork_epoch.is_some() + if spec.fulu_fork_epoch.is_some() || fork_name.is_none() - || fork_name == Some(ForkName::Electra) + || fork_name == Some(ForkName::Fulu) { - let deneb_preset = DenebPreset::from_chain_spec::(spec); let electra_preset = ElectraPreset::from_chain_spec::(spec); + let fulu_preset = FuluPreset::from_chain_spec::(spec); - ConfigAndPreset::Electra(ConfigAndPresetElectra { + ConfigAndPreset::Fulu(ConfigAndPresetFulu { config, base_preset, altair_preset, @@ -65,29 +68,33 @@ impl ConfigAndPreset { capella_preset, deneb_preset, electra_preset, + fulu_preset, extra_fields, }) - } else if spec.deneb_fork_epoch.is_some() + } else if spec.electra_fork_epoch.is_some() || fork_name.is_none() - || fork_name == Some(ForkName::Deneb) + || fork_name == Some(ForkName::Electra) { - let deneb_preset = DenebPreset::from_chain_spec::(spec); - 
ConfigAndPreset::Deneb(ConfigAndPresetDeneb { + let electra_preset = ElectraPreset::from_chain_spec::(spec); + + ConfigAndPreset::Electra(ConfigAndPresetElectra { config, base_preset, altair_preset, bellatrix_preset, capella_preset, deneb_preset, + electra_preset, extra_fields, }) } else { - ConfigAndPreset::Capella(ConfigAndPresetCapella { + ConfigAndPreset::Deneb(ConfigAndPresetDeneb { config, base_preset, altair_preset, bellatrix_preset, capella_preset, + deneb_preset, extra_fields, }) } @@ -164,8 +171,8 @@ mod test { .write(false) .open(tmp_file.as_ref()) .expect("error while opening the file"); - let from: ConfigAndPresetElectra = + let from: ConfigAndPresetFulu = serde_yaml::from_reader(reader).expect("error while deserializing"); - assert_eq!(ConfigAndPreset::Electra(from), yamlconfig); + assert_eq!(ConfigAndPreset::Fulu(from), yamlconfig); } } diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 9f16b676a6a..c619d614871 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -15,7 +15,7 @@ pub type Transactions = VariableList< pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Default, @@ -82,12 +82,12 @@ pub struct ExecutionPayload { pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, - #[superstruct(only(Capella, Deneb, Electra))] + #[superstruct(only(Capella, Deneb, Electra, Fulu))] pub withdrawals: Withdrawals, - #[superstruct(only(Deneb, Electra), partial_getter(copy))] + #[superstruct(only(Deneb, Electra, Fulu), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub blob_gas_used: u64, - #[superstruct(only(Deneb, Electra), partial_getter(copy))] + #[superstruct(only(Deneb, Electra, Fulu), 
partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, } @@ -114,6 +114,7 @@ impl ExecutionPayload { ForkName::Capella => ExecutionPayloadCapella::from_ssz_bytes(bytes).map(Self::Capella), ForkName::Deneb => ExecutionPayloadDeneb::from_ssz_bytes(bytes).map(Self::Deneb), ForkName::Electra => ExecutionPayloadElectra::from_ssz_bytes(bytes).map(Self::Electra), + ForkName::Fulu => ExecutionPayloadFulu::from_ssz_bytes(bytes).map(Self::Fulu), } } @@ -166,6 +167,19 @@ impl ExecutionPayload { // Max size of variable length `withdrawals` field + (E::max_withdrawals_per_payload() * ::ssz_fixed_len()) } + + #[allow(clippy::arithmetic_side_effects)] + /// Returns the maximum size of an execution payload. + pub fn max_execution_payload_fulu_size() -> usize { + // Fixed part + ExecutionPayloadFulu::::default().as_ssz_bytes().len() + // Max size of variable length `extra_data` field + + (E::max_extra_data_bytes() * ::ssz_fixed_len()) + // Max size of variable length `transactions` field + + (E::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + E::max_bytes_per_transaction())) + // Max size of variable length `withdrawals` field + + (E::max_withdrawals_per_payload() * ::ssz_fixed_len()) + } } impl ForkVersionDeserialize for ExecutionPayload { @@ -184,6 +198,7 @@ impl ForkVersionDeserialize for ExecutionPayload { ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), ForkName::Electra => Self::Electra(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Fulu => Self::Fulu(serde_json::from_value(value).map_err(convert_err)?), ForkName::Base | ForkName::Altair => { return Err(serde::de::Error::custom(format!( "ExecutionPayload failed to deserialize: unsupported fork '{}'", @@ -201,6 +216,7 @@ impl ExecutionPayload { ExecutionPayload::Capella(_) => ForkName::Capella, ExecutionPayload::Deneb(_) => 
ForkName::Deneb, ExecutionPayload::Electra(_) => ForkName::Electra, + ExecutionPayload::Fulu(_) => ForkName::Fulu, } } } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 4bfbfee9bf0..3012041b8b1 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -8,7 +8,7 @@ use tree_hash::TreeHash; use tree_hash_derive::TreeHash; #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Default, @@ -78,12 +78,12 @@ pub struct ExecutionPayloadHeader { pub block_hash: ExecutionBlockHash, #[superstruct(getter(copy))] pub transactions_root: Hash256, - #[superstruct(only(Capella, Deneb, Electra), partial_getter(copy))] + #[superstruct(only(Capella, Deneb, Electra, Fulu), partial_getter(copy))] pub withdrawals_root: Hash256, - #[superstruct(only(Deneb, Electra), partial_getter(copy))] + #[superstruct(only(Deneb, Electra, Fulu), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub blob_gas_used: u64, - #[superstruct(only(Deneb, Electra), partial_getter(copy))] + #[superstruct(only(Deneb, Electra, Fulu), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, } @@ -108,18 +108,18 @@ impl ExecutionPayloadHeader { ForkName::Electra => { ExecutionPayloadHeaderElectra::from_ssz_bytes(bytes).map(Self::Electra) } + ForkName::Fulu => ExecutionPayloadHeaderFulu::from_ssz_bytes(bytes).map(Self::Fulu), } } #[allow(clippy::arithmetic_side_effects)] pub fn ssz_max_var_len_for_fork(fork_name: ForkName) -> usize { - // Matching here in case variable fields are added in future forks. 
- match fork_name { - ForkName::Base | ForkName::Altair => 0, - ForkName::Bellatrix | ForkName::Capella | ForkName::Deneb | ForkName::Electra => { - // Max size of variable length `extra_data` field - E::max_extra_data_bytes() * ::ssz_fixed_len() - } + // TODO(newfork): Add a new case here if there are new variable fields + if fork_name.bellatrix_enabled() { + // Max size of variable length `extra_data` field + E::max_extra_data_bytes() * ::ssz_fixed_len() + } else { + 0 } } @@ -129,6 +129,7 @@ impl ExecutionPayloadHeader { ExecutionPayloadHeader::Capella(_) => ForkName::Capella, ExecutionPayloadHeader::Deneb(_) => ForkName::Deneb, ExecutionPayloadHeader::Electra(_) => ForkName::Electra, + ExecutionPayloadHeader::Fulu(_) => ForkName::Fulu, } } } @@ -212,6 +213,30 @@ impl ExecutionPayloadHeaderDeneb { } } +impl ExecutionPayloadHeaderElectra { + pub fn upgrade_to_fulu(&self) -> ExecutionPayloadHeaderFulu { + ExecutionPayloadHeaderFulu { + parent_hash: self.parent_hash, + fee_recipient: self.fee_recipient, + state_root: self.state_root, + receipts_root: self.receipts_root, + logs_bloom: self.logs_bloom.clone(), + prev_randao: self.prev_randao, + block_number: self.block_number, + gas_limit: self.gas_limit, + gas_used: self.gas_used, + timestamp: self.timestamp, + extra_data: self.extra_data.clone(), + base_fee_per_gas: self.base_fee_per_gas, + block_hash: self.block_hash, + transactions_root: self.transactions_root, + withdrawals_root: self.withdrawals_root, + blob_gas_used: self.blob_gas_used, + excess_blob_gas: self.excess_blob_gas, + } + } +} + impl<'a, E: EthSpec> From<&'a ExecutionPayloadBellatrix> for ExecutionPayloadHeaderBellatrix { fn from(payload: &'a ExecutionPayloadBellatrix) -> Self { Self { @@ -303,6 +328,30 @@ impl<'a, E: EthSpec> From<&'a ExecutionPayloadElectra> for ExecutionPayloadHe } } +impl<'a, E: EthSpec> From<&'a ExecutionPayloadFulu> for ExecutionPayloadHeaderFulu { + fn from(payload: &'a ExecutionPayloadFulu) -> Self { + Self { + parent_hash: 
payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom.clone(), + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data.clone(), + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions_root: payload.transactions.tree_hash_root(), + withdrawals_root: payload.withdrawals.tree_hash_root(), + blob_gas_used: payload.blob_gas_used, + excess_blob_gas: payload.excess_blob_gas, + } + } +} + // These impls are required to work around an inelegance in `to_execution_payload_header`. // They only clone headers so they should be relatively cheap. impl<'a, E: EthSpec> From<&'a Self> for ExecutionPayloadHeaderBellatrix { @@ -329,6 +378,12 @@ impl<'a, E: EthSpec> From<&'a Self> for ExecutionPayloadHeaderElectra { } } +impl<'a, E: EthSpec> From<&'a Self> for ExecutionPayloadHeaderFulu { + fn from(payload: &'a Self) -> Self { + payload.clone() + } +} + impl<'a, E: EthSpec> From> for ExecutionPayloadHeader { fn from(payload: ExecutionPayloadRef<'a, E>) -> Self { map_execution_payload_ref_into_execution_payload_header!( @@ -387,6 +442,9 @@ impl ExecutionPayloadHeaderRefMut<'_, E> { ExecutionPayloadHeaderRefMut::Electra(mut_ref) => { *mut_ref = header.try_into()?; } + ExecutionPayloadHeaderRefMut::Fulu(mut_ref) => { + *mut_ref = header.try_into()?; + } } Ok(()) } @@ -404,6 +462,16 @@ impl TryFrom> for ExecutionPayloadHeaderEl } } +impl TryFrom> for ExecutionPayloadHeaderFulu { + type Error = BeaconStateError; + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::Fulu(execution_payload_header) => Ok(execution_payload_header), + _ => Err(BeaconStateError::IncorrectStateVariant), + } + } +} + impl ForkVersionDeserialize for ExecutionPayloadHeader { fn 
deserialize_by_fork<'de, D: serde::Deserializer<'de>>( value: serde_json::value::Value, @@ -423,6 +491,7 @@ impl ForkVersionDeserialize for ExecutionPayloadHeader { ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), ForkName::Electra => Self::Electra(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Fulu => Self::Fulu(serde_json::from_value(value).map_err(convert_err)?), ForkName::Base | ForkName::Altair => { return Err(serde::de::Error::custom(format!( "ExecutionPayloadHeader failed to deserialize: unsupported fork '{}'", diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 0f7f0eb769e..33f1c51d446 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -69,6 +69,13 @@ impl ForkContext { )); } + if spec.fulu_fork_epoch.is_some() { + fork_to_digest.push(( + ForkName::Fulu, + ChainSpec::compute_fork_digest(spec.fulu_fork_version, genesis_validators_root), + )); + } + let fork_to_digest: HashMap = fork_to_digest.into_iter().collect(); let digest_to_fork = fork_to_digest diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 51a5b3813ba..b61e0a4d4a5 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -17,6 +17,7 @@ pub enum ForkName { Capella, Deneb, Electra, + Fulu, } impl ForkName { @@ -28,6 +29,7 @@ impl ForkName { ForkName::Capella, ForkName::Deneb, ForkName::Electra, + ForkName::Fulu, ] } @@ -38,6 +40,7 @@ impl ForkName { (ForkName::Capella, spec.capella_fork_epoch), (ForkName::Deneb, spec.deneb_fork_epoch), (ForkName::Electra, spec.electra_fork_epoch), + (ForkName::Fulu, spec.fulu_fork_epoch), ] } @@ -57,6 +60,7 @@ impl ForkName { spec.capella_fork_epoch = None; spec.deneb_fork_epoch = None; spec.electra_fork_epoch = None; + spec.fulu_fork_epoch = None; spec } ForkName::Altair => { 
@@ -65,6 +69,7 @@ impl ForkName { spec.capella_fork_epoch = None; spec.deneb_fork_epoch = None; spec.electra_fork_epoch = None; + spec.fulu_fork_epoch = None; spec } ForkName::Bellatrix => { @@ -73,6 +78,7 @@ impl ForkName { spec.capella_fork_epoch = None; spec.deneb_fork_epoch = None; spec.electra_fork_epoch = None; + spec.fulu_fork_epoch = None; spec } ForkName::Capella => { @@ -81,6 +87,7 @@ impl ForkName { spec.capella_fork_epoch = Some(Epoch::new(0)); spec.deneb_fork_epoch = None; spec.electra_fork_epoch = None; + spec.fulu_fork_epoch = None; spec } ForkName::Deneb => { @@ -89,6 +96,7 @@ impl ForkName { spec.capella_fork_epoch = Some(Epoch::new(0)); spec.deneb_fork_epoch = Some(Epoch::new(0)); spec.electra_fork_epoch = None; + spec.fulu_fork_epoch = None; spec } ForkName::Electra => { @@ -97,6 +105,16 @@ impl ForkName { spec.capella_fork_epoch = Some(Epoch::new(0)); spec.deneb_fork_epoch = Some(Epoch::new(0)); spec.electra_fork_epoch = Some(Epoch::new(0)); + spec.fulu_fork_epoch = None; + spec + } + ForkName::Fulu => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.deneb_fork_epoch = Some(Epoch::new(0)); + spec.electra_fork_epoch = Some(Epoch::new(0)); + spec.fulu_fork_epoch = Some(Epoch::new(0)); spec } } @@ -113,6 +131,7 @@ impl ForkName { ForkName::Capella => Some(ForkName::Bellatrix), ForkName::Deneb => Some(ForkName::Capella), ForkName::Electra => Some(ForkName::Deneb), + ForkName::Fulu => Some(ForkName::Electra), } } @@ -126,7 +145,8 @@ impl ForkName { ForkName::Bellatrix => Some(ForkName::Capella), ForkName::Capella => Some(ForkName::Deneb), ForkName::Deneb => Some(ForkName::Electra), - ForkName::Electra => None, + ForkName::Electra => Some(ForkName::Fulu), + ForkName::Fulu => None, } } @@ -149,6 +169,10 @@ impl ForkName { pub fn electra_enabled(self) -> bool { self >= ForkName::Electra } + + pub fn fulu_enabled(self) -> bool { + self >= 
ForkName::Fulu + } } /// Map a fork name into a fork-versioned superstruct type like `BeaconBlock`. @@ -200,6 +224,10 @@ macro_rules! map_fork_name_with { let (value, extra_data) = $body; ($t::Electra(value), extra_data) } + ForkName::Fulu => { + let (value, extra_data) = $body; + ($t::Fulu(value), extra_data) + } } }; } @@ -215,6 +243,7 @@ impl FromStr for ForkName { "capella" => ForkName::Capella, "deneb" => ForkName::Deneb, "electra" => ForkName::Electra, + "fulu" => ForkName::Fulu, _ => return Err(format!("unknown fork name: {}", fork_name)), }) } @@ -229,6 +258,7 @@ impl Display for ForkName { ForkName::Capella => "capella".fmt(f), ForkName::Deneb => "deneb".fmt(f), ForkName::Electra => "electra".fmt(f), + ForkName::Fulu => "fulu".fmt(f), } } } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index dd304c6296c..282f27a5179 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -126,13 +126,13 @@ pub use crate::attester_slashing::{ }; pub use crate::beacon_block::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockCapella, - BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, - BlockImportSource, EmptyBlock, + BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockFulu, BeaconBlockRef, BeaconBlockRefMut, + BlindedBeaconBlock, BlockImportSource, EmptyBlock, }; pub use crate::beacon_block_body::{ BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyBellatrix, - BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyRef, - BeaconBlockBodyRefMut, + BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, + BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; @@ -142,7 +142,7 @@ pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use 
crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; pub use crate::config_and_preset::{ - ConfigAndPreset, ConfigAndPresetCapella, ConfigAndPresetDeneb, ConfigAndPresetElectra, + ConfigAndPreset, ConfigAndPresetDeneb, ConfigAndPresetElectra, ConfigAndPresetFulu, }; pub use crate::consolidation_request::ConsolidationRequest; pub use crate::contribution_and_proof::ContributionAndProof; @@ -163,12 +163,13 @@ pub use crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_block_header::{EncodableExecutionBlockHeader, ExecutionBlockHeader}; pub use crate::execution_payload::{ ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, ExecutionPayloadRef, Transaction, Transactions, Withdrawals, + ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadRef, Transaction, Transactions, + Withdrawals, }; pub use crate::execution_payload_header::{ ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, - ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderRef, - ExecutionPayloadHeaderRefMut, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; pub use crate::execution_requests::{ExecutionRequests, RequestPrefix}; pub use crate::fork::Fork; @@ -183,31 +184,33 @@ pub use crate::indexed_attestation::{ }; pub use crate::light_client_bootstrap::{ LightClientBootstrap, LightClientBootstrapAltair, LightClientBootstrapCapella, - LightClientBootstrapDeneb, LightClientBootstrapElectra, + LightClientBootstrapDeneb, LightClientBootstrapElectra, LightClientBootstrapFulu, }; pub use crate::light_client_finality_update::{ LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientFinalityUpdateCapella, LightClientFinalityUpdateDeneb, LightClientFinalityUpdateElectra, + LightClientFinalityUpdateFulu, }; 
pub use crate::light_client_header::{ LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, + LightClientHeaderElectra, LightClientHeaderFulu, }; pub use crate::light_client_optimistic_update::{ LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientOptimisticUpdateCapella, LightClientOptimisticUpdateDeneb, - LightClientOptimisticUpdateElectra, + LightClientOptimisticUpdateElectra, LightClientOptimisticUpdateFulu, }; pub use crate::light_client_update::{ Error as LightClientUpdateError, LightClientUpdate, LightClientUpdateAltair, - LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, MerkleProof, + LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, + LightClientUpdateFulu, MerkleProof, }; pub use crate::participation_flags::ParticipationFlags; pub use crate::payload::{ AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, - BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadRef, BlockType, ExecPayload, - FullPayload, FullPayloadBellatrix, FullPayloadCapella, FullPayloadDeneb, FullPayloadElectra, - FullPayloadRef, OwnedExecPayload, + BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, BlindedPayloadRef, BlockType, + ExecPayload, FullPayload, FullPayloadBellatrix, FullPayloadCapella, FullPayloadDeneb, + FullPayloadElectra, FullPayloadFulu, FullPayloadRef, OwnedExecPayload, }; pub use crate::pending_attestation::PendingAttestation; pub use crate::pending_consolidation::PendingConsolidation; @@ -215,6 +218,7 @@ pub use crate::pending_deposit::PendingDeposit; pub use crate::pending_partial_withdrawal::PendingPartialWithdrawal; pub use crate::preset::{ AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, + FuluPreset, }; pub use crate::proposer_preparation_data::ProposerPreparationData; pub use crate::proposer_slashing::ProposerSlashing; @@ -229,7 +233,7 
@@ pub use crate::signed_beacon_block::{ ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, - SignedBeaconBlockHash, SignedBlindedBeaconBlock, + SignedBeaconBlockFulu, SignedBeaconBlockHash, SignedBlindedBeaconBlock, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 21a7e5416f2..aa0d8836d14 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -2,7 +2,7 @@ use crate::{ light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec, EthSpec, FixedVector, ForkName, ForkVersionDeserialize, Hash256, LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, LightClientHeaderElectra, - SignedBlindedBeaconBlock, Slot, SyncCommittee, + LightClientHeaderFulu, SignedBlindedBeaconBlock, Slot, SyncCommittee, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; @@ -17,7 +17,7 @@ use tree_hash_derive::TreeHash; /// A LightClientBootstrap is the initializer we send over to light_client nodes /// that are trying to generate their basic storage when booting up. #[superstruct( - variants(Altair, Capella, Deneb, Electra), + variants(Altair, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -54,6 +54,8 @@ pub struct LightClientBootstrap { pub header: LightClientHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "header_electra"))] pub header: LightClientHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "header_fulu"))] + pub header: LightClientHeaderFulu, /// The `SyncCommittee` used in the requested period. 
pub current_sync_committee: Arc>, /// Merkle proof for sync committee @@ -63,7 +65,7 @@ pub struct LightClientBootstrap { )] pub current_sync_committee_branch: FixedVector, #[superstruct( - only(Electra), + only(Electra, Fulu), partial_getter(rename = "current_sync_committee_branch_electra") )] pub current_sync_committee_branch: FixedVector, @@ -79,6 +81,7 @@ impl LightClientBootstrap { Self::Capella(_) => func(ForkName::Capella), Self::Deneb(_) => func(ForkName::Deneb), Self::Electra(_) => func(ForkName::Electra), + Self::Fulu(_) => func(ForkName::Fulu), } } @@ -97,6 +100,7 @@ impl LightClientBootstrap { ForkName::Capella => Self::Capella(LightClientBootstrapCapella::from_ssz_bytes(bytes)?), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb::from_ssz_bytes(bytes)?), ForkName::Electra => Self::Electra(LightClientBootstrapElectra::from_ssz_bytes(bytes)?), + ForkName::Fulu => Self::Fulu(LightClientBootstrapFulu::from_ssz_bytes(bytes)?), ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientBootstrap decoding for {fork_name} not implemented" @@ -117,6 +121,7 @@ impl LightClientBootstrap { ForkName::Capella => as Encode>::ssz_fixed_len(), ForkName::Deneb => as Encode>::ssz_fixed_len(), ForkName::Electra => as Encode>::ssz_fixed_len(), + ForkName::Fulu => as Encode>::ssz_fixed_len(), }; fixed_len + LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } @@ -152,6 +157,11 @@ impl LightClientBootstrap { current_sync_committee, current_sync_committee_branch: current_sync_committee_branch.into(), }), + ForkName::Fulu => Self::Fulu(LightClientBootstrapFulu { + header: LightClientHeaderFulu::block_to_light_client_header(block)?, + current_sync_committee, + current_sync_committee_branch: current_sync_committee_branch.into(), + }), }; Ok(light_client_bootstrap) @@ -192,6 +202,11 @@ impl LightClientBootstrap { current_sync_committee, current_sync_committee_branch: current_sync_committee_branch.into(), }), + ForkName::Fulu => 
Self::Fulu(LightClientBootstrapFulu { + header: LightClientHeaderFulu::block_to_light_client_header(block)?, + current_sync_committee, + current_sync_committee_branch: current_sync_committee_branch.into(), + }), }; Ok(light_client_bootstrap) @@ -241,4 +256,10 @@ mod tests { use crate::{LightClientBootstrapElectra, MainnetEthSpec}; ssz_tests!(LightClientBootstrapElectra); } + + #[cfg(test)] + mod fulu { + use crate::{LightClientBootstrapFulu, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapFulu); + } } diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index ba2f2083cd9..ee3b53c853e 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -3,7 +3,7 @@ use crate::ChainSpec; use crate::{ light_client_update::*, test_utils::TestRandom, ForkName, ForkVersionDeserialize, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, SignedBlindedBeaconBlock, + LightClientHeaderElectra, LightClientHeaderFulu, SignedBlindedBeaconBlock, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; @@ -16,7 +16,7 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; #[superstruct( - variants(Altair, Capella, Deneb, Electra), + variants(Altair, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -53,6 +53,8 @@ pub struct LightClientFinalityUpdate { pub attested_header: LightClientHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "attested_header_electra"))] pub attested_header: LightClientHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "attested_header_fulu"))] + pub attested_header: LightClientHeaderFulu, /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). 
#[superstruct(only(Altair), partial_getter(rename = "finalized_header_altair"))] pub finalized_header: LightClientHeaderAltair, @@ -62,13 +64,18 @@ pub struct LightClientFinalityUpdate { pub finalized_header: LightClientHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "finalized_header_fulu"))] + pub finalized_header: LightClientHeaderFulu, /// Merkle proof attesting finalized header. #[superstruct( only(Altair, Capella, Deneb), partial_getter(rename = "finality_branch_altair") )] pub finality_branch: FixedVector, - #[superstruct(only(Electra), partial_getter(rename = "finality_branch_electra"))] + #[superstruct( + only(Electra, Fulu), + partial_getter(rename = "finality_branch_electra") + )] pub finality_branch: FixedVector, /// current sync aggregate pub sync_aggregate: SyncAggregate, @@ -135,6 +142,17 @@ impl LightClientFinalityUpdate { sync_aggregate, signature_slot, }), + ForkName::Fulu => Self::Fulu(LightClientFinalityUpdateFulu { + attested_header: LightClientHeaderFulu::block_to_light_client_header( + attested_block, + )?, + finalized_header: LightClientHeaderFulu::block_to_light_client_header( + finalized_block, + )?, + finality_branch: finality_branch.into(), + sync_aggregate, + signature_slot, + }), ForkName::Base => return Err(Error::AltairForkNotActive), }; @@ -151,6 +169,7 @@ impl LightClientFinalityUpdate { Self::Capella(_) => func(ForkName::Capella), Self::Deneb(_) => func(ForkName::Deneb), Self::Electra(_) => func(ForkName::Electra), + Self::Fulu(_) => func(ForkName::Fulu), } } @@ -173,6 +192,7 @@ impl LightClientFinalityUpdate { ForkName::Electra => { Self::Electra(LightClientFinalityUpdateElectra::from_ssz_bytes(bytes)?) 
} + ForkName::Fulu => Self::Fulu(LightClientFinalityUpdateFulu::from_ssz_bytes(bytes)?), ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientFinalityUpdate decoding for {fork_name} not implemented" @@ -193,6 +213,7 @@ impl LightClientFinalityUpdate { ForkName::Capella => as Encode>::ssz_fixed_len(), ForkName::Deneb => as Encode>::ssz_fixed_len(), ForkName::Electra => as Encode>::ssz_fixed_len(), + ForkName::Fulu => as Encode>::ssz_fixed_len(), }; // `2 *` because there are two headers in the update fixed_size + 2 * LightClientHeader::::ssz_max_var_len_for_fork(fork_name) @@ -255,4 +276,10 @@ mod tests { use crate::{LightClientFinalityUpdateElectra, MainnetEthSpec}; ssz_tests!(LightClientFinalityUpdateElectra); } + + #[cfg(test)] + mod fulu { + use crate::{LightClientFinalityUpdateFulu, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateFulu); + } } diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index 6655e0a093b..0be26a70360 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -4,7 +4,8 @@ use crate::ForkVersionDeserialize; use crate::{light_client_update::*, BeaconBlockBody}; use crate::{ test_utils::TestRandom, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, - ExecutionPayloadHeaderElectra, FixedVector, Hash256, SignedBlindedBeaconBlock, + ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, FixedVector, Hash256, + SignedBlindedBeaconBlock, }; use crate::{BeaconBlockHeader, ExecutionPayloadHeader}; use derivative::Derivative; @@ -17,7 +18,7 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; #[superstruct( - variants(Altair, Capella, Deneb, Electra), + variants(Altair, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -59,8 +60,10 @@ pub struct LightClientHeader { partial_getter(rename = "execution_payload_header_electra") )] pub execution: 
ExecutionPayloadHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_header_fulu"))] + pub execution: ExecutionPayloadHeaderFulu, - #[superstruct(only(Capella, Deneb, Electra))] + #[superstruct(only(Capella, Deneb, Electra, Fulu))] pub execution_branch: FixedVector, #[ssz(skip_serializing, skip_deserializing)] @@ -92,6 +95,9 @@ impl LightClientHeader { ForkName::Electra => LightClientHeader::Electra( LightClientHeaderElectra::block_to_light_client_header(block)?, ), + ForkName::Fulu => { + LightClientHeader::Fulu(LightClientHeaderFulu::block_to_light_client_header(block)?) + } }; Ok(header) } @@ -110,6 +116,9 @@ impl LightClientHeader { ForkName::Electra => { LightClientHeader::Electra(LightClientHeaderElectra::from_ssz_bytes(bytes)?) } + ForkName::Fulu => { + LightClientHeader::Fulu(LightClientHeaderFulu::from_ssz_bytes(bytes)?) + } ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientHeader decoding for {fork_name} not implemented" @@ -283,6 +292,48 @@ impl Default for LightClientHeaderElectra { } } +impl LightClientHeaderFulu { + pub fn block_to_light_client_header( + block: &SignedBlindedBeaconBlock, + ) -> Result { + let payload = block + .message() + .execution_payload()? + .execution_payload_fulu()?; + + let header = ExecutionPayloadHeaderFulu::from(payload); + let beacon_block_body = BeaconBlockBody::from( + block + .message() + .body_fulu() + .map_err(|_| Error::BeaconBlockBodyError)? 
+ .to_owned(), + ); + + let execution_branch = beacon_block_body + .to_ref() + .block_body_merkle_proof(EXECUTION_PAYLOAD_INDEX)?; + + Ok(LightClientHeaderFulu { + beacon: block.message().block_header(), + execution: header, + execution_branch: FixedVector::new(execution_branch)?, + _phantom_data: PhantomData, + }) + } +} + +impl Default for LightClientHeaderFulu { + fn default() -> Self { + Self { + beacon: BeaconBlockHeader::empty(), + execution: ExecutionPayloadHeaderFulu::default(), + execution_branch: FixedVector::default(), + _phantom_data: PhantomData, + } + } +} + impl ForkVersionDeserialize for LightClientHeader { fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( value: serde_json::value::Value, @@ -301,6 +352,9 @@ impl ForkVersionDeserialize for LightClientHeader { ForkName::Electra => serde_json::from_value(value) .map(|light_client_header| Self::Electra(light_client_header)) .map_err(serde::de::Error::custom), + ForkName::Fulu => serde_json::from_value(value) + .map(|light_client_header| Self::Fulu(light_client_header)) + .map_err(serde::de::Error::custom), ForkName::Base => Err(serde::de::Error::custom(format!( "LightClientHeader deserialization for {fork_name} not implemented" ))), diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 209388af87b..fcf357757b1 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -2,7 +2,8 @@ use super::{EthSpec, ForkName, ForkVersionDeserialize, LightClientHeader, Slot, use crate::test_utils::TestRandom; use crate::{ light_client_update::*, ChainSpec, LightClientHeaderAltair, LightClientHeaderCapella, - LightClientHeaderDeneb, LightClientHeaderElectra, SignedBlindedBeaconBlock, + LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, + SignedBlindedBeaconBlock, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; @@ 
-18,7 +19,7 @@ use tree_hash_derive::TreeHash; /// A LightClientOptimisticUpdate is the update we send on each slot, /// it is based off the current unfinalized epoch is verified only against BLS signature. #[superstruct( - variants(Altair, Capella, Deneb, Electra), + variants(Altair, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -55,6 +56,8 @@ pub struct LightClientOptimisticUpdate { pub attested_header: LightClientHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "attested_header_electra"))] pub attested_header: LightClientHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "attested_header_fulu"))] + pub attested_header: LightClientHeaderFulu, /// current sync aggregate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -102,6 +105,13 @@ impl LightClientOptimisticUpdate { sync_aggregate, signature_slot, }), + ForkName::Fulu => Self::Fulu(LightClientOptimisticUpdateFulu { + attested_header: LightClientHeaderFulu::block_to_light_client_header( + attested_block, + )?, + sync_aggregate, + signature_slot, + }), ForkName::Base => return Err(Error::AltairForkNotActive), }; @@ -117,6 +127,7 @@ impl LightClientOptimisticUpdate { Self::Capella(_) => func(ForkName::Capella), Self::Deneb(_) => func(ForkName::Deneb), Self::Electra(_) => func(ForkName::Electra), + Self::Fulu(_) => func(ForkName::Fulu), } } @@ -155,6 +166,7 @@ impl LightClientOptimisticUpdate { ForkName::Electra => { Self::Electra(LightClientOptimisticUpdateElectra::from_ssz_bytes(bytes)?) 
} + ForkName::Fulu => Self::Fulu(LightClientOptimisticUpdateFulu::from_ssz_bytes(bytes)?), ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientOptimisticUpdate decoding for {fork_name} not implemented" @@ -175,6 +187,7 @@ impl LightClientOptimisticUpdate { ForkName::Capella => as Encode>::ssz_fixed_len(), ForkName::Deneb => as Encode>::ssz_fixed_len(), ForkName::Electra => as Encode>::ssz_fixed_len(), + ForkName::Fulu => as Encode>::ssz_fixed_len(), }; fixed_len + LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } @@ -238,4 +251,10 @@ mod tests { use crate::{LightClientOptimisticUpdateElectra, MainnetEthSpec}; ssz_tests!(LightClientOptimisticUpdateElectra); } + + #[cfg(test)] + mod fulu { + use crate::{LightClientOptimisticUpdateFulu, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateFulu); + } } diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index c3a50e71c15..0dd91edc3c4 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -4,7 +4,7 @@ use crate::LightClientHeader; use crate::{ beacon_state, test_utils::TestRandom, ChainSpec, Epoch, ForkName, ForkVersionDeserialize, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - SignedBlindedBeaconBlock, + LightClientHeaderFulu, SignedBlindedBeaconBlock, }; use derivative::Derivative; use safe_arith::ArithError; @@ -100,7 +100,7 @@ impl From for Error { /// or to sync up to the last committee period, we need to have one ready for each ALTAIR period /// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD]. 
#[superstruct( - variants(Altair, Capella, Deneb, Electra), + variants(Altair, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -137,6 +137,8 @@ pub struct LightClientUpdate { pub attested_header: LightClientHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "attested_header_electra"))] pub attested_header: LightClientHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "attested_header_fulu"))] + pub attested_header: LightClientHeaderFulu, /// The `SyncCommittee` used in the next period. pub next_sync_committee: Arc>, // Merkle proof for next sync committee @@ -146,7 +148,7 @@ pub struct LightClientUpdate { )] pub next_sync_committee_branch: NextSyncCommitteeBranch, #[superstruct( - only(Electra), + only(Electra, Fulu), partial_getter(rename = "next_sync_committee_branch_electra") )] pub next_sync_committee_branch: NextSyncCommitteeBranchElectra, @@ -159,13 +161,18 @@ pub struct LightClientUpdate { pub finalized_header: LightClientHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "finalized_header_fulu"))] + pub finalized_header: LightClientHeaderFulu, /// Merkle proof attesting finalized header. 
#[superstruct( only(Altair, Capella, Deneb), partial_getter(rename = "finality_branch_altair") )] pub finality_branch: FinalityBranch, - #[superstruct(only(Electra), partial_getter(rename = "finality_branch_electra"))] + #[superstruct( + only(Electra, Fulu), + partial_getter(rename = "finality_branch_electra") + )] pub finality_branch: FinalityBranchElectra, /// current sync aggreggate pub sync_aggregate: SyncAggregate, @@ -285,6 +292,26 @@ impl LightClientUpdate { sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) + } + ForkName::Fulu => { + let attested_header = + LightClientHeaderFulu::block_to_light_client_header(attested_block)?; + + let finalized_header = if let Some(finalized_block) = finalized_block { + LightClientHeaderFulu::block_to_light_client_header(finalized_block)? + } else { + LightClientHeaderFulu::default() + }; + + Self::Fulu(LightClientUpdateFulu { + attested_header, + next_sync_committee, + next_sync_committee_branch: next_sync_committee_branch.into(), + finalized_header, + finality_branch: finality_branch.into(), + sync_aggregate: sync_aggregate.clone(), + signature_slot: block_slot, + }) } // To add a new fork, just append the new fork variant on the latest fork. 
Forks that // have a distinct execution header will need a new LightClientUpdate variant only // if you need to test or support lightclient usages @@ -301,6 +328,7 @@ impl LightClientUpdate { ForkName::Capella => Self::Capella(LightClientUpdateCapella::from_ssz_bytes(bytes)?), ForkName::Deneb => Self::Deneb(LightClientUpdateDeneb::from_ssz_bytes(bytes)?), ForkName::Electra => Self::Electra(LightClientUpdateElectra::from_ssz_bytes(bytes)?), + ForkName::Fulu => Self::Fulu(LightClientUpdateFulu::from_ssz_bytes(bytes)?), ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientUpdate decoding for {fork_name} not implemented" @@ -317,6 +345,7 @@ impl LightClientUpdate { LightClientUpdate::Capella(update) => update.attested_header.beacon.slot, LightClientUpdate::Deneb(update) => update.attested_header.beacon.slot, LightClientUpdate::Electra(update) => update.attested_header.beacon.slot, + LightClientUpdate::Fulu(update) => update.attested_header.beacon.slot, } } @@ -326,6 +355,7 @@ impl LightClientUpdate { LightClientUpdate::Capella(update) => update.finalized_header.beacon.slot, LightClientUpdate::Deneb(update) => update.finalized_header.beacon.slot, LightClientUpdate::Electra(update) => update.finalized_header.beacon.slot, + LightClientUpdate::Fulu(update) => update.finalized_header.beacon.slot, } } @@ -445,6 +475,7 @@ impl LightClientUpdate { ForkName::Capella => as Encode>::ssz_fixed_len(), ForkName::Deneb => as Encode>::ssz_fixed_len(), ForkName::Electra => as Encode>::ssz_fixed_len(), + ForkName::Fulu => as Encode>::ssz_fixed_len(), }; fixed_len + 2 * LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } @@ -458,6 +489,7 @@ impl LightClientUpdate { Self::Capella(_) => func(ForkName::Capella), Self::Deneb(_) => func(ForkName::Deneb), Self::Electra(_) => func(ForkName::Electra), + Self::Fulu(_) => func(ForkName::Fulu), } } } @@ -513,6 +545,13 @@ mod tests { ssz_tests!(LightClientUpdateElectra); } + #[cfg(test)] + mod fulu { + use 
super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateFulu); + } + #[test] fn finalized_root_params() { assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32) <= FINALIZED_ROOT_INDEX); diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index e68801840af..abc9afd34c8 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -84,13 +84,15 @@ pub trait AbstractExecPayload: + TryInto + TryInto + TryInto + + TryInto { type Ref<'a>: ExecPayload + Copy + From<&'a Self::Bellatrix> + From<&'a Self::Capella> + From<&'a Self::Deneb> - + From<&'a Self::Electra>; + + From<&'a Self::Electra> + + From<&'a Self::Fulu>; type Bellatrix: OwnedExecPayload + Into @@ -108,10 +110,14 @@ pub trait AbstractExecPayload: + Into + for<'a> From>> + TryFrom>; + type Fulu: OwnedExecPayload + + Into + + for<'a> From>> + + TryFrom>; } #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -157,6 +163,8 @@ pub struct FullPayload { pub execution_payload: ExecutionPayloadDeneb, #[superstruct(only(Electra), partial_getter(rename = "execution_payload_electra"))] pub execution_payload: ExecutionPayloadElectra, + #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_fulu"))] + pub execution_payload: ExecutionPayloadFulu, } impl From> for ExecutionPayload { @@ -273,6 +281,9 @@ impl ExecPayload for FullPayload { FullPayload::Electra(ref inner) => { Ok(inner.execution_payload.withdrawals.tree_hash_root()) } + FullPayload::Fulu(ref inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } } } @@ -283,6 +294,7 @@ impl ExecPayload for FullPayload { } FullPayload::Deneb(ref inner) => Ok(inner.execution_payload.blob_gas_used), FullPayload::Electra(ref inner) => Ok(inner.execution_payload.blob_gas_used), + FullPayload::Fulu(ref inner) => Ok(inner.execution_payload.blob_gas_used), } } @@ -313,6 +325,7 @@ impl FullPayload 
{ ForkName::Capella => Ok(FullPayloadCapella::default().into()), ForkName::Deneb => Ok(FullPayloadDeneb::default().into()), ForkName::Electra => Ok(FullPayloadElectra::default().into()), + ForkName::Fulu => Ok(FullPayloadFulu::default().into()), } } } @@ -412,6 +425,7 @@ impl ExecPayload for FullPayloadRef<'_, E> { FullPayloadRef::Electra(inner) => { Ok(inner.execution_payload.withdrawals.tree_hash_root()) } + FullPayloadRef::Fulu(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), } } @@ -422,6 +436,7 @@ impl ExecPayload for FullPayloadRef<'_, E> { } FullPayloadRef::Deneb(inner) => Ok(inner.execution_payload.blob_gas_used), FullPayloadRef::Electra(inner) => Ok(inner.execution_payload.blob_gas_used), + FullPayloadRef::Fulu(inner) => Ok(inner.execution_payload.blob_gas_used), } } @@ -444,6 +459,7 @@ impl AbstractExecPayload for FullPayload { type Capella = FullPayloadCapella; type Deneb = FullPayloadDeneb; type Electra = FullPayloadElectra; + type Fulu = FullPayloadFulu; } impl From> for FullPayload { @@ -462,7 +478,7 @@ impl TryFrom> for FullPayload { } #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -507,6 +523,8 @@ pub struct BlindedPayload { pub execution_payload_header: ExecutionPayloadHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "execution_payload_electra"))] pub execution_payload_header: ExecutionPayloadHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_fulu"))] + pub execution_payload_header: ExecutionPayloadHeaderFulu, } impl<'a, E: EthSpec> From> for BlindedPayload { @@ -599,6 +617,7 @@ impl ExecPayload for BlindedPayload { BlindedPayload::Electra(ref inner) => { Ok(inner.execution_payload_header.withdrawals_root) } + BlindedPayload::Fulu(ref inner) => Ok(inner.execution_payload_header.withdrawals_root), } } @@ -609,6 +628,7 @@ impl ExecPayload for BlindedPayload { } 
BlindedPayload::Deneb(ref inner) => Ok(inner.execution_payload_header.blob_gas_used), BlindedPayload::Electra(ref inner) => Ok(inner.execution_payload_header.blob_gas_used), + BlindedPayload::Fulu(ref inner) => Ok(inner.execution_payload_header.blob_gas_used), } } @@ -707,6 +727,7 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { BlindedPayloadRef::Electra(inner) => { Ok(inner.execution_payload_header.withdrawals_root) } + BlindedPayloadRef::Fulu(inner) => Ok(inner.execution_payload_header.withdrawals_root), } } @@ -717,6 +738,7 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { } BlindedPayloadRef::Deneb(inner) => Ok(inner.execution_payload_header.blob_gas_used), BlindedPayloadRef::Electra(inner) => Ok(inner.execution_payload_header.blob_gas_used), + BlindedPayloadRef::Fulu(inner) => Ok(inner.execution_payload_header.blob_gas_used), } } @@ -1020,6 +1042,13 @@ impl_exec_payload_for_fork!( ExecutionPayloadElectra, Electra ); +impl_exec_payload_for_fork!( + BlindedPayloadFulu, + FullPayloadFulu, + ExecutionPayloadHeaderFulu, + ExecutionPayloadFulu, + Fulu +); impl AbstractExecPayload for BlindedPayload { type Ref<'a> = BlindedPayloadRef<'a, E>; @@ -1027,6 +1056,7 @@ impl AbstractExecPayload for BlindedPayload { type Capella = BlindedPayloadCapella; type Deneb = BlindedPayloadDeneb; type Electra = BlindedPayloadElectra; + type Fulu = BlindedPayloadFulu; } impl From> for BlindedPayload { @@ -1063,6 +1093,11 @@ impl From> for BlindedPayload { execution_payload_header, }) } + ExecutionPayloadHeader::Fulu(execution_payload_header) => { + Self::Fulu(BlindedPayloadFulu { + execution_payload_header, + }) + } } } } @@ -1082,6 +1117,9 @@ impl From> for ExecutionPayloadHeader { BlindedPayload::Electra(blinded_payload) => { ExecutionPayloadHeader::Electra(blinded_payload.execution_payload_header) } + BlindedPayload::Fulu(blinded_payload) => { + ExecutionPayloadHeader::Fulu(blinded_payload.execution_payload_header) + } } } } diff --git 
a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index b469b7b777a..f8b36654093 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -276,6 +276,21 @@ impl ElectraPreset { } } +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub struct FuluPreset { + #[serde(with = "serde_utils::quoted_u64")] + pub fulu_placeholder: u64, +} + +impl FuluPreset { + pub fn from_chain_spec(spec: &ChainSpec) -> Self { + Self { + fulu_placeholder: spec.fulu_placeholder, + } + } +} + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct Eip7594Preset { @@ -343,6 +358,9 @@ mod test { let electra: ElectraPreset = preset_from_file(&preset_name, "electra.yaml"); assert_eq!(electra, ElectraPreset::from_chain_spec::(&spec)); + let fulu: FuluPreset = preset_from_file(&preset_name, "fulu.yaml"); + assert_eq!(fulu, FuluPreset::from_chain_spec::(&spec)); + let eip7594: Eip7594Preset = preset_from_file(&preset_name, "eip7594.yaml"); assert_eq!(eip7594, Eip7594Preset::from_chain_spec::(&spec)); } diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index bb5e1ea34b7..d9bf9bf55dd 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -38,7 +38,7 @@ impl From for Hash256 { /// A `BeaconBlock` and a signature from its proposer. 
#[superstruct( - variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), + variants(Base, Altair, Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -81,6 +81,8 @@ pub struct SignedBeaconBlock = FullP pub message: BeaconBlockDeneb, #[superstruct(only(Electra), partial_getter(rename = "message_electra"))] pub message: BeaconBlockElectra, + #[superstruct(only(Fulu), partial_getter(rename = "message_fulu"))] + pub message: BeaconBlockFulu, pub signature: Signature, } @@ -163,6 +165,9 @@ impl> SignedBeaconBlock BeaconBlock::Electra(message) => { SignedBeaconBlock::Electra(SignedBeaconBlockElectra { message, signature }) } + BeaconBlock::Fulu(message) => { + SignedBeaconBlock::Fulu(SignedBeaconBlockFulu { message, signature }) + } } } @@ -570,6 +575,64 @@ impl SignedBeaconBlockElectra> { } } +impl SignedBeaconBlockFulu> { + pub fn into_full_block( + self, + execution_payload: ExecutionPayloadFulu, + ) -> SignedBeaconBlockFulu> { + let SignedBeaconBlockFulu { + message: + BeaconBlockFulu { + slot, + proposer_index, + parent_root, + state_root, + body: + BeaconBlockBodyFulu { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadFulu { .. 
}, + bls_to_execution_changes, + blob_kzg_commitments, + execution_requests, + }, + }, + signature, + } = self; + SignedBeaconBlockFulu { + message: BeaconBlockFulu { + slot, + proposer_index, + parent_root, + state_root, + body: BeaconBlockBodyFulu { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadFulu { execution_payload }, + bls_to_execution_changes, + blob_kzg_commitments, + execution_requests, + }, + }, + signature, + } + } +} + impl SignedBeaconBlock> { pub fn try_into_full_block( self, @@ -590,12 +653,16 @@ impl SignedBeaconBlock> { (SignedBeaconBlock::Electra(block), Some(ExecutionPayload::Electra(payload))) => { SignedBeaconBlock::Electra(block.into_full_block(payload)) } + (SignedBeaconBlock::Fulu(block), Some(ExecutionPayload::Fulu(payload))) => { + SignedBeaconBlock::Fulu(block.into_full_block(payload)) + } // avoid wildcard matching forks so that compiler will // direct us here when a new fork has been added (SignedBeaconBlock::Bellatrix(_), _) => return None, (SignedBeaconBlock::Capella(_), _) => return None, (SignedBeaconBlock::Deneb(_), _) => return None, (SignedBeaconBlock::Electra(_), _) => return None, + (SignedBeaconBlock::Fulu(_), _) => return None, }; Some(full_block) } @@ -741,6 +808,9 @@ pub mod ssz_tagged_signed_beacon_block { ForkName::Electra => Ok(SignedBeaconBlock::Electra( SignedBeaconBlockElectra::from_ssz_bytes(body)?, )), + ForkName::Fulu => Ok(SignedBeaconBlock::Fulu( + SignedBeaconBlockFulu::from_ssz_bytes(body)?, + )), } } } @@ -841,8 +911,9 @@ mod test { ), SignedBeaconBlock::from_block( BeaconBlock::Electra(BeaconBlockElectra::empty(spec)), - sig, + sig.clone(), ), + SignedBeaconBlock::from_block(BeaconBlock::Fulu(BeaconBlockFulu::empty(spec)), sig), ]; for block in blocks { diff --git a/lcli/src/mock_el.rs b/lcli/src/mock_el.rs index 8d3220b1df8..7719f02aa33 100644 --- a/lcli/src/mock_el.rs +++ 
b/lcli/src/mock_el.rs @@ -19,6 +19,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< let shanghai_time = parse_required(matches, "shanghai-time")?; let cancun_time = parse_optional(matches, "cancun-time")?; let prague_time = parse_optional(matches, "prague-time")?; + let osaka_time = parse_optional(matches, "osaka-time")?; let handle = env.core_context().executor.handle().unwrap(); let spec = &E::default_spec(); @@ -37,6 +38,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< shanghai_time: Some(shanghai_time), cancun_time, prague_time, + osaka_time, }; let kzg = None; let server: MockServer = MockServer::new_with_config(&handle, config, kzg); diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index e16f5b257f9..62f834820fd 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -66,6 +66,7 @@ pub fn previous_fork(fork_name: ForkName) -> ForkName { ForkName::Capella => ForkName::Bellatrix, ForkName::Deneb => ForkName::Capella, ForkName::Electra => ForkName::Deneb, + ForkName::Fulu => ForkName::Electra, } } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index c1adf107704..e05225c1715 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -100,47 +100,35 @@ type_name!(ParticipationFlagUpdates, "participation_flag_updates"); impl EpochTransition for JustificationAndFinalization { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - match state { - BeaconState::Base(_) => { - let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; - validator_statuses.process_attestations(state)?; - let justification_and_finalization_state = - base::process_justification_and_finalization( - state, - &validator_statuses.total_balances, - spec, - )?; - 
justification_and_finalization_state.apply_changes_to_state(state); - Ok(()) - } - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => { - initialize_progressive_balances_cache(state, spec)?; - let justification_and_finalization_state = - altair::process_justification_and_finalization(state)?; - justification_and_finalization_state.apply_changes_to_state(state); - Ok(()) - } + if state.fork_name_unchecked().altair_enabled() { + initialize_progressive_balances_cache(state, spec)?; + let justification_and_finalization_state = + altair::process_justification_and_finalization(state)?; + justification_and_finalization_state.apply_changes_to_state(state); + Ok(()) + } else { + let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; + validator_statuses.process_attestations(state)?; + let justification_and_finalization_state = + base::process_justification_and_finalization( + state, + &validator_statuses.total_balances, + spec, + )?; + justification_and_finalization_state.apply_changes_to_state(state); + Ok(()) } } } impl EpochTransition for RewardsAndPenalties { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - match state { - BeaconState::Base(_) => { - let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; - validator_statuses.process_attestations(state)?; - base::process_rewards_and_penalties(state, &validator_statuses, spec) - } - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => altair::process_rewards_and_penalties_slow(state, spec), + if state.fork_name_unchecked().altair_enabled() { + altair::process_rewards_and_penalties_slow(state, spec) + } else { + let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; + validator_statuses.process_attestations(state)?; + base::process_rewards_and_penalties(state, 
&validator_statuses, spec) } } } @@ -159,24 +147,17 @@ impl EpochTransition for RegistryUpdates { impl EpochTransition for Slashings { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - match state { - BeaconState::Base(_) => { - let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; - validator_statuses.process_attestations(state)?; - process_slashings( - state, - validator_statuses.total_balances.current_epoch(), - spec, - )?; - } - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => { - process_slashings_slow(state, spec)?; - } - }; + if state.fork_name_unchecked().altair_enabled() { + process_slashings_slow(state, spec)?; + } else { + let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; + validator_statuses.process_attestations(state)?; + process_slashings( + state, + validator_statuses.total_balances.current_epoch(), + spec, + )?; + } Ok(()) } } @@ -251,11 +232,10 @@ impl EpochTransition for HistoricalRootsUpdate { impl EpochTransition for HistoricalSummariesUpdate { fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { - match state { - BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => { - process_historical_summaries_update(state) - } - _ => Ok(()), + if state.fork_name_unchecked().capella_enabled() { + process_historical_summaries_update(state) + } else { + Ok(()) } } } @@ -272,39 +252,30 @@ impl EpochTransition for ParticipationRecordUpdates { impl EpochTransition for SyncCommitteeUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - match state { - BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => altair::process_sync_committee_updates(state, spec), + if 
state.fork_name_unchecked().altair_enabled() { + altair::process_sync_committee_updates(state, spec) + } else { + Ok(()) } } } impl EpochTransition for InactivityUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - match state { - BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => altair::process_inactivity_updates_slow(state, spec), + if state.fork_name_unchecked().altair_enabled() { + altair::process_inactivity_updates_slow(state, spec) + } else { + Ok(()) } } } impl EpochTransition for ParticipationFlagUpdates { fn run(state: &mut BeaconState, _: &ChainSpec) -> Result<(), EpochProcessingError> { - match state { - BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => altair::process_participation_flag_updates(state), + if state.fork_name_unchecked().altair_enabled() { + altair::process_participation_flag_updates(state) + } else { + Ok(()) } } } diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index 132cfb4c0ae..85301e22f6d 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -5,7 +5,7 @@ use crate::decode::{ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use state_processing::upgrade::{ upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, - upgrade_to_electra, + upgrade_to_electra, upgrade_to_fulu, }; use types::BeaconState; @@ -69,6 +69,7 @@ impl Case for ForkTest { ForkName::Capella => upgrade_to_capella(&mut result_state, spec).map(|_| result_state), ForkName::Deneb => upgrade_to_deneb(&mut result_state, spec).map(|_| result_state), ForkName::Electra => upgrade_to_electra(&mut result_state, spec).map(|_| result_state), + ForkName::Fulu => upgrade_to_fulu(&mut 
result_state, spec).map(|_| result_state), }; compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index 49c07197848..109d2cc7969 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -4,7 +4,7 @@ use serde::Deserialize; use tree_hash::Hash256; use types::{ light_client_update, BeaconBlockBody, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, - BeaconBlockBodyElectra, BeaconState, FixedVector, FullPayload, Unsigned, + BeaconBlockBodyElectra, BeaconBlockBodyFulu, BeaconState, FixedVector, FullPayload, Unsigned, }; #[derive(Debug, Clone, Deserialize)] @@ -131,6 +131,9 @@ impl LoadCase for KzgInclusionMerkleProofValidity { ssz_decode_file::>(&path.join("object.ssz_snappy"))? .into() } + ForkName::Fulu => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))?.into() + } }; let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?; // Metadata does not exist in these tests but it is left like this just in case. @@ -246,6 +249,9 @@ impl LoadCase for BeaconBlockBodyMerkleProofValidity { ssz_decode_file::>(&path.join("object.ssz_snappy"))? .into() } + ForkName::Fulu => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))?.into() + } }; let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?; // Metadata does not exist in these tests but it is left like this just in case. 
diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index d8cade296bc..adb5bee7681 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -98,29 +98,24 @@ impl Operation for Attestation { ) -> Result<(), BlockProcessingError> { initialize_epoch_cache(state, spec)?; let mut ctxt = ConsensusContext::new(state.slot()); - match state { - BeaconState::Base(_) => base::process_attestations( + if state.fork_name_unchecked().altair_enabled() { + initialize_progressive_balances_cache(state, spec)?; + altair_deneb::process_attestation( + state, + self.to_ref(), + 0, + &mut ctxt, + VerifySignatures::True, + spec, + ) + } else { + base::process_attestations( state, [self.clone().to_ref()].into_iter(), VerifySignatures::True, &mut ctxt, spec, - ), - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => { - initialize_progressive_balances_cache(state, spec)?; - altair_deneb::process_attestation( - state, - self.to_ref(), - 0, - &mut ctxt, - VerifySignatures::True, - spec, - ) - } + ) } } } @@ -131,14 +126,11 @@ impl Operation for AttesterSlashing { } fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { - Ok(match fork_name { - ForkName::Base - | ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella - | ForkName::Deneb => Self::Base(ssz_decode_file(path)?), - ForkName::Electra => Self::Electra(ssz_decode_file(path)?), - }) + if fork_name.electra_enabled() { + Ok(Self::Electra(ssz_decode_file(path)?)) + } else { + Ok(Self::Base(ssz_decode_file(path)?)) + } } fn apply_to( @@ -308,6 +300,7 @@ impl Operation for BeaconBlockBody> { ForkName::Capella => BeaconBlockBody::Capella(<_>::from_ssz_bytes(bytes)?), ForkName::Deneb => BeaconBlockBody::Deneb(<_>::from_ssz_bytes(bytes)?), ForkName::Electra => BeaconBlockBody::Electra(<_>::from_ssz_bytes(bytes)?), + ForkName::Fulu => 
BeaconBlockBody::Fulu(<_>::from_ssz_bytes(bytes)?), _ => panic!(), }) }) @@ -363,6 +356,10 @@ impl Operation for BeaconBlockBody> { let inner = >>::from_ssz_bytes(bytes)?; BeaconBlockBody::Electra(inner.clone_as_blinded()) } + ForkName::Fulu => { + let inner = >>::from_ssz_bytes(bytes)?; + BeaconBlockBody::Electra(inner.clone_as_blinded()) + } _ => panic!(), }) }) diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index dc5029d53e7..6d037dae87d 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -60,6 +60,14 @@ impl LoadCase for TransitionTest { spec.deneb_fork_epoch = Some(Epoch::new(0)); spec.electra_fork_epoch = Some(metadata.fork_epoch); } + ForkName::Fulu => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.deneb_fork_epoch = Some(Epoch::new(0)); + spec.electra_fork_epoch = Some(Epoch::new(0)); + spec.fulu_fork_epoch = Some(metadata.fork_epoch); + } } // Load blocks diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index f4a09de32cb..e7c148645c4 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -24,7 +24,7 @@ pub trait Handler { // Add forks here to exclude them from EF spec testing. Helpful for adding future or // unspecified forks. 
fn disabled_forks(&self) -> Vec { - vec![] + vec![ForkName::Fulu] } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { @@ -287,6 +287,10 @@ impl SszStaticHandler { Self::for_forks(vec![ForkName::Electra]) } + pub fn fulu_only() -> Self { + Self::for_forks(vec![ForkName::Fulu]) + } + pub fn altair_and_later() -> Self { Self::for_forks(ForkName::list_all()[1..].to_vec()) } @@ -307,6 +311,10 @@ impl SszStaticHandler { Self::for_forks(ForkName::list_all()[5..].to_vec()) } + pub fn fulu_and_later() -> Self { + Self::for_forks(ForkName::list_all()[6..].to_vec()) + } + pub fn pre_electra() -> Self { Self::for_forks(ForkName::list_all()[0..5].to_vec()) } diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index 8f659a893f3..82a70285827 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -22,7 +22,8 @@ const ALTAIR_FORK_EPOCH: u64 = 0; const BELLATRIX_FORK_EPOCH: u64 = 0; const CAPELLA_FORK_EPOCH: u64 = 1; const DENEB_FORK_EPOCH: u64 = 2; -//const ELECTRA_FORK_EPOCH: u64 = 3; +// const ELECTRA_FORK_EPOCH: u64 = 3; +// const FULU_FORK_EPOCH: u64 = 4; const SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; @@ -118,6 +119,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH)); spec.deneb_fork_epoch = Some(Epoch::new(DENEB_FORK_EPOCH)); //spec.electra_fork_epoch = Some(Epoch::new(ELECTRA_FORK_EPOCH)); + //spec.fulu_fork_epoch = Some(Epoch::new(FULU_FORK_EPOCH)); let spec = Arc::new(spec); env.eth2_config.spec = spec.clone(); diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index b3b9a460018..7d4bdfa264e 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -21,7 +21,8 @@ const ALTAIR_FORK_EPOCH: u64 = 0; const BELLATRIX_FORK_EPOCH: u64 = 0; const CAPELLA_FORK_EPOCH: u64 = 1; const 
DENEB_FORK_EPOCH: u64 = 2; -//const ELECTRA_FORK_EPOCH: u64 = 3; +// const ELECTRA_FORK_EPOCH: u64 = 3; +// const FULU_FORK_EPOCH: u64 = 4; // Since simulator tests are non-deterministic and there is a non-zero chance of missed // attestations, define an acceptable network-wide attestation performance. @@ -123,6 +124,7 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH)); spec.deneb_fork_epoch = Some(Epoch::new(DENEB_FORK_EPOCH)); //spec.electra_fork_epoch = Some(Epoch::new(ELECTRA_FORK_EPOCH)); + //spec.fulu_fork_epoch = Some(Epoch::new(FULU_FORK_EPOCH)); let spec = Arc::new(spec); env.eth2_config.spec = spec.clone(); diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 59efc09baa6..a95c15c2313 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -88,6 +88,11 @@ fn default_mock_execution_config( + spec.seconds_per_slot * E::slots_per_epoch() * electra_fork_epoch.as_u64(), ) } + if let Some(fulu_fork_epoch) = spec.fulu_fork_epoch { + mock_execution_config.osaka_time = Some( + genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * fulu_fork_epoch.as_u64(), + ) + } mock_execution_config } diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index 95a221f1897..beae176193c 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -361,6 +361,14 @@ impl CandidateBeaconNode { "endpoint_electra_fork_epoch" => ?beacon_node_spec.electra_fork_epoch, "hint" => UPDATE_REQUIRED_LOG_HINT, ); + } else if beacon_node_spec.fulu_fork_epoch != spec.fulu_fork_epoch { + warn!( + log, + "Beacon node has mismatched Fulu fork epoch"; + "endpoint" => %self.beacon_node, + "endpoint_fulu_fork_epoch" => ?beacon_node_spec.fulu_fork_epoch, + "hint" => UPDATE_REQUIRED_LOG_HINT, + ); } Ok(()) diff 
--git a/validator_client/http_api/src/test_utils.rs b/validator_client/http_api/src/test_utils.rs index 390095eec73..0531626846b 100644 --- a/validator_client/http_api/src/test_utils.rs +++ b/validator_client/http_api/src/test_utils.rs @@ -251,9 +251,9 @@ impl ApiTester { pub async fn test_get_lighthouse_spec(self) -> Self { let result = self .client - .get_lighthouse_spec::() + .get_lighthouse_spec::() .await - .map(|res| ConfigAndPreset::Electra(res.data)) + .map(|res| ConfigAndPreset::Fulu(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&E::default_spec(), None); diff --git a/validator_client/http_api/src/tests.rs b/validator_client/http_api/src/tests.rs index 7ea3d7ebaab..4e9acc42378 100644 --- a/validator_client/http_api/src/tests.rs +++ b/validator_client/http_api/src/tests.rs @@ -214,9 +214,9 @@ impl ApiTester { pub async fn test_get_lighthouse_spec(self) -> Self { let result = self .client - .get_lighthouse_spec::() + .get_lighthouse_spec::() .await - .map(|res| ConfigAndPreset::Electra(res.data)) + .map(|res| ConfigAndPreset::Fulu(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&E::default_spec(), None); diff --git a/validator_client/signing_method/src/web3signer.rs b/validator_client/signing_method/src/web3signer.rs index 86e7015ad35..d286449d203 100644 --- a/validator_client/signing_method/src/web3signer.rs +++ b/validator_client/signing_method/src/web3signer.rs @@ -29,6 +29,7 @@ pub enum ForkName { Capella, Deneb, Electra, + Fulu, } #[derive(Debug, PartialEq, Serialize)] @@ -107,6 +108,11 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> Web3SignerObject<'a, E, Pa block: None, block_header: Some(block.block_header()), }), + BeaconBlock::Fulu(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Fulu, + block: None, + block_header: Some(block.block_header()), + }), } } From 05727290fbef27df72e854de0ca9280ee6347101 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Fri, 10 Jan 2025 12:04:58 
+0530 Subject: [PATCH 05/26] Make max_blobs_per_block a config parameter (#6329) * First pass * Add restrictions to RuntimeVariableList api * Use empty_uninitialized and fix warnings * Fix some todos * Merge branch 'unstable' into max-blobs-preset * Fix take impl on RuntimeFixedList * cleanup * Fix test compilations * Fix some more tests * Fix test from unstable * Merge branch 'unstable' into max-blobs-preset * Merge remote-tracking branch 'origin/unstable' into max-blobs-preset * Remove footgun function * Minor simplifications * Move from preset to config * Fix typo * Revert "Remove footgun function" This reverts commit de01f923c7452355c87f50c0e8031ca94fa00d36. * Try fixing tests * Thread through ChainSpec * Fix release tests * Move RuntimeFixedVector into module and rename * Add test * Remove empty RuntimeVarList awefullness * Fix tests * Simplify BlobSidecarListFromRoot * Merge remote-tracking branch 'origin/unstable' into max-blobs-preset * Bump quota to account for new target (6) * Remove clone * Fix issue from review * Try to remove ugliness * Merge branch 'unstable' into max-blobs-preset * Fix max value * Fix doctest * Fix formatting * Fix max check * Delete hardcoded max_blobs_per_block in RPC limits * Merge remote-tracking branch 'origin/unstable' into max-blobs-preset --- .cargo/config.toml | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 16 +- .../beacon_chain/src/blob_verification.rs | 2 +- .../src/block_verification_types.rs | 20 +-- .../src/data_availability_checker.rs | 14 +- .../overflow_lru_cache.rs | 147 ++++++++++-------- beacon_node/beacon_chain/src/fetch_blobs.rs | 10 +- beacon_node/beacon_chain/src/kzg_utils.rs | 17 +- .../src/observed_data_sidecars.rs | 20 +-- beacon_node/beacon_chain/src/test_utils.rs | 31 ++-- .../tests/attestation_production.rs | 9 +- .../beacon_chain/tests/block_verification.rs | 18 ++- beacon_node/beacon_chain/tests/events.rs | 2 +- beacon_node/beacon_chain/tests/store_tests.rs | 29 ++-- 
beacon_node/client/src/builder.rs | 4 +- .../execution_layer/src/engine_api/http.rs | 3 +- .../test_utils/execution_block_generator.rs | 11 +- .../src/test_utils/mock_execution_layer.rs | 9 +- .../execution_layer/src/test_utils/mod.rs | 9 +- beacon_node/http_api/src/block_id.rs | 25 +-- .../tests/broadcast_validation_tests.rs | 3 +- .../lighthouse_network/src/rpc/codec.rs | 22 ++- .../lighthouse_network/src/rpc/config.rs | 4 +- .../lighthouse_network/src/rpc/handler.rs | 6 +- .../lighthouse_network/src/rpc/methods.rs | 6 +- beacon_node/lighthouse_network/src/rpc/mod.rs | 5 +- .../lighthouse_network/src/rpc/protocol.rs | 36 ++--- .../src/rpc/rate_limiter.rs | 24 ++- .../src/rpc/self_limiter.rs | 20 ++- .../network_beacon_processor/rpc_methods.rs | 8 - .../src/network_beacon_processor/tests.rs | 11 +- .../src/sync/block_sidecar_coupling.rs | 72 ++++++--- beacon_node/network/src/sync/manager.rs | 2 + .../network/src/sync/network_context.rs | 38 ++++- .../src/sync/network_context/requests.rs | 1 + beacon_node/network/src/sync/tests/lookups.rs | 16 +- beacon_node/network/src/sync/tests/mod.rs | 3 +- .../store/src/blob_sidecar_list_from_root.rs | 42 +++++ beacon_node/store/src/hot_cold_store.rs | 38 +++-- .../store/src/impls/execution_payload.rs | 3 +- beacon_node/store/src/lib.rs | 2 + .../chiado/config.yaml | 2 + .../gnosis/config.yaml | 2 + .../holesky/config.yaml | 2 + .../mainnet/config.yaml | 2 + .../sepolia/config.yaml | 2 + .../src/per_block_processing.rs | 6 +- consensus/types/presets/gnosis/deneb.yaml | 2 - consensus/types/presets/mainnet/deneb.yaml | 2 - consensus/types/presets/minimal/deneb.yaml | 2 - consensus/types/src/beacon_block_body.rs | 2 - consensus/types/src/blob_sidecar.rs | 33 ++-- consensus/types/src/chain_spec.rs | 26 ++++ consensus/types/src/data_column_sidecar.rs | 17 +- consensus/types/src/eth_spec.rs | 12 +- consensus/types/src/lib.rs | 2 + consensus/types/src/preset.rs | 3 - consensus/types/src/runtime_fixed_vector.rs | 81 ++++++++++ 
consensus/types/src/runtime_var_list.rs | 19 ++- lcli/src/mock_el.rs | 5 +- testing/node_test_rig/src/lib.rs | 9 +- 61 files changed, 655 insertions(+), 335 deletions(-) create mode 100644 beacon_node/store/src/blob_sidecar_list_from_root.rs create mode 100644 consensus/types/src/runtime_fixed_vector.rs diff --git a/.cargo/config.toml b/.cargo/config.toml index a408305c4d1..dac01630032 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,3 +1,4 @@ [env] # Set the number of arenas to 16 when using jemalloc. JEMALLOC_SYS_WITH_MALLOC_CONF = "abort_conf:true,narenas:16" + diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d84cd9615aa..81783267ba6 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -117,7 +117,8 @@ use std::sync::Arc; use std::time::Duration; use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterator}; use store::{ - DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, + BlobSidecarListFromRoot, DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, + KeyValueStoreOp, StoreItem, StoreOp, }; use task_executor::{ShutdownReason, TaskExecutor}; use tokio::sync::mpsc::Receiver; @@ -1147,9 +1148,10 @@ impl BeaconChain { pub fn get_blobs_checking_early_attester_cache( &self, block_root: &Hash256, - ) -> Result, Error> { + ) -> Result, Error> { self.early_attester_cache .get_blobs(*block_root) + .map(Into::into) .map_or_else(|| self.get_blobs(block_root), Ok) } @@ -1240,11 +1242,11 @@ impl BeaconChain { /// /// ## Errors /// May return a database error. - pub fn get_blobs(&self, block_root: &Hash256) -> Result, Error> { - match self.store.get_blobs(block_root)? 
{ - Some(blobs) => Ok(blobs), - None => Ok(BlobSidecarList::default()), - } + pub fn get_blobs( + &self, + block_root: &Hash256, + ) -> Result, Error> { + self.store.get_blobs(block_root).map_err(Error::from) } /// Returns the data columns at the given root, if any. diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 6c87deb8260..786b627bb7e 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -400,7 +400,7 @@ pub fn validate_blob_sidecar_for_gossip= T::EthSpec::max_blobs_per_block() as u64 { + if blob_index >= chain.spec.max_blobs_per_block(blob_epoch) { return Err(GossipBlobError::InvalidSubnet { expected: subnet, received: blob_index, diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 420c83081c7..0bf3007e9b0 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -4,11 +4,10 @@ use crate::data_column_verification::{CustodyDataColumn, CustodyDataColumnList}; use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::{get_block_root, PayloadVerificationOutcome}; use derivative::Derivative; -use ssz_types::VariableList; use state_processing::ConsensusContext; use std::fmt::{Debug, Formatter}; use std::sync::Arc; -use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; +use types::blob_sidecar::BlobIdentifier; use types::{ BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, @@ -176,23 +175,6 @@ impl RpcBlock { }) } - pub fn new_from_fixed( - block_root: Hash256, - block: Arc>, - blobs: FixedBlobSidecarList, - ) -> Result { - let filtered = blobs - .into_iter() - .filter_map(|b| b.clone()) - .collect::>(); - let blobs = if 
filtered.is_empty() { - None - } else { - Some(VariableList::from(filtered)) - }; - Self::new(Some(block_root), block, blobs) - } - #[allow(clippy::type_complexity)] pub fn deconstruct( self, diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index f6002ea0ac9..4c5152239c2 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -215,9 +215,12 @@ impl DataAvailabilityChecker { // Note: currently not reporting which specific blob is invalid because we fetch all blobs // from the same peer for both lookup and range sync. - let verified_blobs = - KzgVerifiedBlobList::new(blobs.iter().flatten().cloned(), &self.kzg, seen_timestamp) - .map_err(AvailabilityCheckError::InvalidBlobs)?; + let verified_blobs = KzgVerifiedBlobList::new( + blobs.into_vec().into_iter().flatten(), + &self.kzg, + seen_timestamp, + ) + .map_err(AvailabilityCheckError::InvalidBlobs)?; self.availability_cache .put_kzg_verified_blobs(block_root, verified_blobs, &self.log) @@ -400,14 +403,13 @@ impl DataAvailabilityChecker { blocks: Vec>, ) -> Result>, AvailabilityCheckError> { let mut results = Vec::with_capacity(blocks.len()); - let all_blobs: BlobSidecarList = blocks + let all_blobs = blocks .iter() .filter(|block| self.blobs_required_for_block(block.as_block())) // this clone is cheap as it's cloning an Arc .filter_map(|block| block.blobs().cloned()) .flatten() - .collect::>() - .into(); + .collect::>(); // verify kzg for all blobs at once if !all_blobs.is_empty() { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 5ce023038d2..44148922f48 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ 
-10,13 +10,12 @@ use crate::BeaconChainTypes; use lru::LruCache; use parking_lot::RwLock; use slog::{debug, Logger}; -use ssz_types::FixedVector; use std::num::NonZeroUsize; use std::sync::Arc; use types::blob_sidecar::BlobIdentifier; use types::{ BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, - Hash256, SignedBeaconBlock, + Hash256, RuntimeFixedVector, RuntimeVariableList, SignedBeaconBlock, }; /// This represents the components of a partially available block @@ -28,7 +27,7 @@ use types::{ #[derive(Clone)] pub struct PendingComponents { pub block_root: Hash256, - pub verified_blobs: FixedVector>, E::MaxBlobsPerBlock>, + pub verified_blobs: RuntimeFixedVector>>, pub verified_data_columns: Vec>, pub executed_block: Option>, pub reconstruction_started: bool, @@ -41,9 +40,7 @@ impl PendingComponents { } /// Returns an immutable reference to the fixed vector of cached blobs. - pub fn get_cached_blobs( - &self, - ) -> &FixedVector>, E::MaxBlobsPerBlock> { + pub fn get_cached_blobs(&self) -> &RuntimeFixedVector>> { &self.verified_blobs } @@ -64,9 +61,7 @@ impl PendingComponents { } /// Returns a mutable reference to the fixed vector of cached blobs. - pub fn get_cached_blobs_mut( - &mut self, - ) -> &mut FixedVector>, E::MaxBlobsPerBlock> { + pub fn get_cached_blobs_mut(&mut self) -> &mut RuntimeFixedVector>> { &mut self.verified_blobs } @@ -138,10 +133,7 @@ impl PendingComponents { /// Blobs are only inserted if: /// 1. The blob entry at the index is empty and no block exists. /// 2. The block exists and its commitment matches the blob's commitment. 
- pub fn merge_blobs( - &mut self, - blobs: FixedVector>, E::MaxBlobsPerBlock>, - ) { + pub fn merge_blobs(&mut self, blobs: RuntimeFixedVector>>) { for (index, blob) in blobs.iter().cloned().enumerate() { let Some(blob) = blob else { continue }; self.merge_single_blob(index, blob); @@ -185,7 +177,7 @@ impl PendingComponents { /// Blobs that don't match the new block's commitments are evicted. pub fn merge_block(&mut self, block: DietAvailabilityPendingExecutedBlock) { self.insert_block(block); - let reinsert = std::mem::take(self.get_cached_blobs_mut()); + let reinsert = self.get_cached_blobs_mut().take(); self.merge_blobs(reinsert); } @@ -237,10 +229,10 @@ impl PendingComponents { } /// Returns an empty `PendingComponents` object with the given block root. - pub fn empty(block_root: Hash256) -> Self { + pub fn empty(block_root: Hash256, max_len: usize) -> Self { Self { block_root, - verified_blobs: FixedVector::default(), + verified_blobs: RuntimeFixedVector::new(vec![None; max_len]), verified_data_columns: vec![], executed_block: None, reconstruction_started: false, @@ -299,7 +291,11 @@ impl PendingComponents { else { return Err(AvailabilityCheckError::Unexpected); }; - (Some(verified_blobs), None) + let max_len = spec.max_blobs_per_block(diet_executed_block.as_block().epoch()) as usize; + ( + Some(RuntimeVariableList::new(verified_blobs, max_len)?), + None, + ) }; let executed_block = recover(diet_executed_block)?; @@ -341,10 +337,7 @@ impl PendingComponents { } if let Some(kzg_verified_data_column) = self.verified_data_columns.first() { - let epoch = kzg_verified_data_column - .as_data_column() - .slot() - .epoch(E::slots_per_epoch()); + let epoch = kzg_verified_data_column.as_data_column().epoch(); return Some(epoch); } @@ -457,7 +450,18 @@ impl DataAvailabilityCheckerInner { kzg_verified_blobs: I, log: &Logger, ) -> Result, AvailabilityCheckError> { - let mut fixed_blobs = FixedVector::default(); + let mut kzg_verified_blobs = 
kzg_verified_blobs.into_iter().peekable(); + + let Some(epoch) = kzg_verified_blobs + .peek() + .map(|verified_blob| verified_blob.as_blob().epoch()) + else { + // Verified blobs list should be non-empty. + return Err(AvailabilityCheckError::Unexpected); + }; + + let mut fixed_blobs = + RuntimeFixedVector::new(vec![None; self.spec.max_blobs_per_block(epoch) as usize]); for blob in kzg_verified_blobs { if let Some(blob_opt) = fixed_blobs.get_mut(blob.blob_index() as usize) { @@ -471,7 +475,9 @@ impl DataAvailabilityCheckerInner { let mut pending_components = write_lock .pop_entry(&block_root) .map(|(_, v)| v) - .unwrap_or_else(|| PendingComponents::empty(block_root)); + .unwrap_or_else(|| { + PendingComponents::empty(block_root, self.spec.max_blobs_per_block(epoch) as usize) + }); // Merge in the blobs. pending_components.merge_blobs(fixed_blobs); @@ -498,13 +504,24 @@ impl DataAvailabilityCheckerInner { kzg_verified_data_columns: I, log: &Logger, ) -> Result, AvailabilityCheckError> { + let mut kzg_verified_data_columns = kzg_verified_data_columns.into_iter().peekable(); + let Some(epoch) = kzg_verified_data_columns + .peek() + .map(|verified_blob| verified_blob.as_data_column().epoch()) + else { + // Verified data_columns list should be non-empty. + return Err(AvailabilityCheckError::Unexpected); + }; + let mut write_lock = self.critical.write(); // Grab existing entry or create a new entry. let mut pending_components = write_lock .pop_entry(&block_root) .map(|(_, v)| v) - .unwrap_or_else(|| PendingComponents::empty(block_root)); + .unwrap_or_else(|| { + PendingComponents::empty(block_root, self.spec.max_blobs_per_block(epoch) as usize) + }); // Merge in the data columns. 
pending_components.merge_data_columns(kzg_verified_data_columns)?; @@ -581,6 +598,7 @@ impl DataAvailabilityCheckerInner { log: &Logger, ) -> Result, AvailabilityCheckError> { let mut write_lock = self.critical.write(); + let epoch = executed_block.as_block().epoch(); let block_root = executed_block.import_data.block_root; // register the block to get the diet block @@ -592,7 +610,9 @@ impl DataAvailabilityCheckerInner { let mut pending_components = write_lock .pop_entry(&block_root) .map(|(_, v)| v) - .unwrap_or_else(|| PendingComponents::empty(block_root)); + .unwrap_or_else(|| { + PendingComponents::empty(block_root, self.spec.max_blobs_per_block(epoch) as usize) + }); // Merge in the block. pending_components.merge_block(diet_executed_block); @@ -812,7 +832,8 @@ mod test { info!(log, "done printing kzg commitments"); let gossip_verified_blobs = if let Some((kzg_proofs, blobs)) = maybe_blobs { - let sidecars = BlobSidecar::build_sidecars(blobs, &block, kzg_proofs).unwrap(); + let sidecars = + BlobSidecar::build_sidecars(blobs, &block, kzg_proofs, &chain.spec).unwrap(); Vec::from(sidecars) .into_iter() .map(|sidecar| { @@ -945,6 +966,8 @@ mod test { assert_eq!(cache.critical.read().len(), 1); } } + // remove the blob to simulate successful import + cache.remove_pending_components(root); assert!( cache.critical.read().is_empty(), "cache should be empty now that all components available" @@ -1125,7 +1148,7 @@ mod pending_components_tests { use super::*; use crate::block_verification_types::BlockImportData; use crate::eth1_finalization_cache::Eth1FinalizationData; - use crate::test_utils::{generate_rand_block_and_blobs, NumBlobs}; + use crate::test_utils::{generate_rand_block_and_blobs, test_spec, NumBlobs}; use crate::PayloadVerificationOutcome; use fork_choice::PayloadVerificationStatus; use kzg::KzgCommitment; @@ -1141,15 +1164,19 @@ mod pending_components_tests { type Setup = ( SignedBeaconBlock, - FixedVector>>, ::MaxBlobsPerBlock>, - FixedVector>>, 
::MaxBlobsPerBlock>, + RuntimeFixedVector>>>, + RuntimeFixedVector>>>, + usize, ); pub fn pre_setup() -> Setup { let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); + let spec = test_spec::(); let (block, blobs_vec) = - generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &mut rng); - let mut blobs: FixedVector<_, ::MaxBlobsPerBlock> = FixedVector::default(); + generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &mut rng, &spec); + let max_len = spec.max_blobs_per_block(block.epoch()) as usize; + let mut blobs: RuntimeFixedVector>>> = + RuntimeFixedVector::default(max_len); for blob in blobs_vec { if let Some(b) = blobs.get_mut(blob.index as usize) { @@ -1157,10 +1184,8 @@ mod pending_components_tests { } } - let mut invalid_blobs: FixedVector< - Option>>, - ::MaxBlobsPerBlock, - > = FixedVector::default(); + let mut invalid_blobs: RuntimeFixedVector>>> = + RuntimeFixedVector::default(max_len); for (index, blob) in blobs.iter().enumerate() { if let Some(invalid_blob) = blob { let mut blob_copy = invalid_blob.as_ref().clone(); @@ -1169,21 +1194,21 @@ mod pending_components_tests { } } - (block, blobs, invalid_blobs) + (block, blobs, invalid_blobs, max_len) } type PendingComponentsSetup = ( DietAvailabilityPendingExecutedBlock, - FixedVector>, ::MaxBlobsPerBlock>, - FixedVector>, ::MaxBlobsPerBlock>, + RuntimeFixedVector>>, + RuntimeFixedVector>>, ); pub fn setup_pending_components( block: SignedBeaconBlock, - valid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, - invalid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, + valid_blobs: RuntimeFixedVector>>>, + invalid_blobs: RuntimeFixedVector>>>, ) -> PendingComponentsSetup { - let blobs = FixedVector::from( + let blobs = RuntimeFixedVector::new( valid_blobs .iter() .map(|blob_opt| { @@ -1193,7 +1218,7 @@ mod pending_components_tests { }) .collect::>(), ); - let invalid_blobs = FixedVector::from( + let invalid_blobs = RuntimeFixedVector::new( invalid_blobs .iter() .map(|blob_opt| { @@ 
-1225,10 +1250,10 @@ mod pending_components_tests { (block.into(), blobs, invalid_blobs) } - pub fn assert_cache_consistent(cache: PendingComponents) { + pub fn assert_cache_consistent(cache: PendingComponents, max_len: usize) { if let Some(cached_block) = cache.get_cached_block() { let cached_block_commitments = cached_block.get_commitments(); - for index in 0..E::max_blobs_per_block() { + for index in 0..max_len { let block_commitment = cached_block_commitments.get(index).copied(); let blob_commitment_opt = cache.get_cached_blobs().get(index).unwrap(); let blob_commitment = blob_commitment_opt.as_ref().map(|b| *b.get_commitment()); @@ -1247,40 +1272,40 @@ mod pending_components_tests { #[test] fn valid_block_invalid_blobs_valid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = >::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_block(block_commitments); cache.merge_blobs(random_blobs); cache.merge_blobs(blobs); - assert_cache_consistent(cache); + assert_cache_consistent(cache, max_len); } #[test] fn invalid_blobs_block_valid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = >::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_blobs(random_blobs); cache.merge_block(block_commitments); cache.merge_blobs(blobs); - assert_cache_consistent(cache); + assert_cache_consistent(cache, max_len); } #[test] fn invalid_blobs_valid_blobs_block() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + 
let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = >::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_blobs(random_blobs); cache.merge_blobs(blobs); cache.merge_block(block_commitments); @@ -1290,46 +1315,46 @@ mod pending_components_tests { #[test] fn block_valid_blobs_invalid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = >::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_block(block_commitments); cache.merge_blobs(blobs); cache.merge_blobs(random_blobs); - assert_cache_consistent(cache); + assert_cache_consistent(cache, max_len); } #[test] fn valid_blobs_block_invalid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = >::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_blobs(blobs); cache.merge_block(block_commitments); cache.merge_blobs(random_blobs); - assert_cache_consistent(cache); + assert_cache_consistent(cache, max_len); } #[test] fn valid_blobs_invalid_blobs_block() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = 
>::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_blobs(blobs); cache.merge_blobs(random_blobs); cache.merge_block(block_commitments); - assert_cache_consistent(cache); + assert_cache_consistent(cache, max_len); } } diff --git a/beacon_node/beacon_chain/src/fetch_blobs.rs b/beacon_node/beacon_chain/src/fetch_blobs.rs index f740b693fbf..f1646072c96 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs.rs @@ -21,8 +21,8 @@ use std::sync::Arc; use tokio::sync::mpsc::Receiver; use types::blob_sidecar::{BlobSidecarError, FixedBlobSidecarList}; use types::{ - BeaconStateError, BlobSidecar, DataColumnSidecar, DataColumnSidecarList, EthSpec, FullPayload, - Hash256, SignedBeaconBlock, SignedBeaconBlockHeader, + BeaconStateError, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnSidecarList, EthSpec, + FullPayload, Hash256, SignedBeaconBlock, SignedBeaconBlockHeader, }; pub enum BlobsOrDataColumns { @@ -112,6 +112,7 @@ pub async fn fetch_and_process_engine_blobs( response, signed_block_header, &kzg_commitments_proof, + &chain.spec, )?; let num_fetched_blobs = fixed_blob_sidecar_list @@ -275,8 +276,11 @@ fn build_blob_sidecars( response: Vec>>, signed_block_header: SignedBeaconBlockHeader, kzg_commitments_inclusion_proof: &FixedVector, + spec: &ChainSpec, ) -> Result, FetchEngineBlobError> { - let mut fixed_blob_sidecar_list = FixedBlobSidecarList::default(); + let epoch = block.epoch(); + let mut fixed_blob_sidecar_list = + FixedBlobSidecarList::default(spec.max_blobs_per_block(epoch) as usize); for (index, blob_and_proof) in response .into_iter() .enumerate() diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index bd47e82215e..e32ee9c24bd 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -194,9 +194,11 @@ fn build_data_column_sidecars( spec: &ChainSpec, ) -> Result, String> { let 
number_of_columns = spec.number_of_columns; - let mut columns = vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; - let mut column_kzg_proofs = - vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; + let max_blobs_per_block = spec + .max_blobs_per_block(signed_block_header.message.slot.epoch(E::slots_per_epoch())) + as usize; + let mut columns = vec![Vec::with_capacity(max_blobs_per_block); number_of_columns]; + let mut column_kzg_proofs = vec![Vec::with_capacity(max_blobs_per_block); number_of_columns]; for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { // we iterate over each column, and we construct the column from "top to bottom", @@ -253,6 +255,7 @@ pub fn reconstruct_blobs( data_columns: &[Arc>], blob_indices_opt: Option>, signed_block: &SignedBlindedBeaconBlock, + spec: &ChainSpec, ) -> Result, String> { // The data columns are from the database, so we assume their correctness. let first_data_column = data_columns @@ -315,10 +318,11 @@ pub fn reconstruct_blobs( .map(Arc::new) .map_err(|e| format!("{e:?}")) }) - .collect::, _>>()? - .into(); + .collect::, _>>()?; + + let max_blobs = spec.max_blobs_per_block(signed_block.epoch()) as usize; - Ok(blob_sidecars) + BlobSidecarList::new(blob_sidecars, max_blobs).map_err(|e| format!("{e:?}")) } /// Reconstruct all data columns from a subset of data column sidecars (requires at least 50%). 
@@ -478,6 +482,7 @@ mod test { &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2], Some(blob_indices.clone()), &signed_blinded_block, + spec, ) .unwrap(); diff --git a/beacon_node/beacon_chain/src/observed_data_sidecars.rs b/beacon_node/beacon_chain/src/observed_data_sidecars.rs index a9f46640645..48989e07d3d 100644 --- a/beacon_node/beacon_chain/src/observed_data_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_data_sidecars.rs @@ -24,7 +24,7 @@ pub trait ObservableDataSidecar { fn slot(&self) -> Slot; fn block_proposer_index(&self) -> u64; fn index(&self) -> u64; - fn max_num_of_items(spec: &ChainSpec) -> usize; + fn max_num_of_items(spec: &ChainSpec, slot: Slot) -> usize; } impl ObservableDataSidecar for BlobSidecar { @@ -40,8 +40,8 @@ impl ObservableDataSidecar for BlobSidecar { self.index } - fn max_num_of_items(_spec: &ChainSpec) -> usize { - E::max_blobs_per_block() + fn max_num_of_items(spec: &ChainSpec, slot: Slot) -> usize { + spec.max_blobs_per_block(slot.epoch(E::slots_per_epoch())) as usize } } @@ -58,7 +58,7 @@ impl ObservableDataSidecar for DataColumnSidecar { self.index } - fn max_num_of_items(spec: &ChainSpec) -> usize { + fn max_num_of_items(spec: &ChainSpec, _slot: Slot) -> usize { spec.number_of_columns } } @@ -103,7 +103,9 @@ impl ObservedDataSidecars { slot: data_sidecar.slot(), proposer: data_sidecar.block_proposer_index(), }) - .or_insert_with(|| HashSet::with_capacity(T::max_num_of_items(&self.spec))); + .or_insert_with(|| { + HashSet::with_capacity(T::max_num_of_items(&self.spec, data_sidecar.slot())) + }); let did_not_exist = data_indices.insert(data_sidecar.index()); Ok(!did_not_exist) @@ -123,7 +125,7 @@ impl ObservedDataSidecars { } fn sanitize_data_sidecar(&self, data_sidecar: &T) -> Result<(), Error> { - if data_sidecar.index() >= T::max_num_of_items(&self.spec) as u64 { + if data_sidecar.index() >= T::max_num_of_items(&self.spec, data_sidecar.slot()) as u64 { return 
Err(Error::InvalidDataIndex(data_sidecar.index())); } let finalized_slot = self.finalized_slot; @@ -179,7 +181,7 @@ mod tests { use crate::test_utils::test_spec; use bls::Hash256; use std::sync::Arc; - use types::MainnetEthSpec; + use types::{Epoch, MainnetEthSpec}; type E = MainnetEthSpec; @@ -333,7 +335,7 @@ mod tests { #[test] fn simple_observations() { let spec = Arc::new(test_spec::()); - let mut cache = ObservedDataSidecars::>::new(spec); + let mut cache = ObservedDataSidecars::>::new(spec.clone()); // Slot 0, index 0 let proposer_index_a = 420; @@ -489,7 +491,7 @@ mod tests { ); // Try adding an out of bounds index - let invalid_index = E::max_blobs_per_block() as u64; + let invalid_index = spec.max_blobs_per_block(Epoch::new(0)); let sidecar_d = get_blob_sidecar(0, proposer_index_a, invalid_index); assert_eq!( cache.observe_sidecar(&sidecar_d), diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index d37398e4e02..fd3cc496260 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -514,7 +514,7 @@ where pub fn mock_execution_layer_with_config(mut self) -> Self { let mock = mock_execution_layer_from_parts::( - self.spec.as_ref().expect("cannot build without spec"), + self.spec.clone().expect("cannot build without spec"), self.runtime.task_executor.clone(), ); self.execution_layer = Some(mock.el.clone()); @@ -614,7 +614,7 @@ where } pub fn mock_execution_layer_from_parts( - spec: &ChainSpec, + spec: Arc, task_executor: TaskExecutor, ) -> MockExecutionLayer { let shanghai_time = spec.capella_fork_epoch.map(|epoch| { @@ -630,7 +630,7 @@ pub fn mock_execution_layer_from_parts( HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() }); - let kzg = get_kzg(spec); + let kzg = get_kzg(&spec); MockExecutionLayer::new( task_executor, @@ -640,7 +640,7 @@ pub fn mock_execution_layer_from_parts( prague_time, osaka_time, 
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), - spec.clone(), + spec, Some(kzg), ) } @@ -749,15 +749,15 @@ where pub fn get_head_block(&self) -> RpcBlock { let block = self.chain.head_beacon_block(); let block_root = block.canonical_root(); - let blobs = self.chain.get_blobs(&block_root).unwrap(); - RpcBlock::new(Some(block_root), block, Some(blobs)).unwrap() + let blobs = self.chain.get_blobs(&block_root).unwrap().blobs(); + RpcBlock::new(Some(block_root), block, blobs).unwrap() } pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock { let block = self.chain.get_blinded_block(block_root).unwrap().unwrap(); let full_block = self.chain.store.make_full_block(block_root, block).unwrap(); - let blobs = self.chain.get_blobs(block_root).unwrap(); - RpcBlock::new(Some(*block_root), Arc::new(full_block), Some(blobs)).unwrap() + let blobs = self.chain.get_blobs(block_root).unwrap().blobs(); + RpcBlock::new(Some(*block_root), Arc::new(full_block), blobs).unwrap() } pub fn get_all_validators(&self) -> Vec { @@ -2020,7 +2020,7 @@ where let (block, blob_items) = block_contents; let sidecars = blob_items - .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs)) + .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec)) .transpose() .unwrap(); let block_hash: SignedBeaconBlockHash = self @@ -2046,7 +2046,7 @@ where let (block, blob_items) = block_contents; let sidecars = blob_items - .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs)) + .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec)) .transpose() .unwrap(); let block_root = block.canonical_root(); @@ -2817,11 +2817,12 @@ pub fn generate_rand_block_and_blobs( fork_name: ForkName, num_blobs: NumBlobs, rng: &mut impl Rng, + spec: &ChainSpec, ) -> (SignedBeaconBlock>, Vec>) { let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng)); let mut block = SignedBeaconBlock::from_block(inner, 
types::Signature::random_for_test(rng)); - + let max_blobs = spec.max_blobs_per_block(block.epoch()) as usize; let mut blob_sidecars = vec![]; let bundle = match block { @@ -2831,7 +2832,7 @@ pub fn generate_rand_block_and_blobs( // Get either zero blobs or a random number of blobs between 1 and Max Blobs. let payload: &mut FullPayloadDeneb = &mut message.body.execution_payload; let num_blobs = match num_blobs { - NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()), + NumBlobs::Random => rng.gen_range(1..=max_blobs), NumBlobs::Number(n) => n, NumBlobs::None => 0, }; @@ -2851,7 +2852,7 @@ pub fn generate_rand_block_and_blobs( // Get either zero blobs or a random number of blobs between 1 and Max Blobs. let payload: &mut FullPayloadElectra = &mut message.body.execution_payload; let num_blobs = match num_blobs { - NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()), + NumBlobs::Random => rng.gen_range(1..=max_blobs), NumBlobs::Number(n) => n, NumBlobs::None => 0, }; @@ -2870,7 +2871,7 @@ pub fn generate_rand_block_and_blobs( // Get either zero blobs or a random number of blobs between 1 and Max Blobs. 
let payload: &mut FullPayloadFulu = &mut message.body.execution_payload; let num_blobs = match num_blobs { - NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()), + NumBlobs::Random => rng.gen_range(1..=max_blobs), NumBlobs::Number(n) => n, NumBlobs::None => 0, }; @@ -2924,7 +2925,7 @@ pub fn generate_rand_block_and_data_columns( DataColumnSidecarList, ) { let kzg = get_kzg(spec); - let (block, blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng); + let (block, blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng, spec); let blob_refs = blobs.iter().map(|b| &b.blob).collect::>(); let data_columns = blobs_to_data_column_sidecars(&blob_refs, &block, &kzg, spec).unwrap(); diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 87fefe71146..60001159938 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -155,7 +155,7 @@ async fn produces_attestations() { .store .make_full_block(&block_root, blinded_block) .unwrap(); - let blobs = chain.get_blobs(&block_root).unwrap(); + let blobs = chain.get_blobs(&block_root).unwrap().blobs(); let epoch_boundary_slot = state .current_epoch() @@ -223,7 +223,7 @@ async fn produces_attestations() { assert_eq!(data.target.root, target_root, "bad target root"); let rpc_block = - RpcBlock::::new(None, Arc::new(block.clone()), Some(blobs.clone())) + RpcBlock::::new(None, Arc::new(block.clone()), blobs.clone()) .unwrap(); let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available( available_block, @@ -299,10 +299,11 @@ async fn early_attester_cache_old_request() { let head_blobs = harness .chain .get_blobs(&head.beacon_block_root) - .expect("should get blobs"); + .expect("should get blobs") + .blobs(); let rpc_block = - RpcBlock::::new(None, head.beacon_block.clone(), Some(head_blobs)).unwrap(); + RpcBlock::::new(None, 
head.beacon_block.clone(), head_blobs).unwrap(); let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(available_block) = harness .chain diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 103734b2247..b61f758cac5 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -65,12 +65,13 @@ async fn get_chain_segment() -> (Vec>, Vec( signed_block: &SignedBeaconBlock, blobs: &mut BlobSidecarList, ) { - for old_blob_sidecar in blobs.iter_mut() { + for old_blob_sidecar in blobs.as_mut_slice() { let new_blob = Arc::new(BlobSidecar:: { index: old_blob_sidecar.index, blob: old_blob_sidecar.blob.clone(), @@ -1223,7 +1225,7 @@ async fn verify_block_for_gossip_slashing_detection() { let slasher = Arc::new( Slasher::open( SlasherConfig::new(slasher_dir.path().into()), - spec, + spec.clone(), test_logger(), ) .unwrap(), @@ -1247,7 +1249,7 @@ async fn verify_block_for_gossip_slashing_detection() { if let Some((kzg_proofs, blobs)) = blobs1 { let sidecars = - BlobSidecar::build_sidecars(blobs, verified_block.block(), kzg_proofs).unwrap(); + BlobSidecar::build_sidecars(blobs, verified_block.block(), kzg_proofs, &spec).unwrap(); for sidecar in sidecars { let blob_index = sidecar.index; let verified_blob = harness diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index ab784d3be45..c9bd55e0620 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -73,7 +73,7 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { let blob_1 = Arc::new(blob_1); let blob_2 = Arc::new(blob_2); - let blobs = FixedBlobSidecarList::from(vec![Some(blob_1.clone()), Some(blob_2.clone())]); + let blobs = FixedBlobSidecarList::new(vec![Some(blob_1.clone()), Some(blob_2.clone())]); let expected_sse_blobs = vec![ 
SseBlobSidecar::from_blob_sidecar(blob_1.as_ref()), SseBlobSidecar::from_blob_sidecar(blob_2.as_ref()), diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index ed97b8d6345..60d46e8269d 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2317,7 +2317,12 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .get_full_block(&wss_block_root) .unwrap() .unwrap(); - let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap(); + let wss_blobs_opt = harness + .chain + .store + .get_blobs(&wss_block_root) + .unwrap() + .blobs(); let wss_state = full_store .get_state(&wss_state_root, Some(checkpoint_slot)) .unwrap() @@ -2342,8 +2347,10 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { let kzg = get_kzg(&spec); - let mock = - mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); + let mock = mock_execution_layer_from_parts( + harness.spec.clone(), + harness.runtime.task_executor.clone(), + ); // Initialise a new beacon chain from the finalized checkpoint. // The slot clock must be set to a time ahead of the checkpoint state. 
@@ -2388,7 +2395,11 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await .unwrap() .unwrap(); - let store_wss_blobs_opt = beacon_chain.store.get_blobs(&wss_block_root).unwrap(); + let store_wss_blobs_opt = beacon_chain + .store + .get_blobs(&wss_block_root) + .unwrap() + .blobs(); assert_eq!(store_wss_block, wss_block); assert_eq!(store_wss_blobs_opt, wss_blobs_opt); @@ -2407,7 +2418,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await .unwrap() .unwrap(); - let blobs = harness.chain.get_blobs(&block_root).expect("blobs"); + let blobs = harness.chain.get_blobs(&block_root).expect("blobs").blobs(); let slot = full_block.slot(); let state_root = full_block.state_root(); @@ -2415,7 +2426,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { beacon_chain .process_block( full_block.canonical_root(), - RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(), + RpcBlock::new(Some(block_root), Arc::new(full_block), blobs).unwrap(), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -2469,13 +2480,13 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await .expect("should get block") .expect("should get block"); - let blobs = harness.chain.get_blobs(&block_root).expect("blobs"); + let blobs = harness.chain.get_blobs(&block_root).expect("blobs").blobs(); if let MaybeAvailableBlock::Available(block) = harness .chain .data_availability_checker .verify_kzg_for_rpc_block( - RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(), + RpcBlock::new(Some(block_root), Arc::new(full_block), blobs).unwrap(), ) .expect("should verify kzg") { @@ -3351,7 +3362,7 @@ fn check_blob_existence( .unwrap() .map(Result::unwrap) { - if let Some(blobs) = harness.chain.store.get_blobs(&block_root).unwrap() { + if let Some(blobs) = harness.chain.store.get_blobs(&block_root).unwrap().blobs() { assert!(should_exist, "blobs at slot 
{slot} exist but should not"); blobs_seen += blobs.len(); } else { diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 24c66158225..1cd9e89b96c 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -36,7 +36,6 @@ use network::{NetworkConfig, NetworkSenders, NetworkService}; use slasher::Slasher; use slasher_service::SlasherService; use slog::{debug, info, warn, Logger}; -use ssz::Decode; use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -361,10 +360,11 @@ where let anchor_block = SignedBeaconBlock::from_ssz_bytes(&anchor_block_bytes, &spec) .map_err(|e| format!("Unable to parse weak subj block SSZ: {:?}", e))?; let anchor_blobs = if anchor_block.message().body().has_blobs() { + let max_blobs_len = spec.max_blobs_per_block(anchor_block.epoch()) as usize; let anchor_blobs_bytes = anchor_blobs_bytes .ok_or("Blobs for checkpoint must be provided using --checkpoint-blobs")?; Some( - BlobSidecarList::from_ssz_bytes(&anchor_blobs_bytes) + BlobSidecarList::from_ssz_bytes(&anchor_blobs_bytes, max_blobs_len) .map_err(|e| format!("Unable to parse weak subj blobs SSZ: {e:?}"))?, ) } else { diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 1fd9f81d46f..daf2bf6ed4b 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -1383,7 +1383,8 @@ mod test { impl Tester { pub fn new(with_auth: bool) -> Self { - let server = MockServer::unit_testing(); + let spec = Arc::new(MainnetEthSpec::default_spec()); + let server = MockServer::unit_testing(spec); let rpc_url = SensitiveUrl::parse(&server.url()).unwrap(); let echo_url = SensitiveUrl::parse(&format!("{}/echo", server.url())).unwrap(); diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs 
index 2a39796707b..9fa375b3757 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -154,6 +154,7 @@ pub struct ExecutionBlockGenerator { pub blobs_bundles: HashMap>, pub kzg: Option>, rng: Arc>, + spec: Arc, } fn make_rng() -> Arc> { @@ -172,6 +173,7 @@ impl ExecutionBlockGenerator { cancun_time: Option, prague_time: Option, osaka_time: Option, + spec: Arc, kzg: Option>, ) -> Self { let mut gen = Self { @@ -192,6 +194,7 @@ impl ExecutionBlockGenerator { blobs_bundles: <_>::default(), kzg, rng: make_rng(), + spec, }; gen.insert_pow_block(0).unwrap(); @@ -697,7 +700,11 @@ impl ExecutionBlockGenerator { if execution_payload.fork_name().deneb_enabled() { // get random number between 0 and Max Blobs let mut rng = self.rng.lock(); - let num_blobs = rng.gen::() % (E::max_blobs_per_block() + 1); + let max_blobs = self + .spec + .max_blobs_per_block_by_fork(execution_payload.fork_name()) + as usize; + let num_blobs = rng.gen::() % (max_blobs + 1); let (bundle, transactions) = generate_blobs(num_blobs)?; for tx in Vec::from(transactions) { execution_payload @@ -906,6 +913,7 @@ mod test { const TERMINAL_DIFFICULTY: u64 = 10; const TERMINAL_BLOCK: u64 = 10; const DIFFICULTY_INCREMENT: u64 = 1; + let spec = Arc::new(MainnetEthSpec::default_spec()); let mut generator: ExecutionBlockGenerator = ExecutionBlockGenerator::new( Uint256::from(TERMINAL_DIFFICULTY), @@ -915,6 +923,7 @@ mod test { None, None, None, + spec, None, ); diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 9df8d9cc5cf..f45bfda9ffa 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -13,7 +13,7 @@ pub struct MockExecutionLayer { pub server: MockServer, pub el: ExecutionLayer, pub executor: 
TaskExecutor, - pub spec: ChainSpec, + pub spec: Arc, } impl MockExecutionLayer { @@ -30,7 +30,7 @@ impl MockExecutionLayer { None, None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), - spec, + Arc::new(spec), None, ) } @@ -44,7 +44,7 @@ impl MockExecutionLayer { prague_time: Option, osaka_time: Option, jwt_key: Option, - spec: ChainSpec, + spec: Arc, kzg: Option>, ) -> Self { let handle = executor.handle().unwrap(); @@ -60,6 +60,7 @@ impl MockExecutionLayer { cancun_time, prague_time, osaka_time, + spec.clone(), kzg, ); @@ -323,7 +324,7 @@ impl MockExecutionLayer { pub async fn with_terminal_block(self, func: U) -> Self where - U: Fn(ChainSpec, ExecutionLayer, Option) -> V, + U: Fn(Arc, ExecutionLayer, Option) -> V, V: Future, { let terminal_block_number = self diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 5934c069a2f..75ff4358865 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -21,7 +21,7 @@ use std::marker::PhantomData; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::sync::{Arc, LazyLock}; use tokio::{runtime, sync::oneshot}; -use types::{EthSpec, ExecutionBlockHash, Uint256}; +use types::{ChainSpec, EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; use crate::EngineCapabilities; @@ -111,7 +111,7 @@ pub struct MockServer { } impl MockServer { - pub fn unit_testing() -> Self { + pub fn unit_testing(chain_spec: Arc) -> Self { Self::new( &runtime::Handle::current(), JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), @@ -122,6 +122,7 @@ impl MockServer { None, // FIXME(deneb): should this be the default? None, // FIXME(electra): should this be the default? None, // FIXME(fulu): should this be the default? 
+ chain_spec, None, ) } @@ -129,6 +130,7 @@ impl MockServer { pub fn new_with_config( handle: &runtime::Handle, config: MockExecutionConfig, + spec: Arc, kzg: Option>, ) -> Self { let MockExecutionConfig { @@ -152,6 +154,7 @@ impl MockServer { cancun_time, prague_time, osaka_time, + spec, kzg, ); @@ -216,6 +219,7 @@ impl MockServer { cancun_time: Option, prague_time: Option, osaka_time: Option, + spec: Arc, kzg: Option>, ) -> Self { Self::new_with_config( @@ -231,6 +235,7 @@ impl MockServer { prague_time, osaka_time, }, + spec, kzg, ) } diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index b9e48833184..0b00958f26a 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -287,14 +287,16 @@ impl BlockId { })?; // Return the `BlobSidecarList` identified by `self`. + let max_blobs_per_block = chain.spec.max_blobs_per_block(block.epoch()) as usize; let blob_sidecar_list = if !blob_kzg_commitments.is_empty() { if chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) { Self::get_blobs_from_data_columns(chain, root, query.indices, &block)? } else { - Self::get_blobs(chain, root, query.indices)? + Self::get_blobs(chain, root, query.indices, max_blobs_per_block)? } } else { - BlobSidecarList::default() + BlobSidecarList::new(vec![], max_blobs_per_block) + .map_err(|e| warp_utils::reject::custom_server_error(format!("{:?}", e)))? }; Ok((block, blob_sidecar_list, execution_optimistic, finalized)) @@ -304,22 +306,25 @@ impl BlockId { chain: &BeaconChain, root: Hash256, indices: Option>, + max_blobs_per_block: usize, ) -> Result, Rejection> { let blob_sidecar_list = chain .store .get_blobs(&root) .map_err(|e| warp_utils::reject::beacon_chain_error(e.into()))? 
+ .blobs() .ok_or_else(|| { warp_utils::reject::custom_not_found(format!("no blobs stored for block {root}")) })?; let blob_sidecar_list_filtered = match indices { Some(vec) => { - let list = blob_sidecar_list + let list: Vec<_> = blob_sidecar_list .into_iter() .filter(|blob_sidecar| vec.contains(&blob_sidecar.index)) .collect(); - BlobSidecarList::new(list) + + BlobSidecarList::new(list, max_blobs_per_block) .map_err(|e| warp_utils::reject::custom_server_error(format!("{:?}", e)))? } None => blob_sidecar_list, @@ -356,11 +361,13 @@ impl BlockId { ) .collect::, _>>()?; - reconstruct_blobs(&chain.kzg, &data_columns, blob_indices, block).map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "Error reconstructing data columns: {e:?}" - )) - }) + reconstruct_blobs(&chain.kzg, &data_columns, blob_indices, block, &chain.spec).map_err( + |e| { + warp_utils::reject::custom_server_error(format!( + "Error reconstructing data columns: {e:?}" + )) + }, + ) } else { Err(warp_utils::reject::custom_server_error( format!("Insufficient data columns to reconstruct blobs: required {num_required_columns}, but only {num_found_column_keys} were found.") diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 8e0a51a32a2..db4ef002579 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1460,7 +1460,8 @@ pub async fn block_seen_on_gossip_with_some_blobs() { let blobs = blobs.expect("should have some blobs"); assert!( blobs.0.len() >= 2, - "need at least 2 blobs for partial reveal" + "need at least 2 blobs for partial reveal, got: {}", + blobs.0.len() ); let partial_kzg_proofs = vec![*blobs.0.first().unwrap()]; diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index c3d20bbfb17..61b2699ac5a 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -186,6 +186,7 @@ impl Decoder for SSZSnappyInboundCodec { handle_rpc_request( self.protocol.versioned_protocol, &decoded_buffer, + self.fork_context.current_fork(), &self.fork_context.spec, ) } @@ -555,6 +556,7 @@ fn handle_length( fn handle_rpc_request( versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], + current_fork: ForkName, spec: &ChainSpec, ) -> Result>, RPCError> { match versioned_protocol { @@ -586,9 +588,23 @@ fn handle_rpc_request( )?, }), ))), - SupportedProtocol::BlobsByRangeV1 => Ok(Some(RequestType::BlobsByRange( - BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, - ))), + SupportedProtocol::BlobsByRangeV1 => { + let req = BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?; + let max_requested_blobs = req + .count + .saturating_mul(spec.max_blobs_per_block_by_fork(current_fork)); + // TODO(pawan): change this to max_blobs_per_rpc_request in the alpha10 PR + if max_requested_blobs > spec.max_request_blob_sidecars { + return Err(RPCError::ErrorResponse( + RpcErrorResponse::InvalidRequest, + format!( + "requested exceeded limit. 
allowed: {}, requested: {}", + spec.max_request_blob_sidecars, max_requested_blobs + ), + )); + } + Ok(Some(RequestType::BlobsByRange(req))) + } SupportedProtocol::BlobsByRootV1 => { Ok(Some(RequestType::BlobsByRoot(BlobsByRootRequest { blob_ids: RuntimeVariableList::from_ssz_bytes( diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index 7b3a59eac7e..75d49e9cb5f 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -110,8 +110,8 @@ impl RateLimiterConfig { pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(128, 10); pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); // `DEFAULT_BLOCKS_BY_RANGE_QUOTA` * (target + 1) to account for high usage - pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(512, 10); - pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(512, 10); + pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(896, 10); + pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(896, 10); // 320 blocks worth of columns for regular node, or 40 blocks for supernode. // Range sync load balances when requesting blocks, and each batch is 32 blocks. pub const DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA: Quota = Quota::n_every(5120, 10); diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 0a0a6ca754f..3a008df023d 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -855,7 +855,8 @@ where } let (req, substream) = substream; - let max_responses = req.max_responses(); + let max_responses = + req.max_responses(self.fork_context.current_fork(), &self.fork_context.spec); // store requests that expect responses if max_responses > 0 { @@ -924,7 +925,8 @@ where } // add the stream to substreams if we expect a response, otherwise drop the stream. 
- let max_responses = request.max_responses(); + let max_responses = + request.max_responses(self.fork_context.current_fork(), &self.fork_context.spec); if max_responses > 0 { let max_remaining_chunks = if request.expect_exactly_one_response() { // Currently enforced only for multiple responses diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index bb8bfb0e206..500188beefb 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -15,6 +15,7 @@ use strum::IntoStaticStr; use superstruct::superstruct; use types::blob_sidecar::BlobIdentifier; use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; +use types::ForkName; use types::{ blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, Hash256, LightClientBootstrap, LightClientFinalityUpdate, @@ -327,8 +328,9 @@ pub struct BlobsByRangeRequest { } impl BlobsByRangeRequest { - pub fn max_blobs_requested(&self) -> u64 { - self.count.saturating_mul(E::max_blobs_per_block() as u64) + pub fn max_blobs_requested(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 { + let max_blobs_per_block = spec.max_blobs_per_block_by_fork(current_fork); + self.count.saturating_mul(max_blobs_per_block) } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 7d091da7660..03f1395b8b5 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -181,12 +181,13 @@ impl RPC { let inbound_limiter = inbound_rate_limiter_config.map(|config| { debug!(log, "Using inbound rate limiting params"; "config" => ?config); - RateLimiter::new_with_config(config.0) + RateLimiter::new_with_config(config.0, fork_context.clone()) .expect("Inbound limiter configuration parameters are valid") }); let self_limiter = outbound_rate_limiter_config.map(|config| { 
- SelfRateLimiter::new(config, log.clone()).expect("Configuration parameters are valid") + SelfRateLimiter::new(config, fork_context.clone(), log.clone()) + .expect("Configuration parameters are valid") }); RPC { diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 87bde58292b..681b739d598 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -86,6 +86,10 @@ pub static SIGNED_BEACON_BLOCK_FULU_MAX_WITHOUT_PAYLOAD: LazyLock = LazyL /// We calculate the value from its fields instead of constructing the block and checking the length. /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network /// with `max_chunk_size`. +/// +/// FIXME: Given that these limits are useless we should probably delete them. See: +/// +/// https://github.com/sigp/lighthouse/issues/6790 pub static SIGNED_BEACON_BLOCK_BELLATRIX_MAX: LazyLock = LazyLock::new(|| // Size of a full altair block *SIGNED_BEACON_BLOCK_ALTAIR_MAX @@ -102,7 +106,6 @@ pub static SIGNED_BEACON_BLOCK_DENEB_MAX: LazyLock = LazyLock::new(|| { *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_deneb_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload` - + (::ssz_fixed_len() * ::max_blobs_per_block()) + ssz::BYTES_PER_LENGTH_OFFSET }); // Length offset for the blob commitments field. 
// @@ -110,7 +113,6 @@ pub static SIGNED_BEACON_BLOCK_ELECTRA_MAX: LazyLock = LazyLock::new(|| { *SIGNED_BEACON_BLOCK_ELECTRA_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_electra_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional ssz offset for the `ExecutionPayload` field - + (::ssz_fixed_len() * ::max_blobs_per_block()) + ssz::BYTES_PER_LENGTH_OFFSET }); // Length offset for the blob commitments field. @@ -118,8 +120,6 @@ pub static SIGNED_BEACON_BLOCK_FULU_MAX: LazyLock = LazyLock::new(|| { *SIGNED_BEACON_BLOCK_FULU_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_fulu_size() + ssz::BYTES_PER_LENGTH_OFFSET - + (::ssz_fixed_len() - * ::max_blobs_per_block()) + ssz::BYTES_PER_LENGTH_OFFSET }); @@ -129,14 +129,6 @@ pub static BLOB_SIDECAR_SIZE: LazyLock = pub static BLOB_SIDECAR_SIZE_MINIMAL: LazyLock = LazyLock::new(BlobSidecar::::max_size); -pub static DATA_COLUMNS_SIDECAR_MIN: LazyLock = LazyLock::new(|| { - DataColumnSidecar::::empty() - .as_ssz_bytes() - .len() -}); -pub static DATA_COLUMNS_SIDECAR_MAX: LazyLock = - LazyLock::new(DataColumnSidecar::::max_size); - pub static ERROR_TYPE_MIN: LazyLock = LazyLock::new(|| { VariableList::::from(Vec::::new()) .as_ssz_bytes() @@ -635,8 +627,10 @@ impl ProtocolId { Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlobsByRange => rpc_blob_limits::(), Protocol::BlobsByRoot => rpc_blob_limits::(), - Protocol::DataColumnsByRoot => rpc_data_column_limits(), - Protocol::DataColumnsByRange => rpc_data_column_limits(), + Protocol::DataColumnsByRoot => rpc_data_column_limits::(fork_context.current_fork()), + Protocol::DataColumnsByRange => { + rpc_data_column_limits::(fork_context.current_fork()) + } Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -716,8 +710,14 @@ pub fn rpc_blob_limits() -> RpcLimits { } } -pub fn rpc_data_column_limits() -> RpcLimits 
{ - RpcLimits::new(*DATA_COLUMNS_SIDECAR_MIN, *DATA_COLUMNS_SIDECAR_MAX) +// TODO(peerdas): fix hardcoded max here +pub fn rpc_data_column_limits(fork_name: ForkName) -> RpcLimits { + RpcLimits::new( + DataColumnSidecar::::empty().as_ssz_bytes().len(), + DataColumnSidecar::::max_size( + E::default_spec().max_blobs_per_block_by_fork(fork_name) as usize + ), + ) } /* Inbound upgrade */ @@ -815,13 +815,13 @@ impl RequestType { /* These functions are used in the handler for stream management */ /// Maximum number of responses expected for this request. - pub fn max_responses(&self) -> u64 { + pub fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 { match self { RequestType::Status(_) => 1, RequestType::Goodbye(_) => 0, RequestType::BlocksByRange(req) => *req.count(), RequestType::BlocksByRoot(req) => req.block_roots().len() as u64, - RequestType::BlobsByRange(req) => req.max_blobs_requested::(), + RequestType::BlobsByRange(req) => req.max_blobs_requested(current_fork, spec), RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, RequestType::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, RequestType::DataColumnsByRange(req) => req.max_requested::(), diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index ecbacc8c112..b9e82a5f1ee 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -6,10 +6,11 @@ use serde::{Deserialize, Serialize}; use std::future::Future; use std::hash::Hash; use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; use std::time::{Duration, Instant}; use tokio::time::Interval; -use types::EthSpec; +use types::{ChainSpec, EthSpec, ForkContext, ForkName}; /// Nanoseconds since a given time. 
// Maintained as u64 to reduce footprint @@ -109,6 +110,7 @@ pub struct RPCRateLimiter { lc_finality_update_rl: Limiter, /// LightClientUpdatesByRange rate limiter. lc_updates_by_range_rl: Limiter, + fork_context: Arc, } /// Error type for non conformant requests @@ -176,7 +178,7 @@ impl RPCRateLimiterBuilder { self } - pub fn build(self) -> Result { + pub fn build(self, fork_context: Arc) -> Result { // get our quotas let ping_quota = self.ping_quota.ok_or("Ping quota not specified")?; let metadata_quota = self.metadata_quota.ok_or("MetaData quota not specified")?; @@ -253,13 +255,14 @@ impl RPCRateLimiterBuilder { lc_finality_update_rl, lc_updates_by_range_rl, init_time: Instant::now(), + fork_context, }) } } pub trait RateLimiterItem { fn protocol(&self) -> Protocol; - fn max_responses(&self) -> u64; + fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64; } impl RateLimiterItem for super::RequestType { @@ -267,13 +270,16 @@ impl RateLimiterItem for super::RequestType { self.versioned_protocol().protocol() } - fn max_responses(&self) -> u64 { - self.max_responses() + fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 { + self.max_responses(current_fork, spec) } } impl RPCRateLimiter { - pub fn new_with_config(config: RateLimiterConfig) -> Result { + pub fn new_with_config( + config: RateLimiterConfig, + fork_context: Arc, + ) -> Result { // Destructure to make sure every configuration value is used. let RateLimiterConfig { ping_quota, @@ -316,7 +322,7 @@ impl RPCRateLimiter { Protocol::LightClientUpdatesByRange, light_client_updates_by_range_quota, ) - .build() + .build(fork_context) } /// Get a builder instance. 
@@ -330,7 +336,9 @@ impl RPCRateLimiter { request: &Item, ) -> Result<(), RateLimitedErr> { let time_since_start = self.init_time.elapsed(); - let tokens = request.max_responses().max(1); + let tokens = request + .max_responses(self.fork_context.current_fork(), &self.fork_context.spec) + .max(1); let check = |limiter: &mut Limiter| limiter.allows(time_since_start, peer_id, tokens); diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index e968ad11e3d..e0c8593f29f 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -1,5 +1,6 @@ use std::{ collections::{hash_map::Entry, HashMap, VecDeque}, + sync::Arc, task::{Context, Poll}, time::Duration, }; @@ -9,7 +10,7 @@ use libp2p::{swarm::NotifyHandler, PeerId}; use slog::{crit, debug, Logger}; use smallvec::SmallVec; use tokio_util::time::DelayQueue; -use types::EthSpec; +use types::{EthSpec, ForkContext}; use super::{ config::OutboundRateLimiterConfig, @@ -50,9 +51,13 @@ pub enum Error { impl SelfRateLimiter { /// Creates a new [`SelfRateLimiter`] based on configration values. - pub fn new(config: OutboundRateLimiterConfig, log: Logger) -> Result { + pub fn new( + config: OutboundRateLimiterConfig, + fork_context: Arc, + log: Logger, + ) -> Result { debug!(log, "Using self rate limiting params"; "config" => ?config); - let limiter = RateLimiter::new_with_config(config.0)?; + let limiter = RateLimiter::new_with_config(config.0, fork_context)?; Ok(SelfRateLimiter { delayed_requests: Default::default(), @@ -215,7 +220,7 @@ mod tests { use crate::service::api_types::{AppRequestId, RequestId, SyncRequestId}; use libp2p::PeerId; use std::time::Duration; - use types::MainnetEthSpec; + use types::{EthSpec, ForkContext, Hash256, MainnetEthSpec, Slot}; /// Test that `next_peer_request_ready` correctly maintains the queue. 
#[tokio::test] @@ -225,8 +230,13 @@ mod tests { ping_quota: Quota::n_every(1, 2), ..Default::default() }); + let fork_context = std::sync::Arc::new(ForkContext::new::( + Slot::new(0), + Hash256::ZERO, + &MainnetEthSpec::default_spec(), + )); let mut limiter: SelfRateLimiter = - SelfRateLimiter::new(config, log).unwrap(); + SelfRateLimiter::new(config, fork_context, log).unwrap(); let peer_id = PeerId::random(); for i in 1..=5u32 { diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index c4944078fef..b4f19f668d7 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -890,14 +890,6 @@ impl NetworkBeaconProcessor { "start_slot" => req.start_slot, ); - // Should not send more than max request blocks - if req.max_blobs_requested::() > self.chain.spec.max_request_blob_sidecars { - return Err(( - RpcErrorResponse::InvalidRequest, - "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", - )); - } - let request_start_slot = Slot::from(req.start_slot); let data_availability_boundary_slot = match self.chain.data_availability_boundary() { diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 7e27a91bd6b..8238fa146dd 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -259,7 +259,7 @@ impl TestRig { assert!(beacon_processor.is_ok()); let block = next_block_tuple.0; let blob_sidecars = if let Some((kzg_proofs, blobs)) = next_block_tuple.1 { - Some(BlobSidecar::build_sidecars(blobs, &block, kzg_proofs).unwrap()) + Some(BlobSidecar::build_sidecars(blobs, &block, kzg_proofs, &chain.spec).unwrap()) } else { None }; @@ -344,7 +344,7 @@ impl TestRig { } pub fn enqueue_single_lookup_rpc_blobs(&self) { if let Some(blobs) = 
self.next_blobs.clone() { - let blobs = FixedBlobSidecarList::from(blobs.into_iter().map(Some).collect::>()); + let blobs = FixedBlobSidecarList::new(blobs.into_iter().map(Some).collect::>()); self.network_beacon_processor .send_rpc_blobs( self.next_block.canonical_root(), @@ -1130,7 +1130,12 @@ async fn test_blobs_by_range() { .block_root_at_slot(Slot::new(slot), WhenSlotSkipped::None) .unwrap(); blob_count += root - .map(|root| rig.chain.get_blobs(&root).unwrap_or_default().len()) + .map(|root| { + rig.chain + .get_blobs(&root) + .map(|list| list.len()) + .unwrap_or(0) + }) .unwrap_or(0); } let mut actual_count = 0; diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 966ce55fabe..7a234eaef04 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -2,13 +2,13 @@ use beacon_chain::{ block_verification_types::RpcBlock, data_column_verification::CustodyDataColumn, get_block_root, }; use lighthouse_network::PeerId; -use ssz_types::VariableList; use std::{ collections::{HashMap, VecDeque}, sync::Arc, }; use types::{ - BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, EthSpec, Hash256, SignedBeaconBlock, + BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, EthSpec, Hash256, RuntimeVariableList, + SignedBeaconBlock, }; #[derive(Debug)] @@ -31,6 +31,7 @@ pub struct RangeBlockComponentsRequest { num_custody_column_requests: Option, /// The peers the request was made to. 
pub(crate) peer_ids: Vec, + max_blobs_per_block: usize, } impl RangeBlockComponentsRequest { @@ -39,6 +40,7 @@ impl RangeBlockComponentsRequest { expects_custody_columns: Option>, num_custody_column_requests: Option, peer_ids: Vec, + max_blobs_per_block: usize, ) -> Self { Self { blocks: <_>::default(), @@ -51,6 +53,7 @@ impl RangeBlockComponentsRequest { expects_custody_columns, num_custody_column_requests, peer_ids, + max_blobs_per_block, } } @@ -100,7 +103,7 @@ impl RangeBlockComponentsRequest { let mut responses = Vec::with_capacity(blocks.len()); let mut blob_iter = blobs.into_iter().peekable(); for block in blocks.into_iter() { - let mut blob_list = Vec::with_capacity(E::max_blobs_per_block()); + let mut blob_list = Vec::with_capacity(self.max_blobs_per_block); while { let pair_next_blob = blob_iter .peek() @@ -111,7 +114,7 @@ impl RangeBlockComponentsRequest { blob_list.push(blob_iter.next().ok_or("Missing next blob".to_string())?); } - let mut blobs_buffer = vec![None; E::max_blobs_per_block()]; + let mut blobs_buffer = vec![None; self.max_blobs_per_block]; for blob in blob_list { let blob_index = blob.index as usize; let Some(blob_opt) = blobs_buffer.get_mut(blob_index) else { @@ -123,7 +126,11 @@ impl RangeBlockComponentsRequest { *blob_opt = Some(blob); } } - let blobs = VariableList::from(blobs_buffer.into_iter().flatten().collect::>()); + let blobs = RuntimeVariableList::new( + blobs_buffer.into_iter().flatten().collect::>(), + self.max_blobs_per_block, + ) + .map_err(|_| "Blobs returned exceeds max length".to_string())?; responses.push(RpcBlock::new(None, block, Some(blobs)).map_err(|e| format!("{e:?}"))?) 
} @@ -245,12 +252,18 @@ mod tests { #[test] fn no_blobs_into_responses() { + let spec = test_spec::(); let peer_id = PeerId::random(); - let mut info = RangeBlockComponentsRequest::::new(false, None, None, vec![peer_id]); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) - .map(|_| generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng).0) + .map(|_| { + generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng, &spec) + .0 + }) .collect::>(); + let max_len = spec.max_blobs_per_block(blocks.first().unwrap().epoch()) as usize; + let mut info = + RangeBlockComponentsRequest::::new(false, None, None, vec![peer_id], max_len); // Send blocks and complete terminate response for block in blocks { @@ -265,15 +278,24 @@ mod tests { #[test] fn empty_blobs_into_responses() { + let spec = test_spec::(); let peer_id = PeerId::random(); - let mut info = RangeBlockComponentsRequest::::new(true, None, None, vec![peer_id]); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { // Always generate some blobs. 
- generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Number(3), &mut rng).0 + generate_rand_block_and_blobs::( + ForkName::Deneb, + NumBlobs::Number(3), + &mut rng, + &spec, + ) + .0 }) .collect::>(); + let max_len = spec.max_blobs_per_block(blocks.first().unwrap().epoch()) as usize; + let mut info = + RangeBlockComponentsRequest::::new(true, None, None, vec![peer_id], max_len); // Send blocks and complete terminate response for block in blocks { @@ -294,12 +316,7 @@ mod tests { fn rpc_block_with_custody_columns() { let spec = test_spec::(); let expects_custody_columns = vec![1, 2, 3, 4]; - let mut info = RangeBlockComponentsRequest::::new( - false, - Some(expects_custody_columns.clone()), - Some(expects_custody_columns.len()), - vec![PeerId::random()], - ); + let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { @@ -311,7 +328,14 @@ mod tests { ) }) .collect::>(); - + let max_len = spec.max_blobs_per_block(blocks.first().unwrap().0.epoch()) as usize; + let mut info = RangeBlockComponentsRequest::::new( + false, + Some(expects_custody_columns.clone()), + Some(expects_custody_columns.len()), + vec![PeerId::random()], + max_len, + ); // Send blocks and complete terminate response for block in &blocks { info.add_block_response(Some(block.0.clone().into())); @@ -355,12 +379,7 @@ mod tests { let spec = test_spec::(); let expects_custody_columns = vec![1, 2, 3, 4]; let num_of_data_column_requests = 2; - let mut info = RangeBlockComponentsRequest::::new( - false, - Some(expects_custody_columns.clone()), - Some(num_of_data_column_requests), - vec![PeerId::random()], - ); + let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { @@ -372,7 +391,14 @@ mod tests { ) }) .collect::>(); - + let max_len = spec.max_blobs_per_block(blocks.first().unwrap().0.epoch()) as usize; + let mut info = RangeBlockComponentsRequest::::new( + false, + Some(expects_custody_columns.clone()), + Some(num_of_data_column_requests), + 
vec![PeerId::random()], + max_len, + ); // Send blocks and complete terminate response for block in &blocks { info.add_block_response(Some(block.0.clone().into())); diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 5d02be2b4c1..2df8b5f94c0 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -1234,6 +1234,7 @@ impl SyncManager { .network .range_block_and_blob_response(id, block_or_blob) { + let epoch = resp.sender_id.batch_id(); match resp.responses { Ok(blocks) => { match resp.sender_id { @@ -1277,6 +1278,7 @@ impl SyncManager { resp.expects_custody_columns, None, vec![], + self.chain.spec.max_blobs_per_block(epoch) as usize, ), ); // inform range that the request needs to be treated as failed diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index b6b7b315f3f..e1b2b974ec4 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -67,6 +67,15 @@ pub enum RangeRequestId { }, } +impl RangeRequestId { + pub fn batch_id(&self) -> BatchId { + match self { + RangeRequestId::RangeSync { batch_id, .. } => *batch_id, + RangeRequestId::BackfillSync { batch_id, .. 
} => *batch_id, + } + } +} + #[derive(Debug)] pub enum RpcEvent { StreamTermination, @@ -445,11 +454,14 @@ impl SyncNetworkContext { (None, None) }; + // TODO(pawan): this would break if a batch contains multiple epochs + let max_blobs_len = self.chain.spec.max_blobs_per_block(epoch); let info = RangeBlockComponentsRequest::new( expected_blobs, expects_columns, num_of_column_req, requested_peers, + max_blobs_len as usize, ); self.range_block_components_requests .insert(id, (sender_id, info)); @@ -950,12 +962,23 @@ impl SyncNetworkContext { ) -> Option>> { let response = self.blobs_by_root_requests.on_response(id, rpc_event); let response = response.map(|res| { - res.and_then( - |(blobs, seen_timestamp)| match to_fixed_blob_sidecar_list(blobs) { - Ok(blobs) => Ok((blobs, seen_timestamp)), - Err(e) => Err(e.into()), - }, - ) + res.and_then(|(blobs, seen_timestamp)| { + if let Some(max_len) = blobs + .first() + .map(|blob| self.chain.spec.max_blobs_per_block(blob.epoch()) as usize) + { + match to_fixed_blob_sidecar_list(blobs, max_len) { + Ok(blobs) => Ok((blobs, seen_timestamp)), + Err(e) => Err(e.into()), + } + } else { + Err(RpcResponseError::VerifyError( + LookupVerifyError::InternalError( + "Requested blobs for a block that has no blobs".to_string(), + ), + )) + } + }) }); if let Some(Err(RpcResponseError::VerifyError(e))) = &response { self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); @@ -1150,8 +1173,9 @@ impl SyncNetworkContext { fn to_fixed_blob_sidecar_list( blobs: Vec>>, + max_len: usize, ) -> Result, LookupVerifyError> { - let mut fixed_list = FixedBlobSidecarList::default(); + let mut fixed_list = FixedBlobSidecarList::new(vec![None; max_len]); for blob in blobs.into_iter() { let index = blob.index as usize; *fixed_list diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index b9214bafcd7..4a5a16459d3 100644 --- 
a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -28,6 +28,7 @@ pub enum LookupVerifyError { UnrequestedIndex(u64), InvalidInclusionProof, DuplicateData, + InternalError(String), } /// Collection of active requests of a single ReqResp method, i.e. `blocks_by_root` diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index a43b3bd022f..b9e38237c58 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -119,6 +119,8 @@ impl TestRig { .network_globals .set_sync_state(SyncState::Synced); + let spec = chain.spec.clone(); + let rng = XorShiftRng::from_seed([42; 16]); TestRig { beacon_processor_rx, @@ -142,6 +144,7 @@ impl TestRig { harness, fork_name, log, + spec, } } @@ -213,7 +216,7 @@ impl TestRig { ) -> (SignedBeaconBlock, Vec>) { let fork_name = self.fork_name; let rng = &mut self.rng; - generate_rand_block_and_blobs::(fork_name, num_blobs, rng) + generate_rand_block_and_blobs::(fork_name, num_blobs, rng, &self.spec) } fn rand_block_and_data_columns( @@ -1328,8 +1331,10 @@ impl TestRig { #[test] fn stable_rng() { + let spec = types::MainnetEthSpec::default_spec(); let mut rng = XorShiftRng::from_seed([42; 16]); - let (block, _) = generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng); + let (block, _) = + generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng, &spec); assert_eq!( block.canonical_root(), Hash256::from_slice( @@ -2187,8 +2192,8 @@ mod deneb_only { block_verification_types::{AsBlock, RpcBlock}, data_availability_checker::AvailabilityCheckError, }; - use ssz_types::VariableList; use std::collections::VecDeque; + use types::RuntimeVariableList; struct DenebTester { rig: TestRig, @@ -2546,12 +2551,15 @@ mod deneb_only { fn parent_block_unknown_parent(mut self) -> Self { self.rig.log("parent_block_unknown_parent"); let block = 
self.unknown_parent_block.take().unwrap(); + let max_len = self.rig.spec.max_blobs_per_block(block.epoch()) as usize; // Now this block is the one we expect requests from self.block = block.clone(); let block = RpcBlock::new( Some(block.canonical_root()), block, - self.unknown_parent_blobs.take().map(VariableList::from), + self.unknown_parent_blobs + .take() + .map(|vec| RuntimeVariableList::from_vec(vec, max_len)), ) .unwrap(); self.rig.parent_block_processed( diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs index 47666b413c5..6ed5c7f8fab 100644 --- a/beacon_node/network/src/sync/tests/mod.rs +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -12,7 +12,7 @@ use slot_clock::ManualSlotClock; use std::sync::Arc; use store::MemoryStore; use tokio::sync::mpsc; -use types::{test_utils::XorShiftRng, ForkName, MinimalEthSpec as E}; +use types::{test_utils::XorShiftRng, ChainSpec, ForkName, MinimalEthSpec as E}; mod lookups; mod range; @@ -64,4 +64,5 @@ struct TestRig { rng: XorShiftRng, fork_name: ForkName, log: Logger, + spec: Arc, } diff --git a/beacon_node/store/src/blob_sidecar_list_from_root.rs b/beacon_node/store/src/blob_sidecar_list_from_root.rs new file mode 100644 index 00000000000..de63eaa76ce --- /dev/null +++ b/beacon_node/store/src/blob_sidecar_list_from_root.rs @@ -0,0 +1,42 @@ +use std::sync::Arc; +use types::{BlobSidecar, BlobSidecarList, EthSpec}; + +#[derive(Debug, Clone)] +pub enum BlobSidecarListFromRoot { + /// Valid root that exists in the DB, but has no blobs associated with it. + NoBlobs, + /// Contains > 1 blob for the requested root. + Blobs(BlobSidecarList), + /// No root exists in the db or cache for the requested root. 
+ NoRoot, +} + +impl From> for BlobSidecarListFromRoot { + fn from(value: BlobSidecarList) -> Self { + Self::Blobs(value) + } +} + +impl BlobSidecarListFromRoot { + pub fn blobs(self) -> Option> { + match self { + Self::NoBlobs | Self::NoRoot => None, + Self::Blobs(blobs) => Some(blobs), + } + } + + #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + match self { + Self::NoBlobs | Self::NoRoot => 0, + Self::Blobs(blobs) => blobs.len(), + } + } + + pub fn iter(&self) -> impl Iterator>> { + match self { + Self::NoBlobs | Self::NoRoot => [].iter(), + Self::Blobs(list) => list.iter(), + } + } +} diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index c6148e53144..c29305f9831 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -14,8 +14,8 @@ use crate::metadata::{ }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ - get_data_column_key, get_key_for_col, DBColumn, DatabaseBlock, Error, ItemStore, - KeyValueStoreOp, StoreItem, StoreOp, + get_data_column_key, get_key_for_col, BlobSidecarListFromRoot, DBColumn, DatabaseBlock, Error, + ItemStore, KeyValueStoreOp, StoreItem, StoreOp, }; use crate::{metrics, parse_data_column_key}; use itertools::{process_results, Itertools}; @@ -1280,9 +1280,10 @@ impl, Cold: ItemStore> HotColdDB StoreOp::PutBlobs(_, _) | StoreOp::PutDataColumns(_, _) => true, StoreOp::DeleteBlobs(block_root) => { match self.get_blobs(block_root) { - Ok(Some(blob_sidecar_list)) => { + Ok(BlobSidecarListFromRoot::Blobs(blob_sidecar_list)) => { blobs_to_delete.push((*block_root, blob_sidecar_list)); } + Ok(BlobSidecarListFromRoot::NoBlobs | BlobSidecarListFromRoot::NoRoot) => {} Err(e) => { error!( self.log, "Error getting blobs"; @@ -1290,7 +1291,6 @@ impl, Cold: ItemStore> HotColdDB "error" => ?e ); } - _ => (), } true } @@ -2045,11 +2045,11 @@ impl, Cold: ItemStore> HotColdDB } /// Fetch blobs for a given block from the 
store. - pub fn get_blobs(&self, block_root: &Hash256) -> Result>, Error> { + pub fn get_blobs(&self, block_root: &Hash256) -> Result, Error> { // Check the cache. if let Some(blobs) = self.block_cache.lock().get_blobs(block_root) { metrics::inc_counter(&metrics::BEACON_BLOBS_CACHE_HIT_COUNT); - return Ok(Some(blobs.clone())); + return Ok(blobs.clone().into()); } match self @@ -2057,13 +2057,27 @@ impl, Cold: ItemStore> HotColdDB .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_slice())? { Some(ref blobs_bytes) => { - let blobs = BlobSidecarList::from_ssz_bytes(blobs_bytes)?; - self.block_cache - .lock() - .put_blobs(*block_root, blobs.clone()); - Ok(Some(blobs)) + // We insert a VariableList of BlobSidecars into the db, but retrieve + // a plain vec since we don't know the length limit of the list without + // knowing the slot. + // The encoding of a VariableList is the same as a regular vec. + let blobs: Vec>> = Vec::<_>::from_ssz_bytes(blobs_bytes)?; + if let Some(max_blobs_per_block) = blobs + .first() + .map(|blob| self.spec.max_blobs_per_block(blob.epoch())) + { + let blobs = BlobSidecarList::from_vec(blobs, max_blobs_per_block as usize); + self.block_cache + .lock() + .put_blobs(*block_root, blobs.clone()); + + Ok(BlobSidecarListFromRoot::Blobs(blobs)) + } else { + // This always implies that there were no blobs for this block_root + Ok(BlobSidecarListFromRoot::NoBlobs) + } } - None => Ok(None), + None => Ok(BlobSidecarListFromRoot::NoRoot), } } diff --git a/beacon_node/store/src/impls/execution_payload.rs b/beacon_node/store/src/impls/execution_payload.rs index 5c60aa8d7e3..097b069a665 100644 --- a/beacon_node/store/src/impls/execution_payload.rs +++ b/beacon_node/store/src/impls/execution_payload.rs @@ -1,7 +1,7 @@ use crate::{DBColumn, Error, StoreItem}; use ssz::{Decode, Encode}; use types::{ - BlobSidecarList, EthSpec, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + EthSpec, ExecutionPayload, ExecutionPayloadBellatrix, 
ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, }; @@ -27,7 +27,6 @@ impl_store_item!(ExecutionPayloadCapella); impl_store_item!(ExecutionPayloadDeneb); impl_store_item!(ExecutionPayloadElectra); impl_store_item!(ExecutionPayloadFulu); -impl_store_item!(BlobSidecarList); /// This fork-agnostic implementation should be only used for writing. /// diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 09ae9a32dd0..1458fa846c6 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -7,6 +7,7 @@ //! //! Provides a simple API for storing/retrieving all types that sometimes needs type-hints. See //! tests for implementation examples. +pub mod blob_sidecar_list_from_root; pub mod chunked_iter; pub mod chunked_vector; pub mod config; @@ -28,6 +29,7 @@ pub mod state_cache; pub mod iter; +pub use self::blob_sidecar_list_from_root::BlobSidecarListFromRoot; pub use self::config::StoreConfig; pub use self::consensus_context::OnDiskConsensusContext; pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split}; diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index a107f6147ae..a303bea2681 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -135,6 +135,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 16384 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 # DAS CUSTODY_REQUIREMENT: 4 diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index f71984059aa..68d2b0eafe2 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ 
b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -118,6 +118,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 16384 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 # DAS CUSTODY_REQUIREMENT: 4 diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index 6d344b5b524..930ce0a1bcb 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -124,6 +124,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 # DAS CUSTODY_REQUIREMENT: 4 diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 244ddd564d2..638f6fe42f8 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -141,6 +141,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 # DAS CUSTODY_REQUIREMENT: 4 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 88f8359bd13..38185188976 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -119,6 +119,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 # DAS CUSTODY_REQUIREMENT: 4 diff --git a/consensus/state_processing/src/per_block_processing.rs 
b/consensus/state_processing/src/per_block_processing.rs index 22e0a5eab30..782dbe2a547 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -391,10 +391,12 @@ pub fn partially_verify_execution_payload = VariableList::MaxBlobCommitmentsPerBlock>; -pub type KzgCommitmentOpts = - FixedVector, ::MaxBlobsPerBlock>; /// The number of leaves (including padding) on the `BeaconBlockBody` Merkle tree. /// diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index 302aa2a4c18..ff4555747c6 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -1,10 +1,10 @@ use crate::test_utils::TestRandom; use crate::{ - beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, BeaconStateError, Blob, - Epoch, EthSpec, FixedVector, Hash256, SignedBeaconBlockHeader, Slot, VariableList, + beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, AbstractExecPayload, BeaconBlockHeader, + BeaconStateError, Blob, ChainSpec, Epoch, EthSpec, FixedVector, ForkName, + ForkVersionDeserialize, Hash256, KzgProofs, RuntimeFixedVector, RuntimeVariableList, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, VariableList, }; -use crate::{AbstractExecPayload, ForkName}; -use crate::{ForkVersionDeserialize, KzgProofs, SignedBeaconBlock}; use bls::Signature; use derivative::Derivative; use kzg::{Blob as KzgBlob, Kzg, KzgCommitment, KzgProof, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT}; @@ -30,19 +30,6 @@ pub struct BlobIdentifier { pub index: u64, } -impl BlobIdentifier { - pub fn get_all_blob_ids(block_root: Hash256) -> Vec { - let mut blob_ids = Vec::with_capacity(E::max_blobs_per_block()); - for i in 0..E::max_blobs_per_block() { - blob_ids.push(BlobIdentifier { - block_root, - index: i as u64, - }); - } - blob_ids - } -} - impl PartialOrd for BlobIdentifier { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) @@ -291,19 +278,23 @@ impl 
BlobSidecar { blobs: BlobsList, block: &SignedBeaconBlock, kzg_proofs: KzgProofs, + spec: &ChainSpec, ) -> Result, BlobSidecarError> { let mut blob_sidecars = vec![]; for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() { let blob_sidecar = BlobSidecar::new(i, blob, block, *kzg_proof)?; blob_sidecars.push(Arc::new(blob_sidecar)); } - Ok(VariableList::from(blob_sidecars)) + Ok(RuntimeVariableList::from_vec( + blob_sidecars, + spec.max_blobs_per_block(block.epoch()) as usize, + )) } } -pub type BlobSidecarList = VariableList>, ::MaxBlobsPerBlock>; -pub type FixedBlobSidecarList = - FixedVector>>, ::MaxBlobsPerBlock>; +pub type BlobSidecarList = RuntimeVariableList>>; +/// Alias for a non length-constrained list of `BlobSidecar`s. +pub type FixedBlobSidecarList = RuntimeFixedVector>>>; pub type BlobsList = VariableList, ::MaxBlobCommitmentsPerBlock>; impl ForkVersionDeserialize for BlobSidecarList { diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index f0bfeba680b..65f4c37aa15 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -237,6 +237,7 @@ pub struct ChainSpec { pub max_request_data_column_sidecars: u64, pub min_epochs_for_blob_sidecars_requests: u64, pub blob_sidecar_subnet_count: u64, + max_blobs_per_block: u64, /* * Networking Derived @@ -616,6 +617,17 @@ impl ChainSpec { } } + /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for the fork at `epoch`. + pub fn max_blobs_per_block(&self, epoch: Epoch) -> u64 { + self.max_blobs_per_block_by_fork(self.fork_name_at_epoch(epoch)) + } + + /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for `fork`. 
+ pub fn max_blobs_per_block_by_fork(&self, _fork_name: ForkName) -> u64 { + // TODO(electra): add Electra blobs per block change here + self.max_blobs_per_block + } + pub fn data_columns_per_subnet(&self) -> usize { self.number_of_columns .safe_div(self.data_column_sidecar_subnet_count as usize) @@ -859,6 +871,7 @@ impl ChainSpec { max_request_data_column_sidecars: default_max_request_data_column_sidecars(), min_epochs_for_blob_sidecars_requests: default_min_epochs_for_blob_sidecars_requests(), blob_sidecar_subnet_count: default_blob_sidecar_subnet_count(), + max_blobs_per_block: default_max_blobs_per_block(), /* * Derived Deneb Specific @@ -1187,6 +1200,7 @@ impl ChainSpec { max_request_data_column_sidecars: default_max_request_data_column_sidecars(), min_epochs_for_blob_sidecars_requests: 16384, blob_sidecar_subnet_count: default_blob_sidecar_subnet_count(), + max_blobs_per_block: default_max_blobs_per_block(), /* * Derived Deneb Specific @@ -1388,6 +1402,9 @@ pub struct Config { #[serde(default = "default_blob_sidecar_subnet_count")] #[serde(with = "serde_utils::quoted_u64")] blob_sidecar_subnet_count: u64, + #[serde(default = "default_max_blobs_per_block")] + #[serde(with = "serde_utils::quoted_u64")] + max_blobs_per_block: u64, #[serde(default = "default_min_per_epoch_churn_limit_electra")] #[serde(with = "serde_utils::quoted_u64")] @@ -1523,6 +1540,12 @@ const fn default_blob_sidecar_subnet_count() -> u64 { 6 } +/// Its important to keep this consistent with the deneb preset value for +/// `MAX_BLOBS_PER_BLOCK` else we might run into consensus issues. 
+const fn default_max_blobs_per_block() -> u64 { + 6 +} + const fn default_min_per_epoch_churn_limit_electra() -> u64 { 128_000_000_000 } @@ -1745,6 +1768,7 @@ impl Config { max_request_data_column_sidecars: spec.max_request_data_column_sidecars, min_epochs_for_blob_sidecars_requests: spec.min_epochs_for_blob_sidecars_requests, blob_sidecar_subnet_count: spec.blob_sidecar_subnet_count, + max_blobs_per_block: spec.max_blobs_per_block, min_per_epoch_churn_limit_electra: spec.min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit: spec @@ -1822,6 +1846,7 @@ impl Config { max_request_data_column_sidecars, min_epochs_for_blob_sidecars_requests, blob_sidecar_subnet_count, + max_blobs_per_block, min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, @@ -1890,6 +1915,7 @@ impl Config { max_request_data_column_sidecars, min_epochs_for_blob_sidecars_requests, blob_sidecar_subnet_count, + max_blobs_per_block, min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index 57251e319a4..b2a050e9d5e 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -1,7 +1,7 @@ use crate::beacon_block_body::{KzgCommitments, BLOB_KZG_COMMITMENTS_INDEX}; use crate::test_utils::TestRandom; use crate::BeaconStateError; -use crate::{BeaconBlockHeader, EthSpec, Hash256, KzgProofs, SignedBeaconBlockHeader, Slot}; +use crate::{BeaconBlockHeader, Epoch, EthSpec, Hash256, KzgProofs, SignedBeaconBlockHeader, Slot}; use bls::Signature; use derivative::Derivative; use kzg::Error as KzgError; @@ -11,7 +11,6 @@ use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::typenum::Unsigned; use ssz_types::Error as SszError; use ssz_types::{FixedVector, VariableList}; use std::hash::Hash; @@ -68,6 +67,10 @@ impl 
DataColumnSidecar { self.signed_block_header.message.slot } + pub fn epoch(&self) -> Epoch { + self.slot().epoch(E::slots_per_epoch()) + } + pub fn block_root(&self) -> Hash256 { self.signed_block_header.message.tree_hash_root() } @@ -110,18 +113,16 @@ impl DataColumnSidecar { .len() } - pub fn max_size() -> usize { + pub fn max_size(max_blobs_per_block: usize) -> usize { Self { index: 0, - column: VariableList::new(vec![Cell::::default(); E::MaxBlobsPerBlock::to_usize()]) - .unwrap(), + column: VariableList::new(vec![Cell::::default(); max_blobs_per_block]).unwrap(), kzg_commitments: VariableList::new(vec![ KzgCommitment::empty_for_testing(); - E::MaxBlobsPerBlock::to_usize() + max_blobs_per_block ]) .unwrap(), - kzg_proofs: VariableList::new(vec![KzgProof::empty(); E::MaxBlobsPerBlock::to_usize()]) - .unwrap(), + kzg_proofs: VariableList::new(vec![KzgProof::empty(); max_blobs_per_block]).unwrap(), signed_block_header: SignedBeaconBlockHeader { message: BeaconBlockHeader::empty(), signature: Signature::empty(), diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 23e82762096..976766dfa9d 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -4,8 +4,7 @@ use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz_types::typenum::{ bit::B0, UInt, U0, U1, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U134217728, - U16, U16777216, U2, U2048, U256, U262144, U32, U4, U4096, U512, U6, U625, U64, U65536, U8, - U8192, + U16, U16777216, U2, U2048, U256, U262144, U32, U4, U4096, U512, U625, U64, U65536, U8, U8192, }; use ssz_types::typenum::{U17, U9}; use std::fmt::{self, Debug}; @@ -109,7 +108,6 @@ pub trait EthSpec: /* * New in Deneb */ - type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin; type MaxBlobCommitmentsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin; type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + 
PartialEq; type BytesPerFieldElement: Unsigned + Clone + Sync + Send + Debug + PartialEq; @@ -281,11 +279,6 @@ pub trait EthSpec: Self::MaxWithdrawalsPerPayload::to_usize() } - /// Returns the `MAX_BLOBS_PER_BLOCK` constant for this specification. - fn max_blobs_per_block() -> usize { - Self::MaxBlobsPerBlock::to_usize() - } - /// Returns the `MAX_BLOB_COMMITMENTS_PER_BLOCK` constant for this specification. fn max_blob_commitments_per_block() -> usize { Self::MaxBlobCommitmentsPerBlock::to_usize() @@ -421,7 +414,6 @@ impl EthSpec for MainnetEthSpec { type GasLimitDenominator = U1024; type MinGasLimit = U5000; type MaxExtraDataBytes = U32; - type MaxBlobsPerBlock = U6; type MaxBlobCommitmentsPerBlock = U4096; type BytesPerFieldElement = U32; type FieldElementsPerBlob = U4096; @@ -505,7 +497,6 @@ impl EthSpec for MinimalEthSpec { MinGasLimit, MaxExtraDataBytes, MaxBlsToExecutionChanges, - MaxBlobsPerBlock, BytesPerFieldElement, PendingDepositsLimit, MaxPendingDepositsPerEpoch, @@ -559,7 +550,6 @@ impl EthSpec for GnosisEthSpec { type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch type MaxBlsToExecutionChanges = U16; type MaxWithdrawalsPerPayload = U8; - type MaxBlobsPerBlock = U6; type MaxBlobCommitmentsPerBlock = U4096; type FieldElementsPerBlob = U4096; type BytesPerFieldElement = U32; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 282f27a5179..54d8bf51b6a 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -108,6 +108,7 @@ pub mod data_column_sidecar; pub mod data_column_subnet_id; pub mod light_client_header; pub mod non_zero_usize; +pub mod runtime_fixed_vector; pub mod runtime_var_list; pub use crate::activation_queue::ActivationQueue; @@ -223,6 +224,7 @@ pub use crate::preset::{ pub use crate::proposer_preparation_data::ProposerPreparationData; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; +pub use 
crate::runtime_fixed_vector::RuntimeFixedVector; pub use crate::runtime_var_list::RuntimeVariableList; pub use crate::selection_proof::SelectionProof; pub use crate::shuffling_id::AttestationShufflingId; diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index f8b36654093..f64b7051e5f 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -205,8 +205,6 @@ impl CapellaPreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct DenebPreset { - #[serde(with = "serde_utils::quoted_u64")] - pub max_blobs_per_block: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_blob_commitments_per_block: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -216,7 +214,6 @@ pub struct DenebPreset { impl DenebPreset { pub fn from_chain_spec(_spec: &ChainSpec) -> Self { Self { - max_blobs_per_block: E::max_blobs_per_block() as u64, max_blob_commitments_per_block: E::max_blob_commitments_per_block() as u64, field_elements_per_blob: E::field_elements_per_blob() as u64, } diff --git a/consensus/types/src/runtime_fixed_vector.rs b/consensus/types/src/runtime_fixed_vector.rs new file mode 100644 index 00000000000..2b08b7bf702 --- /dev/null +++ b/consensus/types/src/runtime_fixed_vector.rs @@ -0,0 +1,81 @@ +//! Emulates a fixed size array but with the length set at runtime. +//! +//! The length of the list cannot be changed once it is set. 
+ +#[derive(Clone, Debug)] +pub struct RuntimeFixedVector { + vec: Vec, + len: usize, +} + +impl RuntimeFixedVector { + pub fn new(vec: Vec) -> Self { + let len = vec.len(); + Self { vec, len } + } + + pub fn to_vec(&self) -> Vec { + self.vec.clone() + } + + pub fn as_slice(&self) -> &[T] { + self.vec.as_slice() + } + + #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + self.len + } + + pub fn into_vec(self) -> Vec { + self.vec + } + + pub fn default(max_len: usize) -> Self { + Self { + vec: vec![T::default(); max_len], + len: max_len, + } + } + + pub fn take(&mut self) -> Self { + let new = std::mem::take(&mut self.vec); + *self = Self::new(vec![T::default(); self.len]); + Self { + vec: new, + len: self.len, + } + } +} + +impl std::ops::Deref for RuntimeFixedVector { + type Target = [T]; + + fn deref(&self) -> &[T] { + &self.vec[..] + } +} + +impl std::ops::DerefMut for RuntimeFixedVector { + fn deref_mut(&mut self) -> &mut [T] { + &mut self.vec[..] + } +} + +impl IntoIterator for RuntimeFixedVector { + type Item = T; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.vec.into_iter() + } +} + +impl<'a, T> IntoIterator for &'a RuntimeFixedVector { + type Item = &'a T; + type IntoIter = std::slice::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.vec.iter() + } +} diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs index 8290876fa1f..857073b3b84 100644 --- a/consensus/types/src/runtime_var_list.rs +++ b/consensus/types/src/runtime_var_list.rs @@ -2,7 +2,7 @@ use derivative::Derivative; use serde::{Deserialize, Serialize}; use ssz::Decode; use ssz_types::Error; -use std::ops::{Deref, DerefMut, Index, IndexMut}; +use std::ops::{Deref, Index, IndexMut}; use std::slice::SliceIndex; /// Emulates a SSZ `List`. 
@@ -10,6 +10,8 @@ use std::slice::SliceIndex; /// An ordered, heap-allocated, variable-length, homogeneous collection of `T`, with no more than /// `max_len` values. /// +/// To ensure there are no inconsistent states, we do not allow any mutating operation if `max_len` is not set. +/// /// ## Example /// /// ``` @@ -35,6 +37,7 @@ use std::slice::SliceIndex; /// /// // Push a value to if it _does_ exceed the maximum. /// assert!(long.push(6).is_err()); +/// /// ``` #[derive(Debug, Clone, Serialize, Deserialize, Derivative)] #[derivative(PartialEq, Eq, Hash(bound = "T: std::hash::Hash"))] @@ -65,7 +68,7 @@ impl RuntimeVariableList { Self { vec, max_len } } - /// Create an empty list. + /// Create an empty list with the given `max_len`. pub fn empty(max_len: usize) -> Self { Self { vec: vec![], @@ -77,6 +80,10 @@ impl RuntimeVariableList { self.vec.as_slice() } + pub fn as_mut_slice(&mut self) -> &mut [T] { + self.vec.as_mut_slice() + } + /// Returns the number of values presently in `self`. pub fn len(&self) -> usize { self.vec.len() @@ -88,6 +95,8 @@ impl RuntimeVariableList { } /// Returns the type-level maximum length. + /// + /// Returns `None` if self is uninitialized with a max_len. pub fn max_len(&self) -> usize { self.max_len } @@ -169,12 +178,6 @@ impl Deref for RuntimeVariableList { } } -impl DerefMut for RuntimeVariableList { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.vec[..] 
- } -} - impl<'a, T> IntoIterator for &'a RuntimeVariableList { type Item = &'a T; type IntoIter = std::slice::Iter<'a, T>; diff --git a/lcli/src/mock_el.rs b/lcli/src/mock_el.rs index 7719f02aa33..2e2c27a2dbf 100644 --- a/lcli/src/mock_el.rs +++ b/lcli/src/mock_el.rs @@ -9,6 +9,7 @@ use execution_layer::{ }; use std::net::Ipv4Addr; use std::path::PathBuf; +use std::sync::Arc; use types::*; pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { @@ -22,7 +23,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< let osaka_time = parse_optional(matches, "osaka-time")?; let handle = env.core_context().executor.handle().unwrap(); - let spec = &E::default_spec(); + let spec = Arc::new(E::default_spec()); let jwt_key = JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(); std::fs::write(jwt_path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); @@ -41,7 +42,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< osaka_time, }; let kzg = None; - let server: MockServer = MockServer::new_with_config(&handle, config, kzg); + let server: MockServer = MockServer::new_with_config(&handle, config, spec, kzg); if all_payloads_valid { eprintln!( diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index ac01c84b9d0..6e632ccf549 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -7,6 +7,7 @@ use environment::RuntimeContext; use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, Timeouts}; use sensitive_url::SensitiveUrl; use std::path::PathBuf; +use std::sync::Arc; use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; use tempfile::{Builder as TempBuilder, TempDir}; @@ -248,8 +249,14 @@ impl LocalExecutionNode { if let Err(e) = std::fs::write(jwt_file_path, config.jwt_key.hex_string()) { panic!("Failed to write jwt file {}", e); } + let spec = Arc::new(E::default_spec()); Self { - server: MockServer::new_with_config(&context.executor.handle().unwrap(), config, 
None), + server: MockServer::new_with_config( + &context.executor.handle().unwrap(), + config, + spec, + None, + ), datadir, } } From 348fbdb83816ba213945e5c7fcd8c5c5342aa0ed Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 10 Jan 2025 11:35:05 +0400 Subject: [PATCH 06/26] Add missing crates to cargo workspace (#6774) * Add the remaining crates to cargo workspace * Merge branch 'unstable' into add-remaining-crates-workspace --- Cargo.toml | 7 +++- beacon_node/genesis/Cargo.toml | 16 ++++---- beacon_node/operation_pool/Cargo.toml | 14 +++---- common/filesystem/Cargo.toml | 1 - consensus/merkle_proof/Cargo.toml | 2 +- consensus/types/Cargo.toml | 56 +++++++++++++-------------- 6 files changed, 50 insertions(+), 46 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 23e52a306b6..233e5fa775b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,11 +9,13 @@ members = [ "beacon_node/client", "beacon_node/eth1", "beacon_node/execution_layer", + "beacon_node/genesis", "beacon_node/http_api", "beacon_node/http_metrics", "beacon_node/lighthouse_network", "beacon_node/lighthouse_network/gossipsub", "beacon_node/network", + "beacon_node/operation_pool", "beacon_node/store", "beacon_node/timer", @@ -30,6 +32,7 @@ members = [ "common/eth2_interop_keypairs", "common/eth2_network_config", "common/eth2_wallet_manager", + "common/filesystem", "common/lighthouse_version", "common/lockfile", "common/logging", @@ -48,14 +51,16 @@ members = [ "common/unused_port", "common/validator_dir", "common/warp_utils", + "consensus/fixed_bytes", "consensus/fork_choice", - "consensus/int_to_bytes", + "consensus/merkle_proof", "consensus/proto_array", "consensus/safe_arith", "consensus/state_processing", "consensus/swap_or_not_shuffle", + "consensus/types", "crypto/bls", "crypto/eth2_key_derivation", diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index b01e6a6aea7..eeca393947d 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -9,16 +9,16 @@ 
eth1_test_rig = { workspace = true } sensitive_url = { workspace = true } [dependencies] -futures = { workspace = true } -types = { workspace = true } environment = { workspace = true } eth1 = { workspace = true } +ethereum_hashing = { workspace = true } +ethereum_ssz = { workspace = true } +futures = { workspace = true } +int_to_bytes = { workspace = true } +merkle_proof = { workspace = true } rayon = { workspace = true } +slog = { workspace = true } state_processing = { workspace = true } -merkle_proof = { workspace = true } -ethereum_ssz = { workspace = true } -ethereum_hashing = { workspace = true } -tree_hash = { workspace = true } tokio = { workspace = true } -slog = { workspace = true } -int_to_bytes = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 5b48e3f0d88..570b74226c9 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -5,24 +5,24 @@ authors = ["Michael Sproul "] edition = { workspace = true } [dependencies] +bitvec = { workspace = true } derivative = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } itertools = { workspace = true } metrics = { workspace = true } parking_lot = { workspace = true } -types = { workspace = true } -state_processing = { workspace = true } -ethereum_ssz = { workspace = true } -ethereum_ssz_derive = { workspace = true } +rand = { workspace = true } rayon = { workspace = true } serde = { workspace = true } +state_processing = { workspace = true } store = { workspace = true } -bitvec = { workspace = true } -rand = { workspace = true } +types = { workspace = true } [dev-dependencies] beacon_chain = { workspace = true } -tokio = { workspace = true } maplit = { workspace = true } +tokio = { workspace = true } [features] portable = ["beacon_chain/portable"] diff --git a/common/filesystem/Cargo.toml 
b/common/filesystem/Cargo.toml index fd026bd517a..1b5abf03f4c 100644 --- a/common/filesystem/Cargo.toml +++ b/common/filesystem/Cargo.toml @@ -3,7 +3,6 @@ name = "filesystem" version = "0.1.0" authors = ["Mark Mackey "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index c2c6bf270a6..2f721d917b5 100644 --- a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -7,8 +7,8 @@ edition = { workspace = true } [dependencies] alloy-primitives = { workspace = true } ethereum_hashing = { workspace = true } -safe_arith = { workspace = true } fixed_bytes = { workspace = true } +safe_arith = { workspace = true } [dev-dependencies] quickcheck = { workspace = true } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 21a15fc5174..79beb812820 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -10,56 +10,56 @@ harness = false [dependencies] alloy-primitives = { workspace = true } -merkle_proof = { workspace = true } +alloy-rlp = { version = "0.3.4", features = ["derive"] } +# The arbitrary dependency is enabled by default since Capella to avoid complexity introduced by +# `AbstractExecPayload` +arbitrary = { workspace = true, features = ["derive"] } bls = { workspace = true, features = ["arbitrary"] } -kzg = { workspace = true } compare_fields = { workspace = true } compare_fields_derive = { workspace = true } +derivative = { workspace = true } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } ethereum_hashing = { workspace = true } +ethereum_serde_utils = { workspace = true } +ethereum_ssz = { workspace = true, features = ["arbitrary"] } +ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } hex = { workspace = true } int_to_bytes = { workspace = true } +itertools = { workspace = true 
} +kzg = { workspace = true } log = { workspace = true } -rayon = { workspace = true } +maplit = { workspace = true } +merkle_proof = { workspace = true } +metastruct = "0.1.0" +milhouse = { workspace = true } +parking_lot = { workspace = true } rand = { workspace = true } +rand_xorshift = "0.3.0" +rayon = { workspace = true } +regex = { workspace = true } +rpds = { workspace = true } +rusqlite = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true, features = ["rc"] } +serde_json = { workspace = true } +serde_yaml = { workspace = true } slog = { workspace = true } -ethereum_ssz = { workspace = true, features = ["arbitrary"] } -ethereum_ssz_derive = { workspace = true } +smallvec = { workspace = true } ssz_types = { workspace = true, features = ["arbitrary"] } +superstruct = { workspace = true } swap_or_not_shuffle = { workspace = true, features = ["arbitrary"] } +tempfile = { workspace = true } test_random_derive = { path = "../../common/test_random_derive" } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } -rand_xorshift = "0.3.0" -serde_yaml = { workspace = true } -tempfile = { workspace = true } -derivative = { workspace = true } -rusqlite = { workspace = true } -# The arbitrary dependency is enabled by default since Capella to avoid complexity introduced by -# `AbstractExecPayload` -arbitrary = { workspace = true, features = ["derive"] } -ethereum_serde_utils = { workspace = true } -regex = { workspace = true } -parking_lot = { workspace = true } -itertools = { workspace = true } -superstruct = { workspace = true } -metastruct = "0.1.0" -serde_json = { workspace = true } -smallvec = { workspace = true } -maplit = { workspace = true } -alloy-rlp = { version = "0.3.4", features = ["derive"] } -milhouse = { workspace = true } -rpds = { workspace = true } -fixed_bytes = { workspace = true } [dev-dependencies] -criterion = { workspace = true } beacon_chain = { workspace = true } +criterion = { workspace = true } 
+paste = { workspace = true } state_processing = { workspace = true } tokio = { workspace = true } -paste = { workspace = true } [features] default = ["sqlite", "legacy-arith"] From c9747fb77f3ff1d8da765003b4988c20ce2d7cd8 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 13 Jan 2025 11:08:51 +1100 Subject: [PATCH 07/26] Refactor feature testing for spec tests (#6737) * Refactor spec testing for features and simplify usage. * Fix `SszStatic` tests for PeerDAS: exclude eip7594 test vectors when testing Electra types. * Merge branch 'unstable' into refactor-ef-tests-features --- testing/ef_tests/src/cases.rs | 36 +++++++++++++-- .../ef_tests/src/cases/get_custody_columns.rs | 9 ++++ .../src/cases/kzg_blob_to_kzg_commitment.rs | 4 -- .../src/cases/kzg_compute_blob_kzg_proof.rs | 4 -- .../cases/kzg_compute_cells_and_kzg_proofs.rs | 8 +++- .../src/cases/kzg_compute_kzg_proof.rs | 4 -- .../cases/kzg_recover_cells_and_kzg_proofs.rs | 8 +++- .../src/cases/kzg_verify_blob_kzg_proof.rs | 4 -- .../cases/kzg_verify_blob_kzg_proof_batch.rs | 4 -- .../cases/kzg_verify_cell_kzg_proof_batch.rs | 8 +++- .../src/cases/kzg_verify_kzg_proof.rs | 4 -- testing/ef_tests/src/handler.rs | 46 ++++++++++++++----- testing/ef_tests/tests/tests.rs | 20 ++++---- 13 files changed, 103 insertions(+), 56 deletions(-) diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index 63274ee0c03..8f5571d64a6 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -74,11 +74,37 @@ pub use ssz_generic::*; pub use ssz_static::*; pub use transition::TransitionTest; -#[derive(Debug, PartialEq)] +/// Used for running feature tests for future forks that have not yet been added to `ForkName`. +/// This runs tests in the directory named by the feature instead of the fork name. This has been +/// the pattern used in the `consensus-spec-tests` repository: +/// `consensus-spec-tests/tests/general/[feature_name]/[runner_name].` +/// e.g. 
consensus-spec-tests/tests/general/peerdas/ssz_static +/// +/// The feature tests can be run with one of the following methods: +/// 1. `handler.run_for_feature(feature_name)` for new tests that are not on existing fork, i.e. a +/// new handler. This will be temporary and the test will need to be updated to use +/// `handle.run()` once the feature is incorporated into a fork. +/// 2. `handler.run()` for tests that are already on existing forks, but with new test vectors for +/// the feature. In this case the `handler.is_enabled_for_feature` will need to be implemented +/// to return `true` for the feature in order for the feature test vector to be tested. +#[derive(Debug, PartialEq, Clone, Copy)] pub enum FeatureName { Eip7594, } +impl FeatureName { + pub fn list_all() -> Vec { + vec![FeatureName::Eip7594] + } + + /// `ForkName` to use when running the feature tests. + pub fn fork_name(&self) -> ForkName { + match self { + FeatureName::Eip7594 => ForkName::Deneb, + } + } +} + impl Display for FeatureName { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { @@ -107,11 +133,13 @@ pub trait Case: Debug + Sync { true } - /// Whether or not this test exists for the given `feature_name`. + /// Whether or not this test exists for the given `feature_name`. This is intended to be used + /// for features that have not been added to a fork yet, and there is usually a separate folder + /// for the feature in the `consensus-spec-tests` repository. /// - /// Returns `true` by default. + /// Returns `false` by default. fn is_enabled_for_feature(_feature_name: FeatureName) -> bool { - true + false } /// Execute a test and return the result. 
diff --git a/testing/ef_tests/src/cases/get_custody_columns.rs b/testing/ef_tests/src/cases/get_custody_columns.rs index 9665f877300..71b17aeaa3f 100644 --- a/testing/ef_tests/src/cases/get_custody_columns.rs +++ b/testing/ef_tests/src/cases/get_custody_columns.rs @@ -21,6 +21,14 @@ impl LoadCase for GetCustodyColumns { } impl Case for GetCustodyColumns { + fn is_enabled_for_fork(_fork_name: ForkName) -> bool { + false + } + + fn is_enabled_for_feature(feature_name: FeatureName) -> bool { + feature_name == FeatureName::Eip7594 + } + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let spec = E::default_spec(); let node_id = U256::from_str_radix(&self.node_id, 10) @@ -33,6 +41,7 @@ impl Case for GetCustodyColumns { ) .expect("should compute custody columns") .collect::>(); + let expected = &self.result; if computed == *expected { Ok(()) diff --git a/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs b/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs index fa16a5fcb7a..feb9a4ff5c4 100644 --- a/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs +++ b/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs @@ -31,10 +31,6 @@ impl Case for KZGBlobToKZGCommitment { fork_name == ForkName::Deneb } - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name != FeatureName::Eip7594 - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let kzg = get_kzg(); let commitment = parse_blob::(&self.input.blob).and_then(|blob| { diff --git a/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs index 694013e2513..4aadc37af21 100644 --- a/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs @@ -32,10 +32,6 @@ impl Case for KZGComputeBlobKZGProof { fork_name == ForkName::Deneb } - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - 
feature_name != FeatureName::Eip7594 - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let parse_input = |input: &KZGComputeBlobKZGProofInput| -> Result<_, Error> { let blob = parse_blob::(&input.blob)?; diff --git a/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs b/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs index 2a9f8ceeef3..a7219f0629a 100644 --- a/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs +++ b/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs @@ -26,8 +26,12 @@ impl LoadCase for KZGComputeCellsAndKZGProofs { } impl Case for KZGComputeCellsAndKZGProofs { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Deneb + fn is_enabled_for_fork(_fork_name: ForkName) -> bool { + false + } + + fn is_enabled_for_feature(feature_name: FeatureName) -> bool { + feature_name == FeatureName::Eip7594 } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs index 6f53038f28e..4a47fe35ebe 100644 --- a/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs @@ -39,10 +39,6 @@ impl Case for KZGComputeKZGProof { fork_name == ForkName::Deneb } - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name != FeatureName::Eip7594 - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let parse_input = |input: &KZGComputeKZGProofInput| -> Result<_, Error> { let blob = parse_blob::(&input.blob)?; diff --git a/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs b/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs index 10cc866fbe0..b72b3a05cd6 100644 --- a/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs +++ b/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs 
@@ -27,8 +27,12 @@ impl LoadCase for KZGRecoverCellsAndKZGProofs { } impl Case for KZGRecoverCellsAndKZGProofs { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Deneb + fn is_enabled_for_fork(_fork_name: ForkName) -> bool { + false + } + + fn is_enabled_for_feature(feature_name: FeatureName) -> bool { + feature_name == FeatureName::Eip7594 } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs index 3dc955bdcc8..66f50d534b8 100644 --- a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs @@ -116,10 +116,6 @@ impl Case for KZGVerifyBlobKZGProof { fork_name == ForkName::Deneb } - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name != FeatureName::Eip7594 - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let parse_input = |input: &KZGVerifyBlobKZGProofInput| -> Result<(Blob, KzgCommitment, KzgProof), Error> { let blob = parse_blob::(&input.blob)?; diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs index 80cd0a28496..efd41588069 100644 --- a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs @@ -33,10 +33,6 @@ impl Case for KZGVerifyBlobKZGProofBatch { fork_name == ForkName::Deneb } - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name != FeatureName::Eip7594 - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let parse_input = |input: &KZGVerifyBlobKZGProofBatchInput| -> Result<_, Error> { let blobs = input diff --git a/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs 
b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs index 5887d764cae..815ad7a5bcf 100644 --- a/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs +++ b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs @@ -29,8 +29,12 @@ impl LoadCase for KZGVerifyCellKZGProofBatch { } impl Case for KZGVerifyCellKZGProofBatch { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Deneb + fn is_enabled_for_fork(_fork_name: ForkName) -> bool { + false + } + + fn is_enabled_for_feature(feature_name: FeatureName) -> bool { + feature_name == FeatureName::Eip7594 } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs index ed7583dbd0a..07df05a6ac1 100644 --- a/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs @@ -33,10 +33,6 @@ impl Case for KZGVerifyKZGProof { fork_name == ForkName::Deneb } - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name != FeatureName::Eip7594 - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let parse_input = |input: &KZGVerifyKZGProofInput| -> Result<_, Error> { let commitment = parse_commitment(&input.commitment)?; diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index e7c148645c4..d8fe061061a 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -7,9 +7,6 @@ use std::marker::PhantomData; use std::path::PathBuf; use types::{BeaconState, EthSpec, ForkName}; -const EIP7594_FORK: ForkName = ForkName::Deneb; -const EIP7594_TESTS: [&str; 4] = ["ssz_static", "merkle_proof", "networking", "kzg"]; - pub trait Handler { type Case: Case + LoadCase; @@ -39,13 +36,12 @@ pub trait Handler { for fork_name in ForkName::list_all() { if !self.disabled_forks().contains(&fork_name) && 
self.is_enabled_for_fork(fork_name) { self.run_for_fork(fork_name); + } + } - if fork_name == EIP7594_FORK - && EIP7594_TESTS.contains(&Self::runner_name()) - && self.is_enabled_for_feature(FeatureName::Eip7594) - { - self.run_for_feature(EIP7594_FORK, FeatureName::Eip7594); - } + for feature_name in FeatureName::list_all() { + if self.is_enabled_for_feature(feature_name) { + self.run_for_feature(feature_name); } } } @@ -96,8 +92,9 @@ pub trait Handler { crate::results::assert_tests_pass(&name, &handler_path, &results); } - fn run_for_feature(&self, fork_name: ForkName, feature_name: FeatureName) { + fn run_for_feature(&self, feature_name: FeatureName) { let feature_name_str = feature_name.to_string(); + let fork_name = feature_name.fork_name(); let handler_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) .join("consensus-spec-tests") @@ -352,6 +349,22 @@ where fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { self.supported_forks.contains(&fork_name) } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + // This ensures we only run the tests **once** for `Eip7594`, using the types matching the + // correct fork, e.g. `Eip7594` uses SSZ types from `Deneb` as of spec test version + // `v1.5.0-alpha.8`, therefore the `Eip7594` tests should get included when testing Deneb types. + // + // e.g. 
Eip7594 test vectors are executed in the first line below, but excluded in the 2nd + // line when testing the type `AttestationElectra`: + // + // ``` + // SszStaticHandler::, MainnetEthSpec>::pre_electra().run(); + // SszStaticHandler::, MainnetEthSpec>::electra_only().run(); + // ``` + feature_name == FeatureName::Eip7594 + && self.supported_forks.contains(&feature_name.fork_name()) + } } impl Handler for SszStaticTHCHandler, E> @@ -371,6 +384,10 @@ where fn handler_name(&self) -> String { BeaconState::::name().into() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Eip7594 + } } impl Handler for SszStaticWithSpecHandler @@ -392,6 +409,10 @@ where fn handler_name(&self) -> String { T::name().into() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Eip7594 + } } #[derive(Derivative)] @@ -971,9 +992,12 @@ impl Handler for KzgInclusionMerkleProofValidityHandler bool { - // Enabled in Deneb fork_name.deneb_enabled() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Eip7594 + } } #[derive(Derivative)] diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 292625a371a..691d27951ad 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -627,17 +627,17 @@ mod ssz_static { #[test] fn data_column_sidecar() { SszStaticHandler::, MinimalEthSpec>::deneb_only() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + .run_for_feature(FeatureName::Eip7594); SszStaticHandler::, MainnetEthSpec>::deneb_only() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + .run_for_feature(FeatureName::Eip7594); } #[test] fn data_column_identifier() { SszStaticHandler::::deneb_only() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + .run_for_feature(FeatureName::Eip7594); SszStaticHandler::::deneb_only() - .run_for_feature(ForkName::Deneb, 
FeatureName::Eip7594); + .run_for_feature(FeatureName::Eip7594); } #[test] @@ -902,19 +902,19 @@ fn kzg_verify_kzg_proof() { #[test] fn kzg_compute_cells_and_proofs() { KZGComputeCellsAndKZGProofHandler::::default() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + .run_for_feature(FeatureName::Eip7594); } #[test] fn kzg_verify_cell_proof_batch() { KZGVerifyCellKZGProofBatchHandler::::default() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + .run_for_feature(FeatureName::Eip7594); } #[test] fn kzg_recover_cells_and_proofs() { KZGRecoverCellsAndKZGProofHandler::::default() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + .run_for_feature(FeatureName::Eip7594); } #[test] @@ -949,8 +949,6 @@ fn rewards() { #[test] fn get_custody_columns() { - GetCustodyColumnsHandler::::default() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); - GetCustodyColumnsHandler::::default() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + GetCustodyColumnsHandler::::default().run_for_feature(FeatureName::Eip7594); + GetCustodyColumnsHandler::::default().run_for_feature(FeatureName::Eip7594); } From 06e4d22d4954807ddc755feaea7518ad2ce06f35 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 14 Jan 2025 10:17:00 +1100 Subject: [PATCH 08/26] Electra spec changes for `v1.5.0-beta.0` (#6731) * First pass * Add restrictions to RuntimeVariableList api * Use empty_uninitialized and fix warnings * Fix some todos * Merge branch 'unstable' into max-blobs-preset * Fix take impl on RuntimeFixedList * cleanup * Fix test compilations * Fix some more tests * Fix test from unstable * Merge branch 'unstable' into max-blobs-preset * Implement "Bugfix and more withdrawal tests" * Implement "Add missed exit checks to consolidation processing" * Implement "Update initial earliest_exit_epoch calculation" * Implement "Limit consolidating balance by validator.effective_balance" * Implement "Use 16-bit random value in validator filter" * Implement "Do not 
change creds type on consolidation" * Rename PendingPartialWithdraw index field to validator_index * Skip slots to get test to pass and add TODO * Implement "Synchronously check all transactions to have non-zero length" * Merge remote-tracking branch 'origin/unstable' into max-blobs-preset * Remove footgun function * Minor simplifications * Move from preset to config * Fix typo * Revert "Remove footgun function" This reverts commit de01f923c7452355c87f50c0e8031ca94fa00d36. * Try fixing tests * Implement "bump minimal preset MAX_BLOB_COMMITMENTS_PER_BLOCK and KZG_COMMITMENT_INCLUSION_PROOF_DEPTH" * Thread through ChainSpec * Fix release tests * Move RuntimeFixedVector into module and rename * Add test * Implement "Remove post-altair `initialize_beacon_state_from_eth1` from specs" * Update preset YAML * Remove empty RuntimeVarList awefullness * Make max_blobs_per_block a config parameter (#6329) Squashed commit of the following: commit 04b3743ec1e0b650269dd8e58b540c02430d1c0d Author: Michael Sproul Date: Mon Jan 6 17:36:58 2025 +1100 Add test commit 440e85419940d4daba406d910e7908dd1fe78668 Author: Michael Sproul Date: Mon Jan 6 17:24:50 2025 +1100 Move RuntimeFixedVector into module and rename commit f66e179a40c3917eee39a93534ecf75480172699 Author: Michael Sproul Date: Mon Jan 6 17:17:17 2025 +1100 Fix release tests commit e4bfe71cd1f0a2784d0bd57f85b2f5d8cf503ac1 Author: Michael Sproul Date: Mon Jan 6 17:05:30 2025 +1100 Thread through ChainSpec commit 063b79c16abd3f6df47b85efcf3858177bc933b9 Author: Michael Sproul Date: Mon Jan 6 15:32:16 2025 +1100 Try fixing tests commit 88bedf09bc647de66bd1ff944bbc8fb13e2b7590 Author: Michael Sproul Date: Mon Jan 6 15:04:37 2025 +1100 Revert "Remove footgun function" This reverts commit de01f923c7452355c87f50c0e8031ca94fa00d36. 
commit 32483d385b66f252d50cee5b524e2924157bdcd4 Author: Michael Sproul Date: Mon Jan 6 15:04:32 2025 +1100 Fix typo commit 2e86585b478c012f6e3483989c87e38161227674 Author: Michael Sproul Date: Mon Jan 6 15:04:15 2025 +1100 Move from preset to config commit 1095d60a40be20dd3c229b759fc3c228b51e51e3 Author: Michael Sproul Date: Mon Jan 6 14:38:40 2025 +1100 Minor simplifications commit de01f923c7452355c87f50c0e8031ca94fa00d36 Author: Michael Sproul Date: Mon Jan 6 14:06:57 2025 +1100 Remove footgun function commit 0c2c8c42245c25b8cf17885faf20acd3b81140ec Merge: 21ecb58ff f51a292f7 Author: Michael Sproul Date: Mon Jan 6 14:02:50 2025 +1100 Merge remote-tracking branch 'origin/unstable' into max-blobs-preset commit f51a292f77575a1786af34271fb44954f141c377 Author: Daniel Knopik <107140945+dknopik@users.noreply.github.com> Date: Fri Jan 3 20:27:21 2025 +0100 fully lint only explicitly to avoid unnecessary rebuilds (#6753) * fully lint only explicitly to avoid unnecessary rebuilds commit 7e0cddef321c2a069582c65b58e5f46590d60c49 Author: Akihito Nakano Date: Tue Dec 24 10:38:56 2024 +0900 Make sure we have fanout peers when publish (#6738) * Ensure that `fanout_peers` is always non-empty if it's `Some` commit 21ecb58ff88b86435ab62d9ac227394c10fdcd22 Merge: 2fcb2935e 9aefb5539 Author: Pawan Dhananjay Date: Mon Oct 21 14:46:00 2024 -0700 Merge branch 'unstable' into max-blobs-preset commit 2fcb2935ec7ef4cd18bbdd8aedb7de61fac69e61 Author: Pawan Dhananjay Date: Fri Sep 6 18:28:31 2024 -0700 Fix test from unstable commit 12c6ef118a1a6d910c48d9d4b23004f3609264c7 Author: Pawan Dhananjay Date: Wed Sep 4 16:16:36 2024 -0700 Fix some more tests commit d37733b846ce58e318e976d6503ca394b4901141 Author: Pawan Dhananjay Date: Wed Sep 4 12:47:36 2024 -0700 Fix test compilations commit 52bb581e071d5f474d519366e860a4b3a0b52f78 Author: Pawan Dhananjay Date: Tue Sep 3 18:38:19 2024 -0700 cleanup commit e71020e3e613910e0315f558ead661b490a0ff20 Author: Pawan Dhananjay Date: Tue Sep 3 17:16:10 
2024 -0700 Fix take impl on RuntimeFixedList commit 13f9bba6470b2140e5c34f14aed06dab2b062c1c Merge: 60100fc6b 4e675cf5d Author: Pawan Dhananjay Date: Tue Sep 3 16:08:59 2024 -0700 Merge branch 'unstable' into max-blobs-preset commit 60100fc6be72792ff33913d7e5a53434c792aacf Author: Pawan Dhananjay Date: Fri Aug 30 16:04:11 2024 -0700 Fix some todos commit a9cb329a221a809f7dd818984753826f91c2e26b Author: Pawan Dhananjay Date: Fri Aug 30 15:54:00 2024 -0700 Use empty_uninitialized and fix warnings commit 4dc6e6515ecf75cefa4de840edc7b57e76a8fc9e Author: Pawan Dhananjay Date: Fri Aug 30 15:53:18 2024 -0700 Add restrictions to RuntimeVariableList api commit 25feedfde348b530c4fa2348cc71a06b746898ed Author: Pawan Dhananjay Date: Thu Aug 29 16:11:19 2024 -0700 First pass * Fix tests * Implement max_blobs_per_block_electra * Fix config issues * Simplify BlobSidecarListFromRoot * Disable PeerDAS tests * Merge remote-tracking branch 'origin/unstable' into max-blobs-preset * Bump quota to account for new target (6) * Remove clone * Fix issue from review * Try to remove ugliness * Merge branch 'unstable' into max-blobs-preset * Merge remote-tracking branch 'origin/unstable' into electra-alpha10 * Merge commit '04b3743ec1e0b650269dd8e58b540c02430d1c0d' into electra-alpha10 * Merge remote-tracking branch 'pawan/max-blobs-preset' into electra-alpha10 * Update tests to v1.5.0-beta.0 * Resolve merge conflicts * Linting * fmt * Fix test and add TODO * Gracefully handle slashed proposers in fork choice tests * Merge remote-tracking branch 'origin/unstable' into electra-alpha10 * Keep latest changes from max_blobs_per_block PR in codec.rs * Revert a few more regressions and add a comment * Disable more DAS tests * Improve validator monitor test a little * Make test more robust * Fix sync test that didn't understand blobs * Fill out cropped comment --- .../beacon_chain/tests/validator_monitor.rs | 82 ++++++++----------- .../src/engine_api/new_payload_request.rs | 5 ++ 
beacon_node/execution_layer/src/lib.rs | 1 + .../lighthouse_network/src/rpc/protocol.rs | 2 +- beacon_node/network/src/sync/tests/range.rs | 53 ++++++++---- consensus/fork_choice/tests/tests.rs | 61 +++++++------- .../src/per_block_processing.rs | 36 +++++--- .../process_operations.rs | 33 +++++--- .../src/per_epoch_processing/single_pass.rs | 4 +- .../state_processing/src/upgrade/electra.rs | 4 +- consensus/types/presets/gnosis/electra.yaml | 13 ++- consensus/types/presets/mainnet/altair.yaml | 2 + consensus/types/presets/mainnet/electra.yaml | 13 ++- consensus/types/presets/mainnet/phase0.yaml | 2 +- consensus/types/presets/minimal/altair.yaml | 2 + consensus/types/presets/minimal/deneb.yaml | 8 +- consensus/types/presets/minimal/electra.yaml | 15 ++-- consensus/types/presets/minimal/phase0.yaml | 6 +- consensus/types/src/beacon_state.rs | 53 ++++++++++-- consensus/types/src/chain_spec.rs | 24 +++++- consensus/types/src/eth_spec.rs | 14 ++-- .../types/src/pending_partial_withdrawal.rs | 2 +- consensus/types/src/preset.rs | 4 +- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 6 ++ .../src/cases/genesis_initialization.rs | 3 +- .../ef_tests/src/cases/genesis_validity.rs | 4 + testing/ef_tests/src/handler.rs | 23 ++++-- testing/ef_tests/tests/tests.rs | 10 ++- 29 files changed, 309 insertions(+), 178 deletions(-) diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index 91de4fe2702..180db6d76dd 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -4,7 +4,7 @@ use beacon_chain::test_utils::{ use beacon_chain::validator_monitor::{ValidatorMonitorConfig, MISSED_BLOCK_LAG_SLOTS}; use logging::test_logger; use std::sync::LazyLock; -use types::{Epoch, EthSpec, ForkName, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; +use types::{Epoch, EthSpec, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; // Should 
ideally be divisible by 3. pub const VALIDATOR_COUNT: usize = 48; @@ -117,7 +117,7 @@ async fn missed_blocks_across_epochs() { } #[tokio::test] -async fn produces_missed_blocks() { +async fn missed_blocks_basic() { let validator_count = 16; let slots_per_epoch = E::slots_per_epoch(); @@ -127,13 +127,10 @@ async fn produces_missed_blocks() { // Generate 63 slots (2 epochs * 32 slots per epoch - 1) let initial_blocks = slots_per_epoch * nb_epoch_to_simulate.as_u64() - 1; - // The validator index of the validator that is 'supposed' to miss a block - let validator_index_to_monitor = 1; - // 1st scenario // // // Missed block happens when slot and prev_slot are in the same epoch - let harness1 = get_harness(validator_count, vec![validator_index_to_monitor]); + let harness1 = get_harness(validator_count, vec![]); harness1 .extend_chain( initial_blocks as usize, @@ -153,7 +150,7 @@ async fn produces_missed_blocks() { let mut prev_slot = Slot::new(idx - 1); let mut duplicate_block_root = *_state.block_roots().get(idx as usize).unwrap(); let mut validator_indexes = _state.get_beacon_proposer_indices(&harness1.spec).unwrap(); - let mut validator_index = validator_indexes[slot_in_epoch.as_usize()]; + let mut missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; let mut proposer_shuffling_decision_root = _state .proposer_shuffling_decision_root(duplicate_block_root) .unwrap(); @@ -170,7 +167,7 @@ async fn produces_missed_blocks() { beacon_proposer_cache.lock().insert( epoch, proposer_shuffling_decision_root, - validator_indexes.into_iter().collect::>(), + validator_indexes, _state.fork() ), Ok(()) @@ -187,12 +184,15 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor let mut validator_monitor = harness1.chain.validator_monitor.write(); + + validator_monitor.add_validator_pubkey(KEYPAIRS[missed_block_proposer].pk.compress()); 
validator_monitor.process_valid_state(nb_epoch_to_simulate, _state, &harness1.chain.spec); // We should have one entry in the missed blocks map assert_eq!( - validator_monitor.get_monitored_validator_missed_block_count(validator_index as u64), - 1 + validator_monitor + .get_monitored_validator_missed_block_count(missed_block_proposer as u64), + 1, ); } @@ -201,23 +201,7 @@ async fn produces_missed_blocks() { // Missed block happens when slot and prev_slot are not in the same epoch // making sure that the cache reloads when the epoch changes // in that scenario the slot that missed a block is the first slot of the epoch - // We are adding other validators to monitor as these ones will miss a block depending on - // the fork name specified when running the test as the proposer cache differs depending on - // the fork name (cf. seed) - // - // If you are adding a new fork and seeing errors, print - // `validator_indexes[slot_in_epoch.as_usize()]` and add it below. - let validator_index_to_monitor = match harness1.spec.fork_name_at_slot::(Slot::new(0)) { - ForkName::Base => 7, - ForkName::Altair => 2, - ForkName::Bellatrix => 4, - ForkName::Capella => 11, - ForkName::Deneb => 3, - ForkName::Electra => 1, - ForkName::Fulu => 6, - }; - - let harness2 = get_harness(validator_count, vec![validator_index_to_monitor]); + let harness2 = get_harness(validator_count, vec![]); let advance_slot_by = 9; harness2 .extend_chain( @@ -238,11 +222,7 @@ async fn produces_missed_blocks() { slot_in_epoch = slot % slots_per_epoch; duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap(); validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap(); - validator_index = validator_indexes[slot_in_epoch.as_usize()]; - // If you are adding a new fork and seeing errors, it means the fork seed has changed the - // validator_index. Uncomment this line, run the test again and add the resulting index to the - // list above. 
- //eprintln!("new index which needs to be added => {:?}", validator_index); + missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; let beacon_proposer_cache = harness2 .chain @@ -256,7 +236,7 @@ async fn produces_missed_blocks() { beacon_proposer_cache.lock().insert( epoch, duplicate_block_root, - validator_indexes.into_iter().collect::>(), + validator_indexes.clone(), _state2.fork() ), Ok(()) @@ -271,10 +251,12 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor let mut validator_monitor2 = harness2.chain.validator_monitor.write(); + validator_monitor2.add_validator_pubkey(KEYPAIRS[missed_block_proposer].pk.compress()); validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec); // We should have one entry in the missed blocks map assert_eq!( - validator_monitor2.get_monitored_validator_missed_block_count(validator_index as u64), + validator_monitor2 + .get_monitored_validator_missed_block_count(missed_block_proposer as u64), 1 ); @@ -282,19 +264,20 @@ async fn produces_missed_blocks() { // // A missed block happens but the validator is not monitored // it should not be flagged as a missed block - idx = initial_blocks + (advance_slot_by) - 7; + while validator_indexes[(idx % slots_per_epoch) as usize] == missed_block_proposer + && idx / slots_per_epoch == epoch.as_u64() + { + idx += 1; + } slot = Slot::new(idx); prev_slot = Slot::new(idx - 1); slot_in_epoch = slot % slots_per_epoch; duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap(); - validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap(); - let not_monitored_validator_index = validator_indexes[slot_in_epoch.as_usize()]; - // This could do with a refactor: https://github.com/sigp/lighthouse/issues/6293 - assert_ne!( - not_monitored_validator_index, - validator_index_to_monitor, - "this test has a fragile dependency on 
hardcoded indices. you need to tweak some settings or rewrite this" - ); + let second_missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; + + // This test may fail if we can't find another distinct proposer in the same epoch. + // However, this should be vanishingly unlikely: P ~= (1/16)^32 = 2e-39. + assert_ne!(missed_block_proposer, second_missed_block_proposer); assert_eq!( _state2.set_block_root(prev_slot, duplicate_block_root), @@ -306,10 +289,9 @@ async fn produces_missed_blocks() { validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec); // We shouldn't have any entry in the missed blocks map - assert_ne!(validator_index, not_monitored_validator_index); assert_eq!( validator_monitor2 - .get_monitored_validator_missed_block_count(not_monitored_validator_index as u64), + .get_monitored_validator_missed_block_count(second_missed_block_proposer as u64), 0 ); } @@ -318,7 +300,7 @@ async fn produces_missed_blocks() { // // A missed block happens at state.slot - LOG_SLOTS_PER_EPOCH // it shouldn't be flagged as a missed block - let harness3 = get_harness(validator_count, vec![validator_index_to_monitor]); + let harness3 = get_harness(validator_count, vec![]); harness3 .extend_chain( slots_per_epoch as usize, @@ -338,7 +320,7 @@ async fn produces_missed_blocks() { prev_slot = Slot::new(idx - 1); duplicate_block_root = *_state3.block_roots().get(idx as usize).unwrap(); validator_indexes = _state3.get_beacon_proposer_indices(&harness3.spec).unwrap(); - validator_index = validator_indexes[slot_in_epoch.as_usize()]; + missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; proposer_shuffling_decision_root = _state3 .proposer_shuffling_decision_root_at_epoch(epoch, duplicate_block_root) .unwrap(); @@ -355,7 +337,7 @@ async fn produces_missed_blocks() { beacon_proposer_cache.lock().insert( epoch, proposer_shuffling_decision_root, - validator_indexes.into_iter().collect::>(), + validator_indexes, _state3.fork() ), Ok(()) 
@@ -372,11 +354,13 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor let mut validator_monitor3 = harness3.chain.validator_monitor.write(); + validator_monitor3.add_validator_pubkey(KEYPAIRS[missed_block_proposer].pk.compress()); validator_monitor3.process_valid_state(epoch, _state3, &harness3.chain.spec); // We shouldn't have one entry in the missed blocks map assert_eq!( - validator_monitor3.get_monitored_validator_missed_block_count(validator_index as u64), + validator_monitor3 + .get_monitored_validator_missed_block_count(missed_block_proposer as u64), 0 ); } diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index a86b2fd9bbf..23610c9ae45 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -128,6 +128,11 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); + // Check that no transactions in the payload are zero length + if payload.transactions().iter().any(|slice| slice.is_empty()) { + return Err(Error::ZeroLengthTransaction); + } + let (header_hash, rlp_transactions_root) = calculate_execution_block_hash( payload, parent_beacon_block_root, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 118d7adfcaa..f7abe73543b 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -157,6 +157,7 @@ pub enum Error { payload: ExecutionBlockHash, transactions_root: Hash256, }, + ZeroLengthTransaction, PayloadBodiesByRangeNotSupported, InvalidJWTSecret(String), InvalidForkForPayload, diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs 
b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 681b739d598..780dff937d3 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -710,7 +710,7 @@ pub fn rpc_blob_limits() -> RpcLimits { } } -// TODO(peerdas): fix hardcoded max here +// TODO(das): fix hardcoded max here pub fn rpc_data_column_limits(fork_name: ForkName) -> RpcLimits { RpcLimits::new( DataColumnSidecar::::empty().as_ssz_bytes().len(), diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index 6faa8b72472..05d5e4a4143 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -4,12 +4,15 @@ use crate::sync::manager::SLOT_IMPORT_TOLERANCE; use crate::sync::range_sync::RangeSyncType; use crate::sync::SyncMessage; use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; -use beacon_chain::EngineState; +use beacon_chain::{block_verification_types::RpcBlock, EngineState, NotifyExecutionLayer}; use lighthouse_network::rpc::{RequestType, StatusMessage}; use lighthouse_network::service::api_types::{AppRequestId, Id, SyncRequestId}; use lighthouse_network::{PeerId, SyncInfo}; use std::time::Duration; -use types::{EthSpec, Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot}; +use types::{ + BlobSidecarList, BlockImportSource, EthSpec, Hash256, MinimalEthSpec as E, SignedBeaconBlock, + SignedBeaconBlockHash, Slot, +}; const D: Duration = Duration::new(0, 0); @@ -154,7 +157,9 @@ impl TestRig { } } - async fn create_canonical_block(&mut self) -> SignedBeaconBlock { + async fn create_canonical_block( + &mut self, + ) -> (SignedBeaconBlock, Option>) { self.harness.advance_slot(); let block_root = self @@ -165,19 +170,39 @@ impl TestRig { AttestationStrategy::AllValidators, ) .await; - self.harness - .chain - .store - .get_full_block(&block_root) - .unwrap() - .unwrap() + // TODO(das): this does not handle data columns yet + 
let store = &self.harness.chain.store; + let block = store.get_full_block(&block_root).unwrap().unwrap(); + let blobs = if block.fork_name_unchecked().deneb_enabled() { + store.get_blobs(&block_root).unwrap().blobs() + } else { + None + }; + (block, blobs) } - async fn remember_block(&mut self, block: SignedBeaconBlock) { - self.harness - .process_block(block.slot(), block.canonical_root(), (block.into(), None)) + async fn remember_block( + &mut self, + (block, blob_sidecars): (SignedBeaconBlock, Option>), + ) { + // This code is kind of duplicated from Harness::process_block, but takes sidecars directly. + let block_root = block.canonical_root(); + self.harness.set_current_slot(block.slot()); + let _: SignedBeaconBlockHash = self + .harness + .chain + .process_block( + block_root, + RpcBlock::new(Some(block_root), block.into(), blob_sidecars).unwrap(), + NotifyExecutionLayer::Yes, + BlockImportSource::RangeSync, + || Ok(()), + ) .await + .unwrap() + .try_into() .unwrap(); + self.harness.chain.recompute_head_at_current_slot().await; } } @@ -217,9 +242,9 @@ async fn state_update_while_purging() { // Need to create blocks that can be inserted into the fork-choice and fit the "known // conditions" below. 
let head_peer_block = rig_2.create_canonical_block().await; - let head_peer_root = head_peer_block.canonical_root(); + let head_peer_root = head_peer_block.0.canonical_root(); let finalized_peer_block = rig_2.create_canonical_block().await; - let finalized_peer_root = finalized_peer_block.canonical_root(); + let finalized_peer_root = finalized_peer_block.0.canonical_root(); // Get a peer with an advanced head let head_peer = rig.add_head_peer_with_root(head_peer_root); diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 001b80fe113..70b4b73d528 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -10,6 +10,7 @@ use beacon_chain::{ use fork_choice::{ ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, }; +use state_processing::state_advance::complete_state_advance; use std::fmt; use std::sync::Mutex; use std::time::Duration; @@ -172,6 +173,20 @@ impl ForkChoiceTest { let validators = self.harness.get_all_validators(); loop { let slot = self.harness.get_current_slot(); + + // Skip slashed proposers, as we expect validators to get slashed in these tests. + // Presently `make_block` will panic if the proposer is slashed, so we just avoid + // calling it in this case. + complete_state_advance(&mut state, None, slot, &self.harness.spec).unwrap(); + state.build_caches(&self.harness.spec).unwrap(); + let proposer_index = state + .get_beacon_proposer_index(slot, &self.harness.chain.spec) + .unwrap(); + if state.validators().get(proposer_index).unwrap().slashed { + self.harness.advance_slot(); + continue; + } + let (block_contents, state_) = self.harness.make_block(state, slot).await; state = state_; if !predicate(block_contents.0.message(), &state) { @@ -196,17 +211,20 @@ impl ForkChoiceTest { } /// Apply `count` blocks to the chain (with attestations). 
+ /// + /// Note that in the case of slashed validators, their proposals will be skipped and the chain + /// may be advanced by *more than* `count` slots. pub async fn apply_blocks(self, count: usize) -> Self { - self.harness.advance_slot(); - self.harness - .extend_chain( - count, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - - self + // Use `Self::apply_blocks_while` which gracefully handles slashed validators. + let mut blocks_applied = 0; + self.apply_blocks_while(|_, _| { + // Blocks are applied after the predicate is called, so continue applying the block if + // less than *or equal* to the count. + blocks_applied += 1; + blocks_applied <= count + }) + .await + .unwrap() } /// Slash a validator from the previous epoch committee. @@ -244,6 +262,7 @@ impl ForkChoiceTest { /// Apply `count` blocks to the chain (without attestations). pub async fn apply_blocks_without_new_attestations(self, count: usize) -> Self { + // This function does not gracefully handle slashed proposers, but may need to in future. self.harness.advance_slot(); self.harness .extend_chain( @@ -1226,14 +1245,6 @@ async fn progressive_balances_cache_attester_slashing() { .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .await .unwrap() - // Note: This test may fail if the shuffling used changes, right now it re-runs with - // deterministic shuffling. A shuffling change my cause the slashed proposer to propose - // again in the next epoch, which results in a block processing failure - // (`HeaderInvalid::ProposerSlashed`). The harness should be re-worked to successfully skip - // the slot in this scenario rather than panic-ing. The same applies to - // `progressive_balances_cache_proposer_slashing`. 
- .apply_blocks(2) - .await .add_previous_epoch_attester_slashing() .await // expect fork choice to import blocks successfully after a previous epoch attester is @@ -1244,7 +1255,7 @@ async fn progressive_balances_cache_attester_slashing() { // expect fork choice to import another epoch of blocks successfully - the slashed // attester's balance should be excluded from the current epoch total balance in // `ProgressiveBalancesCache` as well. - .apply_blocks(MainnetEthSpec::slots_per_epoch() as usize) + .apply_blocks(E::slots_per_epoch() as usize) .await; } @@ -1257,15 +1268,7 @@ async fn progressive_balances_cache_proposer_slashing() { .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .await .unwrap() - // Note: This test may fail if the shuffling used changes, right now it re-runs with - // deterministic shuffling. A shuffling change may cause the slashed proposer to propose - // again in the next epoch, which results in a block processing failure - // (`HeaderInvalid::ProposerSlashed`). The harness should be re-worked to successfully skip - // the slot in this scenario rather than panic-ing. The same applies to - // `progressive_balances_cache_attester_slashing`. - .apply_blocks(1) - .await - .add_previous_epoch_proposer_slashing(MainnetEthSpec::slots_per_epoch()) + .add_previous_epoch_proposer_slashing(E::slots_per_epoch()) .await // expect fork choice to import blocks successfully after a previous epoch proposer is // slashed, i.e. the slashed proposer's balance is correctly excluded from @@ -1275,6 +1278,6 @@ async fn progressive_balances_cache_proposer_slashing() { // expect fork choice to import another epoch of blocks successfully - the slashed // proposer's balance should be excluded from the current epoch total balance in // `ProgressiveBalancesCache` as well. 
- .apply_blocks(MainnetEthSpec::slots_per_epoch() as usize) + .apply_blocks(E::slots_per_epoch() as usize) .await; } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 782dbe2a547..502ad25838e 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -1,7 +1,7 @@ use crate::consensus_context::ConsensusContext; use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid}; use rayon::prelude::*; -use safe_arith::{ArithError, SafeArith}; +use safe_arith::{ArithError, SafeArith, SafeArithIter}; use signature_sets::{block_proposal_signature_set, get_pubkey_from_state, randao_signature_set}; use std::borrow::Cow; use tree_hash::TreeHash; @@ -509,7 +509,7 @@ pub fn compute_timestamp_at_slot( /// Compute the next batch of withdrawals which should be included in a block. /// -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-get_expected_withdrawals +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/electra/beacon-chain.md#new-get_expected_withdrawals pub fn get_expected_withdrawals( state: &BeaconState, spec: &ChainSpec, @@ -522,9 +522,9 @@ pub fn get_expected_withdrawals( // [New in Electra:EIP7251] // Consume pending partial withdrawals - let partial_withdrawals_count = + let processed_partial_withdrawals_count = if let Ok(partial_withdrawals) = state.pending_partial_withdrawals() { - let mut partial_withdrawals_count = 0; + let mut processed_partial_withdrawals_count = 0; for withdrawal in partial_withdrawals { if withdrawal.withdrawable_epoch > epoch || withdrawals.len() == spec.max_pending_partials_per_withdrawals_sweep as usize @@ -532,8 +532,8 @@ pub fn get_expected_withdrawals( break; } - let withdrawal_balance = state.get_balance(withdrawal.index as usize)?; - let validator = state.get_validator(withdrawal.index as usize)?; + let withdrawal_balance = 
state.get_balance(withdrawal.validator_index as usize)?; + let validator = state.get_validator(withdrawal.validator_index as usize)?; let has_sufficient_effective_balance = validator.effective_balance >= spec.min_activation_balance; @@ -549,7 +549,7 @@ pub fn get_expected_withdrawals( ); withdrawals.push(Withdrawal { index: withdrawal_index, - validator_index: withdrawal.index, + validator_index: withdrawal.validator_index, address: validator .get_execution_withdrawal_address(spec) .ok_or(BeaconStateError::NonExecutionAddresWithdrawalCredential)?, @@ -557,9 +557,9 @@ pub fn get_expected_withdrawals( }); withdrawal_index.safe_add_assign(1)?; } - partial_withdrawals_count.safe_add_assign(1)?; + processed_partial_withdrawals_count.safe_add_assign(1)?; } - Some(partial_withdrawals_count) + Some(processed_partial_withdrawals_count) } else { None }; @@ -570,9 +570,19 @@ pub fn get_expected_withdrawals( ); for _ in 0..bound { let validator = state.get_validator(validator_index as usize)?; - let balance = *state.balances().get(validator_index as usize).ok_or( - BeaconStateError::BalancesOutOfBounds(validator_index as usize), - )?; + let partially_withdrawn_balance = withdrawals + .iter() + .filter_map(|withdrawal| { + (withdrawal.validator_index == validator_index).then_some(withdrawal.amount) + }) + .safe_sum()?; + let balance = state + .balances() + .get(validator_index as usize) + .ok_or(BeaconStateError::BalancesOutOfBounds( + validator_index as usize, + ))? + .safe_sub(partially_withdrawn_balance)?; if validator.is_fully_withdrawable_at(balance, epoch, spec, fork_name) { withdrawals.push(Withdrawal { index: withdrawal_index, @@ -604,7 +614,7 @@ pub fn get_expected_withdrawals( .safe_rem(state.validators().len() as u64)?; } - Ok((withdrawals.into(), partial_withdrawals_count)) + Ok((withdrawals.into(), processed_partial_withdrawals_count)) } /// Apply withdrawals to the state. 
diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 4977f7c7e9d..82dd6167241 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -507,11 +507,11 @@ pub fn process_withdrawal_requests( } // Verify pubkey exists - let Some(index) = state.pubkey_cache().get(&request.validator_pubkey) else { + let Some(validator_index) = state.pubkey_cache().get(&request.validator_pubkey) else { continue; }; - let validator = state.get_validator(index)?; + let validator = state.get_validator(validator_index)?; // Verify withdrawal credentials let has_correct_credential = validator.has_execution_withdrawal_credential(spec); let is_correct_source_address = validator @@ -542,16 +542,16 @@ pub fn process_withdrawal_requests( continue; } - let pending_balance_to_withdraw = state.get_pending_balance_to_withdraw(index)?; + let pending_balance_to_withdraw = state.get_pending_balance_to_withdraw(validator_index)?; if is_full_exit_request { // Only exit validator if it has no pending withdrawals in the queue if pending_balance_to_withdraw == 0 { - initiate_validator_exit(state, index, spec)? + initiate_validator_exit(state, validator_index, spec)? } continue; } - let balance = state.get_balance(index)?; + let balance = state.get_balance(validator_index)?; let has_sufficient_effective_balance = validator.effective_balance >= spec.min_activation_balance; let has_excess_balance = balance @@ -576,7 +576,7 @@ pub fn process_withdrawal_requests( state .pending_partial_withdrawals_mut()? 
.push(PendingPartialWithdrawal { - index: index as u64, + validator_index: validator_index as u64, amount: to_withdraw, withdrawable_epoch, })?; @@ -739,8 +739,8 @@ pub fn process_consolidation_request( } let target_validator = state.get_validator(target_index)?; - // Verify the target has execution withdrawal credentials - if !target_validator.has_execution_withdrawal_credential(spec) { + // Verify the target has compounding withdrawal credentials + if !target_validator.has_compounding_withdrawal_credential(spec) { return Ok(()); } @@ -757,6 +757,18 @@ pub fn process_consolidation_request( { return Ok(()); } + // Verify the source has been active long enough + if current_epoch + < source_validator + .activation_epoch + .safe_add(spec.shard_committee_period)? + { + return Ok(()); + } + // Verify the source has no pending withdrawals in the queue + if state.get_pending_balance_to_withdraw(source_index)? > 0 { + return Ok(()); + } // Initiate source validator exit and append pending consolidation let source_exit_epoch = state @@ -772,10 +784,5 @@ pub fn process_consolidation_request( target_index: target_index as u64, })?; - let target_validator = state.get_validator(target_index)?; - // Churn any target excess active balance of target and raise its max - if target_validator.has_eth1_withdrawal_credential(spec) { - state.switch_to_compounding_validator(target_index, spec)?; - } Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 904e68e3686..a4a81c8eeff 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -1057,14 +1057,12 @@ fn process_pending_consolidations( } // Calculate the consolidated balance - let max_effective_balance = - source_validator.get_max_effective_balance(spec, state_ctxt.fork_name); let source_effective_balance = std::cmp::min( *state 
.balances() .get(source_index) .ok_or(BeaconStateError::UnknownValidator(source_index))?, - max_effective_balance, + source_validator.effective_balance, ); // Move active balance to target. Excess balance is withdrawable. diff --git a/consensus/state_processing/src/upgrade/electra.rs b/consensus/state_processing/src/upgrade/electra.rs index 1e64ef28978..0f32e1553d9 100644 --- a/consensus/state_processing/src/upgrade/electra.rs +++ b/consensus/state_processing/src/upgrade/electra.rs @@ -14,13 +14,15 @@ pub fn upgrade_to_electra( ) -> Result<(), Error> { let epoch = pre_state.current_epoch(); + let activation_exit_epoch = spec.compute_activation_exit_epoch(epoch)?; let earliest_exit_epoch = pre_state .validators() .iter() .filter(|v| v.exit_epoch != spec.far_future_epoch) .map(|v| v.exit_epoch) .max() - .unwrap_or(epoch) + .unwrap_or(activation_exit_epoch) + .max(activation_exit_epoch) .safe_add(1)?; // The total active balance cache must be built before the consolidation churn limit diff --git a/consensus/types/presets/gnosis/electra.yaml b/consensus/types/presets/gnosis/electra.yaml index 660ed9b64cf..42afbb233ed 100644 --- a/consensus/types/presets/gnosis/electra.yaml +++ b/consensus/types/presets/gnosis/electra.yaml @@ -10,7 +10,7 @@ MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 # State list lengths # --------------------------------------------------------------- # `uint64(2**27)` (= 134,217,728) -PENDING_BALANCE_DEPOSITS_LIMIT: 134217728 +PENDING_DEPOSITS_LIMIT: 134217728 # `uint64(2**27)` (= 134,217,728) PENDING_PARTIAL_WITHDRAWALS_LIMIT: 134217728 # `uint64(2**18)` (= 262,144) @@ -29,12 +29,12 @@ WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 MAX_ATTESTER_SLASHINGS_ELECTRA: 1 # `uint64(2**3)` (= 8) MAX_ATTESTATIONS_ELECTRA: 8 -# `uint64(2**0)` (= 1) -MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1 +# `uint64(2**1)` (= 2) +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Execution # --------------------------------------------------------------- -# 2**13 (= 8192) receipts +# 
2**13 (= 8192) deposit requests MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 8192 # 2**4 (= 16) withdrawal requests MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 @@ -43,3 +43,8 @@ MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 # --------------------------------------------------------------- # 2**3 ( = 8) pending withdrawals MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 8 + +# Pending deposits processing +# --------------------------------------------------------------- +# 2**4 ( = 4) pending deposits +MAX_PENDING_DEPOSITS_PER_EPOCH: 16 diff --git a/consensus/types/presets/mainnet/altair.yaml b/consensus/types/presets/mainnet/altair.yaml index 9a17b780327..813ef72122a 100644 --- a/consensus/types/presets/mainnet/altair.yaml +++ b/consensus/types/presets/mainnet/altair.yaml @@ -22,3 +22,5 @@ EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 256 # --------------------------------------------------------------- # 1 MIN_SYNC_COMMITTEE_PARTICIPANTS: 1 +# SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD (= 32 * 256) +UPDATE_TIMEOUT: 8192 diff --git a/consensus/types/presets/mainnet/electra.yaml b/consensus/types/presets/mainnet/electra.yaml index 660ed9b64cf..42afbb233ed 100644 --- a/consensus/types/presets/mainnet/electra.yaml +++ b/consensus/types/presets/mainnet/electra.yaml @@ -10,7 +10,7 @@ MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 # State list lengths # --------------------------------------------------------------- # `uint64(2**27)` (= 134,217,728) -PENDING_BALANCE_DEPOSITS_LIMIT: 134217728 +PENDING_DEPOSITS_LIMIT: 134217728 # `uint64(2**27)` (= 134,217,728) PENDING_PARTIAL_WITHDRAWALS_LIMIT: 134217728 # `uint64(2**18)` (= 262,144) @@ -29,12 +29,12 @@ WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 MAX_ATTESTER_SLASHINGS_ELECTRA: 1 # `uint64(2**3)` (= 8) MAX_ATTESTATIONS_ELECTRA: 8 -# `uint64(2**0)` (= 1) -MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1 +# `uint64(2**1)` (= 2) +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Execution # --------------------------------------------------------------- -# 2**13 (= 
8192) receipts +# 2**13 (= 8192) deposit requests MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 8192 # 2**4 (= 16) withdrawal requests MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 @@ -43,3 +43,8 @@ MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 # --------------------------------------------------------------- # 2**3 ( = 8) pending withdrawals MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 8 + +# Pending deposits processing +# --------------------------------------------------------------- +# 2**4 ( = 4) pending deposits +MAX_PENDING_DEPOSITS_PER_EPOCH: 16 diff --git a/consensus/types/presets/mainnet/phase0.yaml b/consensus/types/presets/mainnet/phase0.yaml index 02bc96c8cdb..00133ba3690 100644 --- a/consensus/types/presets/mainnet/phase0.yaml +++ b/consensus/types/presets/mainnet/phase0.yaml @@ -85,4 +85,4 @@ MAX_ATTESTATIONS: 128 # 2**4 (= 16) MAX_DEPOSITS: 16 # 2**4 (= 16) -MAX_VOLUNTARY_EXITS: 16 +MAX_VOLUNTARY_EXITS: 16 \ No newline at end of file diff --git a/consensus/types/presets/minimal/altair.yaml b/consensus/types/presets/minimal/altair.yaml index 88d78bea365..5e472c49cf3 100644 --- a/consensus/types/presets/minimal/altair.yaml +++ b/consensus/types/presets/minimal/altair.yaml @@ -22,3 +22,5 @@ EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8 # --------------------------------------------------------------- # 1 MIN_SYNC_COMMITTEE_PARTICIPANTS: 1 +# SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD (= 8 * 8) +UPDATE_TIMEOUT: 64 diff --git a/consensus/types/presets/minimal/deneb.yaml b/consensus/types/presets/minimal/deneb.yaml index b1bbc4ee541..c101de3162d 100644 --- a/consensus/types/presets/minimal/deneb.yaml +++ b/consensus/types/presets/minimal/deneb.yaml @@ -2,9 +2,9 @@ # Misc # --------------------------------------------------------------- -# [customized] +# `uint64(4096)` FIELD_ELEMENTS_PER_BLOB: 4096 # [customized] -MAX_BLOB_COMMITMENTS_PER_BLOCK: 16 -# [customized] `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 4 = 9 
-KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 9 +MAX_BLOB_COMMITMENTS_PER_BLOCK: 32 +# [customized] `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 5 = 10 +KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 10 diff --git a/consensus/types/presets/minimal/electra.yaml b/consensus/types/presets/minimal/electra.yaml index ef1ce494d8e..44e4769756e 100644 --- a/consensus/types/presets/minimal/electra.yaml +++ b/consensus/types/presets/minimal/electra.yaml @@ -10,7 +10,7 @@ MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 # State list lengths # --------------------------------------------------------------- # `uint64(2**27)` (= 134,217,728) -PENDING_BALANCE_DEPOSITS_LIMIT: 134217728 +PENDING_DEPOSITS_LIMIT: 134217728 # [customized] `uint64(2**6)` (= 64) PENDING_PARTIAL_WITHDRAWALS_LIMIT: 64 # [customized] `uint64(2**6)` (= 64) @@ -29,8 +29,8 @@ WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 MAX_ATTESTER_SLASHINGS_ELECTRA: 1 # `uint64(2**3)` (= 8) MAX_ATTESTATIONS_ELECTRA: 8 -# `uint64(2**0)` (= 1) -MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1 +# `uint64(2**1)` (= 2) +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Execution # --------------------------------------------------------------- @@ -41,5 +41,10 @@ MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 2 # Withdrawals processing # --------------------------------------------------------------- -# 2**0 ( = 1) pending withdrawals -MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 1 +# 2**1 ( = 2) pending withdrawals +MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 2 + +# Pending deposits processing +# --------------------------------------------------------------- +# 2**4 ( = 4) pending deposits +MAX_PENDING_DEPOSITS_PER_EPOCH: 16 diff --git a/consensus/types/presets/minimal/phase0.yaml b/consensus/types/presets/minimal/phase0.yaml index 1f756031421..d9a6a2b6c0d 100644 --- a/consensus/types/presets/minimal/phase0.yaml +++ b/consensus/types/presets/minimal/phase0.yaml @@ -4,11 +4,11 @@ # 
--------------------------------------------------------------- # [customized] Just 4 committees for slot for testing purposes MAX_COMMITTEES_PER_SLOT: 4 -# [customized] unsecure, but fast +# [customized] insecure, but fast TARGET_COMMITTEE_SIZE: 4 # 2**11 (= 2,048) MAX_VALIDATORS_PER_COMMITTEE: 2048 -# [customized] Faster, but unsecure. +# [customized] Faster, but insecure. SHUFFLE_ROUND_COUNT: 10 # 4 HYSTERESIS_QUOTIENT: 4 @@ -85,4 +85,4 @@ MAX_ATTESTATIONS: 128 # 2**4 (= 16) MAX_DEPOSITS: 16 # 2**4 (= 16) -MAX_VOLUNTARY_EXITS: 16 +MAX_VOLUNTARY_EXITS: 16 \ No newline at end of file diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index de6077bf940..6f44998cdff 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -46,6 +46,7 @@ mod tests; pub const CACHED_EPOCHS: usize = 3; const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1; +const MAX_RANDOM_VALUE: u64 = (1 << 16) - 1; pub type Validators = List::ValidatorRegistryLimit>; pub type Balances = List::ValidatorRegistryLimit>; @@ -916,6 +917,11 @@ impl BeaconState { } let max_effective_balance = spec.max_effective_balance_for_fork(self.fork_name_unchecked()); + let max_random_value = if self.fork_name_unchecked().electra_enabled() { + MAX_RANDOM_VALUE + } else { + MAX_RANDOM_BYTE + }; let mut i = 0; loop { @@ -929,10 +935,10 @@ impl BeaconState { let candidate_index = *indices .get(shuffled_index) .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; - let random_byte = Self::shuffling_random_byte(i, seed)?; + let random_value = self.shuffling_random_value(i, seed)?; let effective_balance = self.get_effective_balance(candidate_index)?; - if effective_balance.safe_mul(MAX_RANDOM_BYTE)? - >= max_effective_balance.safe_mul(u64::from(random_byte))? + if effective_balance.safe_mul(max_random_value)? + >= max_effective_balance.safe_mul(random_value)? 
{ return Ok(candidate_index); } @@ -940,6 +946,19 @@ impl BeaconState { } } + /// Fork-aware abstraction for the shuffling. + /// + /// In Electra and later, the random value is a 16-bit integer stored in a `u64`. + /// + /// Prior to Electra, the random value is an 8-bit integer stored in a `u64`. + fn shuffling_random_value(&self, i: usize, seed: &[u8]) -> Result { + if self.fork_name_unchecked().electra_enabled() { + Self::shuffling_random_u16_electra(i, seed).map(u64::from) + } else { + Self::shuffling_random_byte(i, seed).map(u64::from) + } + } + /// Get a random byte from the given `seed`. /// /// Used by the proposer & sync committee selection functions. @@ -953,6 +972,21 @@ impl BeaconState { .ok_or(Error::ShuffleIndexOutOfBounds(index)) } + /// Get two random bytes from the given `seed`. + /// + /// This is used in place of `shuffling_random_byte` from Electra onwards. + fn shuffling_random_u16_electra(i: usize, seed: &[u8]) -> Result { + let mut preimage = seed.to_vec(); + preimage.append(&mut int_to_bytes8(i.safe_div(16)? as u64)); + let offset = i.safe_rem(16)?.safe_mul(2)?; + hash(&preimage) + .get(offset..offset.safe_add(2)?) + .ok_or(Error::ShuffleIndexOutOfBounds(offset))? + .try_into() + .map(u16::from_le_bytes) + .map_err(|_| Error::ShuffleIndexOutOfBounds(offset)) + } + /// Convenience accessor for the `execution_payload_header` as an `ExecutionPayloadHeaderRef`. 
pub fn latest_execution_payload_header(&self) -> Result, Error> { match self { @@ -1120,6 +1154,11 @@ impl BeaconState { let seed = self.get_seed(epoch, Domain::SyncCommittee, spec)?; let max_effective_balance = spec.max_effective_balance_for_fork(self.fork_name_unchecked()); + let max_random_value = if self.fork_name_unchecked().electra_enabled() { + MAX_RANDOM_VALUE + } else { + MAX_RANDOM_BYTE + }; let mut i = 0; let mut sync_committee_indices = Vec::with_capacity(E::SyncCommitteeSize::to_usize()); @@ -1134,10 +1173,10 @@ impl BeaconState { let candidate_index = *active_validator_indices .get(shuffled_index) .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; - let random_byte = Self::shuffling_random_byte(i, seed.as_slice())?; + let random_value = self.shuffling_random_value(i, seed.as_slice())?; let effective_balance = self.get_validator(candidate_index)?.effective_balance; - if effective_balance.safe_mul(MAX_RANDOM_BYTE)? - >= max_effective_balance.safe_mul(u64::from(random_byte))? + if effective_balance.safe_mul(max_random_value)? + >= max_effective_balance.safe_mul(random_value)? { sync_committee_indices.push(candidate_index); } @@ -2205,7 +2244,7 @@ impl BeaconState { for withdrawal in self .pending_partial_withdrawals()? 
.iter() - .filter(|withdrawal| withdrawal.index as usize == validator_index) + .filter(|withdrawal| withdrawal.validator_index as usize == validator_index) { pending_balance.safe_add_assign(withdrawal.amount)?; } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 65f4c37aa15..ea4d8641f6c 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -191,6 +191,7 @@ pub struct ChainSpec { pub max_pending_partials_per_withdrawals_sweep: u64, pub min_per_epoch_churn_limit_electra: u64, pub max_per_epoch_activation_exit_churn_limit: u64, + pub max_blobs_per_block_electra: u64, /* * Fulu hard fork params @@ -623,9 +624,12 @@ impl ChainSpec { } /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for `fork`. - pub fn max_blobs_per_block_by_fork(&self, _fork_name: ForkName) -> u64 { - // TODO(electra): add Electra blobs per block change here - self.max_blobs_per_block + pub fn max_blobs_per_block_by_fork(&self, fork_name: ForkName) -> u64 { + if fork_name.electra_enabled() { + self.max_blobs_per_block_electra + } else { + self.max_blobs_per_block + } } pub fn data_columns_per_subnet(&self) -> usize { @@ -826,6 +830,7 @@ impl ChainSpec { u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?) }) .expect("calculation does not overflow"), + max_blobs_per_block_electra: default_max_blobs_per_block_electra(), /* * Fulu hard fork params @@ -940,7 +945,7 @@ impl ChainSpec { // Electra electra_fork_version: [0x05, 0x00, 0x00, 0x01], electra_fork_epoch: None, - max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 0) + max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 1) .expect("pow does not overflow"), min_per_epoch_churn_limit_electra: option_wrapper(|| { u64::checked_pow(2, 6)?.checked_mul(u64::checked_pow(10, 9)?) @@ -1156,6 +1161,7 @@ impl ChainSpec { u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?) 
}) .expect("calculation does not overflow"), + max_blobs_per_block_electra: default_max_blobs_per_block_electra(), /* * Fulu hard fork params @@ -1412,6 +1418,9 @@ pub struct Config { #[serde(default = "default_max_per_epoch_activation_exit_churn_limit")] #[serde(with = "serde_utils::quoted_u64")] max_per_epoch_activation_exit_churn_limit: u64, + #[serde(default = "default_max_blobs_per_block_electra")] + #[serde(with = "serde_utils::quoted_u64")] + max_blobs_per_block_electra: u64, #[serde(default = "default_custody_requirement")] #[serde(with = "serde_utils::quoted_u64")] @@ -1554,6 +1563,10 @@ const fn default_max_per_epoch_activation_exit_churn_limit() -> u64 { 256_000_000_000 } +const fn default_max_blobs_per_block_electra() -> u64 { + 9 +} + const fn default_attestation_propagation_slot_range() -> u64 { 32 } @@ -1773,6 +1786,7 @@ impl Config { min_per_epoch_churn_limit_electra: spec.min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit: spec .max_per_epoch_activation_exit_churn_limit, + max_blobs_per_block_electra: spec.max_blobs_per_block_electra, custody_requirement: spec.custody_requirement, data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count, @@ -1850,6 +1864,7 @@ impl Config { min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, + max_blobs_per_block_electra, custody_requirement, data_column_sidecar_subnet_count, number_of_columns, @@ -1919,6 +1934,7 @@ impl Config { min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, + max_blobs_per_block_electra, // We need to re-derive any values that might have changed in the config. 
max_blocks_by_root_request: max_blocks_by_root_request_common(max_request_blocks), diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 976766dfa9d..0bc074072f6 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -3,10 +3,10 @@ use crate::*; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz_types::typenum::{ - bit::B0, UInt, U0, U1, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U134217728, - U16, U16777216, U2, U2048, U256, U262144, U32, U4, U4096, U512, U625, U64, U65536, U8, U8192, + bit::B0, UInt, U0, U1, U10, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, + U134217728, U16, U16777216, U17, U2, U2048, U256, U262144, U32, U4, U4096, U512, U625, U64, + U65536, U8, U8192, }; -use ssz_types::typenum::{U17, U9}; use std::fmt::{self, Debug}; use std::str::FromStr; @@ -431,7 +431,7 @@ impl EthSpec for MainnetEthSpec { type PendingDepositsLimit = U134217728; type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; - type MaxConsolidationRequestsPerPayload = U1; + type MaxConsolidationRequestsPerPayload = U2; type MaxDepositRequestsPerPayload = U8192; type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; @@ -466,8 +466,8 @@ impl EthSpec for MinimalEthSpec { type MaxWithdrawalsPerPayload = U4; type FieldElementsPerBlob = U4096; type BytesPerBlob = U131072; - type MaxBlobCommitmentsPerBlock = U16; - type KzgCommitmentInclusionProofDepth = U9; + type MaxBlobCommitmentsPerBlock = U32; + type KzgCommitmentInclusionProofDepth = U10; type PendingPartialWithdrawalsLimit = U64; type PendingConsolidationsLimit = U64; type MaxDepositRequestsPerPayload = U4; @@ -558,7 +558,7 @@ impl EthSpec for GnosisEthSpec { type PendingDepositsLimit = U134217728; type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; - type MaxConsolidationRequestsPerPayload = U1; + type 
MaxConsolidationRequestsPerPayload = U2; type MaxDepositRequestsPerPayload = U8192; type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; diff --git a/consensus/types/src/pending_partial_withdrawal.rs b/consensus/types/src/pending_partial_withdrawal.rs index e5ace7b2736..846dd973602 100644 --- a/consensus/types/src/pending_partial_withdrawal.rs +++ b/consensus/types/src/pending_partial_withdrawal.rs @@ -21,7 +21,7 @@ use tree_hash_derive::TreeHash; )] pub struct PendingPartialWithdrawal { #[serde(with = "serde_utils::quoted_u64")] - pub index: u64, + pub validator_index: u64, #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, pub withdrawable_epoch: Epoch, diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index f64b7051e5f..9a9915e458a 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -234,7 +234,7 @@ pub struct ElectraPreset { #[serde(with = "serde_utils::quoted_u64")] pub max_pending_partials_per_withdrawals_sweep: u64, #[serde(with = "serde_utils::quoted_u64")] - pub pending_balance_deposits_limit: u64, + pub pending_deposits_limit: u64, #[serde(with = "serde_utils::quoted_u64")] pub pending_partial_withdrawals_limit: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -260,7 +260,7 @@ impl ElectraPreset { whistleblower_reward_quotient_electra: spec.whistleblower_reward_quotient_electra, max_pending_partials_per_withdrawals_sweep: spec .max_pending_partials_per_withdrawals_sweep, - pending_balance_deposits_limit: E::pending_deposits_limit() as u64, + pending_deposits_limit: E::pending_deposits_limit() as u64, pending_partial_withdrawals_limit: E::pending_partial_withdrawals_limit() as u64, pending_consolidations_limit: E::pending_consolidations_limit() as u64, max_consolidation_requests_per_payload: E::max_consolidation_requests_per_payload() diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index d5f4997bb7e..7108e3e8f68 100644 --- a/testing/ef_tests/Makefile 
+++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-alpha.8 +TESTS_TAG := v1.5.0-beta.0 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index dacca204c19..bf9e5d6cfa4 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -35,6 +35,8 @@ "tests/.*/.*/ssz_static/LightClientStore", # LightClientSnapshot "tests/.*/.*/ssz_static/LightClientSnapshot", + # LightClientDataCollection + "tests/minimal/.*/light_client/data_collection", # One of the EF researchers likes to pack the tarballs on a Mac ".*\\.DS_Store.*", # More Mac weirdness. @@ -48,6 +50,10 @@ "tests/.*/eip6110", "tests/.*/whisk", "tests/.*/eip7594", + # Fulu tests are not yet being run + "tests/.*/fulu", + # TODO(electra): SingleAttestation tests are waiting on Eitan's PR + "tests/.*/electra/ssz_static/SingleAttestation" ] diff --git a/testing/ef_tests/src/cases/genesis_initialization.rs b/testing/ef_tests/src/cases/genesis_initialization.rs index 11402c75e62..210e18f781a 100644 --- a/testing/ef_tests/src/cases/genesis_initialization.rs +++ b/testing/ef_tests/src/cases/genesis_initialization.rs @@ -66,8 +66,7 @@ impl LoadCase for GenesisInitialization { impl Case for GenesisInitialization { fn is_enabled_for_fork(fork_name: ForkName) -> bool { - // Altair genesis and later requires real crypto. 
- fork_name == ForkName::Base || cfg!(not(feature = "fake_crypto")) + fork_name == ForkName::Base } fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/genesis_validity.rs b/testing/ef_tests/src/cases/genesis_validity.rs index e977fa3d637..8fb9f2fbdcb 100644 --- a/testing/ef_tests/src/cases/genesis_validity.rs +++ b/testing/ef_tests/src/cases/genesis_validity.rs @@ -39,6 +39,10 @@ impl LoadCase for GenesisValidity { } impl Case for GenesisValidity { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Base + } + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { let spec = &testing_spec::(fork_name); diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index d8fe061061a..2e49b1301d4 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -350,7 +350,7 @@ where self.supported_forks.contains(&fork_name) } - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + fn is_enabled_for_feature(&self, _feature_name: FeatureName) -> bool { // This ensures we only run the tests **once** for `Eip7594`, using the types matching the // correct fork, e.g. `Eip7594` uses SSZ types from `Deneb` as of spec test version // `v1.5.0-alpha.8`, therefore the `Eip7594` tests should get included when testing Deneb types. 
@@ -362,8 +362,11 @@ where // SszStaticHandler::, MainnetEthSpec>::pre_electra().run(); // SszStaticHandler::, MainnetEthSpec>::electra_only().run(); // ``` + /* TODO(das): re-enable feature_name == FeatureName::Eip7594 && self.supported_forks.contains(&feature_name.fork_name()) + */ + false } } @@ -385,8 +388,10 @@ where BeaconState::::name().into() } - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - feature_name == FeatureName::Eip7594 + fn is_enabled_for_feature(&self, _feature_name: FeatureName) -> bool { + // TODO(das): re-enable + // feature_name == FeatureName::Eip7594 + false } } @@ -410,8 +415,10 @@ where T::name().into() } - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - feature_name == FeatureName::Eip7594 + fn is_enabled_for_feature(&self, _feature_name: FeatureName) -> bool { + // TODO(das): re-enable + // feature_name == FeatureName::Eip7594 + false } } @@ -995,8 +1002,10 @@ impl Handler for KzgInclusionMerkleProofValidityHandler bool { - feature_name == FeatureName::Eip7594 + fn is_enabled_for_feature(&self, _feature_name: FeatureName) -> bool { + // TODO(das): re-enable this + // feature_name == FeatureName::Eip7594 + false } } diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 691d27951ad..7c268123fae 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -237,9 +237,7 @@ macro_rules! 
ssz_static_test_no_run { #[cfg(feature = "fake_crypto")] mod ssz_static { - use ef_tests::{ - FeatureName, Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler, - }; + use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler}; use types::historical_summary::HistoricalSummary; use types::{ AttesterSlashingBase, AttesterSlashingElectra, ConsolidationRequest, DepositRequest, @@ -624,6 +622,7 @@ mod ssz_static { SszStaticHandler::::capella_and_later().run(); } + /* FIXME(das): re-enable #[test] fn data_column_sidecar() { SszStaticHandler::, MinimalEthSpec>::deneb_only() @@ -639,6 +638,7 @@ mod ssz_static { SszStaticHandler::::deneb_only() .run_for_feature(FeatureName::Eip7594); } + */ #[test] fn consolidation() { @@ -899,6 +899,7 @@ fn kzg_verify_kzg_proof() { KZGVerifyKZGProofHandler::::default().run(); } +/* FIXME(das): re-enable these tests #[test] fn kzg_compute_cells_and_proofs() { KZGComputeCellsAndKZGProofHandler::::default() @@ -916,6 +917,7 @@ fn kzg_recover_cells_and_proofs() { KZGRecoverCellsAndKZGProofHandler::::default() .run_for_feature(FeatureName::Eip7594); } +*/ #[test] fn beacon_state_merkle_proof_validity() { @@ -947,8 +949,10 @@ fn rewards() { } } +/* FIXME(das): re-enable these tests #[test] fn get_custody_columns() { GetCustodyColumnsHandler::::default().run_for_feature(FeatureName::Eip7594); GetCustodyColumnsHandler::::default().run_for_feature(FeatureName::Eip7594); } +*/ From 93f9c2c7183b6b59cbb02355e2379490fee81d94 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 14 Jan 2025 07:36:27 +0530 Subject: [PATCH 09/26] Execution requests with prefix (#6801) * Exclude empty requests and add back prefix * cleanup * fix after rebase --- .../src/engine_api/json_structures.rs | 87 +++++++++++++------ consensus/types/src/execution_requests.rs | 45 +++++++--- consensus/types/src/lib.rs | 2 +- 3 files changed, 98 insertions(+), 36 deletions(-) diff --git 
a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 86acaaaf3bd..95b4b50925c 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -7,7 +7,7 @@ use superstruct::superstruct; use types::beacon_block_body::KzgCommitments; use types::blob_sidecar::BlobsList; use types::execution_requests::{ - ConsolidationRequests, DepositRequests, RequestPrefix, WithdrawalRequests, + ConsolidationRequests, DepositRequests, RequestType, WithdrawalRequests, }; use types::{Blob, FixedVector, KzgProof, Unsigned}; @@ -401,47 +401,80 @@ impl From> for ExecutionPayload { } } +#[derive(Debug, Clone)] +pub enum RequestsError { + InvalidHex(hex::FromHexError), + EmptyRequest(usize), + InvalidOrdering, + InvalidPrefix(u8), + DecodeError(String), +} + /// Format of `ExecutionRequests` received over the engine api. /// -/// Array of ssz-encoded requests list encoded as hex bytes. -/// The prefix of the request type is used to index into the array. -/// -/// For e.g. 
[0xab, 0xcd, 0xef] -/// Here, 0xab are the deposits bytes (prefix and index == 0) -/// 0xcd are the withdrawals bytes (prefix and index == 1) -/// 0xef are the consolidations bytes (prefix and index == 2) +/// Array of ssz-encoded requests list encoded as hex bytes prefixed +/// with a `RequestType` #[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] #[serde(transparent)] pub struct JsonExecutionRequests(pub Vec); impl TryFrom for ExecutionRequests { - type Error = String; + type Error = RequestsError; fn try_from(value: JsonExecutionRequests) -> Result { let mut requests = ExecutionRequests::default(); - + let mut prev_prefix: Option = None; for (i, request) in value.0.into_iter().enumerate() { // hex string let decoded_bytes = hex::decode(request.strip_prefix("0x").unwrap_or(&request)) - .map_err(|e| format!("Invalid hex {:?}", e))?; - match RequestPrefix::from_prefix(i as u8) { - Some(RequestPrefix::Deposit) => { - requests.deposits = DepositRequests::::from_ssz_bytes(&decoded_bytes) - .map_err(|e| format!("Failed to decode DepositRequest from EL: {:?}", e))?; + .map_err(RequestsError::InvalidHex)?; + + // The first byte of each element is the `request_type` and the remaining bytes are the `request_data`. + // Elements with empty `request_data` **MUST** be excluded from the list. 
+ let Some((prefix_byte, request_bytes)) = decoded_bytes.split_first() else { + return Err(RequestsError::EmptyRequest(i)); + }; + if request_bytes.is_empty() { + return Err(RequestsError::EmptyRequest(i)); + } + // Elements of the list **MUST** be ordered by `request_type` in ascending order + let current_prefix = RequestType::from_u8(*prefix_byte) + .ok_or(RequestsError::InvalidPrefix(*prefix_byte))?; + if let Some(prev) = prev_prefix { + if prev.to_u8() >= current_prefix.to_u8() { + return Err(RequestsError::InvalidOrdering); } - Some(RequestPrefix::Withdrawal) => { - requests.withdrawals = WithdrawalRequests::::from_ssz_bytes(&decoded_bytes) + } + prev_prefix = Some(current_prefix); + + match current_prefix { + RequestType::Deposit => { + requests.deposits = DepositRequests::::from_ssz_bytes(request_bytes) .map_err(|e| { - format!("Failed to decode WithdrawalRequest from EL: {:?}", e) + RequestsError::DecodeError(format!( + "Failed to decode DepositRequest from EL: {:?}", + e + )) })?; } - Some(RequestPrefix::Consolidation) => { + RequestType::Withdrawal => { + requests.withdrawals = WithdrawalRequests::::from_ssz_bytes(request_bytes) + .map_err(|e| { + RequestsError::DecodeError(format!( + "Failed to decode WithdrawalRequest from EL: {:?}", + e + )) + })?; + } + RequestType::Consolidation => { requests.consolidations = - ConsolidationRequests::::from_ssz_bytes(&decoded_bytes).map_err( - |e| format!("Failed to decode ConsolidationRequest from EL: {:?}", e), - )?; + ConsolidationRequests::::from_ssz_bytes(request_bytes).map_err(|e| { + RequestsError::DecodeError(format!( + "Failed to decode ConsolidationRequest from EL: {:?}", + e + )) + })?; } - None => return Err("Empty requests string".to_string()), } } Ok(requests) @@ -510,7 +543,9 @@ impl TryFrom> for GetPayloadResponse { block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, - requests: 
response.execution_requests.try_into()?, + requests: response.execution_requests.try_into().map_err(|e| { + format!("Failed to convert json to execution requests : {:?}", e) + })?, })) } JsonGetPayloadResponse::V5(response) => { @@ -519,7 +554,9 @@ impl TryFrom> for GetPayloadResponse { block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, - requests: response.execution_requests.try_into()?, + requests: response.execution_requests.try_into().map_err(|e| { + format!("Failed to convert json to execution requests {:?}", e) + })?, })) } } diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution_requests.rs index 96a39054207..223c6444ccf 100644 --- a/consensus/types/src/execution_requests.rs +++ b/consensus/types/src/execution_requests.rs @@ -43,10 +43,29 @@ impl ExecutionRequests { /// Returns the encoding according to EIP-7685 to send /// to the execution layer over the engine api. pub fn get_execution_requests_list(&self) -> Vec { - let deposit_bytes = Bytes::from(self.deposits.as_ssz_bytes()); - let withdrawal_bytes = Bytes::from(self.withdrawals.as_ssz_bytes()); - let consolidation_bytes = Bytes::from(self.consolidations.as_ssz_bytes()); - vec![deposit_bytes, withdrawal_bytes, consolidation_bytes] + let mut requests_list = Vec::new(); + if !self.deposits.is_empty() { + requests_list.push(Bytes::from_iter( + [RequestType::Deposit.to_u8()] + .into_iter() + .chain(self.deposits.as_ssz_bytes()), + )); + } + if !self.withdrawals.is_empty() { + requests_list.push(Bytes::from_iter( + [RequestType::Withdrawal.to_u8()] + .into_iter() + .chain(self.withdrawals.as_ssz_bytes()), + )); + } + if !self.consolidations.is_empty() { + requests_list.push(Bytes::from_iter( + [RequestType::Consolidation.to_u8()] + .into_iter() + .chain(self.consolidations.as_ssz_bytes()), + )); + } + requests_list } /// Generate the execution layer `requests_hash` based on EIP-7685. 
@@ -55,9 +74,8 @@ impl ExecutionRequests { pub fn requests_hash(&self) -> Hash256 { let mut hasher = DynamicContext::new(); - for (i, request) in self.get_execution_requests_list().iter().enumerate() { + for request in self.get_execution_requests_list().iter() { let mut request_hasher = DynamicContext::new(); - request_hasher.update(&[i as u8]); request_hasher.update(request); let request_hash = request_hasher.finalize(); @@ -68,16 +86,16 @@ impl ExecutionRequests { } } -/// This is used to index into the `execution_requests` array. +/// The prefix types for `ExecutionRequest` objects. #[derive(Debug, Copy, Clone)] -pub enum RequestPrefix { +pub enum RequestType { Deposit, Withdrawal, Consolidation, } -impl RequestPrefix { - pub fn from_prefix(prefix: u8) -> Option { +impl RequestType { + pub fn from_u8(prefix: u8) -> Option { match prefix { 0 => Some(Self::Deposit), 1 => Some(Self::Withdrawal), @@ -85,6 +103,13 @@ impl RequestPrefix { _ => None, } } + pub fn to_u8(&self) -> u8 { + match self { + Self::Deposit => 0, + Self::Withdrawal => 1, + Self::Consolidation => 2, + } + } } #[cfg(test)] diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 54d8bf51b6a..76e414b2f18 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -172,7 +172,7 @@ pub use crate::execution_payload_header::{ ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; -pub use crate::execution_requests::{ExecutionRequests, RequestPrefix}; +pub use crate::execution_requests::{ExecutionRequests, RequestType}; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; From 587c3e2b8c293787b1b5a7d3b38628a8e4488c49 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 14 Jan 2025 11:52:30 +0530 Subject: [PATCH 10/26] Implement changes for EIP 7691 (#6803) * Add new config options * Use electra_enabled --- 
.../lighthouse_network/src/service/mod.rs | 13 ++--- .../lighthouse_network/src/service/utils.rs | 15 +++--- .../lighthouse_network/src/types/mod.rs | 2 +- .../lighthouse_network/src/types/topics.rs | 31 +++++++---- consensus/types/src/chain_spec.rs | 52 +++++++++++++++++-- 5 files changed, 85 insertions(+), 28 deletions(-) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index afcbfce1732..999803b8fed 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -16,7 +16,7 @@ use crate::rpc::{ use crate::types::{ attestation_sync_committee_topics, fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, ALTAIR_CORE_TOPICS, - BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, + BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, }; use crate::EnrExt; use crate::Eth2Enr; @@ -285,26 +285,23 @@ impl Network { let max_topics = ctx.chain_spec.attestation_subnet_count as usize + SYNC_COMMITTEE_SUBNET_COUNT as usize - + ctx.chain_spec.blob_sidecar_subnet_count as usize + + ctx.chain_spec.blob_sidecar_subnet_count_electra as usize + ctx.chain_spec.data_column_sidecar_subnet_count as usize + BASE_CORE_TOPICS.len() + ALTAIR_CORE_TOPICS.len() - + CAPELLA_CORE_TOPICS.len() - + DENEB_CORE_TOPICS.len() + + CAPELLA_CORE_TOPICS.len() // 0 core deneb and electra topics + LIGHT_CLIENT_GOSSIP_TOPICS.len(); let possible_fork_digests = ctx.fork_context.all_fork_digests(); let filter = gossipsub::MaxCountSubscriptionFilter { filter: utils::create_whitelist_filter( possible_fork_digests, - ctx.chain_spec.attestation_subnet_count, + &ctx.chain_spec, SYNC_COMMITTEE_SUBNET_COUNT, - ctx.chain_spec.blob_sidecar_subnet_count, - ctx.chain_spec.data_column_sidecar_subnet_count, ), // during a fork we subscribe to both the old and new topics 
max_subscribed_topics: max_topics * 4, - // 418 in theory = (64 attestation + 4 sync committee + 7 core topics + 6 blob topics + 128 column topics) * 2 + // 424 in theory = (64 attestation + 4 sync committee + 7 core topics + 9 blob topics + 128 column topics) * 2 max_subscriptions_per_request: max_topics * 2, }; diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 490928c08c3..a9eaa002ffa 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -236,10 +236,8 @@ pub fn load_or_build_metadata( /// possible fork digests. pub(crate) fn create_whitelist_filter( possible_fork_digests: Vec<[u8; 4]>, - attestation_subnet_count: u64, + spec: &ChainSpec, sync_committee_subnet_count: u64, - blob_sidecar_subnet_count: u64, - data_column_sidecar_subnet_count: u64, ) -> gossipsub::WhitelistSubscriptionFilter { let mut possible_hashes = HashSet::new(); for fork_digest in possible_fork_digests { @@ -259,16 +257,21 @@ pub(crate) fn create_whitelist_filter( add(BlsToExecutionChange); add(LightClientFinalityUpdate); add(LightClientOptimisticUpdate); - for id in 0..attestation_subnet_count { + for id in 0..spec.attestation_subnet_count { add(Attestation(SubnetId::new(id))); } for id in 0..sync_committee_subnet_count { add(SyncCommitteeMessage(SyncSubnetId::new(id))); } - for id in 0..blob_sidecar_subnet_count { + let blob_subnet_count = if spec.electra_fork_epoch.is_some() { + spec.blob_sidecar_subnet_count_electra + } else { + spec.blob_sidecar_subnet_count + }; + for id in 0..blob_subnet_count { add(BlobSidecar(id)); } - for id in 0..data_column_sidecar_subnet_count { + for id in 0..spec.data_column_sidecar_subnet_count { add(DataColumnSidecar(DataColumnSubnetId::new(id))); } } diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index 6f266fd2bad..a1eedaef746 100644 --- 
a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -18,5 +18,5 @@ pub use sync_state::{BackFillState, SyncState}; pub use topics::{ attestation_sync_committee_topics, core_topics_to_subscribe, fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, ALTAIR_CORE_TOPICS, - BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, + BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, }; diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 8cdecc6bfa2..475b459ccbc 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -41,8 +41,6 @@ pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [ GossipKind::LightClientOptimisticUpdate, ]; -pub const DENEB_CORE_TOPICS: [GossipKind; 0] = []; - /// Returns the core topics associated with each fork that are new to the previous fork pub fn fork_core_topics(fork_name: &ForkName, spec: &ChainSpec) -> Vec { match fork_name { @@ -56,11 +54,16 @@ pub fn fork_core_topics(fork_name: &ForkName, spec: &ChainSpec) -> V for i in 0..spec.blob_sidecar_subnet_count { deneb_blob_topics.push(GossipKind::BlobSidecar(i)); } - let mut deneb_topics = DENEB_CORE_TOPICS.to_vec(); - deneb_topics.append(&mut deneb_blob_topics); - deneb_topics + deneb_blob_topics + } + ForkName::Electra => { + // All of electra blob topics are core topics + let mut electra_blob_topics = Vec::new(); + for i in 0..spec.blob_sidecar_subnet_count_electra { + electra_blob_topics.push(GossipKind::BlobSidecar(i)); + } + electra_blob_topics } - ForkName::Electra => vec![], ForkName::Fulu => vec![], } } @@ -88,7 +91,12 @@ pub fn core_topics_to_subscribe( topics.extend(previous_fork_topics); current_fork = previous_fork; } + // Remove duplicates topics + .into_iter() + .collect::>() + .into_iter() + .collect() } /// A 
gossipsub topic which encapsulates the type of messages that should be sent and received over @@ -467,16 +475,19 @@ mod tests { type E = MainnetEthSpec; let spec = E::default_spec(); let mut all_topics = Vec::new(); + let mut electra_core_topics = fork_core_topics::(&ForkName::Electra, &spec); let mut deneb_core_topics = fork_core_topics::(&ForkName::Deneb, &spec); + all_topics.append(&mut electra_core_topics); all_topics.append(&mut deneb_core_topics); all_topics.extend(CAPELLA_CORE_TOPICS); all_topics.extend(ALTAIR_CORE_TOPICS); all_topics.extend(BASE_CORE_TOPICS); let latest_fork = *ForkName::list_all().last().unwrap(); - assert_eq!( - core_topics_to_subscribe::(latest_fork, &spec), - all_topics - ); + let core_topics = core_topics_to_subscribe::(latest_fork, &spec); + // Need to check all the topics exist in an order independent manner + for topic in all_topics { + assert!(core_topics.contains(&topic)); + } } } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index ea4d8641f6c..6594f3c44e9 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -191,7 +191,6 @@ pub struct ChainSpec { pub max_pending_partials_per_withdrawals_sweep: u64, pub min_per_epoch_churn_limit_electra: u64, pub max_per_epoch_activation_exit_churn_limit: u64, - pub max_blobs_per_block_electra: u64, /* * Fulu hard fork params @@ -240,6 +239,13 @@ pub struct ChainSpec { pub blob_sidecar_subnet_count: u64, max_blobs_per_block: u64, + /* + * Networking Electra + */ + max_blobs_per_block_electra: u64, + pub blob_sidecar_subnet_count_electra: u64, + pub max_request_blob_sidecars_electra: u64, + /* * Networking Derived * @@ -618,6 +624,14 @@ impl ChainSpec { } } + pub fn max_request_blob_sidecars(&self, fork_name: ForkName) -> usize { + if fork_name.electra_enabled() { + self.max_request_blob_sidecars_electra as usize + } else { + self.max_request_blob_sidecars as usize + } + } + /// Return the value of `MAX_BLOBS_PER_BLOCK` 
appropriate for the fork at `epoch`. pub fn max_blobs_per_block(&self, epoch: Epoch) -> u64 { self.max_blobs_per_block_by_fork(self.fork_name_at_epoch(epoch)) @@ -830,7 +844,6 @@ impl ChainSpec { u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?) }) .expect("calculation does not overflow"), - max_blobs_per_block_electra: default_max_blobs_per_block_electra(), /* * Fulu hard fork params @@ -886,6 +899,13 @@ impl ChainSpec { max_blobs_by_root_request: default_max_blobs_by_root_request(), max_data_columns_by_root_request: default_data_columns_by_root_request(), + /* + * Networking Electra specific + */ + max_blobs_per_block_electra: default_max_blobs_per_block_electra(), + blob_sidecar_subnet_count_electra: default_blob_sidecar_subnet_count_electra(), + max_request_blob_sidecars_electra: default_max_request_blob_sidecars_electra(), + /* * Application specific */ @@ -1161,7 +1181,6 @@ impl ChainSpec { u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?) }) .expect("calculation does not overflow"), - max_blobs_per_block_electra: default_max_blobs_per_block_electra(), /* * Fulu hard fork params @@ -1216,6 +1235,13 @@ impl ChainSpec { max_blobs_by_root_request: default_max_blobs_by_root_request(), max_data_columns_by_root_request: default_data_columns_by_root_request(), + /* + * Networking Electra specific + */ + max_blobs_per_block_electra: default_max_blobs_per_block_electra(), + blob_sidecar_subnet_count_electra: default_blob_sidecar_subnet_count_electra(), + max_request_blob_sidecars_electra: default_max_request_blob_sidecars_electra(), + /* * Application specific */ @@ -1421,6 +1447,12 @@ pub struct Config { #[serde(default = "default_max_blobs_per_block_electra")] #[serde(with = "serde_utils::quoted_u64")] max_blobs_per_block_electra: u64, + #[serde(default = "default_blob_sidecar_subnet_count_electra")] + #[serde(with = "serde_utils::quoted_u64")] + pub blob_sidecar_subnet_count_electra: u64, + #[serde(default = 
"default_max_request_blob_sidecars_electra")] + #[serde(with = "serde_utils::quoted_u64")] + max_request_blob_sidecars_electra: u64, #[serde(default = "default_custody_requirement")] #[serde(with = "serde_utils::quoted_u64")] @@ -1555,6 +1587,14 @@ const fn default_max_blobs_per_block() -> u64 { 6 } +const fn default_blob_sidecar_subnet_count_electra() -> u64 { + 9 +} + +const fn default_max_request_blob_sidecars_electra() -> u64 { + 1152 +} + const fn default_min_per_epoch_churn_limit_electra() -> u64 { 128_000_000_000 } @@ -1787,6 +1827,8 @@ impl Config { max_per_epoch_activation_exit_churn_limit: spec .max_per_epoch_activation_exit_churn_limit, max_blobs_per_block_electra: spec.max_blobs_per_block_electra, + blob_sidecar_subnet_count_electra: spec.blob_sidecar_subnet_count_electra, + max_request_blob_sidecars_electra: spec.max_request_blob_sidecars_electra, custody_requirement: spec.custody_requirement, data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count, @@ -1865,6 +1907,8 @@ impl Config { min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, max_blobs_per_block_electra, + blob_sidecar_subnet_count_electra, + max_request_blob_sidecars_electra, custody_requirement, data_column_sidecar_subnet_count, number_of_columns, @@ -1935,6 +1979,8 @@ impl Config { min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, max_blobs_per_block_electra, + max_request_blob_sidecars_electra, + blob_sidecar_subnet_count_electra, // We need to re-derive any values that might have changed in the config. 
max_blocks_by_root_request: max_blocks_by_root_request_common(max_request_blocks), From 4fd8e521a4245ec7bb729895f103d3ca5083927b Mon Sep 17 00:00:00 2001 From: jking-aus <72330194+jking-aus@users.noreply.github.com> Date: Wed, 15 Jan 2025 14:53:40 +1100 Subject: [PATCH 11/26] Use existing peer count metrics loop to check for open_nat toggle (#6800) * implement update_nat_open function in network_behaviour for tracking incoming peers below a given threshold count * implement update_nat_open function in network_behaviour for tracking incoming peers below a given threshold count * tidy logic and comments * move logic to existing metrics loop * revert change to network_behaviour protocol check * clippy * clippy matches! macro * pull nat_open check outside of peercounting loop * missing close bracket * make threshold const --- .../src/peer_manager/mod.rs | 33 +++++++++++++++++-- .../src/peer_manager/peerdb/peer_info.rs | 18 ++++++++++ 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 4df2566dacb..6502a8dbff5 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -63,6 +63,8 @@ pub const MIN_OUTBOUND_ONLY_FACTOR: f32 = 0.2; /// limit is 55, and we are at 55 peers, the following parameter provisions a few more slots of /// dialing priority peers we need for validator duties. pub const PRIORITY_PEER_EXCESS: f32 = 0.2; +/// The numbre of inbound libp2p peers we have seen before we consider our NAT to be open. +pub const LIBP2P_NAT_OPEN_THRESHOLD: usize = 3; /// The main struct that handles peer's reputation and connection status. 
pub struct PeerManager { @@ -1307,7 +1309,9 @@ impl PeerManager { fn update_peer_count_metrics(&self) { let mut peers_connected = 0; let mut clients_per_peer = HashMap::new(); - let mut peers_connected_mutli: HashMap<(&str, &str), i32> = HashMap::new(); + let mut inbound_ipv4_peers_connected: usize = 0; + let mut inbound_ipv6_peers_connected: usize = 0; + let mut peers_connected_multi: HashMap<(&str, &str), i32> = HashMap::new(); let mut peers_per_custody_subnet_count: HashMap = HashMap::new(); for (_, peer_info) in self.network_globals.peers.read().connected_peers() { @@ -1336,7 +1340,7 @@ impl PeerManager { }) }) .unwrap_or("unknown"); - *peers_connected_mutli + *peers_connected_multi .entry((direction, transport)) .or_default() += 1; @@ -1345,6 +1349,29 @@ impl PeerManager { .entry(meta_data.custody_subnet_count) .or_default() += 1; } + // Check if incoming peer is ipv4 + if peer_info.is_incoming_ipv4_connection() { + inbound_ipv4_peers_connected += 1; + } + + // Check if incoming peer is ipv6 + if peer_info.is_incoming_ipv6_connection() { + inbound_ipv6_peers_connected += 1; + } + } + + // Set ipv4 nat_open metric flag if threshold of peercount is met, unset if below threshold + if inbound_ipv4_peers_connected >= LIBP2P_NAT_OPEN_THRESHOLD { + metrics::set_gauge_vec(&metrics::NAT_OPEN, &["libp2p_ipv4"], 1); + } else { + metrics::set_gauge_vec(&metrics::NAT_OPEN, &["libp2p_ipv4"], 0); + } + + // Set ipv6 nat_open metric flag if threshold of peercount is met, unset if below threshold + if inbound_ipv6_peers_connected >= LIBP2P_NAT_OPEN_THRESHOLD { + metrics::set_gauge_vec(&metrics::NAT_OPEN, &["libp2p_ipv6"], 1); + } else { + metrics::set_gauge_vec(&metrics::NAT_OPEN, &["libp2p_ipv6"], 0); } // PEERS_CONNECTED @@ -1375,7 +1402,7 @@ impl PeerManager { metrics::set_gauge_vec( &metrics::PEERS_CONNECTED_MULTI, &[direction, transport], - *peers_connected_mutli + *peers_connected_multi .get(&(direction, transport)) .unwrap_or(&0) as i64, ); diff --git 
a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 27c8463a55e..d8b3568a280 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -122,6 +122,24 @@ impl PeerInfo { self.connection_direction.as_ref() } + /// Returns true if this is an incoming ipv4 connection. + pub fn is_incoming_ipv4_connection(&self) -> bool { + self.seen_multiaddrs.iter().any(|multiaddr| { + multiaddr + .iter() + .any(|protocol| matches!(protocol, libp2p::core::multiaddr::Protocol::Ip4(_))) + }) + } + + /// Returns true if this is an incoming ipv6 connection. + pub fn is_incoming_ipv6_connection(&self) -> bool { + self.seen_multiaddrs.iter().any(|multiaddr| { + multiaddr + .iter() + .any(|protocol| matches!(protocol, libp2p::core::multiaddr::Protocol::Ip6(_))) + }) + } + /// Returns the sync status of the peer. pub fn sync_status(&self) -> &SyncStatus { &self.sync_status From dd7591f7123dfe072631c0deb0abc1b78cc82733 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 15 Jan 2025 17:56:51 +1100 Subject: [PATCH 12/26] Fix data columns not persisting for PeerDAS due to a `getBlobs` race condition (#6756) * Fix data columns not persisting for PeerDAS due to a `getBlobs` race condition. * Refactor blobs and columns logic in `chain.import_block` for clarity. Add more docs on `data_column_recv`. * Add more code comments for clarity. * Merge remote-tracking branch 'origin/unstable' into fix-column-race # Conflicts: # beacon_node/beacon_chain/src/block_verification_types.rs # beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs * Fix lint. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 183 ++++++++++-------- .../beacon_chain/src/block_verification.rs | 1 + .../src/block_verification_types.rs | 15 +- .../src/data_availability_checker.rs | 14 +- .../overflow_lru_cache.rs | 71 +++++-- .../state_lru_cache.rs | 1 + beacon_node/beacon_chain/src/fetch_blobs.rs | 8 +- .../beacon_chain/tests/block_verification.rs | 2 +- 8 files changed, 189 insertions(+), 106 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 81783267ba6..a6da610c0e8 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -121,7 +121,7 @@ use store::{ KeyValueStoreOp, StoreItem, StoreOp, }; use task_executor::{ShutdownReason, TaskExecutor}; -use tokio::sync::mpsc::Receiver; +use tokio::sync::oneshot; use tokio_stream::Stream; use tree_hash::TreeHash; use types::blob_sidecar::FixedBlobSidecarList; @@ -3088,7 +3088,7 @@ impl BeaconChain { slot: Slot, block_root: Hash256, blobs: FixedBlobSidecarList, - data_column_recv: Option>>, + data_column_recv: Option>>, ) -> Result { // If this block has already been imported to forkchoice it must have been available, so // we don't need to process its blobs again. 
@@ -3216,7 +3216,7 @@ impl BeaconChain { }; let r = self - .process_availability(slot, availability, None, || Ok(())) + .process_availability(slot, availability, || Ok(())) .await; self.remove_notified(&block_root, r) .map(|availability_processing_status| { @@ -3344,7 +3344,7 @@ impl BeaconChain { match executed_block { ExecutedBlock::Available(block) => { - self.import_available_block(Box::new(block), None).await + self.import_available_block(Box::new(block)).await } ExecutedBlock::AvailabilityPending(block) => { self.check_block_availability_and_import(block).await @@ -3476,7 +3476,7 @@ impl BeaconChain { let availability = self .data_availability_checker .put_pending_executed_block(block)?; - self.process_availability(slot, availability, None, || Ok(())) + self.process_availability(slot, availability, || Ok(())) .await } @@ -3492,7 +3492,7 @@ impl BeaconChain { } let availability = self.data_availability_checker.put_gossip_blob(blob)?; - self.process_availability(slot, availability, None, || Ok(())) + self.process_availability(slot, availability, || Ok(())) .await } @@ -3515,7 +3515,7 @@ impl BeaconChain { .data_availability_checker .put_gossip_data_columns(block_root, data_columns)?; - self.process_availability(slot, availability, None, publish_fn) + self.process_availability(slot, availability, publish_fn) .await } @@ -3559,7 +3559,7 @@ impl BeaconChain { .data_availability_checker .put_rpc_blobs(block_root, blobs)?; - self.process_availability(slot, availability, None, || Ok(())) + self.process_availability(slot, availability, || Ok(())) .await } @@ -3568,14 +3568,14 @@ impl BeaconChain { slot: Slot, block_root: Hash256, blobs: FixedBlobSidecarList, - data_column_recv: Option>>, + data_column_recv: Option>>, ) -> Result { self.check_blobs_for_slashability(block_root, &blobs)?; - let availability = self - .data_availability_checker - .put_engine_blobs(block_root, blobs)?; + let availability = + self.data_availability_checker + .put_engine_blobs(block_root, 
blobs, data_column_recv)?; - self.process_availability(slot, availability, data_column_recv, || Ok(())) + self.process_availability(slot, availability, || Ok(())) .await } @@ -3615,7 +3615,7 @@ impl BeaconChain { .data_availability_checker .put_rpc_custody_columns(block_root, custody_columns)?; - self.process_availability(slot, availability, None, || Ok(())) + self.process_availability(slot, availability, || Ok(())) .await } @@ -3627,14 +3627,13 @@ impl BeaconChain { self: &Arc, slot: Slot, availability: Availability, - recv: Option>>, publish_fn: impl FnOnce() -> Result<(), BlockError>, ) -> Result { match availability { Availability::Available(block) => { publish_fn()?; // Block is fully available, import into fork choice - self.import_available_block(block, recv).await + self.import_available_block(block).await } Availability::MissingComponents(block_root) => Ok( AvailabilityProcessingStatus::MissingComponents(slot, block_root), @@ -3645,7 +3644,6 @@ impl BeaconChain { pub async fn import_available_block( self: &Arc, block: Box>, - data_column_recv: Option>>, ) -> Result { let AvailableExecutedBlock { block, @@ -3660,6 +3658,7 @@ impl BeaconChain { parent_eth1_finalization_data, confirmed_state_roots, consensus_context, + data_column_recv, } = import_data; // Record the time at which this block's blobs became available. @@ -3726,7 +3725,7 @@ impl BeaconChain { parent_block: SignedBlindedBeaconBlock, parent_eth1_finalization_data: Eth1FinalizationData, mut consensus_context: ConsensusContext, - data_column_recv: Option>>, + data_column_recv: Option>>, ) -> Result { // ----------------------------- BLOCK NOT YET ATTESTABLE ---------------------------------- // Everything in this initial section is on the hot path between processing the block and @@ -3894,44 +3893,32 @@ impl BeaconChain { // end up with blocks in fork choice that are missing from disk. 
// See https://github.com/sigp/lighthouse/issues/2028 let (_, signed_block, blobs, data_columns) = signed_block.deconstruct(); - // TODO(das) we currently store all subnet sampled columns. Tracking issue to exclude non - // custody columns: https://github.com/sigp/lighthouse/issues/6465 - let custody_columns_count = self.data_availability_checker.get_sampling_column_count(); - // if block is made available via blobs, dropped the data columns. - let data_columns = data_columns.filter(|columns| columns.len() == custody_columns_count); - - let data_columns = match (data_columns, data_column_recv) { - // If the block was made available via custody columns received from gossip / rpc, use them - // since we already have them. - (Some(columns), _) => Some(columns), - // Otherwise, it means blobs were likely available via fetching from EL, in this case we - // wait for the data columns to be computed (blocking). - (None, Some(mut data_column_recv)) => { - let _column_recv_timer = - metrics::start_timer(&metrics::BLOCK_PROCESSING_DATA_COLUMNS_WAIT); - // Unable to receive data columns from sender, sender is either dropped or - // failed to compute data columns from blobs. We restore fork choice here and - // return to avoid inconsistency in database. - if let Some(columns) = data_column_recv.blocking_recv() { - Some(columns) - } else { - let err_msg = "Did not receive data columns from sender"; - error!( - self.log, - "Failed to store data columns into the database"; - "msg" => "Restoring fork choice from disk", - "error" => err_msg, - ); - return Err(self - .handle_import_block_db_write_error(fork_choice) - .err() - .unwrap_or(BlockError::InternalError(err_msg.to_string()))); - } + + match self.get_blobs_or_columns_store_op( + block_root, + signed_block.epoch(), + blobs, + data_columns, + data_column_recv, + ) { + Ok(Some(blobs_or_columns_store_op)) => { + ops.push(blobs_or_columns_store_op); } - // No data columns present and compute data columns task was not spawned. 
- // Could either be no blobs in the block or before PeerDAS activation. - (None, None) => None, - }; + Ok(None) => {} + Err(e) => { + error!( + self.log, + "Failed to store data columns into the database"; + "msg" => "Restoring fork choice from disk", + "error" => &e, + "block_root" => ?block_root + ); + return Err(self + .handle_import_block_db_write_error(fork_choice) + .err() + .unwrap_or(BlockError::InternalError(e))); + } + } let block = signed_block.message(); let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE); @@ -3943,30 +3930,6 @@ impl BeaconChain { ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); - if let Some(blobs) = blobs { - if !blobs.is_empty() { - debug!( - self.log, "Writing blobs to store"; - "block_root" => %block_root, - "count" => blobs.len(), - ); - ops.push(StoreOp::PutBlobs(block_root, blobs)); - } - } - - if let Some(data_columns) = data_columns { - // TODO(das): `available_block includes all sampled columns, but we only need to store - // custody columns. To be clarified in spec. - if !data_columns.is_empty() { - debug!( - self.log, "Writing data_columns to store"; - "block_root" => %block_root, - "count" => data_columns.len(), - ); - ops.push(StoreOp::PutDataColumns(block_root, data_columns)); - } - } - let txn_lock = self.store.hot_db.begin_rw_transaction(); if let Err(e) = self.store.do_atomically_with_block_and_blobs_cache(ops) { @@ -7184,6 +7147,68 @@ impl BeaconChain { reqresp_pre_import_cache_len: self.reqresp_pre_import_cache.read().len(), } } + + fn get_blobs_or_columns_store_op( + &self, + block_root: Hash256, + block_epoch: Epoch, + blobs: Option>, + data_columns: Option>, + data_column_recv: Option>>, + ) -> Result>, String> { + if self.spec.is_peer_das_enabled_for_epoch(block_epoch) { + // TODO(das) we currently store all subnet sampled columns. 
Tracking issue to exclude non + // custody columns: https://github.com/sigp/lighthouse/issues/6465 + let custody_columns_count = self.data_availability_checker.get_sampling_column_count(); + + let custody_columns_available = data_columns + .as_ref() + .as_ref() + .is_some_and(|columns| columns.len() == custody_columns_count); + + let data_columns_to_persist = if custody_columns_available { + // If the block was made available via custody columns received from gossip / rpc, use them + // since we already have them. + data_columns + } else if let Some(data_column_recv) = data_column_recv { + // Blobs were available from the EL, in this case we wait for the data columns to be computed (blocking). + let _column_recv_timer = + metrics::start_timer(&metrics::BLOCK_PROCESSING_DATA_COLUMNS_WAIT); + // Unable to receive data columns from sender, sender is either dropped or + // failed to compute data columns from blobs. We restore fork choice here and + // return to avoid inconsistency in database. + let computed_data_columns = data_column_recv + .blocking_recv() + .map_err(|e| format!("Did not receive data columns from sender: {e:?}"))?; + Some(computed_data_columns) + } else { + // No blobs in the block. 
+ None + }; + + if let Some(data_columns) = data_columns_to_persist { + if !data_columns.is_empty() { + debug!( + self.log, "Writing data_columns to store"; + "block_root" => %block_root, + "count" => data_columns.len(), + ); + return Ok(Some(StoreOp::PutDataColumns(block_root, data_columns))); + } + } + } else if let Some(blobs) = blobs { + if !blobs.is_empty() { + debug!( + self.log, "Writing blobs to store"; + "block_root" => %block_root, + "count" => blobs.len(), + ); + return Ok(Some(StoreOp::PutBlobs(block_root, blobs))); + } + } + + Ok(None) + } } impl Drop for BeaconChain { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index ddb7bb614a3..315105ac2b5 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1677,6 +1677,7 @@ impl ExecutionPendingBlock { parent_eth1_finalization_data, confirmed_state_roots, consensus_context, + data_column_recv: None, }, payload_verification_handle, }) diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 0bf3007e9b0..b81e728257e 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -7,10 +7,11 @@ use derivative::Derivative; use state_processing::ConsensusContext; use std::fmt::{Debug, Formatter}; use std::sync::Arc; +use tokio::sync::oneshot; use types::blob_sidecar::BlobIdentifier; use types::{ - BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, ChainSpec, Epoch, EthSpec, - Hash256, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, ChainSpec, DataColumnSidecarList, + Epoch, EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; /// A block that has been received over RPC. 
It has 2 internal variants: @@ -337,7 +338,8 @@ impl AvailabilityPendingExecutedBlock { } } -#[derive(Debug, PartialEq)] +#[derive(Debug, Derivative)] +#[derivative(PartialEq)] pub struct BlockImportData { pub block_root: Hash256, pub state: BeaconState, @@ -345,6 +347,12 @@ pub struct BlockImportData { pub parent_eth1_finalization_data: Eth1FinalizationData, pub confirmed_state_roots: Vec, pub consensus_context: ConsensusContext, + #[derivative(PartialEq = "ignore")] + /// An optional receiver for `DataColumnSidecarList`. + /// + /// This field is `Some` when data columns are being computed asynchronously. + /// The resulting `DataColumnSidecarList` will be sent through this receiver. + pub data_column_recv: Option>>, } impl BlockImportData { @@ -363,6 +371,7 @@ impl BlockImportData { }, confirmed_state_roots: vec![], consensus_context: ConsensusContext::new(Slot::new(0)), + data_column_recv: None, } } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 4c5152239c2..3ac1a954940 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -15,6 +15,7 @@ use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; +use tokio::sync::oneshot; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{ BlobSidecarList, ChainSpec, DataColumnIdentifier, DataColumnSidecar, DataColumnSidecarList, @@ -223,7 +224,7 @@ impl DataAvailabilityChecker { .map_err(AvailabilityCheckError::InvalidBlobs)?; self.availability_cache - .put_kzg_verified_blobs(block_root, verified_blobs, &self.log) + .put_kzg_verified_blobs(block_root, verified_blobs, None, &self.log) } /// Put a list of custody columns received via RPC into the availability cache. 
This performs KZG @@ -263,6 +264,7 @@ impl DataAvailabilityChecker { &self, block_root: Hash256, blobs: FixedBlobSidecarList, + data_column_recv: Option>>, ) -> Result, AvailabilityCheckError> { let seen_timestamp = self .slot_clock @@ -272,8 +274,12 @@ impl DataAvailabilityChecker { let verified_blobs = KzgVerifiedBlobList::from_verified(blobs.iter().flatten().cloned(), seen_timestamp); - self.availability_cache - .put_kzg_verified_blobs(block_root, verified_blobs, &self.log) + self.availability_cache.put_kzg_verified_blobs( + block_root, + verified_blobs, + data_column_recv, + &self.log, + ) } /// Check if we've cached other blobs for this block. If it completes a set and we also @@ -288,6 +294,7 @@ impl DataAvailabilityChecker { self.availability_cache.put_kzg_verified_blobs( gossip_blob.block_root(), vec![gossip_blob.into_inner()], + None, &self.log, ) } @@ -803,7 +810,6 @@ impl AvailableBlock { block, blobs, data_columns, - blobs_available_timestamp: _, .. } = self; (block_root, block, blobs, data_columns) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 44148922f48..a2936206aef 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -12,28 +12,46 @@ use parking_lot::RwLock; use slog::{debug, Logger}; use std::num::NonZeroUsize; use std::sync::Arc; +use tokio::sync::oneshot; use types::blob_sidecar::BlobIdentifier; use types::{ - BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, - Hash256, RuntimeFixedVector, RuntimeVariableList, SignedBeaconBlock, + BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, + DataColumnSidecarList, Epoch, EthSpec, Hash256, RuntimeFixedVector, RuntimeVariableList, + SignedBeaconBlock, }; /// This represents the components 
of a partially available block /// /// The blobs are all gossip and kzg verified. /// The block has completed all verifications except the availability check. -/// TODO(das): this struct can potentially be reafactored as blobs and data columns are mutually -/// exclusive and this could simplify `is_importable`. -#[derive(Clone)] pub struct PendingComponents { pub block_root: Hash256, pub verified_blobs: RuntimeFixedVector>>, pub verified_data_columns: Vec>, pub executed_block: Option>, pub reconstruction_started: bool, + /// Receiver for data columns that are computed asynchronously; + /// + /// If `data_column_recv` is `Some`, it means data column computation or reconstruction has been + /// started. This can happen either via engine blobs fetching or data column reconstruction + /// (triggered when >= 50% columns are received via gossip). + pub data_column_recv: Option>>, } impl PendingComponents { + /// Clones the `PendingComponent` without cloning `data_column_recv`, as `Receiver` is not cloneable. + /// This should only be used when the receiver is no longer needed. + pub fn clone_without_column_recv(&self) -> Self { + PendingComponents { + block_root: self.block_root, + verified_blobs: self.verified_blobs.clone(), + verified_data_columns: self.verified_data_columns.clone(), + executed_block: self.executed_block.clone(), + reconstruction_started: self.reconstruction_started, + data_column_recv: None, + } + } + /// Returns an immutable reference to the cached block. pub fn get_cached_block(&self) -> &Option> { &self.executed_block @@ -236,6 +254,7 @@ impl PendingComponents { verified_data_columns: vec![], executed_block: None, reconstruction_started: false, + data_column_recv: None, } } @@ -260,6 +279,7 @@ impl PendingComponents { verified_blobs, verified_data_columns, executed_block, + data_column_recv, .. 
} = self; @@ -302,10 +322,12 @@ impl PendingComponents { let AvailabilityPendingExecutedBlock { block, - import_data, + mut import_data, payload_verification_outcome, } = executed_block; + import_data.data_column_recv = data_column_recv; + let available_block = AvailableBlock { block_root, block, @@ -444,10 +466,17 @@ impl DataAvailabilityCheckerInner { f(self.critical.read().peek(block_root)) } + /// Puts the KZG verified blobs into the availability cache as pending components. + /// + /// The `data_column_recv` parameter is an optional `Receiver` for data columns that are + /// computed asynchronously. This method remains **used** after PeerDAS activation, because + /// blocks can be made available if the EL already has the blobs and returns them via the + /// `getBlobsV1` engine method. More details in [fetch_blobs.rs](https://github.com/sigp/lighthouse/blob/44f8add41ea2252769bb967864af95b3c13af8ca/beacon_node/beacon_chain/src/fetch_blobs.rs). pub fn put_kzg_verified_blobs>>( &self, block_root: Hash256, kzg_verified_blobs: I, + data_column_recv: Option>>, log: &Logger, ) -> Result, AvailabilityCheckError> { let mut kzg_verified_blobs = kzg_verified_blobs.into_iter().peekable(); @@ -482,9 +511,17 @@ impl DataAvailabilityCheckerInner { // Merge in the blobs. pending_components.merge_blobs(fixed_blobs); + if data_column_recv.is_some() { + // If `data_column_recv` is `Some`, it means we have all the blobs from engine, and have + // started computing data columns. We store the receiver in `PendingComponents` for + // later use when importing the block. + pending_components.data_column_recv = data_column_recv; + } + if pending_components.is_available(self.sampling_column_count, log) { - write_lock.put(block_root, pending_components.clone()); - // No need to hold the write lock anymore + // We keep the pending components in the availability cache during block import (#5845). 
+ // `data_column_recv` is returned as part of the available block and is no longer needed here. + write_lock.put(block_root, pending_components.clone_without_column_recv()); drop(write_lock); pending_components.make_available(&self.spec, |diet_block| { self.state_cache.recover_pending_executed_block(diet_block) @@ -527,8 +564,9 @@ impl DataAvailabilityCheckerInner { pending_components.merge_data_columns(kzg_verified_data_columns)?; if pending_components.is_available(self.sampling_column_count, log) { - write_lock.put(block_root, pending_components.clone()); - // No need to hold the write lock anymore + // We keep the pending components in the availability cache during block import (#5845). + // `data_column_recv` is returned as part of the available block and is no longer needed here. + write_lock.put(block_root, pending_components.clone_without_column_recv()); drop(write_lock); pending_components.make_available(&self.spec, |diet_block| { self.state_cache.recover_pending_executed_block(diet_block) @@ -577,7 +615,7 @@ impl DataAvailabilityCheckerInner { } pending_components.reconstruction_started = true; - ReconstructColumnsDecision::Yes(pending_components.clone()) + ReconstructColumnsDecision::Yes(pending_components.clone_without_column_recv()) } /// This could mean some invalid data columns made it through to the `DataAvailabilityChecker`. @@ -619,8 +657,9 @@ impl DataAvailabilityCheckerInner { // Check if we have all components and entire set is consistent. if pending_components.is_available(self.sampling_column_count, log) { - write_lock.put(block_root, pending_components.clone()); - // No need to hold the write lock anymore + // We keep the pending components in the availability cache during block import (#5845). + // `data_column_recv` is returned as part of the available block and is no longer needed here. 
+ write_lock.put(block_root, pending_components.clone_without_column_recv()); drop(write_lock); pending_components.make_available(&self.spec, |diet_block| { self.state_cache.recover_pending_executed_block(diet_block) @@ -855,6 +894,7 @@ mod test { parent_eth1_finalization_data, confirmed_state_roots: vec![], consensus_context, + data_column_recv: None, }; let payload_verification_outcome = PayloadVerificationOutcome { @@ -957,7 +997,7 @@ mod test { for (blob_index, gossip_blob) in blobs.into_iter().enumerate() { kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, kzg_verified_blobs.clone(), harness.logger()) + .put_kzg_verified_blobs(root, kzg_verified_blobs.clone(), None, harness.logger()) .expect("should put blob"); if blob_index == blobs_expected - 1 { assert!(matches!(availability, Availability::Available(_))); @@ -985,7 +1025,7 @@ mod test { for gossip_blob in blobs { kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, kzg_verified_blobs.clone(), harness.logger()) + .put_kzg_verified_blobs(root, kzg_verified_blobs.clone(), None, harness.logger()) .expect("should put blob"); assert_eq!( availability, @@ -1241,6 +1281,7 @@ mod pending_components_tests { }, confirmed_state_roots: vec![], consensus_context: ConsensusContext::new(Slot::new(0)), + data_column_recv: None, }, payload_verification_outcome: PayloadVerificationOutcome { payload_verification_status: PayloadVerificationStatus::Verified, diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index 5b9b7c70233..2a2a0431ccb 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -136,6 +136,7 @@ impl StateLRUCache { consensus_context: diet_executed_block .consensus_context 
.into_consensus_context(), + data_column_recv: None, }, payload_verification_outcome: diet_executed_block.payload_verification_outcome, }) diff --git a/beacon_node/beacon_chain/src/fetch_blobs.rs b/beacon_node/beacon_chain/src/fetch_blobs.rs index f1646072c96..49e46a50fec 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs.rs @@ -18,7 +18,7 @@ use slog::{debug, error, o, Logger}; use ssz_types::FixedVector; use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash; use std::sync::Arc; -use tokio::sync::mpsc::Receiver; +use tokio::sync::oneshot; use types::blob_sidecar::{BlobSidecarError, FixedBlobSidecarList}; use types::{ BeaconStateError, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnSidecarList, EthSpec, @@ -213,9 +213,9 @@ fn spawn_compute_and_publish_data_columns_task( blobs: FixedBlobSidecarList, publish_fn: impl Fn(BlobsOrDataColumns) + Send + 'static, log: Logger, -) -> Receiver>>> { +) -> oneshot::Receiver>>> { let chain_cloned = chain.clone(); - let (data_columns_sender, data_columns_receiver) = tokio::sync::mpsc::channel(1); + let (data_columns_sender, data_columns_receiver) = oneshot::channel(); chain.task_executor.spawn_blocking( move || { @@ -248,7 +248,7 @@ fn spawn_compute_and_publish_data_columns_task( } }; - if let Err(e) = data_columns_sender.try_send(all_data_columns.clone()) { + if let Err(e) = data_columns_sender.send(all_data_columns.clone()) { error!(log, "Failed to send computed data columns"; "error" => ?e); }; diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index b61f758cac5..1a651332ade 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -1737,7 +1737,7 @@ async fn import_execution_pending_block( .unwrap() { ExecutedBlock::Available(block) => chain - .import_available_block(Box::from(block), None) + 
.import_available_block(Box::from(block)) .await .map_err(|e| format!("{e:?}")), ExecutedBlock::AvailabilityPending(_) => { From e98209d1186419264868a28591c2b14af3f6abff Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 15 Jan 2025 18:40:26 +1100 Subject: [PATCH 13/26] Implement PeerDAS subnet decoupling (aka custody groups) (#6736) * Implement PeerDAS subnet decoupling (aka custody groups). * Merge branch 'unstable' into decouple-subnets * Refactor feature testing for spec tests (#6737) Squashed commit of the following: commit 898d05ee17114bef77ab748dda6ff4a41a2c61d5 Merge: ffbd25e2b 7e0cddef3 Author: Jimmy Chen Date: Tue Dec 24 14:41:19 2024 +1100 Merge branch 'unstable' into refactor-ef-tests-features commit ffbd25e2be041584e823123b051cf96775dd9e6c Author: Jimmy Chen Date: Tue Dec 24 14:40:38 2024 +1100 Fix `SszStatic` tests for PeerDAS: exclude eip7594 test vectors when testing Electra types. commit aa593cf35c51da9dc1f6131a4e1699a321d2d2e0 Author: Jimmy Chen Date: Fri Dec 20 12:08:54 2024 +1100 Refactor spec testing for features and simplify usage. * Fix build. * Add input validation and improve arithmetic handling when calculating custody groups. * Address review comments re code style consistency. 
* Merge branch 'unstable' into decouple-subnets # Conflicts: # beacon_node/beacon_chain/src/kzg_utils.rs # beacon_node/beacon_chain/src/observed_data_sidecars.rs # beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs # common/eth2_network_config/built_in_network_configs/chiado/config.yaml # common/eth2_network_config/built_in_network_configs/gnosis/config.yaml # common/eth2_network_config/built_in_network_configs/holesky/config.yaml # common/eth2_network_config/built_in_network_configs/mainnet/config.yaml # common/eth2_network_config/built_in_network_configs/sepolia/config.yaml # consensus/types/src/chain_spec.rs * Update consensus/types/src/chain_spec.rs Co-authored-by: Lion - dapplion <35266934+dapplion@users.noreply.github.com> * Merge remote-tracking branch 'origin/unstable' into decouple-subnets * Update error handling. * Address review comment. * Merge remote-tracking branch 'origin/unstable' into decouple-subnets # Conflicts: # consensus/types/src/chain_spec.rs * Update PeerDAS spec tests to `1.5.0-beta.0` and fix failing unit tests. 
* Merge remote-tracking branch 'origin/unstable' into decouple-subnets # Conflicts: # beacon_node/lighthouse_network/src/peer_manager/mod.rs --- .../src/block_verification_types.rs | 2 +- .../src/data_availability_checker.rs | 21 +-- .../overflow_lru_cache.rs | 4 +- .../src/data_column_verification.rs | 4 +- beacon_node/beacon_chain/src/kzg_utils.rs | 6 +- .../src/observed_data_sidecars.rs | 2 +- beacon_node/http_api/src/block_id.rs | 2 +- beacon_node/http_api/src/publish_blocks.rs | 10 +- .../lighthouse_network/src/discovery/enr.rs | 46 ++--- .../src/discovery/subnet_predicate.rs | 14 +- beacon_node/lighthouse_network/src/metrics.rs | 8 +- .../src/peer_manager/mod.rs | 104 +++++++----- .../src/peer_manager/peerdb.rs | 18 +- .../src/peer_manager/peerdb/peer_info.rs | 6 +- .../lighthouse_network/src/rpc/codec.rs | 2 +- .../lighthouse_network/src/rpc/methods.rs | 8 +- .../lighthouse_network/src/service/mod.rs | 11 +- .../lighthouse_network/src/service/utils.rs | 12 +- .../lighthouse_network/src/types/globals.rs | 80 +++++---- .../src/network_beacon_processor/mod.rs | 9 +- .../network/src/sync/network_context.rs | 6 +- beacon_node/network/src/sync/tests/lookups.rs | 2 +- .../chiado/config.yaml | 5 +- .../gnosis/config.yaml | 5 +- .../holesky/config.yaml | 5 +- .../mainnet/config.yaml | 5 +- .../sepolia/config.yaml | 5 +- consensus/types/src/chain_spec.rs | 79 ++++++--- .../types/src/data_column_custody_group.rs | 142 ++++++++++++++++ consensus/types/src/data_column_subnet_id.rs | 160 +----------------- consensus/types/src/lib.rs | 1 + testing/ef_tests/src/cases.rs | 14 +- .../compute_columns_for_custody_groups.rs | 47 +++++ ...stody_columns.rs => get_custody_groups.rs} | 26 +-- .../cases/kzg_compute_cells_and_kzg_proofs.rs | 2 +- .../cases/kzg_recover_cells_and_kzg_proofs.rs | 2 +- .../cases/kzg_verify_cell_kzg_proof_batch.rs | 2 +- testing/ef_tests/src/handler.rs | 65 ++++--- testing/ef_tests/tests/tests.rs | 38 +++-- 39 files changed, 551 insertions(+), 429 
deletions(-) create mode 100644 consensus/types/src/data_column_custody_group.rs create mode 100644 testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs rename testing/ef_tests/src/cases/{get_custody_columns.rs => get_custody_groups.rs} (65%) diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index b81e728257e..38d0fc708ca 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -165,7 +165,7 @@ impl RpcBlock { let inner = if !custody_columns.is_empty() { RpcBlockInner::BlockAndCustodyColumns( block, - RuntimeVariableList::new(custody_columns, spec.number_of_columns)?, + RuntimeVariableList::new(custody_columns, spec.number_of_columns as usize)?, ) } else { RpcBlockInner::Block(block) diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 3ac1a954940..aa4689121ce 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -117,21 +117,16 @@ impl DataAvailabilityChecker { spec: Arc, log: Logger, ) -> Result { - let custody_subnet_count = if import_all_data_columns { - spec.data_column_sidecar_subnet_count as usize - } else { - spec.custody_requirement as usize - }; - - let subnet_sampling_size = - std::cmp::max(custody_subnet_count, spec.samples_per_slot as usize); - let sampling_column_count = - subnet_sampling_size.saturating_mul(spec.data_columns_per_subnet()); + let custody_group_count = spec.custody_group_count(import_all_data_columns); + // This should only panic if the chain spec contains invalid values. 
+ let sampling_size = spec + .sampling_size(custody_group_count) + .expect("should compute node sampling size from valid chain spec"); let inner = DataAvailabilityCheckerInner::new( OVERFLOW_LRU_CAPACITY, store, - sampling_column_count, + sampling_size as usize, spec.clone(), )?; Ok(Self { @@ -148,7 +143,7 @@ impl DataAvailabilityChecker { } pub(crate) fn is_supernode(&self) -> bool { - self.get_sampling_column_count() == self.spec.number_of_columns + self.get_sampling_column_count() == self.spec.number_of_columns as usize } /// Checks if the block root is currenlty in the availability cache awaiting import because @@ -433,7 +428,7 @@ impl DataAvailabilityChecker { .map(CustodyDataColumn::into_inner) .collect::>(); let all_data_columns = - RuntimeVariableList::from_vec(all_data_columns, self.spec.number_of_columns); + RuntimeVariableList::from_vec(all_data_columns, self.spec.number_of_columns as usize); // verify kzg for all data columns at once if !all_data_columns.is_empty() { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index a2936206aef..c8e92f7e9f5 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -598,7 +598,7 @@ impl DataAvailabilityCheckerInner { // If we're sampling all columns, it means we must be custodying all columns. 
let custody_column_count = self.sampling_column_count(); - let total_column_count = self.spec.number_of_columns; + let total_column_count = self.spec.number_of_columns as usize; let received_column_count = pending_components.verified_data_columns.len(); if pending_components.reconstruction_started { @@ -607,7 +607,7 @@ impl DataAvailabilityCheckerInner { if custody_column_count != total_column_count { return ReconstructColumnsDecision::No("not required for full node"); } - if received_column_count == self.spec.number_of_columns { + if received_column_count >= total_column_count { return ReconstructColumnsDecision::No("all columns received"); } if received_column_count < total_column_count / 2 { diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 6cfd26786aa..1bd17485ab6 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -423,7 +423,7 @@ fn verify_data_column_sidecar( data_column: &DataColumnSidecar, spec: &ChainSpec, ) -> Result<(), GossipDataColumnError> { - if data_column.index >= spec.number_of_columns as u64 { + if data_column.index >= spec.number_of_columns { return Err(GossipDataColumnError::InvalidColumnIndex(data_column.index)); } if data_column.kzg_commitments.is_empty() { @@ -611,7 +611,7 @@ fn verify_index_matches_subnet( spec: &ChainSpec, ) -> Result<(), GossipDataColumnError> { let expected_subnet: u64 = - DataColumnSubnetId::from_column_index::(data_column.index as usize, spec).into(); + DataColumnSubnetId::from_column_index(data_column.index, spec).into(); if expected_subnet != subnet { return Err(GossipDataColumnError::InvalidSubnetId { received: subnet, diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index e32ee9c24bd..dcb3864f785 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs 
@@ -193,7 +193,7 @@ fn build_data_column_sidecars( blob_cells_and_proofs_vec: Vec, spec: &ChainSpec, ) -> Result, String> { - let number_of_columns = spec.number_of_columns; + let number_of_columns = spec.number_of_columns as usize; let max_blobs_per_block = spec .max_blobs_per_block(signed_block_header.message.slot.epoch(E::slots_per_epoch())) as usize; @@ -428,7 +428,7 @@ mod test { .kzg_commitments_merkle_proof() .unwrap(); - assert_eq!(column_sidecars.len(), spec.number_of_columns); + assert_eq!(column_sidecars.len(), spec.number_of_columns as usize); for (idx, col_sidecar) in column_sidecars.iter().enumerate() { assert_eq!(col_sidecar.index, idx as u64); @@ -461,7 +461,7 @@ mod test { ) .unwrap(); - for i in 0..spec.number_of_columns { + for i in 0..spec.number_of_columns as usize { assert_eq!(reconstructed_columns.get(i), column_sidecars.get(i), "{i}"); } } diff --git a/beacon_node/beacon_chain/src/observed_data_sidecars.rs b/beacon_node/beacon_chain/src/observed_data_sidecars.rs index 48989e07d3d..1ca6c03f002 100644 --- a/beacon_node/beacon_chain/src/observed_data_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_data_sidecars.rs @@ -59,7 +59,7 @@ impl ObservableDataSidecar for DataColumnSidecar { } fn max_num_of_items(spec: &ChainSpec, _slot: Slot) -> usize { - spec.number_of_columns + spec.number_of_columns as usize } } diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 0b00958f26a..be70f615e34 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -347,7 +347,7 @@ impl BlockId { let num_found_column_keys = column_indices.len(); let num_required_columns = chain.spec.number_of_columns / 2; - let is_blob_available = num_found_column_keys >= num_required_columns; + let is_blob_available = num_found_column_keys >= num_required_columns as usize; if is_blob_available { let data_columns = column_indices diff --git a/beacon_node/http_api/src/publish_blocks.rs 
b/beacon_node/http_api/src/publish_blocks.rs index b5aa23acf8e..60d4b2f16ed 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -395,9 +395,8 @@ fn build_gossip_verified_data_columns( let gossip_verified_data_columns = data_column_sidecars .into_iter() .map(|data_column_sidecar| { - let column_index = data_column_sidecar.index as usize; - let subnet = - DataColumnSubnetId::from_column_index::(column_index, &chain.spec); + let column_index = data_column_sidecar.index; + let subnet = DataColumnSubnetId::from_column_index(column_index, &chain.spec); let gossip_verified_column = GossipVerifiedDataColumn::new(data_column_sidecar, subnet.into(), chain); @@ -520,10 +519,7 @@ fn publish_column_sidecars( let pubsub_messages = data_column_sidecars .into_iter() .map(|data_col| { - let subnet = DataColumnSubnetId::from_column_index::( - data_col.index as usize, - &chain.spec, - ); + let subnet = DataColumnSubnetId::from_column_index(data_col.index, &chain.spec); PubsubMessage::DataColumnSidecar(Box::new((subnet, data_col))) }) .collect::>(); diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index ce29480ffdb..8946c7753cc 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -25,8 +25,8 @@ pub const ETH2_ENR_KEY: &str = "eth2"; pub const ATTESTATION_BITFIELD_ENR_KEY: &str = "attnets"; /// The ENR field specifying the sync committee subnet bitfield. pub const SYNC_COMMITTEE_BITFIELD_ENR_KEY: &str = "syncnets"; -/// The ENR field specifying the peerdas custody subnet count. -pub const PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY: &str = "csc"; +/// The ENR field specifying the peerdas custody group count. +pub const PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY: &str = "cgc"; /// Extension trait for ENR's within Eth2. 
pub trait Eth2Enr { @@ -38,8 +38,8 @@ pub trait Eth2Enr { &self, ) -> Result, &'static str>; - /// The peerdas custody subnet count associated with the ENR. - fn custody_subnet_count(&self, spec: &ChainSpec) -> Result; + /// The peerdas custody group count associated with the ENR. + fn custody_group_count(&self, spec: &ChainSpec) -> Result; fn eth2(&self) -> Result; } @@ -67,16 +67,16 @@ impl Eth2Enr for Enr { .map_err(|_| "Could not decode the ENR syncnets bitfield") } - fn custody_subnet_count(&self, spec: &ChainSpec) -> Result { - let csc = self - .get_decodable::(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) - .ok_or("ENR custody subnet count non-existent")? - .map_err(|_| "Could not decode the ENR custody subnet count")?; + fn custody_group_count(&self, spec: &ChainSpec) -> Result { + let cgc = self + .get_decodable::(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY) + .ok_or("ENR custody group count non-existent")? + .map_err(|_| "Could not decode the ENR custody group count")?; - if csc >= spec.custody_requirement && csc <= spec.data_column_sidecar_subnet_count { - Ok(csc) + if (spec.custody_requirement..=spec.number_of_custody_groups).contains(&cgc) { + Ok(cgc) } else { - Err("Invalid custody subnet count in ENR") + Err("Invalid custody group count in ENR") } } @@ -253,14 +253,14 @@ pub fn build_enr( &bitfield.as_ssz_bytes().into(), ); - // only set `csc` if PeerDAS fork epoch has been scheduled + // only set `cgc` if PeerDAS fork epoch has been scheduled if spec.is_peer_das_scheduled() { - let custody_subnet_count = if config.subscribe_all_data_column_subnets { - spec.data_column_sidecar_subnet_count + let custody_group_count = if config.subscribe_all_data_column_subnets { + spec.number_of_custody_groups } else { spec.custody_requirement }; - builder.add_value(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY, &custody_subnet_count); + builder.add_value(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY, &custody_group_count); } builder @@ -287,11 +287,11 @@ fn compare_enr(local_enr: &Enr, disk_enr: 
&Enr) -> bool { && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4()) && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6()) // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY and - // PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY key to match, otherwise we use a new ENR. This will + // PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY key to match, otherwise we use a new ENR. This will // likely only be true for non-validating nodes. && local_enr.get_decodable::(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get_decodable(ATTESTATION_BITFIELD_ENR_KEY) && local_enr.get_decodable::(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get_decodable(SYNC_COMMITTEE_BITFIELD_ENR_KEY) - && local_enr.get_decodable::(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) == disk_enr.get_decodable(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) + && local_enr.get_decodable::(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY) == disk_enr.get_decodable(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY) } /// Loads enr from the given directory @@ -348,7 +348,7 @@ mod test { } #[test] - fn custody_subnet_count_default() { + fn custody_group_count_default() { let config = NetworkConfig { subscribe_all_data_column_subnets: false, ..NetworkConfig::default() @@ -358,13 +358,13 @@ mod test { let enr = build_enr_with_config(config, &spec).0; assert_eq!( - enr.custody_subnet_count::(&spec).unwrap(), + enr.custody_group_count::(&spec).unwrap(), spec.custody_requirement, ); } #[test] - fn custody_subnet_count_all() { + fn custody_group_count_all() { let config = NetworkConfig { subscribe_all_data_column_subnets: true, ..NetworkConfig::default() @@ -373,8 +373,8 @@ mod test { let enr = build_enr_with_config(config, &spec).0; assert_eq!( - enr.custody_subnet_count::(&spec).unwrap(), - spec.data_column_sidecar_subnet_count, + enr.custody_group_count::(&spec).unwrap(), + spec.number_of_custody_groups, ); } diff --git a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs 
b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs index 751f8dbb83a..400a0c2d562 100644 --- a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs +++ b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs @@ -1,10 +1,10 @@ //! The subnet predicate used for searching for a particular subnet. use super::*; use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; -use itertools::Itertools; use slog::trace; use std::ops::Deref; -use types::{ChainSpec, DataColumnSubnetId}; +use types::data_column_custody_group::compute_subnets_for_node; +use types::ChainSpec; /// Returns the predicate for a given subnet. pub fn subnet_predicate( @@ -37,13 +37,9 @@ where .as_ref() .is_ok_and(|b| b.get(*s.deref() as usize).unwrap_or(false)), Subnet::DataColumn(s) => { - if let Ok(custody_subnet_count) = enr.custody_subnet_count::(&spec) { - DataColumnSubnetId::compute_custody_subnets::( - enr.node_id().raw(), - custody_subnet_count, - &spec, - ) - .is_ok_and(|mut subnets| subnets.contains(s)) + if let Ok(custody_group_count) = enr.custody_group_count::(&spec) { + compute_subnets_for_node(enr.node_id().raw(), custody_group_count, &spec) + .is_ok_and(|subnets| subnets.contains(s)) } else { false } diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index cb9c007b91a..b36cb8075d1 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -93,11 +93,11 @@ pub static PEERS_PER_CLIENT: LazyLock> = LazyLock::new(|| { ) }); -pub static PEERS_PER_CUSTODY_SUBNET_COUNT: LazyLock> = LazyLock::new(|| { +pub static PEERS_PER_CUSTODY_GROUP_COUNT: LazyLock> = LazyLock::new(|| { try_create_int_gauge_vec( - "peers_per_custody_subnet_count", - "The current count of peers by custody subnet count", - &["custody_subnet_count"], + "peers_per_custody_group_count", + "The current count of peers by custody group count", + &["custody_group_count"], ) 
}); diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 6502a8dbff5..07c4be7959b 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -34,6 +34,9 @@ pub use peerdb::sync_status::{SyncInfo, SyncStatus}; use std::collections::{hash_map::Entry, HashMap, HashSet}; use std::net::IpAddr; use strum::IntoEnumIterator; +use types::data_column_custody_group::{ + compute_subnets_from_custody_group, get_custody_groups, CustodyIndex, +}; pub mod config; mod network_behaviour; @@ -101,6 +104,8 @@ pub struct PeerManager { /// discovery queries for subnet peers if we disconnect from existing sync /// committee subnet peers. sync_committee_subnets: HashMap, + /// A mapping of all custody groups to column subnets to avoid re-computation. + subnets_by_custody_group: HashMap>, /// The heartbeat interval to perform routine maintenance. heartbeat: tokio::time::Interval, /// Keeps track of whether the discovery service is enabled or not. 
@@ -160,6 +165,21 @@ impl PeerManager { // Set up the peer manager heartbeat interval let heartbeat = tokio::time::interval(tokio::time::Duration::from_secs(HEARTBEAT_INTERVAL)); + // Compute subnets for all custody groups + let subnets_by_custody_group = if network_globals.spec.is_peer_das_scheduled() { + (0..network_globals.spec.number_of_custody_groups) + .map(|custody_index| { + let subnets = + compute_subnets_from_custody_group(custody_index, &network_globals.spec) + .expect("Should compute subnets for all custody groups") + .collect(); + (custody_index, subnets) + }) + .collect::>>() + } else { + HashMap::new() + }; + Ok(PeerManager { network_globals, events: SmallVec::new(), @@ -170,6 +190,7 @@ impl PeerManager { target_peers: target_peer_count, temporary_banned_peers: LRUTimeCache::new(PEER_RECONNECTION_TIMEOUT), sync_committee_subnets: Default::default(), + subnets_by_custody_group, heartbeat, discovery_enabled, metrics_enabled, @@ -711,22 +732,39 @@ impl PeerManager { "peer_id" => %peer_id, "new_seq_no" => meta_data.seq_number()); } - let custody_subnet_count_opt = meta_data.custody_subnet_count().copied().ok(); + let custody_group_count_opt = meta_data.custody_group_count().copied().ok(); peer_info.set_meta_data(meta_data); if self.network_globals.spec.is_peer_das_scheduled() { // Gracefully ignore metadata/v2 peers. Potentially downscore after PeerDAS to // prioritize PeerDAS peers. 
- if let Some(custody_subnet_count) = custody_subnet_count_opt { - match self.compute_peer_custody_subnets(peer_id, custody_subnet_count) { - Ok(custody_subnets) => { + if let Some(custody_group_count) = custody_group_count_opt { + match self.compute_peer_custody_groups(peer_id, custody_group_count) { + Ok(custody_groups) => { + let custody_subnets = custody_groups + .into_iter() + .flat_map(|custody_index| { + self.subnets_by_custody_group + .get(&custody_index) + .cloned() + .unwrap_or_else(|| { + warn!( + self.log, + "Custody group not found in subnet mapping"; + "custody_index" => custody_index, + "peer_id" => %peer_id + ); + vec![] + }) + }) + .collect(); peer_info.set_custody_subnets(custody_subnets); } Err(err) => { - debug!(self.log, "Unable to compute peer custody subnets from metadata"; + debug!(self.log, "Unable to compute peer custody groups from metadata"; "info" => "Sending goodbye to peer", "peer_id" => %peer_id, - "custody_subnet_count" => custody_subnet_count, + "custody_group_count" => custody_group_count, "error" => ?err, ); invalid_meta_data = true; @@ -1312,7 +1350,7 @@ impl PeerManager { let mut inbound_ipv4_peers_connected: usize = 0; let mut inbound_ipv6_peers_connected: usize = 0; let mut peers_connected_multi: HashMap<(&str, &str), i32> = HashMap::new(); - let mut peers_per_custody_subnet_count: HashMap = HashMap::new(); + let mut peers_per_custody_group_count: HashMap = HashMap::new(); for (_, peer_info) in self.network_globals.peers.read().connected_peers() { peers_connected += 1; @@ -1345,8 +1383,8 @@ impl PeerManager { .or_default() += 1; if let Some(MetaData::V3(meta_data)) = peer_info.meta_data() { - *peers_per_custody_subnet_count - .entry(meta_data.custody_subnet_count) + *peers_per_custody_group_count + .entry(meta_data.custody_group_count) .or_default() += 1; } // Check if incoming peer is ipv4 @@ -1377,11 +1415,11 @@ impl PeerManager { // PEERS_CONNECTED metrics::set_gauge(&metrics::PEERS_CONNECTED, peers_connected); - // 
CUSTODY_SUBNET_COUNT - for (custody_subnet_count, peer_count) in peers_per_custody_subnet_count.into_iter() { + // CUSTODY_GROUP_COUNT + for (custody_group_count, peer_count) in peers_per_custody_group_count.into_iter() { metrics::set_gauge_vec( - &metrics::PEERS_PER_CUSTODY_SUBNET_COUNT, - &[&custody_subnet_count.to_string()], + &metrics::PEERS_PER_CUSTODY_GROUP_COUNT, + &[&custody_group_count.to_string()], peer_count, ) } @@ -1410,43 +1448,27 @@ impl PeerManager { } } - fn compute_peer_custody_subnets( + fn compute_peer_custody_groups( &self, peer_id: &PeerId, - custody_subnet_count: u64, - ) -> Result, String> { + custody_group_count: u64, + ) -> Result, String> { // If we don't have a node id, we cannot compute the custody duties anyway let node_id = peer_id_to_node_id(peer_id)?; let spec = &self.network_globals.spec; - if !(spec.custody_requirement..=spec.data_column_sidecar_subnet_count) - .contains(&custody_subnet_count) + if !(spec.custody_requirement..=spec.number_of_custody_groups) + .contains(&custody_group_count) { - return Err("Invalid custody subnet count in metadata: out of range".to_string()); + return Err("Invalid custody group count in metadata: out of range".to_string()); } - let custody_subnets = DataColumnSubnetId::compute_custody_subnets::( - node_id.raw(), - custody_subnet_count, - spec, - ) - .map(|subnets| subnets.collect()) - .unwrap_or_else(|e| { - // This is an unreachable scenario unless there's a bug, as we've validated the csc - // just above. 
- error!( - self.log, - "Computing peer custody subnets failed unexpectedly"; - "info" => "Falling back to default custody requirement subnets", - "peer_id" => %peer_id, - "custody_subnet_count" => custody_subnet_count, - "error" => ?e - ); - DataColumnSubnetId::compute_custody_requirement_subnets::(node_id.raw(), spec) - .collect() - }); - - Ok(custody_subnets) + get_custody_groups(node_id.raw(), custody_group_count, spec).map_err(|e| { + format!( + "Error computing peer custody groups for node {} with cgc={}: {:?}", + node_id, custody_group_count, e + ) + }) } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 22a3df1ae85..37cb5df6ea5 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1,4 +1,4 @@ -use crate::discovery::enr::PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY; +use crate::discovery::enr::PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY; use crate::discovery::{peer_id_to_node_id, CombinedKey}; use crate::{metrics, multiaddr::Multiaddr, types::Subnet, Enr, EnrExt, Gossipsub, PeerId}; use itertools::Itertools; @@ -13,6 +13,7 @@ use std::{ fmt::Formatter, }; use sync_status::SyncStatus; +use types::data_column_custody_group::compute_subnets_for_node; use types::{ChainSpec, DataColumnSubnetId, EthSpec}; pub mod client; @@ -695,8 +696,8 @@ impl PeerDB { if supernode { enr.insert( - PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY, - &spec.data_column_sidecar_subnet_count, + PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY, + &spec.number_of_custody_groups, &enr_key, ) .expect("u64 can be encoded"); @@ -714,19 +715,14 @@ impl PeerDB { if supernode { let peer_info = self.peers.get_mut(&peer_id).expect("peer exists"); let all_subnets = (0..spec.data_column_sidecar_subnet_count) - .map(|csc| csc.into()) + .map(|subnet_id| subnet_id.into()) .collect(); peer_info.set_custody_subnets(all_subnets); } else { let peer_info = 
self.peers.get_mut(&peer_id).expect("peer exists"); let node_id = peer_id_to_node_id(&peer_id).expect("convert peer_id to node_id"); - let subnets = DataColumnSubnetId::compute_custody_subnets::( - node_id.raw(), - spec.custody_requirement, - spec, - ) - .expect("should compute custody subnets") - .collect(); + let subnets = compute_subnets_for_node(node_id.raw(), spec.custody_requirement, spec) + .expect("should compute custody subnets"); peer_info.set_custody_subnets(subnets); } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index d8b3568a280..2e8f462565f 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -89,7 +89,7 @@ impl PeerInfo { } /// Returns if the peer is subscribed to a given `Subnet` from the metadata attnets/syncnets field. - /// Also returns true if the peer is assigned to custody a given data column `Subnet` computed from the metadata `custody_column_count` field or ENR `csc` field. + /// Also returns true if the peer is assigned to custody a given data column `Subnet` computed from the metadata `custody_group_count` field or ENR `cgc` field. 
pub fn on_subnet_metadata(&self, subnet: &Subnet) -> bool { if let Some(meta_data) = &self.meta_data { match subnet { @@ -101,7 +101,9 @@ impl PeerInfo { .syncnets() .is_ok_and(|s| s.get(**id as usize).unwrap_or(false)) } - Subnet::DataColumn(column) => return self.custody_subnets.contains(column), + Subnet::DataColumn(subnet_id) => { + return self.is_assigned_to_custody_subnet(subnet_id) + } } } false diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 61b2699ac5a..8981a75aed1 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -1139,7 +1139,7 @@ mod tests { seq_number: 1, attnets: EnrAttestationBitfield::::default(), syncnets: EnrSyncCommitteeBitfield::::default(), - custody_subnet_count: 1, + custody_group_count: 1, }) } diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 500188beefb..958041c53f8 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -138,7 +138,7 @@ pub struct MetaData { #[superstruct(only(V2, V3))] pub syncnets: EnrSyncCommitteeBitfield, #[superstruct(only(V3))] - pub custody_subnet_count: u64, + pub custody_group_count: u64, } impl MetaData { @@ -181,13 +181,13 @@ impl MetaData { seq_number: metadata.seq_number, attnets: metadata.attnets.clone(), syncnets: Default::default(), - custody_subnet_count: spec.custody_requirement, + custody_group_count: spec.custody_requirement, }), MetaData::V2(metadata) => MetaData::V3(MetaDataV3 { seq_number: metadata.seq_number, attnets: metadata.attnets.clone(), syncnets: metadata.syncnets.clone(), - custody_subnet_count: spec.custody_requirement, + custody_group_count: spec.custody_requirement, }), md @ MetaData::V3(_) => md.clone(), } @@ -364,7 +364,7 @@ impl DataColumnsByRangeRequest { DataColumnsByRangeRequest { start_slot: 0, count: 0, - 
columns: vec![0; spec.number_of_columns], + columns: vec![0; spec.number_of_columns as usize], } .as_ssz_bytes() .len() diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 999803b8fed..4738c76d0c2 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -198,15 +198,12 @@ impl Network { )?; // Construct the metadata - let custody_subnet_count = ctx.chain_spec.is_peer_das_scheduled().then(|| { - if config.subscribe_all_data_column_subnets { - ctx.chain_spec.data_column_sidecar_subnet_count - } else { - ctx.chain_spec.custody_requirement - } + let custody_group_count = ctx.chain_spec.is_peer_das_scheduled().then(|| { + ctx.chain_spec + .custody_group_count(config.subscribe_all_data_column_subnets) }); let meta_data = - utils::load_or_build_metadata(&config.network_dir, custody_subnet_count, &log); + utils::load_or_build_metadata(&config.network_dir, custody_group_count, &log); let seq_number = *meta_data.seq_number(); let globals = NetworkGlobals::new( enr, diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index a9eaa002ffa..5746c13c58b 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -164,8 +164,8 @@ pub fn strip_peer_id(addr: &mut Multiaddr) { /// Load metadata from persisted file. Return default metadata if loading fails. 
pub fn load_or_build_metadata( - network_dir: &std::path::Path, - custody_subnet_count: Option, + network_dir: &Path, + custody_group_count_opt: Option, log: &slog::Logger, ) -> MetaData { // We load a V2 metadata version by default (regardless of current fork) @@ -216,12 +216,12 @@ pub fn load_or_build_metadata( }; // Wrap the MetaData - let meta_data = if let Some(custody_count) = custody_subnet_count { + let meta_data = if let Some(custody_group_count) = custody_group_count_opt { MetaData::V3(MetaDataV3 { attnets: meta_data.attnets, seq_number: meta_data.seq_number, syncnets: meta_data.syncnets, - custody_subnet_count: custody_count, + custody_group_count, }) } else { MetaData::V2(meta_data) @@ -286,8 +286,8 @@ pub(crate) fn save_metadata_to_disk( ) { let _ = std::fs::create_dir_all(dir); // We always store the metadata v2 to disk because - // custody_subnet_count parameter doesn't need to be persisted across runs. - // custody_subnet_count is what the user sets it for the current run. + // custody_group_count parameter doesn't need to be persisted across runs. + // custody_group_count is what the user sets it for the current run. // This is to prevent ugly branching logic when reading the metadata from disk. 
let metadata_bytes = metadata.metadata_v2().as_ssz_bytes(); match File::create(dir.join(METADATA_FILENAME)).and_then(|mut f| f.write_all(&metadata_bytes)) { diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index 92583b7b5d0..8cce9a0f255 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -3,10 +3,13 @@ use crate::peer_manager::peerdb::PeerDB; use crate::rpc::{MetaData, MetaDataV3}; use crate::types::{BackFillState, SyncState}; use crate::{Client, Enr, EnrExt, GossipTopic, Multiaddr, NetworkConfig, PeerId}; -use itertools::Itertools; use parking_lot::RwLock; +use slog::error; use std::collections::HashSet; use std::sync::Arc; +use types::data_column_custody_group::{ + compute_columns_for_custody_group, compute_subnets_from_custody_group, get_custody_groups, +}; use types::{ChainSpec, ColumnIndex, DataColumnSubnetId, EthSpec}; pub struct NetworkGlobals { @@ -27,8 +30,8 @@ pub struct NetworkGlobals { /// The current state of the backfill sync. pub backfill_state: RwLock, /// The computed sampling subnets and columns is stored to avoid re-computing. - pub sampling_subnets: Vec, - pub sampling_columns: Vec, + pub sampling_subnets: HashSet, + pub sampling_columns: HashSet, /// Network-related configuration. Immutable after initialization. pub config: Arc, /// Ethereum chain configuration. Immutable after initialization. 
@@ -48,30 +51,43 @@ impl NetworkGlobals { let (sampling_subnets, sampling_columns) = if spec.is_peer_das_scheduled() { let node_id = enr.node_id().raw(); - let custody_subnet_count = local_metadata - .custody_subnet_count() - .copied() - .expect("custody subnet count must be set if PeerDAS is scheduled"); + let custody_group_count = match local_metadata.custody_group_count() { + Ok(&cgc) if cgc <= spec.number_of_custody_groups => cgc, + _ => { + error!( + log, + "custody_group_count from metadata is either invalid or not set. This is a bug!"; + "info" => "falling back to default custody requirement" + ); + spec.custody_requirement + } + }; - let subnet_sampling_size = std::cmp::max(custody_subnet_count, spec.samples_per_slot); + // The below `expect` calls will panic on start up if the chain spec config values used + // are invalid + let sampling_size = spec + .sampling_size(custody_group_count) + .expect("should compute node sampling size from valid chain spec"); + let custody_groups = get_custody_groups(node_id, sampling_size, &spec) + .expect("should compute node custody groups"); - let sampling_subnets = DataColumnSubnetId::compute_custody_subnets::( - node_id, - subnet_sampling_size, - &spec, - ) - .expect("sampling subnet count must be valid") - .collect::>(); + let mut sampling_subnets = HashSet::new(); + for custody_index in &custody_groups { + let subnets = compute_subnets_from_custody_group(*custody_index, &spec) + .expect("should compute custody subnets for node"); + sampling_subnets.extend(subnets); + } - let sampling_columns = sampling_subnets - .iter() - .flat_map(|subnet| subnet.columns::(&spec)) - .sorted() - .collect(); + let mut sampling_columns = HashSet::new(); + for custody_index in &custody_groups { + let columns = compute_columns_for_custody_group(*custody_index, &spec) + .expect("should compute custody columns for node"); + sampling_columns.extend(columns); + } (sampling_subnets, sampling_columns) } else { - (vec![], vec![]) + 
(HashSet::new(), HashSet::new()) }; NetworkGlobals { @@ -159,8 +175,8 @@ impl NetworkGlobals { pub fn custody_peers_for_column(&self, column_index: ColumnIndex) -> Vec { self.peers .read() - .good_custody_subnet_peer(DataColumnSubnetId::from_column_index::( - column_index as usize, + .good_custody_subnet_peer(DataColumnSubnetId::from_column_index( + column_index, &self.spec, )) .cloned() @@ -178,7 +194,7 @@ impl NetworkGlobals { seq_number: 0, attnets: Default::default(), syncnets: Default::default(), - custody_subnet_count: spec.custody_requirement, + custody_group_count: spec.custody_requirement, }); Self::new_test_globals_with_metadata(trusted_peers, metadata, log, config, spec) } @@ -209,9 +225,9 @@ mod test { let mut spec = E::default_spec(); spec.eip7594_fork_epoch = Some(Epoch::new(0)); - let custody_subnet_count = spec.data_column_sidecar_subnet_count / 2; - let subnet_sampling_size = std::cmp::max(custody_subnet_count, spec.samples_per_slot); - let metadata = get_metadata(custody_subnet_count); + let custody_group_count = spec.number_of_custody_groups / 2; + let subnet_sampling_size = spec.sampling_size(custody_group_count).unwrap(); + let metadata = get_metadata(custody_group_count); let config = Arc::new(NetworkConfig::default()); let globals = NetworkGlobals::::new_test_globals_with_metadata( @@ -233,9 +249,9 @@ mod test { let mut spec = E::default_spec(); spec.eip7594_fork_epoch = Some(Epoch::new(0)); - let custody_subnet_count = spec.data_column_sidecar_subnet_count / 2; - let subnet_sampling_size = std::cmp::max(custody_subnet_count, spec.samples_per_slot); - let metadata = get_metadata(custody_subnet_count); + let custody_group_count = spec.number_of_custody_groups / 2; + let subnet_sampling_size = spec.sampling_size(custody_group_count).unwrap(); + let metadata = get_metadata(custody_group_count); let config = Arc::new(NetworkConfig::default()); let globals = NetworkGlobals::::new_test_globals_with_metadata( @@ -251,12 +267,12 @@ mod test { ); } - 
fn get_metadata(custody_subnet_count: u64) -> MetaData { + fn get_metadata(custody_group_count: u64) -> MetaData { MetaData::V3(MetaDataV3 { seq_number: 0, attnets: Default::default(), syncnets: Default::default(), - custody_subnet_count, + custody_group_count, }) } } diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index d81d964e7cf..2d15d39c6fc 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -1122,10 +1122,8 @@ impl NetworkBeaconProcessor { messages: columns .into_iter() .map(|d| { - let subnet = DataColumnSubnetId::from_column_index::( - d.index as usize, - &chain.spec, - ); + let subnet = + DataColumnSubnetId::from_column_index(d.index, &chain.spec); PubsubMessage::DataColumnSidecar(Box::new((subnet, d))) }) .collect(), @@ -1139,7 +1137,8 @@ impl NetworkBeaconProcessor { let blob_publication_batch_interval = chain.config.blob_publication_batch_interval; let blob_publication_batches = chain.config.blob_publication_batches; - let batch_size = chain.spec.number_of_columns / blob_publication_batches; + let number_of_columns = chain.spec.number_of_columns as usize; + let batch_size = number_of_columns / blob_publication_batches; let mut publish_count = 0usize; for batch in data_columns_to_publish.chunks(batch_size) { diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index e1b2b974ec4..0a6bc8961f8 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -36,7 +36,7 @@ use requests::{ }; use slog::{debug, error, warn}; use std::collections::hash_map::Entry; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; @@ -458,7 +458,7 @@ impl SyncNetworkContext { let max_blobs_len = 
self.chain.spec.max_blobs_per_block(epoch); let info = RangeBlockComponentsRequest::new( expected_blobs, - expects_columns, + expects_columns.map(|c| c.into_iter().collect()), num_of_column_req, requested_peers, max_blobs_len as usize, @@ -471,7 +471,7 @@ impl SyncNetworkContext { fn make_columns_by_range_requests( &self, request: BlocksByRangeRequest, - custody_indexes: &Vec, + custody_indexes: &HashSet, ) -> Result, RpcRequestSendError> { let mut peer_id_to_request_map = HashMap::new(); diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index b9e38237c58..f623aa2c12e 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -2170,7 +2170,7 @@ fn custody_lookup_happy_path() { let id = r.expect_block_lookup_request(block.canonical_root()); r.complete_valid_block_request(id, block.into(), true); // for each slot we download `samples_per_slot` columns - let sample_column_count = spec.samples_per_slot * spec.data_columns_per_subnet() as u64; + let sample_column_count = spec.samples_per_slot * spec.data_columns_per_group(); let custody_ids = r.expect_only_data_columns_by_root_requests(block_root, sample_column_count as usize); r.complete_valid_custody_request(custody_ids, data_columns, false); diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index a303bea2681..35ba3af28b6 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -139,7 +139,8 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 MAX_BLOBS_PER_BLOCK: 6 # DAS -CUSTODY_REQUIREMENT: 4 -DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 +NUMBER_OF_CUSTODY_GROUPS: 128 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 +CUSTODY_REQUIREMENT: 4 diff --git 
a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 68d2b0eafe2..9ff5a161980 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -122,7 +122,8 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 MAX_BLOBS_PER_BLOCK: 6 # DAS -CUSTODY_REQUIREMENT: 4 -DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 +NUMBER_OF_CUSTODY_GROUPS: 128 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 +CUSTODY_REQUIREMENT: 4 diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index 930ce0a1bcb..d0b61422e06 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -128,7 +128,8 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 MAX_BLOBS_PER_BLOCK: 6 # DAS -CUSTODY_REQUIREMENT: 4 -DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 +NUMBER_OF_CUSTODY_GROUPS: 128 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 +CUSTODY_REQUIREMENT: 4 diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 638f6fe42f8..f92de4225dc 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -145,7 +145,8 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 MAX_BLOBS_PER_BLOCK: 6 # DAS -CUSTODY_REQUIREMENT: 4 -DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 +NUMBER_OF_CUSTODY_GROUPS: 128 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 +CUSTODY_REQUIREMENT: 4 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml 
b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 38185188976..7564d8f0f68 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -123,7 +123,8 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 MAX_BLOBS_PER_BLOCK: 6 # DAS -CUSTODY_REQUIREMENT: 4 -DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 +NUMBER_OF_CUSTODY_GROUPS: 128 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 SAMPLES_PER_SLOT: 8 +CUSTODY_REQUIREMENT: 4 diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 6594f3c44e9..9177f66b944 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -204,10 +204,11 @@ pub struct ChainSpec { * DAS params */ pub eip7594_fork_epoch: Option, - pub custody_requirement: u64, + pub number_of_columns: u64, + pub number_of_custody_groups: u64, pub data_column_sidecar_subnet_count: u64, - pub number_of_columns: usize, pub samples_per_slot: u64, + pub custody_requirement: u64, /* * Networking @@ -237,7 +238,7 @@ pub struct ChainSpec { pub max_request_data_column_sidecars: u64, pub min_epochs_for_blob_sidecars_requests: u64, pub blob_sidecar_subnet_count: u64, - max_blobs_per_block: u64, + pub max_blobs_per_block: u64, /* * Networking Electra @@ -646,10 +647,33 @@ impl ChainSpec { } } - pub fn data_columns_per_subnet(&self) -> usize { + /// Returns the number of data columns per custody group. + pub fn data_columns_per_group(&self) -> u64 { self.number_of_columns - .safe_div(self.data_column_sidecar_subnet_count as usize) - .expect("Subnet count must be greater than 0") + .safe_div(self.number_of_custody_groups) + .expect("Custody group count must be greater than 0") + } + + /// Returns the number of column sidecars to sample per slot. 
+ pub fn sampling_size(&self, custody_group_count: u64) -> Result { + let columns_per_custody_group = self + .number_of_columns + .safe_div(self.number_of_custody_groups) + .map_err(|_| "number_of_custody_groups must be greater than 0")?; + + let custody_column_count = columns_per_custody_group + .safe_mul(custody_group_count) + .map_err(|_| "Computing sampling size should not overflow")?; + + Ok(std::cmp::max(custody_column_count, self.samples_per_slot)) + } + + pub fn custody_group_count(&self, is_supernode: bool) -> u64 { + if is_supernode { + self.number_of_custody_groups + } else { + self.custody_requirement + } } /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. @@ -856,10 +880,11 @@ impl ChainSpec { * DAS params */ eip7594_fork_epoch: None, - custody_requirement: 4, - data_column_sidecar_subnet_count: 128, number_of_columns: 128, + number_of_custody_groups: 128, + data_column_sidecar_subnet_count: 128, samples_per_slot: 8, + custody_requirement: 4, /* * Network specific @@ -1193,10 +1218,12 @@ impl ChainSpec { * DAS params */ eip7594_fork_epoch: None, - custody_requirement: 4, - data_column_sidecar_subnet_count: 128, number_of_columns: 128, + number_of_custody_groups: 128, + data_column_sidecar_subnet_count: 128, samples_per_slot: 8, + custody_requirement: 4, + /* * Network specific */ @@ -1454,18 +1481,21 @@ pub struct Config { #[serde(with = "serde_utils::quoted_u64")] max_request_blob_sidecars_electra: u64, - #[serde(default = "default_custody_requirement")] + #[serde(default = "default_number_of_columns")] #[serde(with = "serde_utils::quoted_u64")] - custody_requirement: u64, + number_of_columns: u64, + #[serde(default = "default_number_of_custody_groups")] + #[serde(with = "serde_utils::quoted_u64")] + number_of_custody_groups: u64, #[serde(default = "default_data_column_sidecar_subnet_count")] #[serde(with = "serde_utils::quoted_u64")] data_column_sidecar_subnet_count: u64, - #[serde(default = 
"default_number_of_columns")] - #[serde(with = "serde_utils::quoted_u64")] - number_of_columns: u64, #[serde(default = "default_samples_per_slot")] #[serde(with = "serde_utils::quoted_u64")] samples_per_slot: u64, + #[serde(default = "default_custody_requirement")] + #[serde(with = "serde_utils::quoted_u64")] + custody_requirement: u64, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -1627,6 +1657,10 @@ const fn default_number_of_columns() -> u64 { 128 } +const fn default_number_of_custody_groups() -> u64 { + 128 +} + const fn default_samples_per_slot() -> u64 { 8 } @@ -1830,10 +1864,11 @@ impl Config { blob_sidecar_subnet_count_electra: spec.blob_sidecar_subnet_count_electra, max_request_blob_sidecars_electra: spec.max_request_blob_sidecars_electra, - custody_requirement: spec.custody_requirement, + number_of_columns: spec.number_of_columns, + number_of_custody_groups: spec.number_of_custody_groups, data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count, - number_of_columns: spec.number_of_columns as u64, samples_per_slot: spec.samples_per_slot, + custody_requirement: spec.custody_requirement, } } @@ -1909,10 +1944,11 @@ impl Config { max_blobs_per_block_electra, blob_sidecar_subnet_count_electra, max_request_blob_sidecars_electra, - custody_requirement, - data_column_sidecar_subnet_count, number_of_columns, + number_of_custody_groups, + data_column_sidecar_subnet_count, samples_per_slot, + custody_requirement, } = self; if preset_base != E::spec_name().to_string().as_str() { @@ -1992,10 +2028,11 @@ impl Config { max_request_data_column_sidecars, ), - custody_requirement, + number_of_columns, + number_of_custody_groups, data_column_sidecar_subnet_count, - number_of_columns: number_of_columns as usize, samples_per_slot, + custody_requirement, ..chain_spec.clone() }) diff --git a/consensus/types/src/data_column_custody_group.rs b/consensus/types/src/data_column_custody_group.rs new file mode 100644 index 00000000000..bb204c34a2d --- /dev/null 
+++ b/consensus/types/src/data_column_custody_group.rs @@ -0,0 +1,142 @@ +use crate::{ChainSpec, ColumnIndex, DataColumnSubnetId}; +use alloy_primitives::U256; +use itertools::Itertools; +use maplit::hashset; +use safe_arith::{ArithError, SafeArith}; +use std::collections::HashSet; + +pub type CustodyIndex = u64; + +#[derive(Debug)] +pub enum DataColumnCustodyGroupError { + InvalidCustodyGroup(CustodyIndex), + InvalidCustodyGroupCount(u64), + ArithError(ArithError), +} + +/// The `get_custody_groups` function is used to determine the custody groups that a node is +/// assigned to. +/// +/// spec: https://github.com/ethereum/consensus-specs/blob/8e0d0d48e81d6c7c5a8253ab61340f5ea5bac66a/specs/fulu/das-core.md#get_custody_groups +pub fn get_custody_groups( + raw_node_id: [u8; 32], + custody_group_count: u64, + spec: &ChainSpec, +) -> Result, DataColumnCustodyGroupError> { + if custody_group_count > spec.number_of_custody_groups { + return Err(DataColumnCustodyGroupError::InvalidCustodyGroupCount( + custody_group_count, + )); + } + + let mut custody_groups: HashSet = hashset![]; + let mut current_id = U256::from_be_slice(&raw_node_id); + while custody_groups.len() < custody_group_count as usize { + let mut node_id_bytes = [0u8; 32]; + node_id_bytes.copy_from_slice(current_id.as_le_slice()); + let hash = ethereum_hashing::hash_fixed(&node_id_bytes); + let hash_prefix: [u8; 8] = hash[0..8] + .try_into() + .expect("hash_fixed produces a 32 byte array"); + let hash_prefix_u64 = u64::from_le_bytes(hash_prefix); + let custody_group = hash_prefix_u64 + .safe_rem(spec.number_of_custody_groups) + .expect("spec.number_of_custody_groups must not be zero"); + custody_groups.insert(custody_group); + + current_id = current_id.wrapping_add(U256::from(1u64)); + } + + Ok(custody_groups) +} + +/// Returns the columns that are associated with a given custody group. 
+/// +/// spec: https://github.com/ethereum/consensus-specs/blob/8e0d0d48e81d6c7c5a8253ab61340f5ea5bac66a/specs/fulu/das-core.md#compute_columns_for_custody_group +pub fn compute_columns_for_custody_group( + custody_group: CustodyIndex, + spec: &ChainSpec, +) -> Result, DataColumnCustodyGroupError> { + let number_of_custody_groups = spec.number_of_custody_groups; + if custody_group >= number_of_custody_groups { + return Err(DataColumnCustodyGroupError::InvalidCustodyGroup( + custody_group, + )); + } + + let mut columns = Vec::new(); + for i in 0..spec.data_columns_per_group() { + let column = number_of_custody_groups + .safe_mul(i) + .and_then(|v| v.safe_add(custody_group)) + .map_err(DataColumnCustodyGroupError::ArithError)?; + columns.push(column); + } + + Ok(columns.into_iter()) +} + +pub fn compute_subnets_for_node( + raw_node_id: [u8; 32], + custody_group_count: u64, + spec: &ChainSpec, +) -> Result, DataColumnCustodyGroupError> { + let custody_groups = get_custody_groups(raw_node_id, custody_group_count, spec)?; + let mut subnets = HashSet::new(); + + for custody_group in custody_groups { + let custody_group_subnets = compute_subnets_from_custody_group(custody_group, spec)?; + subnets.extend(custody_group_subnets); + } + + Ok(subnets) +} + +/// Returns the subnets that are associated with a given custody group. +pub fn compute_subnets_from_custody_group( + custody_group: CustodyIndex, + spec: &ChainSpec, +) -> Result + '_, DataColumnCustodyGroupError> { + let result = compute_columns_for_custody_group(custody_group, spec)? 
+ .map(|column_index| DataColumnSubnetId::from_column_index(column_index, spec)) + .unique(); + Ok(result) +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_compute_columns_for_custody_group() { + let mut spec = ChainSpec::mainnet(); + spec.number_of_custody_groups = 64; + spec.number_of_columns = 128; + let columns_per_custody_group = spec.number_of_columns / spec.number_of_custody_groups; + + for custody_group in 0..spec.number_of_custody_groups { + let columns = compute_columns_for_custody_group(custody_group, &spec) + .unwrap() + .collect::>(); + assert_eq!(columns.len(), columns_per_custody_group as usize); + } + } + + #[test] + fn test_compute_subnets_from_custody_group() { + let mut spec = ChainSpec::mainnet(); + spec.number_of_custody_groups = 64; + spec.number_of_columns = 256; + spec.data_column_sidecar_subnet_count = 128; + + let subnets_per_custody_group = + spec.data_column_sidecar_subnet_count / spec.number_of_custody_groups; + + for custody_group in 0..spec.number_of_custody_groups { + let subnets = compute_subnets_from_custody_group(custody_group, &spec) + .unwrap() + .collect::>(); + assert_eq!(subnets.len(), subnets_per_custody_group as usize); + } + } +} diff --git a/consensus/types/src/data_column_subnet_id.rs b/consensus/types/src/data_column_subnet_id.rs index df61d711c19..5b3eef24ccc 100644 --- a/consensus/types/src/data_column_subnet_id.rs +++ b/consensus/types/src/data_column_subnet_id.rs @@ -1,11 +1,8 @@ //! Identifies each data column subnet by an integer identifier. 
use crate::data_column_sidecar::ColumnIndex; -use crate::{ChainSpec, EthSpec}; -use alloy_primitives::U256; -use itertools::Itertools; +use crate::ChainSpec; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; -use std::collections::HashSet; use std::fmt::{self, Display}; use std::ops::{Deref, DerefMut}; @@ -18,76 +15,14 @@ impl DataColumnSubnetId { id.into() } - pub fn from_column_index(column_index: usize, spec: &ChainSpec) -> Self { - (column_index - .safe_rem(spec.data_column_sidecar_subnet_count as usize) + pub fn from_column_index(column_index: ColumnIndex, spec: &ChainSpec) -> Self { + column_index + .safe_rem(spec.data_column_sidecar_subnet_count) .expect( "data_column_sidecar_subnet_count should never be zero if this function is called", - ) as u64) + ) .into() } - - #[allow(clippy::arithmetic_side_effects)] - pub fn columns(&self, spec: &ChainSpec) -> impl Iterator { - let subnet = self.0; - let data_column_sidecar_subnet = spec.data_column_sidecar_subnet_count; - let columns_per_subnet = spec.data_columns_per_subnet() as u64; - (0..columns_per_subnet).map(move |i| data_column_sidecar_subnet * i + subnet) - } - - /// Compute required subnets to subscribe to given the node id. 
- #[allow(clippy::arithmetic_side_effects)] - pub fn compute_custody_subnets( - raw_node_id: [u8; 32], - custody_subnet_count: u64, - spec: &ChainSpec, - ) -> Result, Error> { - if custody_subnet_count > spec.data_column_sidecar_subnet_count { - return Err(Error::InvalidCustodySubnetCount(custody_subnet_count)); - } - - let mut subnets: HashSet = HashSet::new(); - let mut current_id = U256::from_be_slice(&raw_node_id); - while (subnets.len() as u64) < custody_subnet_count { - let mut node_id_bytes = [0u8; 32]; - node_id_bytes.copy_from_slice(current_id.as_le_slice()); - let hash = ethereum_hashing::hash_fixed(&node_id_bytes); - let hash_prefix: [u8; 8] = hash[0..8] - .try_into() - .expect("hash_fixed produces a 32 byte array"); - let hash_prefix_u64 = u64::from_le_bytes(hash_prefix); - let subnet = hash_prefix_u64 % spec.data_column_sidecar_subnet_count; - - if !subnets.contains(&subnet) { - subnets.insert(subnet); - } - - if current_id == U256::MAX { - current_id = U256::ZERO - } - current_id += U256::from(1u64) - } - Ok(subnets.into_iter().map(DataColumnSubnetId::new)) - } - - /// Compute the custody subnets for a given node id with the default `custody_requirement`. - /// This operation should be infallable, and empty iterator is returned if it fails unexpectedly. 
- pub fn compute_custody_requirement_subnets( - node_id: [u8; 32], - spec: &ChainSpec, - ) -> impl Iterator { - Self::compute_custody_subnets::(node_id, spec.custody_requirement, spec) - .expect("should compute default custody subnets") - } - - pub fn compute_custody_columns( - raw_node_id: [u8; 32], - custody_subnet_count: u64, - spec: &ChainSpec, - ) -> Result, Error> { - Self::compute_custody_subnets::(raw_node_id, custody_subnet_count, spec) - .map(|subnet| subnet.flat_map(|subnet| subnet.columns::(spec)).sorted()) - } } impl Display for DataColumnSubnetId { @@ -139,88 +74,3 @@ impl From for Error { Error::ArithError(e) } } - -#[cfg(test)] -mod test { - use crate::data_column_subnet_id::DataColumnSubnetId; - use crate::MainnetEthSpec; - use crate::Uint256; - use crate::{EthSpec, GnosisEthSpec, MinimalEthSpec}; - - type E = MainnetEthSpec; - - #[test] - fn test_compute_subnets_for_data_column() { - let spec = E::default_spec(); - let node_ids = [ - "0", - "88752428858350697756262172400162263450541348766581994718383409852729519486397", - "18732750322395381632951253735273868184515463718109267674920115648614659369468", - "27726842142488109545414954493849224833670205008410190955613662332153332462900", - "39755236029158558527862903296867805548949739810920318269566095185775868999998", - "31899136003441886988955119620035330314647133604576220223892254902004850516297", - "58579998103852084482416614330746509727562027284701078483890722833654510444626", - "28248042035542126088870192155378394518950310811868093527036637864276176517397", - "60930578857433095740782970114409273483106482059893286066493409689627770333527", - "103822458477361691467064888613019442068586830412598673713899771287914656699997", - ] - .into_iter() - .map(|v| Uint256::from_str_radix(v, 10).unwrap().to_be_bytes::<32>()) - .collect::>(); - - let custody_requirement = 4; - for node_id in node_ids { - let computed_subnets = DataColumnSubnetId::compute_custody_subnets::( - node_id, - custody_requirement, - 
&spec, - ) - .unwrap(); - let computed_subnets: Vec<_> = computed_subnets.collect(); - - // the number of subnets is equal to the custody requirement - assert_eq!(computed_subnets.len() as u64, custody_requirement); - - let subnet_count = spec.data_column_sidecar_subnet_count; - for subnet in computed_subnets { - let columns: Vec<_> = subnet.columns::(&spec).collect(); - // the number of columns is equal to the specified number of columns per subnet - assert_eq!(columns.len(), spec.data_columns_per_subnet()); - - for pair in columns.windows(2) { - // each successive column index is offset by the number of subnets - assert_eq!(pair[1] - pair[0], subnet_count); - } - } - } - } - - #[test] - fn test_compute_custody_requirement_subnets_never_panics() { - let node_id = [1u8; 32]; - test_compute_custody_requirement_subnets_with_spec::(node_id); - test_compute_custody_requirement_subnets_with_spec::(node_id); - test_compute_custody_requirement_subnets_with_spec::(node_id); - } - - fn test_compute_custody_requirement_subnets_with_spec(node_id: [u8; 32]) { - let _ = DataColumnSubnetId::compute_custody_requirement_subnets::( - node_id, - &E::default_spec(), - ); - } - - #[test] - fn test_columns_subnet_conversion() { - let spec = E::default_spec(); - for subnet in 0..spec.data_column_sidecar_subnet_count { - let subnet_id = DataColumnSubnetId::new(subnet); - for column_index in subnet_id.columns::(&spec) { - assert_eq!( - subnet_id, - DataColumnSubnetId::from_column_index::(column_index as usize, &spec) - ); - } - } - } -} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 76e414b2f18..dcfa918146a 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -104,6 +104,7 @@ pub mod slot_data; pub mod sqlite; pub mod blob_sidecar; +pub mod data_column_custody_group; pub mod data_column_sidecar; pub mod data_column_subnet_id; pub mod light_client_header; diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index 
8f5571d64a6..54a142a96b6 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -13,12 +13,13 @@ mod bls_fast_aggregate_verify; mod bls_sign_msg; mod bls_verify_msg; mod common; +mod compute_columns_for_custody_groups; mod epoch_processing; mod fork; mod fork_choice; mod genesis_initialization; mod genesis_validity; -mod get_custody_columns; +mod get_custody_groups; mod kzg_blob_to_kzg_commitment; mod kzg_compute_blob_kzg_proof; mod kzg_compute_cells_and_kzg_proofs; @@ -49,11 +50,12 @@ pub use bls_fast_aggregate_verify::*; pub use bls_sign_msg::*; pub use bls_verify_msg::*; pub use common::SszStaticType; +pub use compute_columns_for_custody_groups::*; pub use epoch_processing::*; pub use fork::ForkTest; pub use genesis_initialization::*; pub use genesis_validity::*; -pub use get_custody_columns::*; +pub use get_custody_groups::*; pub use kzg_blob_to_kzg_commitment::*; pub use kzg_compute_blob_kzg_proof::*; pub use kzg_compute_cells_and_kzg_proofs::*; @@ -89,18 +91,18 @@ pub use transition::TransitionTest; /// to return `true` for the feature in order for the feature test vector to be tested. #[derive(Debug, PartialEq, Clone, Copy)] pub enum FeatureName { - Eip7594, + Fulu, } impl FeatureName { pub fn list_all() -> Vec { - vec![FeatureName::Eip7594] + vec![FeatureName::Fulu] } /// `ForkName` to use when running the feature tests. 
pub fn fork_name(&self) -> ForkName { match self { - FeatureName::Eip7594 => ForkName::Deneb, + FeatureName::Fulu => ForkName::Electra, } } } @@ -108,7 +110,7 @@ impl FeatureName { impl Display for FeatureName { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { - FeatureName::Eip7594 => f.write_str("eip7594"), + FeatureName::Fulu => f.write_str("fulu"), } } } diff --git a/testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs b/testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs new file mode 100644 index 00000000000..1d0bf951bc6 --- /dev/null +++ b/testing/ef_tests/src/cases/compute_columns_for_custody_groups.rs @@ -0,0 +1,47 @@ +use super::*; +use serde::Deserialize; +use std::marker::PhantomData; +use types::data_column_custody_group::{compute_columns_for_custody_group, CustodyIndex}; + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +pub struct ComputeColumnsForCustodyGroups { + /// The custody group index. + pub custody_group: CustodyIndex, + /// The list of resulting custody columns. 
+ pub result: Vec, + #[serde(skip)] + _phantom: PhantomData, +} + +impl LoadCase for ComputeColumnsForCustodyGroups { + fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { + decode::yaml_decode_file(path.join("meta.yaml").as_path()) + } +} + +impl Case for ComputeColumnsForCustodyGroups { + fn is_enabled_for_fork(_fork_name: ForkName) -> bool { + false + } + + fn is_enabled_for_feature(feature_name: FeatureName) -> bool { + feature_name == FeatureName::Fulu + } + + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let spec = E::default_spec(); + let computed_columns = compute_columns_for_custody_group(self.custody_group, &spec) + .expect("should compute custody columns from group") + .collect::>(); + + let expected = &self.result; + if computed_columns == *expected { + Ok(()) + } else { + Err(Error::NotEqual(format!( + "Got {computed_columns:?}\nExpected {expected:?}" + ))) + } + } +} diff --git a/testing/ef_tests/src/cases/get_custody_columns.rs b/testing/ef_tests/src/cases/get_custody_groups.rs similarity index 65% rename from testing/ef_tests/src/cases/get_custody_columns.rs rename to testing/ef_tests/src/cases/get_custody_groups.rs index 71b17aeaa3f..f8c4370aeb7 100644 --- a/testing/ef_tests/src/cases/get_custody_columns.rs +++ b/testing/ef_tests/src/cases/get_custody_groups.rs @@ -2,31 +2,34 @@ use super::*; use alloy_primitives::U256; use serde::Deserialize; use std::marker::PhantomData; -use types::DataColumnSubnetId; +use types::data_column_custody_group::get_custody_groups; #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec", deny_unknown_fields)] -pub struct GetCustodyColumns { +pub struct GetCustodyGroups { + /// The NodeID input. pub node_id: String, - pub custody_subnet_count: u64, + /// The count of custody groups. + pub custody_group_count: u64, + /// The list of resulting custody groups. 
pub result: Vec, #[serde(skip)] _phantom: PhantomData, } -impl LoadCase for GetCustodyColumns { +impl LoadCase for GetCustodyGroups { fn load_from_dir(path: &Path, _fork_name: ForkName) -> Result { decode::yaml_decode_file(path.join("meta.yaml").as_path()) } } -impl Case for GetCustodyColumns { +impl Case for GetCustodyGroups { fn is_enabled_for_fork(_fork_name: ForkName) -> bool { false } fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name == FeatureName::Eip7594 + feature_name == FeatureName::Fulu } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { @@ -34,13 +37,10 @@ impl Case for GetCustodyColumns { let node_id = U256::from_str_radix(&self.node_id, 10) .map_err(|e| Error::FailedToParseTest(format!("{e:?}")))?; let raw_node_id = node_id.to_be_bytes::<32>(); - let computed = DataColumnSubnetId::compute_custody_columns::( - raw_node_id, - self.custody_subnet_count, - &spec, - ) - .expect("should compute custody columns") - .collect::>(); + let mut computed = get_custody_groups(raw_node_id, self.custody_group_count, &spec) + .map(|set| set.into_iter().collect::>()) + .expect("should compute custody groups"); + computed.sort(); let expected = &self.result; if computed == *expected { diff --git a/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs b/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs index a7219f0629a..8df43bb2671 100644 --- a/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs +++ b/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs @@ -31,7 +31,7 @@ impl Case for KZGComputeCellsAndKZGProofs { } fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name == FeatureName::Eip7594 + feature_name == FeatureName::Fulu } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs 
b/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs index b72b3a05cd6..26ab4e96b59 100644 --- a/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs +++ b/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs @@ -32,7 +32,7 @@ impl Case for KZGRecoverCellsAndKZGProofs { } fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name == FeatureName::Eip7594 + feature_name == FeatureName::Fulu } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs index 815ad7a5bcf..fc625063b11 100644 --- a/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs +++ b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs @@ -34,7 +34,7 @@ impl Case for KZGVerifyCellKZGProofBatch { } fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name == FeatureName::Eip7594 + feature_name == FeatureName::Fulu } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 2e49b1301d4..6c0165efab1 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -39,6 +39,10 @@ pub trait Handler { } } + // Run feature tests for future forks that are not yet added to `ForkName`. + // This runs tests in the directory named by the feature instead of the fork name. + // e.g. consensus-spec-tests/tests/general/[feature_name]/[runner_name] + // e.g. 
consensus-spec-tests/tests/general/peerdas/ssz_static for feature_name in FeatureName::list_all() { if self.is_enabled_for_feature(feature_name) { self.run_for_feature(feature_name); @@ -350,23 +354,20 @@ where self.supported_forks.contains(&fork_name) } - fn is_enabled_for_feature(&self, _feature_name: FeatureName) -> bool { - // This ensures we only run the tests **once** for `Eip7594`, using the types matching the - // correct fork, e.g. `Eip7594` uses SSZ types from `Deneb` as of spec test version - // `v1.5.0-alpha.8`, therefore the `Eip7594` tests should get included when testing Deneb types. + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + // This ensures we only run the tests **once** for the feature, using the types matching the + // correct fork, e.g. `Fulu` uses SSZ types from `Electra` fork as of spec test version + // `v1.5.0-beta.0`, therefore the `Fulu` tests should get included when testing Electra types. // - // e.g. Eip7594 test vectors are executed in the first line below, but excluded in the 2nd + // e.g. 
Fulu test vectors are executed in the first line below, but excluded in the 2nd // line when testing the type `AttestationElectra`: // // ``` // SszStaticHandler::, MainnetEthSpec>::pre_electra().run(); // SszStaticHandler::, MainnetEthSpec>::electra_only().run(); // ``` - /* TODO(das): re-enable - feature_name == FeatureName::Eip7594 + feature_name == FeatureName::Fulu && self.supported_forks.contains(&feature_name.fork_name()) - */ - false } } @@ -388,10 +389,8 @@ where BeaconState::::name().into() } - fn is_enabled_for_feature(&self, _feature_name: FeatureName) -> bool { - // TODO(das): re-enable - // feature_name == FeatureName::Eip7594 - false + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Fulu } } @@ -415,10 +414,8 @@ where T::name().into() } - fn is_enabled_for_feature(&self, _feature_name: FeatureName) -> bool { - // TODO(das): re-enable - // feature_name == FeatureName::Eip7594 - false + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Fulu } } @@ -877,10 +874,30 @@ impl Handler for KZGVerifyKZGProofHandler { #[derive(Derivative)] #[derivative(Default(bound = ""))] -pub struct GetCustodyColumnsHandler(PhantomData); +pub struct GetCustodyGroupsHandler(PhantomData); + +impl Handler for GetCustodyGroupsHandler { + type Case = cases::GetCustodyGroups; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "networking" + } + + fn handler_name(&self) -> String { + "get_custody_groups".into() + } +} + +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct ComputeColumnsForCustodyGroupHandler(PhantomData); -impl Handler for GetCustodyColumnsHandler { - type Case = cases::GetCustodyColumns; +impl Handler for ComputeColumnsForCustodyGroupHandler { + type Case = cases::ComputeColumnsForCustodyGroups; fn config_name() -> &'static str { E::name() @@ -891,7 +908,7 @@ impl Handler for GetCustodyColumnsHandler 
{ } fn handler_name(&self) -> String { - "get_custody_columns".into() + "compute_columns_for_custody_group".into() } } @@ -1002,10 +1019,8 @@ impl Handler for KzgInclusionMerkleProofValidityHandler bool { - // TODO(das): re-enable this - // feature_name == FeatureName::Eip7594 - false + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Fulu } } diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 7c268123fae..61581128d4f 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -237,7 +237,9 @@ macro_rules! ssz_static_test_no_run { #[cfg(feature = "fake_crypto")] mod ssz_static { - use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler}; + use ef_tests::{ + FeatureName, Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler, + }; use types::historical_summary::HistoricalSummary; use types::{ AttesterSlashingBase, AttesterSlashingElectra, ConsolidationRequest, DepositRequest, @@ -622,23 +624,21 @@ mod ssz_static { SszStaticHandler::::capella_and_later().run(); } - /* FIXME(das): re-enable #[test] fn data_column_sidecar() { SszStaticHandler::, MinimalEthSpec>::deneb_only() - .run_for_feature(FeatureName::Eip7594); + .run_for_feature(FeatureName::Fulu); SszStaticHandler::, MainnetEthSpec>::deneb_only() - .run_for_feature(FeatureName::Eip7594); + .run_for_feature(FeatureName::Fulu); } #[test] fn data_column_identifier() { SszStaticHandler::::deneb_only() - .run_for_feature(FeatureName::Eip7594); + .run_for_feature(FeatureName::Fulu); SszStaticHandler::::deneb_only() - .run_for_feature(FeatureName::Eip7594); + .run_for_feature(FeatureName::Fulu); } - */ #[test] fn consolidation() { @@ -899,25 +899,23 @@ fn kzg_verify_kzg_proof() { KZGVerifyKZGProofHandler::::default().run(); } -/* FIXME(das): re-enable these tests #[test] fn kzg_compute_cells_and_proofs() { KZGComputeCellsAndKZGProofHandler::::default() - 
.run_for_feature(FeatureName::Eip7594); + .run_for_feature(FeatureName::Fulu); } #[test] fn kzg_verify_cell_proof_batch() { KZGVerifyCellKZGProofBatchHandler::::default() - .run_for_feature(FeatureName::Eip7594); + .run_for_feature(FeatureName::Fulu); } #[test] fn kzg_recover_cells_and_proofs() { KZGRecoverCellsAndKZGProofHandler::::default() - .run_for_feature(FeatureName::Eip7594); + .run_for_feature(FeatureName::Fulu); } -*/ #[test] fn beacon_state_merkle_proof_validity() { @@ -949,10 +947,16 @@ fn rewards() { } } -/* FIXME(das): re-enable these tests #[test] -fn get_custody_columns() { - GetCustodyColumnsHandler::::default().run_for_feature(FeatureName::Eip7594); - GetCustodyColumnsHandler::::default().run_for_feature(FeatureName::Eip7594); +fn get_custody_groups() { + GetCustodyGroupsHandler::::default().run_for_feature(FeatureName::Fulu); + GetCustodyGroupsHandler::::default().run_for_feature(FeatureName::Fulu); +} + +#[test] +fn compute_columns_for_custody_group() { + ComputeColumnsForCustodyGroupHandler::::default() + .run_for_feature(FeatureName::Fulu); + ComputeColumnsForCustodyGroupHandler::::default() + .run_for_feature(FeatureName::Fulu); } -*/ From b1a19a8b20b26f2efc527238318149dd9d18dab6 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 15 Jan 2025 20:43:00 +0800 Subject: [PATCH 14/26] Remove ineffectual block RPC limits post merge (#6798) * Remove ineffectual block RPC limits post merge * Remove more things --- .../lighthouse_network/src/rpc/protocol.rs | 89 +------- consensus/types/src/beacon_block.rs | 194 +----------------- consensus/types/src/execution_payload.rs | 52 ----- 3 files changed, 9 insertions(+), 326 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 780dff937d3..80f15c94459 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ 
-17,12 +17,11 @@ use tokio_util::{ compat::{Compat, FuturesAsyncReadCompatExt}, }; use types::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockElectra, - BeaconBlockFulu, BlobSidecar, ChainSpec, DataColumnSidecar, EmptyBlock, EthSpec, EthSpecId, - ForkContext, ForkName, LightClientBootstrap, LightClientBootstrapAltair, - LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, - LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, MinimalEthSpec, - Signature, SignedBeaconBlock, + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BlobSidecar, ChainSpec, DataColumnSidecar, + EmptyBlock, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap, + LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, + LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate, + MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -55,74 +54,16 @@ pub static SIGNED_BEACON_BLOCK_ALTAIR_MAX: LazyLock = LazyLock::new(|| { .len() }); -pub static SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD: LazyLock = LazyLock::new(|| { - SignedBeaconBlock::::from_block( - BeaconBlock::Capella(BeaconBlockCapella::full(&MainnetEthSpec::default_spec())), - Signature::empty(), - ) - .as_ssz_bytes() - .len() -}); - -pub static SIGNED_BEACON_BLOCK_ELECTRA_MAX_WITHOUT_PAYLOAD: LazyLock = LazyLock::new(|| { - SignedBeaconBlock::::from_block( - BeaconBlock::Electra(BeaconBlockElectra::full(&MainnetEthSpec::default_spec())), - Signature::empty(), - ) - .as_ssz_bytes() - .len() -}); - -pub static SIGNED_BEACON_BLOCK_FULU_MAX_WITHOUT_PAYLOAD: LazyLock = LazyLock::new(|| { - SignedBeaconBlock::::from_block( - BeaconBlock::Fulu(BeaconBlockFulu::full(&MainnetEthSpec::default_spec())), - Signature::empty(), - ) - .as_ssz_bytes() - .len() -}); - /// The `BeaconBlockBellatrix` 
block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. /// We calculate the value from its fields instead of constructing the block and checking the length. /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network /// with `max_chunk_size`. -/// -/// FIXME: Given that these limits are useless we should probably delete them. See: -/// -/// https://github.com/sigp/lighthouse/issues/6790 pub static SIGNED_BEACON_BLOCK_BELLATRIX_MAX: LazyLock = LazyLock::new(|| // Size of a full altair block *SIGNED_BEACON_BLOCK_ALTAIR_MAX + types::ExecutionPayload::::max_execution_payload_bellatrix_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET); // Adding the additional ssz offset for the `ExecutionPayload` field -pub static SIGNED_BEACON_BLOCK_CAPELLA_MAX: LazyLock = LazyLock::new(|| { - *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD - + types::ExecutionPayload::::max_execution_payload_capella_size() // adding max size of execution payload (~16gb) - + ssz::BYTES_PER_LENGTH_OFFSET -}); // Adding the additional ssz offset for the `ExecutionPayload` field - -pub static SIGNED_BEACON_BLOCK_DENEB_MAX: LazyLock = LazyLock::new(|| { - *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD - + types::ExecutionPayload::::max_execution_payload_deneb_size() // adding max size of execution payload (~16gb) - + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload` - + ssz::BYTES_PER_LENGTH_OFFSET -}); // Length offset for the blob commitments field. 
- // -pub static SIGNED_BEACON_BLOCK_ELECTRA_MAX: LazyLock = LazyLock::new(|| { - *SIGNED_BEACON_BLOCK_ELECTRA_MAX_WITHOUT_PAYLOAD - + types::ExecutionPayload::::max_execution_payload_electra_size() // adding max size of execution payload (~16gb) - + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional ssz offset for the `ExecutionPayload` field - + ssz::BYTES_PER_LENGTH_OFFSET -}); // Length offset for the blob commitments field. - -pub static SIGNED_BEACON_BLOCK_FULU_MAX: LazyLock = LazyLock::new(|| { - *SIGNED_BEACON_BLOCK_FULU_MAX_WITHOUT_PAYLOAD - + types::ExecutionPayload::::max_execution_payload_fulu_size() - + ssz::BYTES_PER_LENGTH_OFFSET - + ssz::BYTES_PER_LENGTH_OFFSET -}); - pub static BLOB_SIDECAR_SIZE: LazyLock = LazyLock::new(BlobSidecar::::max_size); @@ -203,26 +144,12 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair blocks *SIGNED_BEACON_BLOCK_ALTAIR_MAX, // Altair block is larger than base blocks ), - ForkName::Bellatrix => RpcLimits::new( + // After the merge the max SSZ size of a block is absurdly big. 
The size is actually + // bound by other constants, so here we default to the bellatrix's max value + _ => RpcLimits::new( *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and bellatrix blocks *SIGNED_BEACON_BLOCK_BELLATRIX_MAX, // Bellatrix block is larger than base and altair blocks ), - ForkName::Capella => RpcLimits::new( - *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and bellatrix blocks - *SIGNED_BEACON_BLOCK_CAPELLA_MAX, // Capella block is larger than base, altair and merge blocks - ), - ForkName::Deneb => RpcLimits::new( - *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and bellatrix blocks - *SIGNED_BEACON_BLOCK_DENEB_MAX, // Deneb block is larger than all prior fork blocks - ), - ForkName::Electra => RpcLimits::new( - *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and bellatrix blocks - *SIGNED_BEACON_BLOCK_ELECTRA_MAX, // Electra block is larger than Deneb block - ), - ForkName::Fulu => RpcLimits::new( - *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than all other blocks - *SIGNED_BEACON_BLOCK_FULU_MAX, // Fulu block is largest - ), } } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index d72550aa121..6ea897cf1a7 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -12,7 +12,7 @@ use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -use self::indexed_attestation::{IndexedAttestationBase, IndexedAttestationElectra}; +use self::indexed_attestation::IndexedAttestationBase; /// A block of the `BeaconChain`. #[superstruct( @@ -499,52 +499,6 @@ impl> EmptyBlock for BeaconBlockBell } } -impl> BeaconBlockCapella { - /// Return a Capella block where the block has maximum size. 
- pub fn full(spec: &ChainSpec) -> Self { - let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); - let bls_to_execution_changes = vec![ - SignedBlsToExecutionChange { - message: BlsToExecutionChange { - validator_index: 0, - from_bls_pubkey: PublicKeyBytes::empty(), - to_execution_address: Address::ZERO, - }, - signature: Signature::empty() - }; - E::max_bls_to_execution_changes() - ] - .into(); - let sync_aggregate = SyncAggregate { - sync_committee_signature: AggregateSignature::empty(), - sync_committee_bits: BitVector::default(), - }; - BeaconBlockCapella { - slot: spec.genesis_slot, - proposer_index: 0, - parent_root: Hash256::zero(), - state_root: Hash256::zero(), - body: BeaconBlockBodyCapella { - proposer_slashings: base_block.body.proposer_slashings, - attester_slashings: base_block.body.attester_slashings, - attestations: base_block.body.attestations, - deposits: base_block.body.deposits, - voluntary_exits: base_block.body.voluntary_exits, - bls_to_execution_changes, - sync_aggregate, - randao_reveal: Signature::empty(), - eth1_data: Eth1Data { - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - deposit_count: 0, - }, - graffiti: Graffiti::default(), - execution_payload: Payload::Capella::default(), - }, - } - } -} - impl> EmptyBlock for BeaconBlockCapella { /// Returns an empty Capella block to be used during genesis. fn empty(spec: &ChainSpec) -> Self { @@ -604,79 +558,6 @@ impl> EmptyBlock for BeaconBlockDene } } -impl> BeaconBlockElectra { - /// Return a Electra block where the block has maximum size. 
- pub fn full(spec: &ChainSpec) -> Self { - let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); - let indexed_attestation: IndexedAttestationElectra = IndexedAttestationElectra { - attesting_indices: VariableList::new(vec![0_u64; E::MaxValidatorsPerSlot::to_usize()]) - .unwrap(), - data: AttestationData::default(), - signature: AggregateSignature::empty(), - }; - let attester_slashings = vec![ - AttesterSlashingElectra { - attestation_1: indexed_attestation.clone(), - attestation_2: indexed_attestation, - }; - E::max_attester_slashings_electra() - ] - .into(); - let attestation = AttestationElectra { - aggregation_bits: BitList::with_capacity(E::MaxValidatorsPerSlot::to_usize()).unwrap(), - data: AttestationData::default(), - signature: AggregateSignature::empty(), - committee_bits: BitVector::new(), - }; - let mut attestations_electra = vec![]; - for _ in 0..E::MaxAttestationsElectra::to_usize() { - attestations_electra.push(attestation.clone()); - } - - let bls_to_execution_changes = vec![ - SignedBlsToExecutionChange { - message: BlsToExecutionChange { - validator_index: 0, - from_bls_pubkey: PublicKeyBytes::empty(), - to_execution_address: Address::ZERO, - }, - signature: Signature::empty() - }; - E::max_bls_to_execution_changes() - ] - .into(); - let sync_aggregate = SyncAggregate { - sync_committee_signature: AggregateSignature::empty(), - sync_committee_bits: BitVector::default(), - }; - BeaconBlockElectra { - slot: spec.genesis_slot, - proposer_index: 0, - parent_root: Hash256::zero(), - state_root: Hash256::zero(), - body: BeaconBlockBodyElectra { - proposer_slashings: base_block.body.proposer_slashings, - attester_slashings, - attestations: attestations_electra.into(), - deposits: base_block.body.deposits, - voluntary_exits: base_block.body.voluntary_exits, - bls_to_execution_changes, - sync_aggregate, - randao_reveal: Signature::empty(), - eth1_data: Eth1Data { - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - 
deposit_count: 0, - }, - graffiti: Graffiti::default(), - execution_payload: Payload::Electra::default(), - blob_kzg_commitments: VariableList::empty(), - execution_requests: ExecutionRequests::default(), - }, - } - } -} - impl> EmptyBlock for BeaconBlockElectra { /// Returns an empty Electra block to be used during genesis. fn empty(spec: &ChainSpec) -> Self { @@ -708,79 +589,6 @@ impl> EmptyBlock for BeaconBlockElec } } -impl> BeaconBlockFulu { - /// Return a Fulu block where the block has maximum size. - pub fn full(spec: &ChainSpec) -> Self { - let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); - let indexed_attestation: IndexedAttestationElectra = IndexedAttestationElectra { - attesting_indices: VariableList::new(vec![0_u64; E::MaxValidatorsPerSlot::to_usize()]) - .unwrap(), - data: AttestationData::default(), - signature: AggregateSignature::empty(), - }; - let attester_slashings = vec![ - AttesterSlashingElectra { - attestation_1: indexed_attestation.clone(), - attestation_2: indexed_attestation, - }; - E::max_attester_slashings_electra() - ] - .into(); - let attestation = AttestationElectra { - aggregation_bits: BitList::with_capacity(E::MaxValidatorsPerSlot::to_usize()).unwrap(), - data: AttestationData::default(), - signature: AggregateSignature::empty(), - committee_bits: BitVector::new(), - }; - let mut attestations_electra = vec![]; - for _ in 0..E::MaxAttestationsElectra::to_usize() { - attestations_electra.push(attestation.clone()); - } - - let bls_to_execution_changes = vec![ - SignedBlsToExecutionChange { - message: BlsToExecutionChange { - validator_index: 0, - from_bls_pubkey: PublicKeyBytes::empty(), - to_execution_address: Address::ZERO, - }, - signature: Signature::empty() - }; - E::max_bls_to_execution_changes() - ] - .into(); - let sync_aggregate = SyncAggregate { - sync_committee_signature: AggregateSignature::empty(), - sync_committee_bits: BitVector::default(), - }; - BeaconBlockFulu { - slot: spec.genesis_slot, - 
proposer_index: 0, - parent_root: Hash256::zero(), - state_root: Hash256::zero(), - body: BeaconBlockBodyFulu { - proposer_slashings: base_block.body.proposer_slashings, - attester_slashings, - attestations: attestations_electra.into(), - deposits: base_block.body.deposits, - voluntary_exits: base_block.body.voluntary_exits, - bls_to_execution_changes, - sync_aggregate, - randao_reveal: Signature::empty(), - eth1_data: Eth1Data { - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - deposit_count: 0, - }, - graffiti: Graffiti::default(), - execution_payload: Payload::Fulu::default(), - blob_kzg_commitments: VariableList::empty(), - execution_requests: ExecutionRequests::default(), - }, - } - } -} - impl> EmptyBlock for BeaconBlockFulu { /// Returns an empty Fulu block to be used during genesis. fn empty(spec: &ChainSpec) -> Self { diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index c619d614871..2df66343af1 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -128,58 +128,6 @@ impl ExecutionPayload { // Max size of variable length `transactions` field + (E::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + E::max_bytes_per_transaction())) } - - #[allow(clippy::arithmetic_side_effects)] - /// Returns the maximum size of an execution payload. 
- pub fn max_execution_payload_capella_size() -> usize { - // Fixed part - ExecutionPayloadCapella::::default().as_ssz_bytes().len() - // Max size of variable length `extra_data` field - + (E::max_extra_data_bytes() * ::ssz_fixed_len()) - // Max size of variable length `transactions` field - + (E::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + E::max_bytes_per_transaction())) - // Max size of variable length `withdrawals` field - + (E::max_withdrawals_per_payload() * ::ssz_fixed_len()) - } - - #[allow(clippy::arithmetic_side_effects)] - /// Returns the maximum size of an execution payload. - pub fn max_execution_payload_deneb_size() -> usize { - // Fixed part - ExecutionPayloadDeneb::::default().as_ssz_bytes().len() - // Max size of variable length `extra_data` field - + (E::max_extra_data_bytes() * ::ssz_fixed_len()) - // Max size of variable length `transactions` field - + (E::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + E::max_bytes_per_transaction())) - // Max size of variable length `withdrawals` field - + (E::max_withdrawals_per_payload() * ::ssz_fixed_len()) - } - - #[allow(clippy::arithmetic_side_effects)] - /// Returns the maximum size of an execution payload. - pub fn max_execution_payload_electra_size() -> usize { - // Fixed part - ExecutionPayloadElectra::::default().as_ssz_bytes().len() - // Max size of variable length `extra_data` field - + (E::max_extra_data_bytes() * ::ssz_fixed_len()) - // Max size of variable length `transactions` field - + (E::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + E::max_bytes_per_transaction())) - // Max size of variable length `withdrawals` field - + (E::max_withdrawals_per_payload() * ::ssz_fixed_len()) - } - - #[allow(clippy::arithmetic_side_effects)] - /// Returns the maximum size of an execution payload. 
- pub fn max_execution_payload_fulu_size() -> usize { - // Fixed part - ExecutionPayloadFulu::::default().as_ssz_bytes().len() - // Max size of variable length `extra_data` field - + (E::max_extra_data_bytes() * ::ssz_fixed_len()) - // Max size of variable length `transactions` field - + (E::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + E::max_bytes_per_transaction())) - // Max size of variable length `withdrawals` field - + (E::max_withdrawals_per_payload() * ::ssz_fixed_len()) - } } impl ForkVersionDeserialize for ExecutionPayload { From 669932aa671c69013f6133cdda7cb6c19b0832ae Mon Sep 17 00:00:00 2001 From: Daniel Knopik <107140945+dknopik@users.noreply.github.com> Date: Thu, 16 Jan 2025 02:48:50 +0100 Subject: [PATCH 15/26] Misc. dependency cleanup (#6810) * remove ensure_dir_exists (2 deps saved) * group UNHANDLED_ERRORs into a generic (2 deps saved) * Introduce separate `health_metrics` crate * separate health_metrics crate * remove metrics from warp_utils * move ProcessHealth::observe and SystemHealth::observe to health_metrics * fix errors * nitpick `Cargo.toml`s --------- Co-authored-by: Daniel Knopik # Conflicts: # Cargo.toml --- Cargo.lock | 21 ++- Cargo.toml | 2 + account_manager/src/validator/create.rs | 11 +- account_manager/src/validator/recover.rs | 8 +- account_manager/src/wallet/mod.rs | 5 +- beacon_node/http_api/Cargo.toml | 1 + .../http_api/src/attestation_performance.rs | 18 +-- beacon_node/http_api/src/attester_duties.rs | 18 ++- beacon_node/http_api/src/block_id.rs | 28 ++-- .../http_api/src/block_packing_efficiency.rs | 12 +- beacon_node/http_api/src/block_rewards.rs | 22 +-- .../http_api/src/build_block_contents.rs | 2 +- beacon_node/http_api/src/lib.rs | 51 ++++--- beacon_node/http_api/src/produce_block.rs | 4 +- beacon_node/http_api/src/proposer_duties.rs | 20 +-- .../http_api/src/standard_block_rewards.rs | 4 +- beacon_node/http_api/src/state_id.rs | 24 ++-- .../http_api/src/sync_committee_rewards.rs | 6 +- 
beacon_node/http_api/src/sync_committees.rs | 8 +- beacon_node/http_api/src/ui.rs | 6 +- beacon_node/http_metrics/Cargo.toml | 1 + beacon_node/http_metrics/src/metrics.rs | 2 +- common/account_utils/Cargo.toml | 1 - .../src/validator_definitions.rs | 5 +- common/directory/src/lib.rs | 13 +- common/eth2/Cargo.toml | 6 +- common/eth2/src/lighthouse.rs | 122 ----------------- common/health_metrics/Cargo.toml | 12 ++ common/health_metrics/src/lib.rs | 2 + .../src/metrics.rs | 1 + common/health_metrics/src/observe.rs | 127 ++++++++++++++++++ common/monitoring_api/Cargo.toml | 1 + common/monitoring_api/src/gather.rs | 1 + common/monitoring_api/src/lib.rs | 1 + common/validator_dir/Cargo.toml | 1 - common/validator_dir/src/builder.rs | 5 +- common/warp_utils/Cargo.toml | 2 - common/warp_utils/src/lib.rs | 1 - common/warp_utils/src/reject.rs | 38 +----- validator_client/http_api/Cargo.toml | 1 + validator_client/http_api/src/lib.rs | 1 + validator_client/http_metrics/Cargo.toml | 1 + validator_client/http_metrics/src/lib.rs | 2 +- 43 files changed, 303 insertions(+), 315 deletions(-) create mode 100644 common/health_metrics/Cargo.toml create mode 100644 common/health_metrics/src/lib.rs rename common/{warp_utils => health_metrics}/src/metrics.rs (99%) create mode 100644 common/health_metrics/src/observe.rs diff --git a/Cargo.lock b/Cargo.lock index c62e9fbc878..aa9bdd2afc7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -43,7 +43,6 @@ dependencies = [ name = "account_utils" version = "0.1.0" dependencies = [ - "directory", "eth2_keystore", "eth2_wallet", "filesystem", @@ -2572,9 +2571,7 @@ dependencies = [ "lighthouse_network", "mediatype", "pretty_reqwest_error", - "procfs", "proto_array", - "psutil", "reqwest", "reqwest-eventsource", "sensitive_url", @@ -3710,6 +3707,16 @@ dependencies = [ "http 0.2.12", ] +[[package]] +name = "health_metrics" +version = "0.1.0" +dependencies = [ + "eth2", + "metrics", + "procfs", + "psutil", +] + [[package]] name = "heck" version = "0.4.1" 
@@ -3951,6 +3958,7 @@ dependencies = [ "execution_layer", "futures", "genesis", + "health_metrics", "hex", "lighthouse_network", "lighthouse_version", @@ -3986,6 +3994,7 @@ name = "http_metrics" version = "0.1.0" dependencies = [ "beacon_chain", + "health_metrics", "lighthouse_network", "lighthouse_version", "logging", @@ -5716,6 +5725,7 @@ name = "monitoring_api" version = "0.1.0" dependencies = [ "eth2", + "health_metrics", "lighthouse_version", "metrics", "regex", @@ -9561,7 +9571,6 @@ dependencies = [ "bls", "deposit_contract", "derivative", - "directory", "eth2_keystore", "filesystem", "hex", @@ -9589,6 +9598,7 @@ dependencies = [ "filesystem", "futures", "graffiti_file", + "health_metrics", "initialized_validators", "itertools 0.10.5", "lighthouse_version", @@ -9621,6 +9631,7 @@ dependencies = [ name = "validator_http_metrics" version = "0.1.0" dependencies = [ + "health_metrics", "lighthouse_version", "malloc_utils", "metrics", @@ -9799,7 +9810,6 @@ dependencies = [ name = "warp_utils" version = "0.1.0" dependencies = [ - "beacon_chain", "bytes", "eth2", "headers", @@ -9808,7 +9818,6 @@ dependencies = [ "serde", "serde_array_query", "serde_json", - "state_processing", "tokio", "types", "warp", diff --git a/Cargo.toml b/Cargo.toml index 233e5fa775b..e30b6aa2b60 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,6 +33,7 @@ members = [ "common/eth2_network_config", "common/eth2_wallet_manager", "common/filesystem", + "common/health_metrics", "common/lighthouse_version", "common/lockfile", "common/logging", @@ -252,6 +253,7 @@ filesystem = { path = "common/filesystem" } fork_choice = { path = "consensus/fork_choice" } genesis = { path = "beacon_node/genesis" } gossipsub = { path = "beacon_node/lighthouse_network/gossipsub/" } +health_metrics = { path = "common/health_metrics" } http_api = { path = "beacon_node/http_api" } initialized_validators = { path = "validator_client/initialized_validators" } int_to_bytes = { path = "consensus/int_to_bytes" } diff --git 
a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index 73e0ad54d47..3db8c3f152d 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -6,14 +6,13 @@ use account_utils::{ }; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; -use directory::{ - ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_SECRET_DIR, DEFAULT_WALLET_DIR, -}; +use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR, DEFAULT_WALLET_DIR}; use environment::Environment; use eth2_wallet_manager::WalletManager; use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use std::ffi::OsStr; use std::fs; +use std::fs::create_dir_all; use std::path::{Path, PathBuf}; use types::EthSpec; use validator_dir::Builder as ValidatorDirBuilder; @@ -156,8 +155,10 @@ pub fn cli_run( )); } - ensure_dir_exists(&validator_dir)?; - ensure_dir_exists(&secrets_dir)?; + create_dir_all(&validator_dir) + .map_err(|e| format!("Could not create validator dir at {validator_dir:?}: {e:?}"))?; + create_dir_all(&secrets_dir) + .map_err(|e| format!("Could not create secrets dir at {secrets_dir:?}: {e:?}"))?; eprintln!("secrets-dir path {:?}", secrets_dir); eprintln!("wallets-dir path {:?}", wallet_base_dir); diff --git a/account_manager/src/validator/recover.rs b/account_manager/src/validator/recover.rs index ddf754edac9..19d161a468f 100644 --- a/account_manager/src/validator/recover.rs +++ b/account_manager/src/validator/recover.rs @@ -5,10 +5,10 @@ use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilde use account_utils::{random_password, read_mnemonic_from_cli, STDIN_INPUTS_FLAG}; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; -use directory::ensure_dir_exists; use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR}; use eth2_wallet::bip39::Seed; use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType, 
ValidatorKeystores}; +use std::fs::create_dir_all; use std::path::PathBuf; use validator_dir::Builder as ValidatorDirBuilder; pub const CMD: &str = "recover"; @@ -91,8 +91,10 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin eprintln!("secrets-dir path: {:?}", secrets_dir); - ensure_dir_exists(&validator_dir)?; - ensure_dir_exists(&secrets_dir)?; + create_dir_all(&validator_dir) + .map_err(|e| format!("Could not create validator dir at {validator_dir:?}: {e:?}"))?; + create_dir_all(&secrets_dir) + .map_err(|e| format!("Could not create secrets dir at {secrets_dir:?}: {e:?}"))?; eprintln!(); eprintln!("WARNING: KEY RECOVERY CAN LEAD TO DUPLICATING VALIDATORS KEYS, WHICH CAN LEAD TO SLASHING."); diff --git a/account_manager/src/wallet/mod.rs b/account_manager/src/wallet/mod.rs index 020858db772..c34f0363a48 100644 --- a/account_manager/src/wallet/mod.rs +++ b/account_manager/src/wallet/mod.rs @@ -5,7 +5,8 @@ pub mod recover; use crate::WALLETS_DIR_FLAG; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; -use directory::{ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_WALLET_DIR}; +use directory::{parse_path_or_default_with_flag, DEFAULT_WALLET_DIR}; +use std::fs::create_dir_all; use std::path::PathBuf; pub const CMD: &str = "wallet"; @@ -44,7 +45,7 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { } else { parse_path_or_default_with_flag(matches, WALLETS_DIR_FLAG, DEFAULT_WALLET_DIR)? 
}; - ensure_dir_exists(&wallet_base_dir)?; + create_dir_all(&wallet_base_dir).map_err(|_| "Could not create wallet base dir")?; eprintln!("wallet-dir path: {:?}", wallet_base_dir); diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 5d601008bc0..0ced27e4464 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -17,6 +17,7 @@ ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } futures = { workspace = true } +health_metrics = { workspace = true } hex = { workspace = true } lighthouse_network = { workspace = true } lighthouse_version = { workspace = true } diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index d4f9916814a..2f3f3404456 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -7,7 +7,7 @@ use state_processing::{ }; use std::sync::Arc; use types::{BeaconState, BeaconStateError, EthSpec, Hash256}; -use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_error}; +use warp_utils::reject::{custom_bad_request, custom_server_error, unhandled_error}; const MAX_REQUEST_RANGE_EPOCHS: usize = 100; const BLOCK_ROOT_CHUNK_SIZE: usize = 100; @@ -50,7 +50,7 @@ pub fn get_attestation_performance( let end_slot = end_epoch.end_slot(T::EthSpec::slots_per_epoch()); // Ensure end_epoch is smaller than the current epoch - 1. - let current_epoch = chain.epoch().map_err(beacon_chain_error)?; + let current_epoch = chain.epoch().map_err(unhandled_error)?; if query.end_epoch >= current_epoch - 1 { return Err(custom_bad_request(format!( "end_epoch must be less than the current epoch - 1. 
current: {}, end: {}", @@ -83,7 +83,7 @@ pub fn get_attestation_performance( let index_range = if target.to_lowercase() == "global" { chain .with_head(|head| Ok((0..head.beacon_state.validators().len() as u64).collect())) - .map_err(beacon_chain_error)? + .map_err(unhandled_error::)? } else { vec![target.parse::().map_err(|_| { custom_bad_request(format!( @@ -96,10 +96,10 @@ pub fn get_attestation_performance( // Load block roots. let mut block_roots: Vec = chain .forwards_iter_block_roots_until(start_slot, end_slot) - .map_err(beacon_chain_error)? + .map_err(unhandled_error)? .map(|res| res.map(|(root, _)| root)) .collect::, _>>() - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; block_roots.dedup(); // Load first block so we can get its parent. @@ -113,7 +113,7 @@ pub fn get_attestation_performance( .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root)) }) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; // Load the block of the prior slot which will be used to build the starting state. let prior_block = chain @@ -122,14 +122,14 @@ pub fn get_attestation_performance( maybe_block .ok_or_else(|| BeaconChainError::MissingBeaconBlock(first_block.parent_root())) }) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; // Load state for block replay. let state_root = prior_block.state_root(); let state = chain .get_state(&state_root, Some(prior_slot)) .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; // Allocate an AttestationPerformance vector for each validator in the range. 
let mut perfs: Vec = @@ -198,7 +198,7 @@ pub fn get_attestation_performance( .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root)) }) - .map_err(beacon_chain_error) + .map_err(unhandled_error) }) .collect::, _>>()?; diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 6c7dc3348c1..8905b24cded 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -16,9 +16,7 @@ pub fn attester_duties( request_indices: &[u64], chain: &BeaconChain, ) -> Result { - let current_epoch = chain - .epoch() - .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = chain.epoch().map_err(warp_utils::reject::unhandled_error)?; // Determine what the current epoch would be if we fast-forward our system clock by // `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. @@ -57,7 +55,7 @@ fn cached_attestation_duties( let (duties, dependent_root, execution_status) = chain .validator_attestation_duties(request_indices, request_epoch, head_block_root) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; convert_to_api_response( duties, @@ -82,7 +80,7 @@ fn compute_historic_attester_duties( let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let head = &cached_head.snapshot; if head.beacon_state.current_epoch() <= request_epoch { @@ -131,13 +129,13 @@ fn compute_historic_attester_duties( state .build_committee_cache(relative_epoch, &chain.spec) .map_err(BeaconChainError::from) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let dependent_root = state // The only block which decides its own shuffling is the genesis block. 
.attester_shuffling_decision_root(chain.genesis_block_root, relative_epoch) .map_err(BeaconChainError::from) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let duties = request_indices .iter() @@ -147,7 +145,7 @@ fn compute_historic_attester_duties( .map_err(BeaconChainError::from) }) .collect::>() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; convert_to_api_response( duties, @@ -181,7 +179,7 @@ fn ensure_state_knows_attester_duties_for_epoch( // A "partial" state advance is adequate since attester duties don't rely on state roots. partial_state_advance(state, Some(state_root), target_slot, spec) .map_err(BeaconChainError::from) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; } Ok(()) @@ -208,7 +206,7 @@ fn convert_to_api_response( let usize_indices = indices.iter().map(|i| *i as usize).collect::>(); let index_to_pubkey_map = chain .validator_pubkey_bytes_many(&usize_indices) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let data = duties .into_iter() diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index be70f615e34..cdef1521ec9 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -38,7 +38,7 @@ impl BlockId { let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; Ok(( cached_head.head_block_root(), execution_status.is_optimistic_or_invalid(), @@ -63,10 +63,10 @@ impl BlockId { CoreBlockId::Slot(slot) => { let execution_optimistic = chain .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let root = chain .block_root_at_slot(*slot, 
WhenSlotSkipped::None) - .map_err(warp_utils::reject::beacon_chain_error) + .map_err(warp_utils::reject::unhandled_error) .and_then(|root_opt| { root_opt.ok_or_else(|| { warp_utils::reject::custom_not_found(format!( @@ -96,17 +96,17 @@ impl BlockId { .store .block_exists(root) .map_err(BeaconChainError::DBError) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? { let execution_optimistic = chain .canonical_head .fork_choice_read_lock() .is_optimistic_or_invalid_block(root) .map_err(BeaconChainError::ForkChoiceError) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let blinded_block = chain .get_blinded_block(root) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? .ok_or_else(|| { warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -116,7 +116,7 @@ impl BlockId { let block_slot = blinded_block.slot(); let finalized = chain .is_finalized_block(root, block_slot) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; Ok((*root, execution_optimistic, finalized)) } else { Err(warp_utils::reject::custom_not_found(format!( @@ -134,7 +134,7 @@ impl BlockId { ) -> Result>, warp::Rejection> { chain .get_blinded_block(root) - .map_err(warp_utils::reject::beacon_chain_error) + .map_err(warp_utils::reject::unhandled_error) } /// Return the `SignedBeaconBlock` identified by `self`. 
@@ -154,7 +154,7 @@ impl BlockId { let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; Ok(( cached_head.snapshot.beacon_block.clone_as_blinded(), execution_status.is_optimistic_or_invalid(), @@ -211,7 +211,7 @@ impl BlockId { let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; Ok(( cached_head.snapshot.beacon_block.clone(), execution_status.is_optimistic_or_invalid(), @@ -223,7 +223,7 @@ impl BlockId { chain .get_block(&root) .await - .map_err(warp_utils::reject::beacon_chain_error) + .map_err(warp_utils::reject::unhandled_error) .and_then(|block_opt| match block_opt { Some(block) => { if block.slot() != *slot { @@ -245,7 +245,7 @@ impl BlockId { chain .get_block(&root) .await - .map_err(warp_utils::reject::beacon_chain_error) + .map_err(warp_utils::reject::unhandled_error) .and_then(|block_opt| { block_opt .map(|block| (Arc::new(block), execution_optimistic, finalized)) @@ -311,7 +311,7 @@ impl BlockId { let blob_sidecar_list = chain .store .get_blobs(&root) - .map_err(|e| warp_utils::reject::beacon_chain_error(e.into()))? + .map_err(|e| warp_utils::reject::unhandled_error(BeaconChainError::from(e)))? 
.blobs() .ok_or_else(|| { warp_utils::reject::custom_not_found(format!("no blobs stored for block {root}")) @@ -356,7 +356,7 @@ impl BlockId { |column_index| match chain.get_data_column(&root, &column_index) { Ok(Some(data_column)) => Some(Ok(data_column)), Ok(None) => None, - Err(e) => Some(Err(warp_utils::reject::beacon_chain_error(e))), + Err(e) => Some(Err(warp_utils::reject::unhandled_error(e))), }, ) .collect::, _>>()?; diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index 66c71872786..431547f10b1 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -13,7 +13,7 @@ use types::{ AttestationRef, BeaconCommittee, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, OwnedBeaconCommittee, RelativeEpoch, SignedBeaconBlock, Slot, }; -use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_error}; +use warp_utils::reject::{custom_bad_request, custom_server_error, unhandled_error}; /// Load blocks from block roots in chunks to reduce load on memory. const BLOCK_ROOT_CHUNK_SIZE: usize = 100; @@ -263,9 +263,9 @@ pub fn get_block_packing_efficiency( // Load block roots. let mut block_roots: Vec = chain .forwards_iter_block_roots_until(start_slot_of_prior_epoch, end_slot) - .map_err(beacon_chain_error)? + .map_err(unhandled_error)? .collect::, _>>() - .map_err(beacon_chain_error)? + .map_err(unhandled_error)? .iter() .map(|(root, _)| *root) .collect(); @@ -280,7 +280,7 @@ pub fn get_block_packing_efficiency( .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root)) }) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; // Load state for block replay. 
let starting_state_root = first_block.state_root(); @@ -290,7 +290,7 @@ pub fn get_block_packing_efficiency( .and_then(|maybe_state| { maybe_state.ok_or(BeaconChainError::MissingBeaconState(starting_state_root)) }) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; // Initialize response vector. let mut response = Vec::new(); @@ -392,7 +392,7 @@ pub fn get_block_packing_efficiency( .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root)) }) - .map_err(beacon_chain_error) + .map_err(unhandled_error) }) .collect::, _>>()?; diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index ad71e9e9d00..0cc878bb48f 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -7,7 +7,7 @@ use std::num::NonZeroUsize; use std::sync::Arc; use types::beacon_block::BlindedBeaconBlock; use types::non_zero_usize::new_non_zero_usize; -use warp_utils::reject::{beacon_chain_error, beacon_state_error, custom_bad_request}; +use warp_utils::reject::{beacon_state_error, custom_bad_request, unhandled_error}; const STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(2); @@ -30,23 +30,23 @@ pub fn get_block_rewards( let end_block_root = chain .block_root_at_slot(end_slot, WhenSlotSkipped::Prev) - .map_err(beacon_chain_error)? + .map_err(unhandled_error)? .ok_or_else(|| custom_bad_request(format!("block at end slot {} unknown", end_slot)))?; let blocks = chain .store .load_blocks_to_replay(start_slot, end_slot, end_block_root) - .map_err(|e| beacon_chain_error(e.into()))?; + .map_err(|e| unhandled_error(BeaconChainError::from(e)))?; let state_root = chain .state_root_at_slot(prior_slot) - .map_err(beacon_chain_error)? + .map_err(unhandled_error)? 
.ok_or_else(|| custom_bad_request(format!("prior state at slot {} unknown", prior_slot)))?; let mut state = chain .get_state(&state_root, Some(prior_slot)) .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; state .build_caches(&chain.spec) @@ -73,12 +73,12 @@ pub fn get_block_rewards( .state_root_iter( chain .forwards_iter_state_roots_until(prior_slot, end_slot) - .map_err(beacon_chain_error)?, + .map_err(unhandled_error)?, ) .no_signature_verification() .minimal_block_root_verification() .apply_blocks(blocks, None) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; if block_replayer.state_root_miss() { warn!( @@ -125,7 +125,7 @@ pub fn compute_block_rewards( ); let parent_block = chain .get_blinded_block(&parent_root) - .map_err(beacon_chain_error)? + .map_err(unhandled_error)? .ok_or_else(|| { custom_bad_request(format!( "parent block not known or not canonical: {:?}", @@ -135,7 +135,7 @@ pub fn compute_block_rewards( let parent_state = chain .get_state(&parent_block.state_root(), Some(parent_block.slot())) - .map_err(beacon_chain_error)? + .map_err(unhandled_error)? 
.ok_or_else(|| { custom_bad_request(format!( "no state known for parent block: {:?}", @@ -148,7 +148,7 @@ pub fn compute_block_rewards( .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) .minimal_block_root_verification() .apply_blocks(vec![], Some(block.slot())) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error::)?; if block_replayer.state_root_miss() { warn!( @@ -176,7 +176,7 @@ pub fn compute_block_rewards( &mut reward_cache, true, ) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; block_rewards.push(block_reward); } diff --git a/beacon_node/http_api/src/build_block_contents.rs b/beacon_node/http_api/src/build_block_contents.rs index c2ccb6695eb..fb8fba0731d 100644 --- a/beacon_node/http_api/src/build_block_contents.rs +++ b/beacon_node/http_api/src/build_block_contents.rs @@ -23,7 +23,7 @@ pub fn build_block_contents( } = block; let Some((kzg_proofs, blobs)) = blob_items else { - return Err(warp_utils::reject::block_production_error( + return Err(warp_utils::reject::unhandled_error( BlockProductionError::MissingBlobs, )); }; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index febdf692590..d5c6c115670 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -50,6 +50,7 @@ use eth2::types::{ ValidatorStatus, ValidatorsRequestBody, }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; +use health_metrics::observe::Observe; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; use logging::SSELoggingComponents; @@ -938,9 +939,9 @@ pub fn serve( ) } } - _ => { - warp_utils::reject::beacon_chain_error(e.into()) - } + _ => warp_utils::reject::unhandled_error( + BeaconChainError::from(e), + ), } })?; @@ -1067,7 +1068,7 @@ pub fn serve( let validators = chain .validator_indices(sync_committee.pubkeys.iter()) - 
.map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let validator_aggregates = validators .chunks_exact(T::EthSpec::sync_subcommittee_size()) @@ -1147,7 +1148,7 @@ pub fn serve( let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; ( cached_head.head_block_root(), cached_head.snapshot.beacon_block.clone_as_blinded(), @@ -1161,13 +1162,13 @@ pub fn serve( BlockId::from_root(parent_root).blinded_block(&chain)?; let (root, _slot) = chain .forwards_iter_block_roots(parent.slot()) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? // Ignore any skip-slots immediately following the parent. .find(|res| { res.as_ref().is_ok_and(|(root, _)| *root != parent_root) }) .transpose() - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? .ok_or_else(|| { warp_utils::reject::custom_not_found(format!( "child of block with root {}", @@ -1248,7 +1249,7 @@ pub fn serve( let canonical = chain .block_root_at_slot(block.slot(), WhenSlotSkipped::None) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? 
.is_some_and(|canonical| root == canonical); let data = api_types::BlockHeaderData { @@ -2932,7 +2933,7 @@ pub fn serve( let (head, head_execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let head_slot = head.head_slot(); let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { @@ -2992,7 +2993,7 @@ pub fn serve( .blocking_response_task(Priority::P0, move || { let is_optimistic = chain .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let is_syncing = !network_globals.sync_state.read().is_synced(); @@ -3302,9 +3303,7 @@ pub fn serve( task_spawner.blocking_json_task(Priority::P0, move || { not_synced_filter?; - let current_slot = chain - .slot() - .map_err(warp_utils::reject::beacon_chain_error)?; + let current_slot = chain.slot().map_err(warp_utils::reject::unhandled_error)?; // allow a tolerance of one slot to account for clock skew if query.slot > current_slot + 1 { @@ -3318,7 +3317,7 @@ pub fn serve( .produce_unaggregated_attestation(query.slot, query.committee_index) .map(|attestation| attestation.data().clone()) .map(api_types::GenericResponse::from) - .map_err(warp_utils::reject::beacon_chain_error) + .map_err(warp_utils::reject::unhandled_error) }) }, ); @@ -3690,11 +3689,9 @@ pub fn serve( .execution_layer .as_ref() .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; - let current_slot = chain - .slot() - .map_err(warp_utils::reject::beacon_chain_error)?; + let current_slot = chain.slot().map_err(warp_utils::reject::unhandled_error)?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); debug!( @@ -3747,12 +3744,12 @@ pub fn serve( .execution_layer .as_ref() .ok_or(BeaconChainError::ExecutionLayerMissing) - 
.map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let current_slot = chain .slot_clock .now_or_genesis() .ok_or(BeaconChainError::UnableToReadSlot) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); debug!( @@ -3848,12 +3845,12 @@ pub fn serve( .execution_layer .as_ref() .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? .builder(); let builder = arc_builder .as_ref() .ok_or(BeaconChainError::BuilderMissing) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; builder .post_builder_validators(&filtered_registration_data) .await @@ -3969,9 +3966,8 @@ pub fn serve( chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { // Ensure the request is for either the current, previous or next epoch. - let current_epoch = chain - .epoch() - .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = + chain.epoch().map_err(warp_utils::reject::unhandled_error)?; let prev_epoch = current_epoch.saturating_sub(Epoch::new(1)); let next_epoch = current_epoch.saturating_add(Epoch::new(1)); @@ -4010,9 +4006,8 @@ pub fn serve( chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { // Ensure the request is for either the current, previous or next epoch. 
- let current_epoch = chain - .epoch() - .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = + chain.epoch().map_err(warp_utils::reject::unhandled_error)?; let prev_epoch = current_epoch.saturating_sub(Epoch::new(1)); let next_epoch = current_epoch.saturating_add(Epoch::new(1)); diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index ed30da7362c..0e24e8f1758 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -153,7 +153,7 @@ pub async fn produce_blinded_block_v2( BlockProductionVersion::BlindedV2, ) .await - .map_err(warp_utils::reject::block_production_error)?; + .map_err(warp_utils::reject::unhandled_error)?; build_response_v2(chain, block_response_type, endpoint_version, accept_header) } @@ -184,7 +184,7 @@ pub async fn produce_block_v2( BlockProductionVersion::FullV2, ) .await - .map_err(warp_utils::reject::block_production_error)?; + .map_err(warp_utils::reject::unhandled_error)?; build_response_v2(chain, block_response_type, endpoint_version, accept_header) } diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 515599ce887..c4945df9d70 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -26,7 +26,7 @@ pub fn proposer_duties( .now_or_genesis() .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) .ok_or(BeaconChainError::UnableToReadSlot) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; // Determine what the current epoch would be if we fast-forward our system clock by // `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. 
@@ -66,7 +66,7 @@ pub fn proposer_duties( { let (proposers, dependent_root, execution_status, _fork) = compute_proposer_duties_from_head(request_epoch, chain) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; convert_to_api_response( chain, request_epoch, @@ -114,7 +114,7 @@ fn try_proposer_duties_from_cache( .map_err(warp_utils::reject::beacon_state_error)?; let execution_optimistic = chain .is_optimistic_or_invalid_head_block(head_block) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let dependent_root = match head_epoch.cmp(&request_epoch) { // head_epoch == request_epoch @@ -163,7 +163,7 @@ fn compute_and_cache_proposer_duties( ) -> Result { let (indices, dependent_root, execution_status, fork) = compute_proposer_duties_from_head(current_epoch, chain) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; // Prime the proposer shuffling cache with the newly-learned value. chain @@ -171,7 +171,7 @@ fn compute_and_cache_proposer_duties( .lock() .insert(current_epoch, dependent_root, indices.clone(), fork) .map_err(BeaconChainError::from) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; convert_to_api_response( chain, @@ -195,7 +195,7 @@ fn compute_historic_proposer_duties( let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let head = &cached_head.snapshot; if head.beacon_state.current_epoch() <= epoch { @@ -214,7 +214,7 @@ fn compute_historic_proposer_duties( // If we've loaded the head state it might be from a previous epoch, ensure it's in a // suitable epoch. 
ensure_state_is_in_epoch(&mut state, state_root, epoch, &chain.spec) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; (state, execution_optimistic) } else { let (state, execution_optimistic, _finalized) = @@ -234,14 +234,14 @@ fn compute_historic_proposer_duties( let indices = state .get_beacon_proposer_indices(&chain.spec) .map_err(BeaconChainError::from) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; // We can supply the genesis block root as the block root since we know that the only block that // decides its own root is the genesis block. let dependent_root = state .proposer_shuffling_decision_root(chain.genesis_block_root) .map_err(BeaconChainError::from) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; convert_to_api_response(chain, epoch, dependent_root, execution_optimistic, indices) } @@ -257,7 +257,7 @@ fn convert_to_api_response( ) -> Result { let index_to_pubkey_map = chain .validator_pubkey_bytes_many(&indices) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; // Map our internal data structure into the API structure. 
let proposer_data = indices diff --git a/beacon_node/http_api/src/standard_block_rewards.rs b/beacon_node/http_api/src/standard_block_rewards.rs index 1ab75374ea8..372a2765da4 100644 --- a/beacon_node/http_api/src/standard_block_rewards.rs +++ b/beacon_node/http_api/src/standard_block_rewards.rs @@ -4,7 +4,7 @@ use crate::ExecutionOptimistic; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2::lighthouse::StandardBlockReward; use std::sync::Arc; -use warp_utils::reject::beacon_chain_error; +use warp_utils::reject::unhandled_error; /// The difference between block_rewards and beacon_block_rewards is the later returns block /// reward format that satisfies beacon-api specs pub fn compute_beacon_block_rewards( @@ -19,7 +19,7 @@ pub fn compute_beacon_block_rewards( let rewards = chain .compute_beacon_block_reward(block_ref, &mut state) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; Ok((rewards, execution_optimistic, finalized)) } diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index ddacde9a3fc..353390cdad7 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -30,7 +30,7 @@ impl StateId { let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; return Ok(( cached_head.head_state_root(), execution_status.is_optimistic_or_invalid(), @@ -56,7 +56,7 @@ impl StateId { *slot, chain .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::beacon_chain_error)?, + .map_err(warp_utils::reject::unhandled_error)?, *slot <= chain .canonical_head @@ -70,11 +70,11 @@ impl StateId { .store .load_hot_state_summary(root) .map_err(BeaconChainError::DBError) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? 
{ let finalization_status = chain .state_finalization_and_canonicity(root, hot_summary.slot) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let finalized = finalization_status.is_finalized(); let fork_choice = chain.canonical_head.fork_choice_read_lock(); let execution_optimistic = if finalization_status.slot_is_finalized @@ -94,14 +94,14 @@ impl StateId { fork_choice .is_optimistic_or_invalid_block(&hot_summary.latest_block_root) .map_err(BeaconChainError::ForkChoiceError) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? }; return Ok((*root, execution_optimistic, finalized)); } else if let Some(_cold_state_slot) = chain .store .load_cold_state_slot(root) .map_err(BeaconChainError::DBError) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? { let fork_choice = chain.canonical_head.fork_choice_read_lock(); let finalized_root = fork_choice @@ -111,7 +111,7 @@ impl StateId { let execution_optimistic = fork_choice .is_optimistic_or_invalid_block_no_fallback(&finalized_root) .map_err(BeaconChainError::ForkChoiceError) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; return Ok((*root, execution_optimistic, true)); } else { return Err(warp_utils::reject::custom_not_found(format!( @@ -124,7 +124,7 @@ impl StateId { let root = chain .state_root_at_slot(slot) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? 
.ok_or_else(|| { warp_utils::reject::custom_not_found(format!("beacon state at slot {}", slot)) })?; @@ -178,7 +178,7 @@ impl StateId { let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; return Ok(( cached_head.snapshot.beacon_state.clone(), execution_status.is_optimistic_or_invalid(), @@ -191,7 +191,7 @@ impl StateId { let state = chain .get_state(&state_root, slot_opt) - .map_err(warp_utils::reject::beacon_chain_error) + .map_err(warp_utils::reject::unhandled_error) .and_then(|opt| { opt.ok_or_else(|| { warp_utils::reject::custom_not_found(format!( @@ -224,7 +224,7 @@ impl StateId { let (head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; return func( &head.snapshot.beacon_state, execution_status.is_optimistic_or_invalid(), @@ -273,7 +273,7 @@ pub fn checkpoint_slot_and_execution_optimistic( let execution_optimistic = fork_choice .is_optimistic_or_invalid_block_no_fallback(root) .map_err(BeaconChainError::ForkChoiceError) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; Ok((slot, execution_optimistic)) } diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs index 68a06b1ce8c..ec633724060 100644 --- a/beacon_node/http_api/src/sync_committee_rewards.rs +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -6,7 +6,7 @@ use slog::{debug, Logger}; use state_processing::BlockReplayer; use std::sync::Arc; use types::{BeaconState, SignedBlindedBeaconBlock}; -use warp_utils::reject::{beacon_chain_error, custom_not_found}; +use warp_utils::reject::{custom_not_found, unhandled_error}; pub fn compute_sync_committee_rewards( chain: Arc>, @@ -20,7 +20,7 @@ pub fn 
compute_sync_committee_rewards( let reward_payload = chain .compute_sync_committee_rewards(block.message(), &mut state) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; let data = if reward_payload.is_empty() { debug!(log, "compute_sync_committee_rewards returned empty"); @@ -71,7 +71,7 @@ pub fn get_state_before_applying_block( .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) .minimal_block_root_verification() .apply_blocks(vec![], Some(block.slot())) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error::)?; Ok(replayer.into_state()) } diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index 3e5b1dc5247..da9f9b7a063 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -39,7 +39,7 @@ pub fn sync_committee_duties( // still dependent on the head. So using `is_optimistic_head` is fine for both cases. let execution_optimistic = chain .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; // Try using the head's sync committees to satisfy the request. This should be sufficient for // the vast majority of requests. Rather than checking if we think the request will succeed in a @@ -55,7 +55,7 @@ pub fn sync_committee_duties( .. 
})) | Err(BeaconChainError::SyncDutiesError(BeaconStateError::IncorrectStateVariant)) => (), - Err(e) => return Err(warp_utils::reject::beacon_chain_error(e)), + Err(e) => return Err(warp_utils::reject::unhandled_error(e)), } let duties = duties_from_state_load(request_epoch, request_indices, altair_fork_epoch, chain) @@ -67,7 +67,7 @@ pub fn sync_committee_duties( "invalid epoch: {}, current epoch: {}", request_epoch, current_epoch )), - e => warp_utils::reject::beacon_chain_error(e), + e => warp_utils::reject::unhandled_error(e), })?; Ok(convert_to_response( verify_unknown_validators(duties, request_epoch, chain)?, @@ -164,7 +164,7 @@ fn verify_unknown_validators( BeaconChainError::SyncDutiesError(BeaconStateError::UnknownValidator(idx)) => { warp_utils::reject::custom_bad_request(format!("invalid validator index: {idx}")) } - e => warp_utils::reject::beacon_chain_error(e), + e => warp_utils::reject::unhandled_error(e), }) } diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs index 616745dbefe..80a9ed896db 100644 --- a/beacon_node/http_api/src/ui.rs +++ b/beacon_node/http_api/src/ui.rs @@ -5,7 +5,7 @@ use eth2::types::{Epoch, ValidatorStatus}; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use std::sync::Arc; -use warp_utils::reject::beacon_chain_error; +use warp_utils::reject::unhandled_error; #[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorCountResponse { @@ -58,7 +58,7 @@ pub fn get_validator_count( } Ok::<(), BeaconChainError>(()) }) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; Ok(ValidatorCountResponse { active_ongoing, @@ -101,7 +101,7 @@ pub fn get_validator_info( request_data: ValidatorInfoRequestData, chain: Arc>, ) -> Result { - let current_epoch = chain.epoch().map_err(beacon_chain_error)?; + let current_epoch = chain.epoch().map_err(unhandled_error)?; let epochs = 
current_epoch.saturating_sub(HISTORIC_EPOCHS).as_u64()..=current_epoch.as_u64(); diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index d92f986440c..9ad073439d1 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -7,6 +7,7 @@ edition = { workspace = true } [dependencies] beacon_chain = { workspace = true } +health_metrics = { workspace = true } lighthouse_network = { workspace = true } lighthouse_version = { workspace = true } malloc_utils = { workspace = true } diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index d751c51e4c9..bcfb8e4c9cf 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -39,7 +39,7 @@ pub fn gather_prometheus_metrics( lighthouse_network::scrape_discovery_metrics(); - warp_utils::metrics::scrape_health_metrics(); + health_metrics::metrics::scrape_health_metrics(); // It's important to ensure these metrics are explicitly enabled in the case that users aren't // using glibc and this function causes panics. diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index dece975d37e..3ab60346886 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -6,7 +6,6 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -directory = { workspace = true } eth2_keystore = { workspace = true } eth2_wallet = { workspace = true } filesystem = { workspace = true } diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 24f6861daa2..7337d6dfb40 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -4,13 +4,12 @@ //! attempt) to load into the `crate::intialized_validators::InitializedValidators` struct. 
use crate::{default_keystore_password_path, read_password_string, write_file_via_temporary}; -use directory::ensure_dir_exists; use eth2_keystore::Keystore; use regex::Regex; use serde::{Deserialize, Serialize}; use slog::{error, Logger}; use std::collections::HashSet; -use std::fs::{self, File}; +use std::fs::{self, create_dir_all, File}; use std::io; use std::path::{Path, PathBuf}; use types::{graffiti::GraffitiString, Address, PublicKey}; @@ -229,7 +228,7 @@ impl From> for ValidatorDefinitions { impl ValidatorDefinitions { /// Open an existing file or create a new, empty one if it does not exist. pub fn open_or_create>(validators_dir: P) -> Result { - ensure_dir_exists(validators_dir.as_ref()).map_err(|_| { + create_dir_all(validators_dir.as_ref()).map_err(|_| { Error::UnableToCreateValidatorDir(PathBuf::from(validators_dir.as_ref())) })?; let config_path = validators_dir.as_ref().join(CONFIG_FILENAME); diff --git a/common/directory/src/lib.rs b/common/directory/src/lib.rs index df03b4f9a4e..d042f8dfadc 100644 --- a/common/directory/src/lib.rs +++ b/common/directory/src/lib.rs @@ -1,6 +1,6 @@ use clap::ArgMatches; pub use eth2_network_config::DEFAULT_HARDCODED_NETWORK; -use std::fs::{self, create_dir_all}; +use std::fs; use std::path::{Path, PathBuf}; /// Names for the default directories. @@ -30,17 +30,6 @@ pub fn get_network_dir(matches: &ArgMatches) -> String { } } -/// Checks if a directory exists in the given path and creates a directory if it does not exist. -pub fn ensure_dir_exists>(path: P) -> Result<(), String> { - let path = path.as_ref(); - - if !path.exists() { - create_dir_all(path).map_err(|e| format!("Unable to create {:?}: {:?}", path, e))?; - } - - Ok(()) -} - /// If `arg` is in `matches`, parses the value as a path. /// /// Otherwise, attempts to find the default directory for the `testnet` from the `matches`. 
diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 9d6dea100d4..ca7fa7ccdbe 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -31,10 +31,6 @@ zeroize = { workspace = true } [dev-dependencies] tokio = { workspace = true } -[target.'cfg(target_os = "linux")'.dependencies] -psutil = { version = "3.3.0", optional = true } -procfs = { version = "0.15.1", optional = true } - [features] default = ["lighthouse"] -lighthouse = ["psutil", "procfs"] +lighthouse = [] diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 66dd5d779bd..badc4857c4c 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -88,12 +88,6 @@ pub struct ValidatorInclusionData { pub is_previous_epoch_head_attester: bool, } -#[cfg(target_os = "linux")] -use { - psutil::cpu::os::linux::CpuTimesExt, psutil::memory::os::linux::VirtualMemoryExt, - psutil::process::Process, -}; - /// Reports on the health of the Lighthouse instance. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Health { @@ -164,69 +158,6 @@ pub struct SystemHealth { pub misc_os: String, } -impl SystemHealth { - #[cfg(not(target_os = "linux"))] - pub fn observe() -> Result { - Err("Health is only available on Linux".into()) - } - - #[cfg(target_os = "linux")] - pub fn observe() -> Result { - let vm = psutil::memory::virtual_memory() - .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; - let loadavg = - psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?; - - let cpu = - psutil::cpu::cpu_times().map_err(|e| format!("Unable to get cpu times: {:?}", e))?; - - let disk_usage = psutil::disk::disk_usage("/") - .map_err(|e| format!("Unable to disk usage info: {:?}", e))?; - - let disk = psutil::disk::DiskIoCountersCollector::default() - .disk_io_counters() - .map_err(|e| format!("Unable to get disk counters: {:?}", e))?; - - let net = psutil::network::NetIoCountersCollector::default() - 
.net_io_counters() - .map_err(|e| format!("Unable to get network io counters: {:?}", e))?; - - let boot_time = psutil::host::boot_time() - .map_err(|e| format!("Unable to get system boot time: {:?}", e))? - .duration_since(std::time::UNIX_EPOCH) - .map_err(|e| format!("Boot time is lower than unix epoch: {}", e))? - .as_secs(); - - Ok(Self { - sys_virt_mem_total: vm.total(), - sys_virt_mem_available: vm.available(), - sys_virt_mem_used: vm.used(), - sys_virt_mem_free: vm.free(), - sys_virt_mem_cached: vm.cached(), - sys_virt_mem_buffers: vm.buffers(), - sys_virt_mem_percent: vm.percent(), - sys_loadavg_1: loadavg.one, - sys_loadavg_5: loadavg.five, - sys_loadavg_15: loadavg.fifteen, - cpu_cores: psutil::cpu::cpu_count_physical(), - cpu_threads: psutil::cpu::cpu_count(), - system_seconds_total: cpu.system().as_secs(), - cpu_time_total: cpu.total().as_secs(), - user_seconds_total: cpu.user().as_secs(), - iowait_seconds_total: cpu.iowait().as_secs(), - idle_seconds_total: cpu.idle().as_secs(), - disk_node_bytes_total: disk_usage.total(), - disk_node_bytes_free: disk_usage.free(), - disk_node_reads_total: disk.read_count(), - disk_node_writes_total: disk.write_count(), - network_node_bytes_total_received: net.bytes_recv(), - network_node_bytes_total_transmit: net.bytes_sent(), - misc_node_boot_ts_seconds: boot_time, - misc_os: std::env::consts::OS.to_string(), - }) - } -} - /// Process specific health #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProcessHealth { @@ -244,59 +175,6 @@ pub struct ProcessHealth { pub pid_process_seconds_total: u64, } -impl ProcessHealth { - #[cfg(not(target_os = "linux"))] - pub fn observe() -> Result { - Err("Health is only available on Linux".into()) - } - - #[cfg(target_os = "linux")] - pub fn observe() -> Result { - let process = - Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?; - - let process_mem = process - .memory_info() - .map_err(|e| format!("Unable to get process memory 
info: {:?}", e))?; - - let me = procfs::process::Process::myself() - .map_err(|e| format!("Unable to get process: {:?}", e))?; - let stat = me - .stat() - .map_err(|e| format!("Unable to get stat: {:?}", e))?; - - let process_times = process - .cpu_times() - .map_err(|e| format!("Unable to get process cpu times : {:?}", e))?; - - Ok(Self { - pid: process.pid(), - pid_num_threads: stat.num_threads, - pid_mem_resident_set_size: process_mem.rss(), - pid_mem_virtual_memory_size: process_mem.vms(), - pid_mem_shared_memory_size: process_mem.shared(), - pid_process_seconds_total: process_times.busy().as_secs() - + process_times.children_system().as_secs() - + process_times.children_system().as_secs(), - }) - } -} - -impl Health { - #[cfg(not(target_os = "linux"))] - pub fn observe() -> Result { - Err("Health is only available on Linux".into()) - } - - #[cfg(target_os = "linux")] - pub fn observe() -> Result { - Ok(Self { - process: ProcessHealth::observe()?, - system: SystemHealth::observe()?, - }) - } -} - /// Indicates how up-to-date the Eth1 caches are. 
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Eth1SyncStatusData { diff --git a/common/health_metrics/Cargo.toml b/common/health_metrics/Cargo.toml new file mode 100644 index 00000000000..08591471b20 --- /dev/null +++ b/common/health_metrics/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "health_metrics" +version = "0.1.0" +edition = { workspace = true } + +[dependencies] +eth2 = { workspace = true } +metrics = { workspace = true } + +[target.'cfg(target_os = "linux")'.dependencies] +psutil = "3.3.0" +procfs = "0.15.1" diff --git a/common/health_metrics/src/lib.rs b/common/health_metrics/src/lib.rs new file mode 100644 index 00000000000..bab80fb9128 --- /dev/null +++ b/common/health_metrics/src/lib.rs @@ -0,0 +1,2 @@ +pub mod metrics; +pub mod observe; diff --git a/common/warp_utils/src/metrics.rs b/common/health_metrics/src/metrics.rs similarity index 99% rename from common/warp_utils/src/metrics.rs rename to common/health_metrics/src/metrics.rs index fabcf936507..c216426b7d3 100644 --- a/common/warp_utils/src/metrics.rs +++ b/common/health_metrics/src/metrics.rs @@ -1,3 +1,4 @@ +use crate::observe::Observe; use eth2::lighthouse::{ProcessHealth, SystemHealth}; use metrics::*; use std::sync::LazyLock; diff --git a/common/health_metrics/src/observe.rs b/common/health_metrics/src/observe.rs new file mode 100644 index 00000000000..81bb8e6f7e4 --- /dev/null +++ b/common/health_metrics/src/observe.rs @@ -0,0 +1,127 @@ +use eth2::lighthouse::{Health, ProcessHealth, SystemHealth}; + +#[cfg(target_os = "linux")] +use { + psutil::cpu::os::linux::CpuTimesExt, psutil::memory::os::linux::VirtualMemoryExt, + psutil::process::Process, +}; + +pub trait Observe: Sized { + fn observe() -> Result; +} + +impl Observe for Health { + #[cfg(not(target_os = "linux"))] + fn observe() -> Result { + Err("Health is only available on Linux".into()) + } + + #[cfg(target_os = "linux")] + fn observe() -> Result { + Ok(Self { + process: ProcessHealth::observe()?, + system: 
SystemHealth::observe()?, + }) + } +} + +impl Observe for SystemHealth { + #[cfg(not(target_os = "linux"))] + fn observe() -> Result { + Err("Health is only available on Linux".into()) + } + + #[cfg(target_os = "linux")] + fn observe() -> Result { + let vm = psutil::memory::virtual_memory() + .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; + let loadavg = + psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?; + + let cpu = + psutil::cpu::cpu_times().map_err(|e| format!("Unable to get cpu times: {:?}", e))?; + + let disk_usage = psutil::disk::disk_usage("/") + .map_err(|e| format!("Unable to disk usage info: {:?}", e))?; + + let disk = psutil::disk::DiskIoCountersCollector::default() + .disk_io_counters() + .map_err(|e| format!("Unable to get disk counters: {:?}", e))?; + + let net = psutil::network::NetIoCountersCollector::default() + .net_io_counters() + .map_err(|e| format!("Unable to get network io counters: {:?}", e))?; + + let boot_time = psutil::host::boot_time() + .map_err(|e| format!("Unable to get system boot time: {:?}", e))? + .duration_since(std::time::UNIX_EPOCH) + .map_err(|e| format!("Boot time is lower than unix epoch: {}", e))? 
+ .as_secs(); + + Ok(Self { + sys_virt_mem_total: vm.total(), + sys_virt_mem_available: vm.available(), + sys_virt_mem_used: vm.used(), + sys_virt_mem_free: vm.free(), + sys_virt_mem_cached: vm.cached(), + sys_virt_mem_buffers: vm.buffers(), + sys_virt_mem_percent: vm.percent(), + sys_loadavg_1: loadavg.one, + sys_loadavg_5: loadavg.five, + sys_loadavg_15: loadavg.fifteen, + cpu_cores: psutil::cpu::cpu_count_physical(), + cpu_threads: psutil::cpu::cpu_count(), + system_seconds_total: cpu.system().as_secs(), + cpu_time_total: cpu.total().as_secs(), + user_seconds_total: cpu.user().as_secs(), + iowait_seconds_total: cpu.iowait().as_secs(), + idle_seconds_total: cpu.idle().as_secs(), + disk_node_bytes_total: disk_usage.total(), + disk_node_bytes_free: disk_usage.free(), + disk_node_reads_total: disk.read_count(), + disk_node_writes_total: disk.write_count(), + network_node_bytes_total_received: net.bytes_recv(), + network_node_bytes_total_transmit: net.bytes_sent(), + misc_node_boot_ts_seconds: boot_time, + misc_os: std::env::consts::OS.to_string(), + }) + } +} + +impl Observe for ProcessHealth { + #[cfg(not(target_os = "linux"))] + fn observe() -> Result { + Err("Health is only available on Linux".into()) + } + + #[cfg(target_os = "linux")] + fn observe() -> Result { + let process = + Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?; + + let process_mem = process + .memory_info() + .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; + + let me = procfs::process::Process::myself() + .map_err(|e| format!("Unable to get process: {:?}", e))?; + let stat = me + .stat() + .map_err(|e| format!("Unable to get stat: {:?}", e))?; + + let process_times = process + .cpu_times() + .map_err(|e| format!("Unable to get process cpu times : {:?}", e))?; + + Ok(Self { + pid: process.pid(), + pid_num_threads: stat.num_threads, + pid_mem_resident_set_size: process_mem.rss(), + pid_mem_virtual_memory_size: process_mem.vms(), + 
pid_mem_shared_memory_size: process_mem.shared(), + pid_process_seconds_total: process_times.busy().as_secs() + + process_times.children_system().as_secs() + + process_times.children_system().as_secs(), + }) + } +} diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 5008c86e858..cb52cff29a5 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -7,6 +7,7 @@ edition = { workspace = true } [dependencies] eth2 = { workspace = true } +health_metrics = { workspace = true } lighthouse_version = { workspace = true } metrics = { workspace = true } regex = { workspace = true } diff --git a/common/monitoring_api/src/gather.rs b/common/monitoring_api/src/gather.rs index 2f6c820f562..43bea35a933 100644 --- a/common/monitoring_api/src/gather.rs +++ b/common/monitoring_api/src/gather.rs @@ -1,4 +1,5 @@ use super::types::{BeaconProcessMetrics, ValidatorProcessMetrics}; +use health_metrics::observe::Observe; use metrics::{MetricFamily, MetricType}; use serde_json::json; use std::collections::HashMap; diff --git a/common/monitoring_api/src/lib.rs b/common/monitoring_api/src/lib.rs index 9592c50a404..6f919971b02 100644 --- a/common/monitoring_api/src/lib.rs +++ b/common/monitoring_api/src/lib.rs @@ -4,6 +4,7 @@ use std::{path::PathBuf, time::Duration}; use eth2::lighthouse::SystemHealth; use gather::{gather_beacon_metrics, gather_validator_metrics}; +use health_metrics::observe::Observe; use reqwest::{IntoUrl, Response}; pub use reqwest::{StatusCode, Url}; use sensitive_url::SensitiveUrl; diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index 773431c93c6..4c03b7662ed 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -12,7 +12,6 @@ insecure_keys = [] bls = { workspace = true } deposit_contract = { workspace = true } derivative = { workspace = true } -directory = { workspace = true } eth2_keystore = { workspace = true } filesystem = { workspace = true } 
hex = { workspace = true } diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs index 3d5d1496082..2e971a8b1ae 100644 --- a/common/validator_dir/src/builder.rs +++ b/common/validator_dir/src/builder.rs @@ -1,7 +1,6 @@ use crate::{Error as DirError, ValidatorDir}; use bls::get_withdrawal_credentials; use deposit_contract::{encode_eth1_tx_data, Error as DepositError}; -use directory::ensure_dir_exists; use eth2_keystore::{Error as KeystoreError, Keystore, KeystoreBuilder, PlainText}; use filesystem::create_with_600_perms; use rand::{distributions::Alphanumeric, Rng}; @@ -42,7 +41,7 @@ pub enum Error { #[cfg(feature = "insecure_keys")] InsecureKeysError(String), MissingPasswordDir, - UnableToCreatePasswordDir(String), + UnableToCreatePasswordDir(io::Error), } impl From for Error { @@ -163,7 +162,7 @@ impl<'a> Builder<'a> { } if let Some(password_dir) = &self.password_dir { - ensure_dir_exists(password_dir).map_err(Error::UnableToCreatePasswordDir)?; + create_dir_all(password_dir).map_err(Error::UnableToCreatePasswordDir)?; } // The withdrawal keystore must be initialized in order to store it or create an eth1 diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index 4a3cde54a9a..ec2d23686b1 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -6,7 +6,6 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -beacon_chain = { workspace = true } bytes = { workspace = true } eth2 = { workspace = true } headers = "0.3.2" @@ -15,7 +14,6 @@ safe_arith = { workspace = true } serde = { workspace = true } serde_array_query = "0.1.0" serde_json = { workspace = true } -state_processing = { workspace = true } tokio = { workspace = true } types = { workspace = true } warp = { workspace = true } diff --git a/common/warp_utils/src/lib.rs b/common/warp_utils/src/lib.rs index 55ee423fa41..c10adbac0df 100644 --- 
a/common/warp_utils/src/lib.rs +++ b/common/warp_utils/src/lib.rs @@ -3,7 +3,6 @@ pub mod cors; pub mod json; -pub mod metrics; pub mod query; pub mod reject; pub mod task; diff --git a/common/warp_utils/src/reject.rs b/common/warp_utils/src/reject.rs index bbd5274a7eb..3c7ef5e4fa7 100644 --- a/common/warp_utils/src/reject.rs +++ b/common/warp_utils/src/reject.rs @@ -2,6 +2,7 @@ use eth2::types::{ErrorMessage, Failure, IndexedErrorMessage}; use std::convert::Infallible; use std::error::Error; use std::fmt; +use std::fmt::Debug; use warp::{http::StatusCode, reject::Reject, reply::Response, Reply}; #[derive(Debug)] @@ -19,15 +20,6 @@ pub fn server_sent_event_error(s: String) -> ServerSentEventError { ServerSentEventError(s) } -#[derive(Debug)] -pub struct BeaconChainError(pub beacon_chain::BeaconChainError); - -impl Reject for BeaconChainError {} - -pub fn beacon_chain_error(e: beacon_chain::BeaconChainError) -> warp::reject::Rejection { - warp::reject::custom(BeaconChainError(e)) -} - #[derive(Debug)] pub struct BeaconStateError(pub types::BeaconStateError); @@ -47,21 +39,12 @@ pub fn arith_error(e: safe_arith::ArithError) -> warp::reject::Rejection { } #[derive(Debug)] -pub struct SlotProcessingError(pub state_processing::SlotProcessingError); - -impl Reject for SlotProcessingError {} - -pub fn slot_processing_error(e: state_processing::SlotProcessingError) -> warp::reject::Rejection { - warp::reject::custom(SlotProcessingError(e)) -} - -#[derive(Debug)] -pub struct BlockProductionError(pub beacon_chain::BlockProductionError); +pub struct UnhandledError(pub Box); -impl Reject for BlockProductionError {} +impl Reject for UnhandledError {} -pub fn block_production_error(e: beacon_chain::BlockProductionError) -> warp::reject::Rejection { - warp::reject::custom(BlockProductionError(e)) +pub fn unhandled_error(e: D) -> warp::reject::Rejection { + warp::reject::custom(UnhandledError(Box::new(e))) } #[derive(Debug)] @@ -191,16 +174,7 @@ pub async fn handle_rejection(err: 
warp::Rejection) -> Result() { code = StatusCode::BAD_REQUEST; message = format!("BAD_REQUEST: invalid query: {}", e); - } else if let Some(e) = err.find::() { - code = StatusCode::INTERNAL_SERVER_ERROR; - message = format!("UNHANDLED_ERROR: {:?}", e.0); - } else if let Some(e) = err.find::() { - code = StatusCode::INTERNAL_SERVER_ERROR; - message = format!("UNHANDLED_ERROR: {:?}", e.0); - } else if let Some(e) = err.find::() { - code = StatusCode::INTERNAL_SERVER_ERROR; - message = format!("UNHANDLED_ERROR: {:?}", e.0); - } else if let Some(e) = err.find::() { + } else if let Some(e) = err.find::() { code = StatusCode::INTERNAL_SERVER_ERROR; message = format!("UNHANDLED_ERROR: {:?}", e.0); } else if let Some(e) = err.find::() { diff --git a/validator_client/http_api/Cargo.toml b/validator_client/http_api/Cargo.toml index 76a021ab8c3..651e658a7a5 100644 --- a/validator_client/http_api/Cargo.toml +++ b/validator_client/http_api/Cargo.toml @@ -21,6 +21,7 @@ eth2_keystore = { workspace = true } ethereum_serde_utils = { workspace = true } filesystem = { workspace = true } graffiti_file = { workspace = true } +health_metrics = { workspace = true } initialized_validators = { workspace = true } lighthouse_version = { workspace = true } logging = { workspace = true } diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index 73ebe717af3..9c3e3da63d1 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -32,6 +32,7 @@ use eth2::lighthouse_vc::{ PublicKeyBytes, SetGraffitiRequest, }, }; +use health_metrics::observe::Observe; use lighthouse_version::version_with_platform; use logging::SSELoggingComponents; use parking_lot::RwLock; diff --git a/validator_client/http_metrics/Cargo.toml b/validator_client/http_metrics/Cargo.toml index c29a4d18fa0..a3432410bc7 100644 --- a/validator_client/http_metrics/Cargo.toml +++ b/validator_client/http_metrics/Cargo.toml @@ -5,6 +5,7 @@ edition = { workspace = true 
} authors = ["Sigma Prime "] [dependencies] +health_metrics = { workspace = true } lighthouse_version = { workspace = true } malloc_utils = { workspace = true } metrics = { workspace = true } diff --git a/validator_client/http_metrics/src/lib.rs b/validator_client/http_metrics/src/lib.rs index 984b752e5a5..f1c6d4ed8ad 100644 --- a/validator_client/http_metrics/src/lib.rs +++ b/validator_client/http_metrics/src/lib.rs @@ -206,7 +206,7 @@ pub fn gather_prometheus_metrics( scrape_allocator_metrics(); } - warp_utils::metrics::scrape_health_metrics(); + health_metrics::metrics::scrape_health_metrics(); encoder .encode(&metrics::gather(), &mut buffer) From 06329ec2d105fc60c4c7218561cff11fb6b398a3 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Fri, 17 Jan 2025 01:27:08 +0700 Subject: [PATCH 16/26] `SingleAttestation` implementation (#6488) * First pass * Add restrictions to RuntimeVariableList api * Use empty_uninitialized and fix warnings * Fix some todos * Merge branch 'unstable' into max-blobs-preset * Fix take impl on RuntimeFixedList * cleanup * Fix test compilations * Fix some more tests * Fix test from unstable * Merge branch 'unstable' into max-blobs-preset * SingleAttestation * Add post attestation v2 endpoint logic to attestation service * Merge branch 'unstable' of https://github.com/sigp/lighthouse into single_attestation * Implement "Bugfix and more withdrawal tests" * Implement "Add missed exit checks to consolidation processing" * Implement "Update initial earliest_exit_epoch calculation" * Implement "Limit consolidating balance by validator.effective_balance" * Implement "Use 16-bit random value in validator filter" * Implement "Do not change creds type on consolidation" * some tests and fixed attestqtion calc * Merge branch 'unstable' of https://github.com/sigp/lighthouse into single_attestation * Rename PendingPartialWithdraw index field to validator_index * Skip slots to get test to pass and add TODO * Implement "Synchronously check all 
transactions to have non-zero length" * Merge remote-tracking branch 'origin/unstable' into max-blobs-preset * Remove footgun function * Minor simplifications * Move from preset to config * Fix typo * Revert "Remove footgun function" This reverts commit de01f923c7452355c87f50c0e8031ca94fa00d36. * Try fixing tests * Implement "bump minimal preset MAX_BLOB_COMMITMENTS_PER_BLOCK and KZG_COMMITMENT_INCLUSION_PROOF_DEPTH" * Thread through ChainSpec * Fix release tests * Move RuntimeFixedVector into module and rename * Add test * Merge branch 'unstable' of https://github.com/sigp/lighthouse into single_attestation * Added more test coverage, simplified Attestation conversion, and other minor refactors * Removed unusued codepaths * Fix failing test * Implement "Remove post-altair `initialize_beacon_state_from_eth1` from specs" * Update preset YAML * Remove empty RuntimeVarList awefullness * Make max_blobs_per_block a config parameter (#6329) Squashed commit of the following: commit 04b3743ec1e0b650269dd8e58b540c02430d1c0d Author: Michael Sproul Date: Mon Jan 6 17:36:58 2025 +1100 Add test commit 440e85419940d4daba406d910e7908dd1fe78668 Author: Michael Sproul Date: Mon Jan 6 17:24:50 2025 +1100 Move RuntimeFixedVector into module and rename commit f66e179a40c3917eee39a93534ecf75480172699 Author: Michael Sproul Date: Mon Jan 6 17:17:17 2025 +1100 Fix release tests commit e4bfe71cd1f0a2784d0bd57f85b2f5d8cf503ac1 Author: Michael Sproul Date: Mon Jan 6 17:05:30 2025 +1100 Thread through ChainSpec commit 063b79c16abd3f6df47b85efcf3858177bc933b9 Author: Michael Sproul Date: Mon Jan 6 15:32:16 2025 +1100 Try fixing tests commit 88bedf09bc647de66bd1ff944bbc8fb13e2b7590 Author: Michael Sproul Date: Mon Jan 6 15:04:37 2025 +1100 Revert "Remove footgun function" This reverts commit de01f923c7452355c87f50c0e8031ca94fa00d36. 
commit 32483d385b66f252d50cee5b524e2924157bdcd4 Author: Michael Sproul Date: Mon Jan 6 15:04:32 2025 +1100 Fix typo commit 2e86585b478c012f6e3483989c87e38161227674 Author: Michael Sproul Date: Mon Jan 6 15:04:15 2025 +1100 Move from preset to config commit 1095d60a40be20dd3c229b759fc3c228b51e51e3 Author: Michael Sproul Date: Mon Jan 6 14:38:40 2025 +1100 Minor simplifications commit de01f923c7452355c87f50c0e8031ca94fa00d36 Author: Michael Sproul Date: Mon Jan 6 14:06:57 2025 +1100 Remove footgun function commit 0c2c8c42245c25b8cf17885faf20acd3b81140ec Merge: 21ecb58ff f51a292f7 Author: Michael Sproul Date: Mon Jan 6 14:02:50 2025 +1100 Merge remote-tracking branch 'origin/unstable' into max-blobs-preset commit f51a292f77575a1786af34271fb44954f141c377 Author: Daniel Knopik <107140945+dknopik@users.noreply.github.com> Date: Fri Jan 3 20:27:21 2025 +0100 fully lint only explicitly to avoid unnecessary rebuilds (#6753) * fully lint only explicitly to avoid unnecessary rebuilds commit 7e0cddef321c2a069582c65b58e5f46590d60c49 Author: Akihito Nakano Date: Tue Dec 24 10:38:56 2024 +0900 Make sure we have fanout peers when publish (#6738) * Ensure that `fanout_peers` is always non-empty if it's `Some` commit 21ecb58ff88b86435ab62d9ac227394c10fdcd22 Merge: 2fcb2935e 9aefb5539 Author: Pawan Dhananjay Date: Mon Oct 21 14:46:00 2024 -0700 Merge branch 'unstable' into max-blobs-preset commit 2fcb2935ec7ef4cd18bbdd8aedb7de61fac69e61 Author: Pawan Dhananjay Date: Fri Sep 6 18:28:31 2024 -0700 Fix test from unstable commit 12c6ef118a1a6d910c48d9d4b23004f3609264c7 Author: Pawan Dhananjay Date: Wed Sep 4 16:16:36 2024 -0700 Fix some more tests commit d37733b846ce58e318e976d6503ca394b4901141 Author: Pawan Dhananjay Date: Wed Sep 4 12:47:36 2024 -0700 Fix test compilations commit 52bb581e071d5f474d519366e860a4b3a0b52f78 Author: Pawan Dhananjay Date: Tue Sep 3 18:38:19 2024 -0700 cleanup commit e71020e3e613910e0315f558ead661b490a0ff20 Author: Pawan Dhananjay Date: Tue Sep 3 17:16:10 
2024 -0700 Fix take impl on RuntimeFixedList commit 13f9bba6470b2140e5c34f14aed06dab2b062c1c Merge: 60100fc6b 4e675cf5d Author: Pawan Dhananjay Date: Tue Sep 3 16:08:59 2024 -0700 Merge branch 'unstable' into max-blobs-preset commit 60100fc6be72792ff33913d7e5a53434c792aacf Author: Pawan Dhananjay Date: Fri Aug 30 16:04:11 2024 -0700 Fix some todos commit a9cb329a221a809f7dd818984753826f91c2e26b Author: Pawan Dhananjay Date: Fri Aug 30 15:54:00 2024 -0700 Use empty_uninitialized and fix warnings commit 4dc6e6515ecf75cefa4de840edc7b57e76a8fc9e Author: Pawan Dhananjay Date: Fri Aug 30 15:53:18 2024 -0700 Add restrictions to RuntimeVariableList api commit 25feedfde348b530c4fa2348cc71a06b746898ed Author: Pawan Dhananjay Date: Thu Aug 29 16:11:19 2024 -0700 First pass * Fix tests * Implement max_blobs_per_block_electra * Fix config issues * Simplify BlobSidecarListFromRoot * Disable PeerDAS tests * Cleanup single attestation imports * Fix some single attestation network plumbing * Merge remote-tracking branch 'origin/unstable' into max-blobs-preset * Bump quota to account for new target (6) * Remove clone * Fix issue from review * Try to remove ugliness * Merge branch 'unstable' into max-blobs-preset * Merge remote-tracking branch 'origin/unstable' into electra-alpha10 * Merge commit '04b3743ec1e0b650269dd8e58b540c02430d1c0d' into electra-alpha10 * Merge remote-tracking branch 'pawan/max-blobs-preset' into electra-alpha10 * Update tests to v1.5.0-beta.0 * Merge remote-tracking branch 'origin/electra-alpha10' into single_attestation * Fix some tests * Cargo fmt * lint * fmt * Resolve merge conflicts * Merge branch 'electra-alpha10' of https://github.com/sigp/lighthouse into single_attestation * lint * Linting * fmt * Merge branch 'electra-alpha10' of https://github.com/sigp/lighthouse into single_attestation * Fmt * Fix test and add TODO * Gracefully handle slashed proposers in fork choice tests * Merge remote-tracking branch 'origin/unstable' into electra-alpha10 * Keep 
latest changes from max_blobs_per_block PR in codec.rs * Revert a few more regressions and add a comment * Merge branch 'electra-alpha10' of https://github.com/sigp/lighthouse into single_attestation * Disable more DAS tests * Improve validator monitor test a little * Make test more robust * Fix sync test that didn't understand blobs * Fill out cropped comment * Merge remote-tracking branch 'origin/electra-alpha10' into single_attestation * Merge remote-tracking branch 'origin/unstable' into single_attestation * Merge remote-tracking branch 'origin/unstable' into single_attestation * Merge branch 'unstable' of https://github.com/sigp/lighthouse into single_attestation * publish_attestations should accept Either * log an error when failing to convert to SingleAttestation * Use Cow to avoid clone * Avoid reconverting to SingleAttestation * Tweak VC error message * update comments * update comments * pass in single attestation as ref to subnetid calculation method * Improved API, new error variants and other minor tweaks * Fix single_attestation event topic boilerplate * fix sse event failure * Add single_attestation event topic test coverage --- Cargo.lock | 1 + .../src/attestation_verification.rs | 13 +- beacon_node/beacon_chain/src/beacon_chain.rs | 26 +- beacon_node/beacon_chain/src/events.rs | 15 ++ beacon_node/beacon_chain/src/test_utils.rs | 245 ++++++++++++++++++ beacon_node/http_api/Cargo.toml | 1 + beacon_node/http_api/src/lib.rs | 53 +++- .../http_api/src/publish_attestations.rs | 95 +++++-- beacon_node/http_api/tests/fork_tests.rs | 4 - .../http_api/tests/interactive_tests.rs | 59 +++-- beacon_node/http_api/tests/tests.rs | 99 ++++++- .../lighthouse_network/src/types/pubsub.rs | 83 +++--- .../src/network_beacon_processor/mod.rs | 52 ++++ beacon_node/network/src/router.rs | 11 + beacon_node/network/src/service.rs | 18 +- beacon_node/network/src/subnet_service/mod.rs | 6 +- common/eth2/src/lib.rs | 4 +- common/eth2/src/types.rs | 10 + 
consensus/types/src/attestation.rs | 94 ++++++- consensus/types/src/lib.rs | 2 +- consensus/types/src/subnet_id.rs | 16 ++ .../src/attestation_service.rs | 24 +- 22 files changed, 829 insertions(+), 102 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aa9bdd2afc7..29ffdc49bae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3951,6 +3951,7 @@ dependencies = [ "bs58 0.4.0", "bytes", "directory", + "either", "eth1", "eth2", "ethereum_serde_utils", diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index c3dea3dbb40..ffaf61e41af 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -62,7 +62,7 @@ use tree_hash::TreeHash; use types::{ Attestation, AttestationRef, BeaconCommittee, BeaconStateError::NoCommitteeFound, ChainSpec, CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation, SelectionProof, - SignedAggregateAndProof, Slot, SubnetId, + SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, }; pub use batch::{batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations}; @@ -317,12 +317,22 @@ pub struct VerifiedUnaggregatedAttestation<'a, T: BeaconChainTypes> { attestation: AttestationRef<'a, T::EthSpec>, indexed_attestation: IndexedAttestation, subnet_id: SubnetId, + validator_index: usize, } impl VerifiedUnaggregatedAttestation<'_, T> { pub fn into_indexed_attestation(self) -> IndexedAttestation { self.indexed_attestation } + + pub fn single_attestation(&self) -> Option { + Some(SingleAttestation { + committee_index: self.attestation.committee_index()? 
as usize, + attester_index: self.validator_index, + data: self.attestation.data().clone(), + signature: self.attestation.signature().clone(), + }) + } } /// Custom `Clone` implementation is to avoid the restrictive trait bounds applied by the usual derive @@ -1035,6 +1045,7 @@ impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> { attestation, indexed_attestation, subnet_id, + validator_index: validator_index as usize, }) } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index a6da610c0e8..d0c294b44ff 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2035,10 +2035,30 @@ impl BeaconChain { |v| { // This method is called for API and gossip attestations, so this covers all unaggregated attestation events if let Some(event_handler) = self.event_handler.as_ref() { + if event_handler.has_single_attestation_subscribers() { + let current_fork = self + .spec + .fork_name_at_slot::(v.attestation().data().slot); + if current_fork.electra_enabled() { + // I don't see a situation where this could return None. The upstream unaggregated attestation checks + // should have already verified that this is an attestation with a single committee bit set. 
+ if let Some(single_attestation) = v.single_attestation() { + event_handler.register(EventKind::SingleAttestation(Box::new( + single_attestation, + ))); + } + } + } + if event_handler.has_attestation_subscribers() { - event_handler.register(EventKind::Attestation(Box::new( - v.attestation().clone_as_attestation(), - ))); + let current_fork = self + .spec + .fork_name_at_slot::(v.attestation().data().slot); + if !current_fork.electra_enabled() { + event_handler.register(EventKind::Attestation(Box::new( + v.attestation().clone_as_attestation(), + ))); + } } } metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_SUCCESSES); diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index 267d56220c9..8c342893ae3 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -8,6 +8,7 @@ const DEFAULT_CHANNEL_CAPACITY: usize = 16; pub struct ServerSentEventHandler { attestation_tx: Sender>, + single_attestation_tx: Sender>, block_tx: Sender>, blob_sidecar_tx: Sender>, finalized_tx: Sender>, @@ -37,6 +38,7 @@ impl ServerSentEventHandler { pub fn new_with_capacity(log: Logger, capacity: usize) -> Self { let (attestation_tx, _) = broadcast::channel(capacity); + let (single_attestation_tx, _) = broadcast::channel(capacity); let (block_tx, _) = broadcast::channel(capacity); let (blob_sidecar_tx, _) = broadcast::channel(capacity); let (finalized_tx, _) = broadcast::channel(capacity); @@ -56,6 +58,7 @@ impl ServerSentEventHandler { Self { attestation_tx, + single_attestation_tx, block_tx, blob_sidecar_tx, finalized_tx, @@ -90,6 +93,10 @@ impl ServerSentEventHandler { .attestation_tx .send(kind) .map(|count| log_count("attestation", count)), + EventKind::SingleAttestation(_) => self + .single_attestation_tx + .send(kind) + .map(|count| log_count("single_attestation", count)), EventKind::Block(_) => self .block_tx .send(kind) @@ -164,6 +171,10 @@ impl ServerSentEventHandler { 
self.attestation_tx.subscribe() } + pub fn subscribe_single_attestation(&self) -> Receiver> { + self.single_attestation_tx.subscribe() + } + pub fn subscribe_block(&self) -> Receiver> { self.block_tx.subscribe() } @@ -232,6 +243,10 @@ impl ServerSentEventHandler { self.attestation_tx.receiver_count() > 0 } + pub fn has_single_attestation_subscribers(&self) -> bool { + self.single_attestation_tx.receiver_count() > 0 + } + pub fn has_block_subscribers(&self) -> bool { self.block_tx.receiver_count() > 0 } diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index fd3cc496260..443cc686ebe 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -669,10 +669,16 @@ pub struct BeaconChainHarness { pub rng: Mutex, } +pub type CommitteeSingleAttestations = Vec<(SingleAttestation, SubnetId)>; pub type CommitteeAttestations = Vec<(Attestation, SubnetId)>; pub type HarnessAttestations = Vec<(CommitteeAttestations, Option>)>; +pub type HarnessSingleAttestations = Vec<( + CommitteeSingleAttestations, + Option>, +)>; + pub type HarnessSyncContributions = Vec<( Vec<(SyncCommitteeMessage, usize)>, Option>, @@ -1024,6 +1030,99 @@ where ) } + #[allow(clippy::too_many_arguments)] + pub fn produce_single_attestation_for_block( + &self, + slot: Slot, + index: CommitteeIndex, + beacon_block_root: Hash256, + mut state: Cow>, + state_root: Hash256, + aggregation_bit_index: usize, + validator_index: usize, + ) -> Result { + let epoch = slot.epoch(E::slots_per_epoch()); + + if state.slot() > slot { + return Err(BeaconChainError::CannotAttestToFutureState); + } else if state.current_epoch() < epoch { + let mut_state = state.to_mut(); + complete_state_advance( + mut_state, + Some(state_root), + epoch.start_slot(E::slots_per_epoch()), + &self.spec, + )?; + mut_state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + } + + let committee_len = state.get_beacon_committee(slot, 
index)?.committee.len(); + + let target_slot = epoch.start_slot(E::slots_per_epoch()); + let target_root = if state.slot() <= target_slot { + beacon_block_root + } else { + *state.get_block_root(target_slot)? + }; + + let attestation: Attestation = Attestation::empty_for_signing( + index, + committee_len, + slot, + beacon_block_root, + state.current_justified_checkpoint(), + Checkpoint { + epoch, + root: target_root, + }, + &self.spec, + )?; + + let attestation = match attestation { + Attestation::Electra(mut attn) => { + attn.aggregation_bits + .set(aggregation_bit_index, true) + .unwrap(); + attn + } + Attestation::Base(_) => panic!("Must be an Electra attestation"), + }; + + let aggregation_bits = attestation.get_aggregation_bits(); + + if aggregation_bits.len() != 1 { + panic!("Must be an unaggregated attestation") + } + + let aggregation_bit = *aggregation_bits.first().unwrap(); + + let committee = state.get_beacon_committee(slot, index).unwrap(); + + let attester_index = committee + .committee + .iter() + .enumerate() + .find_map(|(i, &index)| { + if aggregation_bit as usize == i { + return Some(index); + } + None + }) + .unwrap(); + + let single_attestation = + attestation.to_single_attestation_with_attester_index(attester_index)?; + + let attestation: Attestation = single_attestation.to_attestation(committee.committee)?; + + assert_eq!( + single_attestation.committee_index, + attestation.committee_index().unwrap() as usize + ); + assert_eq!(single_attestation.attester_index, validator_index); + Ok(single_attestation) + } + /// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to /// `beacon_block_root`. The provided `state` should match the `block.state_root` for the /// `block` identified by `beacon_block_root`. @@ -1081,6 +1180,33 @@ where )?) } + /// A list of attestations for each committee for the given slot. + /// + /// The first layer of the Vec is organised per committee. 
For example, if the return value is + /// called `all_attestations`, then all attestations in `all_attestations[0]` will be for + /// committee 0, whilst all in `all_attestations[1]` will be for committee 1. + pub fn make_single_attestations( + &self, + attesting_validators: &[usize], + state: &BeaconState, + state_root: Hash256, + head_block_root: SignedBeaconBlockHash, + attestation_slot: Slot, + ) -> Vec { + let fork = self + .spec + .fork_at_epoch(attestation_slot.epoch(E::slots_per_epoch())); + self.make_single_attestations_with_opts( + attesting_validators, + state, + state_root, + head_block_root, + attestation_slot, + MakeAttestationOptions { limit: None, fork }, + ) + .0 + } + /// A list of attestations for each committee for the given slot. /// /// The first layer of the Vec is organised per committee. For example, if the return value is @@ -1108,6 +1234,99 @@ where .0 } + pub fn make_single_attestations_with_opts( + &self, + attesting_validators: &[usize], + state: &BeaconState, + state_root: Hash256, + head_block_root: SignedBeaconBlockHash, + attestation_slot: Slot, + opts: MakeAttestationOptions, + ) -> (Vec, Vec) { + let MakeAttestationOptions { limit, fork } = opts; + let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap(); + let num_attesters = AtomicUsize::new(0); + + let (attestations, split_attesters) = state + .get_beacon_committees_at_slot(attestation_slot) + .expect("should get committees") + .iter() + .map(|bc| { + bc.committee + .par_iter() + .enumerate() + .filter_map(|(i, validator_index)| { + if !attesting_validators.contains(validator_index) { + return None; + } + + if let Some(limit) = limit { + // This atomics stuff is necessary because we're under a par_iter, + // and Rayon will deadlock if we use a mutex. 
+ if num_attesters.fetch_add(1, Ordering::Relaxed) >= limit { + num_attesters.fetch_sub(1, Ordering::Relaxed); + return None; + } + } + + let mut attestation = self + .produce_single_attestation_for_block( + attestation_slot, + bc.index, + head_block_root.into(), + Cow::Borrowed(state), + state_root, + i, + *validator_index, + ) + .unwrap(); + + attestation.signature = { + let domain = self.spec.get_domain( + attestation.data.target.epoch, + Domain::BeaconAttester, + &fork, + state.genesis_validators_root(), + ); + + let message = attestation.data.signing_root(domain); + + let mut agg_sig = AggregateSignature::infinity(); + + agg_sig.add_assign( + &self.validator_keypairs[*validator_index].sk.sign(message), + ); + + agg_sig + }; + + let subnet_id = SubnetId::compute_subnet_for_single_attestation::( + &attestation, + committee_count, + &self.chain.spec, + ) + .unwrap(); + + Some(((attestation, subnet_id), validator_index)) + }) + .unzip::<_, _, Vec<_>, Vec<_>>() + }) + .unzip::<_, _, Vec<_>, Vec<_>>(); + + // Flatten attesters. + let attesters = split_attesters.into_iter().flatten().collect::>(); + + if let Some(limit) = limit { + assert_eq!(limit, num_attesters.load(Ordering::Relaxed)); + assert_eq!( + limit, + attesters.len(), + "failed to generate `limit` attestations" + ); + } + (attestations, attesters) + } + pub fn make_unaggregated_attestations_with_opts( &self, attesting_validators: &[usize], @@ -1288,6 +1507,32 @@ where ) } + /// A list of attestations for each committee for the given slot. + /// + /// The first layer of the Vec is organised per committee. For example, if the return value is + /// called `all_attestations`, then all attestations in `all_attestations[0]` will be for + /// committee 0, whilst all in `all_attestations[1]` will be for committee 1. 
+ pub fn get_single_attestations( + &self, + attestation_strategy: &AttestationStrategy, + state: &BeaconState, + state_root: Hash256, + head_block_root: Hash256, + attestation_slot: Slot, + ) -> Vec> { + let validators: Vec = match attestation_strategy { + AttestationStrategy::AllValidators => self.get_all_validators(), + AttestationStrategy::SomeValidators(vals) => vals.clone(), + }; + self.make_single_attestations( + &validators, + state, + state_root, + head_block_root.into(), + attestation_slot, + ) + } + pub fn make_attestations( &self, attesting_validators: &[usize], diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 0ced27e4464..61f3370c702 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -11,6 +11,7 @@ beacon_processor = { workspace = true } bs58 = "0.4.0" bytes = { workspace = true } directory = { workspace = true } +either = { workspace = true } eth1 = { workspace = true } eth2 = { workspace = true } ethereum_serde_utils = { workspace = true } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index d5c6c115670..5dc9055c6ca 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -44,6 +44,7 @@ pub use block_id::BlockId; use builder_states::get_next_withdrawals; use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; +use either::Either; use eth2::types::{ self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode, LightClientUpdatesQuery, PublishBlockRequest, ValidatorBalancesRequestBody, ValidatorId, @@ -86,8 +87,8 @@ use types::{ AttesterSlashing, BeaconStateError, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, - SyncCommitteeMessage, 
SyncContributionData, + SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, + SingleAttestation, Slot, SyncCommitteeMessage, SyncContributionData, }; use validator::pubkey_to_validator_index; use version::{ @@ -1832,26 +1833,62 @@ pub fn serve( .and(task_spawner_filter.clone()) .and(chain_filter.clone()); + let beacon_pool_path_v2 = eth_v2 + .and(warp::path("beacon")) + .and(warp::path("pool")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()); + // POST beacon/pool/attestations - let post_beacon_pool_attestations = beacon_pool_path_any + let post_beacon_pool_attestations = beacon_pool_path .clone() .and(warp::path("attestations")) .and(warp::path::end()) .and(warp_utils::json::json()) .and(network_tx_filter.clone()) - .and(reprocess_send_filter) + .and(reprocess_send_filter.clone()) .and(log_filter.clone()) .then( // V1 and V2 are identical except V2 has a consensus version header in the request. // We only require this header for SSZ deserialization, which isn't supported for // this endpoint presently. 
- |_endpoint_version: EndpointVersion, - task_spawner: TaskSpawner, + |task_spawner: TaskSpawner, chain: Arc>, attestations: Vec>, network_tx: UnboundedSender>, reprocess_tx: Option>, log: Logger| async move { + let attestations = attestations.into_iter().map(Either::Left).collect(); + let result = crate::publish_attestations::publish_attestations( + task_spawner, + chain, + attestations, + network_tx, + reprocess_tx, + log, + ) + .await + .map(|()| warp::reply::json(&())); + convert_rejection(result).await + }, + ); + + let post_beacon_pool_attestations_v2 = beacon_pool_path_v2 + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .and(reprocess_send_filter) + .and(log_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + attestations: Vec, + network_tx: UnboundedSender>, + reprocess_tx: Option>, + log: Logger| async move { + let attestations = attestations.into_iter().map(Either::Right).collect(); let result = crate::publish_attestations::publish_attestations( task_spawner, chain, @@ -4509,6 +4546,9 @@ pub fn serve( api_types::EventTopic::Attestation => { event_handler.subscribe_attestation() } + api_types::EventTopic::SingleAttestation => { + event_handler.subscribe_single_attestation() + } api_types::EventTopic::VoluntaryExit => { event_handler.subscribe_exit() } @@ -4736,6 +4776,7 @@ pub fn serve( .uor(post_beacon_blocks_v2) .uor(post_beacon_blinded_blocks_v2) .uor(post_beacon_pool_attestations) + .uor(post_beacon_pool_attestations_v2) .uor(post_beacon_pool_attester_slashings) .uor(post_beacon_pool_proposer_slashings) .uor(post_beacon_pool_voluntary_exits) diff --git a/beacon_node/http_api/src/publish_attestations.rs b/beacon_node/http_api/src/publish_attestations.rs index 00654765325..111dee3cffb 100644 --- a/beacon_node/http_api/src/publish_attestations.rs +++ b/beacon_node/http_api/src/publish_attestations.rs @@ -40,17 +40,19 @@ use beacon_chain::{ 
BeaconChainTypes, }; use beacon_processor::work_reprocessing_queue::{QueuedUnaggregate, ReprocessQueueMessage}; +use either::Either; use eth2::types::Failure; use lighthouse_network::PubsubMessage; use network::NetworkMessage; use slog::{debug, error, warn, Logger}; +use std::borrow::Cow; use std::sync::Arc; use std::time::Duration; use tokio::sync::{ mpsc::{Sender, UnboundedSender}, oneshot, }; -use types::Attestation; +use types::{Attestation, EthSpec, SingleAttestation}; // Error variants are only used in `Debug` and considered `dead_code` by the compiler. #[derive(Debug)] @@ -62,6 +64,7 @@ enum Error { ReprocessDisabled, ReprocessFull, ReprocessTimeout, + FailedConversion(#[allow(dead_code)] BeaconChainError), } enum PublishAttestationResult { @@ -73,24 +76,39 @@ enum PublishAttestationResult { fn verify_and_publish_attestation( chain: &Arc>, - attestation: &Attestation, + either_attestation: &Either, SingleAttestation>, seen_timestamp: Duration, network_tx: &UnboundedSender>, log: &Logger, ) -> Result<(), Error> { - let attestation = chain - .verify_unaggregated_attestation_for_gossip(attestation, None) + let attestation = convert_to_attestation(chain, either_attestation)?; + let verified_attestation = chain + .verify_unaggregated_attestation_for_gossip(&attestation, None) .map_err(Error::Validation)?; - // Publish. - network_tx - .send(NetworkMessage::Publish { - messages: vec![PubsubMessage::Attestation(Box::new(( - attestation.subnet_id(), - attestation.attestation().clone_as_attestation(), - )))], - }) - .map_err(|_| Error::Publication)?; + match either_attestation { + Either::Left(attestation) => { + // Publish. 
+ network_tx + .send(NetworkMessage::Publish { + messages: vec![PubsubMessage::Attestation(Box::new(( + verified_attestation.subnet_id(), + attestation.clone(), + )))], + }) + .map_err(|_| Error::Publication)?; + } + Either::Right(single_attestation) => { + network_tx + .send(NetworkMessage::Publish { + messages: vec![PubsubMessage::SingleAttestation(Box::new(( + verified_attestation.subnet_id(), + single_attestation.clone(), + )))], + }) + .map_err(|_| Error::Publication)?; + } + } // Notify the validator monitor. chain @@ -98,12 +116,12 @@ fn verify_and_publish_attestation( .read() .register_api_unaggregated_attestation( seen_timestamp, - attestation.indexed_attestation(), + verified_attestation.indexed_attestation(), &chain.slot_clock, ); - let fc_result = chain.apply_attestation_to_fork_choice(&attestation); - let naive_aggregation_result = chain.add_to_naive_aggregation_pool(&attestation); + let fc_result = chain.apply_attestation_to_fork_choice(&verified_attestation); + let naive_aggregation_result = chain.add_to_naive_aggregation_pool(&verified_attestation); if let Err(e) = &fc_result { warn!( @@ -129,10 +147,48 @@ fn verify_and_publish_attestation( } } +fn convert_to_attestation<'a, T: BeaconChainTypes>( + chain: &Arc>, + attestation: &'a Either, SingleAttestation>, +) -> Result>, Error> { + let a = match attestation { + Either::Left(a) => Cow::Borrowed(a), + Either::Right(single_attestation) => chain + .with_committee_cache( + single_attestation.data.target.root, + single_attestation + .data + .slot + .epoch(T::EthSpec::slots_per_epoch()), + |committee_cache, _| { + let Some(committee) = committee_cache.get_beacon_committee( + single_attestation.data.slot, + single_attestation.committee_index as u64, + ) else { + return Err(BeaconChainError::AttestationError( + types::AttestationError::NoCommitteeForSlotAndIndex { + slot: single_attestation.data.slot, + index: single_attestation.committee_index as u64, + }, + )); + }; + + let attestation = + 
single_attestation.to_attestation::(committee.committee)?; + + Ok(Cow::Owned(attestation)) + }, + ) + .map_err(Error::FailedConversion)?, + }; + + Ok(a) +} + pub async fn publish_attestations( task_spawner: TaskSpawner, chain: Arc>, - attestations: Vec>, + attestations: Vec, SingleAttestation>>, network_tx: UnboundedSender>, reprocess_send: Option>, log: Logger, @@ -141,7 +197,10 @@ pub async fn publish_attestations( // move the `attestations` vec into the blocking task, so this small overhead is unavoidable. let attestation_metadata = attestations .iter() - .map(|att| (att.data().slot, att.committee_index())) + .map(|att| match att { + Either::Left(att) => (att.data().slot, att.committee_index()), + Either::Right(att) => (att.data.slot, Some(att.committee_index as u64)), + }) .collect::>(); // Gossip validate and publish attestations that can be immediately processed. diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 8cb6053e9ff..d6b8df33b3f 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -155,10 +155,6 @@ async fn attestations_across_fork_with_skip_slots() { .post_beacon_pool_attestations_v1(&unaggregated_attestations) .await .unwrap(); - client - .post_beacon_pool_attestations_v2(&unaggregated_attestations, fork_name) - .await - .unwrap(); let signed_aggregates = attestations .into_iter() diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 8cfcf5d93e9..60a4c507832 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -890,27 +890,48 @@ async fn queue_attestations_from_http() { let pre_state = harness.get_current_state(); let (block, post_state) = harness.make_block(pre_state, attestation_slot).await; let block_root = block.0.canonical_root(); + let fork_name = tester.harness.spec.fork_name_at_slot::(attestation_slot); // Make 
attestations to the block and POST them to the beacon node on a background thread. - let attestations = harness - .make_unaggregated_attestations( - &all_validators, - &post_state, - block.0.state_root(), - block_root.into(), - attestation_slot, - ) - .into_iter() - .flat_map(|attestations| attestations.into_iter().map(|(att, _subnet)| att)) - .collect::>(); - - let fork_name = tester.harness.spec.fork_name_at_slot::(attestation_slot); - let attestation_future = tokio::spawn(async move { - client - .post_beacon_pool_attestations_v2(&attestations, fork_name) - .await - .expect("attestations should be processed successfully") - }); + let attestation_future = if fork_name.electra_enabled() { + let single_attestations = harness + .make_single_attestations( + &all_validators, + &post_state, + block.0.state_root(), + block_root.into(), + attestation_slot, + ) + .into_iter() + .flat_map(|attestations| attestations.into_iter().map(|(att, _subnet)| att)) + .collect::>(); + + tokio::spawn(async move { + client + .post_beacon_pool_attestations_v2(&single_attestations, fork_name) + .await + .expect("attestations should be processed successfully") + }) + } else { + let attestations = harness + .make_unaggregated_attestations( + &all_validators, + &post_state, + block.0.state_root(), + block_root.into(), + attestation_slot, + ) + .into_iter() + .flat_map(|attestations| attestations.into_iter().map(|(att, _subnet)| att)) + .collect::>(); + + tokio::spawn(async move { + client + .post_beacon_pool_attestations_v1(&attestations) + .await + .expect("attestations should be processed successfully") + }) + }; // In parallel, apply the block. We need to manually notify the reprocess queue, because the // `beacon_chain` does not know about the queue and will not update it for us. 
diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 85d3b4e9bae..dd6a92603aa 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -40,7 +40,8 @@ use tree_hash::TreeHash; use types::application_domain::ApplicationDomain; use types::{ attestation::AttestationBase, AggregateSignature, BitList, Domain, EthSpec, ExecutionBlockHash, - Hash256, Keypair, MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot, + Hash256, Keypair, MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, SingleAttestation, + Slot, }; type E = MainnetEthSpec; @@ -71,6 +72,7 @@ struct ApiTester { next_block: PublishBlockRequest, reorg_block: PublishBlockRequest, attestations: Vec>, + single_attestations: Vec, contribution_and_proofs: Vec>, attester_slashing: AttesterSlashing, proposer_slashing: ProposerSlashing, @@ -203,6 +205,27 @@ impl ApiTester { "precondition: attestations for testing" ); + let fork_name = harness + .chain + .spec + .fork_name_at_slot::(harness.chain.slot().unwrap()); + + let single_attestations = if fork_name.electra_enabled() { + harness + .get_single_attestations( + &AttestationStrategy::AllValidators, + &head.beacon_state, + head_state_root, + head.beacon_block_root, + harness.chain.slot().unwrap(), + ) + .into_iter() + .flat_map(|vec| vec.into_iter().map(|(attestation, _subnet_id)| attestation)) + .collect::>() + } else { + vec![] + }; + let current_epoch = harness .chain .slot() @@ -294,6 +317,7 @@ impl ApiTester { next_block, reorg_block, attestations, + single_attestations, contribution_and_proofs, attester_slashing, proposer_slashing, @@ -381,6 +405,7 @@ impl ApiTester { next_block, reorg_block, attestations, + single_attestations: vec![], contribution_and_proofs: vec![], attester_slashing, proposer_slashing, @@ -1800,13 +1825,16 @@ impl ApiTester { } pub async fn test_post_beacon_pool_attestations_valid_v2(mut self) -> Self { + if self.single_attestations.is_empty() { + 
return self; + } let fork_name = self - .attestations + .single_attestations .first() - .map(|att| self.chain.spec.fork_name_at_slot::(att.data().slot)) + .map(|att| self.chain.spec.fork_name_at_slot::(att.data.slot)) .unwrap(); self.client - .post_beacon_pool_attestations_v2(self.attestations.as_slice(), fork_name) + .post_beacon_pool_attestations_v2(self.single_attestations.as_slice(), fork_name) .await .unwrap(); assert!( @@ -1854,10 +1882,13 @@ impl ApiTester { self } pub async fn test_post_beacon_pool_attestations_invalid_v2(mut self) -> Self { + if self.single_attestations.is_empty() { + return self; + } let mut attestations = Vec::new(); - for attestation in &self.attestations { + for attestation in &self.single_attestations { let mut invalid_attestation = attestation.clone(); - invalid_attestation.data_mut().slot += 1; + invalid_attestation.data.slot += 1; // add both to ensure we only fail on invalid attestations attestations.push(attestation.clone()); @@ -6011,6 +6042,48 @@ impl ApiTester { self } + pub async fn test_get_events_electra(self) -> Self { + let topics = vec![EventTopic::SingleAttestation]; + let mut events_future = self + .client + .get_events::(topics.as_slice()) + .await + .unwrap(); + + let expected_attestation_len = self.single_attestations.len(); + + let fork_name = self + .chain + .spec + .fork_name_at_slot::(self.chain.slot().unwrap()); + + self.client + .post_beacon_pool_attestations_v2(&self.single_attestations, fork_name) + .await + .unwrap(); + + let attestation_events = poll_events( + &mut events_future, + expected_attestation_len, + Duration::from_millis(10000), + ) + .await; + + assert_eq!( + attestation_events.as_slice(), + self.single_attestations + .clone() + .into_iter() + .map(|single_attestation| EventKind::SingleAttestation(Box::new( + single_attestation + ))) + .collect::>() + .as_slice() + ); + + self + } + pub async fn test_get_events_altair(self) -> Self { let topics = vec![EventTopic::ContributionAndProof]; let mut 
events_future = self @@ -6158,6 +6231,20 @@ async fn get_events_altair() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_events_electra() { + let mut config = ApiTesterConfig::default(); + config.spec.altair_fork_epoch = Some(Epoch::new(0)); + config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + config.spec.capella_fork_epoch = Some(Epoch::new(0)); + config.spec.deneb_fork_epoch = Some(Epoch::new(0)); + config.spec.electra_fork_epoch = Some(Epoch::new(0)); + ApiTester::new_from_config(config) + .await + .test_get_events_electra() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_events_from_genesis() { ApiTester::new_from_genesis() diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index c9769594708..1e1f3efa18c 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -7,15 +7,14 @@ use ssz::{Decode, Encode}; use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ - Attestation, AttestationBase, AttestationElectra, AttesterSlashing, AttesterSlashingBase, - AttesterSlashingElectra, BlobSidecar, DataColumnSidecar, DataColumnSubnetId, EthSpec, - ForkContext, ForkName, LightClientFinalityUpdate, LightClientOptimisticUpdate, - ProposerSlashing, SignedAggregateAndProof, SignedAggregateAndProofBase, - SignedAggregateAndProofElectra, SignedBeaconBlock, SignedBeaconBlockAltair, - SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, - SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, - SyncCommitteeMessage, SyncSubnetId, + Attestation, AttestationBase, AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, + BlobSidecar, DataColumnSidecar, DataColumnSubnetId, EthSpec, ForkContext, ForkName, + 
LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, + SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, + SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, + SignedBeaconBlockFulu, SignedBlsToExecutionChange, SignedContributionAndProof, + SignedVoluntaryExit, SingleAttestation, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -28,8 +27,10 @@ pub enum PubsubMessage { DataColumnSidecar(Box<(DataColumnSubnetId, Arc>)>), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. AggregateAndProofAttestation(Box>), - /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id. + /// Gossipsub message providing notification of a raw un-aggregated attestation with its subnet id. Attestation(Box<(SubnetId, Attestation)>), + /// Gossipsub message providing notification of a `SingleAttestation` with its subnet id. + SingleAttestation(Box<(SubnetId, SingleAttestation)>), /// Gossipsub message providing notification of a voluntary exit. VoluntaryExit(Box), /// Gossipsub message providing notification of a new proposer slashing. 
@@ -129,6 +130,9 @@ impl PubsubMessage { PubsubMessage::Attestation(attestation_data) => { GossipKind::Attestation(attestation_data.0) } + PubsubMessage::SingleAttestation(attestation_data) => { + GossipKind::Attestation(attestation_data.0) + } PubsubMessage::VoluntaryExit(_) => GossipKind::VoluntaryExit, PubsubMessage::ProposerSlashing(_) => GossipKind::ProposerSlashing, PubsubMessage::AttesterSlashing(_) => GossipKind::AttesterSlashing, @@ -189,32 +193,32 @@ impl PubsubMessage { ))) } GossipKind::Attestation(subnet_id) => { - let attestation = - match fork_context.from_context_bytes(gossip_topic.fork_digest) { - Some(&fork_name) => { - if fork_name.electra_enabled() { - Attestation::Electra( - AttestationElectra::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ) - } else { - Attestation::Base( - AttestationBase::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?, - ) - } - } - None => { - return Err(format!( - "Unknown gossipsub fork digest: {:?}", - gossip_topic.fork_digest - )) + match fork_context.from_context_bytes(gossip_topic.fork_digest) { + Some(&fork_name) => { + if fork_name.electra_enabled() { + let single_attestation = + SingleAttestation::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + Ok(PubsubMessage::SingleAttestation(Box::new(( + *subnet_id, + single_attestation, + )))) + } else { + let attestation = Attestation::Base( + AttestationBase::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ); + Ok(PubsubMessage::Attestation(Box::new(( + *subnet_id, + attestation, + )))) } - }; - Ok(PubsubMessage::Attestation(Box::new(( - *subnet_id, - attestation, - )))) + } + None => Err(format!( + "Unknown gossipsub fork digest: {:?}", + gossip_topic.fork_digest + )), + } } GossipKind::BeaconBlock => { let beacon_block = @@ -416,6 +420,7 @@ impl PubsubMessage { PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(), PubsubMessage::AttesterSlashing(data) => data.as_ssz_bytes(), PubsubMessage::Attestation(data) => 
data.1.as_ssz_bytes(), + PubsubMessage::SingleAttestation(data) => data.1.as_ssz_bytes(), PubsubMessage::SignedContributionAndProof(data) => data.as_ssz_bytes(), PubsubMessage::SyncCommitteeMessage(data) => data.1.as_ssz_bytes(), PubsubMessage::BlsToExecutionChange(data) => data.as_ssz_bytes(), @@ -460,6 +465,14 @@ impl std::fmt::Display for PubsubMessage { data.1.data().slot, data.1.committee_index(), ), + PubsubMessage::SingleAttestation(data) => write!( + f, + "SingleAttestation: subnet_id: {}, attestation_slot: {}, committee_index: {:?}, attester_index: {:?}", + *data.0, + data.1.data.slot, + data.1.committee_index, + data.1.attester_index, + ), PubsubMessage::VoluntaryExit(_data) => write!(f, "Voluntary Exit"), PubsubMessage::ProposerSlashing(_data) => write!(f, "Proposer Slashing"), PubsubMessage::AttesterSlashing(_data) => write!(f, "Attester Slashing"), diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 2d15d39c6fc..4a3fb28e102 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -84,6 +84,58 @@ impl NetworkBeaconProcessor { .map_err(Into::into) } + /// Create a new `Work` event for some `SingleAttestation`. 
+ pub fn send_single_attestation( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + single_attestation: SingleAttestation, + subnet_id: SubnetId, + should_import: bool, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let result = self.chain.with_committee_cache( + single_attestation.data.target.root, + single_attestation + .data + .slot + .epoch(T::EthSpec::slots_per_epoch()), + |committee_cache, _| { + let Some(committee) = committee_cache.get_beacon_committee( + single_attestation.data.slot, + single_attestation.committee_index as u64, + ) else { + warn!( + self.log, + "No beacon committee for slot and index"; + "slot" => single_attestation.data.slot, + "index" => single_attestation.committee_index + ); + return Ok(Ok(())); + }; + + let attestation = single_attestation.to_attestation(committee.committee)?; + + Ok(self.send_unaggregated_attestation( + message_id.clone(), + peer_id, + attestation, + subnet_id, + should_import, + seen_timestamp, + )) + }, + ); + + match result { + Ok(result) => result, + Err(e) => { + warn!(self.log, "Failed to send SingleAttestation"; "error" => ?e); + Ok(()) + } + } + } + /// Create a new `Work` event for some unaggregated attestation. 
pub fn send_unaggregated_attestation( self: &Arc, diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 0a99b6af0cf..d3da341e1c8 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -398,6 +398,17 @@ impl Router { timestamp_now(), ), ), + PubsubMessage::SingleAttestation(subnet_attestation) => self + .handle_beacon_processor_send_result( + self.network_beacon_processor.send_single_attestation( + message_id, + peer_id, + subnet_attestation.1, + subnet_attestation.0, + should_process, + timestamp_now(), + ), + ), PubsubMessage::BeaconBlock(block) => self.handle_beacon_processor_send_result( self.network_beacon_processor.send_gossip_beacon_block( message_id, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 7826807e035..f89241b4ae6 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -549,7 +549,23 @@ impl NetworkService { // the attestation, else we just just propagate the Attestation. let should_process = self.subnet_service.should_process_attestation( Subnet::Attestation(subnet_id), - attestation, + attestation.data(), ); self.send_to_router(RouterMessage::PubsubMessage( id, source, message, should_process, )); + } + PubsubMessage::SingleAttestation(ref subnet_and_attestation) => { + let subnet_id = subnet_and_attestation.0; + let single_attestation = &subnet_and_attestation.1; + // checks if we have an aggregator for the slot. If so, we should process + // the attestation, else we just propagate the Attestation. 
+ let should_process = self.subnet_service.should_process_attestation( + Subnet::Attestation(subnet_id), + &single_attestation.data, ); self.send_to_router(RouterMessage::PubsubMessage( id, diff --git a/beacon_node/network/src/subnet_service/mod.rs b/beacon_node/network/src/subnet_service/mod.rs index da1f220f042..33ae567eb3f 100644 --- a/beacon_node/network/src/subnet_service/mod.rs +++ b/beacon_node/network/src/subnet_service/mod.rs @@ -17,7 +17,7 @@ use lighthouse_network::{discv5::enr::NodeId, NetworkConfig, Subnet, SubnetDisco use slog::{debug, error, o, warn}; use slot_clock::SlotClock; use types::{ - Attestation, EthSpec, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, + AttestationData, EthSpec, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, ValidatorSubscription, }; @@ -363,7 +363,7 @@ impl SubnetService { pub fn should_process_attestation( &self, subnet: Subnet, - attestation: &Attestation, + attestation_data: &AttestationData, ) -> bool { // Proposer-only mode does not need to process attestations if self.proposer_only { @@ -374,7 +374,7 @@ impl SubnetService { .map(|tracked_vals| { tracked_vals.contains_key(&ExactSubnet { subnet, - slot: attestation.data().slot, + slot: attestation_data.slot, }) }) .unwrap_or(true) diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 12b1538984e..af8573a5789 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1324,9 +1324,9 @@ impl BeaconNodeHttpClient { } /// `POST v2/beacon/pool/attestations` - pub async fn post_beacon_pool_attestations_v2( + pub async fn post_beacon_pool_attestations_v2( &self, - attestations: &[Attestation], + attestations: &[SingleAttestation], fork_name: ForkName, ) -> Result<(), Error> { let mut path = self.eth_path(V2)?; diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 695d536944b..6d76101cb62 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1113,6 +1113,7 @@ impl ForkVersionDeserialize for 
SseExtendedPayloadAttributes { #[serde(bound = "E: EthSpec", untagged)] pub enum EventKind { Attestation(Box>), + SingleAttestation(Box), Block(SseBlock), BlobSidecar(SseBlobSidecar), FinalizedCheckpoint(SseFinalizedCheckpoint), @@ -1139,6 +1140,7 @@ impl EventKind { EventKind::Block(_) => "block", EventKind::BlobSidecar(_) => "blob_sidecar", EventKind::Attestation(_) => "attestation", + EventKind::SingleAttestation(_) => "single_attestation", EventKind::VoluntaryExit(_) => "voluntary_exit", EventKind::FinalizedCheckpoint(_) => "finalized_checkpoint", EventKind::ChainReorg(_) => "chain_reorg", @@ -1161,6 +1163,11 @@ impl EventKind { "attestation" => Ok(EventKind::Attestation(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Attestation: {:?}", e)), )?)), + "single_attestation" => Ok(EventKind::SingleAttestation( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!("SingleAttestation: {:?}", e)) + })?, + )), "block" => Ok(EventKind::Block(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Block: {:?}", e)), )?)), @@ -1255,6 +1262,7 @@ pub enum EventTopic { Block, BlobSidecar, Attestation, + SingleAttestation, VoluntaryExit, FinalizedCheckpoint, ChainReorg, @@ -1280,6 +1288,7 @@ impl FromStr for EventTopic { "block" => Ok(EventTopic::Block), "blob_sidecar" => Ok(EventTopic::BlobSidecar), "attestation" => Ok(EventTopic::Attestation), + "single_attestation" => Ok(EventTopic::SingleAttestation), "voluntary_exit" => Ok(EventTopic::VoluntaryExit), "finalized_checkpoint" => Ok(EventTopic::FinalizedCheckpoint), "chain_reorg" => Ok(EventTopic::ChainReorg), @@ -1306,6 +1315,7 @@ impl fmt::Display for EventTopic { EventTopic::Block => write!(f, "block"), EventTopic::BlobSidecar => write!(f, "blob_sidecar"), EventTopic::Attestation => write!(f, "attestation"), + EventTopic::SingleAttestation => write!(f, "single_attestation"), EventTopic::VoluntaryExit => write!(f, 
"voluntary_exit"), EventTopic::FinalizedCheckpoint => write!(f, "finalized_checkpoint"), EventTopic::ChainReorg => write!(f, "chain_reorg"), diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation.rs index 190964736fe..47e41acb5b1 100644 --- a/consensus/types/src/attestation.rs +++ b/consensus/types/src/attestation.rs @@ -12,8 +12,8 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; use super::{ - AggregateSignature, AttestationData, BitList, ChainSpec, Domain, EthSpec, Fork, SecretKey, - Signature, SignedRoot, + AggregateSignature, AttestationData, BitList, ChainSpec, CommitteeIndex, Domain, EthSpec, Fork, + SecretKey, Signature, SignedRoot, }; #[derive(Debug, PartialEq)] @@ -24,6 +24,10 @@ pub enum Error { IncorrectStateVariant, InvalidCommitteeLength, InvalidCommitteeIndex, + AttesterNotInCommittee(usize), + InvalidCommittee, + MissingCommittee, + NoCommitteeForSlotAndIndex { slot: Slot, index: CommitteeIndex }, } impl From for Error { @@ -231,6 +235,16 @@ impl Attestation { Attestation::Electra(att) => att.aggregation_bits.get(index), } } + + pub fn to_single_attestation_with_attester_index( + &self, + attester_index: usize, + ) -> Result { + match self { + Self::Base(_) => Err(Error::IncorrectStateVariant), + Self::Electra(attn) => attn.to_single_attestation_with_attester_index(attester_index), + } + } } impl AttestationRef<'_, E> { @@ -287,6 +301,14 @@ impl AttestationElectra { self.get_committee_indices().first().cloned() } + pub fn get_aggregation_bits(&self) -> Vec { + self.aggregation_bits + .iter() + .enumerate() + .filter_map(|(index, bit)| if bit { Some(index as u64) } else { None }) + .collect() + } + pub fn get_committee_indices(&self) -> Vec { self.committee_bits .iter() @@ -350,6 +372,22 @@ impl AttestationElectra { Ok(()) } } + + pub fn to_single_attestation_with_attester_index( + &self, + attester_index: usize, + ) -> Result { + let Some(committee_index) = self.committee_index() else { + return 
Err(Error::InvalidCommitteeIndex); + }; + + Ok(SingleAttestation { + committee_index: committee_index as usize, + attester_index, + data: self.data.clone(), + signature: self.signature.clone(), + }) + } } impl AttestationBase { @@ -527,6 +565,58 @@ impl ForkVersionDeserialize for Vec> { } } +#[derive( + Debug, + Clone, + Serialize, + Deserialize, + Decode, + Encode, + TestRandom, + Derivative, + arbitrary::Arbitrary, + TreeHash, + PartialEq, +)] +pub struct SingleAttestation { + pub committee_index: usize, + pub attester_index: usize, + pub data: AttestationData, + pub signature: AggregateSignature, +} + +impl SingleAttestation { + pub fn to_attestation(&self, committee: &[usize]) -> Result, Error> { + let aggregation_bit = committee + .iter() + .enumerate() + .find_map(|(i, &validator_index)| { + if self.attester_index == validator_index { + return Some(i); + } + None + }) + .ok_or(Error::AttesterNotInCommittee(self.attester_index))?; + + let mut committee_bits: BitVector = BitVector::default(); + committee_bits + .set(self.committee_index, true) + .map_err(|_| Error::InvalidCommitteeIndex)?; + + let mut aggregation_bits = + BitList::with_capacity(committee.len()).map_err(|_| Error::InvalidCommitteeLength)?; + + aggregation_bits.set(aggregation_bit, true)?; + + Ok(Attestation::Electra(AttestationElectra { + aggregation_bits, + committee_bits, + data: self.data.clone(), + signature: self.signature.clone(), + })) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index dcfa918146a..11d1f5271b7 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -118,7 +118,7 @@ pub use crate::aggregate_and_proof::{ }; pub use crate::attestation::{ Attestation, AttestationBase, AttestationElectra, AttestationRef, AttestationRefMut, - Error as AttestationError, + Error as AttestationError, SingleAttestation, }; pub use crate::attestation_data::AttestationData; pub use 
crate::attestation_duty::AttestationDuty; diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 187b070d29f..981d6d5653d 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -1,4 +1,5 @@ //! Identifies each shard by an integer identifier. +use crate::SingleAttestation; use crate::{AttestationRef, ChainSpec, CommitteeIndex, EthSpec, Slot}; use alloy_primitives::{bytes::Buf, U256}; use safe_arith::{ArithError, SafeArith}; @@ -57,6 +58,21 @@ impl SubnetId { ) } + /// Compute the subnet for an attestation where each slot in the + /// attestation epoch contains `committee_count_per_slot` committees. + pub fn compute_subnet_for_single_attestation( + attestation: &SingleAttestation, + committee_count_per_slot: u64, + spec: &ChainSpec, + ) -> Result { + Self::compute_subnet::( + attestation.data.slot, + attestation.committee_index as u64, + committee_count_per_slot, + spec, + ) + } + /// Compute the subnet for an attestation with `attestation.data.slot == slot` and /// `attestation.data.index == committee_index` where each slot in the attestation epoch /// contains `committee_count_at_slot` committees. diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index e31ad4f661b..58c6ea32988 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -457,8 +457,30 @@ impl AttestationService { &[validator_metrics::ATTESTATIONS_HTTP_POST], ); if fork_name.electra_enabled() { + let single_attestations = attestations + .iter() + .zip(validator_indices) + .filter_map(|(a, i)| { + match a.to_single_attestation_with_attester_index(*i as usize) { + Ok(a) => Some(a), + Err(e) => { + // This shouldn't happen unless BN and VC are out of sync with + // respect to the Electra fork. 
+ error!( + log, + "Unable to convert to SingleAttestation"; + "error" => ?e, + "committee_index" => attestation_data.index, + "slot" => slot.as_u64(), + "type" => "unaggregated", + ); + None + } + } + }) + .collect::>(); beacon_node - .post_beacon_pool_attestations_v2(attestations, fork_name) + .post_beacon_pool_attestations_v2(&single_attestations, fork_name) .await } else { beacon_node From 6ce33c4d1d8a7b3545181a7c8b0721ddf4c1bf38 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 20 Jan 2025 20:07:47 +1100 Subject: [PATCH 17/26] Do not send column requests if there is no blob for the block. (#6814) * Do not send column requests if there is no blob for the block. * Address review comments * Replace fix - the previous solution didnt work. --- .../network/src/sync/block_lookups/single_block_lookup.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 9bbd2bf295b..a096efcbb22 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -215,8 +215,7 @@ impl SingleBlockLookup { let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch()); if expected_blobs == 0 { self.component_requests = ComponentRequests::NotNeeded("no data"); - } - if cx.chain.should_fetch_blobs(block_epoch) { + } else if cx.chain.should_fetch_blobs(block_epoch) { self.component_requests = ComponentRequests::ActiveBlobRequest( BlobRequestState::new(self.block_root), expected_blobs, From 7a0388ef2aa8ea6de5cfa0be26dd000f011a6484 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 20 Jan 2025 19:31:18 +0700 Subject: [PATCH 18/26] Fix custodial peer assumption on lookup custody requests (#6815) * Fix custodial peer assumption on lookup custody requests * lint --- 
.../network/src/sync/block_lookups/common.rs | 17 ++++--- .../network/src/sync/block_lookups/mod.rs | 13 ++--- .../sync/block_lookups/single_block_lookup.rs | 49 ++++++------------- .../network/src/sync/network_context.rs | 41 ++++++++++++++-- .../src/sync/network_context/custody.rs | 20 ++++++-- 5 files changed, 80 insertions(+), 60 deletions(-) diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index 5e336d9c38e..8eefb2d6756 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -9,6 +9,8 @@ use crate::sync::network_context::{LookupRequestResult, SyncNetworkContext}; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::BeaconChainTypes; use lighthouse_network::service::api_types::Id; +use parking_lot::RwLock; +use std::collections::HashSet; use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; use types::{DataColumnSidecarList, SignedBeaconBlock}; @@ -41,7 +43,7 @@ pub trait RequestState { fn make_request( &self, id: Id, - peer_id: PeerId, + lookup_peers: Arc>>, expected_blobs: usize, cx: &mut SyncNetworkContext, ) -> Result; @@ -76,11 +78,11 @@ impl RequestState for BlockRequestState { fn make_request( &self, id: SingleLookupId, - peer_id: PeerId, + lookup_peers: Arc>>, _: usize, cx: &mut SyncNetworkContext, ) -> Result { - cx.block_lookup_request(id, peer_id, self.requested_block_root) + cx.block_lookup_request(id, lookup_peers, self.requested_block_root) .map_err(LookupRequestError::SendFailedNetwork) } @@ -124,11 +126,11 @@ impl RequestState for BlobRequestState { fn make_request( &self, id: Id, - peer_id: PeerId, + lookup_peers: Arc>>, expected_blobs: usize, cx: &mut SyncNetworkContext, ) -> Result { - cx.blob_lookup_request(id, peer_id, self.block_root, expected_blobs) + cx.blob_lookup_request(id, lookup_peers, self.block_root, expected_blobs) 
.map_err(LookupRequestError::SendFailedNetwork) } @@ -172,12 +174,11 @@ impl RequestState for CustodyRequestState { fn make_request( &self, id: Id, - // TODO(das): consider selecting peers that have custody but are in this set - _peer_id: PeerId, + lookup_peers: Arc>>, _: usize, cx: &mut SyncNetworkContext, ) -> Result { - cx.custody_lookup_request(id, self.block_root) + cx.custody_lookup_request(id, self.block_root, lookup_peers) .map_err(LookupRequestError::SendFailedNetwork) } diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 5a11bca4814..ac4df42a4e8 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -153,14 +153,7 @@ impl BlockLookups { pub(crate) fn active_single_lookups(&self) -> Vec { self.single_block_lookups .iter() - .map(|(id, l)| { - ( - *id, - l.block_root(), - l.awaiting_parent(), - l.all_peers().copied().collect(), - ) - }) + .map(|(id, l)| (*id, l.block_root(), l.awaiting_parent(), l.all_peers())) .collect() } @@ -283,7 +276,7 @@ impl BlockLookups { .find(|(_, l)| l.block_root() == parent_chain_tip) { cx.send_sync_message(SyncMessage::AddPeersForceRangeSync { - peers: lookup.all_peers().copied().collect(), + peers: lookup.all_peers(), head_slot: tip_lookup.peek_downloaded_block_slot(), head_root: parent_chain_tip, }); @@ -682,7 +675,7 @@ impl BlockLookups { lookup.continue_requests(cx) } Action::ParentUnknown { parent_root } => { - let peers = lookup.all_peers().copied().collect::>(); + let peers = lookup.all_peers(); lookup.set_awaiting_parent(parent_root); debug!(self.log, "Marking lookup as awaiting parent"; "id" => lookup.id, "block_root" => ?block_root, "parent_root" => ?parent_root); self.search_parent_of_child(parent_root, block_root, &peers, cx); diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 
a096efcbb22..3789dbe91e0 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -7,7 +7,7 @@ use crate::sync::network_context::{ use beacon_chain::{BeaconChainTypes, BlockProcessStatus}; use derivative::Derivative; use lighthouse_network::service::api_types::Id; -use rand::seq::IteratorRandom; +use parking_lot::RwLock; use std::collections::HashSet; use std::fmt::Debug; use std::sync::Arc; @@ -33,8 +33,6 @@ pub enum LookupRequestError { /// The failed attempts were primarily due to processing failures. cannot_process: bool, }, - /// No peers left to serve this lookup - NoPeers, /// Error sending event to network SendFailedNetwork(RpcRequestSendError), /// Error sending event to processor @@ -63,9 +61,12 @@ pub struct SingleBlockLookup { pub id: Id, pub block_request_state: BlockRequestState, pub component_requests: ComponentRequests, - /// Peers that claim to have imported this set of block components + /// Peers that claim to have imported this set of block components. This state is shared with + /// the custody request to have an updated view of the peers that claim to have imported the + /// block associated with this lookup. The peer set of a lookup can change rapidly, and faster + /// than the lifetime of a custody request. 
#[derivative(Debug(format_with = "fmt_peer_set_as_len"))] - peers: HashSet, + peers: Arc>>, block_root: Hash256, awaiting_parent: Option, created: Instant, @@ -92,7 +93,7 @@ impl SingleBlockLookup { id, block_request_state: BlockRequestState::new(requested_block_root), component_requests: ComponentRequests::WaitingForBlock, - peers: HashSet::from_iter(peers.iter().copied()), + peers: Arc::new(RwLock::new(HashSet::from_iter(peers.iter().copied()))), block_root: requested_block_root, awaiting_parent, created: Instant::now(), @@ -282,24 +283,11 @@ impl SingleBlockLookup { return Err(LookupRequestError::TooManyAttempts { cannot_process }); } - let Some(peer_id) = self.use_rand_available_peer() else { - // Allow lookup to not have any peers and do nothing. This is an optimization to not - // lose progress of lookups created from a block with unknown parent before we receive - // attestations for said block. - // Lookup sync event safety: If a lookup requires peers to make progress, and does - // not receive any new peers for some time it will be dropped. If it receives a new - // peer it must attempt to make progress. - R::request_state_mut(self) - .map_err(|e| LookupRequestError::BadState(e.to_owned()))? - .get_state_mut() - .update_awaiting_download_status("no peers"); - return Ok(()); - }; - + let peers = self.peers.clone(); let request = R::request_state_mut(self) .map_err(|e| LookupRequestError::BadState(e.to_owned()))?; - match request.make_request(id, peer_id, expected_blobs, cx)? { + match request.make_request(id, peers, expected_blobs, cx)? 
{ LookupRequestResult::RequestSent(req_id) => { // Lookup sync event safety: If make_request returns `RequestSent`, we are // guaranteed that `BlockLookups::on_download_response` will be called exactly @@ -347,29 +335,24 @@ impl SingleBlockLookup { } /// Get all unique peers that claim to have imported this set of block components - pub fn all_peers(&self) -> impl Iterator + '_ { - self.peers.iter() + pub fn all_peers(&self) -> Vec { + self.peers.read().iter().copied().collect() } /// Add peer to all request states. The peer must be able to serve this request. /// Returns true if the peer was newly inserted into some request state. pub fn add_peer(&mut self, peer_id: PeerId) -> bool { - self.peers.insert(peer_id) + self.peers.write().insert(peer_id) } /// Remove peer from available peers. pub fn remove_peer(&mut self, peer_id: &PeerId) { - self.peers.remove(peer_id); + self.peers.write().remove(peer_id); } /// Returns true if this lookup has zero peers pub fn has_no_peers(&self) -> bool { - self.peers.is_empty() - } - - /// Selects a random peer from available peers if any - fn use_rand_available_peer(&mut self) -> Option { - self.peers.iter().choose(&mut rand::thread_rng()).copied() + self.peers.read().is_empty() } } @@ -688,8 +671,8 @@ impl std::fmt::Debug for State { } fn fmt_peer_set_as_len( - peer_set: &HashSet, + peer_set: &Arc>>, f: &mut std::fmt::Formatter, ) -> Result<(), std::fmt::Error> { - write!(f, "{}", peer_set.len()) + write!(f, "{}", peer_set.read().len()) } diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index 0a6bc8961f8..f8999361288 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -27,7 +27,8 @@ use lighthouse_network::service::api_types::{ DataColumnsByRootRequester, Id, SingleLookupReqId, SyncRequestId, }; use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource}; -use 
rand::seq::SliceRandom; +use parking_lot::RwLock; +use rand::prelude::IteratorRandom; use rand::thread_rng; pub use requests::LookupVerifyError; use requests::{ @@ -308,8 +309,8 @@ impl SyncNetworkContext { pub fn get_random_custodial_peer(&self, column_index: ColumnIndex) -> Option { self.get_custodial_peers(column_index) + .into_iter() .choose(&mut thread_rng()) - .cloned() } pub fn network_globals(&self) -> &NetworkGlobals { @@ -562,9 +563,24 @@ impl SyncNetworkContext { pub fn block_lookup_request( &mut self, lookup_id: SingleLookupId, - peer_id: PeerId, + lookup_peers: Arc>>, block_root: Hash256, ) -> Result { + let Some(peer_id) = lookup_peers + .read() + .iter() + .choose(&mut rand::thread_rng()) + .copied() + else { + // Allow lookup to not have any peers and do nothing. This is an optimization to not + // lose progress of lookups created from a block with unknown parent before we receive + // attestations for said block. + // Lookup sync event safety: If a lookup requires peers to make progress, and does + // not receive any new peers for some time it will be dropped. If it receives a new + // peer it must attempt to make progress. + return Ok(LookupRequestResult::Pending("no peers")); + }; + match self.chain.get_block_process_status(&block_root) { // Unknown block, continue request to download BlockProcessStatus::Unknown => {} @@ -634,10 +650,25 @@ impl SyncNetworkContext { pub fn blob_lookup_request( &mut self, lookup_id: SingleLookupId, - peer_id: PeerId, + lookup_peers: Arc>>, block_root: Hash256, expected_blobs: usize, ) -> Result { + let Some(peer_id) = lookup_peers + .read() + .iter() + .choose(&mut rand::thread_rng()) + .copied() + else { + // Allow lookup to not have any peers and do nothing. This is an optimization to not + // lose progress of lookups created from a block with unknown parent before we receive + // attestations for said block. 
+ // Lookup sync event safety: If a lookup requires peers to make progress, and does + // not receive any new peers for some time it will be dropped. If it receives a new + // peer it must attempt to make progress. + return Ok(LookupRequestResult::Pending("no peers")); + }; + let imported_blob_indexes = self .chain .data_availability_checker @@ -740,6 +771,7 @@ impl SyncNetworkContext { &mut self, lookup_id: SingleLookupId, block_root: Hash256, + lookup_peers: Arc>>, ) -> Result { let custody_indexes_imported = self .chain @@ -777,6 +809,7 @@ impl SyncNetworkContext { block_root, CustodyId { requester }, &custody_indexes_to_fetch, + lookup_peers, self.log.clone(), ); diff --git a/beacon_node/network/src/sync/network_context/custody.rs b/beacon_node/network/src/sync/network_context/custody.rs index e4bce3dafcd..8a29545c21d 100644 --- a/beacon_node/network/src/sync/network_context/custody.rs +++ b/beacon_node/network/src/sync/network_context/custody.rs @@ -7,8 +7,10 @@ use fnv::FnvHashMap; use lighthouse_network::service::api_types::{CustodyId, DataColumnsByRootRequester}; use lighthouse_network::PeerId; use lru_cache::LRUTimeCache; +use parking_lot::RwLock; use rand::Rng; use slog::{debug, warn}; +use std::collections::HashSet; use std::time::{Duration, Instant}; use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use types::EthSpec; @@ -32,6 +34,8 @@ pub struct ActiveCustodyRequest { /// Peers that have recently failed to successfully respond to a columns by root request. /// Having a LRUTimeCache allows this request to not have to track disconnecting peers. failed_peers: LRUTimeCache, + /// Set of peers that claim to have imported this block and their custody columns + lookup_peers: Arc>>, /// Logger for the `SyncNetworkContext`. 
pub log: slog::Logger, _phantom: PhantomData, @@ -64,6 +68,7 @@ impl ActiveCustodyRequest { block_root: Hash256, custody_id: CustodyId, column_indices: &[ColumnIndex], + lookup_peers: Arc>>, log: slog::Logger, ) -> Self { Self { @@ -76,6 +81,7 @@ impl ActiveCustodyRequest { ), active_batch_columns_requests: <_>::default(), failed_peers: LRUTimeCache::new(Duration::from_secs(FAILED_PEERS_CACHE_EXPIRY_SECONDS)), + lookup_peers, log, _phantom: PhantomData, } @@ -215,6 +221,7 @@ impl ActiveCustodyRequest { } let mut columns_to_request_by_peer = HashMap::>::new(); + let lookup_peers = self.lookup_peers.read(); // Need to: // - track how many active requests a peer has for load balancing @@ -244,6 +251,8 @@ impl ActiveCustodyRequest { .iter() .map(|peer| { ( + // Prioritize peers that claim to know have imported this block + if lookup_peers.contains(peer) { 0 } else { 1 }, // De-prioritize peers that have failed to successfully respond to // requests recently self.failed_peers.contains(peer), @@ -257,7 +266,7 @@ impl ActiveCustodyRequest { .collect::>(); priorized_peers.sort_unstable(); - if let Some((_, _, _, peer_id)) = priorized_peers.first() { + if let Some((_, _, _, _, peer_id)) = priorized_peers.first() { columns_to_request_by_peer .entry(*peer_id) .or_default() @@ -283,10 +292,11 @@ impl ActiveCustodyRequest { block_root: self.block_root, indices: indices.clone(), }, - // true = enforce max_requests are returned data_columns_by_root. We only issue requests - // for blocks after we know the block has data, and only request peers after they claim to - // have imported the block+columns and claim to be custodians - true, + // If peer is in the lookup peer set, it claims to have imported the block and + // must have its columns in custody. In that case, set `true = enforce max_requests` + // and downscore if data_columns_by_root does not returned the expected custody + // columns. For the rest of peers, don't downscore if columns are missing. 
+ lookup_peers.contains(&peer_id), ) .map_err(Error::SendFailed)?; From 33c1648022e85d3443ed4c2fb72a3742975e9adb Mon Sep 17 00:00:00 2001 From: JKinc <73645805+JKincorperated@users.noreply.github.com> Date: Tue, 21 Jan 2025 00:24:14 +0000 Subject: [PATCH 19/26] Add EIP-7636 support (#6793) * Add eip7636 support * Add `version()` to the `lighthouse_version` crate and make the `enr.rs` file use it. * Hardcode version, Add `client_name()`, remove unneeded flag. * Make it use the new function. * Make cargo fmt zip it --- beacon_node/lighthouse_network/src/config.rs | 3 ++- .../lighthouse_network/src/discovery/enr.rs | 6 +++++ common/lighthouse_version/src/lib.rs | 26 +++++++++++++++++++ 3 files changed, 34 insertions(+), 1 deletion(-) diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 8a93b1185db..55c1dbf491d 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -116,7 +116,8 @@ pub struct Config { pub network_load: u8, /// Indicates if the user has set the network to be in private mode. Currently this - /// prevents sending client identifying information over identify. + /// prevents sending client identifying information over identify and prevents + /// EIP-7636 indentifiable information being provided in the ENR. pub private: bool, /// Shutdown beacon node after sync is completed. 
diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 8946c7753cc..062a119e0d5 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -8,6 +8,7 @@ use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use crate::NetworkConfig; use alloy_rlp::bytes::Bytes; use libp2p::identity::Keypair; +use lighthouse_version::{client_name, version}; use slog::{debug, warn}; use ssz::{Decode, Encode}; use ssz_types::BitVector; @@ -188,6 +189,11 @@ pub fn build_enr( builder.udp6(udp6_port.get()); } + // Add EIP 7636 client information + if !config.private { + builder.client_info(client_name().to_string(), version().to_string(), None); + } + // Add QUIC fields to the ENR. // Since QUIC is used as an alternative transport for the libp2p protocols, // the related fields should only be added when both QUIC and libp2p are enabled diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 0751bdadff5..a35b8c42c11 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -48,6 +48,22 @@ pub fn version_with_platform() -> String { format!("{}/{}-{}", VERSION, Target::arch(), Target::os()) } +/// Returns semantic versioning information only. +/// +/// ## Example +/// +/// `1.5.1` +pub fn version() -> &'static str { + "6.0.1" +} + +/// Returns the name of the current client running. 
+/// +/// This will usually be "Lighthouse" +pub fn client_name() -> &'static str { + "Lighthouse" +} + #[cfg(test)] mod test { use super::*; @@ -64,4 +80,14 @@ mod test { VERSION ); } + + #[test] + fn semantic_version_formatting() { + let re = Regex::new(r"^[0-9]+\.[0-9]+\.[0-9]+").unwrap(); + assert!( + re.is_match(version()), + "semantic version doesn't match regex: {}", + version() + ); + } } From 4e8f8269edeaa78a2c6ec9541810816cef99afa7 Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Mon, 13 Jan 2025 14:53:08 +0100 Subject: [PATCH 20/26] expose `point` and `from_point` on `GenericSignature` --- crypto/bls/src/generic_signature.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index 05e0a222bd5..7f347dcaed0 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -93,12 +93,12 @@ where } /// Returns a reference to the underlying BLS point. - pub(crate) fn point(&self) -> Option<&Sig> { + pub fn point(&self) -> Option<&Sig> { self.point.as_ref() } /// Instantiates `Self` from a `point`. - pub(crate) fn from_point(point: Sig, is_infinity: bool) -> Self { + pub fn from_point(point: Sig, is_infinity: bool) -> Self { Self { point: Some(point), is_infinity, From dfd7ab0373a24fcb6c62b2a6202ffafcf80cfdc3 Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Tue, 14 Jan 2025 21:36:13 +0100 Subject: [PATCH 21/26] add `point` and `from_point` to `GenericSecretKey` --- crypto/bls/src/generic_secret_key.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/crypto/bls/src/generic_secret_key.rs b/crypto/bls/src/generic_secret_key.rs index a0a43311107..c48fdc4198a 100644 --- a/crypto/bls/src/generic_secret_key.rs +++ b/crypto/bls/src/generic_secret_key.rs @@ -61,6 +61,20 @@ where GenericPublicKey::from_point(self.point.public_key()) } + /// Returns a reference to the underlying BLS point. 
+ pub fn point(&self) -> &Sec { + &self.point + } + + /// Instantiates `Self` from a `point`. + pub fn from_point(point: Sec) -> Self { + Self { + point, + _phantom_signature: PhantomData, + _phantom_public_key: PhantomData, + } + } + /// Serialize `self` as compressed bytes. /// /// ## Note From 5dfda40285eab4bd2180cd31ea9013db11fd9c39 Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Wed, 15 Jan 2025 12:49:28 +0100 Subject: [PATCH 22/26] add `serialize_uncompressed` to `GenericSignature` --- crypto/bls/src/generic_signature.rs | 11 +++++++++++ crypto/bls/src/impls/blst.rs | 5 +++++ crypto/bls/src/impls/fake_crypto.rs | 7 +++++++ 3 files changed, 23 insertions(+) diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index 7f347dcaed0..4bb9c1597ba 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -14,6 +14,9 @@ use tree_hash::TreeHash; /// The byte-length of a BLS signature when serialized in compressed form. pub const SIGNATURE_BYTES_LEN: usize = 96; +/// The byte-length of a BLS signature when serialized in uncompressed form. +pub const SIGNATURE_UNCOMPRESSED_BYTES_LEN: usize = 192; + /// Represents the signature at infinity. pub const INFINITY_SIGNATURE: [u8; SIGNATURE_BYTES_LEN] = [ 0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -31,6 +34,9 @@ pub trait TSignature: Sized + Clone { /// Serialize `self` as compressed bytes. fn serialize(&self) -> [u8; SIGNATURE_BYTES_LEN]; + /// Serialize `self` as uncompressed bytes. + fn serialize_uncompressed(&self) -> [u8; SIGNATURE_UNCOMPRESSED_BYTES_LEN]; + /// Deserialize `self` from compressed bytes. fn deserialize(bytes: &[u8]) -> Result; @@ -115,6 +121,11 @@ where } } + /// Serialize `self` as compressed bytes. 
+ pub fn serialize_uncompressed(&self) -> Option<[u8; SIGNATURE_UNCOMPRESSED_BYTES_LEN]> { + self.point.as_ref().map(|point| point.serialize_uncompressed()) + } + /// Deserialize `self` from compressed bytes. pub fn deserialize(bytes: &[u8]) -> Result { let point = if bytes == &NONE_SIGNATURE[..] { diff --git a/crypto/bls/src/impls/blst.rs b/crypto/bls/src/impls/blst.rs index baa704e05a9..1937247a4fa 100644 --- a/crypto/bls/src/impls/blst.rs +++ b/crypto/bls/src/impls/blst.rs @@ -11,6 +11,7 @@ use crate::{ pub use blst::min_pk as blst_core; use blst::{blst_scalar, BLST_ERROR}; use rand::Rng; +use crate::generic_signature::SIGNATURE_UNCOMPRESSED_BYTES_LEN; pub const DST: &[u8] = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_"; pub const RAND_BITS: usize = 64; @@ -189,6 +190,10 @@ impl TSignature for blst_core::Signature { self.to_bytes() } + fn serialize_uncompressed(&self) -> [u8; SIGNATURE_UNCOMPRESSED_BYTES_LEN] { + self.serialize() + } + fn deserialize(bytes: &[u8]) -> Result { Self::from_bytes(bytes).map_err(Into::into) } diff --git a/crypto/bls/src/impls/fake_crypto.rs b/crypto/bls/src/impls/fake_crypto.rs index a09fb347e6b..f0884e0e6b3 100644 --- a/crypto/bls/src/impls/fake_crypto.rs +++ b/crypto/bls/src/impls/fake_crypto.rs @@ -8,6 +8,7 @@ use crate::{ generic_signature::{TSignature, SIGNATURE_BYTES_LEN}, Error, Hash256, ZeroizeHash, INFINITY_PUBLIC_KEY, INFINITY_SIGNATURE, }; +use crate::generic_signature::SIGNATURE_UNCOMPRESSED_BYTES_LEN; /// Provides the externally-facing, core BLS types. 
pub mod types { @@ -106,6 +107,12 @@ impl TSignature for Signature { self.0 } + fn serialize_uncompressed(&self) -> [u8; SIGNATURE_UNCOMPRESSED_BYTES_LEN] { + let mut ret = [0; SIGNATURE_UNCOMPRESSED_BYTES_LEN]; + ret[0..SIGNATURE_BYTES_LEN].copy_from_slice(&self.0); + ret + } + fn deserialize(bytes: &[u8]) -> Result { let mut signature = Self::infinity(); signature.0[..].copy_from_slice(&bytes[0..SIGNATURE_BYTES_LEN]); From f551579e328409a6360ca2a01dfe59854de0e5a2 Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Wed, 15 Jan 2025 13:35:40 +0100 Subject: [PATCH 23/26] add `deserialize_uncompressed` to `GenericSignature` --- crypto/bls/src/generic_signature.rs | 3 +++ crypto/bls/src/impls/blst.rs | 4 ++++ crypto/bls/src/impls/fake_crypto.rs | 4 ++++ 3 files changed, 11 insertions(+) diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index 4bb9c1597ba..e0a5ff6cdcb 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -40,6 +40,9 @@ pub trait TSignature: Sized + Clone { /// Deserialize `self` from compressed bytes. fn deserialize(bytes: &[u8]) -> Result; + /// Serialize `self` from uncompressed bytes. + fn deserialize_uncompressed(bytes: &[u8]) -> Result; + /// Returns `true` if `self` is a signature across `msg` by `pubkey`. 
fn verify(&self, pubkey: &GenericPublicKey, msg: Hash256) -> bool; } diff --git a/crypto/bls/src/impls/blst.rs b/crypto/bls/src/impls/blst.rs index 1937247a4fa..aef1e9695c4 100644 --- a/crypto/bls/src/impls/blst.rs +++ b/crypto/bls/src/impls/blst.rs @@ -198,6 +198,10 @@ impl TSignature for blst_core::Signature { Self::from_bytes(bytes).map_err(Into::into) } + fn deserialize_uncompressed(bytes: &[u8]) -> Result { + Self::deserialize(bytes).map_err(Into::into) + } + fn verify(&self, pubkey: &blst_core::PublicKey, msg: Hash256) -> bool { // Public keys have already been checked for subgroup and infinity // Check Signature inside function for subgroup diff --git a/crypto/bls/src/impls/fake_crypto.rs b/crypto/bls/src/impls/fake_crypto.rs index f0884e0e6b3..b2ac208da82 100644 --- a/crypto/bls/src/impls/fake_crypto.rs +++ b/crypto/bls/src/impls/fake_crypto.rs @@ -119,6 +119,10 @@ impl TSignature for Signature { Ok(signature) } + fn deserialize_uncompressed(bytes: &[u8]) -> Result { + Self::deserialize(bytes) + } + fn verify(&self, _pubkey: &PublicKey, _msg: Hash256) -> bool { true } From 87515cca2cd97f6766d7e09e4f7b7a9d37795938 Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Wed, 15 Jan 2025 13:40:20 +0100 Subject: [PATCH 24/26] actually add `deserialize_uncompressed` to `GenericSignature` --- crypto/bls/src/generic_signature.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index e0a5ff6cdcb..c5ca4d9a189 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -143,6 +143,15 @@ where _phantom: PhantomData, }) } + + /// Deserialize `self` from uncompressed bytes. 
+ pub fn deserialize_uncompressed(bytes: &[u8]) -> Result { + Ok(Self { + point: Some(Sig::deserialize_uncompressed(bytes)?), + is_infinity: false, // todo + _phantom: PhantomData, + }) + } } impl GenericSignature From a714c146f1ea823cb815342eb9d5f3ef5c647760 Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Tue, 21 Jan 2025 12:57:19 +0100 Subject: [PATCH 25/26] cargo fmt --- crypto/bls/src/generic_signature.rs | 4 +++- crypto/bls/src/impls/blst.rs | 3 +-- crypto/bls/src/impls/fake_crypto.rs | 3 +-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index c5ca4d9a189..355f17aef1c 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -126,7 +126,9 @@ where /// Serialize `self` as compressed bytes. pub fn serialize_uncompressed(&self) -> Option<[u8; SIGNATURE_UNCOMPRESSED_BYTES_LEN]> { - self.point.as_ref().map(|point| point.serialize_uncompressed()) + self.point + .as_ref() + .map(|point| point.serialize_uncompressed()) } /// Deserialize `self` from compressed bytes. 
diff --git a/crypto/bls/src/impls/blst.rs b/crypto/bls/src/impls/blst.rs index aef1e9695c4..6ca0fe09b2d 100644 --- a/crypto/bls/src/impls/blst.rs +++ b/crypto/bls/src/impls/blst.rs @@ -5,13 +5,12 @@ use crate::{ GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, }, generic_secret_key::TSecretKey, - generic_signature::{TSignature, SIGNATURE_BYTES_LEN}, + generic_signature::{TSignature, SIGNATURE_BYTES_LEN, SIGNATURE_UNCOMPRESSED_BYTES_LEN}, BlstError, Error, Hash256, ZeroizeHash, INFINITY_SIGNATURE, }; pub use blst::min_pk as blst_core; use blst::{blst_scalar, BLST_ERROR}; use rand::Rng; -use crate::generic_signature::SIGNATURE_UNCOMPRESSED_BYTES_LEN; pub const DST: &[u8] = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_"; pub const RAND_BITS: usize = 64; diff --git a/crypto/bls/src/impls/fake_crypto.rs b/crypto/bls/src/impls/fake_crypto.rs index b2ac208da82..7273697597b 100644 --- a/crypto/bls/src/impls/fake_crypto.rs +++ b/crypto/bls/src/impls/fake_crypto.rs @@ -5,10 +5,9 @@ use crate::{ GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, }, generic_secret_key::{TSecretKey, SECRET_KEY_BYTES_LEN}, - generic_signature::{TSignature, SIGNATURE_BYTES_LEN}, + generic_signature::{TSignature, SIGNATURE_BYTES_LEN, SIGNATURE_UNCOMPRESSED_BYTES_LEN}, Error, Hash256, ZeroizeHash, INFINITY_PUBLIC_KEY, INFINITY_SIGNATURE, }; -use crate::generic_signature::SIGNATURE_UNCOMPRESSED_BYTES_LEN; /// Provides the externally-facing, core BLS types. 
pub mod types { From 3f49da0af532827ba086c4e0a166b2f409fd962e Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Tue, 21 Jan 2025 17:13:21 +0100 Subject: [PATCH 26/26] fix merge --- validator_client/beacon_node_fallback/src/lib.rs | 11 +++++------ .../validator_services/src/attestation_service.rs | 11 +++++------ 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index 5ee6d7ab82a..48985f32566 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -351,12 +351,11 @@ impl CandidateBeaconNode { ); } else if beacon_node_spec.fulu_fork_epoch != spec.fulu_fork_epoch { warn!( - log, - "Beacon node has mismatched Fulu fork epoch"; - "endpoint" => %self.beacon_node, - "endpoint_fulu_fork_epoch" => ?beacon_node_spec.fulu_fork_epoch, - "hint" => UPDATE_REQUIRED_LOG_HINT, - ); + endpoint = %self.beacon_node, + endpoint_fulu_fork_epoc = ?beacon_node_spec.fulu_fork_epoch, + hint = UPDATE_REQUIRED_LOG_HINT, + "Beacon node has mismatched Fulu fork epoch" + ); } Ok(()) diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index baaef0bd393..76bba37a816 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -457,12 +457,11 @@ impl AttestationService ?e, - "committee_index" => attestation_data.index, - "slot" => slot.as_u64(), - "type" => "unaggregated", + error = ?e, + committee_index = attestation_data.index, + slot = slot.as_u64(), + "type" = "unaggregated", + "Unable to convert to SingleAttestation", ); None }