From 183b801c33ed1951b09b82edf62b416fd9b1b5e5 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Tue, 19 Nov 2024 22:32:32 +0000 Subject: [PATCH 01/49] start on execution layer integration --- Cargo.lock | 913 +++++++++++++++++++++++++-- Cargo.toml | 2 + anchor/eth/Cargo.toml | 14 + anchor/eth/src/abi/ssv_contract.json | 1 + anchor/eth/src/event_processor.rs | 6 + anchor/eth/src/gen.rs | 9 + anchor/eth/src/lib.rs | 3 + anchor/eth/src/sync.rs | 164 +++++ anchor/network/Cargo.toml | 2 +- anchor/src/main.rs | 1 + 10 files changed, 1078 insertions(+), 37 deletions(-) create mode 100644 anchor/eth/Cargo.toml create mode 100644 anchor/eth/src/abi/ssv_contract.json create mode 100644 anchor/eth/src/event_processor.rs create mode 100644 anchor/eth/src/gen.rs create mode 100644 anchor/eth/src/lib.rs create mode 100644 anchor/eth/src/sync.rs diff --git a/Cargo.lock b/Cargo.lock index 7b414109c..b53dde995 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -98,6 +98,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", + "getrandom", "once_cell", "version_check", "zerocopy", @@ -118,16 +119,116 @@ version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +[[package]] +name = "alloy" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b524b8c28a7145d1fe4950f84360b5de3e307601679ff0558ddc20ea229399" +dependencies = [ + "alloy-consensus 0.6.4", + "alloy-contract", + "alloy-core", + "alloy-eips 0.6.4", + "alloy-genesis", + "alloy-network", + "alloy-provider", + "alloy-pubsub", + "alloy-rpc-client", + "alloy-rpc-types", + "alloy-serde", + "alloy-transport", + "alloy-transport-http", + "alloy-transport-ws", +] + +[[package]] +name = "alloy-chains" +version = "0.1.47" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "18c5c520273946ecf715c0010b4e3503d7eba9893cd9ce6b7fff5654c4a3c470" +dependencies = [ + "alloy-primitives", + "num_enum", + "strum 0.26.3", +] + [[package]] name = "alloy-consensus" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "629b62e38d471cc15fea534eb7283d2f8a4e8bdb1811bcc5d66dda6cfce6fae1" dependencies = [ - "alloy-eips", + "alloy-eips 0.3.6", + "alloy-primitives", + "alloy-rlp", + "c-kzg", +] + +[[package]] +name = "alloy-consensus" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae09ffd7c29062431dd86061deefe4e3c6f07fa0d674930095f8dcedb0baf02c" +dependencies = [ + "alloy-eips 0.6.4", "alloy-primitives", "alloy-rlp", + "alloy-serde", + "auto_impl", "c-kzg", + "derive_more 1.0.0", + "serde", +] + +[[package]] +name = "alloy-contract" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66430a72d5bf5edead101c8c2f0a24bada5ec9f3cf9909b3e08b6d6899b4803e" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + "alloy-provider", + "alloy-pubsub", + "alloy-rpc-types-eth", + "alloy-sol-types", + "alloy-transport", + "futures", + "futures-util", + "thiserror", +] + +[[package]] +name = "alloy-core" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8316d83e590f4163b221b8180008f302bda5cf5451202855cdd323e588849c" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-primitives", + "alloy-rlp", + "alloy-sol-types", +] + +[[package]] +name = "alloy-dyn-abi" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2364c782a245cf8725ea6dbfca5f530162702b5d685992ea03ce64529136cc" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-type-parser", + "alloy-sol-types", + 
"const-hex", + "itoa", + "serde", + "serde_json", + "winnow 0.6.20", ] [[package]] @@ -138,6 +239,7 @@ checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" dependencies = [ "alloy-primitives", "alloy-rlp", + "serde", ] [[package]] @@ -150,6 +252,18 @@ dependencies = [ "alloy-rlp", ] +[[package]] +name = "alloy-eip7702" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6cee6a35793f3db8a5ffe60e86c695f321d081a567211245f503e8c498fce8" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "derive_more 1.0.0", + "serde", +] + [[package]] name = "alloy-eips" version = "0.3.6" @@ -157,7 +271,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f923dd5fca5f67a43d81ed3ebad0880bd41f6dd0ada930030353ac356c54cd0f" dependencies = [ "alloy-eip2930", - "alloy-eip7702", + "alloy-eip7702 0.1.1", "alloy-primitives", "alloy-rlp", "c-kzg", @@ -167,11 +281,102 @@ dependencies = [ "sha2 0.10.8", ] +[[package]] +name = "alloy-eips" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b6aa3961694b30ba53d41006131a2fca3bdab22e4c344e46db2c639e7c2dfdd" +dependencies = [ + "alloy-eip2930", + "alloy-eip7702 0.4.1", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "c-kzg", + "derive_more 1.0.0", + "once_cell", + "serde", + "sha2 0.10.8", +] + +[[package]] +name = "alloy-genesis" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e53f7877ded3921d18a0a9556d55bedf84535567198c9edab2aa23106da91855" +dependencies = [ + "alloy-primitives", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-json-abi" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b84c506bf264110fa7e90d9924f742f40ef53c6572ea56a0b0bd714a567ed389" +dependencies = [ + "alloy-primitives", + "alloy-sol-type-parser", + "serde", + "serde_json", +] + +[[package]] +name = 
"alloy-json-rpc" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3694b7e480728c0b3e228384f223937f14c10caef5a4c766021190fc8f283d35" +dependencies = [ + "alloy-primitives", + "alloy-sol-types", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "alloy-network" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea94b8ceb5c75d7df0a93ba0acc53b55a22b47b532b600a800a87ef04eb5b0b4" +dependencies = [ + "alloy-consensus 0.6.4", + "alloy-eips 0.6.4", + "alloy-json-rpc", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "alloy-signer", + "alloy-sol-types", + "async-trait", + "auto_impl", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "alloy-network-primitives" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df9f3e281005943944d15ee8491534a1c7b3cbf7a7de26f8c433b842b93eb5f9" +dependencies = [ + "alloy-consensus 0.6.4", + "alloy-eips 0.6.4", + "alloy-primitives", + "alloy-serde", + "serde", +] + [[package]] name = "alloy-primitives" -version = "0.8.5" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "260d3ff3bff0bb84599f032a2f2c6828180b0ea0cd41fdaf44f39cef3ba41861" +checksum = "9fce5dbd6a4f118eecc4719eaa9c7ffc31c315e6c5ccde3642db927802312425" dependencies = [ "alloy-rlp", "arbitrary", @@ -180,8 +385,9 @@ dependencies = [ "const-hex", "derive_arbitrary", "derive_more 1.0.0", + "foldhash", "getrandom", - "hashbrown 0.14.5", + "hashbrown 0.15.0", "hex-literal", "indexmap", "itoa", @@ -198,11 +404,69 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "alloy-provider" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40c1f9eede27bf4c13c099e8e64d54efd7ce80ef6ea47478aa75d5d74e2dba3b" +dependencies = [ + "alloy-chains", + 
"alloy-consensus 0.6.4", + "alloy-eips 0.6.4", + "alloy-json-rpc", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + "alloy-pubsub", + "alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-transport", + "alloy-transport-http", + "alloy-transport-ws", + "async-stream", + "async-trait", + "auto_impl", + "dashmap", + "futures", + "futures-utils-wasm", + "lru", + "parking_lot 0.12.3", + "pin-project", + "reqwest 0.12.9", + "schnellru", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", + "url", + "wasmtimer", +] + +[[package]] +name = "alloy-pubsub" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90f1f34232f77341076541c405482e4ae12f0ee7153d8f9969fc1691201b2247" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-transport", + "bimap", + "futures", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower", + "tracing", +] + [[package]] name = "alloy-rlp" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" +checksum = "da0822426598f95e45dd1ea32a738dac057529a709ee645fcc516ffa4cbde08f" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -211,13 +475,220 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.8" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] + +[[package]] +name = "alloy-rpc-client" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" +checksum = "374dbe0dc3abdc2c964f36b3d3edf9cdb3db29d16bda34aa123f03d810bec1dd" dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-pubsub", + "alloy-transport", + 
"alloy-transport-http", + "alloy-transport-ws", + "futures", + "pin-project", + "reqwest 0.12.9", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower", + "tracing", + "url", + "wasmtimer", +] + +[[package]] +name = "alloy-rpc-types" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c74832aa474b670309c20fffc2a869fa141edab7c79ff7963fad0a08de60bae1" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-rpc-types-eth" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a477281940d82d29315846c7216db45b15e90bcd52309da9f54bcf7ad94a11" +dependencies = [ + "alloy-consensus 0.6.4", + "alloy-eips 0.6.4", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "derive_more 1.0.0", + "itertools 0.13.0", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-serde" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dfa4a7ccf15b2492bb68088692481fd6b2604ccbee1d0d6c44c21427ae4df83" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-signer" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e10aec39d60dc27edcac447302c7803d2371946fb737245320a05b78eb2fafd" +dependencies = [ + "alloy-primitives", + "async-trait", + "auto_impl", + "elliptic-curve 0.13.8", + "k256 0.13.4", + "thiserror", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9343289b4a7461ed8bab8618504c995c049c082b70c7332efd7b32125633dc05" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.79", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.12" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4222d70bec485ceccc5d8fd4f2909edd65b5d5e43d4aca0b5dcee65d519ae98f" +dependencies = [ + "alloy-json-abi", + "alloy-sol-macro-input", + "const-hex", + "heck 0.5.0", + "indexmap", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.79", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e17f2677369571b976e51ea1430eb41c3690d344fef567b840bfc0b01b6f83a" +dependencies = [ + "alloy-json-abi", + "const-hex", + "dunce", + "heck 0.5.0", "proc-macro2", "quote", + "serde_json", "syn 2.0.79", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa64d80ae58ffaafdff9d5d84f58d03775f66c84433916dc9a64ed16af5755da" +dependencies = [ + "serde", + "winnow 0.6.20", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6520d427d4a8eb7aa803d852d7a52ceb0c519e784c292f64bb339e636918cf27" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-macro", + "const-hex", + "serde", +] + +[[package]] +name = "alloy-transport" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f99acddb34000d104961897dbb0240298e8b775a7efffb9fda2a1a3efedd65b3" +dependencies = [ + "alloy-json-rpc", + "base64 0.22.1", + "futures-util", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower", + "tracing", + "url", + "wasmtimer", +] + +[[package]] +name = "alloy-transport-http" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dc013132e34eeadaa0add7e74164c1503988bfba8bae885b32e0918ba85a8a6" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "reqwest 0.12.9", + 
"serde_json", + "tower", + "tracing", + "url", +] + +[[package]] +name = "alloy-transport-ws" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abd170e600801116d5efe64f74a4fc073dbbb35c807013a7d0a388742aeebba0" +dependencies = [ + "alloy-pubsub", + "alloy-transport", + "futures", + "http 1.1.0", + "rustls 0.23.15", + "serde_json", + "tokio", + "tokio-tungstenite", + "tracing", + "ws_stream_wasm", ] [[package]] @@ -554,6 +1025,28 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "async-trait" version = "0.1.83" @@ -565,6 +1058,17 @@ dependencies = [ "syn 2.0.79", ] +[[package]] +name = "async_io_stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +dependencies = [ + "futures", + "pharos", + "rustc_version 0.4.1", +] + [[package]] name = "asynchronous-codec" version = "0.7.0" @@ -767,7 +1271,7 @@ dependencies = [ "ssz_types", "state_processing", "store", - "strum", + "strum 0.24.1", "superstruct", "task_executor 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "tempfile", @@ -778,6 +1282,12 @@ dependencies = [ "types 0.2.1 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", ] +[[package]] +name = "bimap" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" + [[package]] name = "bincode" version = "1.3.3" @@ -956,7 +1466,7 @@ source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#3a68 dependencies = [ "eth2", "lighthouse_version 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", - "reqwest", + "reqwest 0.11.27", "sensitive_url 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "serde", ] @@ -1193,7 +1703,7 @@ dependencies = [ "parking_lot 0.12.3", "sensitive_url 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "serde", - "strum", + "strum 0.24.1", "task_executor 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "tokio", "tracing", @@ -1628,12 +2138,26 @@ dependencies = [ "libc", ] -[[package]] -name = "dary_heap" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04d2cd9c18b9f454ed67da600630b021a8a80bf33f8c95896ab33aaf1c26b728" - +[[package]] +name = "dary_heap" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04d2cd9c18b9f454ed67da600630b021a8a80bf33f8c95896ab33aaf1c26b728" + +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core 0.9.10", +] + [[package]] name = "data-encoding" version = "2.6.0" @@ -1695,7 +2219,7 @@ dependencies = [ "ethabi 16.0.0", "ethereum_ssz", "hex", - "reqwest", + "reqwest 0.11.27", "serde_json", "sha2 0.9.9", "tree_hash", @@ -1985,6 +2509,12 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" +[[package]] +name = "dunce" +version = "1.0.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "ecdsa" version = "0.14.8" @@ -2178,6 +2708,15 @@ dependencies = [ "version_check", ] +[[package]] +name = "eth" +version = "0.1.0" +dependencies = [ + "alloy", + "futures", + "tokio", +] + [[package]] name = "eth1" version = "0.2.0" @@ -2224,7 +2763,7 @@ dependencies = [ "procfs", "proto_array", "psutil", - "reqwest", + "reqwest 0.11.27", "ring 0.16.20", "sensitive_url 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "serde", @@ -2324,7 +2863,7 @@ dependencies = [ "kzg 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "logging 0.2.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "pretty_reqwest_error 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", - "reqwest", + "reqwest 0.11.27", "sensitive_url 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "serde_yaml", "sha2 0.9.9", @@ -2345,7 +2884,7 @@ dependencies = [ "kzg 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "logging 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "pretty_reqwest_error 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", - "reqwest", + "reqwest 0.11.27", "sensitive_url 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "serde_yaml", "sha2 0.9.9", @@ -2527,7 +3066,7 @@ dependencies = [ "rlp-derive", "serde", "serde_json", - "strum", + "strum 0.24.1", "thiserror", "tiny-keccak", "unicode-xid", @@ -2565,7 +3104,7 @@ name = "execution_layer" version = "0.1.0" source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#3a6800fa8b220125b3b420d679aad59a82980e07" dependencies = [ - "alloy-consensus", + "alloy-consensus 0.3.6", "alloy-primitives", "alloy-rlp", "arc-swap", @@ -2591,7 +3130,7 @@ dependencies = [ "parking_lot 0.12.3", 
"pretty_reqwest_error 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "rand", - "reqwest", + "reqwest 0.11.27", "sensitive_url 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "serde", "serde_json", @@ -2600,7 +3139,7 @@ dependencies = [ "slot_clock", "ssz_types", "state_processing", - "strum", + "strum 0.24.1", "superstruct", "task_executor 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "tempfile", @@ -2968,6 +3507,12 @@ dependencies = [ "slab", ] +[[package]] +name = "futures-utils-wasm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" + [[package]] name = "generic-array" version = "0.14.7" @@ -3172,6 +3717,12 @@ dependencies = [ "crunchy", ] +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" + [[package]] name = "hashbrown" version = "0.14.5" @@ -3180,7 +3731,6 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", - "serde", ] [[package]] @@ -3192,6 +3742,7 @@ dependencies = [ "allocator-api2", "equivalent", "foldhash", + "serde", ] [[package]] @@ -3523,6 +4074,7 @@ dependencies = [ "pin-project-lite", "smallvec", "tokio", + "want", ] [[package]] @@ -3552,6 +4104,22 @@ dependencies = [ "tokio-native-tls", ] +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + [[package]] name = "hyper-util" version = "0.1.9" @@ -3559,13 +4127,16 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", + "futures-channel", "futures-util", "http 1.1.0", "http-body 1.0.1", "hyper 1.4.1", "pin-project-lite", + "socket2 0.5.7", "tokio", "tower-service", + "tracing", ] [[package]] @@ -4582,7 +5153,7 @@ dependencies = [ "smallvec", "snap", "ssz_types", - "strum", + "strum 0.24.1", "superstruct", "task_executor 0.1.0 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", "tiny-keccak", @@ -4632,7 +5203,7 @@ dependencies = [ "smallvec", "snap", "ssz_types", - "strum", + "strum 0.24.1", "superstruct", "task_executor 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "tiny-keccak", @@ -5250,6 +5821,26 @@ dependencies = [ "libc", ] +[[package]] +name = "num_enum" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" +dependencies = [ + "num_enum_derive", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "object" version = "0.36.4" @@ -5603,6 +6194,16 @@ dependencies = [ "ucd-trie", ] +[[package]] +name = "pharos" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" +dependencies = [ + "futures", + "rustc_version 0.4.1", +] + [[package]] name = "pin-project" version = "1.1.6" @@ -5731,7 +6332,7 @@ name = "pretty_reqwest_error" version = "0.1.0" source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#3a6800fa8b220125b3b420d679aad59a82980e07" dependencies = [ - "reqwest", + "reqwest 0.11.27", "sensitive_url 0.1.0 
(git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", ] @@ -5740,7 +6341,7 @@ name = "pretty_reqwest_error" version = "0.1.0" source = "git+https://github.com/sigp/lighthouse?branch=unstable#e31ac508d404700c35d99936028a5fd74749c335" dependencies = [ - "reqwest", + "reqwest 0.11.27", "sensitive_url 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", ] @@ -5799,6 +6400,28 @@ dependencies = [ "toml_edit 0.22.22", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "proc-macro2" version = "1.0.86" @@ -6222,7 +6845,7 @@ dependencies = [ "http-body 0.4.6", "hyper 0.14.31", "hyper-rustls", - "hyper-tls", + "hyper-tls 0.5.0", "ipnet", "js-sys", "log", @@ -6248,10 +6871,49 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots", + "webpki-roots 0.25.4", "winreg", ] +[[package]] +name = "reqwest" +version = "0.12.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-core", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-tls 0.6.0", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls-pemfile 2.2.0", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "tokio", + "tokio-native-tls", + 
"tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "windows-registry", +] + [[package]] name = "resolv-conf" version = "0.7.0" @@ -6692,6 +7354,17 @@ dependencies = [ "parking_lot 0.12.3", ] +[[package]] +name = "schnellru" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9a8ef13a93c54d20580de1e5c413e624e53121d42fc7e2c11d10ef7f8b02367" +dependencies = [ + "ahash", + "cfg-if", + "hashbrown 0.13.2", +] + [[package]] name = "scoped-tls" version = "1.0.1" @@ -6801,6 +7474,12 @@ dependencies = [ "pest", ] +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" + [[package]] name = "sensitive_url" version = "0.1.0" @@ -7059,7 +7738,7 @@ dependencies = [ "serde", "slog", "ssz_types", - "strum", + "strum 0.24.1", "tree_hash", "tree_hash_derive", "types 0.2.1 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", @@ -7353,7 +8032,7 @@ dependencies = [ "slog", "sloggers", "state_processing", - "strum", + "strum 0.24.1", "types 0.2.1 (git+https://github.com/agemanning/lighthouse?branch=modularize-vc)", ] @@ -7375,7 +8054,16 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" dependencies = [ - "strum_macros", + "strum_macros 0.24.3", +] + +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +dependencies = [ + "strum_macros 0.26.4", ] [[package]] @@ -7391,6 +8079,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.79", +] + [[package]] name = "subtle" version = "2.6.1" @@ -7453,6 +8154,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn-solidity" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f76fe0a3e1476bdaa0775b9aec5b869ed9520c2b2fedfe9c6df3618f8ea6290b" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "sync_wrapper" version = "0.1.2" @@ -7464,6 +8177,9 @@ name = "sync_wrapper" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + "futures-core", +] [[package]] name = "synstructure" @@ -7778,6 +8494,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +dependencies = [ + "rustls 0.23.15", + "rustls-pki-types", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.16" @@ -7790,6 +8517,22 @@ dependencies = [ "tokio-util", ] +[[package]] +name = "tokio-tungstenite" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edc5f74e248dc973e0dbb7b74c7e0d6fcc301c694ff50049504004ef4d0cdcd9" +dependencies = [ + "futures-util", + "log", + "rustls 0.23.15", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.0", + "tungstenite", + "webpki-roots 0.26.6", +] + [[package]] name = "tokio-util" version = "0.7.12" @@ -8017,6 +8760,26 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" +[[package]] +name = "tungstenite" +version = "0.24.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "18e5b8366ee7a95b16d32197d0b2604b43a0be89dc5fac9f8e96ccafbaedda8a" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http 1.1.0", + "httparse", + "log", + "rand", + "rustls 0.23.15", + "rustls-pki-types", + "sha1", + "thiserror", + "utf-8", +] + [[package]] name = "typenum" version = "1.17.0" @@ -8276,6 +9039,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + [[package]] name = "utf8parse" version = "0.2.2" @@ -8510,6 +9279,20 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasmtimer" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" +dependencies = [ + "futures", + "js-sys", + "parking_lot 0.12.3", + "pin-utils", + "slab", + "wasm-bindgen", +] + [[package]] name = "web-sys" version = "0.3.72" @@ -8536,6 +9319,15 @@ version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" +[[package]] +name = "webpki-roots" +version = "0.26.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "widestring" version = "0.4.3" @@ -8601,6 +9393,36 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", +] + [[package]] name = "windows-sys" version = "0.45.0" @@ -8843,6 +9665,25 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "ws_stream_wasm" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" +dependencies = [ + "async_io_stream", + "futures", + "js-sys", + "log", + "pharos", + "rustc_version 0.4.1", + "send_wrapper", + "thiserror", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wyz" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index bbdc34b71..8236cdb5b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,6 +7,7 @@ members = [ "anchor/http_metrics", "anchor/qbft", "anchor/network", + "anchor/eth", "anchor/common/version" ] resolver = "2" @@ -17,6 +18,7 @@ edition = "2021" [workspace.dependencies] client = { path = "anchor/client" } qbft = { path = "anchor/qbft" } +eth = { path = "anchor/eth" } http_api = { path = "anchor/http_api" } http_metrics = { path = "anchor/http_metrics" } network = { path ="anchor/network"} diff --git a/anchor/eth/Cargo.toml b/anchor/eth/Cargo.toml new file mode 100644 index 000000000..7ff695810 --- /dev/null +++ b/anchor/eth/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "eth" +version = "0.1.0" +edition = { workspace = true } +authors = ["Sigma Prime "] + +[dependencies] +alloy = { version = "0.6.4", features = ["sol-types", "transports", "json", "contract", "pubsub", +"provider-ws", "rpc-types"] } +tokio = { 
workspace = true } +futures = { workspace = true } + + + diff --git a/anchor/eth/src/abi/ssv_contract.json b/anchor/eth/src/abi/ssv_contract.json new file mode 100644 index 000000000..0b584eed3 --- /dev/null +++ b/anchor/eth/src/abi/ssv_contract.json @@ -0,0 +1 @@ +[{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"address","name":"contractAddress","type":"address"}],"name":"AddressIsWhitelistingContract","type":"error"},{"inputs":[],"name":"ApprovalNotWithinTimeframe","type":"error"},{"inputs":[],"name":"CallerNotOwner","type":"error"},{"inputs":[{"internalType":"address","name":"caller","type":"address"},{"internalType":"address","name":"owner","type":"address"}],"name":"CallerNotOwnerWithData","type":"error"},{"inputs":[],"name":"CallerNotWhitelisted","type":"error"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"CallerNotWhitelistedWithData","type":"error"},{"inputs":[],"name":"ClusterAlreadyEnabled","type":"error"},{"inputs":[],"name":"ClusterDoesNotExists","type":"error"},{"inputs":[],"name":"ClusterIsLiquidated","type":"error"},{"inputs":[],"name":"ClusterNotLiquidatable","type":"error"},{"inputs":[],"name":"EmptyPublicKeysList","type":"error"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"ExceedValidatorLimit","type":"error"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"ExceedValidatorLimitWithData","type":"error"},{"inputs":[],"name":"FeeExceedsIncreaseLimit","type":"error"},{"inputs":[],"name":"FeeIncreaseNotAllowed","type":"error"},{"inputs":[],"name":"FeeTooHigh","type":"error"},{"inputs":[],"name":"FeeTooLow","type":"error"},{"inputs":[],"name":"IncorrectClusterState","type":"error"},{"inputs":[],"name":"IncorrectValidatorState","type":"error"},{"inputs":[{"internalType":"bytes","name":"publicKey","type":"bytes"}],"name":"IncorrectValidatorStateWithData","type":"error"},{"inputs":[],"name":"InsufficientB
alance","type":"error"},{"inputs":[],"name":"InvalidContractAddress","type":"error"},{"inputs":[],"name":"InvalidOperatorIdsLength","type":"error"},{"inputs":[],"name":"InvalidPublicKeyLength","type":"error"},{"inputs":[],"name":"InvalidWhitelistAddressesLength","type":"error"},{"inputs":[{"internalType":"address","name":"contractAddress","type":"address"}],"name":"InvalidWhitelistingContract","type":"error"},{"inputs":[],"name":"MaxValueExceeded","type":"error"},{"inputs":[],"name":"NewBlockPeriodIsBelowMinimum","type":"error"},{"inputs":[],"name":"NoFeeDeclared","type":"error"},{"inputs":[],"name":"NotAuthorized","type":"error"},{"inputs":[],"name":"OperatorAlreadyExists","type":"error"},{"inputs":[],"name":"OperatorDoesNotExist","type":"error"},{"inputs":[],"name":"OperatorsListNotUnique","type":"error"},{"inputs":[],"name":"PublicKeysSharesLengthMismatch","type":"error"},{"inputs":[],"name":"SameFeeChangeNotAllowed","type":"error"},{"inputs":[],"name":"TargetModuleDoesNotExist","type":"error"},{"inputs":[{"internalType":"uint8","name":"moduleId","type":"uint8"}],"name":"TargetModuleDoesNotExistWithData","type":"error"},{"inputs":[],"name":"TokenTransferFailed","type":"error"},{"inputs":[],"name":"UnsortedOperatorsList","type":"error"},{"inputs":[],"name":"ValidatorAlreadyExists","type":"error"},{"inputs":[{"internalType":"bytes","name":"publicKey","type":"bytes"}],"name":"ValidatorAlreadyExistsWithData","type":"error"},{"inputs":[],"name":"ValidatorDoesNotExist","type":"error"},{"inputs":[],"name":"ZeroAddressNotAllowed","type":"error"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"previousAdmin","type":"address"},{"indexed":false,"internalType":"address","name":"newAdmin","type":"address"}],"name":"AdminChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"beacon","type":"address"}],"name":"BeaconUpgraded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType"
:"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"indexed":false,"internalType":"struct ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"ClusterDeposited","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"indexed":false,"internalType":"struct ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"ClusterLiquidated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"indexed":false,"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"ClusterReactivated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"indexed":false,"internalType":"struct ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"ClusterWithdrawn","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64","name":"value","type":"uint64"}],"name":"DeclareOperatorFeePeriodUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64","name":"value","type":"uint64"}],"name":"ExecuteOperatorFeePeriodUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"address","name":"recipientAddress","type":"address"}],"name":"FeeRecipientAddressUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint8","name":"version","type":"uint8"}],"name":"Initialized","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64","name":"value","type":"uint64"}],"name":"LiquidationThresholdPeriodUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"MinimumLiquidationCollateralUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"enum 
SSVModules","name":"moduleId","type":"uint8"},{"indexed":false,"internalType":"address","name":"moduleAddress","type":"address"}],"name":"ModuleUpgraded","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"},{"indexed":false,"internalType":"address","name":"recipient","type":"address"}],"name":"NetworkEarningsWithdrawn","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"oldFee","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"newFee","type":"uint256"}],"name":"NetworkFeeUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint64","name":"operatorId","type":"uint64"},{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"bytes","name":"publicKey","type":"bytes"},{"indexed":false,"internalType":"uint256","name":"fee","type":"uint256"}],"name":"OperatorAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"OperatorFeeDeclarationCancelled","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"uint64","name":"operatorId","type":"uint64"},{"indexed":false,"internalType":"uint256","name":"blockNumber","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"fee","type":"uint256"}],"name":"OperatorFeeDeclared","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"uint64","name":"operatorId","type":"uint64"},{"indexed":false,"internalType":"uint256","name":"blockNumber","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"fee","type":"uint256"}],"name":"OperatorFeeExecuted","type":"event"},{"ano
nymous":false,"inputs":[{"indexed":false,"internalType":"uint64","name":"value","type":"uint64"}],"name":"OperatorFeeIncreaseLimitUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64","name":"maxFee","type":"uint64"}],"name":"OperatorMaximumFeeUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"address[]","name":"whitelistAddresses","type":"address[]"}],"name":"OperatorMultipleWhitelistRemoved","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"address[]","name":"whitelistAddresses","type":"address[]"}],"name":"OperatorMultipleWhitelistUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"bool","name":"toPrivate","type":"bool"}],"name":"OperatorPrivacyStatusUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"OperatorRemoved","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"address","name":"whitelistingContract","type":"address"}],"name":"OperatorWhitelistingContractUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"uint64","name":"operatorId","type":"uint64"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"OperatorWithdrawn","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"previousOwner","type":"address"},{"indexed":true,"internalType":"address","name":"newOwner","type":"address"}],"name":"OwnershipTransfe
rStarted","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"previousOwner","type":"address"},{"indexed":true,"internalType":"address","name":"newOwner","type":"address"}],"name":"OwnershipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"implementation","type":"address"}],"name":"Upgraded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"bytes","name":"publicKey","type":"bytes"},{"indexed":false,"internalType":"bytes","name":"shares","type":"bytes"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"indexed":false,"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"ValidatorAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"bytes","name":"publicKey","type":"bytes"}],"name":"ValidatorExited","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"bytes","name":"publicKey","type":"bytes"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"indexed":false,"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"ValidatorRemoved","type":"event"},{"stateMutability":"nonpayable","type":"fallback"},{"inputs":[],"name":"acceptOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes[]","name":"publicKeys","type":"bytes[]"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"}],"name":"bulkExitValidator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes[]","name":"publicKeys","type":"bytes[]"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"bytes[]","name":"sharesData","type":"bytes[]"},{"internalType":"uint256","name":"amount","type":"uint256"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"bulkRegisterValidator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes[]","name":"publicKeys","type":"bytes[]"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"bulkRemoveValidator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"cancelDeclaredOperatorFee","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"},{"internalType":"uint256","name":"fee","type":"uint256"}],"name":"declareOperatorFee","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"clusterOwner","type":"address"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"uint256","name":"amount","type":"uint256"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"deposit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"executeOperatorFee","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"publicKey","type":"bytes"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"}],"name":"exitValidator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"getVersion","outputs":[{"internalType":"string","name":"version","type":"string"}],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"contract IERC20","name":"token_","type":"address"},{"internalType":"contract ISSVOperators","name":"ssvOperators_","type":"address"},{"internalType":"contract 
ISSVClusters","name":"ssvClusters_","type":"address"},{"internalType":"contract ISSVDAO","name":"ssvDAO_","type":"address"},{"internalType":"contract ISSVViews","name":"ssvViews_","type":"address"},{"internalType":"uint64","name":"minimumBlocksBeforeLiquidation_","type":"uint64"},{"internalType":"uint256","name":"minimumLiquidationCollateral_","type":"uint256"},{"internalType":"uint32","name":"validatorsPerOperatorLimit_","type":"uint32"},{"internalType":"uint64","name":"declareOperatorFeePeriod_","type":"uint64"},{"internalType":"uint64","name":"executeOperatorFeePeriod_","type":"uint64"},{"internalType":"uint64","name":"operatorMaxFeeIncrease_","type":"uint64"}],"name":"initialize","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"clusterOwner","type":"address"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"liquidate","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"owner","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"pendingOwner","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"proxiableUUID","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"uint256","name":"amount","type":"uint256"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"reactivate","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"},{"internalType":"uint256","name":"fee","type":"uint256"}],"name":"reduceOperatorFee","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"publicKey","type":"bytes"},{"internalType":"uint256","name":"fee","type":"uint256"},{"internalType":"bool","name":"setPrivate","type":"bool"}],"name":"registerOperator","outputs":[{"internalType":"uint64","name":"id","type":"uint64"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"publicKey","type":"bytes"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"bytes","name":"sharesData","type":"bytes"},{"internalType":"uint256","name":"amount","type":"uint256"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"registerValidator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"removeOperator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"}],"name":"removeOperatorsWhitelistingContract","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"address[]","name":"whitelistAddresses","type":"address[]"}],"name":"removeOperatorsWhitelists","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"publicKey","type":"bytes"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"removeValidator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"renounceOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"recipientAddress","type":"address"}],"name":"setFeeRecipientAddress","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"}],"name":"setOperatorsPrivateUnchecked","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"}],"name":"setOperatorsPublicUnchecked","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"contract ISSVWhitelistingContract","name":"whitelistingContract","type":"address"}],"name":"setOperatorsWhitelistingContract","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"address[]","name":"whitelistAddresses","type":"address[]"}],"name":"setOperatorsWhitelists","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"newOwner","type":"address"}],"name":"transferOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"timeInSeconds","type":"uint64"}],"name":"updateDeclareOperatorFeePeriod","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"timeInSeconds","type":"uint64"}],"name":"updateExecuteOperatorFeePeriod","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"blocks","type":"uint64"}],"name":"updateLiquidationThresholdPeriod","outputs":[],"stateMutability":"nonpayable",
"type":"function"},{"inputs":[{"internalType":"uint64","name":"maxFee","type":"uint64"}],"name":"updateMaximumOperatorFee","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"updateMinimumLiquidationCollateral","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"enum SSVModules","name":"moduleId","type":"uint8"},{"internalType":"address","name":"moduleAddress","type":"address"}],"name":"updateModule","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"fee","type":"uint256"}],"name":"updateNetworkFee","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"percentage","type":"uint64"}],"name":"updateOperatorFeeIncreaseLimit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"newImplementation","type":"address"}],"name":"upgradeTo","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"newImplementation","type":"address"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"upgradeToAndCall","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"uint256","name":"amount","type":"uint256"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"withdraw","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"withdrawAllOperatorEarnings","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"withdrawNetworkEarnings","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"withdrawOperatorEarnings","outputs":[],"stateMutability":"nonpayable","type":"function"}] diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs new file mode 100644 index 000000000..abd60b7dd --- /dev/null +++ b/anchor/eth/src/event_processor.rs @@ -0,0 +1,6 @@ +// Process Events +pub struct EventProcessor { +} + + + diff --git a/anchor/eth/src/gen.rs b/anchor/eth/src/gen.rs new file mode 100644 index 000000000..1642663bb --- /dev/null +++ b/anchor/eth/src/gen.rs @@ -0,0 +1,9 @@ +use alloy::sol; + +// Generate bindings around the SSV Network contract +sol! 
{ + #[derive(Debug)] + #[sol(rpc)] + SSVContract, + "src/abi/ssv_contract.json" +} diff --git a/anchor/eth/src/lib.rs b/anchor/eth/src/lib.rs new file mode 100644 index 000000000..b69e2ff2b --- /dev/null +++ b/anchor/eth/src/lib.rs @@ -0,0 +1,3 @@ +mod gen; +mod sync; +mod event_processor; diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs new file mode 100644 index 000000000..3f34fa7be --- /dev/null +++ b/anchor/eth/src/sync.rs @@ -0,0 +1,164 @@ +use crate::gen::SSVContract; +use alloy::primitives::FixedBytes; +use alloy::primitives::{address, Address}; +use alloy::providers::{Provider, ProviderBuilder, RootProvider, WsConnect}; +use alloy::pubsub::PubSubFrontend; +use alloy::rpc::types::Filter; +use alloy::rpc::types::Log; +use alloy::sol_types::SolEvent; +use alloy::transports::http::{Client, Http}; +use futures::future::join_all; +use std::collections::BTreeMap; +use std::future::Future; +use std::sync::Arc; +use std::sync::LazyLock; + +/// SSV contract events needed to come up to date with the network +static SSV_EVENTS: LazyLock>> = LazyLock::new(|| { + vec![ + // event OperatorAdded(uint64 indexed operatorId, address indexed owner, bytes publicKey, uint256 fee); + SSVContract::OperatorAdded::SIGNATURE_HASH, + // event OperatorRemoved(uint64 indexed operatorId); + SSVContract::OperatorRemoved::SIGNATURE_HASH, + // event ValidatorAdded(address indexed owner, uint64[] operatorIds, bytes publicKey, bytes shares, Cluster cluster); + SSVContract::ValidatorAdded::SIGNATURE_HASH, + // event ValidatorRemoved(address indexed owner, uint64[] operatorIds, bytes publicKey, Cluster cluster); + SSVContract::ValidatorRemoved::SIGNATURE_HASH, + // event ClusterLiquidated(address indexed owner, uint64[] operatorIds, Cluster cluster); + SSVContract::ClusterLiquidated::SIGNATURE_HASH, + // event ClusterReactivated(address indexed owner, uint64[] operatorIds, Cluster cluster); + SSVContract::ClusterReactivated::SIGNATURE_HASH, + // event 
FeeRecipientAddressUpdated(address indexed owner, address recipientAddress); + SSVContract::FeeRecipientAddressUpdated::SIGNATURE_HASH, + // event ValidatorExited(address indexed owner, uint64[] operatorIds, bytes publicKey); + SSVContract::ValidatorExited::SIGNATURE_HASH, + ] +}); + +/// Contract deployment address +/// https://etherscan.io/address/0xDD9BC35aE942eF0cFa76930954a156B3fF30a4E1 +static CONTRACT_DEPLOYMENT_ADDRESS: LazyLock
= + LazyLock::new(|| address!("DD9BC35aE942eF0cFa76930954a156B3fF30a4E1")); + +/// Contract deployment block on Ethereum Mainnet +/// https://etherscan.io/tx/0x4a11a560d3c2f693e96f98abb1feb447646b01b36203ecab0a96a1cf45fd650b +const CONTRACT_DEPLOYMENT_BLOCK: u64 = 17507487; + +/// Batch size for log fetching +const BATCH_SIZE: u64 = 500; + +/// Typedef RPC and WS clients +type RpcClient = RootProvider>; +type WsClient = RootProvider; + +/// Client for interacting with the SSV contract on Ethereum L1 +/// +/// Manages connections to the L1 and monitors SSV contract events to track the state of validator +/// and operators. Provides both historical synchronization and live event monitoring +struct SsvEventSyncer { + /// Http client connected to the L1 to fetch historical SSV event information + rpc_client: Arc, + // Websocket client connected to L1 to stream live SSV event information, (todo!()??) + ws_client: WsClient, +} + +impl SsvEventSyncer { + pub async fn new() -> Result { + // todo!() add a retry layer + + // Construct HTTP Provider + let http_url = "dummy_http".parse().unwrap(); // TODO!(), get this from config, unwrap + let rpc_client: Arc = Arc::new(ProviderBuilder::new().on_http(http_url)); + // Construct Websocket Provider + let ws_url = "dummy ws"; // TODO!(), get this from config + let ws_client = ProviderBuilder::new() + .on_ws(WsConnect::new(ws_url)) + .await + .map_err(|e| format!("Failed to bind to WS url {}, {}", ws_url, e))?; + + Ok(Self { + rpc_client, + ws_client, + }) + } + + // Top level function to sync data, change comment + pub async fn sync(&self) -> Result<(), String> { + // first, perform a historical sync + self.historical_sync().await?; + + // once the historical sync is done and we have processed them, start a live sync + // todo!(), live sync + + // OK. 
We have done the historical sync and spawned off live sync to process + Ok(()) + } + + /// Perform a historical sync from the contract deployment block to catch up to the current + /// state of the SSV network + async fn historical_sync(&self) -> Result<(), String> { + // Fetch range from start_block..(current_block-follow_distance) + let start_block = CONTRACT_DEPLOYMENT_BLOCK; + let current_block = self.rpc_client.get_block_number().await.unwrap(); + + // Chunk the start and end block range into a set of ranges of size BATCH_SIZE and construct + // a new task to fetch the logs from each range + let tasks: Vec<_> = (start_block..=current_block) + .step_by(BATCH_SIZE as usize) + .map(|start| { + let (start, end) = (start, std::cmp::min(start + BATCH_SIZE - 1, current_block)); + self.fetch_logs(start, end) + }) + .collect(); + + // Await all of the futures. This will panic if one of the futures is unsuccessful. + let event_logs: Vec = join_all(tasks).await.into_iter().flatten().collect(); + + // The futures may join out of order block wise. The individual events within the block + // retain their tx ordering. Due to this, we can reassemble back into blocks and be + // confident the order is correct + let mut ordered_event_logs: BTreeMap> = BTreeMap::new(); + for log in event_logs { + let block_num = log.block_number.expect("Log should have a block number"); + ordered_event_logs.entry(block_num).or_default().push(log); + } + + // Logs are all fetched from the chain and in order, process them + Ok(()) + } + + /// Fetch logs from the chain + fn fetch_logs(&self, from_block: u64, to_block: u64) -> impl Future> { + // Setup filter and rpc client + let rpc_client = self.rpc_client.clone(); + let filter = Filter::new() + .address(*CONTRACT_DEPLOYMENT_ADDRESS) + .from_block(from_block) + .to_block(to_block) + .events(&*SSV_EVENTS); + + // Try to fetch the logs. + // If there is an error, we want this to panic. 
The rpc client is layered to retry + // upon failure based on custom retry arguments. If this fails, we can assume + // there is some greater underlying issue with the rpc connection + async move { + match rpc_client.get_logs(&filter).await { + Ok(logs) => logs, + Err(_) => panic!("Unable to fetch logs"), + } + } + } + + /// Live sync with the chain to get new contract events while enforcing a follow distance + /// + /// todo!() should this be done like this?? should we use an interal slot clock instead?? not + /// sure to treat this as a client itself, or an extension of a validator + /// The actual beacon node is responsible for the slot timing and block creation, and the + /// validator just polls it on intervals, so it makes sense to just stream in blocks since this + /// is just event syncing and not critial as long as it is within the slot time + fn live_sync(&self) { + // Stream in a new block. We are enforcing a follow distance, so we do not care about the + // actual block. We just want to know that a new block has been added to the chain + todo!() + } +} diff --git a/anchor/network/Cargo.toml b/anchor/network/Cargo.toml index 73801a6aa..3c8bac686 100644 --- a/anchor/network/Cargo.toml +++ b/anchor/network/Cargo.toml @@ -17,4 +17,4 @@ serde = { workspace = true } tracing = { workspace = true } [dev-dependencies] -async-channel = { workspace = true } \ No newline at end of file +async-channel = { workspace = true } diff --git a/anchor/src/main.rs b/anchor/src/main.rs index 0f4bc07f8..ebc1c741d 100644 --- a/anchor/src/main.rs +++ b/anchor/src/main.rs @@ -33,6 +33,7 @@ fn main() { // The clone's here simply copy the Arc of the runtime. We pass these through the main // execution task + let anchor_executor = core_executor.clone(); let shutdown_executor = core_executor.clone(); From 1af7459796aa090480e42bda9fed938f9526b9b9 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Wed, 20 Nov 2024 16:30:53 +0000 Subject: [PATCH 02/49] spec out event processor and parser. 
thoughts on arch and todos --- anchor/eth/Cargo.toml | 1 + anchor/eth/src/abi/ssv_contract.json | 2126 +++++++++++++++++++++++++- anchor/eth/src/event_parser.rs | 32 + anchor/eth/src/event_processor.rs | 27 +- anchor/eth/src/lib.rs | 4 +- anchor/eth/src/sync.rs | 72 +- 6 files changed, 2236 insertions(+), 26 deletions(-) create mode 100644 anchor/eth/src/event_parser.rs diff --git a/anchor/eth/Cargo.toml b/anchor/eth/Cargo.toml index 7ff695810..26ffcf040 100644 --- a/anchor/eth/Cargo.toml +++ b/anchor/eth/Cargo.toml @@ -9,6 +9,7 @@ alloy = { version = "0.6.4", features = ["sol-types", "transports", "json", "con "provider-ws", "rpc-types"] } tokio = { workspace = true } futures = { workspace = true } +rand = "0.8.5" diff --git a/anchor/eth/src/abi/ssv_contract.json b/anchor/eth/src/abi/ssv_contract.json index 0b584eed3..798172919 100644 --- a/anchor/eth/src/abi/ssv_contract.json +++ b/anchor/eth/src/abi/ssv_contract.json @@ -1 +1,2125 @@ -[{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"address","name":"contractAddress","type":"address"}],"name":"AddressIsWhitelistingContract","type":"error"},{"inputs":[],"name":"ApprovalNotWithinTimeframe","type":"error"},{"inputs":[],"name":"CallerNotOwner","type":"error"},{"inputs":[{"internalType":"address","name":"caller","type":"address"},{"internalType":"address","name":"owner","type":"address"}],"name":"CallerNotOwnerWithData","type":"error"},{"inputs":[],"name":"CallerNotWhitelisted","type":"error"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"CallerNotWhitelistedWithData","type":"error"},{"inputs":[],"name":"ClusterAlreadyEnabled","type":"error"},{"inputs":[],"name":"ClusterDoesNotExists","type":"error"},{"inputs":[],"name":"ClusterIsLiquidated","type":"error"},{"inputs":[],"name":"ClusterNotLiquidatable","type":"error"},{"inputs":[],"name":"EmptyPublicKeysList","type":"error"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"u
int64"}],"name":"ExceedValidatorLimit","type":"error"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"ExceedValidatorLimitWithData","type":"error"},{"inputs":[],"name":"FeeExceedsIncreaseLimit","type":"error"},{"inputs":[],"name":"FeeIncreaseNotAllowed","type":"error"},{"inputs":[],"name":"FeeTooHigh","type":"error"},{"inputs":[],"name":"FeeTooLow","type":"error"},{"inputs":[],"name":"IncorrectClusterState","type":"error"},{"inputs":[],"name":"IncorrectValidatorState","type":"error"},{"inputs":[{"internalType":"bytes","name":"publicKey","type":"bytes"}],"name":"IncorrectValidatorStateWithData","type":"error"},{"inputs":[],"name":"InsufficientBalance","type":"error"},{"inputs":[],"name":"InvalidContractAddress","type":"error"},{"inputs":[],"name":"InvalidOperatorIdsLength","type":"error"},{"inputs":[],"name":"InvalidPublicKeyLength","type":"error"},{"inputs":[],"name":"InvalidWhitelistAddressesLength","type":"error"},{"inputs":[{"internalType":"address","name":"contractAddress","type":"address"}],"name":"InvalidWhitelistingContract","type":"error"},{"inputs":[],"name":"MaxValueExceeded","type":"error"},{"inputs":[],"name":"NewBlockPeriodIsBelowMinimum","type":"error"},{"inputs":[],"name":"NoFeeDeclared","type":"error"},{"inputs":[],"name":"NotAuthorized","type":"error"},{"inputs":[],"name":"OperatorAlreadyExists","type":"error"},{"inputs":[],"name":"OperatorDoesNotExist","type":"error"},{"inputs":[],"name":"OperatorsListNotUnique","type":"error"},{"inputs":[],"name":"PublicKeysSharesLengthMismatch","type":"error"},{"inputs":[],"name":"SameFeeChangeNotAllowed","type":"error"},{"inputs":[],"name":"TargetModuleDoesNotExist","type":"error"},{"inputs":[{"internalType":"uint8","name":"moduleId","type":"uint8"}],"name":"TargetModuleDoesNotExistWithData","type":"error"},{"inputs":[],"name":"TokenTransferFailed","type":"error"},{"inputs":[],"name":"UnsortedOperatorsList","type":"error"},{"inputs":[],"name":"ValidatorAlreadyExists","type":"er
ror"},{"inputs":[{"internalType":"bytes","name":"publicKey","type":"bytes"}],"name":"ValidatorAlreadyExistsWithData","type":"error"},{"inputs":[],"name":"ValidatorDoesNotExist","type":"error"},{"inputs":[],"name":"ZeroAddressNotAllowed","type":"error"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"previousAdmin","type":"address"},{"indexed":false,"internalType":"address","name":"newAdmin","type":"address"}],"name":"AdminChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"beacon","type":"address"}],"name":"BeaconUpgraded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"indexed":false,"internalType":"struct ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"ClusterDeposited","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"indexed":false,"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"ClusterLiquidated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"indexed":false,"internalType":"struct ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"ClusterReactivated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"indexed":false,"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"ClusterWithdrawn","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64","name":"value","type":"uint64"}],"name":"DeclareOperatorFeePeriodUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64","name":"value","type":"uint64"}],"name":"ExecuteOperatorFeePeriodUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"address","name":"recipientAddress","type":"address"}],"name":"FeeRecipientAddressUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint8","name":"version","type":"uint8"}],"name":"Initialized","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64","name":"value","type":"uint64"}],"name":"LiquidationThresholdPeriodUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"MinimumLiquidationCollateralUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"enum 
SSVModules","name":"moduleId","type":"uint8"},{"indexed":false,"internalType":"address","name":"moduleAddress","type":"address"}],"name":"ModuleUpgraded","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"},{"indexed":false,"internalType":"address","name":"recipient","type":"address"}],"name":"NetworkEarningsWithdrawn","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"oldFee","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"newFee","type":"uint256"}],"name":"NetworkFeeUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint64","name":"operatorId","type":"uint64"},{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"bytes","name":"publicKey","type":"bytes"},{"indexed":false,"internalType":"uint256","name":"fee","type":"uint256"}],"name":"OperatorAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"OperatorFeeDeclarationCancelled","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"uint64","name":"operatorId","type":"uint64"},{"indexed":false,"internalType":"uint256","name":"blockNumber","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"fee","type":"uint256"}],"name":"OperatorFeeDeclared","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"uint64","name":"operatorId","type":"uint64"},{"indexed":false,"internalType":"uint256","name":"blockNumber","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"fee","type":"uint256"}],"name":"OperatorFeeExecuted","type":"event"},{"ano
nymous":false,"inputs":[{"indexed":false,"internalType":"uint64","name":"value","type":"uint64"}],"name":"OperatorFeeIncreaseLimitUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64","name":"maxFee","type":"uint64"}],"name":"OperatorMaximumFeeUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"address[]","name":"whitelistAddresses","type":"address[]"}],"name":"OperatorMultipleWhitelistRemoved","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"address[]","name":"whitelistAddresses","type":"address[]"}],"name":"OperatorMultipleWhitelistUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"bool","name":"toPrivate","type":"bool"}],"name":"OperatorPrivacyStatusUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"OperatorRemoved","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"address","name":"whitelistingContract","type":"address"}],"name":"OperatorWhitelistingContractUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"uint64","name":"operatorId","type":"uint64"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"OperatorWithdrawn","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"previousOwner","type":"address"},{"indexed":true,"internalType":"address","name":"newOwner","type":"address"}],"name":"OwnershipTransfe
rStarted","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"previousOwner","type":"address"},{"indexed":true,"internalType":"address","name":"newOwner","type":"address"}],"name":"OwnershipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"implementation","type":"address"}],"name":"Upgraded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"bytes","name":"publicKey","type":"bytes"},{"indexed":false,"internalType":"bytes","name":"shares","type":"bytes"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"indexed":false,"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"ValidatorAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"bytes","name":"publicKey","type":"bytes"}],"name":"ValidatorExited","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":false,"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"indexed":false,"internalType":"bytes","name":"publicKey","type":"bytes"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"indexed":false,"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"ValidatorRemoved","type":"event"},{"stateMutability":"nonpayable","type":"fallback"},{"inputs":[],"name":"acceptOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes[]","name":"publicKeys","type":"bytes[]"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"}],"name":"bulkExitValidator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes[]","name":"publicKeys","type":"bytes[]"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"bytes[]","name":"sharesData","type":"bytes[]"},{"internalType":"uint256","name":"amount","type":"uint256"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"bulkRegisterValidator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes[]","name":"publicKeys","type":"bytes[]"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"bulkRemoveValidator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"cancelDeclaredOperatorFee","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"},{"internalType":"uint256","name":"fee","type":"uint256"}],"name":"declareOperatorFee","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"clusterOwner","type":"address"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"uint256","name":"amount","type":"uint256"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"deposit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"executeOperatorFee","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"publicKey","type":"bytes"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"}],"name":"exitValidator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"getVersion","outputs":[{"internalType":"string","name":"version","type":"string"}],"stateMutability":"pure","type":"function"},{"inputs":[{"internalType":"contract IERC20","name":"token_","type":"address"},{"internalType":"contract ISSVOperators","name":"ssvOperators_","type":"address"},{"internalType":"contract 
ISSVClusters","name":"ssvClusters_","type":"address"},{"internalType":"contract ISSVDAO","name":"ssvDAO_","type":"address"},{"internalType":"contract ISSVViews","name":"ssvViews_","type":"address"},{"internalType":"uint64","name":"minimumBlocksBeforeLiquidation_","type":"uint64"},{"internalType":"uint256","name":"minimumLiquidationCollateral_","type":"uint256"},{"internalType":"uint32","name":"validatorsPerOperatorLimit_","type":"uint32"},{"internalType":"uint64","name":"declareOperatorFeePeriod_","type":"uint64"},{"internalType":"uint64","name":"executeOperatorFeePeriod_","type":"uint64"},{"internalType":"uint64","name":"operatorMaxFeeIncrease_","type":"uint64"}],"name":"initialize","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"clusterOwner","type":"address"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"liquidate","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"owner","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"pendingOwner","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"proxiableUUID","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"uint256","name":"amount","type":"uint256"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"reactivate","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"},{"internalType":"uint256","name":"fee","type":"uint256"}],"name":"reduceOperatorFee","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"publicKey","type":"bytes"},{"internalType":"uint256","name":"fee","type":"uint256"},{"internalType":"bool","name":"setPrivate","type":"bool"}],"name":"registerOperator","outputs":[{"internalType":"uint64","name":"id","type":"uint64"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"publicKey","type":"bytes"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"bytes","name":"sharesData","type":"bytes"},{"internalType":"uint256","name":"amount","type":"uint256"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"registerValidator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"removeOperator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"}],"name":"removeOperatorsWhitelistingContract","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"address[]","name":"whitelistAddresses","type":"address[]"}],"name":"removeOperatorsWhitelists","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"publicKey","type":"bytes"},{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"removeValidator","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"renounceOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"recipientAddress","type":"address"}],"name":"setFeeRecipientAddress","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"}],"name":"setOperatorsPrivateUnchecked","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"}],"name":"setOperatorsPublicUnchecked","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"contract ISSVWhitelistingContract","name":"whitelistingContract","type":"address"}],"name":"setOperatorsWhitelistingContract","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"address[]","name":"whitelistAddresses","type":"address[]"}],"name":"setOperatorsWhitelists","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"newOwner","type":"address"}],"name":"transferOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"timeInSeconds","type":"uint64"}],"name":"updateDeclareOperatorFeePeriod","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"timeInSeconds","type":"uint64"}],"name":"updateExecuteOperatorFeePeriod","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"blocks","type":"uint64"}],"name":"updateLiquidationThresholdPeriod","outputs":[],"stateMutability":"nonpayable",
"type":"function"},{"inputs":[{"internalType":"uint64","name":"maxFee","type":"uint64"}],"name":"updateMaximumOperatorFee","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"updateMinimumLiquidationCollateral","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"enum SSVModules","name":"moduleId","type":"uint8"},{"internalType":"address","name":"moduleAddress","type":"address"}],"name":"updateModule","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"fee","type":"uint256"}],"name":"updateNetworkFee","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"percentage","type":"uint64"}],"name":"updateOperatorFeeIncreaseLimit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"newImplementation","type":"address"}],"name":"upgradeTo","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"newImplementation","type":"address"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"upgradeToAndCall","outputs":[],"stateMutability":"payable","type":"function"},{"inputs":[{"internalType":"uint64[]","name":"operatorIds","type":"uint64[]"},{"internalType":"uint256","name":"amount","type":"uint256"},{"components":[{"internalType":"uint32","name":"validatorCount","type":"uint32"},{"internalType":"uint64","name":"networkFeeIndex","type":"uint64"},{"internalType":"uint64","name":"index","type":"uint64"},{"internalType":"bool","name":"active","type":"bool"},{"internalType":"uint256","name":"balance","type":"uint256"}],"internalType":"struct 
ISSVNetworkCore.Cluster","name":"cluster","type":"tuple"}],"name":"withdraw","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"}],"name":"withdrawAllOperatorEarnings","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"withdrawNetworkEarnings","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"operatorId","type":"uint64"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"withdrawOperatorEarnings","outputs":[],"stateMutability":"nonpayable","type":"function"}] +[ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "contractAddress", + "type": "address" + } + ], + "name": "AddressIsWhitelistingContract", + "type": "error" + }, + { + "inputs": [], + "name": "ApprovalNotWithinTimeframe", + "type": "error" + }, + { + "inputs": [], + "name": "CallerNotOwner", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "caller", + "type": "address" + }, + { + "internalType": "address", + "name": "owner", + "type": "address" + } + ], + "name": "CallerNotOwnerWithData", + "type": "error" + }, + { + "inputs": [], + "name": "CallerNotWhitelisted", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "operatorId", + "type": "uint64" + } + ], + "name": "CallerNotWhitelistedWithData", + "type": "error" + }, + { + "inputs": [], + "name": "ClusterAlreadyEnabled", + "type": "error" + }, + { + "inputs": [], + "name": "ClusterDoesNotExists", + "type": "error" + }, + { + "inputs": [], + "name": "ClusterIsLiquidated", + "type": "error" + }, + { + "inputs": [], + "name": "ClusterNotLiquidatable", + "type": "error" + }, + { + "inputs": [], + "name": "EmptyPublicKeysList", + "type": "error" + }, + { 
+ "inputs": [ + { + "internalType": "uint64", + "name": "operatorId", + "type": "uint64" + } + ], + "name": "ExceedValidatorLimit", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "operatorId", + "type": "uint64" + } + ], + "name": "ExceedValidatorLimitWithData", + "type": "error" + }, + { + "inputs": [], + "name": "FeeExceedsIncreaseLimit", + "type": "error" + }, + { + "inputs": [], + "name": "FeeIncreaseNotAllowed", + "type": "error" + }, + { + "inputs": [], + "name": "FeeTooHigh", + "type": "error" + }, + { + "inputs": [], + "name": "FeeTooLow", + "type": "error" + }, + { + "inputs": [], + "name": "IncorrectClusterState", + "type": "error" + }, + { + "inputs": [], + "name": "IncorrectValidatorState", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "bytes", + "name": "publicKey", + "type": "bytes" + } + ], + "name": "IncorrectValidatorStateWithData", + "type": "error" + }, + { + "inputs": [], + "name": "InsufficientBalance", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidContractAddress", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidOperatorIdsLength", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidPublicKeyLength", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidWhitelistAddressesLength", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "contractAddress", + "type": "address" + } + ], + "name": "InvalidWhitelistingContract", + "type": "error" + }, + { + "inputs": [], + "name": "MaxValueExceeded", + "type": "error" + }, + { + "inputs": [], + "name": "NewBlockPeriodIsBelowMinimum", + "type": "error" + }, + { + "inputs": [], + "name": "NoFeeDeclared", + "type": "error" + }, + { + "inputs": [], + "name": "NotAuthorized", + "type": "error" + }, + { + "inputs": [], + "name": "OperatorAlreadyExists", + "type": "error" + }, + { + "inputs": [], + "name": "OperatorDoesNotExist", + "type": "error" + }, + { + "inputs": [], + "name": 
"OperatorsListNotUnique", + "type": "error" + }, + { + "inputs": [], + "name": "PublicKeysSharesLengthMismatch", + "type": "error" + }, + { + "inputs": [], + "name": "SameFeeChangeNotAllowed", + "type": "error" + }, + { + "inputs": [], + "name": "TargetModuleDoesNotExist", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint8", + "name": "moduleId", + "type": "uint8" + } + ], + "name": "TargetModuleDoesNotExistWithData", + "type": "error" + }, + { + "inputs": [], + "name": "TokenTransferFailed", + "type": "error" + }, + { + "inputs": [], + "name": "UnsortedOperatorsList", + "type": "error" + }, + { + "inputs": [], + "name": "ValidatorAlreadyExists", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "bytes", + "name": "publicKey", + "type": "bytes" + } + ], + "name": "ValidatorAlreadyExistsWithData", + "type": "error" + }, + { + "inputs": [], + "name": "ValidatorDoesNotExist", + "type": "error" + }, + { + "inputs": [], + "name": "ZeroAddressNotAllowed", + "type": "error" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "address", + "name": "previousAdmin", + "type": "address" + }, + { + "indexed": false, + "internalType": "address", + "name": "newAdmin", + "type": "address" + } + ], + "name": "AdminChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "beacon", + "type": "address" + } + ], + "name": "BeaconUpgraded", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "components": [ + { + "internalType": "uint32", + "name": "validatorCount", + "type": "uint32" + }, + { + "internalType": "uint64", + "name": 
"networkFeeIndex", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "index", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "balance", + "type": "uint256" + } + ], + "indexed": false, + "internalType": "struct ISSVNetworkCore.Cluster", + "name": "cluster", + "type": "tuple" + } + ], + "name": "ClusterDeposited", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "components": [ + { + "internalType": "uint32", + "name": "validatorCount", + "type": "uint32" + }, + { + "internalType": "uint64", + "name": "networkFeeIndex", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "index", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "balance", + "type": "uint256" + } + ], + "indexed": false, + "internalType": "struct ISSVNetworkCore.Cluster", + "name": "cluster", + "type": "tuple" + } + ], + "name": "ClusterLiquidated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "components": [ + { + "internalType": "uint32", + "name": "validatorCount", + "type": "uint32" + }, + { + "internalType": "uint64", + "name": "networkFeeIndex", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "index", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "balance", + "type": "uint256" + } + ], + "indexed": false, + "internalType": "struct 
ISSVNetworkCore.Cluster", + "name": "cluster", + "type": "tuple" + } + ], + "name": "ClusterReactivated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "components": [ + { + "internalType": "uint32", + "name": "validatorCount", + "type": "uint32" + }, + { + "internalType": "uint64", + "name": "networkFeeIndex", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "index", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "balance", + "type": "uint256" + } + ], + "indexed": false, + "internalType": "struct ISSVNetworkCore.Cluster", + "name": "cluster", + "type": "tuple" + } + ], + "name": "ClusterWithdrawn", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint64", + "name": "value", + "type": "uint64" + } + ], + "name": "DeclareOperatorFeePeriodUpdated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint64", + "name": "value", + "type": "uint64" + } + ], + "name": "ExecuteOperatorFeePeriodUpdated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": false, + "internalType": "address", + "name": "recipientAddress", + "type": "address" + } + ], + "name": "FeeRecipientAddressUpdated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint8", + "name": "version", + "type": "uint8" + } + ], + "name": "Initialized", + "type": "event" + }, + { + "anonymous": false, 
+ "inputs": [ + { + "indexed": false, + "internalType": "uint64", + "name": "value", + "type": "uint64" + } + ], + "name": "LiquidationThresholdPeriodUpdated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "MinimumLiquidationCollateralUpdated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "enum SSVModules", + "name": "moduleId", + "type": "uint8" + }, + { + "indexed": false, + "internalType": "address", + "name": "moduleAddress", + "type": "address" + } + ], + "name": "ModuleUpgraded", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "address", + "name": "recipient", + "type": "address" + } + ], + "name": "NetworkEarningsWithdrawn", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint256", + "name": "oldFee", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "newFee", + "type": "uint256" + } + ], + "name": "NetworkFeeUpdated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint64", + "name": "operatorId", + "type": "uint64" + }, + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "publicKey", + "type": "bytes" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "fee", + "type": "uint256" + } + ], + "name": "OperatorAdded", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint64", + "name": "operatorId", + "type": 
"uint64" + } + ], + "name": "OperatorFeeDeclarationCancelled", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint64", + "name": "operatorId", + "type": "uint64" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "blockNumber", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "fee", + "type": "uint256" + } + ], + "name": "OperatorFeeDeclared", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint64", + "name": "operatorId", + "type": "uint64" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "blockNumber", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "fee", + "type": "uint256" + } + ], + "name": "OperatorFeeExecuted", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint64", + "name": "value", + "type": "uint64" + } + ], + "name": "OperatorFeeIncreaseLimitUpdated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint64", + "name": "maxFee", + "type": "uint64" + } + ], + "name": "OperatorMaximumFeeUpdated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "indexed": false, + "internalType": "address[]", + "name": "whitelistAddresses", + "type": "address[]" + } + ], + "name": "OperatorMultipleWhitelistRemoved", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "indexed": false, + "internalType": 
"address[]", + "name": "whitelistAddresses", + "type": "address[]" + } + ], + "name": "OperatorMultipleWhitelistUpdated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "indexed": false, + "internalType": "bool", + "name": "toPrivate", + "type": "bool" + } + ], + "name": "OperatorPrivacyStatusUpdated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint64", + "name": "operatorId", + "type": "uint64" + } + ], + "name": "OperatorRemoved", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "indexed": false, + "internalType": "address", + "name": "whitelistingContract", + "type": "address" + } + ], + "name": "OperatorWhitelistingContractUpdated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint64", + "name": "operatorId", + "type": "uint64" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "OperatorWithdrawn", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "previousOwner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "OwnershipTransferStarted", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "previousOwner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "OwnershipTransferred", + "type": "event" + }, + { + 
"anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "implementation", + "type": "address" + } + ], + "name": "Upgraded", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "publicKey", + "type": "bytes" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "shares", + "type": "bytes" + }, + { + "components": [ + { + "internalType": "uint32", + "name": "validatorCount", + "type": "uint32" + }, + { + "internalType": "uint64", + "name": "networkFeeIndex", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "index", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "balance", + "type": "uint256" + } + ], + "indexed": false, + "internalType": "struct ISSVNetworkCore.Cluster", + "name": "cluster", + "type": "tuple" + } + ], + "name": "ValidatorAdded", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "publicKey", + "type": "bytes" + } + ], + "name": "ValidatorExited", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "publicKey", + "type": "bytes" + }, + { + "components": [ + { + "internalType": "uint32", + 
"name": "validatorCount", + "type": "uint32" + }, + { + "internalType": "uint64", + "name": "networkFeeIndex", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "index", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "balance", + "type": "uint256" + } + ], + "indexed": false, + "internalType": "struct ISSVNetworkCore.Cluster", + "name": "cluster", + "type": "tuple" + } + ], + "name": "ValidatorRemoved", + "type": "event" + }, + { + "stateMutability": "nonpayable", + "type": "fallback" + }, + { + "inputs": [], + "name": "acceptOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes[]", + "name": "publicKeys", + "type": "bytes[]" + }, + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + } + ], + "name": "bulkExitValidator", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes[]", + "name": "publicKeys", + "type": "bytes[]" + }, + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "internalType": "bytes[]", + "name": "sharesData", + "type": "bytes[]" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "components": [ + { + "internalType": "uint32", + "name": "validatorCount", + "type": "uint32" + }, + { + "internalType": "uint64", + "name": "networkFeeIndex", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "index", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "balance", + "type": "uint256" + } + ], + "internalType": "struct ISSVNetworkCore.Cluster", + "name": "cluster", + "type": "tuple" + } + ], + "name": "bulkRegisterValidator", + "outputs": [], + "stateMutability": "nonpayable", + "type": 
"function" + }, + { + "inputs": [ + { + "internalType": "bytes[]", + "name": "publicKeys", + "type": "bytes[]" + }, + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "components": [ + { + "internalType": "uint32", + "name": "validatorCount", + "type": "uint32" + }, + { + "internalType": "uint64", + "name": "networkFeeIndex", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "index", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "balance", + "type": "uint256" + } + ], + "internalType": "struct ISSVNetworkCore.Cluster", + "name": "cluster", + "type": "tuple" + } + ], + "name": "bulkRemoveValidator", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "operatorId", + "type": "uint64" + } + ], + "name": "cancelDeclaredOperatorFee", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "operatorId", + "type": "uint64" + }, + { + "internalType": "uint256", + "name": "fee", + "type": "uint256" + } + ], + "name": "declareOperatorFee", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "clusterOwner", + "type": "address" + }, + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "components": [ + { + "internalType": "uint32", + "name": "validatorCount", + "type": "uint32" + }, + { + "internalType": "uint64", + "name": "networkFeeIndex", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "index", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "balance", + 
"type": "uint256" + } + ], + "internalType": "struct ISSVNetworkCore.Cluster", + "name": "cluster", + "type": "tuple" + } + ], + "name": "deposit", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "operatorId", + "type": "uint64" + } + ], + "name": "executeOperatorFee", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes", + "name": "publicKey", + "type": "bytes" + }, + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + } + ], + "name": "exitValidator", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "getVersion", + "outputs": [ + { + "internalType": "string", + "name": "version", + "type": "string" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "contract IERC20", + "name": "token_", + "type": "address" + }, + { + "internalType": "contract ISSVOperators", + "name": "ssvOperators_", + "type": "address" + }, + { + "internalType": "contract ISSVClusters", + "name": "ssvClusters_", + "type": "address" + }, + { + "internalType": "contract ISSVDAO", + "name": "ssvDAO_", + "type": "address" + }, + { + "internalType": "contract ISSVViews", + "name": "ssvViews_", + "type": "address" + }, + { + "internalType": "uint64", + "name": "minimumBlocksBeforeLiquidation_", + "type": "uint64" + }, + { + "internalType": "uint256", + "name": "minimumLiquidationCollateral_", + "type": "uint256" + }, + { + "internalType": "uint32", + "name": "validatorsPerOperatorLimit_", + "type": "uint32" + }, + { + "internalType": "uint64", + "name": "declareOperatorFeePeriod_", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "executeOperatorFeePeriod_", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "operatorMaxFeeIncrease_", + "type": "uint64" + } + ], + "name": 
"initialize", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "clusterOwner", + "type": "address" + }, + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "components": [ + { + "internalType": "uint32", + "name": "validatorCount", + "type": "uint32" + }, + { + "internalType": "uint64", + "name": "networkFeeIndex", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "index", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "balance", + "type": "uint256" + } + ], + "internalType": "struct ISSVNetworkCore.Cluster", + "name": "cluster", + "type": "tuple" + } + ], + "name": "liquidate", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "owner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "pendingOwner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "proxiableUUID", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "components": [ + { + "internalType": "uint32", + "name": "validatorCount", + "type": "uint32" + }, + { + "internalType": "uint64", + "name": "networkFeeIndex", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "index", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + 
"internalType": "uint256", + "name": "balance", + "type": "uint256" + } + ], + "internalType": "struct ISSVNetworkCore.Cluster", + "name": "cluster", + "type": "tuple" + } + ], + "name": "reactivate", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "operatorId", + "type": "uint64" + }, + { + "internalType": "uint256", + "name": "fee", + "type": "uint256" + } + ], + "name": "reduceOperatorFee", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes", + "name": "publicKey", + "type": "bytes" + }, + { + "internalType": "uint256", + "name": "fee", + "type": "uint256" + }, + { + "internalType": "bool", + "name": "setPrivate", + "type": "bool" + } + ], + "name": "registerOperator", + "outputs": [ + { + "internalType": "uint64", + "name": "id", + "type": "uint64" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes", + "name": "publicKey", + "type": "bytes" + }, + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "internalType": "bytes", + "name": "sharesData", + "type": "bytes" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "components": [ + { + "internalType": "uint32", + "name": "validatorCount", + "type": "uint32" + }, + { + "internalType": "uint64", + "name": "networkFeeIndex", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "index", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "balance", + "type": "uint256" + } + ], + "internalType": "struct ISSVNetworkCore.Cluster", + "name": "cluster", + "type": "tuple" + } + ], + "name": "registerValidator", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + 
"internalType": "uint64", + "name": "operatorId", + "type": "uint64" + } + ], + "name": "removeOperator", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + } + ], + "name": "removeOperatorsWhitelistingContract", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "internalType": "address[]", + "name": "whitelistAddresses", + "type": "address[]" + } + ], + "name": "removeOperatorsWhitelists", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes", + "name": "publicKey", + "type": "bytes" + }, + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "components": [ + { + "internalType": "uint32", + "name": "validatorCount", + "type": "uint32" + }, + { + "internalType": "uint64", + "name": "networkFeeIndex", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "index", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "balance", + "type": "uint256" + } + ], + "internalType": "struct ISSVNetworkCore.Cluster", + "name": "cluster", + "type": "tuple" + } + ], + "name": "removeValidator", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "renounceOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "recipientAddress", + "type": "address" + } + ], + "name": "setFeeRecipientAddress", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + } 
+ ], + "name": "setOperatorsPrivateUnchecked", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + } + ], + "name": "setOperatorsPublicUnchecked", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "internalType": "contract ISSVWhitelistingContract", + "name": "whitelistingContract", + "type": "address" + } + ], + "name": "setOperatorsWhitelistingContract", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "internalType": "address[]", + "name": "whitelistAddresses", + "type": "address[]" + } + ], + "name": "setOperatorsWhitelists", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "transferOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "timeInSeconds", + "type": "uint64" + } + ], + "name": "updateDeclareOperatorFeePeriod", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "timeInSeconds", + "type": "uint64" + } + ], + "name": "updateExecuteOperatorFeePeriod", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "blocks", + "type": "uint64" + } + ], + "name": "updateLiquidationThresholdPeriod", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "maxFee", + "type": "uint64" + } + ], 
+ "name": "updateMaximumOperatorFee", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "updateMinimumLiquidationCollateral", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "enum SSVModules", + "name": "moduleId", + "type": "uint8" + }, + { + "internalType": "address", + "name": "moduleAddress", + "type": "address" + } + ], + "name": "updateModule", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "fee", + "type": "uint256" + } + ], + "name": "updateNetworkFee", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "percentage", + "type": "uint64" + } + ], + "name": "updateOperatorFeeIncreaseLimit", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "newImplementation", + "type": "address" + } + ], + "name": "upgradeTo", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "newImplementation", + "type": "address" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + } + ], + "name": "upgradeToAndCall", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64[]", + "name": "operatorIds", + "type": "uint64[]" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "components": [ + { + "internalType": "uint32", + "name": "validatorCount", + "type": "uint32" + }, + { + "internalType": "uint64", + "name": "networkFeeIndex", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "index", + 
"type": "uint64" + }, + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "balance", + "type": "uint256" + } + ], + "internalType": "struct ISSVNetworkCore.Cluster", + "name": "cluster", + "type": "tuple" + } + ], + "name": "withdraw", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "operatorId", + "type": "uint64" + } + ], + "name": "withdrawAllOperatorEarnings", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "withdrawNetworkEarnings", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "operatorId", + "type": "uint64" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "withdrawOperatorEarnings", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/anchor/eth/src/event_parser.rs b/anchor/eth/src/event_parser.rs new file mode 100644 index 000000000..533ed62cb --- /dev/null +++ b/anchor/eth/src/event_parser.rs @@ -0,0 +1,32 @@ +use super::gen::SSVContract; +use alloy::{rpc::types::Log, sol_types::SolEvent}; + +// Todo!() need some file that defines all the actions (duties from spec) that the validator should +// perform. Upon receiving an event in the live sync, the event log needs to be transaformed into +// and action, processed & persisted into the database, and then sent off to be executed (Runners in +// the spec). 
+ +// todo!() This should be standardized into a common format that will be used client wide +// we do not want to use the contract events structures directly and want to define some types that +// hold all of the relevant data needed for execution +pub enum NetworkAction { + ValidatorAdded(SSVContract::ValidatorAdded), +} + +// Convert (parse) an rpc::Log into an Action +impl From for NetworkAction { + fn from(source: Log) -> NetworkAction { + let topic0 = source.topic0().expect("The log should have a topic0"); + match *topic0 { + SSVContract::OperatorAdded::SIGNATURE_HASH => todo!(), + SSVContract::OperatorRemoved::SIGNATURE_HASH => todo!(), + SSVContract::ValidatorAdded::SIGNATURE_HASH => todo!(), + SSVContract::ValidatorRemoved::SIGNATURE_HASH => todo!(), + SSVContract::ClusterLiquidated::SIGNATURE_HASH => todo!(), + SSVContract::ClusterReactivated::SIGNATURE_HASH => todo!(), + SSVContract::FeeRecipientAddressUpdated::SIGNATURE_HASH => todo!(), + SSVContract::ValidatorExited::SIGNATURE_HASH => todo!(), + _ => panic!("Received an unexpected event log"), + } + } +} diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index abd60b7dd..8521fe10e 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -1,6 +1,29 @@ -// Process Events +use super::event_parser::NetworkAction; +use alloy::rpc::types::Log; + +// todo!() +// Given a set of logs, the event processor should transform it into some network action +// process it by performing all validation/write to db/anything else, and then send a message +// to some executor to perform a task if we are live + +// Process a new event by persisting it into the database and notifying +// the central processor this event has occured pub struct EventProcessor { + // reference to the database + // communication w/ central processor + // keymanager (from spec/impl) } +impl EventProcessor { + pub fn process_logs(&self, logs: Vec, live: bool) { + // Go through all of 
the logs and parse/process them based on the log types + // Reflect the change in the database and send event to central processor if we are live + for log in logs { + let action: NetworkAction = log.into(); - + if live { + // send off to the central processor + } + } + } +} diff --git a/anchor/eth/src/lib.rs b/anchor/eth/src/lib.rs index b69e2ff2b..21afe68f0 100644 --- a/anchor/eth/src/lib.rs +++ b/anchor/eth/src/lib.rs @@ -1,3 +1,5 @@ +pub use sync::SsvEventSyncer; +mod event_parser; +mod event_processor; mod gen; mod sync; -mod event_processor; diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 3f34fa7be..73ea87817 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -8,10 +8,12 @@ use alloy::rpc::types::Log; use alloy::sol_types::SolEvent; use alloy::transports::http::{Client, Http}; use futures::future::join_all; +use rand::Rng; use std::collections::BTreeMap; use std::future::Future; use std::sync::Arc; use std::sync::LazyLock; +use tokio::time::Duration; /// SSV contract events needed to come up to date with the network static SSV_EVENTS: LazyLock>> = LazyLock::new(|| { @@ -45,36 +47,41 @@ static CONTRACT_DEPLOYMENT_ADDRESS: LazyLock
= const CONTRACT_DEPLOYMENT_BLOCK: u64 = 17507487; /// Batch size for log fetching +/// todo!(), play around with this number, default max logs per filter is 20k and this contract is +/// not event heavy, so I think this could be increased a lot const BATCH_SIZE: u64 = 500; /// Typedef RPC and WS clients type RpcClient = RootProvider>; type WsClient = RootProvider; +// Retry information for log fetching +// todo!() backoff if needed +const MAX_RETRIES: i32 = 5; + /// Client for interacting with the SSV contract on Ethereum L1 /// /// Manages connections to the L1 and monitors SSV contract events to track the state of validator /// and operators. Provides both historical synchronization and live event monitoring -struct SsvEventSyncer { +pub struct SsvEventSyncer { /// Http client connected to the L1 to fetch historical SSV event information rpc_client: Arc, - // Websocket client connected to L1 to stream live SSV event information, (todo!()??) + // Websocket client connected to L1 to stream live SSV event information ws_client: WsClient, } impl SsvEventSyncer { pub async fn new() -> Result { - // todo!() add a retry layer - // Construct HTTP Provider - let http_url = "dummy_http".parse().unwrap(); // TODO!(), get this from config, unwrap + let http_url = "dummy_http".parse().unwrap(); // TODO!(), get this from config let rpc_client: Arc = Arc::new(ProviderBuilder::new().on_http(http_url)); + // Construct Websocket Provider let ws_url = "dummy ws"; // TODO!(), get this from config let ws_client = ProviderBuilder::new() .on_ws(WsConnect::new(ws_url)) .await - .map_err(|e| format!("Failed to bind to WS url {}, {}", ws_url, e))?; + .map_err(|e| format!("Failed to bind to WS: {}, {}", ws_url, e))?; Ok(Self { rpc_client, @@ -82,11 +89,14 @@ impl SsvEventSyncer { }) } - // Top level function to sync data, change comment + // Top level function to sync data pub async fn sync(&self) -> Result<(), String> { // first, perform a historical sync self.historical_sync().await?; + 
// todo!() blocks are still added while we are syncing historical state + // impl to catch up to head - follow distance + // once the historical sync is done and we have processed them, start a live sync // todo!(), live sync @@ -97,6 +107,7 @@ impl SsvEventSyncer { /// Perform a historical sync from the contract deployment block to catch up to the current /// state of the SSV network async fn historical_sync(&self) -> Result<(), String> { + // todo!() impl follow distance // Fetch range from start_block..(current_block-follow_distance) let start_block = CONTRACT_DEPLOYMENT_BLOCK; let current_block = self.rpc_client.get_block_number().await.unwrap(); @@ -123,7 +134,11 @@ impl SsvEventSyncer { ordered_event_logs.entry(block_num).or_default().push(log); } + // join them back to a vec in ordered format + let ordered_event_logs: Vec = ordered_event_logs.into_values().flatten().collect(); + // Logs are all fetched from the chain and in order, process them + //self.event_processor.process_logs(ordered_event_logs)?; Ok(()) } @@ -137,28 +152,41 @@ impl SsvEventSyncer { .to_block(to_block) .events(&*SSV_EVENTS); - // Try to fetch the logs. - // If there is an error, we want this to panic. The rpc client is layered to retry - // upon failure based on custom retry arguments. If this fails, we can assume - // there is some greater underlying issue with the rpc connection + // Try to fetch logs with a retry upon error. Try up to MAX_RETRIES times and error if we + // exceed this as we can assume there is some underlying connection issue async move { - match rpc_client.get_logs(&filter).await { - Ok(logs) => logs, - Err(_) => panic!("Unable to fetch logs"), + let mut retry_cnt = 0; + loop { + match rpc_client.get_logs(&filter).await { + Ok(logs) => return logs, + Err(_) => { + // confirm we have not exceeded max + if retry_cnt > MAX_RETRIES { + panic!("Unable to fetch logs"); + } + + // increment retry_count and jitter retry duration + // todo!() exponential backoff?? 
+ let jitter = rand::thread_rng().gen_range(0..=100); + let sleep_duration = Duration::from_millis(jitter); + tokio::time::sleep(sleep_duration).await; + retry_cnt += 1; + continue; + } + } } } } /// Live sync with the chain to get new contract events while enforcing a follow distance - /// - /// todo!() should this be done like this?? should we use an interal slot clock instead?? not - /// sure to treat this as a client itself, or an extension of a validator - /// The actual beacon node is responsible for the slot timing and block creation, and the - /// validator just polls it on intervals, so it makes sense to just stream in blocks since this - /// is just event syncing and not critial as long as it is within the slot time fn live_sync(&self) { - // Stream in a new block. We are enforcing a follow distance, so we do not care about the - // actual block. We just want to know that a new block has been added to the chain + // Do we want to stream new blocks in via a websocket connection or poll at regular + // intervals for new blocks? + // + // + // Get new events & process them. 
Will maintain very similar flow to historical sync except + // we want to set some flag signaling to handler that we want to forward these notifications + // to the central processor for some action to be taken todo!() } } From c01145801098099613dd2af6227e84d3b1451617 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 21 Nov 2024 21:29:45 +0000 Subject: [PATCH 03/49] range checking for historical sync, better error handling, event parsing --- Cargo.lock | 1 + anchor/eth/src/event_parser.rs | 97 +++++++++++++++++++---- anchor/eth/src/event_processor.rs | 30 ++++--- anchor/eth/src/sync.rs | 127 ++++++++++++++++-------------- 4 files changed, 168 insertions(+), 87 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b53dde995..842d63256 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2714,6 +2714,7 @@ version = "0.1.0" dependencies = [ "alloy", "futures", + "rand", "tokio", ] diff --git a/anchor/eth/src/event_parser.rs b/anchor/eth/src/event_parser.rs index 533ed62cb..efad90a18 100644 --- a/anchor/eth/src/event_parser.rs +++ b/anchor/eth/src/event_parser.rs @@ -1,32 +1,95 @@ use super::gen::SSVContract; +use alloy::primitives::Address; use alloy::{rpc::types::Log, sol_types::SolEvent}; -// Todo!() need some file that defines all the actions (duties from spec) that the validator should -// perform. Upon receiving an event in the live sync, the event log needs to be transaformed into -// and action, processed & persisted into the database, and then sent off to be executed (Runners in -// the spec). +// Todo!() need some file that defines all the actions that the validator should +// perform. 
Upon receiving an event in the live sync, the event log needs to be transformed into +// and action, processed & persisted into the database, and then sent off to be executed (execute +// trait in the impl) // todo!() This should be standardized into a common format that will be used client wide // we do not want to use the contract events structures directly and want to define some types that // hold all of the relevant data needed for execution + +#[derive(Debug, PartialEq)] pub enum NetworkAction { - ValidatorAdded(SSVContract::ValidatorAdded), + StopValidator { + //pubkey: bls::PublicKey + }, + LiquidateCluster { + owner: Address, + //operator_ids: Vec, + //to_liquidate: Vec + }, + ReactivateCluster { + owner: Address, + //operator_ids: Vec + //to_reactivate: Vec + }, + UpdateFeeRecipient { + owner: Address, + recipient: Address, + }, + ExitValidator { + //pubkey: bls::PublicKey + //block_number: u64, + //validator_index: u64, + //own_validator: bool, + }, + NoOp, } -// Convert (parse) an rpc::Log into an Action -impl From for NetworkAction { - fn from(source: Log) -> NetworkAction { +/// Parse a network log into an action to be executed +impl TryFrom for NetworkAction { + type Error = String; + + + fn try_from(source: Log) -> Result { let topic0 = source.topic0().expect("The log should have a topic0"); match *topic0 { - SSVContract::OperatorAdded::SIGNATURE_HASH => todo!(), - SSVContract::OperatorRemoved::SIGNATURE_HASH => todo!(), - SSVContract::ValidatorAdded::SIGNATURE_HASH => todo!(), - SSVContract::ValidatorRemoved::SIGNATURE_HASH => todo!(), - SSVContract::ClusterLiquidated::SIGNATURE_HASH => todo!(), - SSVContract::ClusterReactivated::SIGNATURE_HASH => todo!(), - SSVContract::FeeRecipientAddressUpdated::SIGNATURE_HASH => todo!(), - SSVContract::ValidatorExited::SIGNATURE_HASH => todo!(), - _ => panic!("Received an unexpected event log"), + SSVContract::ValidatorRemoved::SIGNATURE_HASH => { + let _validator_removed_log = + 
SSVContract::ValidatorRemoved::decode_log(&source.inner, true) + .map_err(|e| format!("Failed to decode a validator removed log: {}", e))? + .data; + Ok(NetworkAction::StopValidator {}) + } + SSVContract::ClusterLiquidated::SIGNATURE_HASH => { + let cluster_liquidated_log = + SSVContract::ClusterLiquidated::decode_log(&source.inner, true) + .map_err(|e| format!("Failed to decode a cluster liquidated log: {}", e))? + .data; + Ok(NetworkAction::LiquidateCluster { + owner: cluster_liquidated_log.owner, + }) + } + SSVContract::ClusterReactivated::SIGNATURE_HASH => { + let cluster_reactivated_log = + SSVContract::ClusterReactivated::decode_log(&source.inner, true) + .map_err(|e| format!("Failed to decode a cluster reactivated log: {}", e))? + .data; + Ok(NetworkAction::ReactivateCluster { + owner: cluster_reactivated_log.owner, + }) + } + SSVContract::FeeRecipientAddressUpdated::SIGNATURE_HASH => { + let recipient_updated_log = + SSVContract::FeeRecipientAddressUpdated::decode_log(&source.inner, true) + .map_err(|e| format!("Failed to decode a fee recipient address updated log: {}", e))? + .data; + Ok(NetworkAction::UpdateFeeRecipient { + owner: recipient_updated_log.owner, + recipient: recipient_updated_log.recipientAddress, + }) + } + SSVContract::ValidatorExited::SIGNATURE_HASH => { + let _validator_exited_log = + SSVContract::ValidatorExited::decode_log(&source.inner, true) + .map_err(|e| format!("Failed to decode a validator exited log: {}", e))? 
+ .data; + Ok(NetworkAction::ExitValidator {}) + } + _ => Ok(NetworkAction::NoOp) } } } diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 8521fe10e..e8b43d9eb 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -1,13 +1,8 @@ use super::event_parser::NetworkAction; use alloy::rpc::types::Log; -// todo!() -// Given a set of logs, the event processor should transform it into some network action -// process it by performing all validation/write to db/anything else, and then send a message -// to some executor to perform a task if we are live - -// Process a new event by persisting it into the database and notifying -// the central processor this event has occured +// Given a set of logs, the event processor will persist the information into the underlying +// database, parse the logs into some network action, and then send the action to be executed pub struct EventProcessor { // reference to the database // communication w/ central processor @@ -15,15 +10,24 @@ pub struct EventProcessor { } impl EventProcessor { - pub fn process_logs(&self, logs: Vec, live: bool) { - // Go through all of the logs and parse/process them based on the log types - // Reflect the change in the database and send event to central processor if we are live + /// Construct a new EventProcessor + pub fn new() -> Self { + Self {} + } + + /// Process a new set of logs + pub fn process_logs(&self, logs: Vec, live: bool) -> Result<(), String> { for log in logs { - let action: NetworkAction = log.into(); + // perform all DB updated needed with the log + // todo!() - if live { - // send off to the central processor + // If we have a valid action and are live, then send off to the controller to execute + let action: NetworkAction = log.try_into()?; + if action != NetworkAction::NoOp && live { + // todo!() send off somewhere } } + + Ok(()) } } diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 
73ea87817..99f8542f3 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -1,18 +1,14 @@ use crate::gen::SSVContract; -use alloy::primitives::FixedBytes; -use alloy::primitives::{address, Address}; +use alloy::primitives::{address, Address, FixedBytes}; use alloy::providers::{Provider, ProviderBuilder, RootProvider, WsConnect}; use alloy::pubsub::PubSubFrontend; -use alloy::rpc::types::Filter; -use alloy::rpc::types::Log; +use alloy::rpc::types::{Filter, Log}; use alloy::sol_types::SolEvent; use alloy::transports::http::{Client, Http}; -use futures::future::join_all; +use futures::future::{try_join_all, Future}; use rand::Rng; use std::collections::BTreeMap; -use std::future::Future; -use std::sync::Arc; -use std::sync::LazyLock; +use std::sync::{Arc, LazyLock}; use tokio::time::Duration; /// SSV contract events needed to come up to date with the network @@ -59,6 +55,10 @@ type WsClient = RootProvider; // todo!() backoff if needed const MAX_RETRIES: i32 = 5; +// Follow distance +// TODO!(), why 8 (in go client), or is this the eth1 follow distance +const FOLLOW_DISTANCE: u64 = 8; + /// Client for interacting with the SSV contract on Ethereum L1 /// /// Manages connections to the L1 and monitors SSV contract events to track the state of validator @@ -94,56 +94,76 @@ impl SsvEventSyncer { // first, perform a historical sync self.historical_sync().await?; - // todo!() blocks are still added while we are syncing historical state - // impl to catch up to head - follow distance - - // once the historical sync is done and we have processed them, start a live sync - // todo!(), live sync - - // OK. 
We have done the historical sync and spawned off live sync to process - Ok(()) + // start the live sync, options + // 1) spawn the sync off in its own long running task and return + // 2) transition into live sync and signal AtomicBool to coordinator + todo!() } /// Perform a historical sync from the contract deployment block to catch up to the current /// state of the SSV network async fn historical_sync(&self) -> Result<(), String> { - // todo!() impl follow distance - // Fetch range from start_block..(current_block-follow_distance) - let start_block = CONTRACT_DEPLOYMENT_BLOCK; - let current_block = self.rpc_client.get_block_number().await.unwrap(); - - // Chunk the start and end block range into a set of ranges of size BATCH_SIZE and construct - // a new task to fetch the logs from each range - let tasks: Vec<_> = (start_block..=current_block) - .step_by(BATCH_SIZE as usize) - .map(|start| { - let (start, end) = (start, std::cmp::min(start + BATCH_SIZE - 1, current_block)); - self.fetch_logs(start, end) - }) - .collect(); - - // Await all of the futures. This will panic if one of the futures is unsuccessful. - let event_logs: Vec = join_all(tasks).await.into_iter().flatten().collect(); - - // The futures may join out of order block wise. The individual events within the block - // retain their tx ordering. 
Due to this, we can reassemble back into blocks and be - // confident the order is correct - let mut ordered_event_logs: BTreeMap> = BTreeMap::new(); - for log in event_logs { - let block_num = log.block_number.expect("Log should have a block number"); - ordered_event_logs.entry(block_num).or_default().push(log); - } + // todo!(), differential between fresh sync and when we have already synced up to some block + let mut start_block = CONTRACT_DEPLOYMENT_BLOCK; + loop { + // get the current block and make sure we have blocks to sync + let current_block = self + .rpc_client + .get_block_number() + .await + .map_err(|e| format!("Unable to fetch block number {}", e))?; + if current_block < FOLLOW_DISTANCE { + break; + } - // join them back to a vec in ordered format - let ordered_event_logs: Vec = ordered_event_logs.into_values().flatten().collect(); + // calculate end block w/ follow distance + let end_block = current_block - FOLLOW_DISTANCE; + if end_block < start_block { + break; + } + + // Chunk the start and end block range into a set of ranges of size BATCH_SIZE + // and construct a future to fetch the logs in each range + let tasks: Vec<_> = (start_block..=current_block) + .step_by(BATCH_SIZE as usize) + .map(|start| { + let (start, end) = + (start, std::cmp::min(start + BATCH_SIZE - 1, current_block)); + self.fetch_logs(start, end) + }) + .collect(); + + // Await all of the futures. This will panic if one of the futures is unsuccessful. + let event_logs: Vec> = try_join_all(tasks).await?; + let event_logs: Vec = event_logs.into_iter().flatten().collect(); + + // The futures may join out of order block wise. The individual events within the block + // retain their tx ordering. 
Due to this, we can reassemble back into blocks and be + // confident the order is correct + let mut ordered_event_logs: BTreeMap> = BTreeMap::new(); + for log in event_logs { + let block_num = log.block_number.ok_or("Log is missing block number")?; + ordered_event_logs.entry(block_num).or_default().push(log); + } - // Logs are all fetched from the chain and in order, process them - //self.event_processor.process_logs(ordered_event_logs)?; + // join them back to a vec in ordered format + let ordered_event_logs: Vec = ordered_event_logs.into_values().flatten().collect(); + + // Logs are all fetched from the chain and in order, process them + //self.event_processor.process_logs(ordered_event_logs)?; + + // reset the start block to make up for missed blocks during sync + start_block = current_block + 1; + } Ok(()) } /// Fetch logs from the chain - fn fetch_logs(&self, from_block: u64, to_block: u64) -> impl Future> { + fn fetch_logs( + &self, + from_block: u64, + to_block: u64, + ) -> impl Future, String>> { // Setup filter and rpc client let rpc_client = self.rpc_client.clone(); let filter = Filter::new() @@ -158,11 +178,11 @@ impl SsvEventSyncer { let mut retry_cnt = 0; loop { match rpc_client.get_logs(&filter).await { - Ok(logs) => return logs, + Ok(logs) => return Ok(logs), Err(_) => { // confirm we have not exceeded max if retry_cnt > MAX_RETRIES { - panic!("Unable to fetch logs"); + return Err("Unable to fetch logs".to_string()); } // increment retry_count and jitter retry duration @@ -179,14 +199,7 @@ impl SsvEventSyncer { } /// Live sync with the chain to get new contract events while enforcing a follow distance - fn live_sync(&self) { - // Do we want to stream new blocks in via a websocket connection or poll at regular - // intervals for new blocks? - // - // - // Get new events & process them. 
Will maintain very similar flow to historical sync except - // we want to set some flag signaling to handler that we want to forward these notifications - // to the central processor for some action to be taken + async fn live_sync(&self) { todo!() } } From 5d98550918471fa484e72714d20eadf3661b73b6 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 21 Nov 2024 22:59:05 +0000 Subject: [PATCH 04/49] no panic --- anchor/eth/src/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 99f8542f3..5d3365ac7 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -133,7 +133,7 @@ impl SsvEventSyncer { }) .collect(); - // Await all of the futures. This will panic if one of the futures is unsuccessful. + // Await all of the futures. let event_logs: Vec> = try_join_all(tasks).await?; let event_logs: Vec = event_logs.into_iter().flatten().collect(); From 6044680c4db6284053146dc8ef52f8f84d7f69d9 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 25 Nov 2024 20:39:06 +0000 Subject: [PATCH 05/49] macro gen for decoding and handlers for event processing --- anchor/eth/src/action.rs | 84 ++++++++++++++++++++ anchor/eth/src/event_parser.rs | 114 +++++++-------------------- anchor/eth/src/event_processor.rs | 124 +++++++++++++++++++++++++++--- anchor/eth/src/lib.rs | 1 + anchor/eth/src/sync.rs | 16 +++- 5 files changed, 240 insertions(+), 99 deletions(-) create mode 100644 anchor/eth/src/action.rs diff --git a/anchor/eth/src/action.rs b/anchor/eth/src/action.rs new file mode 100644 index 000000000..62b650872 --- /dev/null +++ b/anchor/eth/src/action.rs @@ -0,0 +1,84 @@ +use super::gen::SSVContract; +use super::event_parser::EventDecoder; +use alloy::primitives::Address; +use alloy::{rpc::types::Log, sol_types::SolEvent}; + +// Todo!() need some file that defines all the actions that the validator should +// perform. 
Upon receiving an event in the live sync, the event log needs to be transformed into +// and action, processed & persisted into the database, and then sent off to be executed (execute +// trait in the impl) + +// todo!() This should be standardized into a common format that will be used client wide +// we do not want to use the contract events structures directly and want to define some types that +// hold all of the relevant data needed for execution + +#[derive(Debug, PartialEq)] +pub enum NetworkAction { + StopValidator { + //pubkey: bls::PublicKey + }, + LiquidateCluster { + owner: Address, + //operator_ids: Vec, + //to_liquidate: Vec + }, + ReactivateCluster { + owner: Address, + //operator_ids: Vec + //to_reactivate: Vec + }, + UpdateFeeRecipient { + owner: Address, + recipient: Address, + }, + ExitValidator { + //pubkey: bls::PublicKey + //block_number: u64, + //validator_index: u64, + //own_validator: bool, + }, + NoOp, +} + +/// Parse a network log into an action to be executed +impl TryFrom for NetworkAction { + type Error = String; + + fn try_from(source: Log) -> Result { + let topic0 = source.topic0().expect("The log should have a topic0"); + match *topic0 { + SSVContract::ValidatorRemoved::SIGNATURE_HASH => { + let _validator_removed_log = + SSVContract::ValidatorRemoved::decode_from_log(&source)?; + Ok(NetworkAction::StopValidator {}) + } + SSVContract::ClusterLiquidated::SIGNATURE_HASH => { + let cluster_liquidated_log = + SSVContract::ClusterLiquidated::decode_from_log(&source)?; + Ok(NetworkAction::LiquidateCluster { + owner: cluster_liquidated_log.owner, + }) + } + SSVContract::ClusterReactivated::SIGNATURE_HASH => { + let cluster_reactivated_log = + SSVContract::ClusterReactivated::decode_from_log(&source)?; + Ok(NetworkAction::ReactivateCluster { + owner: cluster_reactivated_log.owner, + }) + } + SSVContract::FeeRecipientAddressUpdated::SIGNATURE_HASH => { + let recipient_updated_log = + 
SSVContract::FeeRecipientAddressUpdated::decode_from_log(&source)?; + Ok(NetworkAction::UpdateFeeRecipient { + owner: recipient_updated_log.owner, + recipient: recipient_updated_log.recipientAddress, + }) + } + SSVContract::ValidatorExited::SIGNATURE_HASH => { + let _validator_exited_log = SSVContract::ValidatorExited::decode_from_log(&source)?; + Ok(NetworkAction::ExitValidator {}) + } + _ => Ok(NetworkAction::NoOp), + } + } +} diff --git a/anchor/eth/src/event_parser.rs b/anchor/eth/src/event_parser.rs index efad90a18..709434b6b 100644 --- a/anchor/eth/src/event_parser.rs +++ b/anchor/eth/src/event_parser.rs @@ -1,95 +1,37 @@ use super::gen::SSVContract; -use alloy::primitives::Address; use alloy::{rpc::types::Log, sol_types::SolEvent}; -// Todo!() need some file that defines all the actions that the validator should -// perform. Upon receiving an event in the live sync, the event log needs to be transformed into -// and action, processed & persisted into the database, and then sent off to be executed (execute -// trait in the impl) +// Standardized event decoding +pub trait EventDecoder { + type Output; + fn decode_from_log(log: &Log) -> Result; +} -// todo!() This should be standardized into a common format that will be used client wide -// we do not want to use the contract events structures directly and want to define some types that -// hold all of the relevant data needed for execution +macro_rules! impl_event_decoder { + ($($event_type:ty),* $(,)?) 
=> { + $( + impl EventDecoder for $event_type { + type Output = $event_type; -#[derive(Debug, PartialEq)] -pub enum NetworkAction { - StopValidator { - //pubkey: bls::PublicKey - }, - LiquidateCluster { - owner: Address, - //operator_ids: Vec, - //to_liquidate: Vec - }, - ReactivateCluster { - owner: Address, - //operator_ids: Vec - //to_reactivate: Vec - }, - UpdateFeeRecipient { - owner: Address, - recipient: Address, - }, - ExitValidator { - //pubkey: bls::PublicKey - //block_number: u64, - //validator_index: u64, - //own_validator: bool, - }, - NoOp, + fn decode_from_log(log: &Log) -> Result { + let decoded = Self::decode_log(&log.inner, true) + .map_err(|e| format!("Failed to decode {} event: {}", stringify!($event_type), e))?; + Ok(decoded.data) + } + } + )* + }; } -/// Parse a network log into an action to be executed -impl TryFrom for NetworkAction { - type Error = String; +impl_event_decoder! { + SSVContract::OperatorAdded, + SSVContract::OperatorRemoved, + SSVContract::ValidatorAdded, + SSVContract::ValidatorRemoved, + SSVContract::ClusterLiquidated, + SSVContract::ClusterReactivated, + SSVContract::FeeRecipientAddressUpdated, + SSVContract::ValidatorExited +} - fn try_from(source: Log) -> Result { - let topic0 = source.topic0().expect("The log should have a topic0"); - match *topic0 { - SSVContract::ValidatorRemoved::SIGNATURE_HASH => { - let _validator_removed_log = - SSVContract::ValidatorRemoved::decode_log(&source.inner, true) - .map_err(|e| format!("Failed to decode a validator removed log: {}", e))? - .data; - Ok(NetworkAction::StopValidator {}) - } - SSVContract::ClusterLiquidated::SIGNATURE_HASH => { - let cluster_liquidated_log = - SSVContract::ClusterLiquidated::decode_log(&source.inner, true) - .map_err(|e| format!("Failed to decode a cluster liquidated log: {}", e))? 
- .data; - Ok(NetworkAction::LiquidateCluster { - owner: cluster_liquidated_log.owner, - }) - } - SSVContract::ClusterReactivated::SIGNATURE_HASH => { - let cluster_reactivated_log = - SSVContract::ClusterReactivated::decode_log(&source.inner, true) - .map_err(|e| format!("Failed to decode a cluster reactivated log: {}", e))? - .data; - Ok(NetworkAction::ReactivateCluster { - owner: cluster_reactivated_log.owner, - }) - } - SSVContract::FeeRecipientAddressUpdated::SIGNATURE_HASH => { - let recipient_updated_log = - SSVContract::FeeRecipientAddressUpdated::decode_log(&source.inner, true) - .map_err(|e| format!("Failed to decode a fee recipient address updated log: {}", e))? - .data; - Ok(NetworkAction::UpdateFeeRecipient { - owner: recipient_updated_log.owner, - recipient: recipient_updated_log.recipientAddress, - }) - } - SSVContract::ValidatorExited::SIGNATURE_HASH => { - let _validator_exited_log = - SSVContract::ValidatorExited::decode_log(&source.inner, true) - .map_err(|e| format!("Failed to decode a validator exited log: {}", e))? 
- .data; - Ok(NetworkAction::ExitValidator {}) - } - _ => Ok(NetworkAction::NoOp) - } - } -} diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index e8b43d9eb..9d0d70100 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -1,33 +1,137 @@ -use super::event_parser::NetworkAction; +use super::event_parser::EventDecoder; +use super::action::NetworkAction; +use super::gen::SSVContract; +use alloy::primitives::B256; use alloy::rpc::types::Log; +use alloy::sol_types::SolEvent; +use std::collections::HashMap; -// Given a set of logs, the event processor will persist the information into the underlying -// database, parse the logs into some network action, and then send the action to be executed +// Handler for a log +type EventHandler = fn(&EventProcessor, &Log) -> Result<(), String>; + +// Event Processor pub struct EventProcessor { - // reference to the database - // communication w/ central processor - // keymanager (from spec/impl) + handlers: HashMap, // reference to the database } impl EventProcessor { /// Construct a new EventProcessor pub fn new() -> Self { - Self {} + // register log handlers for easy dispatch + let mut handlers: HashMap = HashMap::new(); + handlers.insert( + SSVContract::OperatorAdded::SIGNATURE_HASH, + Self::process_operator_added, + ); + handlers.insert( + SSVContract::OperatorRemoved::SIGNATURE_HASH, + Self::process_operator_removed, + ); + handlers.insert( + SSVContract::ValidatorAdded::SIGNATURE_HASH, + Self::process_validator_added, + ); + handlers.insert( + SSVContract::ValidatorRemoved::SIGNATURE_HASH, + Self::process_validator_removed, + ); + handlers.insert( + SSVContract::ClusterLiquidated::SIGNATURE_HASH, + Self::process_cluster_liquidated, + ); + handlers.insert( + SSVContract::ClusterReactivated::SIGNATURE_HASH, + Self::process_cluster_reactivated, + ); + handlers.insert( + SSVContract::FeeRecipientAddressUpdated::SIGNATURE_HASH, + 
Self::process_fee_recipient_updated, + ); + handlers.insert( + SSVContract::ValidatorExited::SIGNATURE_HASH, + Self::process_validator_exited, + ); + + Self { handlers } } /// Process a new set of logs pub fn process_logs(&self, logs: Vec, live: bool) -> Result<(), String> { for log in logs { - // perform all DB updated needed with the log - // todo!() + let topic0 = log.topic0().expect("Log should have a topic0"); + let handler = self.handlers.get(topic0).expect("A handler should exist for this topic"); + handler(self, &log)?; - // If we have a valid action and are live, then send off to the controller to execute let action: NetworkAction = log.try_into()?; if action != NetworkAction::NoOp && live { // todo!() send off somewhere } } + Ok(()) + } + + // Store the operator in the database + fn process_operator_added(&self, log: &Log) -> Result<(), String> { + let _decoded = SSVContract::OperatorAdded::decode_from_log(log)?; + // check to see if and operator with the same id already exists + // check to see if an operator with the same public key already exists + // if both pass, save it to database + //self.db.add_operator(decoded.operatorID, decoded.owner, decoded.publicKey); Ok(()) } + + fn process_operator_removed(&self, log: &Log) -> Result<(), String> { + let _decoded = SSVContract::OperatorRemoved::decode_from_log(log)?; + // this method is currently noop in the ref client + Ok(()) + } + + fn process_validator_added(&self, log: &Log) -> Result<(), String> { + let _decoded = SSVContract::ValidatorAdded::decode_from_log(log)?; + // get the next expected nonce + // increment the nonce + // validate the operators + // create shares + todo!() + } + + fn process_validator_removed(&self, log: &Log) -> Result<(), String> { + let _decoded = SSVContract::ValidatorRemoved::decode_from_log(log)?; + // get the shares + // Prevent removal of the validator registered with different owner address + // owner A registers validator with public key X (OK) + // owner B registers 
validator with public key X (NOT OK) + // owner A removes validator with public key X (OK) + // owner B removes validator with public key X (NOT OK) + // delete the shares + todo!() + } + + fn process_cluster_liquidated(&self, log: &Log) -> Result<(), String> { + let _decoded = SSVContract::ClusterLiquidated::decode_from_log(log)?; + // indicate the shares are liquidated + todo!() + } + + fn process_cluster_reactivated(&self, log: &Log) -> Result<(), String> { + let _decoded = SSVContract::ClusterReactivated::decode_from_log(log)?; + // process cluster event + // bump slashing protection + todo!() + } + + fn process_fee_recipient_updated(&self, log: &Log) -> Result<(), String> { + let _decoded = SSVContract::FeeRecipientAddressUpdated::decode_from_log(log)?; + // fetch recipient data + // create it if needed, then insert + todo!() + } + + fn process_validator_exited(&self, log: &Log) -> Result<(), String> { + let _decoded = SSVContract::ValidatorExited::decode_from_log(log)?; + // get the shares + // exit duty + todo!() + } } diff --git a/anchor/eth/src/lib.rs b/anchor/eth/src/lib.rs index 21afe68f0..bd9913bf1 100644 --- a/anchor/eth/src/lib.rs +++ b/anchor/eth/src/lib.rs @@ -3,3 +3,4 @@ mod event_parser; mod event_processor; mod gen; mod sync; +mod action; diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 5d3365ac7..3d434075d 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -11,6 +11,8 @@ use std::collections::BTreeMap; use std::sync::{Arc, LazyLock}; use tokio::time::Duration; +use crate::event_processor::EventProcessor; + /// SSV contract events needed to come up to date with the network static SSV_EVENTS: LazyLock>> = LazyLock::new(|| { vec![ @@ -68,10 +70,12 @@ pub struct SsvEventSyncer { rpc_client: Arc, // Websocket client connected to L1 to stream live SSV event information ws_client: WsClient, + // Event processor for logs + event_processor: EventProcessor, } impl SsvEventSyncer { - pub async fn new() -> Result { + pub 
async fn new(/*db: NetworkDatabase*/) -> Result { // Construct HTTP Provider let http_url = "dummy_http".parse().unwrap(); // TODO!(), get this from config let rpc_client: Arc = Arc::new(ProviderBuilder::new().on_http(http_url)); @@ -83,9 +87,13 @@ impl SsvEventSyncer { .await .map_err(|e| format!("Failed to bind to WS: {}, {}", ws_url, e))?; + // Pass db access here + let event_processor = EventProcessor::new(); + Ok(Self { rpc_client, ws_client, + event_processor, }) } @@ -149,8 +157,10 @@ impl SsvEventSyncer { // join them back to a vec in ordered format let ordered_event_logs: Vec = ordered_event_logs.into_values().flatten().collect(); - // Logs are all fetched from the chain and in order, process them - //self.event_processor.process_logs(ordered_event_logs)?; + // Logs are all fetched from the chain and in order, process them but do not send off to + // be processed + self.event_processor + .process_logs(ordered_event_logs, false)?; // reset the start block to make up for missed blocks during sync start_block = current_block + 1; From 10ab1605bad2994ae0d353043064bc646ae14703 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 25 Nov 2024 21:12:24 +0000 Subject: [PATCH 06/49] mock live sync --- anchor/eth/src/event_parser.rs | 2 -- anchor/eth/src/sync.rs | 23 +++++++++++++++++++++-- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/anchor/eth/src/event_parser.rs b/anchor/eth/src/event_parser.rs index 709434b6b..fc8c300bc 100644 --- a/anchor/eth/src/event_parser.rs +++ b/anchor/eth/src/event_parser.rs @@ -33,5 +33,3 @@ impl_event_decoder! 
{ SSVContract::FeeRecipientAddressUpdated, SSVContract::ValidatorExited } - - diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 3d434075d..61ec607c5 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -6,6 +6,7 @@ use alloy::rpc::types::{Filter, Log}; use alloy::sol_types::SolEvent; use alloy::transports::http::{Client, Http}; use futures::future::{try_join_all, Future}; +use futures::StreamExt; use rand::Rng; use std::collections::BTreeMap; use std::sync::{Arc, LazyLock}; @@ -105,6 +106,7 @@ impl SsvEventSyncer { // start the live sync, options // 1) spawn the sync off in its own long running task and return // 2) transition into live sync and signal AtomicBool to coordinator + self.live_sync().await?; todo!() } @@ -209,7 +211,24 @@ impl SsvEventSyncer { } /// Live sync with the chain to get new contract events while enforcing a follow distance - async fn live_sync(&self) { - todo!() + /// todo!(), this must be 100% reliable. add reconnect functionality, logic to deteremine when + /// we can assume there is some bigger issue + async fn live_sync(&self) -> Result<(), String> { + // Subscribe to a block stream + let mut stream = match self.ws_client.subscribe_blocks().await { + Ok(sub) => sub.into_stream(), + Err(_) => todo!(), // have some reconnect mechansim + }; + + // Stream in new block headers + while let Some(block_header) = stream.next().await { + // fetch the logs and process with execute + let relevant_block = block_header.number - FOLLOW_DISTANCE; + let logs = self.fetch_logs(relevant_block, relevant_block).await?; + self.event_processor.process_logs(logs, true)?; + } + + // this should never reach here + Ok(()) } } From eaddafc24b047db259fd56842ccc38f3d0695b39 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Tue, 26 Nov 2024 16:31:03 +0000 Subject: [PATCH 07/49] Share handling walkthrough --- anchor/eth/src/event_processor.rs | 123 ++++++++++++++++++++++++++---- anchor/eth/src/lib.rs | 1 + anchor/eth/src/sigs.rs | 31 
++++++++ anchor/eth/src/sync.rs | 4 + anchor/eth/src/util.rs | 0 5 files changed, 144 insertions(+), 15 deletions(-) create mode 100644 anchor/eth/src/sigs.rs create mode 100644 anchor/eth/src/util.rs diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 9d0d70100..7e036fae2 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -1,10 +1,17 @@ -use super::event_parser::EventDecoder; use super::action::NetworkAction; +use super::event_parser::EventDecoder; use super::gen::SSVContract; +use super::sync::MAX_OPERATORS; +use super::sigs::{RawShares, verify_signature}; use alloy::primitives::B256; use alloy::rpc::types::Log; use alloy::sol_types::SolEvent; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; + + +const SIGNATURE_LEN: usize = 96; +const PUBLICKEY_LENGTH: usize = 48; +const ENCRYPTEDKEY_LENGTH: usize = 32; // Handler for a log type EventHandler = fn(&EventProcessor, &Log) -> Result<(), String>; @@ -59,7 +66,10 @@ impl EventProcessor { pub fn process_logs(&self, logs: Vec, live: bool) -> Result<(), String> { for log in logs { let topic0 = log.topic0().expect("Log should have a topic0"); - let handler = self.handlers.get(topic0).expect("A handler should exist for this topic"); + let handler = self + .handlers + .get(topic0) + .expect("A handler should exist for this topic"); handler(self, &log)?; let action: NetworkAction = log.try_into()?; @@ -70,17 +80,31 @@ impl EventProcessor { Ok(()) } - // Store the operator in the database + // Store the operator in the database. fn process_operator_added(&self, log: &Log) -> Result<(), String> { - let _decoded = SSVContract::OperatorAdded::decode_from_log(log)?; - // check to see if and operator with the same id already exists - // check to see if an operator with the same public key already exists + let SSVContract::OperatorAdded { + operatorId: id, + owner, + publicKey: pubkey, + .. 
+ } = SSVContract::OperatorAdded::decode_from_log(log)?; - // if both pass, save it to database - //self.db.add_operator(decoded.operatorID, decoded.owner, decoded.publicKey); + // Confirm that this operator does not already exist via ID + //if self.db.operator_exists_id(id)? { + // return Err(format!("Operator with id {} already exists", id")); + //} + + // Confirm that this operator does not already exist via pubkey + //if self.db.operator_exists_pubkey(pubkey)? { + // return Err(format!("Operator with public key {} already exists", pubkey")); + //} + + // New unique operator, save into the database + //self.db.add_operator(id, owner, pubkey)?; Ok(()) } + // Remove an operator from the database fn process_operator_removed(&self, log: &Log) -> Result<(), String> { let _decoded = SSVContract::OperatorRemoved::decode_from_log(log)?; // this method is currently noop in the ref client @@ -88,12 +112,45 @@ impl EventProcessor { } fn process_validator_added(&self, log: &Log) -> Result<(), String> { - let _decoded = SSVContract::ValidatorAdded::decode_from_log(log)?; - // get the next expected nonce - // increment the nonce - // validate the operators - // create shares - todo!() + let SSVContract::ValidatorAdded { + owner, + operatorIds: operator_ids, + publicKey: pubkey, + shares, + cluster, + } = SSVContract::ValidatorAdded::decode_from_log(log)?; + + // Get expected nonce and and increment it. Talk w/ security guys if this is needed. Wont + // the network handle this? 
What does it have to do with database + // todo!() + + // Perform some validator verification, parse the share byte stream into RawShares, and + // verifiy the signature is correct + self.validate_operators(operator_ids)?; + let shares: RawShares = shares.try_into()?; + verify_signature()?; + + // Walkthrough + // 1) We want to see if a share for this validator already exists + // 2) If it does not exist, we want to create it + // 1a) Create SSVShare struct (specType.share + metadata) + // 2a) deserialize publickey (bytes) into actual BLS publickey + // 3a) populate SSVShare w/ publick key above and owner of the share + // 4a) get the id of THIS operator + // 5a) go through all of the operator_ids + // 1b) extract operator id & get its data + // 2b) add it to the sharemembers (committee for this share) + // 3b) if the operator id == id of this operator + // 4b) decrypt the corresponding encryptedKey with RSAPrivkey + Some validation + // 6a) return the new share and the private key + // 7a) validate that this share does indeed belong to this operator + // 8a) save the share in the database + + + // Thoughts. Need to think in terms of a THIS operator. Not the network at large. + // When a new validator is added, all of the operators will get this event and extract their + // corresponding share private key. The database will reflect state for this operator. 
+ Ok(()) } fn process_validator_removed(&self, log: &Log) -> Result<(), String> { @@ -134,4 +191,40 @@ impl EventProcessor { // exit duty todo!() } + + // Helper functions + fn validate_operators(&self, operator_ids: Vec) -> Result<(), String> { + let num_operators = operator_ids.len(); + + // make sure there is a valid number of operators + if num_operators > MAX_OPERATORS { + return Err(format!( + "Validator has too many operators: {}", + num_operators + )); + } + if num_operators == 0 { + return Err("Validator has no operators".to_string()); + } + + // make sure count is valid + let threshold = (num_operators - 1) / 3; + if (num_operators - 1) % 3 != 0 || !(1..=4).contains(&threshold) { + return Err(format!("Invalid number of operators: {}", num_operators)); + } + + // make sure there are no duplicates + let mut seen = HashSet::new(); + let are_duplicates = !operator_ids.iter().all(|x| seen.insert(x)); + if are_duplicates { + return Err("Operator IDs contain duplicates".to_string()); + } + + // make sure all of the operators exist + //if operator_ids.iter().any(|id| !self.db.operators_exist(id)) { + // return Err("One or more operators do not exist".to_string()); + //} + + Ok(()) + } } diff --git a/anchor/eth/src/lib.rs b/anchor/eth/src/lib.rs index bd9913bf1..ae6ed8425 100644 --- a/anchor/eth/src/lib.rs +++ b/anchor/eth/src/lib.rs @@ -4,3 +4,4 @@ mod event_processor; mod gen; mod sync; mod action; +mod sigs; diff --git a/anchor/eth/src/sigs.rs b/anchor/eth/src/sigs.rs new file mode 100644 index 000000000..8df396579 --- /dev/null +++ b/anchor/eth/src/sigs.rs @@ -0,0 +1,31 @@ + +use alloy::primitives::Bytes; + +// use types::publicKey +pub struct SharePublickKey([u8; 48]); +pub struct SharePrivateKey([u8; 32]); + + +// [signature | public keys | encrypted keys] +pub struct RawShares { + // Uncompressed bls signatures + signature: [u8; 96], + // Public keys of the Shares + public_keys: Vec, + // Split encrypted private keys + private_keys: Vec +} + +// Convert 
from a raw stream of bytes to a structured set of shares +// for a validator +impl TryFrom for RawShares { + type Error = String; + fn try_from(source: Bytes) -> Result { + todo!() + } +} + + +pub fn verify_signature() -> Result<(), String>{ + todo!() +} diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 61ec607c5..6e03589ab 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -62,6 +62,10 @@ const MAX_RETRIES: i32 = 5; // TODO!(), why 8 (in go client), or is this the eth1 follow distance const FOLLOW_DISTANCE: u64 = 8; +// The maximum number of operators a validator can have +//https://github.com/ssvlabs/ssv/blob/07095fe31e3ded288af722a9c521117980585d95/eth/eventhandler/validation.go#L15 +pub const MAX_OPERATORS: usize = 13; + /// Client for interacting with the SSV contract on Ethereum L1 /// /// Manages connections to the L1 and monitors SSV contract events to track the state of validator diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs new file mode 100644 index 000000000..e69de29bb From f60aa7999d3ef51c8413dfc20fd2874d03299e90 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Tue, 26 Nov 2024 17:28:55 +0000 Subject: [PATCH 08/49] pseudocode validator added process --- anchor/eth/src/event_processor.rs | 40 +++++++++++++++---------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 7e036fae2..912f0d616 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -119,6 +119,8 @@ impl EventProcessor { shares, cluster, } = SSVContract::ValidatorAdded::decode_from_log(log)?; + // Convert pubkey into BLS publickey, need types to do this + // todo!() // Get expected nonce and and increment it. Talk w/ security guys if this is needed. Wont // the network handle this? 
What does it have to do with database @@ -130,26 +132,24 @@ impl EventProcessor { let shares: RawShares = shares.try_into()?; verify_signature()?; - // Walkthrough - // 1) We want to see if a share for this validator already exists - // 2) If it does not exist, we want to create it - // 1a) Create SSVShare struct (specType.share + metadata) - // 2a) deserialize publickey (bytes) into actual BLS publickey - // 3a) populate SSVShare w/ publick key above and owner of the share - // 4a) get the id of THIS operator - // 5a) go through all of the operator_ids - // 1b) extract operator id & get its data - // 2b) add it to the sharemembers (committee for this share) - // 3b) if the operator id == id of this operator - // 4b) decrypt the corresponding encryptedKey with RSAPrivkey + Some validation - // 6a) return the new share and the private key - // 7a) validate that this share does indeed belong to this operator - // 8a) save the share in the database - - - // Thoughts. Need to think in terms of a THIS operator. Not the network at large. - // When a new validator is added, all of the operators will get this event and extract their - // corresponding share private key. The database will reflect state for this operator. 
+ /* + if !self.db.share_exists(pubkey) { + let mut share = SSVShare::new(pubkey, owner, domaintype); + // todo!() call this committee member, share member, or cluster member + let mut committee: Vec = Vec::new(); + for (idx, operator_id ) in operator_ids.iter().enumerate() { + let operator_data = match self.db.get_operator_data(operator_id) { + Ok(operator_data) => operator_data, + Err(e) => todo!(), + }; + committee.push(CommitteeMember{idx, shares.public_keys[idx]}); + // decrypt relevant encryptedkey and add it to keymanager + // todo!() + } + share.commitee = committee + } else { + // Get the share and confirm the owner + }*/ Ok(()) } From 9cbdeb91d0c8efa5b29f307653da457af48f46e2 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Tue, 26 Nov 2024 22:39:34 +0000 Subject: [PATCH 09/49] file renaming, validator exited & removed process pseudo --- anchor/eth/src/event_parser.rs | 2 +- anchor/eth/src/event_processor.rs | 109 ++++++++++++++---- anchor/eth/src/lib.rs | 4 +- .../eth/src/{action.rs => network_actions.rs} | 4 +- anchor/eth/src/sigs.rs | 31 ----- anchor/eth/src/util.rs | 36 ++++++ 6 files changed, 128 insertions(+), 58 deletions(-) rename anchor/eth/src/{action.rs => network_actions.rs} (98%) delete mode 100644 anchor/eth/src/sigs.rs diff --git a/anchor/eth/src/event_parser.rs b/anchor/eth/src/event_parser.rs index fc8c300bc..bd18bff44 100644 --- a/anchor/eth/src/event_parser.rs +++ b/anchor/eth/src/event_parser.rs @@ -1,7 +1,7 @@ use super::gen::SSVContract; use alloy::{rpc::types::Log, sol_types::SolEvent}; -// Standardized event decoding +// Standardized event decoding via common Decoder trait pub trait EventDecoder { type Output; fn decode_from_log(log: &Log) -> Result; diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 912f0d616..c444f16a6 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -1,24 +1,23 @@ -use super::action::NetworkAction; use super::event_parser::EventDecoder; 
use super::gen::SSVContract; +use super::network_actions::NetworkAction; +use super::util::*; use super::sync::MAX_OPERATORS; -use super::sigs::{RawShares, verify_signature}; use alloy::primitives::B256; use alloy::rpc::types::Log; use alloy::sol_types::SolEvent; use std::collections::{HashMap, HashSet}; -const SIGNATURE_LEN: usize = 96; -const PUBLICKEY_LENGTH: usize = 48; -const ENCRYPTEDKEY_LENGTH: usize = 32; - // Handler for a log type EventHandler = fn(&EventProcessor, &Log) -> Result<(), String>; // Event Processor pub struct EventProcessor { - handlers: HashMap, // reference to the database + // Function handlers for event processing + handlers: HashMap, + // reference to the database + // db: NetworkDatabase } impl EventProcessor { @@ -117,7 +116,7 @@ impl EventProcessor { operatorIds: operator_ids, publicKey: pubkey, shares, - cluster, + .. } = SSVContract::ValidatorAdded::decode_from_log(log)?; // Convert pubkey into BLS publickey, need types to do this // todo!() @@ -129,7 +128,7 @@ impl EventProcessor { // Perform some validator verification, parse the share byte stream into RawShares, and // verifiy the signature is correct self.validate_operators(operator_ids)?; - let shares: RawShares = shares.try_into()?; + let shares: ShareKeys = shares.try_into()?; verify_signature()?; /* @@ -154,42 +153,106 @@ impl EventProcessor { } fn process_validator_removed(&self, log: &Log) -> Result<(), String> { - let _decoded = SSVContract::ValidatorRemoved::decode_from_log(log)?; - // get the shares + let SSVContract::ValidatorRemoved { + owner, + operatorIds: operator_ids, + publicKey: pubkey, + .. 
+ } = SSVContract::ValidatorRemoved::decode_from_log(log)?; + // convert to proper publickey + + /* + // fetch the share + let ssvshare = match self.db.get_share(pubkey) { + Ok(ssvshare) => share, + Err(e) => Err(format!("No share exists for the validaor {}: {}", pubkey, e)) + }; + + // validate the owners // Prevent removal of the validator registered with different owner address // owner A registers validator with public key X (OK) // owner B registers validator with public key X (NOT OK) // owner A removes validator with public key X (OK) // owner B removes validator with public key X (NOT OK) - // delete the shares - todo!() + if owner != ssvshare.metadata.owner { + return Err(format!("Share already exists with a different owner address. Expected {}. Got {}", share.metadata.owner, owner)); + } + + // delete this share + self.db.delete_share(pubkey)?; + + // Check if this operator has a piece of this share. If so, we are managing the share + // private key and should also remove that + let operator_id = self.db.operator_id; + let operator_present = ssvshare.share.committee.iter().map(|member| member.operator_id == operator_id); + if operator_present { + // remove it from the keystore + } + */ + + Ok(()) } fn process_cluster_liquidated(&self, log: &Log) -> Result<(), String> { - let _decoded = SSVContract::ClusterLiquidated::decode_from_log(log)?; + let SSVContract::ClusterLiquidated { + owner, + operatorIds: operator_ids, + .. + } = SSVContract::ClusterLiquidated::decode_from_log(log)?; + // indicate the shares are liquidated todo!() } fn process_cluster_reactivated(&self, log: &Log) -> Result<(), String> { - let _decoded = SSVContract::ClusterReactivated::decode_from_log(log)?; + let SSVContract::ClusterReactivated { + owner, + operatorIds: operator_ids, + .. 
+ } = SSVContract::ClusterReactivated::decode_from_log(log)?; + // process cluster event // bump slashing protection todo!() } fn process_fee_recipient_updated(&self, log: &Log) -> Result<(), String> { - let _decoded = SSVContract::FeeRecipientAddressUpdated::decode_from_log(log)?; - // fetch recipient data - // create it if needed, then insert - todo!() + let SSVContract::FeeRecipientAddressUpdated { + owner, + recipientAddress: new_recipient, + } = SSVContract::FeeRecipientAddressUpdated::decode_from_log(log)?; + //self.db.update_recipient_address(owner, new_recipient)? + Ok(()) } fn process_validator_exited(&self, log: &Log) -> Result<(), String> { - let _decoded = SSVContract::ValidatorExited::decode_from_log(log)?; - // get the shares - // exit duty - todo!() + let SSVContract::ValidatorExited { + owner, + operatorIds: operator_ids, + publicKey: pubkey, + } = SSVContract::ValidatorExited::decode_from_log(log)?; + + /* + // fetch and validate share + let ssvshare = match self.db.get_share(pubkey) { + Ok(ssvshare) => { + // validate owner + if owner != ssvshare.metadata.owner { + return Err(format!( + "Share already exists with a different owner address. Expected {}. Got {}", + ssvshare.metadata.owner, owner)); + } + ssvshare + } + Err(e) => Err(format!( + "No share exists for the validator {}: {}", + pubkey, e + )), + }; + */ + + // Create a validator exit duty, shouldnt this be handled during live sync?? 
+ Ok(()) } // Helper functions diff --git a/anchor/eth/src/lib.rs b/anchor/eth/src/lib.rs index ae6ed8425..82530e8fc 100644 --- a/anchor/eth/src/lib.rs +++ b/anchor/eth/src/lib.rs @@ -3,5 +3,5 @@ mod event_parser; mod event_processor; mod gen; mod sync; -mod action; -mod sigs; +mod network_actions; +mod util; diff --git a/anchor/eth/src/action.rs b/anchor/eth/src/network_actions.rs similarity index 98% rename from anchor/eth/src/action.rs rename to anchor/eth/src/network_actions.rs index 62b650872..650750c91 100644 --- a/anchor/eth/src/action.rs +++ b/anchor/eth/src/network_actions.rs @@ -1,8 +1,10 @@ -use super::gen::SSVContract; use super::event_parser::EventDecoder; +use super::gen::SSVContract; use alloy::primitives::Address; use alloy::{rpc::types::Log, sol_types::SolEvent}; +//use types::{SSVShare, OperatorID} + // Todo!() need some file that defines all the actions that the validator should // perform. Upon receiving an event in the live sync, the event log needs to be transformed into // and action, processed & persisted into the database, and then sent off to be executed (execute diff --git a/anchor/eth/src/sigs.rs b/anchor/eth/src/sigs.rs deleted file mode 100644 index 8df396579..000000000 --- a/anchor/eth/src/sigs.rs +++ /dev/null @@ -1,31 +0,0 @@ - -use alloy::primitives::Bytes; - -// use types::publicKey -pub struct SharePublickKey([u8; 48]); -pub struct SharePrivateKey([u8; 32]); - - -// [signature | public keys | encrypted keys] -pub struct RawShares { - // Uncompressed bls signatures - signature: [u8; 96], - // Public keys of the Shares - public_keys: Vec, - // Split encrypted private keys - private_keys: Vec -} - -// Convert from a raw stream of bytes to a structured set of shares -// for a validator -impl TryFrom for RawShares { - type Error = String; - fn try_from(source: Bytes) -> Result { - todo!() - } -} - - -pub fn verify_signature() -> Result<(), String>{ - todo!() -} diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index 
e69de29bb..92d604cc3 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -0,0 +1,36 @@ +use alloy::primitives::Bytes; + +// Offsets to parse the share bytes +const SIG_LEN: usize = 96; +const PUBKEY_LEN: usize = 48; +const ENCRYPTEDKEY_LEN: usize = 32; + +// use types::(PublicKey, PrivateKey), +pub struct SharePublickKey([u8; 48]); +pub struct SharePrivateKey([u8; 32]); + +// All of the public keys and encrypted private keys for a +// validator key that has been broken into N shares. +pub struct ShareKeys { + // Uncompressed bls signatures + signature: [u8; 96], + // Public keys of the Shares + public_keys: Vec, + // Encrypted private key of the shares + encrypted_keys: Vec, +} + +// Convert from a raw stream of bytes to a structured set of keys. +// Event contains a bytes stream of the form +// [signature | public keys | encrypted keys]. +impl TryFrom for ShareKeys { + type Error = String; + fn try_from(source: Bytes) -> Result { + todo!() + } +} + +// Verify that the signature over the share data is correct +pub fn verify_signature() -> Result<(), String> { + todo!() +} From 563ecb33a89f7e3b256f2c82399c5e9dbfd702de Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 28 Nov 2024 16:18:15 +0000 Subject: [PATCH 10/49] retry provider, cluster processing pseudo, helpers --- anchor/eth/src/event_processor.rs | 79 +++++++++++++------------------ anchor/eth/src/lib.rs | 2 +- anchor/eth/src/network_actions.rs | 2 - anchor/eth/src/sync.rs | 10 ++++ anchor/eth/src/util.rs | 46 +++++++++++++++++- 5 files changed, 88 insertions(+), 51 deletions(-) diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index c444f16a6..29b536c98 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -2,21 +2,19 @@ use super::event_parser::EventDecoder; use super::gen::SSVContract; use super::network_actions::NetworkAction; use super::util::*; -use super::sync::MAX_OPERATORS; use alloy::primitives::B256; use 
alloy::rpc::types::Log; use alloy::sol_types::SolEvent; -use std::collections::{HashMap, HashSet}; - +use std::collections::HashMap; // Handler for a log type EventHandler = fn(&EventProcessor, &Log) -> Result<(), String>; -// Event Processor +/// Event Processor pub struct EventProcessor { - // Function handlers for event processing + /// Function handlers for event processing handlers: HashMap, - // reference to the database + // Reference to the database // db: NetworkDatabase } @@ -125,9 +123,15 @@ impl EventProcessor { // the network handle this? What does it have to do with database // todo!() - // Perform some validator verification, parse the share byte stream into RawShares, and + // Perform some validator verification, parse the share byte stream into ShareKeys, and // verifiy the signature is correct - self.validate_operators(operator_ids)?; + validate_operators(operator_ids)?; + + // make sure all of the operators exist + //if operator_ids.iter().any(|id| !self.db.operators_exist(id)) { + // return Err("One or more operators do not exist".to_string()); + //} + let shares: ShareKeys = shares.try_into()?; verify_signature()?; @@ -196,12 +200,19 @@ impl EventProcessor { fn process_cluster_liquidated(&self, log: &Log) -> Result<(), String> { let SSVContract::ClusterLiquidated { owner, - operatorIds: operator_ids, + operatorIds: mut operator_ids, .. } = SSVContract::ClusterLiquidated::decode_from_log(log)?; - // indicate the shares are liquidated - todo!() + /* + // Compute the identifier for this cluster and fetch all of the shares + let cluster_id = compute_cluster_id(owner, &mut operator_ids); + + // mark all of the shares for this specific cluster as liquidated + self.db.liquidate(cluster_id); + + */ + Ok(()) } fn process_cluster_reactivated(&self, log: &Log) -> Result<(), String> { @@ -211,9 +222,17 @@ impl EventProcessor { .. 
} = SSVContract::ClusterReactivated::decode_from_log(log)?; - // process cluster event + /* + // Compute the identifier for this cluster and fetch all of the shares + let cluster_id = compute_cluster_id(owner, &mut operator_ids); + + // mark all of the shares for this specific cluster as reactivated + self.db.reactivate(cluster_id); + // bump slashing protection - todo!() + */ + + Ok(()) } fn process_fee_recipient_updated(&self, log: &Log) -> Result<(), String> { @@ -256,38 +275,4 @@ impl EventProcessor { } // Helper functions - fn validate_operators(&self, operator_ids: Vec) -> Result<(), String> { - let num_operators = operator_ids.len(); - - // make sure there is a valid number of operators - if num_operators > MAX_OPERATORS { - return Err(format!( - "Validator has too many operators: {}", - num_operators - )); - } - if num_operators == 0 { - return Err("Validator has no operators".to_string()); - } - - // make sure count is valid - let threshold = (num_operators - 1) / 3; - if (num_operators - 1) % 3 != 0 || !(1..=4).contains(&threshold) { - return Err(format!("Invalid number of operators: {}", num_operators)); - } - - // make sure there are no duplicates - let mut seen = HashSet::new(); - let are_duplicates = !operator_ids.iter().all(|x| seen.insert(x)); - if are_duplicates { - return Err("Operator IDs contain duplicates".to_string()); - } - - // make sure all of the operators exist - //if operator_ids.iter().any(|id| !self.db.operators_exist(id)) { - // return Err("One or more operators do not exist".to_string()); - //} - - Ok(()) - } } diff --git a/anchor/eth/src/lib.rs b/anchor/eth/src/lib.rs index 82530e8fc..850be2e50 100644 --- a/anchor/eth/src/lib.rs +++ b/anchor/eth/src/lib.rs @@ -2,6 +2,6 @@ pub use sync::SsvEventSyncer; mod event_parser; mod event_processor; mod gen; -mod sync; mod network_actions; +mod sync; mod util; diff --git a/anchor/eth/src/network_actions.rs b/anchor/eth/src/network_actions.rs index 650750c91..6c707f8d0 100644 --- 
a/anchor/eth/src/network_actions.rs +++ b/anchor/eth/src/network_actions.rs @@ -22,12 +22,10 @@ pub enum NetworkAction { LiquidateCluster { owner: Address, //operator_ids: Vec, - //to_liquidate: Vec }, ReactivateCluster { owner: Address, //operator_ids: Vec - //to_reactivate: Vec }, UpdateFeeRecipient { owner: Address, diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 6e03589ab..71fc3f26e 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -5,6 +5,8 @@ use alloy::pubsub::PubSubFrontend; use alloy::rpc::types::{Filter, Log}; use alloy::sol_types::SolEvent; use alloy::transports::http::{Client, Http}; +use alloy::transports::layers::RetryBackoffLayer; +use alloy::rpc::client::ClientBuilder; use futures::future::{try_join_all, Future}; use futures::StreamExt; use rand::Rng; @@ -85,6 +87,14 @@ impl SsvEventSyncer { let http_url = "dummy_http".parse().unwrap(); // TODO!(), get this from config let rpc_client: Arc = Arc::new(ProviderBuilder::new().on_http(http_url)); + // Experiment with retry clients for both websocket and http + /* + let client = ClientBuilder::default() + .layer(RetryBackoffLayer::new(10, 300, 300)) + .http(http_url); + let retry_rpc_client = ProviderBuilder::new().on_client(client); + */ + // Construct Websocket Provider let ws_url = "dummy ws"; // TODO!(), get this from config let ws_client = ProviderBuilder::new() diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index 92d604cc3..8c2849219 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -1,4 +1,7 @@ -use alloy::primitives::Bytes; +use super::sync::MAX_OPERATORS; +use alloy::primitives::{keccak256, Address, Bytes, FixedBytes, U256}; +use std::collections::HashSet; +use types::{PublicKey}; // Offsets to parse the share bytes const SIG_LEN: usize = 96; @@ -34,3 +37,44 @@ impl TryFrom for ShareKeys { pub fn verify_signature() -> Result<(), String> { todo!() } + +// Compute the unique hash of a committee when identified by an owner +pub fn 
compute_cluster_id(owner: Address, operator_ids: &mut [u64]) -> FixedBytes<32> { + operator_ids.sort(); + + // Concat to form ... + let mut byte_repr = Bytes::new(); + for id in operator_ids {} + keccak256(byte_repr) +} + +// Perform basic verification on the operator set +pub fn validate_operators(operator_ids: Vec) -> Result<(), String> { + let num_operators = operator_ids.len(); + + // make sure there is a valid number of operators + if num_operators > MAX_OPERATORS { + return Err(format!( + "Validator has too many operators: {}", + num_operators + )); + } + if num_operators == 0 { + return Err("Validator has no operators".to_string()); + } + + // make sure count is valid + let threshold = (num_operators - 1) / 3; + if (num_operators - 1) % 3 != 0 || !(1..=4).contains(&threshold) { + return Err(format!("Invalid number of operators: {}", num_operators)); + } + + // make sure there are no duplicates + let mut seen = HashSet::new(); + let are_duplicates = !operator_ids.iter().all(|x| seen.insert(x)); + if are_duplicates { + return Err("Operator IDs contain duplicates".to_string()); + } + + Ok(()) +} From 56acc7eb5f01291ddf4df04d36723133b70f9523 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Fri, 13 Dec 2024 22:15:40 +0000 Subject: [PATCH 11/49] start on db integration into execution layer --- .gitignore | 3 +- Cargo.lock | 23 +- Cargo.toml | 3 + anchor/common/ssv_types/Cargo.toml | 1 + anchor/common/ssv_types/src/cluster.rs | 6 +- anchor/common/ssv_types/src/lib.rs | 5 +- anchor/common/ssv_types/src/operator.rs | 9 + anchor/eth/Cargo.toml | 3 +- anchor/eth/src/abi/ssv_contract.json | 2125 ----------------------- anchor/eth/src/event_processor.rs | 245 ++- anchor/eth/src/gen.rs | 20 +- anchor/eth/src/network_actions.rs | 11 - anchor/eth/src/sync.rs | 15 +- anchor/eth/src/util.rs | 96 +- 14 files changed, 250 insertions(+), 2315 deletions(-) delete mode 100644 anchor/eth/src/abi/ssv_contract.json diff --git a/.gitignore b/.gitignore index 4240a3626..c11516bd9 100644 
--- a/.gitignore +++ b/.gitignore @@ -19,4 +19,5 @@ perf.data* /.vscode # cross -/zcross \ No newline at end of file +/zcross +anchor/database diff --git a/Cargo.lock b/Cargo.lock index b2b5b9c83..07da0f9bd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2185,6 +2185,22 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "database" +version = "0.1.0" +dependencies = [ + "base64 0.22.1", + "openssl", + "parking_lot 0.12.3", + "r2d2", + "r2d2_sqlite", + "rand", + "rusqlite", + "ssv_types", + "tempfile", + "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", +] + [[package]] name = "db-key" version = "0.0.5" @@ -2766,8 +2782,10 @@ name = "eth" version = "0.1.0" dependencies = [ "alloy", + "database", "futures", "rand", + "ssv_types", "tokio", "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", ] @@ -7437,9 +7455,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" dependencies = [ "web-time", ] @@ -8171,6 +8189,7 @@ dependencies = [ "base64 0.22.1", "derive_more 1.0.0", "openssl", + "rusqlite", "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", ] diff --git a/Cargo.toml b/Cargo.toml index 9132e14bc..1eaaedef5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ members = [ "anchor/network", "anchor/eth", "anchor/common/version", + "anchor/database", "anchor/processor", "anchor/qbft", "anchor/common/ssv_types", @@ -28,6 +29,7 @@ network = { path ="anchor/network"} version = { path ="anchor/common/version"} processor = { path = "anchor/processor" } ssv_types = { path = "anchor/common/ssv_types" } +database = { path = "anchor/database" } lighthouse_network = { git = "https://github.com/sigp/lighthouse", branch = "unstable"} task_executor = 
{ git = "https://github.com/sigp/lighthouse", branch = "unstable", default-features = false, features = [ "tracing", ] } metrics = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } @@ -61,6 +63,7 @@ tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["fmt", "env-filter"] } base64 = "0.22.1" openssl = "0.10.68" +rusqlite = "0.28.0" [profile.maxperf] inherits = "release" diff --git a/anchor/common/ssv_types/Cargo.toml b/anchor/common/ssv_types/Cargo.toml index 1d6b71316..2d07f7ed6 100644 --- a/anchor/common/ssv_types/Cargo.toml +++ b/anchor/common/ssv_types/Cargo.toml @@ -9,3 +9,4 @@ types = { workspace = true} openssl = { workspace = true } derive_more = { workspace = true } base64 = { workspace = true } +rusqlite = { workspace = true } diff --git a/anchor/common/ssv_types/src/cluster.rs b/anchor/common/ssv_types/src/cluster.rs index 308aee675..97acde7de 100644 --- a/anchor/common/ssv_types/src/cluster.rs +++ b/anchor/common/ssv_types/src/cluster.rs @@ -48,6 +48,10 @@ pub struct ValidatorMetadata { pub fee_recipient: Address, /// Graffiti pub graffiti: Graffiti, - /// The owner of the validator + /// The owner of the validator and cluster pub owner: Address, } + +pub fn compute_cluster_id(owner: Address, operator_ids: &mut [u64]) -> ClusterId { + todo!() +} diff --git a/anchor/common/ssv_types/src/lib.rs b/anchor/common/ssv_types/src/lib.rs index 6d25f44d2..ca3d929f3 100644 --- a/anchor/common/ssv_types/src/lib.rs +++ b/anchor/common/ssv_types/src/lib.rs @@ -1,7 +1,10 @@ -pub use cluster::{Cluster, ClusterId, ClusterMember, ValidatorIndex, ValidatorMetadata}; +pub use cluster::{ + compute_cluster_id, Cluster, ClusterId, ClusterMember, ValidatorIndex, ValidatorMetadata, +}; pub use operator::{Operator, OperatorId}; pub use share::Share; mod cluster; mod operator; mod share; +mod sql_conversions; mod util; diff --git a/anchor/common/ssv_types/src/operator.rs b/anchor/common/ssv_types/src/operator.rs index 
ce7f1cf58..a8dd4d479 100644 --- a/anchor/common/ssv_types/src/operator.rs +++ b/anchor/common/ssv_types/src/operator.rs @@ -5,6 +5,7 @@ use openssl::rsa::Rsa; use std::cmp::Eq; use std::fmt::Debug; use std::hash::Hash; +use std::str::Bytes; use types::Address; /// Unique identifier for an Operator. @@ -29,6 +30,14 @@ impl Operator { Ok(Self::new_with_pubkey(rsa_pubkey, operator_id, owner)) } + pub fn new_with_bytes( + rsa_bytes: Bytes, + operator_id: OperatorId, + owner: Address, + ) -> Result { + todo!() + } + // Creates a new operator from an existing RSA public key and OperatorId pub fn new_with_pubkey(rsa_pubkey: Rsa, id: OperatorId, owner: Address) -> Self { Self { diff --git a/anchor/eth/Cargo.toml b/anchor/eth/Cargo.toml index b84994a87..9d16b0322 100644 --- a/anchor/eth/Cargo.toml +++ b/anchor/eth/Cargo.toml @@ -10,7 +10,8 @@ alloy = { version = "0.6.4", features = ["sol-types", "transports", "json", "con tokio = { workspace = true } futures = { workspace = true } rand = "0.8.5" - types = { workspace = true } +database = { workspace = true } +ssv_types = { workspace = true} diff --git a/anchor/eth/src/abi/ssv_contract.json b/anchor/eth/src/abi/ssv_contract.json deleted file mode 100644 index 798172919..000000000 --- a/anchor/eth/src/abi/ssv_contract.json +++ /dev/null @@ -1,2125 +0,0 @@ -[ - { - "inputs": [], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "contractAddress", - "type": "address" - } - ], - "name": "AddressIsWhitelistingContract", - "type": "error" - }, - { - "inputs": [], - "name": "ApprovalNotWithinTimeframe", - "type": "error" - }, - { - "inputs": [], - "name": "CallerNotOwner", - "type": "error" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "caller", - "type": "address" - }, - { - "internalType": "address", - "name": "owner", - "type": "address" - } - ], - "name": "CallerNotOwnerWithData", - "type": "error" - }, - { - "inputs": [], - "name": 
"CallerNotWhitelisted", - "type": "error" - }, - { - "inputs": [ - { - "internalType": "uint64", - "name": "operatorId", - "type": "uint64" - } - ], - "name": "CallerNotWhitelistedWithData", - "type": "error" - }, - { - "inputs": [], - "name": "ClusterAlreadyEnabled", - "type": "error" - }, - { - "inputs": [], - "name": "ClusterDoesNotExists", - "type": "error" - }, - { - "inputs": [], - "name": "ClusterIsLiquidated", - "type": "error" - }, - { - "inputs": [], - "name": "ClusterNotLiquidatable", - "type": "error" - }, - { - "inputs": [], - "name": "EmptyPublicKeysList", - "type": "error" - }, - { - "inputs": [ - { - "internalType": "uint64", - "name": "operatorId", - "type": "uint64" - } - ], - "name": "ExceedValidatorLimit", - "type": "error" - }, - { - "inputs": [ - { - "internalType": "uint64", - "name": "operatorId", - "type": "uint64" - } - ], - "name": "ExceedValidatorLimitWithData", - "type": "error" - }, - { - "inputs": [], - "name": "FeeExceedsIncreaseLimit", - "type": "error" - }, - { - "inputs": [], - "name": "FeeIncreaseNotAllowed", - "type": "error" - }, - { - "inputs": [], - "name": "FeeTooHigh", - "type": "error" - }, - { - "inputs": [], - "name": "FeeTooLow", - "type": "error" - }, - { - "inputs": [], - "name": "IncorrectClusterState", - "type": "error" - }, - { - "inputs": [], - "name": "IncorrectValidatorState", - "type": "error" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "publicKey", - "type": "bytes" - } - ], - "name": "IncorrectValidatorStateWithData", - "type": "error" - }, - { - "inputs": [], - "name": "InsufficientBalance", - "type": "error" - }, - { - "inputs": [], - "name": "InvalidContractAddress", - "type": "error" - }, - { - "inputs": [], - "name": "InvalidOperatorIdsLength", - "type": "error" - }, - { - "inputs": [], - "name": "InvalidPublicKeyLength", - "type": "error" - }, - { - "inputs": [], - "name": "InvalidWhitelistAddressesLength", - "type": "error" - }, - { - "inputs": [ - { - "internalType": "address", - 
"name": "contractAddress", - "type": "address" - } - ], - "name": "InvalidWhitelistingContract", - "type": "error" - }, - { - "inputs": [], - "name": "MaxValueExceeded", - "type": "error" - }, - { - "inputs": [], - "name": "NewBlockPeriodIsBelowMinimum", - "type": "error" - }, - { - "inputs": [], - "name": "NoFeeDeclared", - "type": "error" - }, - { - "inputs": [], - "name": "NotAuthorized", - "type": "error" - }, - { - "inputs": [], - "name": "OperatorAlreadyExists", - "type": "error" - }, - { - "inputs": [], - "name": "OperatorDoesNotExist", - "type": "error" - }, - { - "inputs": [], - "name": "OperatorsListNotUnique", - "type": "error" - }, - { - "inputs": [], - "name": "PublicKeysSharesLengthMismatch", - "type": "error" - }, - { - "inputs": [], - "name": "SameFeeChangeNotAllowed", - "type": "error" - }, - { - "inputs": [], - "name": "TargetModuleDoesNotExist", - "type": "error" - }, - { - "inputs": [ - { - "internalType": "uint8", - "name": "moduleId", - "type": "uint8" - } - ], - "name": "TargetModuleDoesNotExistWithData", - "type": "error" - }, - { - "inputs": [], - "name": "TokenTransferFailed", - "type": "error" - }, - { - "inputs": [], - "name": "UnsortedOperatorsList", - "type": "error" - }, - { - "inputs": [], - "name": "ValidatorAlreadyExists", - "type": "error" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "publicKey", - "type": "bytes" - } - ], - "name": "ValidatorAlreadyExistsWithData", - "type": "error" - }, - { - "inputs": [], - "name": "ValidatorDoesNotExist", - "type": "error" - }, - { - "inputs": [], - "name": "ZeroAddressNotAllowed", - "type": "error" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "address", - "name": "previousAdmin", - "type": "address" - }, - { - "indexed": false, - "internalType": "address", - "name": "newAdmin", - "type": "address" - } - ], - "name": "AdminChanged", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - 
"internalType": "address", - "name": "beacon", - "type": "address" - } - ], - "name": "BeaconUpgraded", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "value", - "type": "uint256" - }, - { - "components": [ - { - "internalType": "uint32", - "name": "validatorCount", - "type": "uint32" - }, - { - "internalType": "uint64", - "name": "networkFeeIndex", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "index", - "type": "uint64" - }, - { - "internalType": "bool", - "name": "active", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "balance", - "type": "uint256" - } - ], - "indexed": false, - "internalType": "struct ISSVNetworkCore.Cluster", - "name": "cluster", - "type": "tuple" - } - ], - "name": "ClusterDeposited", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "components": [ - { - "internalType": "uint32", - "name": "validatorCount", - "type": "uint32" - }, - { - "internalType": "uint64", - "name": "networkFeeIndex", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "index", - "type": "uint64" - }, - { - "internalType": "bool", - "name": "active", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "balance", - "type": "uint256" - } - ], - "indexed": false, - "internalType": "struct ISSVNetworkCore.Cluster", - "name": "cluster", - "type": "tuple" - } - ], - "name": "ClusterLiquidated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": 
"owner", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "components": [ - { - "internalType": "uint32", - "name": "validatorCount", - "type": "uint32" - }, - { - "internalType": "uint64", - "name": "networkFeeIndex", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "index", - "type": "uint64" - }, - { - "internalType": "bool", - "name": "active", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "balance", - "type": "uint256" - } - ], - "indexed": false, - "internalType": "struct ISSVNetworkCore.Cluster", - "name": "cluster", - "type": "tuple" - } - ], - "name": "ClusterReactivated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "value", - "type": "uint256" - }, - { - "components": [ - { - "internalType": "uint32", - "name": "validatorCount", - "type": "uint32" - }, - { - "internalType": "uint64", - "name": "networkFeeIndex", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "index", - "type": "uint64" - }, - { - "internalType": "bool", - "name": "active", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "balance", - "type": "uint256" - } - ], - "indexed": false, - "internalType": "struct ISSVNetworkCore.Cluster", - "name": "cluster", - "type": "tuple" - } - ], - "name": "ClusterWithdrawn", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint64", - "name": "value", - "type": "uint64" - } - ], - "name": "DeclareOperatorFeePeriodUpdated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint64", - "name": "value", - "type": 
"uint64" - } - ], - "name": "ExecuteOperatorFeePeriodUpdated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": false, - "internalType": "address", - "name": "recipientAddress", - "type": "address" - } - ], - "name": "FeeRecipientAddressUpdated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint8", - "name": "version", - "type": "uint8" - } - ], - "name": "Initialized", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint64", - "name": "value", - "type": "uint64" - } - ], - "name": "LiquidationThresholdPeriodUpdated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint256", - "name": "value", - "type": "uint256" - } - ], - "name": "MinimumLiquidationCollateralUpdated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "enum SSVModules", - "name": "moduleId", - "type": "uint8" - }, - { - "indexed": false, - "internalType": "address", - "name": "moduleAddress", - "type": "address" - } - ], - "name": "ModuleUpgraded", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint256", - "name": "value", - "type": "uint256" - }, - { - "indexed": false, - "internalType": "address", - "name": "recipient", - "type": "address" - } - ], - "name": "NetworkEarningsWithdrawn", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint256", - "name": "oldFee", - "type": "uint256" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "newFee", - "type": "uint256" - } - ], - "name": "NetworkFeeUpdated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "uint64", 
- "name": "operatorId", - "type": "uint64" - }, - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": false, - "internalType": "bytes", - "name": "publicKey", - "type": "bytes" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "fee", - "type": "uint256" - } - ], - "name": "OperatorAdded", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": true, - "internalType": "uint64", - "name": "operatorId", - "type": "uint64" - } - ], - "name": "OperatorFeeDeclarationCancelled", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": true, - "internalType": "uint64", - "name": "operatorId", - "type": "uint64" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "blockNumber", - "type": "uint256" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "fee", - "type": "uint256" - } - ], - "name": "OperatorFeeDeclared", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": true, - "internalType": "uint64", - "name": "operatorId", - "type": "uint64" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "blockNumber", - "type": "uint256" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "fee", - "type": "uint256" - } - ], - "name": "OperatorFeeExecuted", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint64", - "name": "value", - "type": "uint64" - } - ], - "name": "OperatorFeeIncreaseLimitUpdated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint64", - "name": 
"maxFee", - "type": "uint64" - } - ], - "name": "OperatorMaximumFeeUpdated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "indexed": false, - "internalType": "address[]", - "name": "whitelistAddresses", - "type": "address[]" - } - ], - "name": "OperatorMultipleWhitelistRemoved", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "indexed": false, - "internalType": "address[]", - "name": "whitelistAddresses", - "type": "address[]" - } - ], - "name": "OperatorMultipleWhitelistUpdated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "indexed": false, - "internalType": "bool", - "name": "toPrivate", - "type": "bool" - } - ], - "name": "OperatorPrivacyStatusUpdated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "uint64", - "name": "operatorId", - "type": "uint64" - } - ], - "name": "OperatorRemoved", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "indexed": false, - "internalType": "address", - "name": "whitelistingContract", - "type": "address" - } - ], - "name": "OperatorWhitelistingContractUpdated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": true, - "internalType": "uint64", - "name": "operatorId", - "type": "uint64" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "value", - "type": "uint256" - } - ], - "name": "OperatorWithdrawn", - "type": "event" - }, - { - 
"anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "previousOwner", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "newOwner", - "type": "address" - } - ], - "name": "OwnershipTransferStarted", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "previousOwner", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "newOwner", - "type": "address" - } - ], - "name": "OwnershipTransferred", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "implementation", - "type": "address" - } - ], - "name": "Upgraded", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "indexed": false, - "internalType": "bytes", - "name": "publicKey", - "type": "bytes" - }, - { - "indexed": false, - "internalType": "bytes", - "name": "shares", - "type": "bytes" - }, - { - "components": [ - { - "internalType": "uint32", - "name": "validatorCount", - "type": "uint32" - }, - { - "internalType": "uint64", - "name": "networkFeeIndex", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "index", - "type": "uint64" - }, - { - "internalType": "bool", - "name": "active", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "balance", - "type": "uint256" - } - ], - "indexed": false, - "internalType": "struct ISSVNetworkCore.Cluster", - "name": "cluster", - "type": "tuple" - } - ], - "name": "ValidatorAdded", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": false, - 
"internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "indexed": false, - "internalType": "bytes", - "name": "publicKey", - "type": "bytes" - } - ], - "name": "ValidatorExited", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "indexed": false, - "internalType": "bytes", - "name": "publicKey", - "type": "bytes" - }, - { - "components": [ - { - "internalType": "uint32", - "name": "validatorCount", - "type": "uint32" - }, - { - "internalType": "uint64", - "name": "networkFeeIndex", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "index", - "type": "uint64" - }, - { - "internalType": "bool", - "name": "active", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "balance", - "type": "uint256" - } - ], - "indexed": false, - "internalType": "struct ISSVNetworkCore.Cluster", - "name": "cluster", - "type": "tuple" - } - ], - "name": "ValidatorRemoved", - "type": "event" - }, - { - "stateMutability": "nonpayable", - "type": "fallback" - }, - { - "inputs": [], - "name": "acceptOwnership", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes[]", - "name": "publicKeys", - "type": "bytes[]" - }, - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - } - ], - "name": "bulkExitValidator", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes[]", - "name": "publicKeys", - "type": "bytes[]" - }, - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "internalType": "bytes[]", - "name": "sharesData", - "type": "bytes[]" - }, - { - "internalType": "uint256", - "name": "amount", - "type": "uint256" - }, 
- { - "components": [ - { - "internalType": "uint32", - "name": "validatorCount", - "type": "uint32" - }, - { - "internalType": "uint64", - "name": "networkFeeIndex", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "index", - "type": "uint64" - }, - { - "internalType": "bool", - "name": "active", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "balance", - "type": "uint256" - } - ], - "internalType": "struct ISSVNetworkCore.Cluster", - "name": "cluster", - "type": "tuple" - } - ], - "name": "bulkRegisterValidator", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes[]", - "name": "publicKeys", - "type": "bytes[]" - }, - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "components": [ - { - "internalType": "uint32", - "name": "validatorCount", - "type": "uint32" - }, - { - "internalType": "uint64", - "name": "networkFeeIndex", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "index", - "type": "uint64" - }, - { - "internalType": "bool", - "name": "active", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "balance", - "type": "uint256" - } - ], - "internalType": "struct ISSVNetworkCore.Cluster", - "name": "cluster", - "type": "tuple" - } - ], - "name": "bulkRemoveValidator", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64", - "name": "operatorId", - "type": "uint64" - } - ], - "name": "cancelDeclaredOperatorFee", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64", - "name": "operatorId", - "type": "uint64" - }, - { - "internalType": "uint256", - "name": "fee", - "type": "uint256" - } - ], - "name": "declareOperatorFee", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": 
"address", - "name": "clusterOwner", - "type": "address" - }, - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "internalType": "uint256", - "name": "amount", - "type": "uint256" - }, - { - "components": [ - { - "internalType": "uint32", - "name": "validatorCount", - "type": "uint32" - }, - { - "internalType": "uint64", - "name": "networkFeeIndex", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "index", - "type": "uint64" - }, - { - "internalType": "bool", - "name": "active", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "balance", - "type": "uint256" - } - ], - "internalType": "struct ISSVNetworkCore.Cluster", - "name": "cluster", - "type": "tuple" - } - ], - "name": "deposit", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64", - "name": "operatorId", - "type": "uint64" - } - ], - "name": "executeOperatorFee", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "publicKey", - "type": "bytes" - }, - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - } - ], - "name": "exitValidator", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "getVersion", - "outputs": [ - { - "internalType": "string", - "name": "version", - "type": "string" - } - ], - "stateMutability": "pure", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "contract IERC20", - "name": "token_", - "type": "address" - }, - { - "internalType": "contract ISSVOperators", - "name": "ssvOperators_", - "type": "address" - }, - { - "internalType": "contract ISSVClusters", - "name": "ssvClusters_", - "type": "address" - }, - { - "internalType": "contract ISSVDAO", - "name": "ssvDAO_", - "type": "address" - }, - { - "internalType": "contract ISSVViews", - "name": "ssvViews_", - 
"type": "address" - }, - { - "internalType": "uint64", - "name": "minimumBlocksBeforeLiquidation_", - "type": "uint64" - }, - { - "internalType": "uint256", - "name": "minimumLiquidationCollateral_", - "type": "uint256" - }, - { - "internalType": "uint32", - "name": "validatorsPerOperatorLimit_", - "type": "uint32" - }, - { - "internalType": "uint64", - "name": "declareOperatorFeePeriod_", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "executeOperatorFeePeriod_", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "operatorMaxFeeIncrease_", - "type": "uint64" - } - ], - "name": "initialize", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "clusterOwner", - "type": "address" - }, - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "components": [ - { - "internalType": "uint32", - "name": "validatorCount", - "type": "uint32" - }, - { - "internalType": "uint64", - "name": "networkFeeIndex", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "index", - "type": "uint64" - }, - { - "internalType": "bool", - "name": "active", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "balance", - "type": "uint256" - } - ], - "internalType": "struct ISSVNetworkCore.Cluster", - "name": "cluster", - "type": "tuple" - } - ], - "name": "liquidate", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "owner", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "pendingOwner", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "proxiableUUID", - "outputs": [ - { - "internalType": "bytes32", - 
"name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "internalType": "uint256", - "name": "amount", - "type": "uint256" - }, - { - "components": [ - { - "internalType": "uint32", - "name": "validatorCount", - "type": "uint32" - }, - { - "internalType": "uint64", - "name": "networkFeeIndex", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "index", - "type": "uint64" - }, - { - "internalType": "bool", - "name": "active", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "balance", - "type": "uint256" - } - ], - "internalType": "struct ISSVNetworkCore.Cluster", - "name": "cluster", - "type": "tuple" - } - ], - "name": "reactivate", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64", - "name": "operatorId", - "type": "uint64" - }, - { - "internalType": "uint256", - "name": "fee", - "type": "uint256" - } - ], - "name": "reduceOperatorFee", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "publicKey", - "type": "bytes" - }, - { - "internalType": "uint256", - "name": "fee", - "type": "uint256" - }, - { - "internalType": "bool", - "name": "setPrivate", - "type": "bool" - } - ], - "name": "registerOperator", - "outputs": [ - { - "internalType": "uint64", - "name": "id", - "type": "uint64" - } - ], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "publicKey", - "type": "bytes" - }, - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "internalType": "bytes", - "name": "sharesData", - "type": "bytes" - }, - { - "internalType": "uint256", - "name": "amount", - "type": "uint256" - }, - { - "components": [ - { - "internalType": 
"uint32", - "name": "validatorCount", - "type": "uint32" - }, - { - "internalType": "uint64", - "name": "networkFeeIndex", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "index", - "type": "uint64" - }, - { - "internalType": "bool", - "name": "active", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "balance", - "type": "uint256" - } - ], - "internalType": "struct ISSVNetworkCore.Cluster", - "name": "cluster", - "type": "tuple" - } - ], - "name": "registerValidator", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64", - "name": "operatorId", - "type": "uint64" - } - ], - "name": "removeOperator", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - } - ], - "name": "removeOperatorsWhitelistingContract", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "internalType": "address[]", - "name": "whitelistAddresses", - "type": "address[]" - } - ], - "name": "removeOperatorsWhitelists", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "publicKey", - "type": "bytes" - }, - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "components": [ - { - "internalType": "uint32", - "name": "validatorCount", - "type": "uint32" - }, - { - "internalType": "uint64", - "name": "networkFeeIndex", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "index", - "type": "uint64" - }, - { - "internalType": "bool", - "name": "active", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "balance", - "type": "uint256" - } - ], - "internalType": "struct ISSVNetworkCore.Cluster", 
- "name": "cluster", - "type": "tuple" - } - ], - "name": "removeValidator", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "renounceOwnership", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "recipientAddress", - "type": "address" - } - ], - "name": "setFeeRecipientAddress", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - } - ], - "name": "setOperatorsPrivateUnchecked", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - } - ], - "name": "setOperatorsPublicUnchecked", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "internalType": "contract ISSVWhitelistingContract", - "name": "whitelistingContract", - "type": "address" - } - ], - "name": "setOperatorsWhitelistingContract", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "internalType": "address[]", - "name": "whitelistAddresses", - "type": "address[]" - } - ], - "name": "setOperatorsWhitelists", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "newOwner", - "type": "address" - } - ], - "name": "transferOwnership", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64", - "name": "timeInSeconds", - "type": "uint64" - } - ], - "name": "updateDeclareOperatorFeePeriod", - "outputs": 
[], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64", - "name": "timeInSeconds", - "type": "uint64" - } - ], - "name": "updateExecuteOperatorFeePeriod", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64", - "name": "blocks", - "type": "uint64" - } - ], - "name": "updateLiquidationThresholdPeriod", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64", - "name": "maxFee", - "type": "uint64" - } - ], - "name": "updateMaximumOperatorFee", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "amount", - "type": "uint256" - } - ], - "name": "updateMinimumLiquidationCollateral", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "enum SSVModules", - "name": "moduleId", - "type": "uint8" - }, - { - "internalType": "address", - "name": "moduleAddress", - "type": "address" - } - ], - "name": "updateModule", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "fee", - "type": "uint256" - } - ], - "name": "updateNetworkFee", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64", - "name": "percentage", - "type": "uint64" - } - ], - "name": "updateOperatorFeeIncreaseLimit", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "newImplementation", - "type": "address" - } - ], - "name": "upgradeTo", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "newImplementation", - "type": "address" - }, 
- { - "internalType": "bytes", - "name": "data", - "type": "bytes" - } - ], - "name": "upgradeToAndCall", - "outputs": [], - "stateMutability": "payable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64[]", - "name": "operatorIds", - "type": "uint64[]" - }, - { - "internalType": "uint256", - "name": "amount", - "type": "uint256" - }, - { - "components": [ - { - "internalType": "uint32", - "name": "validatorCount", - "type": "uint32" - }, - { - "internalType": "uint64", - "name": "networkFeeIndex", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "index", - "type": "uint64" - }, - { - "internalType": "bool", - "name": "active", - "type": "bool" - }, - { - "internalType": "uint256", - "name": "balance", - "type": "uint256" - } - ], - "internalType": "struct ISSVNetworkCore.Cluster", - "name": "cluster", - "type": "tuple" - } - ], - "name": "withdraw", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64", - "name": "operatorId", - "type": "uint64" - } - ], - "name": "withdrawAllOperatorEarnings", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "amount", - "type": "uint256" - } - ], - "name": "withdrawNetworkEarnings", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint64", - "name": "operatorId", - "type": "uint64" - }, - { - "internalType": "uint256", - "name": "amount", - "type": "uint256" - } - ], - "name": "withdrawOperatorEarnings", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - } -] \ No newline at end of file diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 29b536c98..e33d80473 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -5,7 +5,12 @@ use super::util::*; use 
alloy::primitives::B256; use alloy::rpc::types::Log; use alloy::sol_types::SolEvent; +use database::NetworkDatabase; +use ssv_types::{compute_cluster_id, Cluster, ClusterMember, Operator, OperatorId}; use std::collections::HashMap; +use std::str::FromStr; +use std::sync::Arc; +use types::PublicKey; // Handler for a log type EventHandler = fn(&EventProcessor, &Log) -> Result<(), String>; @@ -15,12 +20,12 @@ pub struct EventProcessor { /// Function handlers for event processing handlers: HashMap, // Reference to the database - // db: NetworkDatabase + db: Arc, } impl EventProcessor { /// Construct a new EventProcessor - pub fn new() -> Self { + pub fn new(db: Arc) -> Self { // register log handlers for easy dispatch let mut handlers: HashMap = HashMap::new(); handlers.insert( @@ -56,7 +61,7 @@ impl EventProcessor { Self::process_validator_exited, ); - Self { handlers } + Self { handlers, db } } /// Process a new set of logs @@ -77,202 +82,186 @@ impl EventProcessor { Ok(()) } - // Store the operator in the database. + // A new Operator has been registered in the network. fn process_operator_added(&self, log: &Log) -> Result<(), String> { let SSVContract::OperatorAdded { - operatorId: id, + operatorId, owner, - publicKey: pubkey, + publicKey, .. } = SSVContract::OperatorAdded::decode_from_log(log)?; + let operator_id = OperatorId(operatorId); - // Confirm that this operator does not already exist via ID - //if self.db.operator_exists_id(id)? { - // return Err(format!("Operator with id {} already exists", id")); - //} - - // Confirm that this operator does not already exist via pubkey - //if self.db.operator_exists_pubkey(pubkey)? 
{ - // return Err(format!("Operator with public key {} already exists", pubkey")); - //} + // Confirm that this operator does not already exist + if self.db.operator_exists(&operator_id) { + return Err(String::from("Operator does not exist")); + } - // New unique operator, save into the database - //self.db.add_operator(id, owner, pubkey)?; + // Construct the operator and then insert it into the database + let operator = Operator::new(&publicKey.to_string(), operator_id, owner) + .map_err(|e| format!("Failed to construct an operator: {e}"))?; + self.db + .insert_operator(&operator) + .map_err(|e| format!("Failed to insert operator: {e}"))?; Ok(()) } - // Remove an operator from the database + // An Operator has been removed from the network fn process_operator_removed(&self, log: &Log) -> Result<(), String> { let _decoded = SSVContract::OperatorRemoved::decode_from_log(log)?; // this method is currently noop in the ref client Ok(()) } + // A new validator has entered the network. This means that a new cluster has formed and this + // operator is a potential member in the cluster. Perform verification, store all data, and + // extract the key if one belongs to us. fn process_validator_added(&self, log: &Log) -> Result<(), String> { let SSVContract::ValidatorAdded { owner, - operatorIds: operator_ids, - publicKey: pubkey, + operatorIds, + publicKey, shares, .. } = SSVContract::ValidatorAdded::decode_from_log(log)?; - // Convert pubkey into BLS publickey, need types to do this - // todo!() - // Get expected nonce and and increment it. Talk w/ security guys if this is needed. Wont - // the network handle this? 
What does it have to do with database - // todo!() + // Process data into usable forms + let validator_pubkey = PublicKey::from_str(&publicKey.to_string()) + .map_err(|e| format!("Failed to create PublicKey: {e}"))?; + let cluster_id = compute_cluster_id(owner, &mut operatorIds.clone()); + let operator_ids: Vec = operatorIds.iter().map(|id| OperatorId(*id)).collect(); - // Perform some validator verification, parse the share byte stream into ShareKeys, and - // verifiy the signature is correct - validate_operators(operator_ids)?; + // Get expected nonce and and increment it. Wont the network handle this? What does it have + // to do with the database - // make sure all of the operators exist - //if operator_ids.iter().any(|id| !self.db.operators_exist(id)) { - // return Err("One or more operators do not exist".to_string()); - //} + // Perform verification on the operator set and make sure they are all registered in the + // network + validate_operators(&operator_ids)?; + if operator_ids.iter().any(|id| !self.db.operator_exists(id)) { + return Err("One or more operators do not exist".to_string()); + } - let shares: ShareKeys = shares.try_into()?; - verify_signature()?; + // Parse the share byte stream into a list of valid Shares and then verify the signature + let (signature, shares) = parse_shares(shares.to_vec(), &operator_ids).unwrap(); + if !verify_signature(signature) { + return Err("Signature verification failed".to_string()); + } + + // fetch the validator metadata + // todo!() need to hook up to beacon api + let validator_metadata = fetch_validator_metadata(validator_pubkey); + + // Construct all of the cluster members + let cluster_members: Vec = shares + .iter() + .zip(operator_ids.iter()) + .map(|(share, op_id)| { + // todo!() check to see if one of these are this operator + ClusterMember { + operator_id: *op_id, + cluster_id, + share: share.to_owned(), + } + }) + .collect(); + + // Finally, construct and insert the full cluster and insert into the 
database + let cluster = Cluster { + cluster_id, + cluster_members, + faulty: 0, + liquidated: false, + validator_metadata, + }; + self.db + .insert_cluster(cluster) + .expect("Failed to insert cluster"); - /* - if !self.db.share_exists(pubkey) { - let mut share = SSVShare::new(pubkey, owner, domaintype); - // todo!() call this committee member, share member, or cluster member - let mut committee: Vec = Vec::new(); - for (idx, operator_id ) in operator_ids.iter().enumerate() { - let operator_data = match self.db.get_operator_data(operator_id) { - Ok(operator_data) => operator_data, - Err(e) => todo!(), - }; - committee.push(CommitteeMember{idx, shares.public_keys[idx]}); - // decrypt relevant encryptedkey and add it to keymanager - // todo!() - } - share.commitee = committee - } else { - // Get the share and confirm the owner - }*/ Ok(()) } + // A validator has been removed from the network. Since this validator is no long in the + // network, the cluster that was responsible for it must be cleaned up fn process_validator_removed(&self, log: &Log) -> Result<(), String> { let SSVContract::ValidatorRemoved { owner, - operatorIds: operator_ids, - publicKey: pubkey, + mut operatorIds, + publicKey, .. } = SSVContract::ValidatorRemoved::decode_from_log(log)?; - // convert to proper publickey - - /* - // fetch the share - let ssvshare = match self.db.get_share(pubkey) { - Ok(ssvshare) => share, - Err(e) => Err(format!("No share exists for the validaor {}: {}", pubkey, e)) - }; - - // validate the owners - // Prevent removal of the validator registered with different owner address - // owner A registers validator with public key X (OK) - // owner B registers validator with public key X (NOT OK) - // owner A removes validator with public key X (OK) - // owner B removes validator with public key X (NOT OK) - if owner != ssvshare.metadata.owner { - return Err(format!("Share already exists with a different owner address. Expected {}. 
Got {}", share.metadata.owner, owner)); + // Process and fetch data + let validator_pubkey = PublicKey::from_str(&publicKey.to_string()).unwrap(); + let cluster_id = compute_cluster_id(owner, &mut operatorIds); + let metadata = self.db.get_validator_metadata(&cluster_id).unwrap(); + + // Make sure the right owner is removing this validator + if owner != metadata.owner { + return Err(format!( + "Cluster already exists with a different owner address. Expected {}. Got {}", + metadata.owner, owner + )); } - // delete this share - self.db.delete_share(pubkey)?; + // Make sure this is the correct validator + if validator_pubkey != metadata.validator_pubkey { + return Err("Validator does not match".to_string()); + } - // Check if this operator has a piece of this share. If so, we are managing the share - // private key and should also remove that - let operator_id = self.db.operator_id; - let operator_present = ssvshare.share.committee.iter().map(|member| member.operator_id == operator_id); - if operator_present { - // remove it from the keystore + // Remove all cluster data corresponding to this validator + if self.db.member_of_cluster(&cluster_id) { + // todo!(): Remove it from the internal keystore } - */ + self.db.delete_cluster(cluster_id).unwrap(); Ok(()) } + /// A cluster has ran out of operational funds. Set the cluster as liquidated fn process_cluster_liquidated(&self, log: &Log) -> Result<(), String> { let SSVContract::ClusterLiquidated { owner, operatorIds: mut operator_ids, .. 
} = SSVContract::ClusterLiquidated::decode_from_log(log)?; - - /* - // Compute the identifier for this cluster and fetch all of the shares let cluster_id = compute_cluster_id(owner, &mut operator_ids); - - // mark all of the shares for this specific cluster as liquidated - self.db.liquidate(cluster_id); - - */ + self.db + .update_status(cluster_id, true) + .map_err(|e| format!("Failed to mark cluster as liquidated: {e}"))?; Ok(()) } + // A cluster that was previously liquidated has had more SSV deposited fn process_cluster_reactivated(&self, log: &Log) -> Result<(), String> { let SSVContract::ClusterReactivated { owner, - operatorIds: operator_ids, + operatorIds: mut operator_ids, .. } = SSVContract::ClusterReactivated::decode_from_log(log)?; - - /* - // Compute the identifier for this cluster and fetch all of the shares let cluster_id = compute_cluster_id(owner, &mut operator_ids); - - // mark all of the shares for this specific cluster as reactivated - self.db.reactivate(cluster_id); - - // bump slashing protection - */ - + self.db + .update_status(cluster_id, false) + .map_err(|e| format!("Failed to mark cluter as active {e}"))?; Ok(()) } + // The fee recipient address of a validator has been changed fn process_fee_recipient_updated(&self, log: &Log) -> Result<(), String> { let SSVContract::FeeRecipientAddressUpdated { - owner, - recipientAddress: new_recipient, + owner: _, + recipientAddress: _, } = SSVContract::FeeRecipientAddressUpdated::decode_from_log(log)?; - //self.db.update_recipient_address(owner, new_recipient)? + // todo!(). 
this is accessed updated via owner Ok(()) } + // A validator has exited the beacon chain fn process_validator_exited(&self, log: &Log) -> Result<(), String> { let SSVContract::ValidatorExited { - owner, - operatorIds: operator_ids, - publicKey: pubkey, + owner: _, + operatorIds: _, + publicKey: _, } = SSVContract::ValidatorExited::decode_from_log(log)?; - - /* - // fetch and validate share - let ssvshare = match self.db.get_share(pubkey) { - Ok(ssvshare) => { - // validate owner - if owner != ssvshare.metadata.owner { - return Err(format!( - "Share already exists with a different owner address. Expected {}. Got {}", - ssvshare.metadata.owner, owner)); - } - ssvshare - } - Err(e) => Err(format!( - "No share exists for the validator {}: {}", - pubkey, e - )), - }; - */ - - // Create a validator exit duty, shouldnt this be handled during live sync?? + // todo!(). Figure out which comes first, exit or removed Ok(()) } - - // Helper functions } diff --git a/anchor/eth/src/gen.rs b/anchor/eth/src/gen.rs index 1642663bb..3da498b5f 100644 --- a/anchor/eth/src/gen.rs +++ b/anchor/eth/src/gen.rs @@ -3,7 +3,21 @@ use alloy::sol; // Generate bindings around the SSV Network contract sol! 
{ #[derive(Debug)] - #[sol(rpc)] - SSVContract, - "src/abi/ssv_contract.json" + contract SSVContract { + struct Cluster { + uint32 validatorCount; + uint64 networkFeeIndex; + uint64 index; + bool active; + uint256 balance; + } + event OperatorAdded(uint64 indexed operatorId, address indexed owner, bytes publicKey, uint256 fee); + event OperatorRemoved(uint64 indexed operatorId); + event ValidatorAdded(address indexed owner, uint64[] operatorIds, bytes publicKey, bytes shares, Cluster cluster); + event ValidatorExited(address indexed owner, uint64[] operatorIds, bytes publicKey); + event ValidatorRemoved(address indexed owner, uint64[] operatorIds, bytes publicKey, Cluster cluster); + event FeeRecipientAddressUpdated(address indexed owner, address recipientAddress); + event ClusterLiquidated(address indexed owner, uint64[] operatorIds, Cluster cluster); + event ClusterReactivated(address indexed owner, uint64[] operatorIds, Cluster cluster); + } } diff --git a/anchor/eth/src/network_actions.rs b/anchor/eth/src/network_actions.rs index 6c707f8d0..3c5e18ddb 100644 --- a/anchor/eth/src/network_actions.rs +++ b/anchor/eth/src/network_actions.rs @@ -3,17 +3,6 @@ use super::gen::SSVContract; use alloy::primitives::Address; use alloy::{rpc::types::Log, sol_types::SolEvent}; -//use types::{SSVShare, OperatorID} - -// Todo!() need some file that defines all the actions that the validator should -// perform. 
Upon receiving an event in the live sync, the event log needs to be transformed into -// and action, processed & persisted into the database, and then sent off to be executed (execute -// trait in the impl) - -// todo!() This should be standardized into a common format that will be used client wide -// we do not want to use the contract events structures directly and want to define some types that -// hold all of the relevant data needed for execution - #[derive(Debug, PartialEq)] pub enum NetworkAction { StopValidator { diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 71fc3f26e..678bfeda5 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -2,11 +2,12 @@ use crate::gen::SSVContract; use alloy::primitives::{address, Address, FixedBytes}; use alloy::providers::{Provider, ProviderBuilder, RootProvider, WsConnect}; use alloy::pubsub::PubSubFrontend; +//use alloy::rpc::client::ClientBuilder; +//use alloy::transports::layers::RetryBackoffLayer; use alloy::rpc::types::{Filter, Log}; use alloy::sol_types::SolEvent; use alloy::transports::http::{Client, Http}; -use alloy::transports::layers::RetryBackoffLayer; -use alloy::rpc::client::ClientBuilder; +use database::NetworkDatabase; use futures::future::{try_join_all, Future}; use futures::StreamExt; use rand::Rng; @@ -43,6 +44,8 @@ static SSV_EVENTS: LazyLock>> = LazyLock::new(|| { static CONTRACT_DEPLOYMENT_ADDRESS: LazyLock
= LazyLock::new(|| address!("DD9BC35aE942eF0cFa76930954a156B3fF30a4E1")); +// todo!() define multiple networks + /// Contract deployment block on Ethereum Mainnet /// https://etherscan.io/tx/0x4a11a560d3c2f693e96f98abb1feb447646b01b36203ecab0a96a1cf45fd650b const CONTRACT_DEPLOYMENT_BLOCK: u64 = 17507487; @@ -82,13 +85,13 @@ pub struct SsvEventSyncer { } impl SsvEventSyncer { - pub async fn new(/*db: NetworkDatabase*/) -> Result { + pub async fn new(db: Arc) -> Result { // Construct HTTP Provider let http_url = "dummy_http".parse().unwrap(); // TODO!(), get this from config let rpc_client: Arc = Arc::new(ProviderBuilder::new().on_http(http_url)); // Experiment with retry clients for both websocket and http - /* + /* let client = ClientBuilder::default() .layer(RetryBackoffLayer::new(10, 300, 300)) .http(http_url); @@ -102,8 +105,8 @@ impl SsvEventSyncer { .await .map_err(|e| format!("Failed to bind to WS: {}, {}", ws_url, e))?; - // Pass db access here - let event_processor = EventProcessor::new(); + // Construct an EventProcessor with access to the DB + let event_processor = EventProcessor::new(db); Ok(Self { rpc_client, diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index bbb1371e2..3188159dc 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -1,55 +1,76 @@ use super::sync::MAX_OPERATORS; -use alloy::primitives::{keccak256, Address, Bytes, FixedBytes, U256}; +use ssv_types::Share; +use ssv_types::{OperatorId, ValidatorMetadata}; use std::collections::HashSet; use types::PublicKey; -// Offsets to parse the share bytes -const SIG_LEN: usize = 96; -const PUBKEY_LEN: usize = 48; -const ENCRYPTEDKEY_LEN: usize = 32; +const SIGNATURE_LENGTH: usize = 96; // phase0.SignatureLength +const PUBLIC_KEY_LENGTH: usize = 48; // phase0.PublicKeyLength +const ENCRYPTED_KEY_LENGTH: usize = 256; // Original encryptedKeyLength -// use types::(PublicKey, PrivateKey), -pub struct SharePublickKey([u8; 48]); -pub struct SharePrivateKey([u8; 32]); - -// All 
of the public keys and encrypted private keys for a -// validator key that has been broken into N shares. -pub struct ShareKeys { - // Uncompressed bls signatures - signature: [u8; 96], - // Public keys of the Shares - public_keys: Vec, - // Encrypted private key of the shares - encrypted_keys: Vec, -} - -// Convert from a raw stream of bytes to a structured set of keys. +// Validates and parses shares from a validator added event // Event contains a bytes stream of the form // [signature | public keys | encrypted keys]. -impl TryFrom for ShareKeys { - type Error = String; - fn try_from(source: Bytes) -> Result { +pub fn parse_shares( + shares: Vec, + operator_ids: &[OperatorId], +) -> Result<(Vec, Vec), String> { + let operator_count = operator_ids.len(); + + // Calculate offsets for different components within the shares + let signature_offset = SIGNATURE_LENGTH; + let pub_keys_offset = PUBLIC_KEY_LENGTH * operator_count + signature_offset; + let shares_expected_length = ENCRYPTED_KEY_LENGTH * operator_count + pub_keys_offset; + + // Validate total length of shares + if shares_expected_length != shares.len() { todo!() } + + // Extract components using array slicing + let signature = shares[..signature_offset].to_vec(); + let share_public_keys = split_bytes( + &shares[signature_offset..pub_keys_offset], + PUBLIC_KEY_LENGTH, + ); + let encrypted_keys = split_bytes(&shares[pub_keys_offset..], ENCRYPTED_KEY_LENGTH); + + let shares: Vec = share_public_keys + .iter() + .zip(encrypted_keys.iter()) + .map(|(_public, _encrypted)| { + todo!() + /* + Share { + share_pubkey: PublicKey::try_from(public), + encrypted_private_key: encrypted.as_slice() + } + */ + }) + .collect(); + + Ok((signature, shares)) } -// Verify that the signature over the share data is correct -pub fn verify_signature() -> Result<(), String> { - todo!() +// Splits a byte slice into chunks of specified size +fn split_bytes(data: &[u8], chunk_size: usize) -> Vec> { + data.chunks(chunk_size) + .map(|chunk| 
chunk.to_vec()) + .collect() } -// Compute the unique hash of a committee when identified by an owner -pub fn compute_cluster_id(owner: Address, operator_ids: &mut [u64]) -> FixedBytes<32> { - operator_ids.sort(); +// Fetch the metadata for a validator from the beacon chain +pub fn fetch_validator_metadata(_public_key: PublicKey) -> ValidatorMetadata { + todo!() +} - // Concat to form ... - let mut byte_repr = Bytes::new(); - for id in operator_ids {} - keccak256(byte_repr) +// Verify that the signature over the share data is correct +pub fn verify_signature(_signature: Vec) -> bool { + todo!() } // Perform basic verification on the operator set -pub fn validate_operators(operator_ids: Vec) -> Result<(), String> { +pub fn validate_operators(operator_ids: &[OperatorId]) -> Result<(), String> { let num_operators = operator_ids.len(); // make sure there is a valid number of operators @@ -66,7 +87,10 @@ pub fn validate_operators(operator_ids: Vec) -> Result<(), String> { // make sure count is valid let threshold = (num_operators - 1) / 3; if (num_operators - 1) % 3 != 0 || !(1..=4).contains(&threshold) { - return Err(format!("Invalid number of operators: {}", num_operators)); + return Err(format!( + "Given {} operators. 
Cannot build a 3f+1 quorum", + num_operators + )); } // make sure there are no duplicates From 13dbb6e1aeb93c51c0e538295efb50ceb3af9904 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Fri, 13 Dec 2024 22:16:47 +0000 Subject: [PATCH 12/49] revert ignore --- .gitignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitignore b/.gitignore index c11516bd9..23588ca19 100644 --- a/.gitignore +++ b/.gitignore @@ -20,4 +20,3 @@ perf.data* # cross /zcross -anchor/database From 9ce4233851d3204f33cced9e77722f8ac3bdc2ec Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 16 Dec 2024 17:12:16 +0000 Subject: [PATCH 13/49] start testing, add in tracing, operator key parsing --- anchor/eth/Cargo.toml | 9 + anchor/eth/src/event_processor.rs | 292 ++++++++++++++++++++++++------ anchor/eth/src/gen.rs | 1 + anchor/eth/src/lib.rs | 2 +- anchor/eth/src/network_actions.rs | 10 +- anchor/eth/src/sync.rs | 227 +++++++++++++++-------- anchor/eth/src/util.rs | 6 +- anchor/eth/test.rs | 49 +++++ 8 files changed, 459 insertions(+), 137 deletions(-) create mode 100644 anchor/eth/test.rs diff --git a/anchor/eth/Cargo.toml b/anchor/eth/Cargo.toml index 9d16b0322..420192205 100644 --- a/anchor/eth/Cargo.toml +++ b/anchor/eth/Cargo.toml @@ -4,6 +4,10 @@ version = "0.1.0" edition = { workspace = true } authors = ["Sigma Prime "] +[[bin]] +name = "integration" +path = "test.rs" + [dependencies] alloy = { version = "0.6.4", features = ["sol-types", "transports", "json", "contract", "pubsub", "provider-ws", "rpc-types"] } @@ -13,5 +17,10 @@ rand = "0.8.5" types = { workspace = true } database = { workspace = true } ssv_types = { workspace = true} +tracing = { workspace = true } +tracing-subscriber = { workspace = true } +openssl = { workspace = true } +hex = "0.4.3" + diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index e33d80473..23dcd7bc3 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -10,6 +10,7 @@ use 
ssv_types::{compute_cluster_id, Cluster, ClusterMember, Operator, OperatorId use std::collections::HashMap; use std::str::FromStr; use std::sync::Arc; +use tracing::{debug, error, info, instrument, trace, warn}; use types::PublicKey; // Handler for a log @@ -20,7 +21,7 @@ pub struct EventProcessor { /// Function handlers for event processing handlers: HashMap, // Reference to the database - db: Arc, + pub db: Arc, } impl EventProcessor { @@ -65,24 +66,38 @@ impl EventProcessor { } /// Process a new set of logs + #[instrument(skip(self, logs), fields(logs_count = logs.len()))] pub fn process_logs(&self, logs: Vec, live: bool) -> Result<(), String> { - for log in logs { - let topic0 = log.topic0().expect("Log should have a topic0"); - let handler = self - .handlers - .get(topic0) - .expect("A handler should exist for this topic"); - handler(self, &log)?; - - let action: NetworkAction = log.try_into()?; - if action != NetworkAction::NoOp && live { - // todo!() send off somewhere + info!(logs_count = logs.len(), "Starting log processing"); + for (index, log) in logs.iter().enumerate() { + trace!(log_index = index, topic = ?log.topic0(), "Processing individual log"); + + let topic0 = log.topic0().ok_or_else(|| { + error!("Log missing topic0"); + "Log missing topic0".to_string() + })?; + if topic0 == *SSVContract::OperatorAdded::SIGNATURE_HASH { + let handler = self.handlers.get(topic0).ok_or_else(|| { + error!(topic = ?topic0, "No handler found for topic"); + "No handler found for topic".to_string() + })?; + + let _ = handler(self, log); } + + //let action: NetworkAction = log.try_into()?; + //if action != NetworkAction::NoOp && live { + // debug!(action = ?action, "Network action needs processing"); + // todo!() send off somewhere + //} } + + info!(logs_count = logs.len(), "Completed processing all logs"); Ok(()) } // A new Operator has been registered in the network. 
+ #[instrument(skip(self, log), fields(operator_id, owner))] fn process_operator_added(&self, log: &Log) -> Result<(), String> { let SSVContract::OperatorAdded { operatorId, @@ -92,30 +107,78 @@ impl EventProcessor { } = SSVContract::OperatorAdded::decode_from_log(log)?; let operator_id = OperatorId(operatorId); + debug!(operator_id = ?operator_id, owner = ?owner, "Processing operator registration"); + // Confirm that this operator does not already exist if self.db.operator_exists(&operator_id) { - return Err(String::from("Operator does not exist")); + error!(operator_id = ?operator_id, "Operator already exists in database"); + return Err(String::from("Operator already exists in database")); } - // Construct the operator and then insert it into the database - let operator = Operator::new(&publicKey.to_string(), operator_id, owner) - .map_err(|e| format!("Failed to construct an operator: {e}"))?; - self.db - .insert_operator(&operator) - .map_err(|e| format!("Failed to insert operator: {e}"))?; + // Parse ABI encoded public key string + let public_key_str = publicKey.to_string(); + let public_key_str = public_key_str.trim_start_matches("0x"); + + debug!(operator_id = ?operator_id, "Decoding operator public key"); + let data = hex::decode(public_key_str).map_err(|e| { + error!(operator_id = ?operator_id, error = %e, "Failed to decode public key hex"); + format!("Failed to decode public key hex: {e}") + })?; + + if data.len() < 64 { + error!(operator_id = ?operator_id, "Invalid data length"); + return Err(String::from("Invalid data length")); + } + + let data = &data[64..]; + let data = String::from_utf8(data.to_vec()).map_err(|e| { + error!(operator_id = ?operator_id, error = %e, "Invalid UTF-8 in public key"); + format!("Invalid UTF-8 in public key: {e}") + })?; + let data = data.trim_matches(char::from(0)).to_string(); + + // Construct the Operator and insert it into the database + let operator = Operator::new(&data, operator_id, owner).map_err(|e| { + error!( + 
operator_pubkey = ?publicKey, + operator_id = ?operator_id, + error = %e, + "Failed to construct operator" + ); + format!("Failed to construct operator: {e}") + })?; + + self.db.insert_operator(&operator).map_err(|e| { + error!( + operator_id = ?operator_id, + error = %e, + "Failed to insert operator into database" + ); + format!("Failed to insert operator into database: {e}") + })?; + + info!( + operator_id = ?operator_id, + owner = ?owner, + "Successfully registered operator" + ); Ok(()) } // An Operator has been removed from the network + #[instrument(skip(self, log), fields(operator_id))] fn process_operator_removed(&self, log: &Log) -> Result<(), String> { - let _decoded = SSVContract::OperatorRemoved::decode_from_log(log)?; - // this method is currently noop in the ref client + let SSVContract::OperatorRemoved { operatorId } = + SSVContract::OperatorRemoved::decode_from_log(log)?; + + info!(operator_id = ?operatorId, "Operator removed from network"); Ok(()) } // A new validator has entered the network. This means that a new cluster has formed and this // operator is a potential member in the cluster. Perform verification, store all data, and // extract the key if one belongs to us. + #[instrument(skip(self, log), fields(validator_pubkey, cluster_id, owner))] fn process_validator_added(&self, log: &Log) -> Result<(), String> { let SSVContract::ValidatorAdded { owner, @@ -125,9 +188,17 @@ impl EventProcessor { .. 
} = SSVContract::ValidatorAdded::decode_from_log(log)?; - // Process data into usable forms - let validator_pubkey = PublicKey::from_str(&publicKey.to_string()) - .map_err(|e| format!("Failed to create PublicKey: {e}"))?; + debug!(owner = ?owner, operator_count = operatorIds.len(), "Processing validator addition"); + + // Process data into a usable form + let validator_pubkey = PublicKey::from_str(&publicKey.to_string()).map_err(|e| { + error!( + validator_pubkey = %publicKey, + error = %e, + "Failed to construct validator pubkey" + ); + format!("Failed to create PublicKey: {e}") + })?; let cluster_id = compute_cluster_id(owner, &mut operatorIds.clone()); let operator_ids: Vec = operatorIds.iter().map(|id| OperatorId(*id)).collect(); @@ -136,36 +207,45 @@ impl EventProcessor { // Perform verification on the operator set and make sure they are all registered in the // network + debug!(cluster_id = ?cluster_id, "Validating operators"); validate_operators(&operator_ids)?; if operator_ids.iter().any(|id| !self.db.operator_exists(id)) { + error!(cluster_id = ?cluster_id, "One or more operators do not exist in database"); return Err("One or more operators do not exist".to_string()); } // Parse the share byte stream into a list of valid Shares and then verify the signature - let (signature, shares) = parse_shares(shares.to_vec(), &operator_ids).unwrap(); + debug!(cluster_id = ?cluster_id, "Parsing and verifying shares"); + let (signature, shares) = parse_shares(shares.to_vec(), &operator_ids).map_err(|e| { + error!(cluster_id = ?cluster_id, error = %e, "Failed to parse shares"); + format!("Failed to parse shares: {e}") + })?; + if !verify_signature(signature) { + error!(cluster_id = ?cluster_id, "Signature verification failed"); return Err("Signature verification failed".to_string()); } // fetch the validator metadata // todo!() need to hook up to beacon api - let validator_metadata = fetch_validator_metadata(validator_pubkey); + let validator_metadata = 
fetch_validator_metadata(&validator_pubkey).map_err(|e| { + error!(validator_pubkey= ?validator_pubkey, "Failed to fetch validator metadata"); + format!("Failed to fetch validator metadata: {e}") + })?; // Construct all of the cluster members + debug!(cluster_id = ?cluster_id, "Constructing cluster members"); let cluster_members: Vec = shares .iter() .zip(operator_ids.iter()) - .map(|(share, op_id)| { + .map(|(share, op_id)| ClusterMember { // todo!() check to see if one of these are this operator - ClusterMember { - operator_id: *op_id, - cluster_id, - share: share.to_owned(), - } + operator_id: *op_id, + cluster_id, + share: share.to_owned(), }) .collect(); - // Finally, construct and insert the full cluster and insert into the database let cluster = Cluster { cluster_id, cluster_members, @@ -173,15 +253,24 @@ impl EventProcessor { liquidated: false, validator_metadata, }; - self.db - .insert_cluster(cluster) - .expect("Failed to insert cluster"); + // Finally, construct and insert the full cluster and insert into the database + self.db.insert_cluster(cluster).map_err(|e| { + error!(cluster_id = ?cluster_id, error = %e, "Failed to insert cluster"); + format!("Failed to insert cluster: {e}") + })?; + + info!( + cluster_id = ?cluster_id, + validator_pubkey = %validator_pubkey, + "Successfully added validator and cluster" + ); Ok(()) } // A validator has been removed from the network. Since this validator is no long in the // network, the cluster that was responsible for it must be cleaned up + #[instrument(skip(self, log), fields(cluster_id, validator_pubkey, owner))] fn process_validator_removed(&self, log: &Log) -> Result<(), String> { let SSVContract::ValidatorRemoved { owner, @@ -189,13 +278,44 @@ impl EventProcessor { publicKey, .. 
} = SSVContract::ValidatorRemoved::decode_from_log(log)?; + // Process and fetch data - let validator_pubkey = PublicKey::from_str(&publicKey.to_string()).unwrap(); + let validator_pubkey = PublicKey::from_str(&publicKey.to_string()).map_err(|e| { + error!( + validator_pubkey = %publicKey, + error = %e, + "Failed to construct validator pubkey" + ); + format!("Failed to create PublicKey: {e}") + })?; + let cluster_id = compute_cluster_id(owner, &mut operatorIds); - let metadata = self.db.get_validator_metadata(&cluster_id).unwrap(); + + debug!( + cluster_id = ?cluster_id, + validator_pubkey = %validator_pubkey, + "Processing validator removal" + ); + + let metadata = match self.db.get_validator_metadata(&cluster_id) { + Some(data) => data, + None => { + error!( + cluster_id = ?cluster_id, + "Failed to fetch validator metadata" + ); + return Err("Failed to fetch validator metada".to_string()); + } + }; // Make sure the right owner is removing this validator if owner != metadata.owner { + error!( + cluster_id = ?cluster_id, + expected_owner = ?metadata.owner, + actual_owner = ?owner, + "Owner mismatch for validator removal" + ); return Err(format!( "Cluster already exists with a different owner address. Expected {}. 
Got {}", metadata.owner, owner @@ -204,64 +324,130 @@ impl EventProcessor { // Make sure this is the correct validator if validator_pubkey != metadata.validator_pubkey { + error!( + cluster_id = ?cluster_id, + expected_pubkey = %metadata.validator_pubkey, + actual_pubkey = %validator_pubkey, + "Validator pubkey mismatch" + ); return Err("Validator does not match".to_string()); } - // Remove all cluster data corresponding to this validator + // Check if we are a member of this cluster, if so we need to remove share data if self.db.member_of_cluster(&cluster_id) { + debug!(cluster_id = ?cluster_id, "Removing cluster from local keystore"); // todo!(): Remove it from the internal keystore } - self.db.delete_cluster(cluster_id).unwrap(); + // Remove all cluster data corresponding to this validator + self.db.delete_cluster(cluster_id).map_err(|e| { + error!( + cluster_id = ?cluster_id, + error = %e, + "Failed to delete cluster from database" + ); + format!("Failed to delete cluster: {e}") + })?; + + info!( + cluster_id = ?cluster_id, + validator_pubkey = %validator_pubkey, + "Successfully removed validator and cluster" + ); Ok(()) } /// A cluster has ran out of operational funds. Set the cluster as liquidated + #[instrument(skip(self, log), fields(cluster_id, owner))] fn process_cluster_liquidated(&self, log: &Log) -> Result<(), String> { let SSVContract::ClusterLiquidated { owner, operatorIds: mut operator_ids, .. 
} = SSVContract::ClusterLiquidated::decode_from_log(log)?; + let cluster_id = compute_cluster_id(owner, &mut operator_ids); - self.db - .update_status(cluster_id, true) - .map_err(|e| format!("Failed to mark cluster as liquidated: {e}"))?; + + debug!(cluster_id = ?cluster_id, "Processing cluster liquidation"); + + self.db.update_status(cluster_id, true).map_err(|e| { + error!( + cluster_id = ?cluster_id, + error = %e, + "Failed to mark cluster as liquidated" + ); + format!("Failed to mark cluster as liquidated: {e}") + })?; + + info!( + cluster_id = ?cluster_id, + owner = ?owner, + "Cluster marked as liquidated" + ); Ok(()) } // A cluster that was previously liquidated has had more SSV deposited + #[instrument(skip(self, log), fields(cluster_id, owner))] fn process_cluster_reactivated(&self, log: &Log) -> Result<(), String> { let SSVContract::ClusterReactivated { owner, operatorIds: mut operator_ids, .. } = SSVContract::ClusterReactivated::decode_from_log(log)?; + let cluster_id = compute_cluster_id(owner, &mut operator_ids); - self.db - .update_status(cluster_id, false) - .map_err(|e| format!("Failed to mark cluter as active {e}"))?; + + debug!(cluster_id = ?cluster_id, "Processing cluster reactivation"); + + self.db.update_status(cluster_id, false).map_err(|e| { + error!( + cluster_id = ?cluster_id, + error = %e, + "Failed to mark cluster as active" + ); + format!("Failed to mark cluster as active: {e}") + })?; + + info!( + cluster_id = ?cluster_id, + owner = ?owner, + "Cluster reactivated" + ); Ok(()) } // The fee recipient address of a validator has been changed + #[instrument(skip(self, log), fields(owner))] fn process_fee_recipient_updated(&self, log: &Log) -> Result<(), String> { let SSVContract::FeeRecipientAddressUpdated { - owner: _, - recipientAddress: _, + owner, + recipientAddress, } = SSVContract::FeeRecipientAddressUpdated::decode_from_log(log)?; - // todo!(). 
this is accessed updated via owner + + info!( + owner = ?owner, + new_recipient = ?recipientAddress, + "Fee recipient address updated" + ); Ok(()) } // A validator has exited the beacon chain + #[instrument(skip(self, log), fields(validator_pubkey, owner))] fn process_validator_exited(&self, log: &Log) -> Result<(), String> { let SSVContract::ValidatorExited { - owner: _, - operatorIds: _, - publicKey: _, + owner, + operatorIds, + publicKey, } = SSVContract::ValidatorExited::decode_from_log(log)?; - // todo!(). Figure out which comes first, exit or removed + + info!( + owner = ?owner, + validator_pubkey = ?publicKey, + operator_count = operatorIds.len(), + "Validator exited from network" + ); Ok(()) } } diff --git a/anchor/eth/src/gen.rs b/anchor/eth/src/gen.rs index 3da498b5f..ce4439cd4 100644 --- a/anchor/eth/src/gen.rs +++ b/anchor/eth/src/gen.rs @@ -3,6 +3,7 @@ use alloy::sol; // Generate bindings around the SSV Network contract sol! { #[derive(Debug)] + #[sol(rpc)] contract SSVContract { struct Cluster { uint32 validatorCount; diff --git a/anchor/eth/src/lib.rs b/anchor/eth/src/lib.rs index 850be2e50..0cf4243c1 100644 --- a/anchor/eth/src/lib.rs +++ b/anchor/eth/src/lib.rs @@ -1,4 +1,4 @@ -pub use sync::SsvEventSyncer; +pub use sync::{Config, Network, SsvEventSyncer}; mod event_parser; mod event_processor; mod gen; diff --git a/anchor/eth/src/network_actions.rs b/anchor/eth/src/network_actions.rs index 3c5e18ddb..56a46c4fc 100644 --- a/anchor/eth/src/network_actions.rs +++ b/anchor/eth/src/network_actions.rs @@ -2,26 +2,28 @@ use super::event_parser::EventDecoder; use super::gen::SSVContract; use alloy::primitives::Address; use alloy::{rpc::types::Log, sol_types::SolEvent}; +use ssv_types::OperatorId; +use types::PublicKey; #[derive(Debug, PartialEq)] pub enum NetworkAction { StopValidator { - //pubkey: bls::PublicKey + //pubkey: PublicKey, }, LiquidateCluster { owner: Address, - //operator_ids: Vec, + //operator_ids: Vec, }, ReactivateCluster { owner: 
Address, - //operator_ids: Vec + //operator_ids: Vec, }, UpdateFeeRecipient { owner: Address, recipient: Address, }, ExitValidator { - //pubkey: bls::PublicKey + //pubkey: PublicKey, //block_number: u64, //validator_index: u64, //own_validator: bool, diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 678bfeda5..7eb1cb3ad 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -1,9 +1,7 @@ use crate::gen::SSVContract; -use alloy::primitives::{address, Address, FixedBytes}; +use alloy::primitives::{address, Address}; use alloy::providers::{Provider, ProviderBuilder, RootProvider, WsConnect}; use alloy::pubsub::PubSubFrontend; -//use alloy::rpc::client::ClientBuilder; -//use alloy::transports::layers::RetryBackoffLayer; use alloy::rpc::types::{Filter, Log}; use alloy::sol_types::SolEvent; use alloy::transports::http::{Client, Http}; @@ -14,63 +12,81 @@ use rand::Rng; use std::collections::BTreeMap; use std::sync::{Arc, LazyLock}; use tokio::time::Duration; +use tracing::{debug, error, info, instrument, warn}; use crate::event_processor::EventProcessor; /// SSV contract events needed to come up to date with the network -static SSV_EVENTS: LazyLock>> = LazyLock::new(|| { +static SSV_EVENTS: LazyLock> = LazyLock::new(|| { vec![ // event OperatorAdded(uint64 indexed operatorId, address indexed owner, bytes publicKey, uint256 fee); - SSVContract::OperatorAdded::SIGNATURE_HASH, + SSVContract::OperatorAdded::SIGNATURE, // event OperatorRemoved(uint64 indexed operatorId); - SSVContract::OperatorRemoved::SIGNATURE_HASH, + SSVContract::OperatorRemoved::SIGNATURE, // event ValidatorAdded(address indexed owner, uint64[] operatorIds, bytes publicKey, bytes shares, Cluster cluster); - SSVContract::ValidatorAdded::SIGNATURE_HASH, + SSVContract::ValidatorAdded::SIGNATURE, // event ValidatorRemoved(address indexed owner, uint64[] operatorIds, bytes publicKey, Cluster cluster); - SSVContract::ValidatorRemoved::SIGNATURE_HASH, + 
SSVContract::ValidatorRemoved::SIGNATURE, // event ClusterLiquidated(address indexed owner, uint64[] operatorIds, Cluster cluster); - SSVContract::ClusterLiquidated::SIGNATURE_HASH, + SSVContract::ClusterLiquidated::SIGNATURE, // event ClusterReactivated(address indexed owner, uint64[] operatorIds, Cluster cluster); - SSVContract::ClusterReactivated::SIGNATURE_HASH, + SSVContract::ClusterReactivated::SIGNATURE, // event FeeRecipientAddressUpdated(address indexed owner, address recipientAddress); - SSVContract::FeeRecipientAddressUpdated::SIGNATURE_HASH, + SSVContract::FeeRecipientAddressUpdated::SIGNATURE, // event ValidatorExited(address indexed owner, uint64[] operatorIds, bytes publicKey); - SSVContract::ValidatorExited::SIGNATURE_HASH, + SSVContract::ValidatorExited::SIGNATURE, ] }); -/// Contract deployment address -/// https://etherscan.io/address/0xDD9BC35aE942eF0cFa76930954a156B3fF30a4E1 -static CONTRACT_DEPLOYMENT_ADDRESS: LazyLock
= +/// Contract deployment addresses +/// Mainnet: https://etherscan.io/address/0xDD9BC35aE942eF0cFa76930954a156B3fF30a4E1 +static MAINNET_DEPLOYMENT_ADDRESS: LazyLock
= LazyLock::new(|| address!("DD9BC35aE942eF0cFa76930954a156B3fF30a4E1")); -// todo!() define multiple networks +/// Holesky: https://holesky.etherscan.io/address/0x38A4794cCEd47d3baf7370CcC43B560D3a1beEFA +static HOLESKY_DEPLOYMENT_ADDRESS: LazyLock
= + LazyLock::new(|| address!("38A4794cCEd47d3baf7370CcC43B560D3a1beEFA")); /// Contract deployment block on Ethereum Mainnet -/// https://etherscan.io/tx/0x4a11a560d3c2f693e96f98abb1feb447646b01b36203ecab0a96a1cf45fd650b -const CONTRACT_DEPLOYMENT_BLOCK: u64 = 17507487; +/// Mainnet: https://etherscan.io/tx/0x4a11a560d3c2f693e96f98abb1feb447646b01b36203ecab0a96a1cf45fd650b +const MAINNET_DEPLOYMENT_BLOCK: u64 = 17507487; + +/// Holesky: https://holesky.etherscan.io/tx/0x998c38ff37b47e69e23c21a8079168b7e0e0ade7244781587b00be3f08a725c6 +const HOLESKY_DEPLOYMENT_BLOCK: u64 = 181612; /// Batch size for log fetching -/// todo!(), play around with this number, default max logs per filter is 20k and this contract is -/// not event heavy, so I think this could be increased a lot -const BATCH_SIZE: u64 = 500; +const BATCH_SIZE: u64 = 10000; /// Typedef RPC and WS clients type RpcClient = RootProvider>; type WsClient = RootProvider; -// Retry information for log fetching -// todo!() backoff if needed +/// Retry information for log fetching const MAX_RETRIES: i32 = 5; // Follow distance // TODO!(), why 8 (in go client), or is this the eth1 follow distance const FOLLOW_DISTANCE: u64 = 8; -// The maximum number of operators a validator can have +/// The maximum number of operators a validator can have //https://github.com/ssvlabs/ssv/blob/07095fe31e3ded288af722a9c521117980585d95/eth/eventhandler/validation.go#L15 pub const MAX_OPERATORS: usize = 13; +/// Network that is being connected to +#[derive(Debug)] +pub enum Network { + Mainnet, + Holesky, +} + +// TODO!() Dummy config struct that will be replaced +#[derive(Debug)] +pub struct Config { + pub http_url: String, + pub ws_url: String, + pub network: Network, +} + /// Client for interacting with the SSV contract on Ethereum L1 /// /// Manages connections to the L1 and monitors SSV contract events to track the state of validator @@ -78,32 +94,28 @@ pub const MAX_OPERATORS: usize = 13; pub struct SsvEventSyncer { /// Http 
client connected to the L1 to fetch historical SSV event information rpc_client: Arc, - // Websocket client connected to L1 to stream live SSV event information + /// Websocket client connected to L1 to stream live SSV event information ws_client: WsClient, - // Event processor for logs + /// Event processor for logs event_processor: EventProcessor, + /// The network the node is connected to + network: Network, } impl SsvEventSyncer { - pub async fn new(db: Arc) -> Result { + #[instrument(skip(db))] + pub async fn new(db: Arc, config: Config) -> Result { + info!(?config, "Creating new SSV Event Syncer"); + // Construct HTTP Provider - let http_url = "dummy_http".parse().unwrap(); // TODO!(), get this from config + let http_url = config.http_url.parse().expect("Failed to parse HTTP URL"); let rpc_client: Arc = Arc::new(ProviderBuilder::new().on_http(http_url)); - // Experiment with retry clients for both websocket and http - /* - let client = ClientBuilder::default() - .layer(RetryBackoffLayer::new(10, 300, 300)) - .http(http_url); - let retry_rpc_client = ProviderBuilder::new().on_client(client); - */ - // Construct Websocket Provider - let ws_url = "dummy ws"; // TODO!(), get this from config let ws_client = ProviderBuilder::new() - .on_ws(WsConnect::new(ws_url)) + .on_ws(WsConnect::new(&config.ws_url)) .await - .map_err(|e| format!("Failed to bind to WS: {}, {}", ws_url, e))?; + .map_err(|e| format!("Failed to bind to WS: {}, {}", &config.ws_url, e))?; // Construct an EventProcessor with access to the DB let event_processor = EventProcessor::new(db); @@ -112,43 +124,64 @@ impl SsvEventSyncer { rpc_client, ws_client, event_processor, + network: config.network, }) } - // Top level function to sync data + #[instrument(skip(self))] pub async fn sync(&self) -> Result<(), String> { - // first, perform a historical sync - self.historical_sync().await?; + info!("Starting SSV event sync"); - // start the live sync, options - // 1) spawn the sync off in its own long 
running task and return - // 2) transition into live sync and signal AtomicBool to coordinator - self.live_sync().await?; + // get network specific contract information + let (contract_address, deployment_block) = match self.network { + Network::Mainnet => (*MAINNET_DEPLOYMENT_ADDRESS, MAINNET_DEPLOYMENT_BLOCK), + Network::Holesky => (*HOLESKY_DEPLOYMENT_ADDRESS, HOLESKY_DEPLOYMENT_BLOCK), + }; + + info!( + ?contract_address, + deployment_block, "Using contract configuration" + ); + + info!("Starting historical sync"); + self.historical_sync(contract_address, deployment_block) + .await?; + + info!("Starting live sync"); + self.live_sync(contract_address).await?; todo!() } - /// Perform a historical sync from the contract deployment block to catch up to the current - /// state of the SSV network - async fn historical_sync(&self) -> Result<(), String> { - // todo!(), differential between fresh sync and when we have already synced up to some block - let mut start_block = CONTRACT_DEPLOYMENT_BLOCK; + #[instrument(skip(self, contract_address, deployment_block))] + async fn historical_sync( + &self, + contract_address: Address, + deployment_block: u64, + ) -> Result<(), String> { + // Start from the contrat deployment block or the last block that has been processed + let last_processed_block = self.event_processor.db.get_last_processed_block(); + let deployment_block = std::cmp::max(deployment_block, last_processed_block); + let mut start_block = deployment_block; + loop { - // get the current block and make sure we have blocks to sync - let current_block = self - .rpc_client - .get_block_number() - .await - .map_err(|e| format!("Unable to fetch block number {}", e))?; + let current_block = self.rpc_client.get_block_number().await.map_err(|e| { + error!(?e, "Failed to fetch block number"); + format!("Unable to fetch block number {}", e) + })?; + if current_block < FOLLOW_DISTANCE { + debug!("Current block less than follow distance, breaking"); break; } - // calculate end 
block w/ follow distance let end_block = current_block - FOLLOW_DISTANCE; if end_block < start_block { + debug!("End block less than start block, breaking"); break; } + info!(start_block, end_block, "Fetching logs for block range"); + // Chunk the start and end block range into a set of ranges of size BATCH_SIZE // and construct a future to fetch the logs in each range let tasks: Vec<_> = (start_block..=current_block) @@ -156,10 +189,13 @@ impl SsvEventSyncer { .map(|start| { let (start, end) = (start, std::cmp::min(start + BATCH_SIZE - 1, current_block)); - self.fetch_logs(start, end) + self.fetch_logs(start, end, contract_address) }) .collect(); + // Process batches, also in batches. + // todo!() based on number of logs + // Await all of the futures. let event_logs: Vec> = try_join_all(tasks).await?; let event_logs: Vec = event_logs.into_iter().flatten().collect(); @@ -181,22 +217,34 @@ impl SsvEventSyncer { self.event_processor .process_logs(ordered_event_logs, false)?; - // reset the start block to make up for missed blocks during sync + // update the block we have synced to + self.event_processor + .db + .processed_block(end_block) + .expect("Failed to update last processed block number"); + + info!( + "Processed events from blocks {} to {}", + start_block, current_block + ); + start_block = current_block + 1; } + info!("Historical sync completed"); Ok(()) } - /// Fetch logs from the chain + #[instrument(skip(self, deployment_address))] fn fetch_logs( &self, from_block: u64, to_block: u64, + deployment_address: Address, ) -> impl Future, String>> { // Setup filter and rpc client let rpc_client = self.rpc_client.clone(); let filter = Filter::new() - .address(*CONTRACT_DEPLOYMENT_ADDRESS) + .address(deployment_address) .from_block(from_block) .to_block(to_block) .events(&*SSV_EVENTS); @@ -207,13 +255,23 @@ impl SsvEventSyncer { let mut retry_cnt = 0; loop { match rpc_client.get_logs(&filter).await { - Ok(logs) => return Ok(logs), - Err(_) => { - // confirm we 
have not exceeded max + Ok(logs) => { + debug!( + from_block, + to_block, + log_count = logs.len(), + "Successfully fetched logs" + ); + return Ok(logs); + } + Err(e) => { if retry_cnt > MAX_RETRIES { + error!(?e, retry_cnt, "Max retries exceeded while fetching logs"); return Err("Unable to fetch logs".to_string()); } + warn!(?e, retry_cnt, "Error fetching logs, retrying"); + // increment retry_count and jitter retry duration // todo!() exponential backoff?? let jitter = rand::thread_rng().gen_range(0..=100); @@ -227,25 +285,42 @@ impl SsvEventSyncer { } } - /// Live sync with the chain to get new contract events while enforcing a follow distance - /// todo!(), this must be 100% reliable. add reconnect functionality, logic to deteremine when - /// we can assume there is some bigger issue - async fn live_sync(&self) -> Result<(), String> { - // Subscribe to a block stream + // Once caught up with the chain, start live sync which will stream in live blocks from the + // network. The events will be processed and duties will be created in response to network + // actions + #[instrument(skip(self))] + async fn live_sync(&self, contract_address: Address) -> Result<(), String> { + info!(?contract_address, "Starting live sync"); + let mut stream = match self.ws_client.subscribe_blocks().await { - Ok(sub) => sub.into_stream(), - Err(_) => todo!(), // have some reconnect mechansim + Ok(sub) => { + info!("Successfully subscribed to block stream"); + sub.into_stream() + } + Err(e) => { + error!(?e, "Failed to subscribe to block stream"); + todo!() // retry or exit? 
+ } }; - // Stream in new block headers + // Continuously stream in new blocks while let Some(block_header) = stream.next().await { - // fetch the logs and process with execute + // Block we are interested in is the current block - follow distance let relevant_block = block_header.number - FOLLOW_DISTANCE; - let logs = self.fetch_logs(relevant_block, relevant_block).await?; + debug!( + block_number = block_header.number, + relevant_block, "Processing new block" + ); + + let logs = self + .fetch_logs(relevant_block, relevant_block, contract_address) + .await?; + + debug!(log_count = logs.len(), "Processing logs from new block"); self.event_processor.process_logs(logs, true)?; } - // this should never reach here + error!("Block stream ended unexpectedly"); Ok(()) } } diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index 3188159dc..07559c6d2 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -24,7 +24,7 @@ pub fn parse_shares( // Validate total length of shares if shares_expected_length != shares.len() { - todo!() + println!("should fail"); } // Extract components using array slicing @@ -60,13 +60,13 @@ fn split_bytes(data: &[u8], chunk_size: usize) -> Vec> { } // Fetch the metadata for a validator from the beacon chain -pub fn fetch_validator_metadata(_public_key: PublicKey) -> ValidatorMetadata { +pub fn fetch_validator_metadata(_public_key: &PublicKey) -> Result { todo!() } // Verify that the signature over the share data is correct pub fn verify_signature(_signature: Vec) -> bool { - todo!() + true } // Perform basic verification on the operator set diff --git a/anchor/eth/test.rs b/anchor/eth/test.rs new file mode 100644 index 000000000..ec705c4c7 --- /dev/null +++ b/anchor/eth/test.rs @@ -0,0 +1,49 @@ +use database::NetworkDatabase; +use eth::{Config, Network, SsvEventSyncer}; +use openssl::pkey::Public; +use openssl::rsa::Rsa; +use std::path::Path; +use std::sync::Arc; +use tracing::{info, info_span, Level}; +use 
tracing_subscriber::{EnvFilter, prelude::*, fmt}; + +#[tokio::main] +async fn main() { + let filter = EnvFilter::builder() + .parse("debug,hyper=off,hyper_util=off,alloy_transport_http=off,reqwest=off,alloy_rpc_client=off") + .expect("filter should be valid"); + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(filter) + .init(); + let span = info_span!("main"); + let _guard = span.enter(); + + let rpc_endpoint = "http://127.0.0.1:8545"; + let ws_endpoint = "ws://127.0.0.1:8546"; + + let config = Config { + http_url: String::from(rpc_endpoint), + ws_url: String::from(ws_endpoint), + network: Network::Holesky, + }; + + let priv_key = Rsa::generate(2046).expect("Failed to generate RSA key"); + let pubkey = priv_key + .public_key_to_pem() + .and_then(|pem| Rsa::public_key_from_pem(&pem)) + .expect("Failed to process RSA key"); + let path = Path::new("db.sqlite"); + + // tie the network into the database impl!() + let db = Arc::new(NetworkDatabase::new(path, &pubkey).expect("Failed to construct database")); + info!("Constructed the database"); + + let event_syncer = SsvEventSyncer::new(db.clone(), config) + .await + .expect("Failed to construct event syncer"); + let _ = event_syncer.sync().await; + + info!("hello"); +} From a3d18bba322e8cfc30049e7b759b5df8f900dda7 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 16 Dec 2024 20:25:09 +0000 Subject: [PATCH 14/49] websocket reconnection and error handling --- anchor/eth/src/sync.rs | 90 +++++++++++++++++++++++++++--------------- 1 file changed, 58 insertions(+), 32 deletions(-) diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 7eb1cb3ad..a81b5d8ea 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -1,7 +1,7 @@ use crate::gen::SSVContract; use alloy::primitives::{address, Address}; use alloy::providers::{Provider, ProviderBuilder, RootProvider, WsConnect}; -use alloy::pubsub::PubSubFrontend; +use alloy::pubsub::{PubSubConnect, PubSubFrontend}; use 
alloy::rpc::types::{Filter, Log}; use alloy::sol_types::SolEvent; use alloy::transports::http::{Client, Http}; @@ -96,6 +96,8 @@ pub struct SsvEventSyncer { rpc_client: Arc, /// Websocket client connected to L1 to stream live SSV event information ws_client: WsClient, + /// Websocket connection url + ws_url: String, /// Event processor for logs event_processor: EventProcessor, /// The network the node is connected to @@ -112,8 +114,9 @@ impl SsvEventSyncer { let rpc_client: Arc = Arc::new(ProviderBuilder::new().on_http(http_url)); // Construct Websocket Provider + let ws = WsConnect::new(&config.ws_url); let ws_client = ProviderBuilder::new() - .on_ws(WsConnect::new(&config.ws_url)) + .on_ws(ws.clone()) .await .map_err(|e| format!("Failed to bind to WS: {}, {}", &config.ws_url, e))?; @@ -123,13 +126,14 @@ impl SsvEventSyncer { Ok(Self { rpc_client, ws_client, + ws_url: config.ws_url, event_processor, network: config.network, }) } #[instrument(skip(self))] - pub async fn sync(&self) -> Result<(), String> { + pub async fn sync(&mut self) -> Result<(), String> { info!("Starting SSV event sync"); // get network specific contract information @@ -285,42 +289,64 @@ impl SsvEventSyncer { } } - // Once caught up with the chain, start live sync which will stream in live blocks from the + // Once caught up with the chain, start live sync which will stream in live blocks from thek // network. The events will be processed and duties will be created in response to network // actions #[instrument(skip(self))] - async fn live_sync(&self, contract_address: Address) -> Result<(), String> { + async fn live_sync(&mut self, contract_address: Address) -> Result<(), String> { info!(?contract_address, "Starting live sync"); - let mut stream = match self.ws_client.subscribe_blocks().await { - Ok(sub) => { - info!("Successfully subscribed to block stream"); - sub.into_stream() - } - Err(e) => { - error!(?e, "Failed to subscribe to block stream"); - todo!() // retry or exit? 
+ loop { + // Try to connect to the websocket and subscribe to a block stream + let stream = match self.ws_client.subscribe_blocks().await { + Ok(sub) => { + info!("Successfully subscribed to block stream"); + Some(sub.into_stream()) + } + Err(e) => { + error!( + ?e, + "Failed to subscribe to block stream. Retrying in 1 second..." + ); + + // Backend has closed, need to reconnect + let ws = WsConnect::new(&self.ws_url); + if let Ok(ws_client) = ProviderBuilder::new().on_ws(ws).await { + info!("Successfully reconnected to websocket. Catching back up"); + self.ws_client = ws_client; + // Todo!() historical sync any missed blocks + } else { + tokio::time::sleep(Duration::from_secs(1)).await; + } + None + } + }; + + // If we have a connection, continuously stream in blocks + if let Some(mut stream) = stream { + while let Some(block_header) = stream.next().await { + // Block we are interested in is the current block - follow distance + let relevant_block = block_header.number - FOLLOW_DISTANCE; + debug!( + block_number = block_header.number, + relevant_block, "Processing new block" + ); + + let logs = self + .fetch_logs(relevant_block, relevant_block, contract_address) + .await?; + + debug!(log_count = logs.len(), "Processing logs from new block"); + self.event_processor.process_logs(logs, true)?; + self.event_processor + .db + .processed_block(relevant_block) + .expect("Failed to update last processed block number"); + } } - }; - - // Continuously stream in new blocks - while let Some(block_header) = stream.next().await { - // Block we are interested in is the current block - follow distance - let relevant_block = block_header.number - FOLLOW_DISTANCE; - debug!( - block_number = block_header.number, - relevant_block, "Processing new block" - ); - let logs = self - .fetch_logs(relevant_block, relevant_block, contract_address) - .await?; - - debug!(log_count = logs.len(), "Processing logs from new block"); - self.event_processor.process_logs(logs, true)?; + // If we get 
here, the stream ended (likely due to disconnect) + error!("WebSocket stream ended, reconnecting..."); } - - error!("Block stream ended unexpectedly"); - Ok(()) } } From 05a69107d1c55171e4ba6cd13c67858fde1a5a02 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 16 Dec 2024 20:47:08 +0000 Subject: [PATCH 15/49] fix block range --- anchor/eth/src/sync.rs | 46 +++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index a81b5d8ea..b64a93655 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -1,7 +1,7 @@ use crate::gen::SSVContract; use alloy::primitives::{address, Address}; use alloy::providers::{Provider, ProviderBuilder, RootProvider, WsConnect}; -use alloy::pubsub::{PubSubConnect, PubSubFrontend}; +use alloy::pubsub::PubSubFrontend; use alloy::rpc::types::{Filter, Log}; use alloy::sol_types::SolEvent; use alloy::transports::http::{Client, Http}; @@ -164,8 +164,7 @@ impl SsvEventSyncer { ) -> Result<(), String> { // Start from the contrat deployment block or the last block that has been processed let last_processed_block = self.event_processor.db.get_last_processed_block(); - let deployment_block = std::cmp::max(deployment_block, last_processed_block); - let mut start_block = deployment_block; + let mut start_block = std::cmp::max(deployment_block, last_processed_block); loop { let current_block = self.rpc_client.get_block_number().await.map_err(|e| { @@ -173,26 +172,30 @@ impl SsvEventSyncer { format!("Unable to fetch block number {}", e) })?; + // Basic verification if current_block < FOLLOW_DISTANCE { debug!("Current block less than follow distance, breaking"); break; } - let end_block = current_block - FOLLOW_DISTANCE; if end_block < start_block { debug!("End block less than start block, breaking"); break; } + // make sure we have blocks to sync + if start_block == end_block { + info!("Synced up to the tip of the chain"); + break; + } 
info!(start_block, end_block, "Fetching logs for block range"); // Chunk the start and end block range into a set of ranges of size BATCH_SIZE // and construct a future to fetch the logs in each range - let tasks: Vec<_> = (start_block..=current_block) + let tasks: Vec<_> = (start_block..=end_block) .step_by(BATCH_SIZE as usize) .map(|start| { - let (start, end) = - (start, std::cmp::min(start + BATCH_SIZE - 1, current_block)); + let (start, end) = (start, std::cmp::min(start + BATCH_SIZE - 1, end_block)); self.fetch_logs(start, end, contract_address) }) .collect(); @@ -217,7 +220,11 @@ impl SsvEventSyncer { let ordered_event_logs: Vec = ordered_event_logs.into_values().flatten().collect(); // Logs are all fetched from the chain and in order, process them but do not send off to - // be processed + // be processed since we are just reconstructing state + info!( + "Processing events from blocks {} to {}", + start_block, end_block + ); self.event_processor .process_logs(ordered_event_logs, false)?; @@ -227,12 +234,7 @@ impl SsvEventSyncer { .processed_block(end_block) .expect("Failed to update last processed block number"); - info!( - "Processed events from blocks {} to {}", - start_block, current_block - ); - - start_block = current_block + 1; + start_block = end_block + 1; } info!("Historical sync completed"); Ok(()) @@ -260,12 +262,7 @@ impl SsvEventSyncer { loop { match rpc_client.get_logs(&filter).await { Ok(logs) => { - debug!( - from_block, - to_block, - log_count = logs.len(), - "Successfully fetched logs" - ); + debug!(log_count = logs.len(), "Successfully fetched logs"); return Ok(logs); } Err(e) => { @@ -289,10 +286,10 @@ impl SsvEventSyncer { } } - // Once caught up with the chain, start live sync which will stream in live blocks from thek + // Once caught up with the chain, start live sync which will stream in live blocks from the // network. 
The events will be processed and duties will be created in response to network // actions - #[instrument(skip(self))] + #[instrument(skip(self, contract_address))] async fn live_sync(&mut self, contract_address: Address) -> Result<(), String> { info!(?contract_address, "Starting live sync"); @@ -336,7 +333,10 @@ impl SsvEventSyncer { .fetch_logs(relevant_block, relevant_block, contract_address) .await?; - debug!(log_count = logs.len(), "Processing logs from new block"); + info!( + log_count = logs.len(), + "Processing events from block {}", relevant_block + ); self.event_processor.process_logs(logs, true)?; self.event_processor .db From 1a0673be93b7da393ab5ff1814ac38a8a417b20c Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Tue, 17 Dec 2024 16:29:43 +0000 Subject: [PATCH 16/49] merge and util updates --- Cargo.lock | 6 ++ Cargo.toml | 18 ++--- anchor/Cargo.toml | 6 +- anchor/client/Cargo.toml | 26 +++---- anchor/eth/Cargo.toml | 9 ++- anchor/eth/src/event_parser.rs | 2 +- anchor/eth/src/event_processor.rs | 78 ++++++++++--------- anchor/eth/src/network_actions.rs | 32 ++++---- anchor/eth/src/sync.rs | 24 +++--- anchor/eth/src/util.rs | 123 +++++++++++++++++++++++++----- anchor/eth/test.rs | 35 ++++++--- anchor/http_api/Cargo.toml | 4 +- anchor/http_metrics/Cargo.toml | 4 +- anchor/network/Cargo.toml | 14 ++-- 14 files changed, 247 insertions(+), 134 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 07da0f9bd..4e5065da9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2782,11 +2782,16 @@ name = "eth" version = "0.1.0" dependencies = [ "alloy", + "base64 0.22.1", "database", "futures", + "hex", + "openssl", "rand", "ssv_types", "tokio", + "tracing", + "tracing-subscriber", "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", ] @@ -8188,6 +8193,7 @@ version = "0.1.0" dependencies = [ "base64 0.22.1", "derive_more 1.0.0", + "hex", "openssl", "rusqlite", "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", diff --git a/Cargo.toml 
b/Cargo.toml index 1eaaedef5..94e91f0b9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,16 +3,16 @@ members = [ "anchor", "anchor/client", + "anchor/common/ssv_types", + "anchor/common/version", "anchor/common/version", + "anchor/database", + "anchor/eth", "anchor/http_api", "anchor/http_metrics", "anchor/network", - "anchor/eth", - "anchor/common/version", - "anchor/database", "anchor/processor", "anchor/qbft", - "anchor/common/ssv_types", ] resolver = "2" @@ -25,12 +25,12 @@ qbft = { path = "anchor/qbft" } eth = { path = "anchor/eth" } http_api = { path = "anchor/http_api" } http_metrics = { path = "anchor/http_metrics" } -network = { path ="anchor/network"} -version = { path ="anchor/common/version"} +network = { path = "anchor/network" } +version = { path = "anchor/common/version" } processor = { path = "anchor/processor" } ssv_types = { path = "anchor/common/ssv_types" } database = { path = "anchor/database" } -lighthouse_network = { git = "https://github.com/sigp/lighthouse", branch = "unstable"} +lighthouse_network = { git = "https://github.com/sigp/lighthouse", branch = "unstable" } task_executor = { git = "https://github.com/sigp/lighthouse", branch = "unstable", default-features = false, features = [ "tracing", ] } metrics = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } validator_metrics = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } @@ -41,12 +41,12 @@ types = { git = "https://github.com/sigp/lighthouse", branch = "unstable" } derive_more = { version = "1.0.0", features = ["full"] } async-channel = "1.9" axum = "0.7.7" -clap = { version = "4.5.15", features = ["derive", "wrap_help"]} +clap = { version = "4.5.15", features = ["derive", "wrap_help"] } discv5 = "0.8.0" dirs = "5.0.1" either = "1.13.0" futures = "0.3.30" -tower-http = {version = "0.6", features = ["cors"] } +tower-http = { version = "0.6", features = ["cors"] } hyper = "1.4" num_cpus = "1" parking_lot = "0.12" diff --git 
a/anchor/Cargo.toml b/anchor/Cargo.toml index 1c9dd15c1..7cbea8899 100644 --- a/anchor/Cargo.toml +++ b/anchor/Cargo.toml @@ -6,16 +6,16 @@ authors = ["Sigma Prime "] rust-version = "1.81.0" [dependencies] -task_executor = { workspace = true } -sensitive_url = { workspace = true } async-channel = { workspace = true } clap = { workspace = true } +client = { workspace = true } dirs = { workspace = true } futures = { workspace = true } +sensitive_url = { workspace = true } serde = { workspace = true } +task_executor = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } -client = { workspace = true } tracing-subscriber = { workspace = true } [dev-dependencies] diff --git a/anchor/client/Cargo.toml b/anchor/client/Cargo.toml index 2b1e53ebe..99a8ece9b 100644 --- a/anchor/client/Cargo.toml +++ b/anchor/client/Cargo.toml @@ -9,22 +9,22 @@ name = "client" path = "src/lib.rs" [dependencies] -task_executor = { workspace = true } -http_api = { workspace = true } -version = { workspace = true } -http_metrics = { workspace = true } clap = { workspace = true } -serde = { workspace = true } -strum = { workspace = true } -sensitive_url = { workspace = true } dirs = { workspace = true } +ethereum_hashing = "0.7.0" +# Local dependencies +fdlimit = "0.3" +http_api = { workspace = true } +http_metrics = { workspace = true } hyper = { workspace = true } -tracing = { workspace = true } network = { workspace = true } -unused_port = { workspace = true } -tokio = { workspace = true } parking_lot = { workspace = true } processor = { workspace = true } -# Local dependencies -fdlimit = "0.3" -ethereum_hashing = "0.7.0" +sensitive_url = { workspace = true } +serde = { workspace = true } +strum = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +tracing = { workspace = true } +unused_port = { workspace = true } +version = { workspace = true } diff --git a/anchor/eth/Cargo.toml b/anchor/eth/Cargo.toml index 420192205..6d40654df 
100644 --- a/anchor/eth/Cargo.toml +++ b/anchor/eth/Cargo.toml @@ -11,16 +11,17 @@ path = "test.rs" [dependencies] alloy = { version = "0.6.4", features = ["sol-types", "transports", "json", "contract", "pubsub", "provider-ws", "rpc-types"] } -tokio = { workspace = true } +database = { workspace = true } futures = { workspace = true } rand = "0.8.5" -types = { workspace = true } -database = { workspace = true } -ssv_types = { workspace = true} +ssv_types = { workspace = true } +tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } +types = { workspace = true } openssl = { workspace = true } hex = "0.4.3" +base64 = { workspace = true } diff --git a/anchor/eth/src/event_parser.rs b/anchor/eth/src/event_parser.rs index bd18bff44..d46bc813d 100644 --- a/anchor/eth/src/event_parser.rs +++ b/anchor/eth/src/event_parser.rs @@ -1,7 +1,7 @@ use super::gen::SSVContract; use alloy::{rpc::types::Log, sol_types::SolEvent}; -// Standardized event decoding via common Decoder trait +// Standardized event decoding via common Decoder trait. 
Reduces common boilerplate pub trait EventDecoder { type Output; fn decode_from_log(log: &Log) -> Result; diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 23dcd7bc3..d653b07d8 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -6,7 +6,7 @@ use alloy::primitives::B256; use alloy::rpc::types::Log; use alloy::sol_types::SolEvent; use database::NetworkDatabase; -use ssv_types::{compute_cluster_id, Cluster, ClusterMember, Operator, OperatorId}; +use ssv_types::{Cluster, ClusterMember, Operator, OperatorId}; use std::collections::HashMap; use std::str::FromStr; use std::sync::Arc; @@ -68,7 +68,7 @@ impl EventProcessor { /// Process a new set of logs #[instrument(skip(self, logs), fields(logs_count = logs.len()))] pub fn process_logs(&self, logs: Vec, live: bool) -> Result<(), String> { - info!(logs_count = logs.len(), "Starting log processing"); + debug!(logs_count = logs.len(), "Starting log processing"); for (index, log) in logs.iter().enumerate() { trace!(log_index = index, topic = ?log.topic0(), "Processing individual log"); @@ -76,33 +76,34 @@ impl EventProcessor { error!("Log missing topic0"); "Log missing topic0".to_string() })?; - if topic0 == *SSVContract::OperatorAdded::SIGNATURE_HASH { - let handler = self.handlers.get(topic0).ok_or_else(|| { - error!(topic = ?topic0, "No handler found for topic"); - "No handler found for topic".to_string() - })?; - let _ = handler(self, log); - } + let handler = self.handlers.get(topic0).ok_or_else(|| { + error!(topic = ?topic0, "No handler found for topic"); + "No handler found for topic".to_string() + })?; + + // todo!() determine how we should handle errors + let _ = handler(self, log); - //let action: NetworkAction = log.try_into()?; - //if action != NetworkAction::NoOp && live { - // debug!(action = ?action, "Network action needs processing"); - // todo!() send off somewhere - //} + let action: NetworkAction = log.try_into()?; + if action != 
NetworkAction::NoOp && live { + debug!(action = ?action, "Network action ready for processing"); + // todo!() send off somewhere + } } - info!(logs_count = logs.len(), "Completed processing all logs"); + debug!(logs_count = logs.len(), "Completed processing all logs"); Ok(()) } // A new Operator has been registered in the network. #[instrument(skip(self, log), fields(operator_id, owner))] fn process_operator_added(&self, log: &Log) -> Result<(), String> { + // Destructure operator added event let SSVContract::OperatorAdded { - operatorId, - owner, - publicKey, + operatorId, // The new ID of the operator + owner, // The EOA owner address + publicKey, // The RSA public key .. } = SSVContract::OperatorAdded::decode_from_log(log)?; let operator_id = OperatorId(operatorId); @@ -115,17 +116,18 @@ impl EventProcessor { return Err(String::from("Operator already exists in database")); } - // Parse ABI encoded public key string + // Parse ABI encoded public key string and trim off 0x prefix let public_key_str = publicKey.to_string(); let public_key_str = public_key_str.trim_start_matches("0x"); - debug!(operator_id = ?operator_id, "Decoding operator public key"); let data = hex::decode(public_key_str).map_err(|e| { error!(operator_id = ?operator_id, error = %e, "Failed to decode public key hex"); format!("Failed to decode public key hex: {e}") })?; + + // Make sure the data is the expected length + if data.len() != 704 { error!(operator_id = ?operator_id, "Invalid data length"); return Err(String::from("Invalid data length")); } @@ -135,10 +137,10 @@ impl EventProcessor { error!(operator_id = ?operator_id, error = %e, "Invalid UTF-8 in public key"); format!("Invalid UTF-8 in public key: {e}") })?; - let data = data.trim_matches(char::from(0)).to_string(); // Construct the Operator and insert it into the database - let operator = Operator::new(&data, operator_id, 
owner).map_err(|e| { + let operator = Operator::new(&public_key_data, operator_id, owner).map_err(|e| { error!( operator_pubkey = ?publicKey, operator_id = ?operator_id, @@ -199,7 +201,7 @@ impl EventProcessor { ); format!("Failed to create PublicKey: {e}") })?; - let cluster_id = compute_cluster_id(owner, &mut operatorIds.clone()); + let cluster_id = compute_cluster_id(owner, operatorIds.clone()); let operator_ids: Vec = operatorIds.iter().map(|id| OperatorId(*id)).collect(); // Get expected nonce and and increment it. Wont the network handle this? What does it have @@ -228,10 +230,11 @@ impl EventProcessor { // fetch the validator metadata // todo!() need to hook up to beacon api - let validator_metadata = fetch_validator_metadata(&validator_pubkey).map_err(|e| { - error!(validator_pubkey= ?validator_pubkey, "Failed to fetch validator metadata"); - format!("Failed to fetch validator metadata: {e}") - })?; + let validator_metadata = + fetch_validator_metadata(&owner, &validator_pubkey).map_err(|e| { + error!(validator_pubkey= ?validator_pubkey, "Failed to fetch validator metadata"); + format!("Failed to fetch validator metadata: {e}") + })?; // Construct all of the cluster members debug!(cluster_id = ?cluster_id, "Constructing cluster members"); @@ -274,11 +277,13 @@ impl EventProcessor { fn process_validator_removed(&self, log: &Log) -> Result<(), String> { let SSVContract::ValidatorRemoved { owner, - mut operatorIds, + operatorIds, publicKey, .. 
} = SSVContract::ValidatorRemoved::decode_from_log(log)?; + debug!(owner = ?owner, public_key = ?publicKey, "Processing Validator Removed"); + // Process and fetch data let validator_pubkey = PublicKey::from_str(&publicKey.to_string()).map_err(|e| { error!( @@ -289,7 +294,8 @@ impl EventProcessor { format!("Failed to create PublicKey: {e}") })?; - let cluster_id = compute_cluster_id(owner, &mut operatorIds); + // Compute the cluster id + let cluster_id = compute_cluster_id(owner, operatorIds.clone()); debug!( cluster_id = ?cluster_id, @@ -302,9 +308,9 @@ impl EventProcessor { None => { error!( cluster_id = ?cluster_id, - "Failed to fetch validator metadata" + "Failed to fetch validator metadata from database" ); - return Err("Failed to fetch validator metada".to_string()); + return Err("Failed to fetch validator metadata from database".to_string()); } }; @@ -362,11 +368,11 @@ impl EventProcessor { fn process_cluster_liquidated(&self, log: &Log) -> Result<(), String> { let SSVContract::ClusterLiquidated { owner, - operatorIds: mut operator_ids, + operatorIds: operator_ids, .. } = SSVContract::ClusterLiquidated::decode_from_log(log)?; - let cluster_id = compute_cluster_id(owner, &mut operator_ids); + let cluster_id = compute_cluster_id(owner, operator_ids); debug!(cluster_id = ?cluster_id, "Processing cluster liquidation"); @@ -392,11 +398,11 @@ impl EventProcessor { fn process_cluster_reactivated(&self, log: &Log) -> Result<(), String> { let SSVContract::ClusterReactivated { owner, - operatorIds: mut operator_ids, + operatorIds: operator_ids, .. 
} = SSVContract::ClusterReactivated::decode_from_log(log)?; - let cluster_id = compute_cluster_id(owner, &mut operator_ids); + let cluster_id = compute_cluster_id(owner, operator_ids); debug!(cluster_id = ?cluster_id, "Processing cluster reactivation"); diff --git a/anchor/eth/src/network_actions.rs b/anchor/eth/src/network_actions.rs index 56a46c4fc..64fa6a4df 100644 --- a/anchor/eth/src/network_actions.rs +++ b/anchor/eth/src/network_actions.rs @@ -3,7 +3,6 @@ use super::gen::SSVContract; use alloy::primitives::Address; use alloy::{rpc::types::Log, sol_types::SolEvent}; use ssv_types::OperatorId; -use types::PublicKey; #[derive(Debug, PartialEq)] pub enum NetworkAction { @@ -12,11 +11,11 @@ pub enum NetworkAction { }, LiquidateCluster { owner: Address, - //operator_ids: Vec, + operator_ids: Vec, }, ReactivateCluster { owner: Address, - //operator_ids: Vec, + operator_ids: Vec, }, UpdateFeeRecipient { owner: Address, @@ -32,41 +31,44 @@ pub enum NetworkAction { } /// Parse a network log into an action to be executed -impl TryFrom for NetworkAction { +impl TryFrom<&Log> for NetworkAction { type Error = String; - - fn try_from(source: Log) -> Result { + fn try_from(source: &Log) -> Result { let topic0 = source.topic0().expect("The log should have a topic0"); match *topic0 { SSVContract::ValidatorRemoved::SIGNATURE_HASH => { let _validator_removed_log = - SSVContract::ValidatorRemoved::decode_from_log(&source)?; + SSVContract::ValidatorRemoved::decode_from_log(source)?; Ok(NetworkAction::StopValidator {}) } SSVContract::ClusterLiquidated::SIGNATURE_HASH => { - let cluster_liquidated_log = - SSVContract::ClusterLiquidated::decode_from_log(&source)?; + let SSVContract::ClusterLiquidated { + owner, operatorIds, .. 
+ } = SSVContract::ClusterLiquidated::decode_from_log(source)?; Ok(NetworkAction::LiquidateCluster { - owner: cluster_liquidated_log.owner, + owner, + operator_ids: operatorIds.into_iter().map(OperatorId).collect(), }) } SSVContract::ClusterReactivated::SIGNATURE_HASH => { - let cluster_reactivated_log = - SSVContract::ClusterReactivated::decode_from_log(&source)?; + let SSVContract::ClusterReactivated { + owner, operatorIds, .. + } = SSVContract::ClusterReactivated::decode_from_log(source)?; Ok(NetworkAction::ReactivateCluster { - owner: cluster_reactivated_log.owner, + owner, + operator_ids: operatorIds.into_iter().map(OperatorId).collect(), }) } SSVContract::FeeRecipientAddressUpdated::SIGNATURE_HASH => { let recipient_updated_log = - SSVContract::FeeRecipientAddressUpdated::decode_from_log(&source)?; + SSVContract::FeeRecipientAddressUpdated::decode_from_log(source)?; Ok(NetworkAction::UpdateFeeRecipient { owner: recipient_updated_log.owner, recipient: recipient_updated_log.recipientAddress, }) } SSVContract::ValidatorExited::SIGNATURE_HASH => { - let _validator_exited_log = SSVContract::ValidatorExited::decode_from_log(&source)?; + let _validator_exited_log = SSVContract::ValidatorExited::decode_from_log(source)?; Ok(NetworkAction::ExitValidator {}) } _ => Ok(NetworkAction::NoOp), diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index b64a93655..18084e6ba 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -57,7 +57,7 @@ const HOLESKY_DEPLOYMENT_BLOCK: u64 = 181612; /// Batch size for log fetching const BATCH_SIZE: u64 = 10000; -/// Typedef RPC and WS clients +/// RPC and WS clients types type RpcClient = RootProvider>; type WsClient = RootProvider; @@ -79,7 +79,7 @@ pub enum Network { Holesky, } -// TODO!() Dummy config struct that will be replaced +// TODO!() Dummy config struct that will be replaced. 
will be passed into the #[derive(Debug)] pub struct Config { pub http_url: String, @@ -153,6 +153,8 @@ impl SsvEventSyncer { info!("Starting live sync"); self.live_sync(contract_address).await?; + // todo!(): should this spawn long running task and return or should the event processor + // just be spawned in its own task? todo!() } @@ -162,7 +164,7 @@ impl SsvEventSyncer { contract_address: Address, deployment_block: u64, ) -> Result<(), String> { - // Start from the contrat deployment block or the last block that has been processed + // Start from the contract deployment block or the last block that has been processed let last_processed_block = self.event_processor.db.get_last_processed_block(); let mut start_block = std::cmp::max(deployment_block, last_processed_block); @@ -172,6 +174,8 @@ impl SsvEventSyncer { format!("Unable to fetch block number {}", e) })?; + let current_block = 400_000; + // Basic verification if current_block < FOLLOW_DISTANCE { debug!("Current block less than follow distance, breaking"); @@ -183,13 +187,13 @@ impl SsvEventSyncer { break; } - // make sure we have blocks to sync + // Make sure we have blocks to sync if start_block == end_block { - info!("Synced up to the tip of the chain"); + info!("Synced up to the tip of the chain, breaking"); break; } - info!(start_block, end_block, "Fetching logs for block range"); + info!(start_block, end_block, "Fetching logs for block range"); // Chunk the start and end block range into a set of ranges of size BATCH_SIZE // and construct a future to fetch the logs in each range let tasks: Vec<_> = (start_block..=end_block) @@ -215,8 +219,6 @@ impl SsvEventSyncer { let block_num = log.block_number.ok_or("Log is missing block number")?; ordered_event_logs.entry(block_num).or_default().push(log); } - - // join them back to a vec in ordered format let ordered_event_logs: Vec = ordered_event_logs.into_values().flatten().collect(); // Logs are all fetched from the chain and in order, process them but do not 
send off to @@ -240,6 +242,7 @@ impl SsvEventSyncer { Ok(()) } + // Construct a future that will fetch logs in the range from_block..to_block #[instrument(skip(self, deployment_address))] fn fetch_logs( &self, @@ -274,7 +277,6 @@ impl SsvEventSyncer { warn!(?e, retry_cnt, "Error fetching logs, retrying"); // increment retry_count and jitter retry duration - // todo!() exponential backoff?? let jitter = rand::thread_rng().gen_range(0..=100); let sleep_duration = Duration::from_millis(jitter); tokio::time::sleep(sleep_duration).await; @@ -294,7 +296,7 @@ impl SsvEventSyncer { info!(?contract_address, "Starting live sync"); loop { - // Try to connect to the websocket and subscribe to a block stream + // Try to subscribe to a block stream let stream = match self.ws_client.subscribe_blocks().await { Ok(sub) => { info!("Successfully subscribed to block stream"); @@ -322,7 +324,7 @@ impl SsvEventSyncer { // If we have a connection, continuously stream in blocks if let Some(mut stream) = stream { while let Some(block_header) = stream.next().await { - // Block we are interested in is the current block - follow distance + // Block we are interested in is the current block number - follow distance let relevant_block = block_header.number - FOLLOW_DISTANCE; debug!( block_number = block_header.number, diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index 07559c6d2..e88e263fe 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -1,14 +1,19 @@ use super::sync::MAX_OPERATORS; +use alloy::primitives::Address; use ssv_types::Share; -use ssv_types::{OperatorId, ValidatorMetadata}; +use ssv_types::{ClusterId, OperatorId, ValidatorIndex, ValidatorMetadata}; use std::collections::HashSet; +use std::str::FromStr; use types::PublicKey; -const SIGNATURE_LENGTH: usize = 96; // phase0.SignatureLength -const PUBLIC_KEY_LENGTH: usize = 48; // phase0.PublicKeyLength -const ENCRYPTED_KEY_LENGTH: usize = 256; // Original encryptedKeyLength +// phase0.SignatureLength 
+const SIGNATURE_LENGTH: usize = 96; +// phase0.PublicKeyLength +const PUBLIC_KEY_LENGTH: usize = 48; +// Length of an encrypted key +const ENCRYPTED_KEY_LENGTH: usize = 256; -// Validates and parses shares from a validator added event +// Parses shares from a ValidatorAdded event // Event contains a bytes stream of the form // [signature | public keys | encrypted keys]. pub fn parse_shares( @@ -24,30 +29,45 @@ // Validate total length of shares if shares_expected_length != shares.len() { - println!("should fail"); + return Err(format!( + "Share data has invalid length: expected {}, got {}", + shares_expected_length, + shares.len() + )); } - // Extract components using array slicing + // Extract components let signature = shares[..signature_offset].to_vec(); let share_public_keys = split_bytes( &shares[signature_offset..pub_keys_offset], PUBLIC_KEY_LENGTH, ); - let encrypted_keys = split_bytes(&shares[pub_keys_offset..], ENCRYPTED_KEY_LENGTH); + let encrypted_keys: Vec<Vec<u8>> = + split_bytes(&shares[pub_keys_offset..], ENCRYPTED_KEY_LENGTH); + // Create the shares from the share public keys and the encrypted private keys let shares: Vec<Share> = share_public_keys - .iter() - .zip(encrypted_keys.iter()) - .map(|(_public, _encrypted)| { - todo!() - /* - Share { - share_pubkey: PublicKey::try_from(public), - encrypted_private_key: encrypted.as_slice() - } - */ + .into_iter() + .zip(encrypted_keys) + .map(|(public, encrypted)| { + // Add 0x prefix to the hex encoded public key + let public_key_hex = format!("0x{}", hex::encode(&public)); + + // Create public key + let share_pubkey = PublicKey::from_str(&public_key_hex) + .map_err(|e| format!("Failed to create public key: {}", e))?; + + // Convert encrypted key into fixed array + let encrypted_array: [u8; 256] = encrypted + .try_into() + .map_err(|_| "Encrypted key has wrong length".to_string())?; + + Ok(Share { + share_pubkey, + encrypted_private_key: encrypted_array, + }) }) - .collect(); + .collect::<Result<Vec<_>, 
String>>()?; Ok((signature, shares)) } @@ -60,8 +80,20 @@ fn split_bytes(data: &[u8], chunk_size: usize) -> Vec> { } // Fetch the metadata for a validator from the beacon chain -pub fn fetch_validator_metadata(_public_key: &PublicKey) -> Result { - todo!() +pub fn fetch_validator_metadata( + owner: &Address, + public_key: &PublicKey, +) -> Result { + // todo!() fetch this from the chain + use rand::Rng; + use types::Graffiti; + Ok(ValidatorMetadata { + validator_index: ValidatorIndex(rand::thread_rng().gen_range(0..100)), + validator_pubkey: public_key.clone(), + fee_recipient: *owner, + graffiti: Graffiti::default(), + owner: *owner, + }) } // Verify that the signature over the share data is correct @@ -102,3 +134,52 @@ pub fn validate_operators(operator_ids: &[OperatorId]) -> Result<(), String> { Ok(()) } + +// Compute an identifier from the cluster from the owners and the chosen operators +pub fn compute_cluster_id(owner: Address, mut operator_ids: Vec) -> ClusterId { + // Sort the operator IDs + operator_ids.sort(); + + // Create initial value from owner address + let mut result = owner + .as_slice() + .iter() // Convert address to bytes and iterate + .fold(0u64, |acc, &b| acc.wrapping_add(b as u64)); // Add up all bytes + + // Mix in each operator ID + for id in operator_ids { + result = result + .rotate_left(13) // Bit rotation + .wrapping_add(id); // Safe addition + } + + // Stay within SQL INTEGER bounds + result %= 2_147_483_647; + + ClusterId(result) +} + +#[cfg(test)] +mod eth_util_tests { + use super::*; + + #[test] + fn test_cluster_id() { + let owner = Address::random(); + let operator_ids = vec![1, 3, 4, 2]; + let operator_ids_mixed = vec![4, 2, 3, 1]; + + let cluster_id_1 = compute_cluster_id(owner, operator_ids); + let cluster_id_2 = compute_cluster_id(owner, operator_ids_mixed); + assert_eq!(cluster_id_1, cluster_id_2); + } + + #[test] + fn test_parse_shares() { + let share_data = 
"ab6c91297d2a604d2fc301ad161f99a16baa53e549fd1822acf0f6834450103555b03281d23d0ab7ee944d564f794e040ecd60ad9894747cc6b55ef017876079c1d6aa48595a1791cefc73aa6781c5e26bc644d515e9e9c5bbc8d2b5b173569ba547ba1edf393778d17ad13f2bc8c9b5c2e17b563998a2307b6dddda4d7c6ed3a7f261137fd9c2a81bb1ad1fea6896a8b9719027f01c9b496cf7ade5972e96c94e523e2671662bcfc80d5b6672877de39803d10251d7ecb76794252dea94aa348143c62887bcd62cfb680326c786e22b6a558895f037854e0a70019360c129a788fafe48c18374382cd97a4ea5797bcf982526e76eb89d132e5547f43e9ae9fdf64e061d2f5fcb5bd5ff1de8e7722b53730c6c6a1cc31791fceaabe2e5d79944a7c0d4459ec10153075996e9ef62e4fa9da730873652820c32476c1ddfd10a7b322e67e78759ed9cdec042a09069efc363778f620b3e5ffe01cb1a45bb278768f44342c45736b3a5ccdfbf10b0a10ed26a36af787363398dd776aea98d131738a881739b7e0ee4aa5e280355e2d2254f444ade07c239f5f6870fac2143de480e6ff5e3954d6e441fd16132296960b523bd23fa7b52e357ed03f8201ed4c9b4ed486a66c818e319418c8e34d844b3812f75a74a1607c9bb0eda11c89dbd67858730076e17ed3f6d021c2e57e94e9c3d53e1f6a9c7c2d8373fd5e3340e3a14951e97b7baa5fc1825ba59bb3990f1c607d22756fd178f1a0674d47ee476633f27e961ec3a79b236fb20f863814b47fb9eee75fdbdab99b6901087c41dd31d5320ac3e3c772a8982c64b1c138cbfb968e8a6e59f027bcc53adf2f4f171cbdc6f576dbf313b11485400356865f1f2b0b0533e576d7e3487d5d7d85e8d57aeab4314ec1e49f7647b3eea9a7f1fb805cb944b175c39a2668f96d4cd97afd3dc1258cbaccde6dc5e4b48d4bfd783396505e6f083c5cb3af9e24e90f1eac03f8e8cbc2664b9e6dc81543a1a68973bb03e84f50338ed6c1247447d3a3acef69879900fa9596492cce31130668621f038f365b8b4b1946c95e41e652d868421e574850f5b0b6befb481c93be55c3f9a90f613823942fbd71354ad8202b0121885a0da475d551a86da0c7a983b4d7b403d91adf275b3348fd09b797ccb6be7ebb96efe024588d2f8105e3b7ec5e6cbefd3bb287c82f717597244ea36df07753f0dcc4ce64570fff04447a96cb9f80c6359306c5e45a42e8bbaeb3de9e2ba37aeeed85bcaeb6c61f77c9d26dd4ca853ca09ea8e2e61c675b250c7c6c6c29d7829b3534e0749b9e69b67de569b21f6f0f9a46698b30aad615800aa26ae3629f4b91dfbc3d12cf6b61ed47846b0c0522db60ac41bfc3c4e233bd098180d0257310d58099592d0a5a87e4c6704b6468
3ee1c746f2a659a01939fbc2b72d196f94452a2b32fa945d1be80a76ba64061bdb73aa23fb83b9e96af949a13e3407a3b37529e79a79814eb172afe4ff56af68417a4191ede4c5c8521ca36c41c0f9e45a960bd32c8a14cb54442e27abf8cf96089736e14340eb017cadf640dbd30014f1802ba6c686e9039f6e5509384a5bfb3f82bef56a4db9778add48a7384d6e25357842a3c591c611908083d420c6e77699793dbf0f1cc597137b48933246c7f5693098a3218312c4ae030dd74b4291e3e1f95702c7f66c22dba7a8ac634e200534c1b6b9c6397c415ab1c448c4eb6481d35250dd83c599cdc05b6e222a4543147e289cf611755dbb1f0968a61c3741a7347db1599b9c4b71e39d4921c7b3bbe018a6a766c7c26fd31e77eb9b727a6a9ca1d72a44317a54e43004f4f42dd5731ed3e83248bc2d5ccef"; + let share_data = hex::decode(share_data).expect("Failed to decode hex string"); + + let operators = vec![OperatorId(1), OperatorId(2), OperatorId(3), OperatorId(4)]; + assert!(parse_shares(share_data, &operators).is_ok()); + } +} diff --git a/anchor/eth/test.rs b/anchor/eth/test.rs index ec705c4c7..8f220723f 100644 --- a/anchor/eth/test.rs +++ b/anchor/eth/test.rs @@ -1,11 +1,11 @@ +use base64::prelude::*; use database::NetworkDatabase; use eth::{Config, Network, SsvEventSyncer}; -use openssl::pkey::Public; use openssl::rsa::Rsa; use std::path::Path; use std::sync::Arc; -use tracing::{info, info_span, Level}; -use tracing_subscriber::{EnvFilter, prelude::*, fmt}; +use tracing::{info, info_span}; +use tracing_subscriber::{fmt, prelude::*, EnvFilter}; #[tokio::main] async fn main() { @@ -29,18 +29,33 @@ async fn main() { network: Network::Holesky, }; - let priv_key = Rsa::generate(2046).expect("Failed to generate RSA key"); - let pubkey = priv_key - .public_key_to_pem() - .and_then(|pem| Rsa::public_key_from_pem(&pem)) - .expect("Failed to process RSA key"); let path = Path::new("db.sqlite"); + let pem_data = 
"LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBMVg2MUFXY001QUNLaGN5MTlUaEIKby9HMWlhN1ByOVUralJ5aWY5ZjAyRG9sd091V2ZLLzdSVUlhOEhEbHBvQlVERDkwRTVQUGdJSy9sTXB4RytXbwpwQ2N5bTBpWk9UT0JzNDE5bEh3TzA4bXFja1JsZEg5WExmbmY2UThqWFR5Ym1yYzdWNmwyNVprcTl4U0owbHR1CndmTnVTSzNCZnFtNkQxOUY0aTVCbmVaSWhjRVJTYlFLWDFxbWNqYnZFL2cyQko4TzhaZUgrd0RzTHJiNnZXQVIKY3BYWG1uelE3Vlp6ZklHTGVLVU1CTTh6SW0rcXI4RGZ4SEhSeVU1QTE3cFU4cy9MNUp5RXE1RGJjc2Q2dHlnbQp5UE9BYUNzWldVREI3UGhLOHpUWU9WYi9MM1lnSTU4bjFXek5IM0s5cmFreUppTmUxTE9GVVZzQTFDUnhtQ2YzCmlRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K"; + + let pem_decoded = BASE64_STANDARD.decode(pem_data).unwrap(); + + // Convert the decoded data to a string + let mut pem_string = String::from_utf8(pem_decoded).unwrap(); + + // Fix the header - replace PKCS1 header with PKCS8 header + pem_string = pem_string + .replace( + "-----BEGIN RSA PUBLIC KEY-----", + "-----BEGIN PUBLIC KEY-----", + ) + .replace("-----END RSA PUBLIC KEY-----", "-----END PUBLIC KEY-----"); + + // Parse the PEM string into an RSA public key using PKCS8 format + let rsa_pubkey = Rsa::public_key_from_pem(pem_string.as_bytes()) + .map_err(|e| format!("Failed to parse RSA public key: {}", e)) + .unwrap(); // tie the network into the database impl!() - let db = Arc::new(NetworkDatabase::new(path, &pubkey).expect("Failed to construct database")); + let db = + Arc::new(NetworkDatabase::new(path, &rsa_pubkey).expect("Failed to construct database")); info!("Constructed the database"); - let event_syncer = SsvEventSyncer::new(db.clone(), config) + let mut event_syncer = SsvEventSyncer::new(db.clone(), config) .await .expect("Failed to construct event syncer"); let _ = event_syncer.sync().await; diff --git a/anchor/http_api/Cargo.toml b/anchor/http_api/Cargo.toml index 9873a81ff..7769bf0cc 100644 --- a/anchor/http_api/Cargo.toml +++ b/anchor/http_api/Cargo.toml @@ -9,9 +9,9 @@ name = "http_api" path = "src/lib.rs" [dependencies] -task_executor = { 
workspace = true } axum = { workspace = true } -slot_clock = { workspace = true } serde = { workspace = true } +slot_clock = { workspace = true } +task_executor = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } diff --git a/anchor/http_metrics/Cargo.toml b/anchor/http_metrics/Cargo.toml index 7048b705a..fbf931ce9 100644 --- a/anchor/http_metrics/Cargo.toml +++ b/anchor/http_metrics/Cargo.toml @@ -5,12 +5,12 @@ edition = "2021" [dependencies] axum = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } serde = { workspace = true } +tokio = { workspace = true } tower-http = { workspace = true } tracing = { workspace = true } -tokio = { workspace = true } validator_metrics = { workspace = true } -metrics = { workspace = true } # Group dependencies warp_utils = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } diff --git a/anchor/network/Cargo.toml b/anchor/network/Cargo.toml index 3c8bac686..b3b994684 100644 --- a/anchor/network/Cargo.toml +++ b/anchor/network/Cargo.toml @@ -5,16 +5,16 @@ edition = { workspace = true } authors = ["Sigma Prime "] [dependencies] -tokio = { workspace = true } -libp2p = { version = "0.54", default-features = false, features = ["identify", "yamux", "noise", "secp256k1", "tcp", "tokio", "macros", "gossipsub", "quic", "ping"] } -futures = { workspace = true } -task_executor = { workspace = true } -version = { workspace = true } -lighthouse_network = { workspace = true} +dirs = { workspace = true } discv5 = { workspace = true } -dirs = { workspace = true } +futures = { workspace = true } +libp2p = { version = "0.54", default-features = false, features = ["identify", "yamux", "noise", "secp256k1", "tcp", "tokio", "macros", "gossipsub", "quic", "ping"] } +lighthouse_network = { workspace = true } serde = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } tracing = { workspace = true } +version = { workspace = 
true } [dev-dependencies] async-channel = { workspace = true } From fcf0e45c6082acd785b3482691b92a375f86f5c9 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Tue, 17 Dec 2024 16:36:09 +0000 Subject: [PATCH 17/49] fix types and new bin --- anchor/common/ssv_types/src/cluster.rs | 4 ---- anchor/common/ssv_types/src/lib.rs | 2 +- anchor/common/ssv_types/src/operator.rs | 9 --------- anchor/eth/Cargo.toml | 4 ++-- anchor/eth/{test.rs => execution.rs} | 2 -- 5 files changed, 3 insertions(+), 18 deletions(-) rename anchor/eth/{test.rs => execution.rs} (99%) diff --git a/anchor/common/ssv_types/src/cluster.rs b/anchor/common/ssv_types/src/cluster.rs index 97acde7de..34c9846f3 100644 --- a/anchor/common/ssv_types/src/cluster.rs +++ b/anchor/common/ssv_types/src/cluster.rs @@ -51,7 +51,3 @@ pub struct ValidatorMetadata { /// The owner of the validator and cluster pub owner: Address, } - -pub fn compute_cluster_id(owner: Address, operator_ids: &mut [u64]) -> ClusterId { - todo!() -} diff --git a/anchor/common/ssv_types/src/lib.rs b/anchor/common/ssv_types/src/lib.rs index ca3d929f3..2c533b4f1 100644 --- a/anchor/common/ssv_types/src/lib.rs +++ b/anchor/common/ssv_types/src/lib.rs @@ -1,5 +1,5 @@ pub use cluster::{ - compute_cluster_id, Cluster, ClusterId, ClusterMember, ValidatorIndex, ValidatorMetadata, + Cluster, ClusterId, ClusterMember, ValidatorIndex, ValidatorMetadata, }; pub use operator::{Operator, OperatorId}; pub use share::Share; diff --git a/anchor/common/ssv_types/src/operator.rs b/anchor/common/ssv_types/src/operator.rs index a8dd4d479..ce7f1cf58 100644 --- a/anchor/common/ssv_types/src/operator.rs +++ b/anchor/common/ssv_types/src/operator.rs @@ -5,7 +5,6 @@ use openssl::rsa::Rsa; use std::cmp::Eq; use std::fmt::Debug; use std::hash::Hash; -use std::str::Bytes; use types::Address; /// Unique identifier for an Operator. 
@@ -30,14 +29,6 @@ impl Operator { Ok(Self::new_with_pubkey(rsa_pubkey, operator_id, owner)) } - pub fn new_with_bytes( - rsa_bytes: Bytes, - operator_id: OperatorId, - owner: Address, - ) -> Result { - todo!() - } - // Creates a new operator from an existing RSA public key and OperatorId pub fn new_with_pubkey(rsa_pubkey: Rsa, id: OperatorId, owner: Address) -> Self { Self { diff --git a/anchor/eth/Cargo.toml b/anchor/eth/Cargo.toml index 6d40654df..3de9e277e 100644 --- a/anchor/eth/Cargo.toml +++ b/anchor/eth/Cargo.toml @@ -5,8 +5,8 @@ edition = { workspace = true } authors = ["Sigma Prime "] [[bin]] -name = "integration" -path = "test.rs" +name = "execution" +path = "execution.rs" [dependencies] alloy = { version = "0.6.4", features = ["sol-types", "transports", "json", "contract", "pubsub", diff --git a/anchor/eth/test.rs b/anchor/eth/execution.rs similarity index 99% rename from anchor/eth/test.rs rename to anchor/eth/execution.rs index 8f220723f..74fe99c1f 100644 --- a/anchor/eth/test.rs +++ b/anchor/eth/execution.rs @@ -59,6 +59,4 @@ async fn main() { .await .expect("Failed to construct event syncer"); let _ = event_syncer.sync().await; - - info!("hello"); } From 7eebc329b59e4f91e5248da5e8a912f7bb5d18ae Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Tue, 17 Dec 2024 16:37:17 +0000 Subject: [PATCH 18/49] remove unneeded dep --- anchor/common/ssv_types/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/anchor/common/ssv_types/Cargo.toml b/anchor/common/ssv_types/Cargo.toml index ed093c6a4..5741ce1a7 100644 --- a/anchor/common/ssv_types/Cargo.toml +++ b/anchor/common/ssv_types/Cargo.toml @@ -6,7 +6,6 @@ authors = ["Sigma Prime "] [dependencies] base64 = { workspace = true } -rusqlite = { workspace = true } derive_more = { workspace = true } openssl = { workspace = true } types = { workspace = true } From 93c0416e74555e19f807c76293f1ce8939885b25 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Tue, 17 Dec 2024 16:40:17 +0000 Subject: [PATCH 19/49] revert 
external changes --- anchor/common/ssv_types/src/cluster.rs | 2 +- anchor/common/ssv_types/src/lib.rs | 4 +--- anchor/src/main.rs | 1 - 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/anchor/common/ssv_types/src/cluster.rs b/anchor/common/ssv_types/src/cluster.rs index 34c9846f3..308aee675 100644 --- a/anchor/common/ssv_types/src/cluster.rs +++ b/anchor/common/ssv_types/src/cluster.rs @@ -48,6 +48,6 @@ pub struct ValidatorMetadata { pub fee_recipient: Address, /// Graffiti pub graffiti: Graffiti, - /// The owner of the validator and cluster + /// The owner of the validator pub owner: Address, } diff --git a/anchor/common/ssv_types/src/lib.rs b/anchor/common/ssv_types/src/lib.rs index 2c533b4f1..4cf950d16 100644 --- a/anchor/common/ssv_types/src/lib.rs +++ b/anchor/common/ssv_types/src/lib.rs @@ -1,6 +1,4 @@ -pub use cluster::{ - Cluster, ClusterId, ClusterMember, ValidatorIndex, ValidatorMetadata, -}; +pub use cluster::{Cluster, ClusterId, ClusterMember, ValidatorIndex, ValidatorMetadata}; pub use operator::{Operator, OperatorId}; pub use share::Share; mod cluster; diff --git a/anchor/src/main.rs b/anchor/src/main.rs index ebc1c741d..0f4bc07f8 100644 --- a/anchor/src/main.rs +++ b/anchor/src/main.rs @@ -33,7 +33,6 @@ fn main() { // The clone's here simply copy the Arc of the runtime. 
We pass these through the main // execution task - let anchor_executor = core_executor.clone(); let shutdown_executor = core_executor.clone(); From 78c7f7b9b07ff4b2650f317ebb215a08fad6e544 Mon Sep 17 00:00:00 2001 From: Zac Holme <79027434+Zacholme7@users.noreply.github.com> Date: Tue, 17 Dec 2024 11:40:56 -0500 Subject: [PATCH 20/49] Update lib.rs --- anchor/common/ssv_types/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/anchor/common/ssv_types/src/lib.rs b/anchor/common/ssv_types/src/lib.rs index 4cf950d16..6d25f44d2 100644 --- a/anchor/common/ssv_types/src/lib.rs +++ b/anchor/common/ssv_types/src/lib.rs @@ -4,5 +4,4 @@ pub use share::Share; mod cluster; mod operator; mod share; -mod sql_conversions; mod util; From 7c656d961ed46edda81b33650a259ed06953c377 Mon Sep 17 00:00:00 2001 From: Zachary Holme Date: Sat, 21 Dec 2024 12:35:35 -0600 Subject: [PATCH 21/49] new cluster id computation --- anchor/eth/src/event_processor.rs | 31 ++++++++--------------- anchor/eth/src/util.rs | 42 ++++++++++++++++--------------- 2 files changed, 32 insertions(+), 41 deletions(-) diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index d653b07d8..0d886d665 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -7,7 +7,7 @@ use alloy::rpc::types::Log; use alloy::sol_types::SolEvent; use database::NetworkDatabase; use ssv_types::{Cluster, ClusterMember, Operator, OperatorId}; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::str::FromStr; use std::sync::Arc; use tracing::{debug, error, info, instrument, trace, warn}; @@ -218,7 +218,7 @@ impl EventProcessor { // Parse the share byte stream into a list of valid Shares and then verify the signature debug!(cluster_id = ?cluster_id, "Parsing and verifying shares"); - let (signature, shares) = parse_shares(shares.to_vec(), &operator_ids).map_err(|e| { + let (signature, shares) = parse_shares(shares.to_vec(), &operator_ids, 
&cluster_id).map_err(|e| { error!(cluster_id = ?cluster_id, error = %e, "Failed to parse shares"); format!("Failed to parse shares: {e}") })?; @@ -231,34 +231,23 @@ impl EventProcessor { // fetch the validator metadata // todo!() need to hook up to beacon api let validator_metadata = - fetch_validator_metadata(&owner, &validator_pubkey).map_err(|e| { + fetch_validator_metadata(&validator_pubkey, &cluster_id).map_err(|e| { error!(validator_pubkey= ?validator_pubkey, "Failed to fetch validator metadata"); format!("Failed to fetch validator metadata: {e}") })?; - // Construct all of the cluster members - debug!(cluster_id = ?cluster_id, "Constructing cluster members"); - let cluster_members: Vec = shares - .iter() - .zip(operator_ids.iter()) - .map(|(share, op_id)| ClusterMember { - // todo!() check to see if one of these are this operator - operator_id: *op_id, - cluster_id, - share: share.to_owned(), - }) - .collect(); - + // Construct the cluster let cluster = Cluster { cluster_id, - cluster_members, + owner, + fee_recipient: owner, faulty: 0, liquidated: false, - validator_metadata, + cluster_members: HashSet::from_iter(operator_ids) }; // Finally, construct and insert the full cluster and insert into the database - self.db.insert_cluster(cluster).map_err(|e| { + self.db.insert_validator(cluster, validator_metadata, shares).map_err(|e| { error!(cluster_id = ?cluster_id, error = %e, "Failed to insert cluster"); format!("Failed to insert cluster: {e}") })?; @@ -430,7 +419,7 @@ impl EventProcessor { owner, recipientAddress, } = SSVContract::FeeRecipientAddressUpdated::decode_from_log(log)?; - + self.db.update_fee_recipient(owner, recipientAddress); info!( owner = ?owner, new_recipient = ?recipientAddress, @@ -447,7 +436,7 @@ impl EventProcessor { operatorIds, publicKey, } = SSVContract::ValidatorExited::decode_from_log(log)?; - + // todo!() how is this different from a validator removed info!( owner = ?owner, validator_pubkey = ?publicKey, diff --git 
a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index e88e263fe..c9c87d524 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -1,5 +1,5 @@ use super::sync::MAX_OPERATORS; -use alloy::primitives::Address; +use alloy::primitives::{keccak256, Address}; use ssv_types::Share; use ssv_types::{ClusterId, OperatorId, ValidatorIndex, ValidatorMetadata}; use std::collections::HashSet; @@ -19,6 +19,7 @@ const ENCRYPTED_KEY_LENGTH: usize = 256; // Leng pub fn parse_shares( shares: Vec, operator_ids: &[OperatorId], + cluster_id: &ClusterId ) -> Result<(Vec, Vec), String> { let operator_count = operator_ids.len(); @@ -49,7 +50,8 @@ pub fn parse_shares( let shares: Vec = share_public_keys .into_iter() .zip(encrypted_keys) - .map(|(public, encrypted)| { + .zip(operator_ids) + .map(|((public, encrypted), operator_id)| { // Add 0x prefix to the hex encoded public key let public_key_hex = format!("0x{}", hex::encode(&public)); @@ -63,6 +65,8 @@ pub fn parse_shares( .map_err(|_| "Encrypted key has wrong length".to_string())?; Ok(Share { + operator_id: *operator_id, + cluster_id: *cluster_id, share_pubkey, encrypted_private_key: encrypted_array, }) @@ -81,18 +85,17 @@ fn split_bytes(data: &[u8], chunk_size: usize) -> Vec> { // Fetch the metadata for a validator from the beacon chain pub fn fetch_validator_metadata( - owner: &Address, public_key: &PublicKey, + cluster_id: &ClusterId, ) -> Result { // todo!() fetch this from the chain use rand::Rng; use types::Graffiti; Ok(ValidatorMetadata { - validator_index: ValidatorIndex(rand::thread_rng().gen_range(0..100)), - validator_pubkey: public_key.clone(), - fee_recipient: *owner, + index: ValidatorIndex(rand::thread_rng().gen_range(0..100)), + public_key: public_key.clone(), graffiti: Graffiti::default(), - owner: *owner, + cluster_id: *cluster_id }) } @@ -139,24 +142,23 @@ pub fn validate_operators(operator_ids: &[OperatorId]) -> Result<(), String> { pub fn compute_cluster_id(owner: Address, mut operator_ids: Vec) -> 
ClusterId { // Sort the operator IDs operator_ids.sort(); + // 20 bytes for the address and num ids * 32 for ids + let data_size = 20 + (operator_ids.len() * 32); + let mut data: Vec = Vec::with_capacity(data_size); - // Create initial value from owner address - let mut result = owner - .as_slice() - .iter() // Convert address to bytes and iterate - .fold(0u64, |acc, &b| acc.wrapping_add(b as u64)); // Add up all bytes + // Add the address bytes + data.extend_from_slice(owner.as_slice()); - // Mix in each operator ID + // Add the operator IDs as 32 byte values for id in operator_ids { - result = result - .rotate_left(13) // Bit rotation - .wrapping_add(id); // Safe addition + let mut id_bytes = [0u8; 32]; + id_bytes[24..].copy_from_slice(&id.to_be_bytes()); + data.extend_from_slice(&id_bytes); } - // Stay within SQL INTEGER bounds - result %= 2_147_483_647; - - ClusterId(result) + // Hash it all + let hashed_data: [u8; 32] = keccak256(data).as_slice().try_into().expect("Conversion Failed"); + ClusterId(hashed_data) } #[cfg(test)] From 3a81b27292afdbb51f8631dbf1181f0a1ba23efb Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 23 Dec 2024 14:13:30 +0000 Subject: [PATCH 22/49] first 100 events successfully syncing --- anchor/eth/src/event_processor.rs | 66 ++++++++++++++++++++----------- anchor/eth/src/util.rs | 11 ++++-- 2 files changed, 50 insertions(+), 27 deletions(-) diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 0d886d665..8c2459f9a 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -5,8 +5,8 @@ use super::util::*; use alloy::primitives::B256; use alloy::rpc::types::Log; use alloy::sol_types::SolEvent; -use database::NetworkDatabase; -use ssv_types::{Cluster, ClusterMember, Operator, OperatorId}; +use database::{NetworkDatabase, UniqueIndex}; +use ssv_types::{Cluster, Operator, OperatorId}; use std::collections::{HashMap, HashSet}; use std::str::FromStr; use std::sync::Arc; @@ 
-125,7 +125,6 @@ impl EventProcessor { format!("Failed to decode public key hex: {e}") })?; - // Make sure the data is the expected length if data.len() != 704 { error!(operator_id = ?operator_id, "Invalid data length"); @@ -218,10 +217,11 @@ impl EventProcessor { // Parse the share byte stream into a list of valid Shares and then verify the signature debug!(cluster_id = ?cluster_id, "Parsing and verifying shares"); - let (signature, shares) = parse_shares(shares.to_vec(), &operator_ids, &cluster_id).map_err(|e| { - error!(cluster_id = ?cluster_id, error = %e, "Failed to parse shares"); - format!("Failed to parse shares: {e}") - })?; + let (signature, shares) = parse_shares(shares.to_vec(), &operator_ids, &cluster_id) + .map_err(|e| { + error!(cluster_id = ?cluster_id, error = %e, "Failed to parse shares"); + format!("Failed to parse shares: {e}") + })?; if !verify_signature(signature) { error!(cluster_id = ?cluster_id, "Signature verification failed"); @@ -243,14 +243,16 @@ impl EventProcessor { fee_recipient: owner, faulty: 0, liquidated: false, - cluster_members: HashSet::from_iter(operator_ids) + cluster_members: HashSet::from_iter(operator_ids), }; // Finally, construct and insert the full cluster and insert into the database - self.db.insert_validator(cluster, validator_metadata, shares).map_err(|e| { - error!(cluster_id = ?cluster_id, error = %e, "Failed to insert cluster"); - format!("Failed to insert cluster: {e}") - })?; + self.db + .insert_validator(cluster, validator_metadata, shares) + .map_err(|e| { + error!(cluster_id = ?cluster_id, error = %e, "Failed to insert cluster"); + format!("Failed to insert cluster: {e}") + })?; info!( cluster_id = ?cluster_id, @@ -292,7 +294,13 @@ impl EventProcessor { "Processing validator removal" ); - let metadata = match self.db.get_validator_metadata(&cluster_id) { + let metadata = match self + .db + .state + .multi_state + .validator_metadata + .get_by(&validator_pubkey) + { Some(data) => data, None => { error!( @@ 
-303,25 +311,36 @@ impl EventProcessor { } }; + let cluster = match self.db.state.multi_state.clusters.get_by(&validator_pubkey) { + Some(data) => data, + None => { + error!( + cluster_id = ?cluster_id, + "Failed to fetch cluster from database" + ); + return Err("Failed to fetch cluster from database".to_string()); + } + }; + // Make sure the right owner is removing this validator - if owner != metadata.owner { + if owner != cluster.owner { error!( cluster_id = ?cluster_id, - expected_owner = ?metadata.owner, + expected_owner = ?cluster.owner, actual_owner = ?owner, "Owner mismatch for validator removal" ); return Err(format!( "Cluster already exists with a different owner address. Expected {}. Got {}", - metadata.owner, owner + cluster.owner, owner )); } // Make sure this is the correct validator - if validator_pubkey != metadata.validator_pubkey { + if validator_pubkey != metadata.public_key { error!( cluster_id = ?cluster_id, - expected_pubkey = %metadata.validator_pubkey, + expected_pubkey = %metadata.public_key, actual_pubkey = %validator_pubkey, "Validator pubkey mismatch" ); @@ -334,14 +353,15 @@ impl EventProcessor { // todo!(): Remove it from the internal keystore } - // Remove all cluster data corresponding to this validator - self.db.delete_cluster(cluster_id).map_err(|e| { + // remove the validator and all corresponding cluster data if needed + self.db.delete_validator(&validator_pubkey).map_err(|e| { error!( cluster_id = ?cluster_id, + pubkey = ?validator_pubkey, error = %e, - "Failed to delete cluster from database" + "Failed to delete valiidator from database" ); - format!("Failed to delete cluster: {e}") + format!("Failed to validator cluster: {e}") })?; info!( @@ -419,7 +439,7 @@ impl EventProcessor { owner, recipientAddress, } = SSVContract::FeeRecipientAddressUpdated::decode_from_log(log)?; - self.db.update_fee_recipient(owner, recipientAddress); + let _ = self.db.update_fee_recipient(owner, recipientAddress); info!( owner = ?owner, new_recipient 
= ?recipientAddress, diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index c9c87d524..454969756 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -19,7 +19,7 @@ const ENCRYPTED_KEY_LENGTH: usize = 256; // Leng pub fn parse_shares( shares: Vec, operator_ids: &[OperatorId], - cluster_id: &ClusterId + cluster_id: &ClusterId, ) -> Result<(Vec, Vec), String> { let operator_count = operator_ids.len(); @@ -95,7 +95,7 @@ pub fn fetch_validator_metadata( index: ValidatorIndex(rand::thread_rng().gen_range(0..100)), public_key: public_key.clone(), graffiti: Graffiti::default(), - cluster_id: *cluster_id + cluster_id: *cluster_id, }) } @@ -157,7 +157,10 @@ pub fn compute_cluster_id(owner: Address, mut operator_ids: Vec) -> Cluster } // Hash it all - let hashed_data: [u8; 32] = keccak256(data).as_slice().try_into().expect("Conversion Failed"); + let hashed_data: [u8; 32] = keccak256(data) + .as_slice() + .try_into() + .expect("Conversion Failed"); ClusterId(hashed_data) } @@ -182,6 +185,6 @@ mod eth_util_tests { let share_data = hex::decode(share_data).expect("Failed to decode hex string"); let operators = vec![OperatorId(1), OperatorId(2), OperatorId(3), OperatorId(4)]; - assert!(parse_shares(share_data, &operators).is_ok()); + //assert!(parse_shares(share_data, &operators).is_ok()); } } From 4819cd6e9f443933cb87c73260a58db04d03663c Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Tue, 24 Dec 2024 14:16:13 +0000 Subject: [PATCH 23/49] syncing testnet events --- anchor/eth/Cargo.toml | 9 +++---- anchor/eth/execution.rs | 2 +- anchor/eth/src/event_processor.rs | 41 +++++++++++++++++++++---------- anchor/eth/src/sync.rs | 8 ++++-- anchor/eth/src/util.rs | 4 ++- 5 files changed, 42 insertions(+), 22 deletions(-) diff --git a/anchor/eth/Cargo.toml b/anchor/eth/Cargo.toml index 3de9e277e..ad39870b9 100644 --- a/anchor/eth/Cargo.toml +++ b/anchor/eth/Cargo.toml @@ -9,19 +9,18 @@ name = "execution" path = "execution.rs" [dependencies] -alloy = { version = 
"0.6.4", features = ["sol-types", "transports", "json", "contract", "pubsub", -"provider-ws", "rpc-types"] } +alloy = { workspace = true } +base64 = { workspace = true } database = { workspace = true } futures = { workspace = true } +hex = { workspace = true } +openssl = { workspace = true } rand = "0.8.5" ssv_types = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } types = { workspace = true } -openssl = { workspace = true } -hex = "0.4.3" -base64 = { workspace = true } diff --git a/anchor/eth/execution.rs b/anchor/eth/execution.rs index 74fe99c1f..11bbf8250 100644 --- a/anchor/eth/execution.rs +++ b/anchor/eth/execution.rs @@ -10,7 +10,7 @@ use tracing_subscriber::{fmt, prelude::*, EnvFilter}; #[tokio::main] async fn main() { let filter = EnvFilter::builder() - .parse("debug,hyper=off,hyper_util=off,alloy_transport_http=off,reqwest=off,alloy_rpc_client=off") + .parse("info,hyper=off,hyper_util=off,alloy_transport_http=off,reqwest=off,alloy_rpc_client=off") .expect("filter should be valid"); tracing_subscriber::registry() diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 8c2459f9a..948910de0 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -133,13 +133,13 @@ impl EventProcessor { let data = &data[64..]; let data = String::from_utf8(data.to_vec()).map_err(|e| { - error!(operator_id = ?operator_id, error = %e, "Invalid UTF-8 in public key"); - format!("Invalid UTF-8 in public key: {e}") + error!(operator_id = ?operator_id, error = %e, "Failed to convert to UTF8 String"); + format!("Failed to convert to UTF8 String: {e}") })?; - let public_key_data = data.trim_matches(char::from(0)).to_string(); + let data = data.trim_matches(char::from(0)).to_string(); // Construct the Operator and insert it into the database - let operator = Operator::new(&public_key_data, operator_id, owner).map_err(|e| { + let operator = 
Operator::new(&data, operator_id, owner).map_err(|e| { error!( operator_pubkey = ?publicKey, operator_id = ?operator_id, @@ -171,6 +171,17 @@ impl EventProcessor { fn process_operator_removed(&self, log: &Log) -> Result<(), String> { let SSVContract::OperatorRemoved { operatorId } = SSVContract::OperatorRemoved::decode_from_log(log)?; + let operator_id = OperatorId(operatorId); + debug!(operator_id = ?operator_id, "Processing operator removed"); + + self.db.delete_operator(operator_id).map_err(|e| { + error!( + operator_id = ?operator_id, + error = %e, + "Failed to remove operator" + ); + format!("Failed to remove operator: {e}") + })?; info!(operator_id = ?operatorId, "Operator removed from network"); Ok(()) @@ -257,7 +268,7 @@ impl EventProcessor { info!( cluster_id = ?cluster_id, validator_pubkey = %validator_pubkey, - "Successfully added validator and cluster" + "Successfully added validator" ); Ok(()) } @@ -294,13 +305,7 @@ impl EventProcessor { "Processing validator removal" ); - let metadata = match self - .db - .state - .multi_state - .validator_metadata - .get_by(&validator_pubkey) - { + let metadata = match self.db.metadata().get_by(&validator_pubkey) { Some(data) => data, None => { error!( @@ -311,7 +316,7 @@ impl EventProcessor { } }; - let cluster = match self.db.state.multi_state.clusters.get_by(&validator_pubkey) { + let cluster = match self.db.clusters().get_by(&validator_pubkey) { Some(data) => data, None => { error!( @@ -440,6 +445,16 @@ impl EventProcessor { recipientAddress, } = SSVContract::FeeRecipientAddressUpdated::decode_from_log(log)?; let _ = self.db.update_fee_recipient(owner, recipientAddress); + self.db + .update_fee_recipient(owner, recipientAddress) + .map_err(|e| { + error!( + owner = ?owner, + error = %e, + "Failed to update fee recipient" + ); + format!("Failed to update fee recipient: {e}") + })?; info!( owner = ?owner, new_recipient = ?recipientAddress, diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 
18084e6ba..0e230c4b1 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -153,6 +153,7 @@ impl SsvEventSyncer { info!("Starting live sync"); self.live_sync(contract_address).await?; + // todo!(): should this spawn long running task and return or should the event processor // just be spawned in its own task? todo!() @@ -174,8 +175,6 @@ impl SsvEventSyncer { format!("Unable to fetch block number {}", e) })?; - let current_block = 400_000; - // Basic verification if current_block < FOLLOW_DISTANCE { debug!("Current block less than follow distance, breaking"); @@ -293,6 +292,11 @@ impl SsvEventSyncer { // actions #[instrument(skip(self, contract_address))] async fn live_sync(&mut self, contract_address: Address) -> Result<(), String> { + info!("Network up to sync.."); + info!("Current state"); + info!("{} Operators", self.event_processor.db.num_operators()); + info!("{} Clusters", self.event_processor.db.clusters().length()); + info!("{} Validators", self.event_processor.db.metadata().length()); info!(?contract_address, "Starting live sync"); loop { diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index 454969756..c2914e10b 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -184,7 +184,9 @@ mod eth_util_tests { let share_data = 
"ab6c91297d2a604d2fc301ad161f99a16baa53e549fd1822acf0f6834450103555b03281d23d0ab7ee944d564f794e040ecd60ad9894747cc6b55ef017876079c1d6aa48595a1791cefc73aa6781c5e26bc644d515e9e9c5bbc8d2b5b173569ba547ba1edf393778d17ad13f2bc8c9b5c2e17b563998a2307b6dddda4d7c6ed3a7f261137fd9c2a81bb1ad1fea6896a8b9719027f01c9b496cf7ade5972e96c94e523e2671662bcfc80d5b6672877de39803d10251d7ecb76794252dea94aa348143c62887bcd62cfb680326c786e22b6a558895f037854e0a70019360c129a788fafe48c18374382cd97a4ea5797bcf982526e76eb89d132e5547f43e9ae9fdf64e061d2f5fcb5bd5ff1de8e7722b53730c6c6a1cc31791fceaabe2e5d79944a7c0d4459ec10153075996e9ef62e4fa9da730873652820c32476c1ddfd10a7b322e67e78759ed9cdec042a09069efc363778f620b3e5ffe01cb1a45bb278768f44342c45736b3a5ccdfbf10b0a10ed26a36af787363398dd776aea98d131738a881739b7e0ee4aa5e280355e2d2254f444ade07c239f5f6870fac2143de480e6ff5e3954d6e441fd16132296960b523bd23fa7b52e357ed03f8201ed4c9b4ed486a66c818e319418c8e34d844b3812f75a74a1607c9bb0eda11c89dbd67858730076e17ed3f6d021c2e57e94e9c3d53e1f6a9c7c2d8373fd5e3340e3a14951e97b7baa5fc1825ba59bb3990f1c607d22756fd178f1a0674d47ee476633f27e961ec3a79b236fb20f863814b47fb9eee75fdbdab99b6901087c41dd31d5320ac3e3c772a8982c64b1c138cbfb968e8a6e59f027bcc53adf2f4f171cbdc6f576dbf313b11485400356865f1f2b0b0533e576d7e3487d5d7d85e8d57aeab4314ec1e49f7647b3eea9a7f1fb805cb944b175c39a2668f96d4cd97afd3dc1258cbaccde6dc5e4b48d4bfd783396505e6f083c5cb3af9e24e90f1eac03f8e8cbc2664b9e6dc81543a1a68973bb03e84f50338ed6c1247447d3a3acef69879900fa9596492cce31130668621f038f365b8b4b1946c95e41e652d868421e574850f5b0b6befb481c93be55c3f9a90f613823942fbd71354ad8202b0121885a0da475d551a86da0c7a983b4d7b403d91adf275b3348fd09b797ccb6be7ebb96efe024588d2f8105e3b7ec5e6cbefd3bb287c82f717597244ea36df07753f0dcc4ce64570fff04447a96cb9f80c6359306c5e45a42e8bbaeb3de9e2ba37aeeed85bcaeb6c61f77c9d26dd4ca853ca09ea8e2e61c675b250c7c6c6c29d7829b3534e0749b9e69b67de569b21f6f0f9a46698b30aad615800aa26ae3629f4b91dfbc3d12cf6b61ed47846b0c0522db60ac41bfc3c4e233bd098180d0257310d58099592d0a5a87e4c6704b6468
3ee1c746f2a659a01939fbc2b72d196f94452a2b32fa945d1be80a76ba64061bdb73aa23fb83b9e96af949a13e3407a3b37529e79a79814eb172afe4ff56af68417a4191ede4c5c8521ca36c41c0f9e45a960bd32c8a14cb54442e27abf8cf96089736e14340eb017cadf640dbd30014f1802ba6c686e9039f6e5509384a5bfb3f82bef56a4db9778add48a7384d6e25357842a3c591c611908083d420c6e77699793dbf0f1cc597137b48933246c7f5693098a3218312c4ae030dd74b4291e3e1f95702c7f66c22dba7a8ac634e200534c1b6b9c6397c415ab1c448c4eb6481d35250dd83c599cdc05b6e222a4543147e289cf611755dbb1f0968a61c3741a7347db1599b9c4b71e39d4921c7b3bbe018a6a766c7c26fd31e77eb9b727a6a9ca1d72a44317a54e43004f4f42dd5731ed3e83248bc2d5ccef"; let share_data = hex::decode(share_data).expect("Failed to decode hex string"); + let cluster_id = ClusterId([0u8; 32]); + let operators = vec![OperatorId(1), OperatorId(2), OperatorId(3), OperatorId(4)]; - //assert!(parse_shares(share_data, &operators).is_ok()); + assert!(parse_shares(share_data, &operators, &cluster_id).is_ok()); } } From 1665cd0068ca689c5ee204f8c83e1fd4568ae02b Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Tue, 24 Dec 2024 16:58:15 +0000 Subject: [PATCH 24/49] network action parsing --- anchor/eth/src/event_processor.rs | 2 +- anchor/eth/src/network_actions.rs | 22 +++++++++++++--------- anchor/eth/src/util.rs | 4 +++- 3 files changed, 17 insertions(+), 11 deletions(-) diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 948910de0..f4d7fe114 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -228,7 +228,7 @@ impl EventProcessor { // Parse the share byte stream into a list of valid Shares and then verify the signature debug!(cluster_id = ?cluster_id, "Parsing and verifying shares"); - let (signature, shares) = parse_shares(shares.to_vec(), &operator_ids, &cluster_id) + let (signature, shares) = parse_shares(shares.to_vec(), &operator_ids, &cluster_id, &validator_pubkey) .map_err(|e| { error!(cluster_id = ?cluster_id, error = %e, "Failed to parse shares"); 
format!("Failed to parse shares: {e}") diff --git a/anchor/eth/src/network_actions.rs b/anchor/eth/src/network_actions.rs index 64fa6a4df..2af251628 100644 --- a/anchor/eth/src/network_actions.rs +++ b/anchor/eth/src/network_actions.rs @@ -3,11 +3,13 @@ use super::gen::SSVContract; use alloy::primitives::Address; use alloy::{rpc::types::Log, sol_types::SolEvent}; use ssv_types::OperatorId; +use std::str::FromStr; +use types::PublicKey; #[derive(Debug, PartialEq)] pub enum NetworkAction { StopValidator { - //pubkey: PublicKey, + validator_pubkey: PublicKey, }, LiquidateCluster { owner: Address, @@ -22,10 +24,7 @@ pub enum NetworkAction { recipient: Address, }, ExitValidator { - //pubkey: PublicKey, - //block_number: u64, - //validator_index: u64, - //own_validator: bool, + validator_pubkey: PublicKey, }, NoOp, } @@ -37,9 +36,11 @@ impl TryFrom<&Log> for NetworkAction { let topic0 = source.topic0().expect("The log should have a topic0"); match *topic0 { SSVContract::ValidatorRemoved::SIGNATURE_HASH => { - let _validator_removed_log = + let SSVContract::ValidatorRemoved { publicKey, .. } = SSVContract::ValidatorRemoved::decode_from_log(source)?; - Ok(NetworkAction::StopValidator {}) + let validator_pubkey = PublicKey::from_str(&publicKey.to_string()) + .map_err(|e| format!("Failed to create PublicKey: {e}"))?; + Ok(NetworkAction::StopValidator { validator_pubkey }) } SSVContract::ClusterLiquidated::SIGNATURE_HASH => { let SSVContract::ClusterLiquidated { @@ -68,8 +69,11 @@ impl TryFrom<&Log> for NetworkAction { }) } SSVContract::ValidatorExited::SIGNATURE_HASH => { - let _validator_exited_log = SSVContract::ValidatorExited::decode_from_log(source)?; - Ok(NetworkAction::ExitValidator {}) + let SSVContract::ValidatorExited { publicKey, .. 
} = + SSVContract::ValidatorExited::decode_from_log(source)?; + let validator_pubkey = PublicKey::from_str(&publicKey.to_string()) + .map_err(|e| format!("Failed to create PublicKey: {e}"))?; + Ok(NetworkAction::ExitValidator { validator_pubkey }) } _ => Ok(NetworkAction::NoOp), } diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index c2914e10b..279de218a 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -20,6 +20,7 @@ pub fn parse_shares( shares: Vec, operator_ids: &[OperatorId], cluster_id: &ClusterId, + validator_pubkey: &PublicKey, ) -> Result<(Vec, Vec), String> { let operator_count = operator_ids.len(); @@ -65,6 +66,7 @@ pub fn parse_shares( .map_err(|_| "Encrypted key has wrong length".to_string())?; Ok(Share { + validator_pubkey: validator_pubkey.clone(), operator_id: *operator_id, cluster_id: *cluster_id, share_pubkey, @@ -187,6 +189,6 @@ mod eth_util_tests { let cluster_id = ClusterId([0u8; 32]); let operators = vec![OperatorId(1), OperatorId(2), OperatorId(3), OperatorId(4)]; - assert!(parse_shares(share_data, &operators, &cluster_id).is_ok()); + //assert!(parse_shares(share_data, &operators, &cluster_id, ).is_ok()); } } From 01d1e3865bb8d3cf037cb5f529d3128cf10d702e Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 6 Jan 2025 15:26:30 +0000 Subject: [PATCH 25/49] test cases, cleanup, beacon client for metadata --- anchor/eth/Cargo.toml | 2 + anchor/eth/execution.rs | 9 ++- anchor/eth/src/event_parser.rs | 2 +- anchor/eth/src/event_processor.rs | 126 ++++++++++++++++++------------ anchor/eth/src/sync.rs | 3 +- anchor/eth/src/util.rs | 106 ++++++++++++++++++++++--- 6 files changed, 186 insertions(+), 62 deletions(-) diff --git a/anchor/eth/Cargo.toml b/anchor/eth/Cargo.toml index ad39870b9..eea96825c 100644 --- a/anchor/eth/Cargo.toml +++ b/anchor/eth/Cargo.toml @@ -21,6 +21,8 @@ tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } types = { workspace = true } +reqwest = { 
workspace = true } +serde = { workspace = true } diff --git a/anchor/eth/execution.rs b/anchor/eth/execution.rs index 11bbf8250..ebf2a49d3 100644 --- a/anchor/eth/execution.rs +++ b/anchor/eth/execution.rs @@ -10,8 +10,8 @@ use tracing_subscriber::{fmt, prelude::*, EnvFilter}; #[tokio::main] async fn main() { let filter = EnvFilter::builder() - .parse("info,hyper=off,hyper_util=off,alloy_transport_http=off,reqwest=off,alloy_rpc_client=off") - .expect("filter should be valid"); + .parse("info,hyper=off,hyper_util=off,alloy_transport_http=off,reqwest=off,alloy_rpc_client=off") + .expect("filter should be valid"); tracing_subscriber::registry() .with(fmt::layer()) @@ -21,12 +21,15 @@ async fn main() { let _guard = span.enter(); let rpc_endpoint = "http://127.0.0.1:8545"; + //let rpc_endpoint = "https://colo.sigp-dev.net/mainnet-ee/Nae2OmaelooG/"; let ws_endpoint = "ws://127.0.0.1:8546"; + let beacon_endpoint = "http://127.0.0.1:5052"; let config = Config { http_url: String::from(rpc_endpoint), ws_url: String::from(ws_endpoint), - network: Network::Holesky, + beacon_url: String::from(beacon_endpoint), + network: Network::Mainnet, }; let path = Path::new("db.sqlite"); diff --git a/anchor/eth/src/event_parser.rs b/anchor/eth/src/event_parser.rs index d46bc813d..c746d0b38 100644 --- a/anchor/eth/src/event_parser.rs +++ b/anchor/eth/src/event_parser.rs @@ -1,7 +1,7 @@ use super::gen::SSVContract; use alloy::{rpc::types::Log, sol_types::SolEvent}; -// Standardized event decoding via common Decoder trait. Reduces common boilerplate +// Standardized event decoding via common Decoder trait. 
pub trait EventDecoder { type Output; fn decode_from_log(log: &Log) -> Result; diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index f4d7fe114..cb42f2ccd 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -6,28 +6,37 @@ use alloy::primitives::B256; use alloy::rpc::types::Log; use alloy::sol_types::SolEvent; use database::{NetworkDatabase, UniqueIndex}; -use ssv_types::{Cluster, Operator, OperatorId}; +use reqwest::Client; +use ssv_types::{Cluster, Operator, OperatorId, ValidatorIndex}; use std::collections::{HashMap, HashSet}; use std::str::FromStr; use std::sync::Arc; use tracing::{debug, error, info, instrument, trace, warn}; use types::PublicKey; -// Handler for a log +// Specific Handler for a log type type EventHandler = fn(&EventProcessor, &Log) -> Result<(), String>; /// Event Processor pub struct EventProcessor { /// Function handlers for event processing handlers: HashMap, - // Reference to the database + /// Reference to the database pub db: Arc, + /// Client to interact with the beacon chain + pub beacon_client: BeaconClient, +} + +/// Http client to fetch metadata from the beacon chain +pub(crate) struct BeaconClient { + pub client: Client, + pub base_url: String, } impl EventProcessor { /// Construct a new EventProcessor - pub fn new(db: Arc) -> Self { - // register log handlers for easy dispatch + pub fn new(db: Arc, beacon_url: &String) -> Self { + // Register log handlers for easy dispatch let mut handlers: HashMap = HashMap::new(); handlers.insert( SSVContract::OperatorAdded::SIGNATURE_HASH, @@ -62,7 +71,11 @@ impl EventProcessor { Self::process_validator_exited, ); - Self { handlers, db } + Self { + handlers, + db, + beacon_client: BeaconClient::new(beacon_url), + } } /// Process a new set of logs @@ -72,23 +85,27 @@ impl EventProcessor { for (index, log) in logs.iter().enumerate() { trace!(log_index = index, topic = ?log.topic0(), "Processing individual log"); + // extract 
the topic0 to retrieve log handler let topic0 = log.topic0().ok_or_else(|| { error!("Log missing topic0"); "Log missing topic0".to_string() })?; - let handler = self.handlers.get(topic0).ok_or_else(|| { error!(topic = ?topic0, "No handler found for topic"); "No handler found for topic".to_string() })?; - // todo!() determine how we should handle errors + // todo!() some way to gracefully handle errors? let _ = handler(self, log); - let action: NetworkAction = log.try_into()?; - if action != NetworkAction::NoOp && live { - debug!(action = ?action, "Network action ready for processing processing"); - // todo!() send off somewhere + // If live is true, then we are currently in a live sync and want to take some action in + // response to the log. Parse the log into a network action and send to be processed; + if live { + let action: NetworkAction = log.try_into()?; + if action != NetworkAction::NoOp && live { + debug!(action = ?action, "Network action ready for processing"); + // todo!() send off somewhere + } } } @@ -108,7 +125,7 @@ impl EventProcessor { } = SSVContract::OperatorAdded::decode_from_log(log)?; let operator_id = OperatorId(operatorId); - debug!(operator_id = ?operator_id, owner = ?owner, "Processing operator registration"); + debug!(operator_id = ?operator_id, owner = ?owner, "Processing operator added"); // Confirm that this operator does not already exist if self.db.operator_exists(&operator_id) { @@ -116,21 +133,21 @@ impl EventProcessor { return Err(String::from("Operator already exists in database")); } - // Parse ABI encoded public key string and trim off 0x prefix + // Parse ABI encoded public key string and trim off 0x prefix for hex decoding let public_key_str = publicKey.to_string(); let public_key_str = public_key_str.trim_start_matches("0x"); - let data = hex::decode(public_key_str).map_err(|e| { - error!(operator_id = ?operator_id, error = %e, "Failed to decode public key hex"); - format!("Failed to decode public key hex: {e}") + 
error!(operator_id = ?operator_id, error = %e, "Failed to decode public key data from hex"); + format!("Failed to decode public key data from hex: {e}") })?; // Make sure the data is the expected length if data.len() != 704 { - error!(operator_id = ?operator_id, "Invalid data length"); - return Err(String::from("Invalid data length")); + error!(operator_id = ?operator_id, expected = 704, actual = data.len(), "Invalid public key data length"); + return Err(String::from("Invalid public key data length")); } + // Remove abi encoding information and then convert to valid utf8 string let data = &data[64..]; let data = String::from_utf8(data.to_vec()).map_err(|e| { error!(operator_id = ?operator_id, error = %e, "Failed to convert to UTF8 String"); @@ -148,7 +165,6 @@ impl EventProcessor { ); format!("Failed to construct operator: {e}") })?; - self.db.insert_operator(&operator).map_err(|e| { error!( operator_id = ?operator_id, @@ -169,11 +185,13 @@ impl EventProcessor { // An Operator has been removed from the network #[instrument(skip(self, log), fields(operator_id))] fn process_operator_removed(&self, log: &Log) -> Result<(), String> { + // Extract the ID of the Operator let SSVContract::OperatorRemoved { operatorId } = SSVContract::OperatorRemoved::decode_from_log(log)?; let operator_id = OperatorId(operatorId); debug!(operator_id = ?operator_id, "Processing operator removed"); + // Delete the operator from database and in memory. Will handle existence check self.db.delete_operator(operator_id).map_err(|e| { error!( operator_id = ?operator_id, @@ -187,11 +205,13 @@ impl EventProcessor { Ok(()) } - // A new validator has entered the network. This means that a new cluster has formed and this - // operator is a potential member in the cluster. Perform verification, store all data, and - // extract the key if one belongs to us. + // A new validator has entered the network. 
This means that a either a new cluster has formed + // and this is the first validator for the cluster, or this validator is joining an existing + // cluster. Perform data verification, store all relevant data, and extract the KeyShare if it + // belongs to this operator #[instrument(skip(self, log), fields(validator_pubkey, cluster_id, owner))] fn process_validator_added(&self, log: &Log) -> Result<(), String> { + // Parse and destructure log let SSVContract::ValidatorAdded { owner, operatorIds, @@ -202,12 +222,15 @@ impl EventProcessor { debug!(owner = ?owner, operator_count = operatorIds.len(), "Processing validator addition"); + // Get the index of the validator + //let index = self.beacon_client.get_validator_index(&publicKey.to_string()); + // Process data into a usable form let validator_pubkey = PublicKey::from_str(&publicKey.to_string()).map_err(|e| { error!( validator_pubkey = %publicKey, error = %e, - "Failed to construct validator pubkey" + "Failed to create PublicKey" ); format!("Failed to create PublicKey: {e}") })?; @@ -222,17 +245,22 @@ impl EventProcessor { debug!(cluster_id = ?cluster_id, "Validating operators"); validate_operators(&operator_ids)?; if operator_ids.iter().any(|id| !self.db.operator_exists(id)) { - error!(cluster_id = ?cluster_id, "One or more operators do not exist in database"); + error!(cluster_id = ?cluster_id, "One or more operators do not exist"); return Err("One or more operators do not exist".to_string()); } // Parse the share byte stream into a list of valid Shares and then verify the signature debug!(cluster_id = ?cluster_id, "Parsing and verifying shares"); - let (signature, shares) = parse_shares(shares.to_vec(), &operator_ids, &cluster_id, &validator_pubkey) - .map_err(|e| { - error!(cluster_id = ?cluster_id, error = %e, "Failed to parse shares"); - format!("Failed to parse shares: {e}") - })?; + let (signature, shares) = parse_shares( + shares.to_vec(), + &operator_ids, + &cluster_id, + &validator_pubkey, + ) + 
.map_err(|e| { + error!(cluster_id = ?cluster_id, error = %e, "Failed to parse shares"); + format!("Failed to parse shares: {e}") + })?; if !verify_signature(signature) { error!(cluster_id = ?cluster_id, "Signature verification failed"); @@ -240,14 +268,17 @@ impl EventProcessor { } // fetch the validator metadata - // todo!() need to hook up to beacon api - let validator_metadata = - fetch_validator_metadata(&validator_pubkey, &cluster_id).map_err(|e| { - error!(validator_pubkey= ?validator_pubkey, "Failed to fetch validator metadata"); - format!("Failed to fetch validator metadata: {e}") - })?; + let validator_metadata = fetch_validator_metadata( + &validator_pubkey, + /* ValidatorIndex(index), */ + &cluster_id, + ) + .map_err(|e| { + error!(validator_pubkey= ?validator_pubkey, "Failed to fetch validator metadata"); + format!("Failed to fetch validator metadata: {e}") + })?; - // Construct the cluster + // Finally, construct and insert the full cluster and insert into the database let cluster = Cluster { cluster_id, owner, @@ -256,13 +287,11 @@ impl EventProcessor { liquidated: false, cluster_members: HashSet::from_iter(operator_ids), }; - - // Finally, construct and insert the full cluster and insert into the database self.db - .insert_validator(cluster, validator_metadata, shares) + .insert_validator(cluster, validator_metadata.clone(), shares) .map_err(|e| { - error!(cluster_id = ?cluster_id, error = %e, "Failed to insert cluster"); - format!("Failed to insert cluster: {e}") + error!(cluster_id = ?cluster_id, error = %e, validator_metadata = ?validator_metadata.public_key, "Failed to insert validator into cluster"); + format!("Failed to insert validator into cluster: {e}") })?; info!( @@ -273,10 +302,10 @@ impl EventProcessor { Ok(()) } - // A validator has been removed from the network. 
Since this validator is no long in the - // network, the cluster that was responsible for it must be cleaned up + // A validator has been removed from the network and its respective cluster #[instrument(skip(self, log), fields(cluster_id, validator_pubkey, owner))] fn process_validator_removed(&self, log: &Log) -> Result<(), String> { + // Parse and destructure log let SSVContract::ValidatorRemoved { owner, operatorIds, @@ -291,7 +320,7 @@ impl EventProcessor { error!( validator_pubkey = %publicKey, error = %e, - "Failed to construct validator pubkey" + "Failed to construct validator pubkey in removal" ); format!("Failed to create PublicKey: {e}") })?; @@ -315,7 +344,6 @@ impl EventProcessor { return Err("Failed to fetch validator metadata from database".to_string()); } }; - let cluster = match self.db.clusters().get_by(&validator_pubkey) { Some(data) => data, None => { @@ -358,7 +386,7 @@ impl EventProcessor { // todo!(): Remove it from the internal keystore } - // remove the validator and all corresponding cluster data if needed + // Remove the validator and all corresponding cluster data self.db.delete_validator(&validator_pubkey).map_err(|e| { error!( cluster_id = ?cluster_id, @@ -390,6 +418,7 @@ impl EventProcessor { debug!(cluster_id = ?cluster_id, "Processing cluster liquidation"); + // Update the status of the cluster to be liquidated self.db.update_status(cluster_id, true).map_err(|e| { error!( cluster_id = ?cluster_id, @@ -407,7 +436,7 @@ impl EventProcessor { Ok(()) } - // A cluster that was previously liquidated has had more SSV deposited + // A cluster that was previously liquidated has had more SSV deposited and is now active #[instrument(skip(self, log), fields(cluster_id, owner))] fn process_cluster_reactivated(&self, log: &Log) -> Result<(), String> { let SSVContract::ClusterReactivated { @@ -420,6 +449,7 @@ impl EventProcessor { debug!(cluster_id = ?cluster_id, "Processing cluster reactivation"); + // Update the status of the cluster to be active 
self.db.update_status(cluster_id, false).map_err(|e| { error!( cluster_id = ?cluster_id, diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 0e230c4b1..bcbbc7b94 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -84,6 +84,7 @@ pub enum Network { pub struct Config { pub http_url: String, pub ws_url: String, + pub beacon_url: String, pub network: Network, } @@ -121,7 +122,7 @@ impl SsvEventSyncer { .map_err(|e| format!("Failed to bind to WS: {}, {}", &config.ws_url, e))?; // Construct an EventProcessor with access to the DB - let event_processor = EventProcessor::new(db); + let event_processor = EventProcessor::new(db, &config.beacon_url); Ok(Self { rpc_client, diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index 279de218a..f75641b03 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -1,10 +1,16 @@ use super::sync::MAX_OPERATORS; use alloy::primitives::{keccak256, Address}; +use rand::Rng; +use reqwest::Client; +use serde::Deserialize; use ssv_types::Share; use ssv_types::{ClusterId, OperatorId, ValidatorIndex, ValidatorMetadata}; use std::collections::HashSet; use std::str::FromStr; -use types::PublicKey; +use types::{Graffiti, PublicKey}; + +use crate::event_processor::BeaconClient; +use std::time::Duration; // phase0.SignatureLength const SIGNATURE_LENGTH: usize = 96; @@ -13,6 +19,60 @@ const PUBLIC_KEY_LENGTH: usize = 48; // Length of an encrypted key const ENCRYPTED_KEY_LENGTH: usize = 256; // Leng +// Api endpoint to fetch index of a validator +const INDEX_ENDPOINT: &str = "/eth/v1/beacon/states/head/validators/"; + +#[derive(Deserialize, Default)] +struct ValidatorResponse { + data: ValidatorInfo, +} + +#[derive(Deserialize, Default)] +struct ValidatorInfo { + index: String, +} + +impl BeaconClient { + // Initialize a new client with default settings + pub fn new(base_url: &str) -> Self { + // Configure the client with reasonable defaults + let client = Client::builder() + 
.timeout(Duration::from_secs(10)) + .connect_timeout(Duration::from_secs(5)) + .build() + .expect("Failed to create HTTP client"); + + BeaconClient { + client, + base_url: base_url.to_string(), + } + } + + // Method to get validator information + pub async fn get_validator_index(&self, pubkey: &str) -> usize { + // Combine base URL with the specific validator endpoint + let url = format!( + "{}/eth/v1/beacon/states/head/validators/{}", + self.base_url, pubkey + ); + + // Handle the request's Result explicitly since Response doesn't implement Default + let response = match self.client.get(&url).send().await { + Ok(resp) => resp, + Err(_) => return 0, + }; + + // Then handle JSON parsing, using default if it fails + let validator_response = response + .json::() + .await + .unwrap_or_default(); + + // Finally parse the index to usize, defaulting to 0 if it fails + validator_response.data.index.parse().unwrap_or(0) + } +} + // Parses shares from a ValidatorAdded event // Event contains a bytes stream of the form // [signature | public keys | encrypted keys]. @@ -90,13 +150,15 @@ pub fn fetch_validator_metadata( public_key: &PublicKey, cluster_id: &ClusterId, ) -> Result { - // todo!() fetch this from the chain - use rand::Rng; - use types::Graffiti; + // Default Anchor-SSV Graffiti + let mut bytes = [0u8; 32]; + bytes[..10].copy_from_slice(b"Anchor-SSV"); + + Ok(ValidatorMetadata { - index: ValidatorIndex(rand::thread_rng().gen_range(0..100)), + index: ValidatorIndex(rand::thread_rng().gen_range(0..100)), // fetch from chain? 
public_key: public_key.clone(), - graffiti: Graffiti::default(), + graffiti: Graffiti::from(bytes), cluster_id: *cluster_id, }) } @@ -169,8 +231,10 @@ pub fn compute_cluster_id(owner: Address, mut operator_ids: Vec) -> Cluster #[cfg(test)] mod eth_util_tests { use super::*; + use alloy::primitives::address; #[test] + // Test to make sure cluster id computation is order independent fn test_cluster_id() { let owner = Address::random(); let operator_ids = vec![1, 3, 4, 2]; @@ -182,13 +246,37 @@ mod eth_util_tests { } #[test] + // Test to make sure a ClusterID matches a current onchain value + fn test_valid_cluster_id() { + // https://ssvscan.io/cluster/a3d1e25b31cb6da1b9636568a221b0d7ae1a57a7f14ace5c97d1093ebf6b786c + let onchain = "a3d1e25b31cb6da1b9636568a221b0d7ae1a57a7f14ace5c97d1093ebf6b786c"; + let operator_ids = vec![62, 256, 259, 282]; + let owner = address!("E1b2308852F0e85D9F23278A6A80131ac8901dBF"); + let cluster_id = compute_cluster_id(owner, operator_ids); + let cluster_id_hex = hex::encode(*cluster_id); + assert_eq!(onchain, cluster_id_hex); + } + + #[test] + // Test to make sure we can fetch the index of a validator + fn test_fetch_index() {} + + // Test to make sure we can properly verify signatures + #[test] + fn test_sig_verification() {} + + #[test] + // Ensure that we can properly parse share data into a set of shares fn test_parse_shares() { + // Onchain share data and some other corresponding info let share_data = 
"ab6c91297d2a604d2fc301ad161f99a16baa53e549fd1822acf0f6834450103555b03281d23d0ab7ee944d564f794e040ecd60ad9894747cc6b55ef017876079c1d6aa48595a1791cefc73aa6781c5e26bc644d515e9e9c5bbc8d2b5b173569ba547ba1edf393778d17ad13f2bc8c9b5c2e17b563998a2307b6dddda4d7c6ed3a7f261137fd9c2a81bb1ad1fea6896a8b9719027f01c9b496cf7ade5972e96c94e523e2671662bcfc80d5b6672877de39803d10251d7ecb76794252dea94aa348143c62887bcd62cfb680326c786e22b6a558895f037854e0a70019360c129a788fafe48c18374382cd97a4ea5797bcf982526e76eb89d132e5547f43e9ae9fdf64e061d2f5fcb5bd5ff1de8e7722b53730c6c6a1cc31791fceaabe2e5d79944a7c0d4459ec10153075996e9ef62e4fa9da730873652820c32476c1ddfd10a7b322e67e78759ed9cdec042a09069efc363778f620b3e5ffe01cb1a45bb278768f44342c45736b3a5ccdfbf10b0a10ed26a36af787363398dd776aea98d131738a881739b7e0ee4aa5e280355e2d2254f444ade07c239f5f6870fac2143de480e6ff5e3954d6e441fd16132296960b523bd23fa7b52e357ed03f8201ed4c9b4ed486a66c818e319418c8e34d844b3812f75a74a1607c9bb0eda11c89dbd67858730076e17ed3f6d021c2e57e94e9c3d53e1f6a9c7c2d8373fd5e3340e3a14951e97b7baa5fc1825ba59bb3990f1c607d22756fd178f1a0674d47ee476633f27e961ec3a79b236fb20f863814b47fb9eee75fdbdab99b6901087c41dd31d5320ac3e3c772a8982c64b1c138cbfb968e8a6e59f027bcc53adf2f4f171cbdc6f576dbf313b11485400356865f1f2b0b0533e576d7e3487d5d7d85e8d57aeab4314ec1e49f7647b3eea9a7f1fb805cb944b175c39a2668f96d4cd97afd3dc1258cbaccde6dc5e4b48d4bfd783396505e6f083c5cb3af9e24e90f1eac03f8e8cbc2664b9e6dc81543a1a68973bb03e84f50338ed6c1247447d3a3acef69879900fa9596492cce31130668621f038f365b8b4b1946c95e41e652d868421e574850f5b0b6befb481c93be55c3f9a90f613823942fbd71354ad8202b0121885a0da475d551a86da0c7a983b4d7b403d91adf275b3348fd09b797ccb6be7ebb96efe024588d2f8105e3b7ec5e6cbefd3bb287c82f717597244ea36df07753f0dcc4ce64570fff04447a96cb9f80c6359306c5e45a42e8bbaeb3de9e2ba37aeeed85bcaeb6c61f77c9d26dd4ca853ca09ea8e2e61c675b250c7c6c6c29d7829b3534e0749b9e69b67de569b21f6f0f9a46698b30aad615800aa26ae3629f4b91dfbc3d12cf6b61ed47846b0c0522db60ac41bfc3c4e233bd098180d0257310d58099592d0a5a87e4c6704b6468
3ee1c746f2a659a01939fbc2b72d196f94452a2b32fa945d1be80a76ba64061bdb73aa23fb83b9e96af949a13e3407a3b37529e79a79814eb172afe4ff56af68417a4191ede4c5c8521ca36c41c0f9e45a960bd32c8a14cb54442e27abf8cf96089736e14340eb017cadf640dbd30014f1802ba6c686e9039f6e5509384a5bfb3f82bef56a4db9778add48a7384d6e25357842a3c591c611908083d420c6e77699793dbf0f1cc597137b48933246c7f5693098a3218312c4ae030dd74b4291e3e1f95702c7f66c22dba7a8ac634e200534c1b6b9c6397c415ab1c448c4eb6481d35250dd83c599cdc05b6e222a4543147e289cf611755dbb1f0968a61c3741a7347db1599b9c4b71e39d4921c7b3bbe018a6a766c7c26fd31e77eb9b727a6a9ca1d72a44317a54e43004f4f42dd5731ed3e83248bc2d5ccef"; let share_data = hex::decode(share_data).expect("Failed to decode hex string"); - + let operator_ids = vec![OperatorId(1), OperatorId(2), OperatorId(3), OperatorId(4)]; let cluster_id = ClusterId([0u8; 32]); + let pubkey = PublicKey::from_str("0xb1d97447eeb16cffa0464040860db6f12ac0af6a1583a45f4f07fb61e1470f3733f8b7ec8e3c9ff4a9da83086d342ba1").expect("Failed to create public key"); - let operators = vec![OperatorId(1), OperatorId(2), OperatorId(3), OperatorId(4)]; - //assert!(parse_shares(share_data, &operators, &cluster_id, ).is_ok()); + let (_, shares) = parse_shares(share_data, &operator_ids, &cluster_id, &pubkey) + .expect("Failed to parse shares"); + assert_eq!(shares.len(), 4); } } From 134e2d1172b3b30f30cfc6686e61d4b2d8af2ad3 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 6 Jan 2025 16:33:23 +0000 Subject: [PATCH 26/49] batch block processing --- anchor/eth/execution.rs | 4 +-- anchor/eth/src/sync.rs | 80 +++++++++++++++++++++++++++-------------- 2 files changed, 56 insertions(+), 28 deletions(-) diff --git a/anchor/eth/execution.rs b/anchor/eth/execution.rs index ebf2a49d3..2e9c46e0e 100644 --- a/anchor/eth/execution.rs +++ b/anchor/eth/execution.rs @@ -20,8 +20,8 @@ async fn main() { let span = info_span!("main"); let _guard = span.enter(); - let rpc_endpoint = "http://127.0.0.1:8545"; //let rpc_endpoint = 
"https://colo.sigp-dev.net/mainnet-ee/Nae2OmaelooG/"; + let rpc_endpoint = "http://127.0.0.1:8545"; let ws_endpoint = "ws://127.0.0.1:8546"; let beacon_endpoint = "http://127.0.0.1:5052"; @@ -29,7 +29,7 @@ async fn main() { http_url: String::from(rpc_endpoint), ws_url: String::from(ws_endpoint), beacon_url: String::from(beacon_endpoint), - network: Network::Mainnet, + network: Network::Holesky, }; let path = Path::new("db.sqlite"); diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index bcbbc7b94..d7f10d5b7 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -57,6 +57,9 @@ const HOLESKY_DEPLOYMENT_BLOCK: u64 = 181612; /// Batch size for log fetching const BATCH_SIZE: u64 = 10000; +/// Batch size for task groups +const GROUP_SIZE: usize = 50; + /// RPC and WS clients types type RpcClient = RootProvider>; type WsClient = RootProvider; @@ -193,10 +196,15 @@ impl SsvEventSyncer { break; } - info!(start_block, end_block, "Fetching logs for block range"); + // Here, we have a start..endblock that we need to sync the logs from. This range gets + // broken up into individual ranges of BATCH_SIZE where the logs are fetched from. The + // individual ranges are further broken up into a set of batches that are sequentially + // processed. This makes it so we don't have a ton of logs that all have to be processed + // in one pass + // Chunk the start and end block range into a set of ranges of size BATCH_SIZE // and construct a future to fetch the logs in each range - let tasks: Vec<_> = (start_block..=end_block) + let mut tasks: Vec<_> = (start_block..=end_block) .step_by(BATCH_SIZE as usize) .map(|start| { let (start, end) = (start, std::cmp::min(start + BATCH_SIZE - 1, end_block)); @@ -204,39 +212,58 @@ impl SsvEventSyncer { }) .collect(); - // Process batches, also in batches. - // todo!() based on number of logs - - // Await all of the futures. 
- let event_logs: Vec> = try_join_all(tasks).await?; - let event_logs: Vec = event_logs.into_iter().flatten().collect(); - - // The futures may join out of order block wise. The individual events within the block - // retain their tx ordering. Due to this, we can reassemble back into blocks and be - // confident the order is correct - let mut ordered_event_logs: BTreeMap> = BTreeMap::new(); - for log in event_logs { - let block_num = log.block_number.ok_or("Log is missing block number")?; - ordered_event_logs.entry(block_num).or_default().push(log); + // Further chunk the block ranges into groups where each group covers 500k blocks, so + // there are 50 tasks per group. BATCH_SIZE * 50 = 500k + let mut task_groups = Vec::new(); + while !tasks.is_empty() { + // drain takes elements from the original vector, moving them to a new vector + // take up to chunk_size elements (or whatever is left if less than chunk_size) + let chunk: Vec<_> = tasks.drain(..tasks.len().min(GROUP_SIZE)).collect(); + task_groups.push(chunk); } - let ordered_event_logs: Vec = ordered_event_logs.into_values().flatten().collect(); - // Logs are all fetched from the chain and in order, process them but do not send off to - // be processed since we are just reconstructing state info!( - "Processing events from blocks {} to {}", - start_block, end_block + start_block = start_block, + end_block = end_block, + "Syncing all events" ); - self.event_processor - .process_logs(ordered_event_logs, false)?; + for (index, group) in task_groups.into_iter().enumerate() { + let calculated_start = + start_block + (index as u64 * BATCH_SIZE * GROUP_SIZE as u64); + let calculated_end = calculated_start + (BATCH_SIZE * GROUP_SIZE as u64); + let calculated_end = std::cmp::min(calculated_end, end_block); + info!( + "Fetching blocks for range {}..{}", + calculated_start, calculated_end + ); + + // Await all of the futures. 
+ let event_logs: Vec> = try_join_all(group).await?; + let event_logs: Vec = event_logs.into_iter().flatten().collect(); + + // The futures may join out of order block wise. The individual events within the block + // retain their tx ordering. Due to this, we can reassemble back into blocks and be + // confident the order is correct + let mut ordered_event_logs: BTreeMap> = BTreeMap::new(); + for log in event_logs { + let block_num = log.block_number.ok_or("Log is missing block number")?; + ordered_event_logs.entry(block_num).or_default().push(log); + } + let ordered_event_logs: Vec = + ordered_event_logs.into_values().flatten().collect(); - // update the block we have synced to + // Logs are all fetched from the chain and in order, process them but do not send off to + // be processed since we are just reconstructing state + self.event_processor + .process_logs(ordered_event_logs, false)?; + } + info!("Processed all events up to block {}", end_block); + // update processing information + start_block = end_block + 1; self.event_processor .db .processed_block(end_block) .expect("Failed to update last processed block number"); - - start_block = end_block + 1; } info!("Historical sync completed"); Ok(()) @@ -344,6 +371,7 @@ impl SsvEventSyncer { log_count = logs.len(), "Processing events from block {}", relevant_block ); + // TODO!() add error handling here self.event_processor.process_logs(logs, true)?; self.event_processor .db From c6f6f42a9e198c7841451d08eab413b860d605e8 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 6 Jan 2025 16:58:46 +0000 Subject: [PATCH 27/49] historical sync missing blocks while websocket down --- anchor/eth/src/sync.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index d7f10d5b7..f0e9625f6 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -230,7 +230,7 @@ impl SsvEventSyncer { for (index, group) in task_groups.into_iter().enumerate() { let 
calculated_start = start_block + (index as u64 * BATCH_SIZE * GROUP_SIZE as u64); - let calculated_end = calculated_start + (BATCH_SIZE * GROUP_SIZE as u64); + let calculated_end = calculated_start + (BATCH_SIZE * GROUP_SIZE as u64) - 1; let calculated_end = std::cmp::min(calculated_end, end_block); info!( "Fetching blocks for range {}..{}", @@ -345,7 +345,9 @@ impl SsvEventSyncer { if let Ok(ws_client) = ProviderBuilder::new().on_ws(ws).await { info!("Successfully reconnected to websocket. Catching back up"); self.ws_client = ws_client; - // Todo!() historical sync any missed blocks + // Historical sync any missed blocks while down, can pass 0 as deployment + // block since it will use last_processed_block from DB anyways + self.historical_sync(contract_address, 0).await?; } else { tokio::time::sleep(Duration::from_secs(1)).await; } From db751d1f3690cfcbd5f9304a9a7733a5aa7a2f8c Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 6 Jan 2025 17:56:44 +0000 Subject: [PATCH 28/49] signature verification --- anchor/eth/src/event_processor.rs | 8 +++++--- anchor/eth/src/util.rs | 22 +++++++++++++++++----- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index cb42f2ccd..18c2717a6 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -237,8 +237,10 @@ impl EventProcessor { let cluster_id = compute_cluster_id(owner, operatorIds.clone()); let operator_ids: Vec = operatorIds.iter().map(|id| OperatorId(*id)).collect(); - // Get expected nonce and and increment it. Wont the network handle this? 
What does it have - // to do with the database + // Get the expected nonce, and then increment it + let nonce = 10; + // let nonce = self.db.get_nonce(owner); + // self.db.bump_nonce(owner); // Perform verification on the operator set and make sure they are all registered in the // network @@ -262,7 +264,7 @@ impl EventProcessor { format!("Failed to parse shares: {e}") })?; - if !verify_signature(signature) { + if !verify_signature(signature, nonce, &owner, &validator_pubkey) { error!(cluster_id = ?cluster_id, "Signature verification failed"); return Err("Signature verification failed".to_string()); } diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index f75641b03..5de8552b6 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -1,4 +1,5 @@ use super::sync::MAX_OPERATORS; +use crate::event_processor::BeaconClient; use alloy::primitives::{keccak256, Address}; use rand::Rng; use reqwest::Client; @@ -7,9 +8,8 @@ use ssv_types::Share; use ssv_types::{ClusterId, OperatorId, ValidatorIndex, ValidatorMetadata}; use std::collections::HashSet; use std::str::FromStr; -use types::{Graffiti, PublicKey}; +use types::{Graffiti, Hash256, PublicKey, Signature}; -use crate::event_processor::BeaconClient; use std::time::Duration; // phase0.SignatureLength @@ -154,7 +154,6 @@ pub fn fetch_validator_metadata( let mut bytes = [0u8; 32]; bytes[..10].copy_from_slice(b"Anchor-SSV"); - Ok(ValidatorMetadata { index: ValidatorIndex(rand::thread_rng().gen_range(0..100)), // fetch from chain? 
public_key: public_key.clone(), @@ -164,8 +163,21 @@ pub fn fetch_validator_metadata( } // Verify that the signature over the share data is correct -pub fn verify_signature(_signature: Vec) -> bool { - true +pub fn verify_signature( + signature: Vec, + nonce: u16, + owner: &Address, + public_key: &PublicKey, +) -> bool { + // Hash the owner and nonce concatinated + let data = format!("{}:{}", owner, nonce); + let hash = keccak256(data); + + // Deserialize the signature + let signature = Signature::deserialize(&signature).expect("Failed to deserialize signature"); + + // Verify the signature against the message + signature.verify(public_key, hash) } // Perform basic verification on the operator set From 788deea24ba52e683c68e966c805ac0b4a4383f9 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 6 Jan 2025 22:27:55 +0000 Subject: [PATCH 29/49] integrate nonce and test sig verification --- anchor/eth/src/event_processor.rs | 11 +++++++---- anchor/eth/src/sync.rs | 2 +- anchor/eth/src/util.rs | 21 +++++++++++++++------ 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 18c2717a6..90fc19237 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -238,9 +238,11 @@ impl EventProcessor { let operator_ids: Vec = operatorIds.iter().map(|id| OperatorId(*id)).collect(); // Get the expected nonce, and then increment it - let nonce = 10; - // let nonce = self.db.get_nonce(owner); - // self.db.bump_nonce(owner); + let nonce = self.db.get_nonce(&owner); + self.db.bump_nonce(&owner).map_err(|e| { + error!(owner = ?owner, "Failed to bump nonce"); + format!("Failed to bump nonce: {e}") + })?; // Perform verification on the operator set and make sure they are all registered in the // network @@ -264,6 +266,7 @@ impl EventProcessor { format!("Failed to parse shares: {e}") })?; + println!("{:?} {:?} {:?} {:?}", signature, nonce, owner, validator_pubkey); if 
!verify_signature(signature, nonce, &owner, &validator_pubkey) { error!(cluster_id = ?cluster_id, "Signature verification failed"); return Err("Signature verification failed".to_string()); @@ -503,7 +506,7 @@ impl EventProcessor { operatorIds, publicKey, } = SSVContract::ValidatorExited::decode_from_log(log)?; - // todo!() how is this different from a validator removed + // just create a validator exit task info!( owner = ?owner, validator_pubkey = ?publicKey, diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index f0e9625f6..7862fe08a 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -322,7 +322,7 @@ impl SsvEventSyncer { async fn live_sync(&mut self, contract_address: Address) -> Result<(), String> { info!("Network up to sync.."); info!("Current state"); - info!("{} Operators", self.event_processor.db.num_operators()); + //info!("{} Operators", self.event_processor.db.num_operators()); info!("{} Clusters", self.event_processor.db.clusters().length()); info!("{} Validators", self.event_processor.db.metadata().length()); info!(?contract_address, "Starting live sync"); diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index 5de8552b6..40103d966 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -8,7 +8,7 @@ use ssv_types::Share; use ssv_types::{ClusterId, OperatorId, ValidatorIndex, ValidatorMetadata}; use std::collections::HashSet; use std::str::FromStr; -use types::{Graffiti, Hash256, PublicKey, Signature}; +use types::{Graffiti, PublicKey, Signature}; use std::time::Duration; @@ -17,7 +17,7 @@ const SIGNATURE_LENGTH: usize = 96; // phase0.PublicKeyLength const PUBLIC_KEY_LENGTH: usize = 48; // Length of an encrypted key -const ENCRYPTED_KEY_LENGTH: usize = 256; // Leng +const ENCRYPTED_KEY_LENGTH: usize = 256; // Api endpoint to fetch index of a validator const INDEX_ENDPOINT: &str = "/eth/v1/beacon/states/head/validators/"; @@ -56,13 +56,11 @@ impl BeaconClient { self.base_url, pubkey ); - // Handle the 
request's Result explicitly since Response doesn't implement Default + // Handle the Response, defaulting to 0 index let response = match self.client.get(&url).send().await { Ok(resp) => resp, Err(_) => return 0, }; - - // Then handle JSON parsing, using default if it fails let validator_response = response .json::() .await @@ -275,7 +273,18 @@ mod eth_util_tests { // Test to make sure we can properly verify signatures #[test] - fn test_sig_verification() {} + fn test_sig_verification() { + // random data that was taken from chain + let owner = address!("382f6ff5b9a29fcf1dd2bf8b86c3234dc7ed2df6"); + let public_key = PublicKey::from_str("0x94cbce91137bfda4a7638941a68d6b156712bd1ce80e5dc580adc74a445099cbbfb9f97a6c7c89c6a87e28e0657821ac").expect("Failed to create public key"); + let nonce = 8; + let signature_data = [151, 32, 191, 178, 170, 21, 45, 81, 34, 50, 220, 37, 95, 149, 101, 178, 38, 128, 11, 195, 98, 241, 226, 70, 46, 8, 168, 133, 99, 23, 73, 126, 61, 33, 197, 226, 105, 11, 134, 248, 226, 127, 60, 108, 102, 109, 148, 135, 16, 76, 114, 132, 123, 186, 148, 147, 170, 143, 204, 45, 71, 59, 76, 131, 220, 199, 179, 219, 47, 115, 45, 162, 168, 163, 223, 110, 38, 9, 166, 82, 34, 227, 53, 50, 31, 105, 74, 122, 179, 172, 22, 245, 89, 32, 214, 69].to_vec(); + assert!(verify_signature(signature_data.clone(), nonce, &owner, &public_key)); + + // make sure that a wrong nonce fails the signature check + assert!(!verify_signature(signature_data, nonce + 1, &owner, &public_key)); + } + #[test] // Ensure that we can properly parse share data into a set of shares From 4263a8fbe4dce335d5bac6aab147807efd0e529f Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 6 Jan 2025 23:10:27 +0000 Subject: [PATCH 30/49] beacon index test --- anchor/eth/src/util.rs | 35 +++++++++++++++++++++++++++++------ 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index 40103d966..780697400 100644 --- a/anchor/eth/src/util.rs +++ 
b/anchor/eth/src/util.rs @@ -267,9 +267,15 @@ mod eth_util_tests { assert_eq!(onchain, cluster_id_hex); } - #[test] + #[tokio::test] // Test to make sure we can fetch the index of a validator - fn test_fetch_index() {} + async fn test_fetch_index() { + let beacon_client = BeaconClient::new("http://127.0.0.1:5052"); + let public_key = "0x94cbce91137bfda4a7638941a68d6b156712bd1ce80e5dc580adc74a445099cbbfb9f97a6c7c89c6a87e28e0657821ac"; + + let index = beacon_client.get_validator_index(public_key).await; + assert_eq!(index, 1552545); + } // Test to make sure we can properly verify signatures #[test] @@ -278,14 +284,31 @@ mod eth_util_tests { let owner = address!("382f6ff5b9a29fcf1dd2bf8b86c3234dc7ed2df6"); let public_key = PublicKey::from_str("0x94cbce91137bfda4a7638941a68d6b156712bd1ce80e5dc580adc74a445099cbbfb9f97a6c7c89c6a87e28e0657821ac").expect("Failed to create public key"); let nonce = 8; - let signature_data = [151, 32, 191, 178, 170, 21, 45, 81, 34, 50, 220, 37, 95, 149, 101, 178, 38, 128, 11, 195, 98, 241, 226, 70, 46, 8, 168, 133, 99, 23, 73, 126, 61, 33, 197, 226, 105, 11, 134, 248, 226, 127, 60, 108, 102, 109, 148, 135, 16, 76, 114, 132, 123, 186, 148, 147, 170, 143, 204, 45, 71, 59, 76, 131, 220, 199, 179, 219, 47, 115, 45, 162, 168, 163, 223, 110, 38, 9, 166, 82, 34, 227, 53, 50, 31, 105, 74, 122, 179, 172, 22, 245, 89, 32, 214, 69].to_vec(); - assert!(verify_signature(signature_data.clone(), nonce, &owner, &public_key)); + let signature_data = [ + 151, 32, 191, 178, 170, 21, 45, 81, 34, 50, 220, 37, 95, 149, 101, 178, 38, 128, 11, + 195, 98, 241, 226, 70, 46, 8, 168, 133, 99, 23, 73, 126, 61, 33, 197, 226, 105, 11, + 134, 248, 226, 127, 60, 108, 102, 109, 148, 135, 16, 76, 114, 132, 123, 186, 148, 147, + 170, 143, 204, 45, 71, 59, 76, 131, 220, 199, 179, 219, 47, 115, 45, 162, 168, 163, + 223, 110, 38, 9, 166, 82, 34, 227, 53, 50, 31, 105, 74, 122, 179, 172, 22, 245, 89, 32, + 214, 69, + ] + .to_vec(); + assert!(verify_signature( + 
signature_data.clone(), + nonce, + &owner, + &public_key + )); // make sure that a wrong nonce fails the signature check - assert!(!verify_signature(signature_data, nonce + 1, &owner, &public_key)); + assert!(!verify_signature( + signature_data, + nonce + 1, + &owner, + &public_key + )); } - #[test] // Ensure that we can properly parse share data into a set of shares fn test_parse_shares() { From 92c4c5e91d647c15454cc885a8234a134f19cc59 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Tue, 7 Jan 2025 13:37:36 +0000 Subject: [PATCH 31/49] bump --- Cargo.lock | 453 ++++++++++++++++++++++------------------ Cargo.toml | 5 + anchor/eth/execution.rs | 1 - anchor/eth/src/util.rs | 1 + 4 files changed, 261 insertions(+), 199 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4e5065da9..9893f6c7e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -143,9 +143,9 @@ dependencies = [ [[package]] name = "alloy-chains" -version = "0.1.48" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0161082e0edd9013d23083465cc04b20e44b7a15646d36ba7b0cdb7cd6fe18f" +checksum = "da226340862e036ab26336dc99ca85311c6b662267c1440e1733890fd688802c" dependencies = [ "alloy-primitives", "num_enum", @@ -203,9 +203,9 @@ dependencies = [ [[package]] name = "alloy-core" -version = "0.8.15" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c618bd382f0bc2ac26a7e4bfae01c9b015ca8f21b37ca40059ae35a7e62b3dc6" +checksum = "d0713007d14d88a6edb8e248cddab783b698dbb954a28b8eee4bab21cfb7e578" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -216,9 +216,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.8.15" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41056bde53ae10ffbbf11618efbe1e0290859e5eab0fe9ef82ebdb62f12a866f" +checksum = "44e3b98c37b3218924cd1d2a8570666b89662be54e5b182643855f783ea68b33" dependencies = [ "alloy-json-abi", "alloy-primitives", 
@@ -228,7 +228,7 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.20", + "winnow 0.6.22", ] [[package]] @@ -312,9 +312,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.8.15" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c357da577dfb56998d01f574d81ad7a1958d248740a7981b205d69d65a7da404" +checksum = "731ea743b3d843bc657e120fb1d1e9cc94f5dab8107e35a82125a63e6420a102" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -374,9 +374,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.15" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6259a506ab13e1d658796c31e6e39d2e2ee89243bcc505ddc613b35732e0a430" +checksum = "788bb18e8f61d5d9340b52143f27771daf7e1dccbaf2741621d2493f9debf52e" dependencies = [ "alloy-rlp", "arbitrary", @@ -388,7 +388,6 @@ dependencies = [ "foldhash", "getrandom", "hashbrown 0.15.2", - "hex-literal", "indexmap", "itoa", "k256 0.13.4", @@ -432,7 +431,7 @@ dependencies = [ "lru", "parking_lot 0.12.3", "pin-project", - "reqwest 0.12.9", + "reqwest 0.12.12", "schnellru", "serde", "serde_json", @@ -481,7 +480,7 @@ checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -498,7 +497,7 @@ dependencies = [ "alloy-transport-ws", "futures", "pin-project", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde", "serde_json", "tokio", @@ -567,23 +566,23 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.8.15" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9d64f851d95619233f74b310f12bcf16e0cbc27ee3762b6115c14a84809280a" +checksum = "a07b74d48661ab2e4b50bb5950d74dbff5e61dd8ed03bb822281b706d54ebacb" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 
2.0.90", + "syn 2.0.95", ] [[package]] name = "alloy-sol-macro-expander" -version = "0.8.15" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bf7ed1574b699f48bf17caab4e6e54c6d12bc3c006ab33d58b1e227c1c3559f" +checksum = "19cc9c7f20b90f9be1a8f71a3d8e283a43745137b0837b1a1cb13159d37cad72" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", @@ -593,16 +592,16 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.8.15" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c02997ccef5f34f9c099277d4145f183b422938ed5322dc57a089fe9b9ad9ee" +checksum = "713b7e6dfe1cb2f55c80fb05fd22ed085a1b4e48217611365ed0ae598a74c6ac" dependencies = [ "alloy-json-abi", "const-hex", @@ -611,25 +610,25 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.90", + "syn 2.0.95", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.8.15" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce13ff37285b0870d0a0746992a4ae48efaf34b766ae4c2640fa15e5305f8e73" +checksum = "1eda2711ab2e1fb517fc6e2ffa9728c9a232e296d16810810e6957b781a1b8bc" dependencies = [ "serde", - "winnow 0.6.20", + "winnow 0.6.22", ] [[package]] name = "alloy-sol-types" -version = "0.8.15" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1174cafd6c6d810711b4e00383037bdb458efc4fe3dbafafa16567e0320c54d8" +checksum = "e3b478bc9c0c4737a04cd976accde4df7eba0bdc0d90ad6ff43d58bc93cf79c1" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -666,7 +665,7 @@ checksum = "5dc013132e34eeadaa0add7e74164c1503988bfba8bae885b32e0918ba85a8a6" dependencies = [ "alloy-json-rpc", "alloy-transport", - "reqwest 0.12.9", + "reqwest 0.12.12", "serde_json", "tower", "tracing", @@ -775,9 +774,9 @@ 
dependencies = [ [[package]] name = "anyhow" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "arbitrary" @@ -963,7 +962,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", "synstructure", ] @@ -975,7 +974,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -1044,18 +1043,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -1082,6 +1081,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "attohttpc" version = "0.24.1" @@ -1101,7 +1106,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -1123,7 +1128,7 @@ dependencies = [ "http 1.2.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.1", + "hyper 1.5.2", "hyper-util", "itoa", "matchit", @@ -1405,7 +1410,7 @@ dependencies = [ [[package]] name = "bls" 
version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "arbitrary", @@ -1536,9 +1541,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.4" +version = "1.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9157bbaa6b165880c27a4293a474c91cdcf265cc68cc829bf10be0964a391caf" +checksum = "a012a0df96dd6d06ba9a1b29d6402d1a5d77c6befd2566afdc26e10603dc93d7" dependencies = [ "jobserver", "libc", @@ -1645,7 +1650,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -1674,7 +1679,7 @@ dependencies = [ [[package]] name = "clap_utils" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "clap", @@ -1698,7 +1703,7 @@ dependencies = [ "fdlimit", "http_api", "http_metrics", - "hyper 1.5.1", + "hyper 1.5.2", "network", "parking_lot 0.12.3", "processor", @@ -1738,7 +1743,7 @@ dependencies = [ [[package]] name = "compare_fields" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "itertools 0.10.5", ] @@ -1755,7 +1760,7 @@ dependencies = [ [[package]] name = "compare_fields_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "quote", "syn 1.0.109", @@ -1901,18 
+1906,18 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ -1929,9 +1934,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" @@ -2046,7 +2051,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -2094,7 +2099,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -2116,7 +2121,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -2190,6 +2195,7 @@ name = "database" version = "0.1.0" dependencies = [ "base64 0.22.1", + "dashmap", "openssl", "parking_lot 0.12.3", "r2d2", @@ -2306,7 +2312,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -2317,7 +2323,7 @@ checksum = 
"5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -2338,7 +2344,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", "unicode-xid", ] @@ -2376,7 +2382,7 @@ dependencies = [ [[package]] name = "directory" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "clap", "clap_utils 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2550,7 +2556,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -2717,7 +2723,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -2788,6 +2794,8 @@ dependencies = [ "hex", "openssl", "rand", + "reqwest 0.12.12", + "serde", "ssv_types", "tokio", "tracing", @@ -2864,7 +2872,7 @@ dependencies = [ [[package]] name = "eth2_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "paste", "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2886,7 +2894,7 @@ dependencies = [ [[package]] name = "eth2_interop_keypairs" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "bls 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "ethereum_hashing", @@ -2954,7 +2962,7 @@ dependencies = [ 
[[package]] name = "eth2_network_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "bytes", "discv5 0.9.0", @@ -3121,7 +3129,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -3260,6 +3268,17 @@ dependencies = [ "bytes", ] +[[package]] +name = "fastrlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + [[package]] name = "fdlimit" version = "0.3.0" @@ -3358,7 +3377,7 @@ dependencies = [ [[package]] name = "fixed_bytes" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -3383,9 +3402,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" [[package]] name = "foreign-types" @@ -3524,7 +3543,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -3668,14 +3687,14 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 
2.0.95", ] [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "gossipsub" @@ -3710,7 +3729,7 @@ dependencies = [ [[package]] name = "gossipsub" version = "0.5.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "async-channel", "asynchronous-codec", @@ -3779,6 +3798,25 @@ dependencies = [ "tracing", ] +[[package]] +name = "h2" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.2.0", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "hash-db" version = "0.15.2" @@ -3897,12 +3935,6 @@ dependencies = [ "serde", ] -[[package]] -name = "hex-literal" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" - [[package]] name = "hex_fmt" version = "0.3.0" @@ -4112,15 +4144,15 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.31" +version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "h2", + "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", "httparse", @@ 
-4136,13 +4168,14 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" +checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" dependencies = [ "bytes", "futures-channel", "futures-util", + "h2 0.4.7", "http 1.2.0", "http-body 1.0.1", "httparse", @@ -4162,12 +4195,29 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", ] +[[package]] +name = "hyper-rustls" +version = "0.27.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +dependencies = [ + "futures-util", + "http 1.2.0", + "hyper 1.5.2", + "hyper-util", + "rustls 0.23.20", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.26.1", + "tower-service", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -4175,7 +4225,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.31", + "hyper 0.14.32", "native-tls", "tokio", "tokio-native-tls", @@ -4189,7 +4239,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.5.1", + "hyper 1.5.2", "hyper-util", "native-tls", "tokio", @@ -4208,7 +4258,7 @@ dependencies = [ "futures-util", "http 1.2.0", "http-body 1.0.1", - "hyper 1.5.1", + "hyper 1.5.2", "pin-project-lite", "socket2 0.5.8", "tokio", @@ -4354,7 +4404,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -4428,7 +4478,7 @@ dependencies 
= [ "bytes", "futures", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "log", "rand", "tokio", @@ -4489,7 +4539,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -4533,7 +4583,7 @@ dependencies = [ [[package]] name = "int_to_bytes" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "bytes", ] @@ -4729,7 +4779,7 @@ dependencies = [ [[package]] name = "kzg" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "arbitrary", "c-kzg", @@ -4779,9 +4829,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.168" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libflate" @@ -5166,7 +5216,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -5307,9 +5357,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.20" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" dependencies = [ "cc", "pkg-config", @@ -5369,7 +5419,7 @@ dependencies = [ [[package]] name = "lighthouse_network" version = "0.2.0" -source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -5427,7 +5477,7 @@ dependencies = [ [[package]] name = "lighthouse_version" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "git-version", "target_info", @@ -5506,7 +5556,7 @@ dependencies = [ [[package]] name = "logging" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "chrono", "metrics 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -5554,7 +5604,7 @@ dependencies = [ [[package]] name = "lru_cache" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "fnv", ] @@ -5630,7 +5680,7 @@ dependencies = [ [[package]] name = "merkle_proof" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -5672,7 +5722,7 @@ dependencies = [ [[package]] name = "metrics" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "prometheus", ] @@ -5724,9 +5774,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +checksum = "4ffbe83022cedc1d264172192511ae958937694cd57ce297164951b8b3568394" dependencies = [ "adler2", ] @@ -6052,14 +6102,14 @@ checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] name = "object" -version = "0.36.5" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] @@ -6141,7 +6191,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -6401,7 +6451,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 2.0.6", + "thiserror 2.0.9", "ucd-trie", ] @@ -6417,29 +6467,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version = "1.1.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -6544,7 +6594,7 @@ dependencies = [ [[package]] name = "pretty_reqwest_error" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "reqwest 0.11.27", "sensitive_url 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -6624,7 +6674,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -6701,7 +6751,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -6726,13 +6776,13 @@ dependencies = [ [[package]] name = "proptest-derive" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" +checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -6827,7 +6877,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls 0.23.20", "socket2 0.5.8", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -6846,7 +6896,7 @@ dependencies = [ 
"rustls 0.23.20", "rustls-pki-types", "slab", - "thiserror 2.0.6", + "thiserror 2.0.9", "tinyvec", "tracing", "web-time", @@ -6854,9 +6904,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52cd4b1eff68bf27940dd39811292c49e007f4d0b4c357358dc9b0197be6b527" +checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" dependencies = [ "cfg_aliases", "libc", @@ -6868,9 +6918,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] @@ -7064,11 +7114,11 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2", + "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.31", - "hyper-rustls", + "hyper 0.14.32", + "hyper-rustls 0.24.2", "hyper-tls 0.5.0", "ipnet", "js-sys", @@ -7101,18 +7151,21 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.9" +version = "0.12.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" dependencies = [ "base64 0.22.1", "bytes", + "encoding_rs", "futures-core", "futures-util", + "h2 0.4.7", "http 1.2.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.1", + "hyper 1.5.2", + "hyper-rustls 0.27.5", "hyper-tls 0.6.0", "hyper-util", "ipnet", @@ -7128,8 +7181,10 @@ dependencies = [ "serde_json", "serde_urlencoded", "sync_wrapper 1.0.2", + "system-configuration 0.6.1", "tokio", "tokio-native-tls", + "tower", "tower-service", "url", "wasm-bindgen", @@ -7265,17 +7320,19 @@ dependencies = [ [[package]] name = "ruint" -version = 
"1.12.3" +version = "1.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" +checksum = "f5ef8fb1dd8de3870cb8400d51b4c2023854bbafd5431a3ac7e7317243e22d2f" dependencies = [ "alloy-rlp", "arbitrary", "ark-ff 0.3.0", "ark-ff 0.4.2", "bytes", - "fastrlp", + "fastrlp 0.3.1", + "fastrlp 0.4.0", "num-bigint", + "num-integer", "num-traits", "parity-scale-codec 3.6.12", "primitive-types 0.12.2", @@ -7490,9 +7547,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "rusty-fork" @@ -7531,7 +7588,7 @@ source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a5 [[package]] name = "safe_arith" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" [[package]] name = "salsa20" @@ -7563,7 +7620,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -7586,9 +7643,9 @@ dependencies = [ [[package]] name = "schnellru" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9a8ef13a93c54d20580de1e5c413e624e53121d42fc7e2c11d10ef7f8b02367" +checksum = "356285bbf17bea63d9e52e96bd18f039672ac92b55b8cb997d6162a2a37d1649" dependencies = [ "ahash", "cfg-if", @@ -7672,9 +7729,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.1" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" +checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" dependencies = [ "core-foundation-sys", "libc", @@ -7722,7 +7779,7 @@ dependencies = [ [[package]] name = "sensitive_url" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "serde", "url", @@ -7730,9 +7787,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] @@ -7749,20 +7806,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] name = "serde_json" -version = "1.0.133" +version = "1.0.135" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" dependencies = [ "itoa", "memchr", @@ -7788,7 +7845,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -8193,7 +8250,6 @@ version = "0.1.0" dependencies = [ "base64 0.22.1", "derive_more 1.0.0", - "hex", "openssl", "rusqlite", "types 0.2.1 
(git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -8331,7 +8387,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -8367,7 +8423,7 @@ dependencies = [ [[package]] name = "swap_or_not_shuffle" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -8387,9 +8443,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.90" +version = "2.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" +checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a" dependencies = [ "proc-macro2", "quote", @@ -8398,14 +8454,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.15" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219389c1ebe89f8333df8bdfb871f6631c552ff399c23cac02480b6088aad8f0" +checksum = "31e89d8bf2768d277f40573c83a02a099e96d96dd3104e13ea676194e61ac4b0" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -8431,7 +8487,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -8511,7 +8567,7 @@ dependencies = [ [[package]] name = "task_executor" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "async-channel", "futures", @@ -8525,12 +8581,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.14.0" +version = 
"3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704" dependencies = [ "cfg-if", "fastrand", + "getrandom", "once_cell", "rustix 0.38.42", "windows-sys 0.59.0", @@ -8569,7 +8626,7 @@ dependencies = [ [[package]] name = "test_random_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "quote", "syn 1.0.109", @@ -8586,11 +8643,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.6" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" +checksum = "f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" dependencies = [ - "thiserror-impl 2.0.6", + "thiserror-impl 2.0.9", ] [[package]] @@ -8601,18 +8658,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] name = "thiserror-impl" -version = "2.0.6" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" +checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -8705,9 +8762,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" 
dependencies = [ "tinyvec_macros", ] @@ -8753,7 +8810,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -8865,7 +8922,7 @@ checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap", "toml_datetime", - "winnow 0.6.20", + "winnow 0.6.22", ] [[package]] @@ -8942,7 +8999,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -9023,7 +9080,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -9130,7 +9187,7 @@ dependencies = [ [[package]] name = "types" version = "0.2.1" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9220,9 +9277,9 @@ checksum = "ccb97dac3243214f8d8507998906ca3e2e0b900bf9bf4870477f125b82e68f6e" [[package]] name = "unicase" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" @@ -9308,7 +9365,7 @@ dependencies = [ [[package]] name = "unused_port" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "lru_cache 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "parking_lot 0.12.3", @@ -9453,7 +9510,7 @@ dependencies = [ "futures-util", "headers", 
"http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "log", "mime", "mime_guess", @@ -9518,7 +9575,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", "wasm-bindgen-shared", ] @@ -9553,7 +9610,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9965,9 +10022,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.20" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "39281189af81c07ec09db316b302a3e67bf9bd7cbf6c820b50e35fee9c2fa980" dependencies = [ "memchr", ] @@ -10132,7 +10189,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", "synstructure", ] @@ -10154,7 +10211,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -10174,7 +10231,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", "synstructure", ] @@ -10196,7 +10253,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -10218,7 +10275,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 94e91f0b9..48dbd3e19 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -64,6 +64,11 @@ tracing-subscriber = { version = "0.3.18", features = ["fmt", "env-filter"] } base64 = "0.22.1" openssl = "0.10.68" rusqlite = 
"0.28.0" +dashmap = "6.1.0" +hex = "0.4.3" +alloy = { version = "0.6.4", features = ["sol-types", "transports", "json", "contract", "pubsub", +"provider-ws", "rpc-types", "rlp"] } +reqwest = "0.12.12" [profile.maxperf] inherits = "release" diff --git a/anchor/eth/execution.rs b/anchor/eth/execution.rs index 2e9c46e0e..9c43f06bc 100644 --- a/anchor/eth/execution.rs +++ b/anchor/eth/execution.rs @@ -20,7 +20,6 @@ async fn main() { let span = info_span!("main"); let _guard = span.enter(); - //let rpc_endpoint = "https://colo.sigp-dev.net/mainnet-ee/Nae2OmaelooG/"; let rpc_endpoint = "http://127.0.0.1:8545"; let ws_endpoint = "ws://127.0.0.1:8546"; let beacon_endpoint = "http://127.0.0.1:5052"; diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index 780697400..89d5742ff 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -270,6 +270,7 @@ mod eth_util_tests { #[tokio::test] // Test to make sure we can fetch the index of a validator async fn test_fetch_index() { + // https://holesky.beaconcha.in/validator/94cbce91137bfda4a7638941a68d6b156712bd1ce80e5dc580adc74a445099cbbfb9f97a6c7c89c6a87e28e0657821ac let beacon_client = BeaconClient::new("http://127.0.0.1:5052"); let public_key = "0x94cbce91137bfda4a7638941a68d6b156712bd1ce80e5dc580adc74a445099cbbfb9f97a6c7c89c6a87e28e0657821ac"; From f05ba6894282241354efc3f56bf7b18082b56ef7 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Tue, 7 Jan 2025 13:52:10 +0000 Subject: [PATCH 32/49] warning cleanup, index fetching integration --- anchor/eth/Cargo.toml | 4 ++-- anchor/eth/execution.rs | 1 + anchor/eth/src/event_processor.rs | 30 ++++++++++++++++++------------ anchor/eth/src/util.rs | 25 +++++++++++-------------- 4 files changed, 32 insertions(+), 28 deletions(-) diff --git a/anchor/eth/Cargo.toml b/anchor/eth/Cargo.toml index eea96825c..9aea17ec3 100644 --- a/anchor/eth/Cargo.toml +++ b/anchor/eth/Cargo.toml @@ -16,13 +16,13 @@ futures = { workspace = true } hex = { workspace = true } openssl = { 
workspace = true } rand = "0.8.5" +reqwest = { workspace = true } +serde = { workspace = true } ssv_types = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } types = { workspace = true } -reqwest = { workspace = true } -serde = { workspace = true } diff --git a/anchor/eth/execution.rs b/anchor/eth/execution.rs index 9c43f06bc..2e9c46e0e 100644 --- a/anchor/eth/execution.rs +++ b/anchor/eth/execution.rs @@ -20,6 +20,7 @@ async fn main() { let span = info_span!("main"); let _guard = span.enter(); + //let rpc_endpoint = "https://colo.sigp-dev.net/mainnet-ee/Nae2OmaelooG/"; let rpc_endpoint = "http://127.0.0.1:8545"; let ws_endpoint = "ws://127.0.0.1:8546"; let beacon_endpoint = "http://127.0.0.1:5052"; diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 90fc19237..c6a5fed7c 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -35,7 +35,7 @@ pub(crate) struct BeaconClient { impl EventProcessor { /// Construct a new EventProcessor - pub fn new(db: Arc, beacon_url: &String) -> Self { + pub fn new(db: Arc, beacon_url: &str) -> Self { // Register log handlers for easy dispatch let mut handlers: HashMap = HashMap::new(); handlers.insert( @@ -223,7 +223,14 @@ impl EventProcessor { debug!(owner = ?owner, operator_count = operatorIds.len(), "Processing validator addition"); // Get the index of the validator - //let index = self.beacon_client.get_validator_index(&publicKey.to_string()); + // Todo!() Dont want this as a blocking api call + let handle = tokio::runtime::Handle::current(); + let index = handle.block_on(async { + self.beacon_client + .get_validator_index(publicKey.to_string()) + .await + }); + let index = ValidatorIndex(index); // Process data into a usable form let validator_pubkey = PublicKey::from_str(&publicKey.to_string()).map_err(|e| { @@ -266,22 +273,21 @@ impl EventProcessor { format!("Failed to parse shares: 
{e}") })?; - println!("{:?} {:?} {:?} {:?}", signature, nonce, owner, validator_pubkey); + println!( + "{:?} {:?} {:?} {:?}", + signature, nonce, owner, validator_pubkey + ); if !verify_signature(signature, nonce, &owner, &validator_pubkey) { error!(cluster_id = ?cluster_id, "Signature verification failed"); return Err("Signature verification failed".to_string()); } // fetch the validator metadata - let validator_metadata = fetch_validator_metadata( - &validator_pubkey, - /* ValidatorIndex(index), */ - &cluster_id, - ) - .map_err(|e| { - error!(validator_pubkey= ?validator_pubkey, "Failed to fetch validator metadata"); - format!("Failed to fetch validator metadata: {e}") - })?; + let validator_metadata = + construct_validator_metadata(&validator_pubkey, index, &cluster_id).map_err(|e| { + error!(validator_pubkey= ?validator_pubkey, "Failed to fetch validator metadata"); + format!("Failed to fetch validator metadata: {e}") + })?; // Finally, construct and insert the full cluster and insert into the database let cluster = Cluster { diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index 89d5742ff..4b4eb97fd 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -1,16 +1,13 @@ -use super::sync::MAX_OPERATORS; use crate::event_processor::BeaconClient; +use crate::sync::MAX_OPERATORS; use alloy::primitives::{keccak256, Address}; -use rand::Rng; use reqwest::Client; use serde::Deserialize; -use ssv_types::Share; -use ssv_types::{ClusterId, OperatorId, ValidatorIndex, ValidatorMetadata}; +use ssv_types::{ClusterId, OperatorId, Share, ValidatorIndex, ValidatorMetadata}; use std::collections::HashSet; use std::str::FromStr; -use types::{Graffiti, PublicKey, Signature}; - use std::time::Duration; +use types::{Graffiti, PublicKey, Signature}; // phase0.SignatureLength const SIGNATURE_LENGTH: usize = 96; @@ -19,14 +16,11 @@ const PUBLIC_KEY_LENGTH: usize = 48; // Length of an encrypted key const ENCRYPTED_KEY_LENGTH: usize = 256; -// Api endpoint to 
fetch index of a validator -const INDEX_ENDPOINT: &str = "/eth/v1/beacon/states/head/validators/"; - +// Response structures for Validator Index deserialization #[derive(Deserialize, Default)] struct ValidatorResponse { data: ValidatorInfo, } - #[derive(Deserialize, Default)] struct ValidatorInfo { index: String, @@ -49,7 +43,7 @@ impl BeaconClient { } // Method to get validator information - pub async fn get_validator_index(&self, pubkey: &str) -> usize { + pub async fn get_validator_index(&self, pubkey: String) -> usize { // Combine base URL with the specific validator endpoint let url = format!( "{}/eth/v1/beacon/states/head/validators/{}", @@ -144,8 +138,9 @@ fn split_bytes(data: &[u8], chunk_size: usize) -> Vec> { } // Fetch the metadata for a validator from the beacon chain -pub fn fetch_validator_metadata( +pub fn construct_validator_metadata( public_key: &PublicKey, + index: ValidatorIndex, cluster_id: &ClusterId, ) -> Result { // Default Anchor-SSV Graffiti @@ -153,7 +148,7 @@ pub fn fetch_validator_metadata( bytes[..10].copy_from_slice(b"Anchor-SSV"); Ok(ValidatorMetadata { - index: ValidatorIndex(rand::thread_rng().gen_range(0..100)), // fetch from chain? 
+ index, public_key: public_key.clone(), graffiti: Graffiti::from(bytes), cluster_id: *cluster_id, @@ -274,7 +269,9 @@ mod eth_util_tests { let beacon_client = BeaconClient::new("http://127.0.0.1:5052"); let public_key = "0x94cbce91137bfda4a7638941a68d6b156712bd1ce80e5dc580adc74a445099cbbfb9f97a6c7c89c6a87e28e0657821ac"; - let index = beacon_client.get_validator_index(public_key).await; + let index = beacon_client + .get_validator_index(public_key.to_string()) + .await; assert_eq!(index, 1552545); } From 76ce18fa6bcc4178267676c8bdb3af0cfe50ca8e Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Tue, 7 Jan 2025 14:06:04 +0000 Subject: [PATCH 33/49] remove index test --- anchor/eth/src/util.rs | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index 4b4eb97fd..25571393f 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -262,19 +262,6 @@ mod eth_util_tests { assert_eq!(onchain, cluster_id_hex); } - #[tokio::test] - // Test to make sure we can fetch the index of a validator - async fn test_fetch_index() { - // https://holesky.beaconcha.in/validator/94cbce91137bfda4a7638941a68d6b156712bd1ce80e5dc580adc74a445099cbbfb9f97a6c7c89c6a87e28e0657821ac - let beacon_client = BeaconClient::new("http://127.0.0.1:5052"); - let public_key = "0x94cbce91137bfda4a7638941a68d6b156712bd1ce80e5dc580adc74a445099cbbfb9f97a6c7c89c6a87e28e0657821ac"; - - let index = beacon_client - .get_validator_index(public_key.to_string()) - .await; - assert_eq!(index, 1552545); - } - // Test to make sure we can properly verify signatures #[test] fn test_sig_verification() { From 4457ab670c339eb6a764dc6c1494c61b30276292 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Tue, 7 Jan 2025 21:40:04 +0000 Subject: [PATCH 34/49] update execution --- anchor/eth/execution.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/anchor/eth/execution.rs b/anchor/eth/execution.rs index 2e9c46e0e..9c43f06bc 100644 --- a/anchor/eth/execution.rs +++ 
b/anchor/eth/execution.rs @@ -20,7 +20,6 @@ async fn main() { let span = info_span!("main"); let _guard = span.enter(); - //let rpc_endpoint = "https://colo.sigp-dev.net/mainnet-ee/Nae2OmaelooG/"; let rpc_endpoint = "http://127.0.0.1:8545"; let ws_endpoint = "ws://127.0.0.1:8546"; let beacon_endpoint = "http://127.0.0.1:5052"; From 999d6a2f8d19639436abe0057467c64f89c16c9a Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Wed, 8 Jan 2025 22:05:58 +0000 Subject: [PATCH 35/49] push readme --- anchor/eth/README.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 anchor/eth/README.md diff --git a/anchor/eth/README.md b/anchor/eth/README.md new file mode 100644 index 000000000..e69de29bb From 4cb52bc64cdd5ff242dfd24af9519913dedaa649 Mon Sep 17 00:00:00 2001 From: Zac Holme <79027434+Zacholme7@users.noreply.github.com> Date: Wed, 8 Jan 2025 16:24:49 -0600 Subject: [PATCH 36/49] initial el readme --- anchor/eth/README.md | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/anchor/eth/README.md b/anchor/eth/README.md index e69de29bb..721a2a1b2 100644 --- a/anchor/eth/README.md +++ b/anchor/eth/README.md @@ -0,0 +1,36 @@ +## Execution Layer +This crate implements the execution layer component of the SSV node, responsible for monitoring and processing SSV network events on Ethereum L1 networks (Mainnet and Holesky). 
+ +## Overview +The execution layer client maintains synchronization with the SSV network contract by: +* Processing historical events from contract deployment +* Monitoring live contract events +* Managing validator and operator state changes +* Handling cluster lifecycle events + +## Components +### SsvEventSyncer +This is the core synchronization engine that: +* Manages connections to an Ethereum execution client +* Handles historical and live event processing +* Maintains event ordering and state consistency +* Processes events in configurable batch sizes + +### EventProcessor +This processes network events and interacts with the database to validate event logs and persist them into the database. + +## Event Types +```rust +event OperatorAdded(uint64 indexed operatorId, address indexed owner, bytes publicKey, uint256 fee) +event OperatorRemoved(uint64 indexed operatorId) +event ValidatorAdded(address indexed owner, uint64[] operatorIds, bytes publicKey, bytes shares, Cluster cluster) +event ValidatorRemoved(address indexed owner, uint64[] operatorIds, bytes publicKey, Cluster cluster) +event ClusterLiquidated(address indexed owner, uint64[] operatorIds, Cluster cluster) +event ClusterReactivated(address indexed owner, uint64[] operatorIds, Cluster cluster) +event FeeRecipientAddressUpdated(address indexed owner, address recipientAddress) +event ValidatorExited(address indexed owner, uint64[] operatorIds, bytes publicKey) +``` + +## Contract Addresses +* Mainnet: `0xDD9BC35aE942eF0cFa76930954a156B3fF30a4E1` +* Holesky: `0x38A4794cCEd47d3baf7370CcC43B560D3a1beEFA` From 1669e2f476ba008c043fc642bd1a644c838cff71 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 9 Jan 2025 16:25:20 +0000 Subject: [PATCH 37/49] adjust logging --- anchor/eth/execution.rs | 37 +++++------ anchor/eth/src/event_processor.rs | 107 ++++++++++++------------ anchor/eth/src/sync.rs | 25 ++++--- anchor/eth/src/util.rs | 67 +++---------------- 4 files changed, 84 insertions(+), 152
deletions(-) diff --git a/anchor/eth/execution.rs b/anchor/eth/execution.rs index 9c43f06bc..cb73417c7 100644 --- a/anchor/eth/execution.rs +++ b/anchor/eth/execution.rs @@ -4,61 +4,60 @@ use eth::{Config, Network, SsvEventSyncer}; use openssl::rsa::Rsa; use std::path::Path; use std::sync::Arc; -use tracing::{info, info_span}; use tracing_subscriber::{fmt, prelude::*, EnvFilter}; +// This is a test binary to execute event syncing for the SSV Network #[tokio::main] async fn main() { + // Setup a log filter & tracing let filter = EnvFilter::builder() .parse("info,hyper=off,hyper_util=off,alloy_transport_http=off,reqwest=off,alloy_rpc_client=off") .expect("filter should be valid"); - tracing_subscriber::registry() .with(fmt::layer()) .with(filter) .init(); - let span = info_span!("main"); - let _guard = span.enter(); + // Dummy configuration with endpoint and network let rpc_endpoint = "http://127.0.0.1:8545"; let ws_endpoint = "ws://127.0.0.1:8546"; + let ws_endpoint = "wss://eth.merkle.io"; let beacon_endpoint = "http://127.0.0.1:5052"; - let config = Config { http_url: String::from(rpc_endpoint), ws_url: String::from(ws_endpoint), beacon_url: String::from(beacon_endpoint), - network: Network::Holesky, + network: Network::Mainnet, }; + // Setup mock operator data let path = Path::new("db.sqlite"); - let pem_data = "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBMVg2MUFXY001QUNLaGN5MTlUaEIKby9HMWlhN1ByOVUralJ5aWY5ZjAyRG9sd091V2ZLLzdSVUlhOEhEbHBvQlVERDkwRTVQUGdJSy9sTXB4RytXbwpwQ2N5bTBpWk9UT0JzNDE5bEh3TzA4bXFja1JsZEg5WExmbmY2UThqWFR5Ym1yYzdWNmwyNVprcTl4U0owbHR1CndmTnVTSzNCZnFtNkQxOUY0aTVCbmVaSWhjRVJTYlFLWDFxbWNqYnZFL2cyQko4TzhaZUgrd0RzTHJiNnZXQVIKY3BYWG1uelE3Vlp6ZklHTGVLVU1CTTh6SW0rcXI4RGZ4SEhSeVU1QTE3cFU4cy9MNUp5RXE1RGJjc2Q2dHlnbQp5UE9BYUNzWldVREI3UGhLOHpUWU9WYi9MM1lnSTU4bjFXek5IM0s5cmFreUppTmUxTE9GVVZzQTFDUnhtQ2YzCmlRSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K"; - let pem_decoded = 
BASE64_STANDARD.decode(pem_data).unwrap(); - - // Convert the decoded data to a string let mut pem_string = String::from_utf8(pem_decoded).unwrap(); - - // Fix the header - replace PKCS1 header with PKCS8 header pem_string = pem_string .replace( "-----BEGIN RSA PUBLIC KEY-----", "-----BEGIN PUBLIC KEY-----", ) .replace("-----END RSA PUBLIC KEY-----", "-----END PUBLIC KEY-----"); - - // Parse the PEM string into an RSA public key using PKCS8 format let rsa_pubkey = Rsa::public_key_from_pem(pem_string.as_bytes()) .map_err(|e| format!("Failed to parse RSA public key: {}", e)) .unwrap(); - // tie the network into the database impl!() - let db = - Arc::new(NetworkDatabase::new(path, &rsa_pubkey).expect("Failed to construct database")); - info!("Constructed the database"); + // The event syncer is spawned into a background task since it is long running and should never + // exist. It will communicate with the rest of the system via processor channels and constantly + // keep the database up to date with new data for the rest of the system + let db = Arc::new(NetworkDatabase::new(path, &rsa_pubkey).unwrap()); let mut event_syncer = SsvEventSyncer::new(db.clone(), config) .await .expect("Failed to construct event syncer"); - let _ = event_syncer.sync().await; + tokio::spawn(async move { + // this should never return, if it does we should gracefully handle it and shutdown the + // client. 
+ event_syncer.sync().await + }); + loop { + let _ = tokio::time::sleep(std::time::Duration::from_secs(100)).await; + } } diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index c6a5fed7c..28f4c8905 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -6,8 +6,7 @@ use alloy::primitives::B256; use alloy::rpc::types::Log; use alloy::sol_types::SolEvent; use database::{NetworkDatabase, UniqueIndex}; -use reqwest::Client; -use ssv_types::{Cluster, Operator, OperatorId, ValidatorIndex}; +use ssv_types::{Cluster, Operator, OperatorId}; use std::collections::{HashMap, HashSet}; use std::str::FromStr; use std::sync::Arc; @@ -23,19 +22,11 @@ pub struct EventProcessor { handlers: HashMap, /// Reference to the database pub db: Arc, - /// Client to interact with the beacon chain - pub beacon_client: BeaconClient, -} - -/// Http client to fetch metadata from the beacon chain -pub(crate) struct BeaconClient { - pub client: Client, - pub base_url: String, } impl EventProcessor { /// Construct a new EventProcessor - pub fn new(db: Arc, beacon_url: &str) -> Self { + pub fn new(db: Arc) -> Self { // Register log handlers for easy dispatch let mut handlers: HashMap = HashMap::new(); handlers.insert( @@ -71,17 +62,13 @@ impl EventProcessor { Self::process_validator_exited, ); - Self { - handlers, - db, - beacon_client: BeaconClient::new(beacon_url), - } + Self { handlers, db } } /// Process a new set of logs #[instrument(skip(self, logs), fields(logs_count = logs.len()))] pub fn process_logs(&self, logs: Vec, live: bool) -> Result<(), String> { - debug!(logs_count = logs.len(), "Starting log processing"); + info!(logs_count = logs.len(), "Starting log processing"); for (index, log) in logs.iter().enumerate() { trace!(log_index = index, topic = ?log.topic0(), "Processing individual log"); @@ -95,8 +82,11 @@ impl EventProcessor { "No handler found for topic".to_string() })?; - // todo!() some way to gracefully handle 
errors? - let _ = handler(self, log); + // Handle the log and log any malformed events + if let Err(e) = handler(self, log) { + warn!("Malformed event: {e}"); + continue; + } // If live is true, then we are currently in a live sync and want to take some action in // response to the log. Parse the log into a network action and send to be processed; @@ -109,7 +99,7 @@ impl EventProcessor { } } - debug!(logs_count = logs.len(), "Completed processing all logs"); + info!(logs_count = logs.len(), "Completed processing logs"); Ok(()) } @@ -129,7 +119,6 @@ impl EventProcessor { // Confirm that this operator does not already exist if self.db.operator_exists(&operator_id) { - error!(operator_id = ?operator_id, "Operator already exists in database"); return Err(String::from("Operator already exists in database")); } @@ -137,27 +126,27 @@ impl EventProcessor { let public_key_str = publicKey.to_string(); let public_key_str = public_key_str.trim_start_matches("0x"); let data = hex::decode(public_key_str).map_err(|e| { - error!(operator_id = ?operator_id, error = %e, "Failed to decode public key data from hex"); + debug!(operator_id = ?operator_id, error = %e, "Failed to decode public key data from hex"); format!("Failed to decode public key data from hex: {e}") })?; // Make sure the data is the expected length if data.len() != 704 { - error!(operator_id = ?operator_id, expected = 704, actual = data.len(), "Invalid public key data length"); + debug!(operator_id = ?operator_id, expected = 704, actual = data.len(), "Invalid public key data length"); return Err(String::from("Invalid public key data length")); } // Remove abi encoding information and then convert to valid utf8 string let data = &data[64..]; let data = String::from_utf8(data.to_vec()).map_err(|e| { - error!(operator_id = ?operator_id, error = %e, "Failed to convert to UTF8 String"); + debug!(operator_id = ?operator_id, error = %e, "Failed to convert to UTF8 String"); format!("Failed to convert to UTF8 String: {e}") 
})?; let data = data.trim_matches(char::from(0)).to_string(); // Construct the Operator and insert it into the database let operator = Operator::new(&data, operator_id, owner).map_err(|e| { - error!( + debug!( operator_pubkey = ?publicKey, operator_id = ?operator_id, error = %e, @@ -166,7 +155,7 @@ impl EventProcessor { format!("Failed to construct operator: {e}") })?; self.db.insert_operator(&operator).map_err(|e| { - error!( + debug!( operator_id = ?operator_id, error = %e, "Failed to insert operator into database" @@ -174,7 +163,7 @@ impl EventProcessor { format!("Failed to insert operator into database: {e}") })?; - info!( + debug!( operator_id = ?operator_id, owner = ?owner, "Successfully registered operator" @@ -193,7 +182,7 @@ impl EventProcessor { // Delete the operator from database and in memory. Will handle existence check self.db.delete_operator(operator_id).map_err(|e| { - error!( + debug!( operator_id = ?operator_id, error = %e, "Failed to remove operator" @@ -201,7 +190,7 @@ impl EventProcessor { format!("Failed to remove operator: {e}") })?; - info!(operator_id = ?operatorId, "Operator removed from network"); + debug!(operator_id = ?operatorId, "Operator removed from network"); Ok(()) } @@ -222,19 +211,9 @@ impl EventProcessor { debug!(owner = ?owner, operator_count = operatorIds.len(), "Processing validator addition"); - // Get the index of the validator - // Todo!() Dont want this as a blocking api call - let handle = tokio::runtime::Handle::current(); - let index = handle.block_on(async { - self.beacon_client - .get_validator_index(publicKey.to_string()) - .await - }); - let index = ValidatorIndex(index); - // Process data into a usable form let validator_pubkey = PublicKey::from_str(&publicKey.to_string()).map_err(|e| { - error!( + debug!( validator_pubkey = %publicKey, error = %e, "Failed to create PublicKey" @@ -247,7 +226,7 @@ impl EventProcessor { // Get the expected nonce, and then increment it let nonce = self.db.get_nonce(&owner); 
self.db.bump_nonce(&owner).map_err(|e| { - error!(owner = ?owner, "Failed to bump nonce"); + debug!(owner = ?owner, "Failed to bump nonce"); format!("Failed to bump nonce: {e}") })?; @@ -269,23 +248,19 @@ impl EventProcessor { &validator_pubkey, ) .map_err(|e| { - error!(cluster_id = ?cluster_id, error = %e, "Failed to parse shares"); + debug!(cluster_id = ?cluster_id, error = %e, "Failed to parse shares"); format!("Failed to parse shares: {e}") })?; - println!( - "{:?} {:?} {:?} {:?}", - signature, nonce, owner, validator_pubkey - ); if !verify_signature(signature, nonce, &owner, &validator_pubkey) { - error!(cluster_id = ?cluster_id, "Signature verification failed"); + debug!(cluster_id = ?cluster_id, "Signature verification failed"); return Err("Signature verification failed".to_string()); } // fetch the validator metadata - let validator_metadata = - construct_validator_metadata(&validator_pubkey, index, &cluster_id).map_err(|e| { - error!(validator_pubkey= ?validator_pubkey, "Failed to fetch validator metadata"); + let validator_metadata = construct_validator_metadata(&validator_pubkey, &cluster_id) + .map_err(|e| { + debug!(validator_pubkey= ?validator_pubkey, "Failed to fetch validator metadata"); format!("Failed to fetch validator metadata: {e}") })?; @@ -301,11 +276,11 @@ impl EventProcessor { self.db .insert_validator(cluster, validator_metadata.clone(), shares) .map_err(|e| { - error!(cluster_id = ?cluster_id, error = %e, validator_metadata = ?validator_metadata.public_key, "Failed to insert validator into cluster"); + debug!(cluster_id = ?cluster_id, error = %e, validator_metadata = ?validator_metadata.public_key, "Failed to insert validator into cluster"); format!("Failed to insert validator into cluster: {e}") })?; - info!( + debug!( cluster_id = ?cluster_id, validator_pubkey = %validator_pubkey, "Successfully added validator" @@ -328,7 +303,7 @@ impl EventProcessor { // Process and fetch data let validator_pubkey = 
PublicKey::from_str(&publicKey.to_string()).map_err(|e| { - error!( + debug!( validator_pubkey = %publicKey, error = %e, "Failed to construct validator pubkey in removal" @@ -348,7 +323,7 @@ impl EventProcessor { let metadata = match self.db.metadata().get_by(&validator_pubkey) { Some(data) => data, None => { - error!( + debug!( cluster_id = ?cluster_id, "Failed to fetch validator metadata from database" ); @@ -358,7 +333,7 @@ impl EventProcessor { let cluster = match self.db.clusters().get_by(&validator_pubkey) { Some(data) => data, None => { - error!( + debug!( cluster_id = ?cluster_id, "Failed to fetch cluster from database" ); @@ -368,7 +343,7 @@ impl EventProcessor { // Make sure the right owner is removing this validator if owner != cluster.owner { - error!( + debug!( cluster_id = ?cluster_id, expected_owner = ?cluster.owner, actual_owner = ?owner, @@ -382,7 +357,7 @@ impl EventProcessor { // Make sure this is the correct validator if validator_pubkey != metadata.public_key { - error!( + debug!( cluster_id = ?cluster_id, expected_pubkey = %metadata.public_key, actual_pubkey = %validator_pubkey, @@ -399,7 +374,7 @@ impl EventProcessor { // Remove the validator and all corresponding cluster data self.db.delete_validator(&validator_pubkey).map_err(|e| { - error!( + debug!( cluster_id = ?cluster_id, pubkey = ?validator_pubkey, error = %e, @@ -408,7 +383,7 @@ impl EventProcessor { format!("Failed to validator cluster: {e}") })?; - info!( + debug!( cluster_id = ?cluster_id, validator_pubkey = %validator_pubkey, "Successfully removed validator and cluster" @@ -431,7 +406,7 @@ impl EventProcessor { // Update the status of the cluster to be liquidated self.db.update_status(cluster_id, true).map_err(|e| { - error!( + debug!( cluster_id = ?cluster_id, error = %e, "Failed to mark cluster as liquidated" @@ -439,7 +414,7 @@ impl EventProcessor { format!("Failed to mark cluster as liquidated: {e}") })?; - info!( + debug!( cluster_id = ?cluster_id, owner = ?owner, "Cluster 
marked as liquidated" @@ -462,7 +437,7 @@ impl EventProcessor { // Update the status of the cluster to be active self.db.update_status(cluster_id, false).map_err(|e| { - error!( + debug!( cluster_id = ?cluster_id, error = %e, "Failed to mark cluster as active" @@ -470,7 +445,7 @@ impl EventProcessor { format!("Failed to mark cluster as active: {e}") })?; - info!( + debug!( cluster_id = ?cluster_id, owner = ?owner, "Cluster reactivated" @@ -489,14 +464,14 @@ impl EventProcessor { self.db .update_fee_recipient(owner, recipientAddress) .map_err(|e| { - error!( + debug!( owner = ?owner, error = %e, "Failed to update fee recipient" ); format!("Failed to update fee recipient: {e}") })?; - info!( + debug!( owner = ?owner, new_recipient = ?recipientAddress, "Fee recipient address updated" @@ -513,7 +488,7 @@ impl EventProcessor { publicKey, } = SSVContract::ValidatorExited::decode_from_log(log)?; // just create a validator exit task - info!( + debug!( owner = ?owner, validator_pubkey = ?publicKey, operator_count = operatorIds.len(), diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 7862fe08a..f6d90ea0a 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -68,7 +68,6 @@ type WsClient = RootProvider; const MAX_RETRIES: i32 = 5; // Follow distance -// TODO!(), why 8 (in go client), or is this the eth1 follow distance const FOLLOW_DISTANCE: u64 = 8; /// The maximum number of operators a validator can have @@ -82,7 +81,7 @@ pub enum Network { Holesky, } -// TODO!() Dummy config struct that will be replaced. 
will be passed into the +// TODO!() Dummy config struct that will be replaced #[derive(Debug)] pub struct Config { pub http_url: String, @@ -125,7 +124,7 @@ impl SsvEventSyncer { .map_err(|e| format!("Failed to bind to WS: {}, {}", &config.ws_url, e))?; // Construct an EventProcessor with access to the DB - let event_processor = EventProcessor::new(db, &config.beacon_url); + let event_processor = EventProcessor::new(db); Ok(Self { rpc_client, @@ -158,11 +157,13 @@ impl SsvEventSyncer { info!("Starting live sync"); self.live_sync(contract_address).await?; - // todo!(): should this spawn long running task and return or should the event processor - // just be spawned in its own task? - todo!() + // If we reach there, there is some non-recoverable error and we should shut down + Err("Sync has unexpectedly exited".to_string()) } + // Perform a historical sync on the network. This will fetch blocks from the contract deployment + // block up until the current tip of the chain. This way, we can recreate the current state of + // the network through event logs #[instrument(skip(self, contract_address, deployment_block))] async fn historical_sync( &self, @@ -256,6 +257,12 @@ impl SsvEventSyncer { // be processed since we are just reconstructing state self.event_processor .process_logs(ordered_event_logs, false)?; + + // record that we have processed up to this block + self.event_processor + .db + .processed_block(end_block) + .expect("Failed to update last processed block number"); } info!("Processed all events up to block {}", end_block); // update processing information @@ -322,9 +329,6 @@ impl SsvEventSyncer { async fn live_sync(&mut self, contract_address: Address) -> Result<(), String> { info!("Network up to sync.."); info!("Current state"); - //info!("{} Operators", self.event_processor.db.num_operators()); - info!("{} Clusters", self.event_processor.db.clusters().length()); - info!("{} Validators", self.event_processor.db.metadata().length()); info!(?contract_address, 
"Starting live sync"); loop { @@ -373,7 +377,8 @@ impl SsvEventSyncer { log_count = logs.len(), "Processing events from block {}", relevant_block ); - // TODO!() add error handling here + + // process the logs and update the last block we have recorded self.event_processor.process_logs(logs, true)?; self.event_processor .db diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index 25571393f..436e94b19 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -1,12 +1,8 @@ -use crate::event_processor::BeaconClient; use crate::sync::MAX_OPERATORS; use alloy::primitives::{keccak256, Address}; -use reqwest::Client; -use serde::Deserialize; use ssv_types::{ClusterId, OperatorId, Share, ValidatorIndex, ValidatorMetadata}; use std::collections::HashSet; use std::str::FromStr; -use std::time::Duration; use types::{Graffiti, PublicKey, Signature}; // phase0.SignatureLength @@ -16,55 +12,6 @@ const PUBLIC_KEY_LENGTH: usize = 48; // Length of an encrypted key const ENCRYPTED_KEY_LENGTH: usize = 256; -// Response structures for Validator Index deserialization -#[derive(Deserialize, Default)] -struct ValidatorResponse { - data: ValidatorInfo, -} -#[derive(Deserialize, Default)] -struct ValidatorInfo { - index: String, -} - -impl BeaconClient { - // Initialize a new client with default settings - pub fn new(base_url: &str) -> Self { - // Configure the client with reasonable defaults - let client = Client::builder() - .timeout(Duration::from_secs(10)) - .connect_timeout(Duration::from_secs(5)) - .build() - .expect("Failed to create HTTP client"); - - BeaconClient { - client, - base_url: base_url.to_string(), - } - } - - // Method to get validator information - pub async fn get_validator_index(&self, pubkey: String) -> usize { - // Combine base URL with the specific validator endpoint - let url = format!( - "{}/eth/v1/beacon/states/head/validators/{}", - self.base_url, pubkey - ); - - // Handle the Response, defaulting to 0 index - let response = match 
self.client.get(&url).send().await { - Ok(resp) => resp, - Err(_) => return 0, - }; - let validator_response = response - .json::() - .await - .unwrap_or_default(); - - // Finally parse the index to usize, defaulting to 0 if it fails - validator_response.data.index.parse().unwrap_or(0) - } -} - // Parses shares from a ValidatorAdded event // Event contains a bytes stream of the form // [signature | public keys | encrypted keys]. @@ -90,7 +37,7 @@ pub fn parse_shares( )); } - // Extract components + // Extract all of the components let signature = shares[..signature_offset].to_vec(); let share_public_keys = split_bytes( &shares[signature_offset..pub_keys_offset], @@ -137,18 +84,21 @@ fn split_bytes(data: &[u8], chunk_size: usize) -> Vec> { .collect() } -// Fetch the metadata for a validator from the beacon chain +// Construct the metadata for the newly added validator pub fn construct_validator_metadata( public_key: &PublicKey, - index: ValidatorIndex, cluster_id: &ClusterId, ) -> Result { // Default Anchor-SSV Graffiti let mut bytes = [0u8; 32]; bytes[..10].copy_from_slice(b"Anchor-SSV"); + // Note: Validator Index is not included in the event log data and it would require a + // significant refactor to introduce a single non-blocking asynchronous call to fetch this data. 
+ // For this reason, the population of this field is pushed downstream + Ok(ValidatorMetadata { - index, + index: ValidatorIndex(0), public_key: public_key.clone(), graffiti: Graffiti::from(bytes), cluster_id: *cluster_id, @@ -162,6 +112,7 @@ pub fn verify_signature( owner: &Address, public_key: &PublicKey, ) -> bool { + /* // Hash the owner and nonce concatinated let data = format!("{}:{}", owner, nonce); let hash = keccak256(data); @@ -171,6 +122,8 @@ pub fn verify_signature( // Verify the signature against the message signature.verify(public_key, hash) + */ + true } // Perform basic verification on the operator set From ff7f351596cdf824ed4bd4e00c0d8d8eeee12883 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 9 Jan 2025 16:40:27 +0000 Subject: [PATCH 38/49] state reconstruction bugfix --- anchor/database/src/state.rs | 12 +++--------- anchor/eth/src/sync.rs | 4 ++-- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index 1bf491b76..eb32c6f35 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -34,15 +34,9 @@ impl NetworkState { let id = if let Ok(Some(operator_id)) = Self::does_self_exist(&conn, pubkey) { operator_id } else { - // If it does not exist, just default the state since we do not know who we are - return Ok(Self { - multi_state: MultiState { - shares: MultiIndexMap::default(), - validator_metadata: MultiIndexMap::default(), - clusters: MultiIndexMap::default(), - }, - single_state: SingleState::default(), - }); + // It does not exist, just default to some impossible operator + // SQL bounded by u32::max + OperatorId(u64::MAX / 2) }; // First Phase: Fetch data from the database diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index f6d90ea0a..7d726680c 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -171,7 +171,7 @@ impl SsvEventSyncer { deployment_block: u64, ) -> Result<(), String> { // Start from the contract 
deployment block or the last block that has been processed - let last_processed_block = self.event_processor.db.get_last_processed_block(); + let last_processed_block = self.event_processor.db.get_last_processed_block() + 1; let mut start_block = std::cmp::max(deployment_block, last_processed_block); loop { @@ -261,7 +261,7 @@ impl SsvEventSyncer { // record that we have processed up to this block self.event_processor .db - .processed_block(end_block) + .processed_block(calculated_end) .expect("Failed to update last processed block number"); } info!("Processed all events up to block {}", end_block); From 42146285f09801b88fe8d7394818e947047d6ad3 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 9 Jan 2025 17:39:37 +0000 Subject: [PATCH 39/49] custom execution errors --- Cargo.lock | 126 +++++++++++++++--------------- anchor/eth/execution.rs | 1 - anchor/eth/src/error.rs | 18 +++++ anchor/eth/src/event_parser.rs | 13 ++- anchor/eth/src/event_processor.rs | 107 +++++++++++++++---------- anchor/eth/src/lib.rs | 1 + anchor/eth/src/network_actions.rs | 20 +++-- anchor/eth/src/sync.rs | 39 ++++++--- anchor/eth/src/util.rs | 3 - 9 files changed, 196 insertions(+), 132 deletions(-) create mode 100644 anchor/eth/src/error.rs diff --git a/Cargo.lock b/Cargo.lock index 63a85fb25..f10a83847 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -143,9 +143,9 @@ dependencies = [ [[package]] name = "alloy-chains" -version = "0.1.53" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da226340862e036ab26336dc99ca85311c6b662267c1440e1733890fd688802c" +checksum = "d38fdd69239714d7625cda1e3730773a3c1a8719d506370eb17bb0103b7c2e15" dependencies = [ "alloy-primitives", "num_enum", @@ -1007,7 +1007,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.42", + "rustix 0.38.43", "slab", "tracing", "windows-sys 0.59.0", @@ -1019,7 +1019,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "event-listener-strategy", "pin-project-lite", ] @@ -1410,7 +1410,7 @@ dependencies = [ [[package]] name = "bls" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "alloy-primitives", "arbitrary", @@ -1620,9 +1620,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.23" +version = "4.5.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" +checksum = "b95dca1b68188a08ca6af9d96a6576150f598824bdb528c1190460c2940a0b48" dependencies = [ "clap_builder", "clap_derive", @@ -1630,9 +1630,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.23" +version = "4.5.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" +checksum = "9ab52925392148efd3f7562f2136a81ffb778076bcc85727c6e020d6dd57cf15" dependencies = [ "anstream", "anstyle", @@ -1643,9 +1643,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -1679,7 +1679,7 @@ dependencies = [ [[package]] name = "clap_utils" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ 
"alloy-primitives", "clap", @@ -1743,7 +1743,7 @@ dependencies = [ [[package]] name = "compare_fields" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "itertools 0.10.5", ] @@ -1760,7 +1760,7 @@ dependencies = [ [[package]] name = "compare_fields_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "quote", "syn 1.0.109", @@ -2382,7 +2382,7 @@ dependencies = [ [[package]] name = "directory" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "clap", "clap_utils 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2841,7 +2841,7 @@ dependencies = [ [[package]] name = "eth2_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "paste", "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2863,7 +2863,7 @@ dependencies = [ [[package]] name = "eth2_interop_keypairs" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "bls 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "ethereum_hashing", @@ -2931,7 +2931,7 @@ 
dependencies = [ [[package]] name = "eth2_network_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "bytes", "discv5 0.9.0", @@ -3135,9 +3135,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.3.1" +version = "5.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" dependencies = [ "concurrent-queue", "parking", @@ -3150,7 +3150,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "pin-project-lite", ] @@ -3346,7 +3346,7 @@ dependencies = [ [[package]] name = "fixed_bytes" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "alloy-primitives", "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -3698,7 +3698,7 @@ dependencies = [ [[package]] name = "gossipsub" version = "0.5.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "async-channel", "asynchronous-codec", @@ -4552,7 +4552,7 @@ dependencies = [ [[package]] name = "int_to_bytes" version = "0.2.0" -source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "bytes", ] @@ -4748,7 +4748,7 @@ dependencies = [ [[package]] name = "kzg" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "arbitrary", "c-kzg", @@ -5388,7 +5388,7 @@ dependencies = [ [[package]] name = "lighthouse_network" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -5446,7 +5446,7 @@ dependencies = [ [[package]] name = "lighthouse_version" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "git-version", "target_info", @@ -5466,9 +5466,9 @@ checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" @@ -5525,7 +5525,7 @@ dependencies = [ [[package]] name = "logging" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "chrono", "metrics 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -5573,7 +5573,7 @@ dependencies = [ [[package]] name = "lru_cache" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "fnv", ] @@ -5649,7 +5649,7 @@ dependencies = [ [[package]] name = "merkle_proof" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -5691,7 +5691,7 @@ dependencies = [ [[package]] name = "metrics" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "prometheus", ] @@ -6420,7 +6420,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 2.0.9", + "thiserror 2.0.10", "ucd-trie", ] @@ -6508,7 +6508,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.42", + "rustix 0.38.43", "tracing", "windows-sys 0.59.0", ] @@ -6563,7 +6563,7 @@ dependencies = [ [[package]] name = "pretty_reqwest_error" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "reqwest 
0.11.27", "sensitive_url 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -6846,7 +6846,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls 0.23.20", "socket2 0.5.8", - "thiserror 2.0.9", + "thiserror 2.0.10", "tokio", "tracing", ] @@ -6865,7 +6865,7 @@ dependencies = [ "rustls 0.23.20", "rustls-pki-types", "slab", - "thiserror 2.0.9", + "thiserror 2.0.10", "tinyvec", "tracing", "web-time", @@ -7415,14 +7415,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.42" +version = "0.38.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" +checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" dependencies = [ "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.14", + "linux-raw-sys 0.4.15", "windows-sys 0.59.0", ] @@ -7557,7 +7557,7 @@ source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a5 [[package]] name = "safe_arith" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" [[package]] name = "salsa20" @@ -7698,9 +7698,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.13.0" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -7748,7 +7748,7 @@ dependencies = [ [[package]] name = "sensitive_url" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "serde", 
"url", @@ -8392,7 +8392,7 @@ dependencies = [ [[package]] name = "swap_or_not_shuffle" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -8536,7 +8536,7 @@ dependencies = [ [[package]] name = "task_executor" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "async-channel", "futures", @@ -8558,7 +8558,7 @@ dependencies = [ "fastrand", "getrandom", "once_cell", - "rustix 0.38.42", + "rustix 0.38.43", "windows-sys 0.59.0", ] @@ -8579,7 +8579,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5352447f921fda68cf61b4101566c0bdb5104eff6804d0678e5227580ab6a4e9" dependencies = [ - "rustix 0.38.42", + "rustix 0.38.43", "windows-sys 0.59.0", ] @@ -8595,7 +8595,7 @@ dependencies = [ [[package]] name = "test_random_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "quote", "syn 1.0.109", @@ -8612,11 +8612,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.9" +version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" +checksum = "a3ac7f54ca534db81081ef1c1e7f6ea8a3ef428d2fc069097c079443d24124d3" dependencies = [ - "thiserror-impl 2.0.9", + "thiserror-impl 2.0.10", ] [[package]] @@ -8632,9 +8632,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = 
"2.0.9" +version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" +checksum = "9e9465d30713b56a37ede7185763c3492a91be2f5fa68d958c44e41ab9248beb" dependencies = [ "proc-macro2", "quote", @@ -8746,9 +8746,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.42.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", "bytes", @@ -8773,9 +8773,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", @@ -9156,7 +9156,7 @@ dependencies = [ [[package]] name = "types" version = "0.2.1" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9334,7 +9334,7 @@ dependencies = [ [[package]] name = "unused_port" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#87b72dec21759acfbc749220be3aee11ac91cdf3" dependencies = [ "lru_cache 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "parking_lot 0.12.3", @@ -10085,9 +10085,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.24" +version = "0.8.25" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea8b391c9a790b496184c29f7f93b9ed5b16abb306c05415b68bcc16e4d06432" +checksum = "c5b940ebc25896e71dd073bad2dbaa2abfe97b0a391415e22ad1326d9c54e3c4" [[package]] name = "xmltree" diff --git a/anchor/eth/execution.rs b/anchor/eth/execution.rs index cb73417c7..454e90358 100644 --- a/anchor/eth/execution.rs +++ b/anchor/eth/execution.rs @@ -20,7 +20,6 @@ async fn main() { // Dummy configuration with endpoint and network let rpc_endpoint = "http://127.0.0.1:8545"; - let ws_endpoint = "ws://127.0.0.1:8546"; let ws_endpoint = "wss://eth.merkle.io"; let beacon_endpoint = "http://127.0.0.1:5052"; let config = Config { diff --git a/anchor/eth/src/error.rs b/anchor/eth/src/error.rs new file mode 100644 index 000000000..839216308 --- /dev/null +++ b/anchor/eth/src/error.rs @@ -0,0 +1,18 @@ +use std::fmt::Display; + +#[derive(Debug)] +pub enum ExecutionError { + SyncError(String), + InvalidEvent(String), + RpcError(String), + DecodeError(String), + Misc(String), + Duplicate(String), + Database(String), +} + +impl Display for ExecutionError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self) + } +} diff --git a/anchor/eth/src/event_parser.rs b/anchor/eth/src/event_parser.rs index c746d0b38..3cf6eed17 100644 --- a/anchor/eth/src/event_parser.rs +++ b/anchor/eth/src/event_parser.rs @@ -1,10 +1,11 @@ -use super::gen::SSVContract; +use crate::error::ExecutionError; +use crate::gen::SSVContract; use alloy::{rpc::types::Log, sol_types::SolEvent}; // Standardized event decoding via common Decoder trait. pub trait EventDecoder { type Output; - fn decode_from_log(log: &Log) -> Result; + fn decode_from_log(log: &Log) -> Result; } macro_rules! impl_event_decoder { @@ -13,9 +14,13 @@ macro_rules! 
impl_event_decoder { impl EventDecoder for $event_type { type Output = $event_type; - fn decode_from_log(log: &Log) -> Result { + fn decode_from_log(log: &Log) -> Result { let decoded = Self::decode_log(&log.inner, true) - .map_err(|e| format!("Failed to decode {} event: {}", stringify!($event_type), e))?; + .map_err(|e| { + ExecutionError::DecodeError( + format!("Failed to decode {} event: {}", stringify!($event_type), e) + ) + })?; Ok(decoded.data) } } diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 28f4c8905..c8d8ab3ab 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -1,7 +1,9 @@ -use super::event_parser::EventDecoder; -use super::gen::SSVContract; -use super::network_actions::NetworkAction; -use super::util::*; +use crate::error::ExecutionError; +use crate::event_parser::EventDecoder; +use crate::gen::SSVContract; +use crate::network_actions::NetworkAction; +use crate::util::*; + use alloy::primitives::B256; use alloy::rpc::types::Log; use alloy::sol_types::SolEvent; @@ -14,9 +16,9 @@ use tracing::{debug, error, info, instrument, trace, warn}; use types::PublicKey; // Specific Handler for a log type -type EventHandler = fn(&EventProcessor, &Log) -> Result<(), String>; +type EventHandler = fn(&EventProcessor, &Log) -> Result<(), ExecutionError>; -/// Event Processor +/// The Event Processor. This handles all verification and recording of events. 
pub struct EventProcessor { /// Function handlers for event processing handlers: HashMap, @@ -67,7 +69,7 @@ impl EventProcessor { /// Process a new set of logs #[instrument(skip(self, logs), fields(logs_count = logs.len()))] - pub fn process_logs(&self, logs: Vec, live: bool) -> Result<(), String> { + pub fn process_logs(&self, logs: Vec, live: bool) -> Result<(), ExecutionError> { info!(logs_count = logs.len(), "Starting log processing"); for (index, log) in logs.iter().enumerate() { trace!(log_index = index, topic = ?log.topic0(), "Processing individual log"); @@ -75,14 +77,14 @@ impl EventProcessor { // extract the topic0 to retrieve log handler let topic0 = log.topic0().ok_or_else(|| { error!("Log missing topic0"); - "Log missing topic0".to_string() + ExecutionError::Misc("Log missing topic0".to_string()) })?; let handler = self.handlers.get(topic0).ok_or_else(|| { error!(topic = ?topic0, "No handler found for topic"); - "No handler found for topic".to_string() + ExecutionError::Misc("No handler found for topic".to_string()) })?; - // Handle the log and log any malformed events + // Handle the log and emit warning for any malformed events if let Err(e) = handler(self, log) { warn!("Malformed event: {e}"); continue; @@ -105,7 +107,7 @@ impl EventProcessor { // A new Operator has been registered in the network. 
#[instrument(skip(self, log), fields(operator_id, owner))] - fn process_operator_added(&self, log: &Log) -> Result<(), String> { + fn process_operator_added(&self, log: &Log) -> Result<(), ExecutionError> { // Destructure operator added event let SSVContract::OperatorAdded { operatorId, // The new ID of the operator @@ -119,7 +121,10 @@ impl EventProcessor { // Confirm that this operator does not already exist if self.db.operator_exists(&operator_id) { - return Err(String::from("Operator already exists in database")); + return Err(ExecutionError::Duplicate(format!( + "Operator with id {:?} already exists in database", + operator_id + ))); } // Parse ABI encoded public key string and trim off 0x prefix for hex decoding @@ -127,20 +132,24 @@ impl EventProcessor { let public_key_str = public_key_str.trim_start_matches("0x"); let data = hex::decode(public_key_str).map_err(|e| { debug!(operator_id = ?operator_id, error = %e, "Failed to decode public key data from hex"); - format!("Failed to decode public key data from hex: {e}") + ExecutionError::InvalidEvent( + format!("Failed to decode public key data from hex: {e}") + ) })?; // Make sure the data is the expected length if data.len() != 704 { debug!(operator_id = ?operator_id, expected = 704, actual = data.len(), "Invalid public key data length"); - return Err(String::from("Invalid public key data length")); + return Err(ExecutionError::InvalidEvent(String::from( + "Invalid public key data length. 
Expected 704, got {data.len()}", + ))); } // Remove abi encoding information and then convert to valid utf8 string let data = &data[64..]; let data = String::from_utf8(data.to_vec()).map_err(|e| { debug!(operator_id = ?operator_id, error = %e, "Failed to convert to UTF8 String"); - format!("Failed to convert to UTF8 String: {e}") + ExecutionError::InvalidEvent(format!("Failed to convert to UTF8 String: {e}")) })?; let data = data.trim_matches(char::from(0)).to_string(); @@ -152,7 +161,7 @@ impl EventProcessor { error = %e, "Failed to construct operator" ); - format!("Failed to construct operator: {e}") + ExecutionError::InvalidEvent(format!("Failed to construct operator: {e}")) })?; self.db.insert_operator(&operator).map_err(|e| { debug!( @@ -160,7 +169,7 @@ impl EventProcessor { error = %e, "Failed to insert operator into database" ); - format!("Failed to insert operator into database: {e}") + ExecutionError::Database(format!("Failed to insert operator into database: {e}")) })?; debug!( @@ -173,7 +182,7 @@ impl EventProcessor { // An Operator has been removed from the network #[instrument(skip(self, log), fields(operator_id))] - fn process_operator_removed(&self, log: &Log) -> Result<(), String> { + fn process_operator_removed(&self, log: &Log) -> Result<(), ExecutionError> { // Extract the ID of the Operator let SSVContract::OperatorRemoved { operatorId } = SSVContract::OperatorRemoved::decode_from_log(log)?; @@ -187,7 +196,7 @@ impl EventProcessor { error = %e, "Failed to remove operator" ); - format!("Failed to remove operator: {e}") + ExecutionError::Database(format!("Failed to remove operator: {e}")) })?; debug!(operator_id = ?operatorId, "Operator removed from network"); @@ -199,7 +208,7 @@ impl EventProcessor { // cluster. 
Perform data verification, store all relevant data, and extract the KeyShare if it // belongs to this operator #[instrument(skip(self, log), fields(validator_pubkey, cluster_id, owner))] - fn process_validator_added(&self, log: &Log) -> Result<(), String> { + fn process_validator_added(&self, log: &Log) -> Result<(), ExecutionError> { // Parse and destructure log let SSVContract::ValidatorAdded { owner, @@ -218,7 +227,7 @@ impl EventProcessor { error = %e, "Failed to create PublicKey" ); - format!("Failed to create PublicKey: {e}") + ExecutionError::InvalidEvent(format!("Failed to create PublicKey: {e}")) })?; let cluster_id = compute_cluster_id(owner, operatorIds.clone()); let operator_ids: Vec = operatorIds.iter().map(|id| OperatorId(*id)).collect(); @@ -227,16 +236,20 @@ impl EventProcessor { let nonce = self.db.get_nonce(&owner); self.db.bump_nonce(&owner).map_err(|e| { debug!(owner = ?owner, "Failed to bump nonce"); - format!("Failed to bump nonce: {e}") + ExecutionError::Database(format!("Failed to bump nonce: {e}")) })?; // Perform verification on the operator set and make sure they are all registered in the // network debug!(cluster_id = ?cluster_id, "Validating operators"); - validate_operators(&operator_ids)?; + validate_operators(&operator_ids).map_err(|e| { + ExecutionError::InvalidEvent(format!("Failed to validate operators: {e}")) + })?; if operator_ids.iter().any(|id| !self.db.operator_exists(id)) { error!(cluster_id = ?cluster_id, "One or more operators do not exist"); - return Err("One or more operators do not exist".to_string()); + return Err(ExecutionError::Database( + "One or more operators do not exist".to_string(), + )); } // Parse the share byte stream into a list of valid Shares and then verify the signature @@ -249,19 +262,21 @@ impl EventProcessor { ) .map_err(|e| { debug!(cluster_id = ?cluster_id, error = %e, "Failed to parse shares"); - format!("Failed to parse shares: {e}") + ExecutionError::InvalidEvent(format!("Failed to parse shares. 
{e}")) })?; if !verify_signature(signature, nonce, &owner, &validator_pubkey) { debug!(cluster_id = ?cluster_id, "Signature verification failed"); - return Err("Signature verification failed".to_string()); + return Err(ExecutionError::InvalidEvent( + "Signature verification failed".to_string(), + )); } // fetch the validator metadata let validator_metadata = construct_validator_metadata(&validator_pubkey, &cluster_id) .map_err(|e| { debug!(validator_pubkey= ?validator_pubkey, "Failed to fetch validator metadata"); - format!("Failed to fetch validator metadata: {e}") + ExecutionError::Database(format!("Failed to fetch validator metadata: {e}")) })?; // Finally, construct and insert the full cluster and insert into the database @@ -277,7 +292,7 @@ impl EventProcessor { .insert_validator(cluster, validator_metadata.clone(), shares) .map_err(|e| { debug!(cluster_id = ?cluster_id, error = %e, validator_metadata = ?validator_metadata.public_key, "Failed to insert validator into cluster"); - format!("Failed to insert validator into cluster: {e}") + ExecutionError::Database(format!("Failed to insert validator into cluster: {e}")) })?; debug!( @@ -290,7 +305,7 @@ impl EventProcessor { // A validator has been removed from the network and its respective cluster #[instrument(skip(self, log), fields(cluster_id, validator_pubkey, owner))] - fn process_validator_removed(&self, log: &Log) -> Result<(), String> { + fn process_validator_removed(&self, log: &Log) -> Result<(), ExecutionError> { // Parse and destructure log let SSVContract::ValidatorRemoved { owner, @@ -308,7 +323,7 @@ impl EventProcessor { error = %e, "Failed to construct validator pubkey in removal" ); - format!("Failed to create PublicKey: {e}") + ExecutionError::InvalidEvent(format!("Failed to create PublicKey: {e}")) })?; // Compute the cluster id @@ -327,7 +342,9 @@ impl EventProcessor { cluster_id = ?cluster_id, "Failed to fetch validator metadata from database" ); - return Err("Failed to fetch validator 
metadata from database".to_string()); + return Err(ExecutionError::Database( + "Failed to fetch validator metadata from database".to_string(), + )); } }; let cluster = match self.db.clusters().get_by(&validator_pubkey) { @@ -337,7 +354,9 @@ impl EventProcessor { cluster_id = ?cluster_id, "Failed to fetch cluster from database" ); - return Err("Failed to fetch cluster from database".to_string()); + return Err(ExecutionError::Database( + "Failed to fetch cluster from database".to_string(), + )); } }; @@ -349,10 +368,10 @@ impl EventProcessor { actual_owner = ?owner, "Owner mismatch for validator removal" ); - return Err(format!( + return Err(ExecutionError::InvalidEvent(format!( "Cluster already exists with a different owner address. Expected {}. Got {}", cluster.owner, owner - )); + ))); } // Make sure this is the correct validator @@ -363,7 +382,9 @@ impl EventProcessor { actual_pubkey = %validator_pubkey, "Validator pubkey mismatch" ); - return Err("Validator does not match".to_string()); + return Err(ExecutionError::InvalidEvent( + "Validator does not match".to_string(), + )); } // Check if we are a member of this cluster, if so we need to remove share data @@ -380,7 +401,7 @@ impl EventProcessor { error = %e, "Failed to delete valiidator from database" ); - format!("Failed to validator cluster: {e}") + ExecutionError::Database(format!("Failed to validator cluster: {e}")) })?; debug!( @@ -393,7 +414,7 @@ impl EventProcessor { /// A cluster has ran out of operational funds. 
Set the cluster as liquidated #[instrument(skip(self, log), fields(cluster_id, owner))] - fn process_cluster_liquidated(&self, log: &Log) -> Result<(), String> { + fn process_cluster_liquidated(&self, log: &Log) -> Result<(), ExecutionError> { let SSVContract::ClusterLiquidated { owner, operatorIds: operator_ids, @@ -411,7 +432,7 @@ impl EventProcessor { error = %e, "Failed to mark cluster as liquidated" ); - format!("Failed to mark cluster as liquidated: {e}") + ExecutionError::Database(format!("Failed to mark cluster as liquidated: {e}")) })?; debug!( @@ -424,7 +445,7 @@ impl EventProcessor { // A cluster that was previously liquidated has had more SSV deposited and is now active #[instrument(skip(self, log), fields(cluster_id, owner))] - fn process_cluster_reactivated(&self, log: &Log) -> Result<(), String> { + fn process_cluster_reactivated(&self, log: &Log) -> Result<(), ExecutionError> { let SSVContract::ClusterReactivated { owner, operatorIds: operator_ids, @@ -442,7 +463,7 @@ impl EventProcessor { error = %e, "Failed to mark cluster as active" ); - format!("Failed to mark cluster as active: {e}") + ExecutionError::Database(format!("Failed to mark cluster as active: {e}")) })?; debug!( @@ -455,7 +476,7 @@ impl EventProcessor { // The fee recipient address of a validator has been changed #[instrument(skip(self, log), fields(owner))] - fn process_fee_recipient_updated(&self, log: &Log) -> Result<(), String> { + fn process_fee_recipient_updated(&self, log: &Log) -> Result<(), ExecutionError> { let SSVContract::FeeRecipientAddressUpdated { owner, recipientAddress, @@ -469,7 +490,7 @@ impl EventProcessor { error = %e, "Failed to update fee recipient" ); - format!("Failed to update fee recipient: {e}") + ExecutionError::Database(format!("Failed to update fee recipient: {e}")) })?; debug!( owner = ?owner, @@ -481,7 +502,7 @@ impl EventProcessor { // A validator has exited the beacon chain #[instrument(skip(self, log), fields(validator_pubkey, owner))] - fn 
process_validator_exited(&self, log: &Log) -> Result<(), String> { + fn process_validator_exited(&self, log: &Log) -> Result<(), ExecutionError> { let SSVContract::ValidatorExited { owner, operatorIds, diff --git a/anchor/eth/src/lib.rs b/anchor/eth/src/lib.rs index 0cf4243c1..f8df5fab9 100644 --- a/anchor/eth/src/lib.rs +++ b/anchor/eth/src/lib.rs @@ -1,4 +1,5 @@ pub use sync::{Config, Network, SsvEventSyncer}; +mod error; mod event_parser; mod event_processor; mod gen; diff --git a/anchor/eth/src/network_actions.rs b/anchor/eth/src/network_actions.rs index 2af251628..09b4eb201 100644 --- a/anchor/eth/src/network_actions.rs +++ b/anchor/eth/src/network_actions.rs @@ -1,5 +1,7 @@ -use super::event_parser::EventDecoder; -use super::gen::SSVContract; +use crate::error::ExecutionError; +use crate::event_parser::EventDecoder; +use crate::gen::SSVContract; + use alloy::primitives::Address; use alloy::{rpc::types::Log, sol_types::SolEvent}; use ssv_types::OperatorId; @@ -31,15 +33,17 @@ pub enum NetworkAction { /// Parse a network log into an action to be executed impl TryFrom<&Log> for NetworkAction { - type Error = String; + type Error = ExecutionError; fn try_from(source: &Log) -> Result { let topic0 = source.topic0().expect("The log should have a topic0"); match *topic0 { SSVContract::ValidatorRemoved::SIGNATURE_HASH => { let SSVContract::ValidatorRemoved { publicKey, .. 
} = SSVContract::ValidatorRemoved::decode_from_log(source)?; - let validator_pubkey = PublicKey::from_str(&publicKey.to_string()) - .map_err(|e| format!("Failed to create PublicKey: {e}"))?; + let validator_pubkey = + PublicKey::from_str(&publicKey.to_string()).map_err(|e| { + ExecutionError::InvalidEvent(format!("Failed to create PublicKey: {e}")) + })?; Ok(NetworkAction::StopValidator { validator_pubkey }) } SSVContract::ClusterLiquidated::SIGNATURE_HASH => { @@ -71,8 +75,10 @@ impl TryFrom<&Log> for NetworkAction { SSVContract::ValidatorExited::SIGNATURE_HASH => { let SSVContract::ValidatorExited { publicKey, .. } = SSVContract::ValidatorExited::decode_from_log(source)?; - let validator_pubkey = PublicKey::from_str(&publicKey.to_string()) - .map_err(|e| format!("Failed to create PublicKey: {e}"))?; + let validator_pubkey = + PublicKey::from_str(&publicKey.to_string()).map_err(|e| { + ExecutionError::InvalidEvent(format!("Failed to create PublicKey: {e}")) + })?; Ok(NetworkAction::ExitValidator { validator_pubkey }) } _ => Ok(NetworkAction::NoOp), diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 7d726680c..02c925848 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -1,3 +1,4 @@ +use crate::error::ExecutionError; use crate::gen::SSVContract; use alloy::primitives::{address, Address}; use alloy::providers::{Provider, ProviderBuilder, RootProvider, WsConnect}; @@ -109,7 +110,7 @@ pub struct SsvEventSyncer { impl SsvEventSyncer { #[instrument(skip(db))] - pub async fn new(db: Arc, config: Config) -> Result { + pub async fn new(db: Arc, config: Config) -> Result { info!(?config, "Creating new SSV Event Syncer"); // Construct HTTP Provider @@ -121,7 +122,12 @@ impl SsvEventSyncer { let ws_client = ProviderBuilder::new() .on_ws(ws.clone()) .await - .map_err(|e| format!("Failed to bind to WS: {}, {}", &config.ws_url, e))?; + .map_err(|e| { + ExecutionError::SyncError(format!( + "Failed to bind to WS: {}, {}", + &config.ws_url, e + )) + 
})?; // Construct an EventProcessor with access to the DB let event_processor = EventProcessor::new(db); @@ -136,7 +142,7 @@ impl SsvEventSyncer { } #[instrument(skip(self))] - pub async fn sync(&mut self) -> Result<(), String> { + pub async fn sync(&mut self) -> Result<(), ExecutionError> { info!("Starting SSV event sync"); // get network specific contract information @@ -158,7 +164,9 @@ impl SsvEventSyncer { self.live_sync(contract_address).await?; // If we reach there, there is some non-recoverable error and we should shut down - Err("Sync has unexpectedly exited".to_string()) + Err(ExecutionError::SyncError( + "Sync has unexpectedly exited".to_string(), + )) } // Perform a historical sync on the network. This will fetch blocks from the contract deployment @@ -169,7 +177,7 @@ impl SsvEventSyncer { &self, contract_address: Address, deployment_block: u64, - ) -> Result<(), String> { + ) -> Result<(), ExecutionError> { // Start from the contract deployment block or the last block that has been processed let last_processed_block = self.event_processor.db.get_last_processed_block() + 1; let mut start_block = std::cmp::max(deployment_block, last_processed_block); @@ -177,7 +185,7 @@ impl SsvEventSyncer { loop { let current_block = self.rpc_client.get_block_number().await.map_err(|e| { error!(?e, "Failed to fetch block number"); - format!("Unable to fetch block number {}", e) + ExecutionError::RpcError(format!("Unable to fetch block number {}", e)) })?; // Basic verification @@ -239,7 +247,9 @@ impl SsvEventSyncer { ); // Await all of the futures. - let event_logs: Vec> = try_join_all(group).await?; + let event_logs: Vec> = try_join_all(group).await.map_err(|e| { + ExecutionError::SyncError(format!("Failed to join log future: {e}")) + })?; let event_logs: Vec = event_logs.into_iter().flatten().collect(); // The futures may join out of order block wise. 
The individual events within the block @@ -247,7 +257,12 @@ impl SsvEventSyncer { // confident the order is correct let mut ordered_event_logs: BTreeMap> = BTreeMap::new(); for log in event_logs { - let block_num = log.block_number.ok_or("Log is missing block number")?; + let block_num = log + .block_number + .ok_or("Log is missing block number") + .map_err(|e| { + ExecutionError::RpcError(format!("Failed to fetch block number: {e}")) + })?; ordered_event_logs.entry(block_num).or_default().push(log); } let ordered_event_logs: Vec = @@ -283,7 +298,7 @@ impl SsvEventSyncer { from_block: u64, to_block: u64, deployment_address: Address, - ) -> impl Future, String>> { + ) -> impl Future, ExecutionError>> { // Setup filter and rpc client let rpc_client = self.rpc_client.clone(); let filter = Filter::new() @@ -305,7 +320,9 @@ impl SsvEventSyncer { Err(e) => { if retry_cnt > MAX_RETRIES { error!(?e, retry_cnt, "Max retries exceeded while fetching logs"); - return Err("Unable to fetch logs".to_string()); + return Err(ExecutionError::RpcError( + "Unable to fetch logs".to_string(), + )); } warn!(?e, retry_cnt, "Error fetching logs, retrying"); @@ -326,7 +343,7 @@ impl SsvEventSyncer { // network. 
The events will be processed and duties will be created in response to network // actions #[instrument(skip(self, contract_address))] - async fn live_sync(&mut self, contract_address: Address) -> Result<(), String> { + async fn live_sync(&mut self, contract_address: Address) -> Result<(), ExecutionError> { info!("Network up to sync.."); info!("Current state"); info!(?contract_address, "Starting live sync"); diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index 436e94b19..174b30743 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -112,7 +112,6 @@ pub fn verify_signature( owner: &Address, public_key: &PublicKey, ) -> bool { - /* // Hash the owner and nonce concatinated let data = format!("{}:{}", owner, nonce); let hash = keccak256(data); @@ -122,8 +121,6 @@ pub fn verify_signature( // Verify the signature against the message signature.verify(public_key, hash) - */ - true } // Perform basic verification on the operator set From 73501badf7d4e6f4383eca7f6d86e51aceeddc79 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 9 Jan 2025 18:01:52 +0000 Subject: [PATCH 40/49] fix error handling for event processing --- anchor/eth/execution.rs | 1 + anchor/eth/src/event_processor.rs | 24 +++++++++++++----------- anchor/eth/src/sync.rs | 4 ++-- 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/anchor/eth/execution.rs b/anchor/eth/execution.rs index 454e90358..cb73417c7 100644 --- a/anchor/eth/execution.rs +++ b/anchor/eth/execution.rs @@ -20,6 +20,7 @@ async fn main() { // Dummy configuration with endpoint and network let rpc_endpoint = "http://127.0.0.1:8545"; + let ws_endpoint = "ws://127.0.0.1:8546"; let ws_endpoint = "wss://eth.merkle.io"; let beacon_endpoint = "http://127.0.0.1:5052"; let config = Config { diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index c8d8ab3ab..588f1654a 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -69,20 +69,17 @@ impl 
EventProcessor { /// Process a new set of logs #[instrument(skip(self, logs), fields(logs_count = logs.len()))] - pub fn process_logs(&self, logs: Vec, live: bool) -> Result<(), ExecutionError> { + pub fn process_logs(&self, logs: Vec, live: bool) { info!(logs_count = logs.len(), "Starting log processing"); for (index, log) in logs.iter().enumerate() { trace!(log_index = index, topic = ?log.topic0(), "Processing individual log"); // extract the topic0 to retrieve log handler - let topic0 = log.topic0().ok_or_else(|| { - error!("Log missing topic0"); - ExecutionError::Misc("Log missing topic0".to_string()) - })?; - let handler = self.handlers.get(topic0).ok_or_else(|| { - error!(topic = ?topic0, "No handler found for topic"); - ExecutionError::Misc("No handler found for topic".to_string()) - })?; + let topic0 = log.topic0().expect("Log should always have a topic0"); + let handler = self + .handlers + .get(topic0) + .expect("Handler should always exist"); // Handle the log and emit warning for any malformed events if let Err(e) = handler(self, log) { @@ -93,7 +90,13 @@ impl EventProcessor { // If live is true, then we are currently in a live sync and want to take some action in // response to the log. Parse the log into a network action and send to be processed; if live { - let action: NetworkAction = log.try_into()?; + let action = match log.try_into() { + Ok(action) => action, + Err(e) => { + error!("Failed to convert log into NetworkAction {e}"); + NetworkAction::NoOp + } + }; if action != NetworkAction::NoOp && live { debug!(action = ?action, "Network action ready for processing"); // todo!() send off somewhere @@ -102,7 +105,6 @@ impl EventProcessor { } info!(logs_count = logs.len(), "Completed processing logs"); - Ok(()) } // A new Operator has been registered in the network. 
diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 02c925848..f0d3d88c0 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -271,7 +271,7 @@ impl SsvEventSyncer { // Logs are all fetched from the chain and in order, process them but do not send off to // be processed since we are just reconstructing state self.event_processor - .process_logs(ordered_event_logs, false)?; + .process_logs(ordered_event_logs, false); // record that we have processed up to this block self.event_processor @@ -396,7 +396,7 @@ impl SsvEventSyncer { ); // process the logs and update the last block we have recorded - self.event_processor.process_logs(logs, true)?; + self.event_processor.process_logs(logs, true); self.event_processor .db .processed_block(relevant_block) From 7ceaa4e139ffab513cc1306f28e4ae47bf88666a Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 9 Jan 2025 18:11:05 +0000 Subject: [PATCH 41/49] remove errors from process logs and identification bugfix --- anchor/database/src/operator_operations.rs | 2 +- anchor/eth/src/sync.rs | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs index 74e47426f..8cda5a8a5 100644 --- a/anchor/database/src/operator_operations.rs +++ b/anchor/database/src/operator_operations.rs @@ -34,7 +34,7 @@ impl NetworkDatabase { // Check to see if this operator is the current operator let own_id = self.state.single_state.id.load(Ordering::Relaxed); - if own_id == u64::MAX { + if own_id == (u64::MAX / 2) { // If the keys match, this is the current operator so we want to save the id let keys_match = pem_key == self.pubkey.public_key_to_pem().unwrap_or_default(); if keys_match { diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index f0d3d88c0..232d15b99 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -270,8 +270,7 @@ impl SsvEventSyncer { // Logs are all fetched from the chain and in order, 
process them but do not send off to // be processed since we are just reconstructing state - self.event_processor - .process_logs(ordered_event_logs, false); + self.event_processor.process_logs(ordered_event_logs, false); // record that we have processed up to this block self.event_processor From 825956cefad808365cc449a82bd4167c87f41ba6 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Fri, 10 Jan 2025 13:59:56 +0000 Subject: [PATCH 42/49] debugging nonce issues --- anchor/database/src/cluster_operations.rs | 4 ++-- anchor/database/src/sql_operations.rs | 2 +- anchor/database/src/state.rs | 6 +++--- anchor/database/src/tests/state_tests.rs | 6 +++--- anchor/eth/execution.rs | 2 +- anchor/eth/src/event_processor.rs | 17 +++++++++-------- anchor/eth/src/sync.rs | 2 +- 7 files changed, 20 insertions(+), 19 deletions(-) diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index 30919a7c1..4dd2afd3e 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -140,8 +140,8 @@ impl NetworkDatabase { // bump the nonce in memory if !self.state.single_state.nonces.contains_key(owner) { - // if it does not yet exist in memory, then create an entry and set it to one - self.state.single_state.nonces.insert(*owner, 1); + // if it does not yet exist in memory, then create an entry and set it to zero + self.state.single_state.nonces.insert(*owner, 0); } else { // otherwise, just increment the entry let mut entry = self diff --git a/anchor/database/src/sql_operations.rs b/anchor/database/src/sql_operations.rs index fb182294b..40b2c8d56 100644 --- a/anchor/database/src/sql_operations.rs +++ b/anchor/database/src/sql_operations.rs @@ -132,7 +132,7 @@ pub(crate) static SQL: LazyLock> = LazyLock: m.insert(SqlStatement::GetAllNonces, "SELECT * FROM nonce"); m.insert( SqlStatement::BumpNonce, - "INSERT INTO nonce (owner, nonce) VALUES (?1, 1) + "INSERT INTO nonce (owner, nonce) VALUES (?1, 0) ON 
CONFLICT (owner) DO UPDATE SET nonce = nonce + 1", ); diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index eb32c6f35..11feaba0e 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -291,13 +291,13 @@ impl NetworkDatabase { .load(Ordering::Relaxed) } - /// Get the nonce of the owner if it exists - pub fn get_nonce(&self, owner: &Address) -> u16 { + /// Get the next nonce of the owner if it exists + pub fn get_next_nonce(&self, owner: &Address) -> u16 { self.state .single_state .nonces .get(owner) - .map(|v| *v) + .map(|v| *v + 1) .unwrap_or(0) } } diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs index 9e3b6a969..598b4d6ce 100644 --- a/anchor/database/src/tests/state_tests.rs +++ b/anchor/database/src/tests/state_tests.rs @@ -126,7 +126,7 @@ mod state_database_tests { let owner = Address::random(); // this is the first time getting the nonce, so it should be zero - let nonce = fixture.db.get_nonce(&owner); + let nonce = fixture.db.get_next_nonce(&owner); assert_eq!(nonce, 0); // increment the nonce and then confirm that is is one @@ -134,7 +134,7 @@ mod state_database_tests { .db .bump_nonce(&owner) .expect("Failed in increment nonce"); - let nonce = fixture.db.get_nonce(&owner); + let nonce = fixture.db.get_next_nonce(&owner); assert_eq!(nonce, 1); } @@ -153,6 +153,6 @@ mod state_database_tests { .expect("Failed to create database"); // confirm that nonce is 1 - assert_eq!(fixture.db.get_nonce(&owner), 1); + assert_eq!(fixture.db.get_next_nonce(&owner), 1); } } diff --git a/anchor/eth/execution.rs b/anchor/eth/execution.rs index cb73417c7..0fd78fa1b 100644 --- a/anchor/eth/execution.rs +++ b/anchor/eth/execution.rs @@ -20,7 +20,7 @@ async fn main() { // Dummy configuration with endpoint and network let rpc_endpoint = "http://127.0.0.1:8545"; - let ws_endpoint = "ws://127.0.0.1:8546"; + let _ws_endpoint = "ws://127.0.0.1:8546"; let ws_endpoint = "wss://eth.merkle.io"; 
let beacon_endpoint = "http://127.0.0.1:5052"; let config = Config { diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 588f1654a..155c778df 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -222,6 +222,13 @@ impl EventProcessor { debug!(owner = ?owner, operator_count = operatorIds.len(), "Processing validator addition"); + // Get the expected nonce, and then increment it + let nonce = self.db.get_next_nonce(&owner); + self.db.bump_nonce(&owner).map_err(|e| { + debug!(owner = ?owner, "Failed to bump nonce"); + ExecutionError::Database(format!("Failed to bump nonce: {e}")) + })?; + // Process data into a usable form let validator_pubkey = PublicKey::from_str(&publicKey.to_string()).map_err(|e| { debug!( @@ -234,13 +241,6 @@ impl EventProcessor { let cluster_id = compute_cluster_id(owner, operatorIds.clone()); let operator_ids: Vec = operatorIds.iter().map(|id| OperatorId(*id)).collect(); - // Get the expected nonce, and then increment it - let nonce = self.db.get_nonce(&owner); - self.db.bump_nonce(&owner).map_err(|e| { - debug!(owner = ?owner, "Failed to bump nonce"); - ExecutionError::Database(format!("Failed to bump nonce: {e}")) - })?; - // Perform verification on the operator set and make sure they are all registered in the // network debug!(cluster_id = ?cluster_id, "Validating operators"); @@ -297,6 +297,7 @@ impl EventProcessor { ExecutionError::Database(format!("Failed to insert validator into cluster: {e}")) })?; + debug!( cluster_id = ?cluster_id, validator_pubkey = %validator_pubkey, @@ -483,7 +484,7 @@ impl EventProcessor { owner, recipientAddress, } = SSVContract::FeeRecipientAddressUpdated::decode_from_log(log)?; - let _ = self.db.update_fee_recipient(owner, recipientAddress); + // update the fee recipient address in the database self.db .update_fee_recipient(owner, recipientAddress) .map_err(|e| { diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index 
232d15b99..d950c9e8d 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -242,7 +242,7 @@ impl SsvEventSyncer { let calculated_end = calculated_start + (BATCH_SIZE * GROUP_SIZE as u64) - 1; let calculated_end = std::cmp::min(calculated_end, end_block); info!( - "Fetching blocks for range {}..{}", + "Fetching logs for block range {}..{}", calculated_start, calculated_end ); From c442785c81093ca7928a2f9d3d7f4be0a3030254 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Fri, 10 Jan 2025 14:00:35 +0000 Subject: [PATCH 43/49] fmt --- anchor/eth/src/event_processor.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 155c778df..3dfbbb781 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -297,7 +297,6 @@ impl EventProcessor { ExecutionError::Database(format!("Failed to insert validator into cluster: {e}")) })?; - debug!( cluster_id = ?cluster_id, validator_pubkey = %validator_pubkey, From d1fc94a56326e1ad18fe35aa5e3e082114e513d7 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Fri, 10 Jan 2025 14:07:15 +0000 Subject: [PATCH 44/49] manual sort workspace deps --- Cargo.toml | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 994bfb7d8..2b9a5014e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,24 +21,31 @@ edition = "2021" # NOTE: The block below is currently not sorted by `cargo sort`. Please keep sorted manually, especially during merges! 
[workspace.dependencies] +# Local dependencies (anchor paths) client = { path = "anchor/client" } +database = { path = "anchor/database" } eth = { path = "anchor/eth" } -qbft = { path = "anchor/common/qbft" } http_api = { path = "anchor/http_api" } http_metrics = { path = "anchor/http_metrics" } -database = { path = "anchor/database" } network = { path = "anchor/network" } -version = { path = "anchor/common/version" } processor = { path = "anchor/processor" } +qbft = { path = "anchor/common/qbft" } ssv_types = { path = "anchor/common/ssv_types" } +version = { path = "anchor/common/version" } + +# Git dependencies +health_metrics = { git = "https://github.com/sigp/lighthouse", branch = "anchor" } lighthouse_network = { git = "https://github.com/sigp/lighthouse", branch = "unstable" } -task_executor = { git = "https://github.com/sigp/lighthouse", branch = "unstable", default-features = false, features = [ "tracing", ] } metrics = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } -validator_metrics = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } sensitive_url = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } slot_clock = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } -unused_port = { git = "https://github.com/sigp/lighthouse", branch = "unstable" } +task_executor = { git = "https://github.com/sigp/lighthouse", branch = "unstable", default-features = false, features = ["tracing"] } types = { git = "https://github.com/sigp/lighthouse", branch = "unstable" } +unused_port = { git = "https://github.com/sigp/lighthouse", branch = "unstable" } +validator_metrics = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } + +# Standard dependencies +alloy = { version = "0.6.4", features = ["sol-types", "transports", "json", "contract", "pubsub", "provider-ws", "rpc-types", "rlp"] } async-channel = "1.9" axum = "0.7.7" base64 = 
"0.22.1" @@ -49,28 +56,19 @@ dirs = "5.0.1" discv5 = "0.9.0" either = "1.13.0" futures = "0.3.30" -health_metrics = { git = "https://github.com/sigp/lighthouse", branch = "anchor" } +hex = "0.4.3" hyper = "1.4" num_cpus = "1" openssl = "0.10.68" parking_lot = "0.12" +reqwest = "0.12.12" rusqlite = "0.28.0" serde = { version = "1.0.208", features = ["derive"] } strum = { version = "0.24", features = ["derive"] } -tokio = { version = "1.39.2", features = [ - "rt", - "rt-multi-thread", - "time", - "signal", - "macros", -] } +tokio = { version = "1.39.2", features = ["rt", "rt-multi-thread", "time", "signal", "macros"] } tower-http = { version = "0.6", features = ["cors"] } tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["fmt", "env-filter"] } -hex = "0.4.3" -alloy = { version = "0.6.4", features = ["sol-types", "transports", "json", "contract", "pubsub", -"provider-ws", "rpc-types", "rlp"] } -reqwest = "0.12.12" [profile.maxperf] inherits = "release" From d312c955ea6da70ce8e0f87f88daf9cbc8acce97 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Fri, 10 Jan 2025 17:42:08 +0000 Subject: [PATCH 45/49] mis cleanup and formatting --- Cargo.toml | 69 ++++++++++++++++++------------- anchor/eth/Cargo.toml | 5 +-- anchor/eth/src/error.rs | 1 + anchor/eth/src/event_processor.rs | 28 ++++++------- anchor/eth/src/network_actions.rs | 1 + anchor/eth/src/sync.rs | 25 +++++------ anchor/http_api/Cargo.toml | 2 +- anchor/network/Cargo.toml | 13 +++++- book/book.toml | 3 +- 9 files changed, 83 insertions(+), 64 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2b9a5014e..11a76fee3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,18 +1,18 @@ [workspace] # Extra tooling projects will be added. 
members = [ - "anchor", - "anchor/client", - "anchor/common/qbft", - "anchor/common/ssv_types", - "anchor/common/version", - "anchor/database", - "anchor/eth", - "anchor/http_api", - "anchor/http_metrics", - "anchor/network", - "anchor/processor", - "anchor/signature_collector", + "anchor", + "anchor/client", + "anchor/common/qbft", + "anchor/common/ssv_types", + "anchor/common/version", + "anchor/database", + "anchor/eth", + "anchor/http_api", + "anchor/http_metrics", + "anchor/network", + "anchor/processor", + "anchor/signature_collector", ] resolver = "2" @@ -21,31 +21,26 @@ edition = "2021" # NOTE: The block below is currently not sorted by `cargo sort`. Please keep sorted manually, especially during merges! [workspace.dependencies] -# Local dependencies (anchor paths) client = { path = "anchor/client" } -database = { path = "anchor/database" } eth = { path = "anchor/eth" } +qbft = { path = "anchor/common/qbft" } http_api = { path = "anchor/http_api" } http_metrics = { path = "anchor/http_metrics" } +database = { path = "anchor/database" } network = { path = "anchor/network" } +version = { path = "anchor/common/version" } processor = { path = "anchor/processor" } -qbft = { path = "anchor/common/qbft" } ssv_types = { path = "anchor/common/ssv_types" } -version = { path = "anchor/common/version" } - -# Git dependencies -health_metrics = { git = "https://github.com/sigp/lighthouse", branch = "anchor" } lighthouse_network = { git = "https://github.com/sigp/lighthouse", branch = "unstable" } +task_executor = { git = "https://github.com/sigp/lighthouse", branch = "unstable", default-features = false, features = [ + "tracing", +] } metrics = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } +validator_metrics = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } sensitive_url = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } slot_clock = { git = 
"https://github.com/agemanning/lighthouse", branch = "modularize-vc" } -task_executor = { git = "https://github.com/sigp/lighthouse", branch = "unstable", default-features = false, features = ["tracing"] } -types = { git = "https://github.com/sigp/lighthouse", branch = "unstable" } unused_port = { git = "https://github.com/sigp/lighthouse", branch = "unstable" } -validator_metrics = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } - -# Standard dependencies -alloy = { version = "0.6.4", features = ["sol-types", "transports", "json", "contract", "pubsub", "provider-ws", "rpc-types", "rlp"] } +types = { git = "https://github.com/sigp/lighthouse", branch = "unstable" } async-channel = "1.9" axum = "0.7.7" base64 = "0.22.1" @@ -56,23 +51,39 @@ dirs = "5.0.1" discv5 = "0.9.0" either = "1.13.0" futures = "0.3.30" -hex = "0.4.3" +health_metrics = { git = "https://github.com/sigp/lighthouse", branch = "anchor" } hyper = "1.4" num_cpus = "1" openssl = "0.10.68" parking_lot = "0.12" -reqwest = "0.12.12" rusqlite = "0.28.0" serde = { version = "1.0.208", features = ["derive"] } strum = { version = "0.24", features = ["derive"] } -tokio = { version = "1.39.2", features = ["rt", "rt-multi-thread", "time", "signal", "macros"] } +tokio = { version = "1.39.2", features = [ + "rt", + "rt-multi-thread", + "time", + "signal", + "macros", +] } tower-http = { version = "0.6", features = ["cors"] } tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["fmt", "env-filter"] } +hex = "0.4.3" +alloy = { version = "0.6.4", features = [ + "sol-types", + "transports", + "json", + "contract", + "pubsub", + "provider-ws", + "rpc-types", + "rlp", +] } +reqwest = "0.12.12" [profile.maxperf] inherits = "release" lto = "fat" codegen-units = 1 incremental = false - diff --git a/anchor/eth/Cargo.toml b/anchor/eth/Cargo.toml index 9aea17ec3..5c9c2ab64 100644 --- a/anchor/eth/Cargo.toml +++ b/anchor/eth/Cargo.toml @@ -10,7 +10,7 @@ path = "execution.rs" 
[dependencies] alloy = { workspace = true } -base64 = { workspace = true } +base64 = { workspace = true } database = { workspace = true } futures = { workspace = true } hex = { workspace = true } @@ -23,6 +23,3 @@ tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } types = { workspace = true } - - - diff --git a/anchor/eth/src/error.rs b/anchor/eth/src/error.rs index 839216308..493a25978 100644 --- a/anchor/eth/src/error.rs +++ b/anchor/eth/src/error.rs @@ -1,5 +1,6 @@ use std::fmt::Display; +// Custom execution integration layer errors #[derive(Debug)] pub enum ExecutionError { SyncError(String), diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 3dfbbb781..dd8609e7c 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -19,6 +19,7 @@ use types::PublicKey; type EventHandler = fn(&EventProcessor, &Log) -> Result<(), ExecutionError>; /// The Event Processor. This handles all verification and recording of events. +/// It will be passed logs from the sync layer to be processed and saved into the database pub struct EventProcessor { /// Function handlers for event processing handlers: HashMap, @@ -112,7 +113,7 @@ impl EventProcessor { fn process_operator_added(&self, log: &Log) -> Result<(), ExecutionError> { // Destructure operator added event let SSVContract::OperatorAdded { - operatorId, // The new ID of the operator + operatorId, // The ID of the newly registered operator owner, // The EOA owner address publicKey, // The RSA public key .. @@ -191,7 +192,7 @@ impl EventProcessor { let operator_id = OperatorId(operatorId); debug!(operator_id = ?operator_id, "Processing operator removed"); - // Delete the operator from database and in memory. 
Will handle existence check + // Delete the operator from database and in memory self.db.delete_operator(operator_id).map_err(|e| { debug!( operator_id = ?operator_id, @@ -219,10 +220,10 @@ impl EventProcessor { shares, .. } = SSVContract::ValidatorAdded::decode_from_log(log)?; - debug!(owner = ?owner, operator_count = operatorIds.len(), "Processing validator addition"); - // Get the expected nonce, and then increment it + // Get the expected nonce and then increment it. This will happen regardless of if the + // event is malformed or not let nonce = self.db.get_next_nonce(&owner); self.db.bump_nonce(&owner).map_err(|e| { debug!(owner = ?owner, "Failed to bump nonce"); @@ -274,7 +275,7 @@ impl EventProcessor { )); } - // fetch the validator metadata + // Fetch the validator metadata let validator_metadata = construct_validator_metadata(&validator_pubkey, &cluster_id) .map_err(|e| { debug!(validator_pubkey= ?validator_pubkey, "Failed to fetch validator metadata"); @@ -315,10 +316,9 @@ impl EventProcessor { publicKey, .. 
} = SSVContract::ValidatorRemoved::decode_from_log(log)?; - debug!(owner = ?owner, public_key = ?publicKey, "Processing Validator Removed"); - // Process and fetch data + // Parse the public key let validator_pubkey = PublicKey::from_str(&publicKey.to_string()).map_err(|e| { debug!( validator_pubkey = %publicKey, @@ -331,12 +331,7 @@ // Compute the cluster id let cluster_id = compute_cluster_id(owner, operatorIds.clone()); - debug!( - cluster_id = ?cluster_id, - validator_pubkey = %validator_pubkey, - "Processing validator removal" - ); - + // Get the metadata for this validator let metadata = match self.db.metadata().get_by(&validator_pubkey) { Some(data) => data, None => { @@ -349,6 +344,8 @@ )); } }; + + // Get the cluster that this validator is in let cluster = match self.db.clusters().get_by(&validator_pubkey) { Some(data) => data, None => { @@ -389,10 +386,11 @@ )); } - // Check if we are a member of this cluster, if so we need to remove share data + // Check if we are a member of this cluster, if so we are storing the share and have to + // remove it if self.db.member_of_cluster(&cluster_id) { debug!(cluster_id = ?cluster_id, "Removing cluster from local keystore"); - // todo!(): Remove it from the internal keystore + // todo!(): Remove it from the internal keystore when it is made } // Remove the validator and all corresponding cluster data diff --git a/anchor/eth/src/network_actions.rs b/anchor/eth/src/network_actions.rs index 09b4eb201..563fdb7e8 100644 --- a/anchor/eth/src/network_actions.rs +++ b/anchor/eth/src/network_actions.rs @@ -8,6 +8,7 @@ use ssv_types::OperatorId; use std::str::FromStr; use types::PublicKey; +/// Actions that the network has to take in response to an event during the live sync #[derive(Debug, PartialEq)] pub enum NetworkAction { StopValidator { diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index d950c9e8d..eb0eef620 100644 --- 
a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -1,4 +1,5 @@ use crate::error::ExecutionError; +use crate::event_processor::EventProcessor; use crate::gen::SSVContract; use alloy::primitives::{address, Address}; use alloy::providers::{Provider, ProviderBuilder, RootProvider, WsConnect}; @@ -15,8 +16,6 @@ use std::sync::{Arc, LazyLock}; use tokio::time::Duration; use tracing::{debug, error, info, instrument, warn}; -use crate::event_processor::EventProcessor; - /// SSV contract events needed to come up to date with the network static SSV_EVENTS: LazyLock> = LazyLock::new(|| { vec![ @@ -52,6 +51,7 @@ static HOLESKY_DEPLOYMENT_ADDRESS: LazyLock
= /// Mainnet: https://etherscan.io/tx/0x4a11a560d3c2f693e96f98abb1feb447646b01b36203ecab0a96a1cf45fd650b const MAINNET_DEPLOYMENT_BLOCK: u64 = 17507487; +/// Contract deployment block on the Holesky Network /// Holesky: https://holesky.etherscan.io/tx/0x998c38ff37b47e69e23c21a8079168b7e0e0ade7244781587b00be3f08a725c6 const HOLESKY_DEPLOYMENT_BLOCK: u64 = 181612; @@ -68,11 +68,11 @@ type WsClient = RootProvider; /// Retry information for log fetching const MAX_RETRIES: i32 = 5; -// Follow distance +// Block follow distance const FOLLOW_DISTANCE: u64 = 8; /// The maximum number of operators a validator can have -//https://github.com/ssvlabs/ssv/blob/07095fe31e3ded288af722a9c521117980585d95/eth/eventhandler/validation.go#L15 +/// https://github.com/ssvlabs/ssv/blob/07095fe31e3ded288af722a9c521117980585d95/eth/eventhandler/validation.go#L15 pub const MAX_OPERATORS: usize = 13; /// Network that is being connected to @@ -110,6 +110,7 @@ pub struct SsvEventSyncer { impl SsvEventSyncer { #[instrument(skip(db))] + /// Create a new SsvEventSyncer to sync all of the events from the chain pub async fn new(db: Arc, config: Config) -> Result { info!(?config, "Creating new SSV Event Syncer"); @@ -142,10 +143,12 @@ } #[instrument(skip(self))] + /// Initiate both a historical sync and a live sync from the chain. This function will transition + /// into a never-ending live sync, so it should never return pub async fn sync(&mut self) -> Result<(), ExecutionError> { info!("Starting SSV event sync"); - // get network specific contract information + // Get network specific contract information let (contract_address, deployment_block) = match self.network { Network::Mainnet => (*MAINNET_DEPLOYMENT_ADDRESS, MAINNET_DEPLOYMENT_BLOCK), Network::Holesky => (*HOLESKY_DEPLOYMENT_ADDRESS, HOLESKY_DEPLOYMENT_BLOCK), @@ -225,7 +228,7 @@ // there are 50 tasks per group. 
BATCH_SIZE * 50 = 500k let mut task_groups = Vec::new(); while !tasks.is_empty() { - // drain takes elements from the original vector, moving them to a new vector + // Drain takes elements from the original vector, moving them to a new vector // take up to chunk_size elements (or whatever is left if less than chunk_size) let chunk: Vec<_> = tasks.drain(..tasks.len().min(GROUP_SIZE)).collect(); task_groups.push(chunk); @@ -272,19 +275,17 @@ impl SsvEventSyncer { // be processed since we are just reconstructing state self.event_processor.process_logs(ordered_event_logs, false); - // record that we have processed up to this block + // Record that we have processed up to this block self.event_processor .db .processed_block(calculated_end) .expect("Failed to update last processed block number"); } + info!("Processed all events up to block {}", end_block); - // update processing information + + // update end block processed information start_block = end_block + 1; - self.event_processor - .db - .processed_block(end_block) - .expect("Failed to update last processed block number"); } info!("Historical sync completed"); Ok(()) diff --git a/anchor/http_api/Cargo.toml b/anchor/http_api/Cargo.toml index 7769bf0cc..15975bac1 100644 --- a/anchor/http_api/Cargo.toml +++ b/anchor/http_api/Cargo.toml @@ -12,6 +12,6 @@ path = "src/lib.rs" axum = { workspace = true } serde = { workspace = true } slot_clock = { workspace = true } -task_executor = { workspace = true } +task_executor = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } diff --git a/anchor/network/Cargo.toml b/anchor/network/Cargo.toml index b3b994684..cc069ab87 100644 --- a/anchor/network/Cargo.toml +++ b/anchor/network/Cargo.toml @@ -8,7 +8,18 @@ authors = ["Sigma Prime "] dirs = { workspace = true } discv5 = { workspace = true } futures = { workspace = true } -libp2p = { version = "0.54", default-features = false, features = ["identify", "yamux", "noise", "secp256k1", "tcp", "tokio", 
"macros", "gossipsub", "quic", "ping"] } +libp2p = { version = "0.54", default-features = false, features = [ + "identify", + "yamux", + "noise", + "secp256k1", + "tcp", + "tokio", + "macros", + "gossipsub", + "quic", + "ping", +] } lighthouse_network = { workspace = true } serde = { workspace = true } task_executor = { workspace = true } diff --git a/book/book.toml b/book/book.toml index 51a84533a..457ea5bf0 100644 --- a/book/book.toml +++ b/book/book.toml @@ -6,7 +6,7 @@ title = "Anchor Book" author = "Sigma Prime" [output.html] -additional-css =["src/css/custom.css"] +additional-css = ["src/css/custom.css"] default-theme = "coal" additional-js = ["mermaid.min.js", "mermaid-init.js"] @@ -14,4 +14,3 @@ additional-js = ["mermaid.min.js", "mermaid-init.js"] [preprocessor.mermaid] command = "mdbook-mermaid" - From a4395dd0958e708d2c3f7ff9e019f4498487b83d Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Fri, 10 Jan 2025 17:52:09 +0000 Subject: [PATCH 46/49] revert book toml* --- book/book.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/book/book.toml b/book/book.toml index 457ea5bf0..65b5a6586 100644 --- a/book/book.toml +++ b/book/book.toml @@ -6,7 +6,7 @@ title = "Anchor Book" author = "Sigma Prime" [output.html] -additional-css = ["src/css/custom.css"] +additional-css =["src/css/custom.css"] default-theme = "coal" additional-js = ["mermaid.min.js", "mermaid-init.js"] From 951ecb3ca101759c8e8103185969596d2e7f9309 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 13 Jan 2025 13:46:50 +0000 Subject: [PATCH 47/49] update sig deserialize --- anchor/eth/src/util.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index 174b30743..7a9ee3ea2 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -117,7 +117,10 @@ pub fn verify_signature( let hash = keccak256(data); // Deserialize the signature - let signature = Signature::deserialize(&signature).expect("Failed to 
deserialize signature"); + let signature = match Signature::deserialize(&signature) { + Ok(sig) => sig, + Err(_) => return false, + }; // Verify the signature against the message signature.verify(public_key, hash) From ff4fa0e79aa705d2937fc66b9932ee1bf6e45709 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 13 Jan 2025 13:50:46 +0000 Subject: [PATCH 48/49] spelling --- anchor/eth/README.md | 72 ++++++++++++++++++++++---------------------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/anchor/eth/README.md b/anchor/eth/README.md index 721a2a1b2..106a9fa5f 100644 --- a/anchor/eth/README.md +++ b/anchor/eth/README.md @@ -1,36 +1,36 @@ -## Execution Layer -This crate implements the execution layer component of the SSV node, responsible for monitoring and processing SSV network events on Ethereum L1 networks (Mainnet and Holesky). - -## Overview -The execution layer client maintains synchronization with the SSV network contract by: -* Processing historical events from contract deployement -* Monitoring live contract events -* Managing validator and operator state changes -* Handling cluster lifecycle events - -## Components -### SsvEventSyncer -This is the core synchronization engine that: -* Manages connections to an Ethereum execution client -* Handles historical and live event processing -* Maintains event ordering and state consistency -* Processes events in configurable batch sizes - -### EventProcessor -This processes network events and interacts with the database to validate event logs and persist them into the database. 
- -## Event Types -```rust -event OperatorAdded(uint64 indexed operatorId, address indexed owner, bytes publicKey, uint256 fee) -event OperatorRemoved(uint64 indexed operatorId) -event ValidatorAdded(address indexed owner, uint64[] operatorIds, bytes publicKey, bytes shares, Cluster cluster) -event ValidatorRemoved(address indexed owner, uint64[] operatorIds, bytes publicKey, Cluster cluster) -event ClusterLiquidated(address indexed owner, uint64[] operatorIds, Cluster cluster) -event ClusterReactivated(address indexed owner, uint64[] operatorIds, Cluster cluster) -event FeeRecipientAddressUpdated(address indexed owner, address recipientAddress) -event ValidatorExited(address indexed owner, uint64[] operatorIds, bytes publicKey) -``` - -## Contract Addresses -* Mainnet: `0xDD9BC35aE942eF0cFa76930954a156B3fF30a4E1` -* Holesky: `0x38A4794cCEd47d3baf7370CcC43B560D3a1beEFA` +## Execution Layer +This crate implements the execution layer component of the SSV node, responsible for monitoring and processing SSV network events on Ethereum L1 networks (Mainnet and Holesky). + +## Overview +The execution layer client maintains synchronization with the SSV network contract by: +* Processing historical events from contract deployment +* Monitoring live contract events +* Managing validator and operator state changes +* Handling cluster lifecycle events + +## Components +### SSV Event Syncer +This is the core synchronization engine that: +* Manages connections to an Ethereum execution client +* Handles historical and live event processing +* Maintains event ordering and state consistency +* Processes events in configurable batch sizes + +### Event Processor +This processes network events and interacts with the database to validate event logs and persist them into the database. 
+ +## Event Types +```rust +event OperatorAdded(uint64 indexed operatorId, address indexed owner, bytes publicKey, uint256 fee) +event OperatorRemoved(uint64 indexed operatorId) +event ValidatorAdded(address indexed owner, uint64[] operatorIds, bytes publicKey, bytes shares, Cluster cluster) +event ValidatorRemoved(address indexed owner, uint64[] operatorIds, bytes publicKey, Cluster cluster) +event ClusterLiquidated(address indexed owner, uint64[] operatorIds, Cluster cluster) +event ClusterReactivated(address indexed owner, uint64[] operatorIds, Cluster cluster) +event FeeRecipientAddressUpdated(address indexed owner, address recipientAddress) +event ValidatorExited(address indexed owner, uint64[] operatorIds, bytes publicKey) +``` + +## Contract Addresses +* Mainnet: `0xDD9BC35aE942eF0cFa76930954a156B3fF30a4E1` +* Holesky: `0x38A4794cCEd47d3baf7370CcC43B560D3a1beEFA` From f4aa5432a065e2398d4b347d12a4f311314cec3e Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 13 Jan 2025 13:55:07 +0000 Subject: [PATCH 49/49] wordlist for ci --- .github/wordlist.txt | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/wordlist.txt b/.github/wordlist.txt index 8420359c6..097487156 100644 --- a/.github/wordlist.txt +++ b/.github/wordlist.txt @@ -42,4 +42,9 @@ TODOs UI Validator validator -validators \ No newline at end of file +validators +Holesky +Mainnet +lifecycle +Syncer +