From 68460b018558fa7da9fd79a908d25df021f9013b Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Thu, 16 Oct 2025 18:33:04 -0300 Subject: [PATCH 001/114] generate 12-block chain --- .../chain_index/tests/proptest_blockgen.rs | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 zaino-state/src/chain_index/tests/proptest_blockgen.rs diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs new file mode 100644 index 000000000..7e721e93a --- /dev/null +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -0,0 +1,26 @@ +use proptest::{strategy::Strategy, test_runner::TestCaseResult}; +use zebra_chain::block::arbitrary; + +#[test] +fn make_chain() { + let chain_size = 12; + let mut runner = + proptest::test_runner::TestRunner::new(proptest::test_runner::Config::default()); + let overall_strat = arbitrary::LedgerState::genesis_strategy(None, None, true); + let chain_segment_strat = overall_strat.prop_flat_map(|ledger| { + zebra_chain::block::Block::partial_chain_strategy( + ledger, + chain_size, + arbitrary::allow_all_transparent_coinbase_spends, + false, + ) + }); + runner + .run(&chain_segment_strat, |segment| { + for block in segment { + println!("{:?}", block.coinbase_height()) + } + Ok(()) + }) + .unwrap(); +} From 89e1fcd07284fe56ff980a2005f2036bb9dcf13d Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Fri, 17 Oct 2025 16:14:30 -0300 Subject: [PATCH 002/114] make prev_hash-consistent mock branching chain --- .../chain_index/tests/proptest_blockgen.rs | 96 +++++++++++++++---- 1 file changed, 76 insertions(+), 20 deletions(-) diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 7e721e93a..34c784d3a 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -1,26 +1,82 @@ -use proptest::{strategy::Strategy, 
test_runner::TestCaseResult}; -use zebra_chain::block::arbitrary; +use std::sync::Arc; + +use proptest::{ + prelude::{Arbitrary as _, BoxedStrategy, Just}, + strategy::Strategy, +}; +use zebra_chain::{ + block::arbitrary::{self, LedgerStateOverride}, + fmt::SummaryDebug, + parameters::GENESIS_PREVIOUS_BLOCK_HASH, + LedgerState, +}; #[test] fn make_chain() { - let chain_size = 12; - let mut runner = - proptest::test_runner::TestRunner::new(proptest::test_runner::Config::default()); - let overall_strat = arbitrary::LedgerState::genesis_strategy(None, None, true); - let chain_segment_strat = overall_strat.prop_flat_map(|ledger| { - zebra_chain::block::Block::partial_chain_strategy( - ledger, - chain_size, - arbitrary::allow_all_transparent_coinbase_spends, - false, - ) - }); - runner - .run(&chain_segment_strat, |segment| { - for block in segment { - println!("{:?}", block.coinbase_height()) + proptest::proptest!(|(segments in make_branching_chain(2, 12))| { + let (genesis_segment, branch_segments) = segments; + let mut prev_hash = GENESIS_PREVIOUS_BLOCK_HASH; + for block in genesis_segment { + assert_eq!(block.header.previous_block_hash, prev_hash); + println!("pre-divergence: {:?}", block.coinbase_height()); + prev_hash = block.hash(); + + } + let hash_atop_shared_chain = prev_hash; + for branch_segment in branch_segments { + for block in branch_segment { + assert_eq!(block.header.previous_block_hash, prev_hash); + println!("post-divergence: {:?}", block.coinbase_height()); + prev_hash = block.hash(); } - Ok(()) + prev_hash = hash_atop_shared_chain; + } + }); +} + +fn make_branching_chain( + num_branches: usize, + chain_size: usize, +) -> BoxedStrategy<( + SummaryDebug>>, + Vec>>>, +)> { + arbitrary::LedgerState::genesis_strategy(None, None, true) + .prop_flat_map(move |ledger| { + zebra_chain::block::Block::partial_chain_strategy( + ledger, + chain_size, + arbitrary::allow_all_transparent_coinbase_spends, + false, + ) + }) + .prop_flat_map(|segment| { + ( + 
Just(segment.clone()), + LedgerState::arbitrary_with(LedgerStateOverride { + height_override: segment.last().unwrap().coinbase_height().unwrap() + 1, + previous_block_hash_override: Some(segment.last().unwrap().hash()), + network_upgrade_override: None, + transaction_version_override: None, + transaction_has_valid_network_upgrade: true, + always_has_coinbase: true, + }), + ) + }) + .prop_flat_map(move |(segment, ledger)| { + ( + Just(segment), + std::iter::repeat_with(|| { + zebra_chain::block::Block::partial_chain_strategy( + ledger.clone(), + chain_size, + arbitrary::allow_all_transparent_coinbase_spends, + false, + ) + }) + .take(num_branches) + .collect::>(), + ) }) - .unwrap(); + .boxed() } From 8605f6b6dce36dbfa59cf17b914f54c6bcfc9ca9 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Fri, 17 Oct 2025 16:40:34 -0300 Subject: [PATCH 003/114] mockchain source todo impl --- zaino-state/src/chain_index/source.rs | 2 +- .../chain_index/tests/proptest_blockgen.rs | 77 +++++++++++++++++++ 2 files changed, 78 insertions(+), 1 deletion(-) diff --git a/zaino-state/src/chain_index/source.rs b/zaino-state/src/chain_index/source.rs index 5eac010e7..9bf1483d5 100644 --- a/zaino-state/src/chain_index/source.rs +++ b/zaino-state/src/chain_index/source.rs @@ -92,7 +92,7 @@ pub enum BlockchainSourceError { #[error("data from validator invalid: {0}")] pub struct InvalidData(String); -type BlockchainSourceResult = Result; +pub(crate) type BlockchainSourceResult = Result; /// ReadStateService based validator connector. 
/// diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 34c784d3a..61d73690e 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -4,12 +4,18 @@ use proptest::{ prelude::{Arbitrary as _, BoxedStrategy, Just}, strategy::Strategy, }; +use tonic::async_trait; use zebra_chain::{ block::arbitrary::{self, LedgerStateOverride}, fmt::SummaryDebug, parameters::GENESIS_PREVIOUS_BLOCK_HASH, LedgerState, }; +use zebra_state::HashOrHeight; + +use crate::{ + chain_index::source::BlockchainSourceResult, BlockHash, BlockchainSource, TransactionHash, +}; #[test] fn make_chain() { @@ -34,6 +40,77 @@ fn make_chain() { }); } +#[derive(Clone)] +struct ProptestMockchain { + genesis_segment: SummaryDebug>>, + branching_segments: Vec>>>, +} + +#[async_trait] +impl BlockchainSource for ProptestMockchain { + /// Returns the block by hash or height + async fn get_block( + &self, + id: HashOrHeight, + ) -> BlockchainSourceResult>> { + todo!() + } + + /// Returns the block commitment tree data by hash + async fn get_commitment_tree_roots( + &self, + id: BlockHash, + ) -> BlockchainSourceResult<( + Option<(zebra_chain::sapling::tree::Root, u64)>, + Option<(zebra_chain::orchard::tree::Root, u64)>, + )> { + todo!() + } + + /// Returns the sapling and orchard treestate by hash + async fn get_treestate( + &self, + id: BlockHash, + ) -> BlockchainSourceResult<(Option>, Option>)> { + todo!() + } + + /// Returns the complete list of txids currently in the mempool. + async fn get_mempool_txids( + &self, + ) -> BlockchainSourceResult>> { + todo!() + } + + /// Returns the transaction by txid + async fn get_transaction( + &self, + txid: TransactionHash, + ) -> BlockchainSourceResult>> { + todo!() + } + + /// Returns the hash of the block at the tip of the best chain. 
+ async fn get_best_block_hash( + &self, + ) -> BlockchainSourceResult> { + todo!() + } + + /// Get a listener for new nonfinalized blocks, + /// if supported + async fn nonfinalized_listener( + &self, + ) -> Result< + Option< + tokio::sync::mpsc::Receiver<(zebra_chain::block::Hash, Arc)>, + >, + Box, + > { + todo!() + } +} + fn make_branching_chain( num_branches: usize, chain_size: usize, From dd6c43b2cb59e1f4ddedd63efeb960f80ccd116b Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Mon, 20 Oct 2025 17:11:53 -0300 Subject: [PATCH 004/114] first draft BlockchainSource mock impl --- Cargo.lock | 2049 ++++++++++------- zaino-state/Cargo.toml | 3 + .../chain_index/tests/proptest_blockgen.rs | 174 +- 3 files changed, 1352 insertions(+), 874 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f44b2306d..556cac1c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,7 +23,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ - "crypto-common 0.1.7", + "crypto-common 0.1.6", "generic-array", ] @@ -64,9 +64,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.4" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -164,22 +164,22 @@ dependencies = [ [[package]] name = "anstyle-query" -version = "1.1.5" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" +checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] name = "anstyle-wincon" -version = "3.0.11" +version = "3.0.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" +checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a" dependencies = [ "anstyle", "once_cell_polyfill", - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] @@ -190,9 +190,9 @@ checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "append-only-vec" -version = "0.1.8" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2114736faba96bcd79595c700d03183f61357b9fbce14852515e59f3bee4ed4a" +checksum = "7992085ec035cfe96992dd31bfd495a2ebd31969bb95f624471cb6c0b349e571" [[package]] name = "arc-swap" @@ -220,7 +220,7 @@ checksum = "4734bde002bb3d52e27ab808faa971a143d48d11dbd836d5c02edd1756cdab06" dependencies = [ "async-trait", "cfg-if", - "derive-deftly", + "derive-deftly 1.0.1", "derive_builder_fork_arti", "derive_more", "educe", @@ -274,7 +274,7 @@ dependencies = [ "asn1-rs-impl", "displaydoc", "nom", - "num-traits", + "num-traits 0.2.19", "rusticata-macros", "thiserror 2.0.17", ] @@ -287,7 +287,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", "synstructure", ] @@ -299,20 +299,21 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] name = "async-compression" -version = "0.4.35" +version = "0.4.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07a926debf178f2d355197f9caddb08e54a9329d44748034bba349c5848cb519" +checksum = "5a89bce6054c720275ac2432fbba080a66a2106a44a1b804553930ca6909f4e0" dependencies = [ "compression-codecs", "compression-core", "futures-core", "futures-io", "pin-project-lite", + "tokio", ] [[package]] @@ -334,7 +335,7 @@ checksum = 
"c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -345,7 +346,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -405,9 +406,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.15.1" +version = "1.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b5ce75405893cd713f9ab8e297d8e438f624dde7d706108285f7e17a25a180f" +checksum = "879b6c89592deb404ba4dc0ae6b58ffd1795c78991cbb5b8bc441c48a070440d" dependencies = [ "aws-lc-sys", "zeroize", @@ -415,10 +416,11 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.34.0" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "179c3777a8b5e70e90ea426114ffc565b2c1a9f82f6c4a0c5a34aa6ef5e781b6" +checksum = "107a4e9d9cab9963e04e84bb8dee0e25f2a987f9a8bad5ed054abd439caa8f8c" dependencies = [ + "bindgen 0.72.1", "cc", "cmake", "dunce", @@ -454,9 +456,9 @@ dependencies = [ [[package]] name = "axum" -version = "0.8.7" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425" +checksum = "8a18ed336352031311f4e0b4dd2ff392d4fbb370777c9d18d7fc9d7359f73871" dependencies = [ "axum-core 0.5.5", "bytes", @@ -527,7 +529,7 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-link", + "windows-link 0.2.1", ] [[package]] @@ -544,15 +546,15 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.8.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e050f626429857a27ddccb31e0aca21356bfa709c04041aefddac081a8f068a" +checksum 
= "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" [[package]] name = "bech32" -version = "0.11.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32637268377fc7b10a8c6d51de3e7fba1ce5dd371a96e342b34e6078db558e7f" +checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" [[package]] name = "bellman" @@ -590,7 +592,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "cexpr", "clang-sys", "itertools 0.12.1", @@ -601,7 +603,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -610,16 +612,18 @@ version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "cexpr", "clang-sys", "itertools 0.13.0", + "log", + "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash 2.1.1", "shlex", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -654,18 +658,18 @@ dependencies = [ [[package]] name = "bit-set" -version = "0.5.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" dependencies = [ "bit-vec", ] [[package]] name = "bit-vec" -version = "0.6.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" [[package]] name = "bitflags" @@ -675,9 +679,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" 
[[package]] name = "bitflags" -version = "2.10.0" +version = "2.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" +checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" [[package]] name = "bitflags-serde-legacy" @@ -685,7 +689,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b64e60c28b6d25ad92e8b367801ff9aa12b41d05fc8798055d296bace4a60cc" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "serde", ] @@ -740,7 +744,7 @@ checksum = "e0b121a9fe0df916e362fb3271088d071159cdf11db0e4182d02152850756eff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -776,9 +780,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.6.0" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1da5ab77c1437701eeff7c88d968729e7766172279eab0676857b3d63af7a6f" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" dependencies = [ "borsh-derive", "cfg_aliases", @@ -786,24 +790,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.6.0" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0686c856aa6aac0c4498f936d7d6a02df690f614c03e4d906d1018062b5c5e2c" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" dependencies = [ "once_cell", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.111", -] - -[[package]] -name = "bounded-vec" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09dc0086e469182132244e9b8d313a0742e1132da43a08c24b9dd3c18e0faf3a" -dependencies = [ - "thiserror 2.0.17", + "syn 2.0.106", ] [[package]] @@ -824,9 +819,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.12.1" +version = "1.12.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "63044e1ae8e69f3b5a92c736ca6269b8d12fa7efe39bf34ddb06d102cf0e2cab" +checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" dependencies = [ "memchr", "regex-automata", @@ -887,9 +882,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.11.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "bzip2-sys" @@ -930,9 +925,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.49" +version = "1.2.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215" +checksum = "ac9fe6cdbb24b6ade63616c0a0688e45bb56732262c158df3c0c4bea4ca47cb7" dependencies = [ "find-msvc-tools", "jobserver", @@ -993,10 +988,10 @@ checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ "iana-time-zone", "js-sys", - "num-traits", + "num-traits 0.2.19", "serde", "wasm-bindgen", - "windows-link", + "windows-link 0.2.1", ] [[package]] @@ -1005,7 +1000,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ - "crypto-common 0.1.7", + "crypto-common 0.1.6", "inout", "zeroize", ] @@ -1023,9 +1018,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.53" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" +checksum = "f4512b90fa68d3a9932cea5184017c5d200f5921df706d45e853537dea51508f" dependencies = [ "clap_builder", "clap_derive", @@ -1033,9 +1028,9 @@ dependencies 
= [ [[package]] name = "clap_builder" -version = "4.5.53" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" +checksum = "0025e98baa12e766c67ba13ff4695a887a1eba19569aad00a472546795bd6730" dependencies = [ "anstream", "anstyle", @@ -1052,7 +1047,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -1102,9 +1097,9 @@ checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "compression-codecs" -version = "0.4.34" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34a3cbbb8b6eca96f3a5c4bf6938d5b27ced3675d69f95bb51948722870bc323" +checksum = "ef8a506ec4b81c460798f572caead636d57d3d7e940f998160f52bd254bf2d23" dependencies = [ "compression-core", "flate2", @@ -1115,9 +1110,9 @@ dependencies = [ [[package]] name = "compression-core" -version = "0.4.31" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75984efb6ed102a0d42db99afb6c1948f0380d1d91808d5529916e6c08b49d8d" +checksum = "e47641d3deaf41fb1538ac1f54735925e275eaf3bf4d55c81b137fba797e5cbb" [[package]] name = "concurrent-queue" @@ -1128,6 +1123,18 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "console" +version = "0.15.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" +dependencies = [ + "encode_unicode", + "libc", + "once_cell", + "windows-sys 0.59.0", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -1162,9 +1169,9 @@ checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" [[package]] name = "convert_case" -version = "0.10.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" +checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7" dependencies = [ "unicode-segmentation", ] @@ -1207,6 +1214,16 @@ dependencies = [ "url", ] +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation" version = "0.10.1" @@ -1322,9 +1339,9 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.7" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", @@ -1373,7 +1390,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -1421,7 +1438,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -1443,7 +1460,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -1487,7 +1504,7 @@ dependencies = [ "cookie-factory", "displaydoc", "nom", - "num-traits", + "num-traits 0.2.19", "rusticata-macros", ] @@ -1501,14 +1518,42 @@ dependencies = [ "serde", ] +[[package]] +name = "derive-deftly" +version = "0.14.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8ea84d0109517cc2253d4a679bdda1e8989e9bd86987e9e4f75ffdda0095fd1" +dependencies = [ + "derive-deftly-macros 0.14.6", + "heck", +] + [[package]] name = "derive-deftly" version = "1.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0015cb20a284ec944852820598af3aef6309ea8dc317a0304441272ed620f196" dependencies = [ - "derive-deftly-macros", + "derive-deftly-macros 1.0.1", + "heck", +] + +[[package]] +name = "derive-deftly-macros" +version = "0.14.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357422a457ccb850dc8f1c1680e0670079560feaad6c2e247e3f345c4fab8a3f" +dependencies = [ "heck", + "indexmap 2.11.4", + "itertools 0.14.0", + "proc-macro-crate", + "proc-macro2", + "quote", + "sha3", + "strum 0.27.2", + "syn 2.0.106", + "void", ] [[package]] @@ -1518,14 +1563,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b48e8e38a4aa565da767322b5ca55fb0f8347983c5bc7f7647db069405420479" dependencies = [ "heck", - "indexmap 2.12.1", + "indexmap 2.11.4", "itertools 0.14.0", "proc-macro-crate", "proc-macro2", "quote", "sha3", "strum 0.27.2", - "syn 2.0.111", + "syn 2.0.106", "void", ] @@ -1537,7 +1582,7 @@ checksum = "74ef43543e701c01ad77d3a5922755c6a1d71b22d942cb8042be4994b380caff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -1584,24 +1629,23 @@ dependencies = [ [[package]] name = "derive_more" -version = "2.1.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10b768e943bed7bf2cab53df09f4bc34bfd217cdb57d971e769874c9a6710618" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "2.1.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d286bfdaf75e988b4a78e013ecd79c581e06399ab53fbacd2d916c2f904f30b" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version", - "syn 2.0.111", + "syn 2.0.106", "unicode-xid", ] @@ -1619,7 +1663,7 @@ 
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", "const-oid", - "crypto-common 0.1.7", + "crypto-common 0.1.6", "subtle", ] @@ -1672,14 +1716,14 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] name = "document-features" -version = "0.2.12" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4b8a88685455ed29a21542a33abd9cb6510b6b129abadabdcef0f4c55bc8f61" +checksum = "95249b50c6c185bee49034bcb378a49dc2b5dff0be90ff6616d31d64febab05d" dependencies = [ "litrs", ] @@ -1720,7 +1764,7 @@ checksum = "7a4102713839a8c01c77c165bc38ef2e83948f6397fa1e1dcfacec0f07b149d3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -1818,6 +1862,21 @@ dependencies = [ "zeroize", ] +[[package]] +name = "encode_unicode" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + [[package]] name = "enum-ordinalize" version = "3.1.15" @@ -1825,10 +1884,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bf1fa3f06bbff1ea5b1a9c7b14aa992a39657db60a2759457328d7e058f49ee" dependencies = [ "num-bigint", - "num-traits", + "num-traits 0.2.19", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", +] + +[[package]] +name = "enum_primitive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4551092f4d519593039259a9ed8daedf0da12e5109c5280338073eaeb81180" +dependencies = [ + "num-traits 0.1.43", ] [[package]] @@ 
-1861,7 +1929,7 @@ dependencies = [ [[package]] name = "equihash" version = "0.2.2" -source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" +source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" dependencies = [ "blake2b_simd", "core2 0.3.3", @@ -1916,7 +1984,7 @@ dependencies = [ [[package]] name = "f4jumble" version = "0.1.1" -source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" +source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" dependencies = [ "blake2b_simd", ] @@ -1986,9 +2054,9 @@ dependencies = [ [[package]] name = "find-msvc-tools" -version = "0.1.5" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" +checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127" [[package]] name = "fixed-hash" @@ -2010,9 +2078,9 @@ checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" [[package]] name = "flate2" -version = "1.1.5" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +checksum = "dc5a4e564e38c699f2880d3fda590bedc2e69f3f84cd48b457bd892ce61d0aa9" dependencies = [ "crc32fast", "miniz_oxide", @@ -2036,6 +2104,21 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + 
+[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.2.2" @@ -2056,7 +2139,7 @@ dependencies = [ "libm", "num-bigint", "num-integer", - "num-traits", + "num-traits 0.2.19", ] [[package]] @@ -2153,7 +2236,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -2163,7 +2246,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.35", + "rustls 0.23.32", "rustls-pki-types", ] @@ -2199,9 +2282,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.7" +version = "0.14.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2" dependencies = [ "typenum", "version_check", @@ -2255,7 +2338,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -2300,7 +2383,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.12.1", + "indexmap 2.11.4", "slab", "tokio", "tokio-util", @@ -2347,9 +2430,9 @@ dependencies = [ [[package]] name = "halo2_proofs" -version = "0.3.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05713f117155643ce10975e0bee44a274bcda2f4bb5ef29a999ad67c1fa8d4d3" +checksum = "019561b5f3be60731e7b72f3f7878c5badb4174362d860b03d3cf64cb47f90db" dependencies = [ "blake2b_simd", "ff", @@ -2393,9 +2476,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.16.1" +version = "0.16.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" [[package]] name = "hashlink" @@ -2462,11 +2545,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.12" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -2477,11 +2560,12 @@ checksum = "f558a64ac9af88b5ba400d99b579451af0d39c6d360980045b91aac966d705e2" [[package]] name = "http" -version = "1.4.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", + "fnv", "itoa", ] @@ -2553,9 +2637,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.8.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" +checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" dependencies = [ "atomic-waker", "bytes", @@ -2584,13 +2668,13 @@ dependencies = [ "hyper", "hyper-util", "log", - "rustls 0.23.35", + "rustls 0.23.32", "rustls-native-certs", "rustls-pki-types", "tokio", "tokio-rustls", "tower-service", - "webpki-roots 1.0.4", + "webpki-roots 1.0.3", ] [[package]] @@ -2606,11 +2690,27 @@ dependencies = [ "tower-service", ] +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", 
+ "http-body-util", + "hyper", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + [[package]] name = "hyper-util" -version = "0.1.19" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" +checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" dependencies = [ "base64", "bytes", @@ -2625,9 +2725,11 @@ dependencies = [ "percent-encoding", "pin-project-lite", "socket2 0.6.1", + "system-configuration", "tokio", "tower-service", "tracing", + "windows-registry", ] [[package]] @@ -2656,9 +2758,9 @@ dependencies = [ [[package]] name = "icu_collections" -version = "2.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", "potential_utf", @@ -2669,9 +2771,9 @@ dependencies = [ [[package]] name = "icu_locale_core" -version = "2.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", "litemap", @@ -2682,10 +2784,11 @@ dependencies = [ [[package]] name = "icu_normalizer" -version = "2.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ + "displaydoc", "icu_collections", "icu_normalizer_data", "icu_properties", @@ -2696,38 +2799,42 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "2.1.1" +version = "2.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "2.1.2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ + "displaydoc", "icu_collections", "icu_locale_core", "icu_properties_data", "icu_provider", + "potential_utf", "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "2.1.2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" -version = "2.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ "displaydoc", "icu_locale_core", + "stable_deref_trait", + "tinystr", "writeable", "yoke", "zerofrom", @@ -2788,7 +2895,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -2819,12 +2926,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.12.1" +version = "2.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", - "hashbrown 0.16.1", + "hashbrown 0.16.0", "serde", 
"serde_core", ] @@ -2841,7 +2948,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f37dccff2791ab604f9babef0ba14fbe0be30bd368dc541e2b08d07c8aa908f3" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "inotify-sys", "libc", ] @@ -2864,6 +2971,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "insta" +version = "1.43.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46fdb647ebde000f43b5b53f773c30cf9b0cb4300453208713fa38b2c70935a0" +dependencies = [ + "console", + "once_cell", + "similar", +] + [[package]] name = "integration-tests" version = "0.1.2" @@ -2871,7 +2989,7 @@ dependencies = [ "anyhow", "core2 0.4.0", "futures", - "prost 0.13.5", + "prost", "serde_json", "tempfile", "tokio", @@ -2881,12 +2999,9 @@ dependencies = [ "zaino-proto", "zaino-state", "zaino-testutils", - "zainod", - "zcash_local_net 0.1.0 (git+https://github.com/zingolabs/infrastructure.git?rev=20f3c206321825952fdf2d9d84947f5c9d2a1cf4)", - "zebra-chain 3.1.0", + "zebra-chain", "zebra-rpc", "zebra-state", - "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?rev=20f3c206321825952fdf2d9d84947f5c9d2a1cf4)", "zip32", ] @@ -2907,9 +3022,9 @@ checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "iri-string" -version = "0.7.9" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" dependencies = [ "memchr", "serde", @@ -2917,9 +3032,9 @@ dependencies = [ [[package]] name = "is_terminal_polyfill" -version = "1.70.2" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" +checksum = 
"7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" @@ -2966,9 +3081,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.83" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" +checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305" dependencies = [ "once_cell", "wasm-bindgen", @@ -2982,9 +3097,9 @@ checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" [[package]] name = "jsonrpsee" -version = "0.24.10" +version = "0.24.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e281ae70cc3b98dac15fced3366a880949e65fc66e345ce857a5682d152f3e62" +checksum = "37b26c20e2178756451cfeb0661fb74c47dd5988cb7e3939de7e9241fd604d42" dependencies = [ "jsonrpsee-core", "jsonrpsee-proc-macros", @@ -2996,9 +3111,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.24.10" +version = "0.24.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "348ee569eaed52926b5e740aae20863762b16596476e943c9e415a6479021622" +checksum = "456196007ca3a14db478346f58c7238028d55ee15c1df15115596e411ff27925" dependencies = [ "async-trait", "bytes", @@ -3019,22 +3134,22 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.24.10" +version = "0.24.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7398cddf5013cca4702862a2692b66c48a3bd6cf6ec681a47453c93d63cf8de5" +checksum = "5e65763c942dfc9358146571911b0cd1c361c2d63e2d2305622d40d36376ca80" dependencies = [ "heck", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] name = "jsonrpsee-server" -version = "0.24.10" +version = "0.24.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21429bcdda37dcf2d43b68621b994adede0e28061f816b038b0f18c70c143d51" +checksum = 
"55e363146da18e50ad2b51a0a7925fc423137a0b1371af8235b1c231a0647328" dependencies = [ "futures-util", "http", @@ -3059,9 +3174,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.24.10" +version = "0.24.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0f05e0028e55b15dbd2107163b3c744cd3bb4474f193f95d9708acbf5677e44" +checksum = "08a8e70baf945b6b5752fc8eb38c918a48f1234daf11355e07106d963f860089" dependencies = [ "http", "serde", @@ -3138,9 +3253,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.178" +version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" [[package]] name = "libloading" @@ -3149,7 +3264,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if", - "windows-link", + "windows-link 0.2.1", ] [[package]] @@ -3184,7 +3299,7 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "libc", "redox_syscall", ] @@ -3217,28 +3332,15 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.23" +version = "1.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" +checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" dependencies = [ "cc", "pkg-config", "vcpkg", ] -[[package]] -name = "libzcash_script" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3f8ce05b56f3cbc65ec7d0908adb308ed91281e022f61c8c3a0c9388b5380b17" -dependencies = [ - "bindgen 0.72.1", - "cc", - "thiserror 2.0.17", - "tracing", - "zcash_script", -] - [[package]] name = "linux-raw-sys" version = "0.4.15" @@ -3253,15 +3355,15 @@ checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" -version = "0.8.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "litrs" -version = "1.0.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d3d7f243d5c5a8b9bb5d6dd2b1602c0cb0b9db1621bafc7ed66e35ff9fe092" +checksum = "f5e54036fe321fd421e10d732f155734c4e4afd610dd556d9a82833ab3ee0bed" [[package]] name = "lmdb" @@ -3296,11 +3398,11 @@ dependencies = [ [[package]] name = "log" -version = "0.4.29" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" dependencies = [ - "serde_core", + "serde", ] [[package]] @@ -3393,9 +3495,9 @@ checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "memmap2" -version = "0.9.9" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744133e4a0e0a658e1374cf3bf8e415c4052a15a111acd372764c55b4177d490" +checksum = "843a98750cd611cc2965a8213b53b43e715f13c37a9e096c6408e69990961db7" dependencies = [ "libc", ] @@ -3420,9 +3522,9 @@ dependencies = [ [[package]] name = "metrics" -version = "0.24.3" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d5312e9ba3771cfa961b585728215e3d972c950a3eed9252aa093d6301277e8" +checksum = 
"25dea7ac8057892855ec285c440160265225438c3c45072613c25a4b26e98ef5" dependencies = [ "ahash 0.8.12", "portable-atomic", @@ -3463,14 +3565,14 @@ dependencies = [ [[package]] name = "mio" -version = "1.1.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", "log", "wasi 0.11.1+wasi-snapshot-preview1", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -3491,13 +3593,30 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" +[[package]] +name = "native-tls" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework 2.11.1", + "security-framework-sys", + "tempfile", +] + [[package]] name = "nix" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "cfg-if", "cfg_aliases", "libc", @@ -3525,7 +3644,7 @@ version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d3d07927151ff8575b7087f245456e549fea62edf0ec4e565a5ee50c8402bc3" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "inotify", "kqueue", "libc", @@ -3558,20 +3677,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", - "num-traits", + "num-traits 0.2.19", ] [[package]] name = "num-bigint-dig" -version = "0.8.6" +version = 
"0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e661dda6640fad38e827a6d4a310ff4763082116fe217f279885c97f511bb0b7" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" dependencies = [ + "byteorder", "lazy_static", "libm", "num-integer", "num-iter", - "num-traits", + "num-traits 0.2.19", "rand 0.8.5", "smallvec", "zeroize", @@ -3589,7 +3709,7 @@ version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "num-traits", + "num-traits 0.2.19", ] [[package]] @@ -3600,7 +3720,16 @@ checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", - "num-traits", + "num-traits 0.2.19", +] + +[[package]] +name = "num-traits" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" +dependencies = [ + "num-traits 0.2.19", ] [[package]] @@ -3625,9 +3754,9 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.5" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" +checksum = "a973b4e44ce6cad84ce69d797acf9a044532e4184c4f267913d1b546a0727b7a" dependencies = [ "num_enum_derive", "rustversion", @@ -3635,14 +3764,14 @@ dependencies = [ [[package]] name = "num_enum_derive" -version = "0.7.5" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" +checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -3662,9 +3791,9 @@ checksum = 
"42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "once_cell_polyfill" -version = "1.70.2" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" +checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" [[package]] name = "oneshot-fused-workaround" @@ -3681,12 +3810,50 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +[[package]] +name = "openssl" +version = "0.10.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24ad14dd45412269e1a30f52ad8f0664f0f4f4a89ee8fe28c3b3527021ebb654" +dependencies = [ + "bitflags 2.9.4", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "openssl-probe" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-sys" +version = "0.9.110" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a9f0075ba3c21b09f8e8b2026584b1d18d49388648f2fbbf3c97ea8deced8e2" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "option-ext" version = "0.2.0" @@ -3734,7 +3901,7 @@ version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" dependencies = [ - "num-traits", + "num-traits 0.2.19", ] [[package]] @@ -3834,7 +4001,7 @@ 
dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -3863,7 +4030,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-link", + "windows-link 0.2.1", ] [[package]] @@ -3928,7 +4095,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -3943,7 +4110,7 @@ dependencies = [ [[package]] name = "pepper-sync" version = "0.0.1" -source = "git+https://github.com/zingolabs/zingolib.git?branch=dev#a94c2e338cbddc76db5903bdc5c0760a0dea3e56" +source = "git+https://github.com/zingolabs/zingolib.git?rev=f88e1d76ea244d6cc48d7fd4c3a609c6598318dc#f88e1d76ea244d6cc48d7fd4c3a609c6598318dc" dependencies = [ "bip32", "byteorder", @@ -3957,23 +4124,22 @@ dependencies = [ "rayon", "sapling-crypto", "shardtree", - "simple-mermaid", "subtle", "thiserror 2.0.17", "tokio", "tonic 0.13.1", "tracing", - "zcash_address 0.10.0", - "zcash_client_backend", - "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_keys 0.12.0", + "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_client_backend 0.19.1 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_keys 0.10.0", "zcash_note_encryption", - "zcash_primitives 0.26.0", - "zcash_protocol 0.7.0", - "zcash_transparent 0.6.0", + "zcash_primitives 0.24.0", + "zcash_protocol 0.6.1", + "zcash_transparent 0.4.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", "zingo-memo", "zingo-status", - "zingo_netutils 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?branch=dev)", + "zingo_netutils 0.1.0 (git+https://github.com/zingolabs/infrastructure.git?rev=cc2af1ac80cb6b7a7f0a0b8f331f7b0873d667bf)", "zip32", ] @@ 
-3990,7 +4156,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap 2.12.1", + "indexmap 2.11.4", ] [[package]] @@ -4023,7 +4189,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -4052,7 +4218,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -4137,9 +4303,9 @@ dependencies = [ [[package]] name = "potential_utf" -version = "0.1.4" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" dependencies = [ "zerovec", ] @@ -4166,7 +4332,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -4207,7 +4373,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93980406f12d9f8140ed5abe7155acb10bb1e69ea55c88960b9c2f117445ef96" dependencies = [ "equivalent", - "indexmap 2.12.1", + "indexmap 2.11.4", "serde", ] @@ -4217,7 +4383,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.23.9", + "toml_edit 0.23.7", ] [[package]] @@ -4239,14 +4405,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] name = "proc-macro2" -version = "1.0.103" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" dependencies = [ "unicode-ident", ] @@ -4259,49 +4425,50 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", "version_check", "yansi", ] [[package]] name = "proptest" -version = "1.2.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65" +checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" dependencies = [ "bit-set", - "bitflags 1.3.2", - "byteorder", + "bit-vec", + "bitflags 2.9.4", "lazy_static", - "num-traits", + "num-traits 0.2.19", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - "regex-syntax 0.6.29", + "regex-syntax", "rusty-fork", "tempfile", "unarray", ] [[package]] -name = "prost" -version = "0.13.5" +name = "proptest-derive" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ - "bytes", - "prost-derive 0.13.5", + "proc-macro2", + "quote", + "syn 2.0.106", ] [[package]] name = "prost" -version = "0.14.1" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" dependencies = [ "bytes", - "prost-derive 0.14.1", + "prost-derive", ] [[package]] @@ -4317,59 +4484,24 @@ dependencies = [ "once_cell", "petgraph", "prettyplease", - "prost 0.13.5", - "prost-types 0.13.5", + "prost", + "prost-types", "regex", - "syn 2.0.111", + "syn 2.0.106", "tempfile", ] [[package]] -name = "prost-build" 
-version = "0.14.1" +name = "prost-derive" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ - "heck", - "itertools 0.14.0", - "log", - "multimap", - "once_cell", - "petgraph", - "prettyplease", - "prost 0.14.1", - "prost-types 0.14.1", - "pulldown-cmark", - "pulldown-cmark-to-cmark", - "regex", - "syn 2.0.111", - "tempfile", -] - -[[package]] -name = "prost-derive" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" -dependencies = [ - "anyhow", + "anyhow", "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.111", -] - -[[package]] -name = "prost-derive" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" -dependencies = [ - "anyhow", - "itertools 0.14.0", - "proc-macro2", - "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -4378,16 +4510,7 @@ version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" dependencies = [ - "prost 0.13.5", -] - -[[package]] -name = "prost-types" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" -dependencies = [ - "prost 0.14.1", + "prost", ] [[package]] @@ -4426,36 +4549,16 @@ dependencies = [ "psl-types", ] -[[package]] -name = "pulldown-cmark" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" -dependencies = [ - "bitflags 2.10.0", - "memchr", - "unicase", -] - 
-[[package]] -name = "pulldown-cmark-to-cmark" -version = "21.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8246feae3db61428fd0bb94285c690b460e4517d83152377543ca802357785f1" -dependencies = [ - "pulldown-cmark", -] - [[package]] name = "pwd-grp" -version = "1.0.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e2023f41b5fcb7c30eb5300a5733edfaa9e0e0d502d51b586f65633fd39e40c" +checksum = "b94fdf3867b7f2889a736f0022ea9386766280d2cca4bdbe41629ada9e4f3b8f" dependencies = [ - "derive-deftly", + "derive-deftly 0.14.6", "libc", "paste", - "thiserror 2.0.17", + "thiserror 1.0.69", ] [[package]] @@ -4499,7 +4602,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.1.1", - "rustls 0.23.35", + "rustls 0.23.32", "socket2 0.6.1", "thiserror 2.0.17", "tokio", @@ -4519,7 +4622,7 @@ dependencies = [ "rand 0.9.2", "ring 0.17.14", "rustc-hash 2.1.1", - "rustls 0.23.35", + "rustls 0.23.32", "rustls-pki-types", "slab", "thiserror 2.0.17", @@ -4544,9 +4647,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.42" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" dependencies = [ "proc-macro2", ] @@ -4729,7 +4832,7 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", ] [[package]] @@ -4760,7 +4863,7 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -4772,7 +4875,7 @@ dependencies = [ "aho-corasick", "memchr", "regex-automata", - "regex-syntax 0.8.8", + "regex-syntax", ] [[package]] @@ -4783,15 +4886,9 
@@ checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.8", + "regex-syntax", ] -[[package]] -name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - [[package]] name = "regex-syntax" version = "0.8.8" @@ -4809,34 +4906,43 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.25" +version = "0.12.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6eff9328d40131d43bd911d42d79eb6a47312002a4daefc9e37f17e74a7701a" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" dependencies = [ + "async-compression", "base64", "bytes", "cookie", "cookie_store", + "encoding_rs", "futures-core", + "futures-util", + "h2", "http", "http-body", "http-body-util", "hyper", "hyper-rustls", + "hyper-tls", "hyper-util", "js-sys", "log", + "mime", + "native-tls", "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.35", + "rustls 0.23.32", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", "tokio", + "tokio-native-tls", "tokio-rustls", + "tokio-util", "tower 0.5.2", "tower-http", "tower-service", @@ -4844,7 +4950,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 1.0.4", + "webpki-roots 1.0.3", ] [[package]] @@ -4966,15 +5072,15 @@ checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" [[package]] name = "rsa" -version = "0.9.9" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40a0376c50d0358279d9d643e4bf7b7be212f1f4ff1da9070a7b54d22ef75c88" +checksum = "78928ac1ed176a5ca1d17e578a1825f3d81ca54cf41053a592584b020cfd691b" dependencies = [ "const-oid", "digest 0.10.7", "num-bigint-dig", "num-integer", - "num-traits", + "num-traits 0.2.19", "pkcs1", "pkcs8", 
"rand_core 0.6.4", @@ -4991,7 +5097,7 @@ version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7753b721174eb8ff87a9a0e799e2d7bc3749323e773db92e0984debb00019d6e" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -5020,7 +5126,7 @@ dependencies = [ "proc-macro2", "quote", "rust-embed-utils", - "syn 2.0.111", + "syn 2.0.106", "walkdir", ] @@ -5043,7 +5149,7 @@ dependencies = [ "arrayvec", "borsh", "bytes", - "num-traits", + "num-traits 0.2.19", "rand 0.8.5", "rkyv", "serde", @@ -5098,7 +5204,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "errno", "libc", "linux-raw-sys 0.4.15", @@ -5111,7 +5217,7 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "errno", "libc", "linux-raw-sys 0.11.0", @@ -5132,16 +5238,16 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.35" +version = "0.23.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" +checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" dependencies = [ "aws-lc-rs", "log", "once_cell", "ring 0.17.14", "rustls-pki-types", - "rustls-webpki 0.103.8", + "rustls-webpki 0.103.7", "subtle", "zeroize", ] @@ -5155,7 +5261,7 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 3.5.1", ] [[package]] @@ -5169,9 +5275,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" dependencies = [ "web-time", "zeroize", @@ -5189,9 +5295,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.8" +version = "0.103.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +checksum = "e10b3f4191e8a80e6b43eebabfac91e5dcecebb27a71f04e820c47ec41d314bf" dependencies = [ "aws-lc-rs", "ring 0.17.14", @@ -5310,9 +5416,9 @@ dependencies = [ [[package]] name = "schemars" -version = "1.1.0" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289" +checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0" dependencies = [ "dyn-clone", "ref-cast", @@ -5384,14 +5490,27 @@ dependencies = [ "zeroize", ] +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.9.4", + "core-foundation 0.9.4", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + [[package]] name = "security-framework" version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" dependencies = [ - "bitflags 2.10.0", - "core-foundation", + "bitflags 2.9.4", + "core-foundation 0.10.1", "core-foundation-sys", "libc", "security-framework-sys", @@ -5463,7 +5582,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -5482,7 +5601,7 @@ version = "1.0.145" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.11.4", "itoa", "memchr", "ryu", @@ -5513,17 +5632,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.16.1" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" +checksum = "6093cd8c01b25262b84927e0f7151692158fab02d961e04c979d3903eba7ecc5" dependencies = [ "base64", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.12.1", + "indexmap 2.11.4", "schemars 0.9.0", - "schemars 1.1.0", + "schemars 1.0.4", "serde_core", "serde_json", "serde_with_macros", @@ -5532,14 +5651,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.16.1" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" +checksum = "a7e6c180db0816026a61afa1cff5344fb7ebded7e4d3062772179f2501481c27" dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -5548,13 +5667,24 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.11.4", "itoa", "ryu", "serde", "unsafe-libyaml", ] +[[package]] +name = "sha-1" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", +] + [[package]] name = "sha1" version = "0.10.6" @@ -5613,7 +5743,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"637e95dcd06bc1bb3f86ed9db1e1832a70125f32daae071ef37dcb7701b7d4fe" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "either", "incrementalmerkletree", "tracing", @@ -5638,9 +5768,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.7" +version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" +checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" dependencies = [ "libc", ] @@ -5657,9 +5787,9 @@ dependencies = [ [[package]] name = "simd-adler32" -version = "0.3.8" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" [[package]] name = "simdutf8" @@ -5668,10 +5798,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] -name = "simple-mermaid" -version = "0.2.0" +name = "similar" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589144a964b4b30fe3a83b4bb1a09e2475aac194ec832a046a23e75bddf9eb29" +checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" [[package]] name = "sinsemilla" @@ -5698,9 +5828,9 @@ checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "slotmap" -version = "1.1.1" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdd58c3c93c3d278ca835519292445cb4b0d4dc59ccfdf7ceadaab3f8aeb4038" +checksum = "dbff4acf519f630b3a3ddcfaea6c06b42174d9a44bc70c620e9ed1649d58b82a" dependencies = [ "serde", "version_check", @@ -5761,6 +5891,28 @@ dependencies = [ "sha1", ] +[[package]] +name = "spandoc" 
+version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25ed5a886d0234ac48bea41d450e4253cdd0642656249d6454e74c023d0f8821" +dependencies = [ + "spandoc-attribute", + "tracing", + "tracing-futures", +] + +[[package]] +name = "spandoc-attribute" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bdfb59103e43a0f99a346b57860d50f2138a7008d08acd964e9ac0fef3ae9a5" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "spin" version = "0.5.2" @@ -5876,7 +6028,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -5888,7 +6040,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -5910,9 +6062,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.111" +version = "2.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" dependencies = [ "proc-macro2", "quote", @@ -5936,7 +6088,28 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.9.4", + "core-foundation 0.9.4", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", ] [[package]] @@ -5984,7 +6157,7 @@ checksum = 
"4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -5995,7 +6168,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -6050,12 +6223,11 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.8.2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", - "serde_core", "zerovec", ] @@ -6100,7 +6272,17 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", ] [[package]] @@ -6109,7 +6291,7 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls 0.23.35", + "rustls 0.23.32", "tokio", ] @@ -6127,9 +6309,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.17" +version = "0.7.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" dependencies = [ "bytes", "futures-core", @@ -6184,7 +6366,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" 
dependencies = [ - "indexmap 2.12.1", + "indexmap 2.11.4", "serde", "serde_spanned", "toml_datetime 0.6.11", @@ -6194,11 +6376,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.23.9" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d7cbc3b4b49633d57a0509303158ca50de80ae32c265093b24c414705807832" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.11.4", "toml_datetime 0.7.3", "toml_parser", "winnow", @@ -6239,8 +6421,7 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project", - "prost 0.13.5", - "rustls-native-certs", + "prost", "rustls-pemfile", "socket2 0.5.10", "tokio", @@ -6259,7 +6440,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9" dependencies = [ "async-trait", - "axum 0.8.7", + "axum 0.8.6", "base64", "bytes", "h2", @@ -6271,7 +6452,7 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project", - "prost 0.13.5", + "prost", "socket2 0.5.10", "tokio", "tokio-rustls", @@ -6283,35 +6464,6 @@ dependencies = [ "webpki-roots 0.26.11", ] -[[package]] -name = "tonic" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" -dependencies = [ - "async-trait", - "axum 0.8.7", - "base64", - "bytes", - "h2", - "http", - "http-body", - "http-body-util", - "hyper", - "hyper-timeout", - "hyper-util", - "percent-encoding", - "pin-project", - "socket2 0.6.1", - "sync_wrapper", - "tokio", - "tokio-stream", - "tower 0.5.2", - "tower-layer", - "tower-service", - "tracing", -] - [[package]] name = "tonic-build" version = "0.12.3" @@ -6320,10 +6472,10 @@ checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" dependencies = [ "prettyplease", "proc-macro2", - "prost-build 0.13.5", - 
"prost-types 0.13.5", + "prost-build", + "prost-types", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -6334,63 +6486,23 @@ checksum = "eac6f67be712d12f0b41328db3137e0d0757645d8904b4cb7d51cd9c2279e847" dependencies = [ "prettyplease", "proc-macro2", - "prost-build 0.13.5", - "prost-types 0.13.5", - "quote", - "syn 2.0.111", -] - -[[package]] -name = "tonic-build" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c40aaccc9f9eccf2cd82ebc111adc13030d23e887244bc9cfa5d1d636049de3" -dependencies = [ - "prettyplease", - "proc-macro2", + "prost-build", + "prost-types", "quote", - "syn 2.0.111", -] - -[[package]] -name = "tonic-prost" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" -dependencies = [ - "bytes", - "prost 0.14.1", - "tonic 0.14.2", -] - -[[package]] -name = "tonic-prost-build" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4a16cba4043dc3ff43fcb3f96b4c5c154c64cbd18ca8dce2ab2c6a451d058a2" -dependencies = [ - "prettyplease", - "proc-macro2", - "prost-build 0.14.1", - "prost-types 0.14.1", - "quote", - "syn 2.0.111", - "tempfile", - "tonic-build 0.14.2", + "syn 2.0.106", ] [[package]] name = "tonic-reflection" -version = "0.14.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34da53e8387581d66db16ff01f98a70b426b091fdf76856e289d5c1bd386ed7b" +checksum = "878d81f52e7fcfd80026b7fdb6a9b578b3c3653ba987f87f0dce4b64043cba27" dependencies = [ - "prost 0.14.1", - "prost-types 0.14.1", + "prost", + "prost-types", "tokio", "tokio-stream", - "tonic 0.14.2", - "tonic-prost", + "tonic 0.12.3", ] [[package]] @@ -6399,7 +6511,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5294c85610f52bcbe36fddde04a3a994c4ec382ceed455cfdc8252be7046008" 
dependencies = [ - "derive-deftly", + "derive-deftly 1.0.1", "educe", "futures", "oneshot-fused-workaround", @@ -6435,7 +6547,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "357650fb5bff5e94e5ecc7ee26c6af3f584c2be178b45da8f5ab81cf9f9d4795" dependencies = [ "bytes", - "derive-deftly", + "derive-deftly 1.0.1", "digest 0.10.7", "educe", "getrandom 0.2.16", @@ -6453,10 +6565,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5341a132563ebeffa45ff60e6519394ee7ba58cb5cf65ba99e7ef879789d87b7" dependencies = [ "amplify", - "bitflags 2.10.0", + "bitflags 2.9.4", "bytes", "caret", - "derive-deftly", + "derive-deftly 1.0.1", "derive_more", "educe", "paste", @@ -6593,7 +6705,7 @@ checksum = "bca6cc0af790f5f02d8a06c8f692fa471207de2739d8b2921c04f9570af34d75" dependencies = [ "amplify", "cfg-if", - "derive-deftly", + "derive-deftly 1.0.1", "derive_builder_fork_arti", "educe", "either", @@ -6762,7 +6874,7 @@ checksum = "29a8f3ddf135d23e2c5443e97fb30c635767daa44923b142915d22bdaf47e2ea" dependencies = [ "amplify", "base64ct", - "derive-deftly", + "derive-deftly 1.0.1", "derive_builder_fork_arti", "derive_more", "dyn-clone", @@ -6827,7 +6939,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b22ecf1c5b6bfa7849bf92cad3daab16bbc741ac62a61c9fea47c8be2f982e01" dependencies = [ - "derive-deftly", + "derive-deftly 1.0.1", "derive_more", "downcast-rs", "paste", @@ -6851,7 +6963,7 @@ dependencies = [ "amplify", "arrayvec", "cfg-if", - "derive-deftly", + "derive-deftly 1.0.1", "derive_builder_fork_arti", "derive_more", "downcast-rs", @@ -6889,7 +7001,7 @@ dependencies = [ "base64ct", "by_address", "caret", - "derive-deftly", + "derive-deftly 1.0.1", "derive_builder_fork_arti", "derive_more", "hex", @@ -6918,7 +7030,7 @@ dependencies = [ "ctr", "curve25519-dalek", "der-parser", - "derive-deftly", + "derive-deftly 1.0.1", "derive_more", "digest 0.10.7", "ed25519-dalek", @@ -6963,7 
+7075,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d63eef6dd4d38b16199cf201de07b6de4a6af310f67bd71067d22ef746eb1a1d" dependencies = [ - "derive-deftly", + "derive-deftly 1.0.1", "derive_more", "dyn-clone", "educe", @@ -6992,7 +7104,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e17883b3b2ef17a5f9ad4ae8a78de2c4b3d629ccfeb66c15c4cb33494384f08" dependencies = [ "async-trait", - "bitflags 2.10.0", + "bitflags 2.9.4", "derive_more", "futures", "humantime", @@ -7022,7 +7134,7 @@ checksum = "aec11efe729e4ca9c5b03a8702f94b82dfd0ab450c0d58c4ca5ee9e4c49e6f89" dependencies = [ "amplify", "base64ct", - "bitflags 2.10.0", + "bitflags 2.9.4", "cipher", "derive_builder_fork_arti", "derive_more", @@ -7060,7 +7172,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be9958219e20477aef5645f99d0d3695e01bb230bbd36a0fd4c207f5428abe6b" dependencies = [ - "derive-deftly", + "derive-deftly 1.0.1", "derive_more", "filetime", "fs-mistrust", @@ -7094,7 +7206,7 @@ dependencies = [ "caret", "cipher", "coarsetime", - "derive-deftly", + "derive-deftly 1.0.1", "derive_builder_fork_arti", "derive_more", "digest 0.10.7", @@ -7196,7 +7308,7 @@ checksum = "9077af79aac5ad0c5336af1cc41a31c617bbc09261210a2427deb84f14356857" dependencies = [ "amplify", "async-trait", - "derive-deftly", + "derive-deftly 1.0.1", "derive_more", "educe", "futures", @@ -7224,7 +7336,7 @@ checksum = "d3892f6d0c323b87a2390f41e91c0294c6d5852f00e955e41e85a0116636e82d" dependencies = [ "amplify", "caret", - "derive-deftly", + "derive-deftly 1.0.1", "educe", "safelog", "subtle", @@ -7239,7 +7351,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7388f506c9278d07421e6799aa8a912adee4ea6921b3dd08a1247a619de82124" dependencies = [ - "derive-deftly", + "derive-deftly 1.0.1", "derive_more", "thiserror 2.0.17", "tor-memquota", @@ -7273,7 +7385,7 @@ 
checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.12.1", + "indexmap 2.11.4", "pin-project-lite", "slab", "sync_wrapper", @@ -7286,9 +7398,9 @@ dependencies = [ [[package]] name = "tower-batch-control" -version = "1.0.1" +version = "0.2.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e6cf52578f98b4da47335c26c4f883f7993b1a9b9d2f5420eb8dbfd5dd19a28" +checksum = "5a6823ca72ad0d8ebf40ddfe11c104f0ccb242befb7fd3bc20c33b6798a31eba" dependencies = [ "futures", "futures-core", @@ -7315,11 +7427,11 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.8" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "bytes", "futures-util", "http", @@ -7345,9 +7457,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.43" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -7357,20 +7469,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.31" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] name = "tracing-core" -version = "0.1.35" +version = "0.1.34" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", @@ -7409,9 +7521,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.22" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" dependencies = [ "matchers", "nu-ansi-term", @@ -7444,7 +7556,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -7455,7 +7567,7 @@ checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -7524,23 +7636,17 @@ dependencies = [ "version_check", ] -[[package]] -name = "unicase" -version = "2.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" - [[package]] name = "unicode-ident" -version = "1.0.22" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" [[package]] name = "unicode-normalization" -version = "0.1.25" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd4f6878c9cb28d874b009da9e8d183b5abc80117c40bbd187a1fde336be6e8" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] @@ -7563,7 +7669,7 @@ version = 
"0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ - "crypto-common 0.1.7", + "crypto-common 0.1.6", "subtle", ] @@ -7620,9 +7726,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.19.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ "js-sys", "wasm-bindgen", @@ -7654,7 +7760,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -7779,9 +7885,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.106" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d" dependencies = [ "cfg-if", "once_cell", @@ -7790,11 +7896,25 @@ dependencies = [ "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn 2.0.106", + "wasm-bindgen-shared", +] + [[package]] name = "wasm-bindgen-futures" -version = "0.4.56" +version = "0.4.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" +checksum = "7e038d41e478cc73bae0ff9b36c60cff1c98b8f38f8d7e8061e79ee63608ac5c" dependencies = [ "cfg-if", "js-sys", @@ -7805,9 +7925,9 @@ dependencies = 
[ [[package]] name = "wasm-bindgen-macro" -version = "0.2.106" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7815,22 +7935,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.106" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" dependencies = [ - "bumpalo", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", + "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.106" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1" dependencies = [ "unicode-ident", ] @@ -7843,9 +7963,9 @@ checksum = "323f4da9523e9a669e1eaf9c6e763892769b1d38c623913647bfdc1532fe4549" [[package]] name = "web-sys" -version = "0.3.83" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +checksum = "9367c417a924a74cae129e6a2ae3b47fabb1f8995595ab474029da749a8be120" dependencies = [ "js-sys", "wasm-bindgen", @@ -7873,14 +7993,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "webpki-roots 1.0.4", + "webpki-roots 1.0.3", ] [[package]] name = "webpki-roots" -version = "1.0.4" +version = "1.0.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" +checksum = "32b130c0d2d49f8b6889abc456e795e82525204f27c42cf767cf0d7734e089b8" dependencies = [ "rustls-pki-types", ] @@ -7970,9 +8090,9 @@ checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", - "windows-link", - "windows-result", - "windows-strings", + "windows-link 0.2.1", + "windows-result 0.4.1", + "windows-strings 0.5.1", ] [[package]] @@ -7983,7 +8103,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -7994,22 +8114,57 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + [[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +[[package]] +name = "windows-registry" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" +dependencies = [ + "windows-link 0.1.3", + "windows-result 0.3.4", + "windows-strings 0.4.2", +] + +[[package]] +name = "windows-result" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +dependencies = [ + "windows-link 0.1.3", +] + [[package]] name = "windows-result" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ - "windows-link", + "windows-link 0.2.1", +] + +[[package]] +name = "windows-strings" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +dependencies = [ + "windows-link 0.1.3", ] [[package]] @@ -8018,7 +8173,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ - "windows-link", + "windows-link 0.2.1", ] [[package]] @@ -8054,7 +8209,7 @@ version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ - "windows-link", + "windows-link 0.2.1", ] [[package]] @@ -8079,7 +8234,7 @@ version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows-link", + "windows-link 0.2.1", "windows_aarch64_gnullvm 0.53.1", "windows_aarch64_msvc 0.53.1", "windows_i686_gnu 0.53.1", @@ -8188,9 +8343,9 @@ checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" -version = "0.7.14" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" dependencies = [ "memchr", ] @@ -8209,9 +8364,9 @@ checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "writeable" -version = "0.6.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" +checksum = 
"ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "wyz" @@ -8258,10 +8413,11 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.8.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ + "serde", "stable_deref_trait", "yoke-derive", "zerofrom", @@ -8269,13 +8425,13 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.8.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", "synstructure", ] @@ -8285,7 +8441,7 @@ version = "0.1.2" dependencies = [ "serde", "thiserror 1.0.69", - "zebra-chain 3.1.0", + "zebra-chain", ] [[package]] @@ -8297,9 +8453,9 @@ dependencies = [ "derive_more", "hex", "http", - "indexmap 2.12.1", + "indexmap 2.11.4", "jsonrpsee-types", - "prost 0.13.5", + "prost", "reqwest", "serde", "serde_json", @@ -8311,7 +8467,7 @@ dependencies = [ "url", "zaino-proto", "zaino-testvectors", - "zebra-chain 3.1.0", + "zebra-chain", "zebra-rpc", ] @@ -8319,7 +8475,7 @@ dependencies = [ name = "zaino-proto" version = "0.1.2" dependencies = [ - "prost 0.13.5", + "prost", "tonic 0.12.3", "tonic-build 0.12.3", "which 4.4.2", @@ -8331,7 +8487,6 @@ version = "0.1.2" dependencies = [ "futures", "jsonrpsee", - "serde", "thiserror 1.0.69", "tokio", "tonic 0.12.3", @@ -8341,7 +8496,7 @@ dependencies = [ "zaino-fetch", "zaino-proto", "zaino-state", - "zebra-chain 3.1.0", + "zebra-chain", "zebra-rpc", ] @@ -8351,7 +8506,7 @@ version = "0.1.2" dependencies = [ "arc-swap", "async-trait", - 
"bitflags 2.10.0", + "bitflags 2.9.4", "blake2", "bs58", "cargo-lock", @@ -8361,15 +8516,16 @@ dependencies = [ "derive_more", "futures", "hex", - "indexmap 2.12.1", + "incrementalmerkletree", + "indexmap 2.11.4", "lmdb", "lmdb-sys", "nonempty", "once_cell", "primitive-types 0.13.1", - "prost 0.13.5", + "proptest", + "prost", "reqwest", - "sapling-crypto", "serde", "serde_json", "sha2 0.10.9", @@ -8385,12 +8541,13 @@ dependencies = [ "zaino-common", "zaino-fetch", "zaino-proto", - "zcash_address 0.9.0", + "zcash_address 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "zcash_keys 0.10.1", - "zcash_primitives 0.26.0", + "zcash_local_net", + "zcash_primitives 0.24.1", "zcash_protocol 0.6.2", - "zcash_transparent 0.4.0", - "zebra-chain 3.1.0", + "zcash_transparent 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zebra-chain", "zebra-rpc", "zebra-state", ] @@ -8406,25 +8563,20 @@ dependencies = [ "proptest", "tempfile", "tokio", - "tonic 0.13.1", - "tracing", + "tonic 0.12.3", "tracing-subscriber", "zaino-common", - "zaino-proto", - "zaino-serve", "zaino-state", "zaino-testvectors", "zainod", - "zcash_client_backend", - "zcash_local_net 0.1.0 (git+https://github.com/zingolabs/infrastructure.git?rev=20f3c206321825952fdf2d9d84947f5c9d2a1cf4)", - "zcash_protocol 0.7.0", - "zebra-chain 3.1.0", - "zebra-state", + "zcash_client_backend 0.19.1 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_local_net", + "zcash_protocol 0.6.1", + "zebra-chain", "zingo_common_components 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?tag=zingo_common_components_v0.1.0)", - "zingo_netutils 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?rev=b64dfd6d6a2a597a5456d1cc7b2bc9b649328187)", - "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?rev=20f3c206321825952fdf2d9d84947f5c9d2a1cf4)", + "zingo_netutils 0.1.0 
(git+https://github.com/zingolabs/zingo-common.git?tag=zingo_common_components_v0.1.0)", + "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?tag=zcash_local_net_v0.1.0)", "zingolib", - "zingolib_testutils", "zip32", ] @@ -8452,10 +8604,24 @@ dependencies = [ "zaino-fetch", "zaino-serve", "zaino-state", - "zebra-chain 3.1.0", + "zebra-chain", "zebra-state", ] +[[package]] +name = "zcash_address" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c020e943fc2df6303d22b2bcbb3c0fd25f9d2419cbec508d13e66dcd77e354a6" +dependencies = [ + "bech32", + "bs58", + "core2 0.3.3", + "f4jumble 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_protocol 0.5.4", +] + [[package]] name = "zcash_address" version = "0.9.0" @@ -8472,21 +8638,83 @@ dependencies = [ [[package]] name = "zcash_address" -version = "0.10.0" -source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" +version = "0.9.0" +source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" dependencies = [ "bech32", "bs58", "core2 0.3.3", - "f4jumble 0.1.1 (git+https://github.com/zcash/librustzcash?rev=3ba772c9b8)", - "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=3ba772c9b8)", - "zcash_protocol 0.7.0", + "f4jumble 0.1.1 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_protocol 0.6.1", ] [[package]] name = "zcash_client_backend" -version = "0.20.0" -source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" +version = "0.19.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bed6bdec743667d0c3b69fa46aaebcce28a3897ab7317165cd1b719c59a732a0" +dependencies = [ + "arti-client", + "base64", + "bech32", + "bip32", + "bls12_381", + "bs58", + "crossbeam-channel", + "document-features", + "dynosaur", + "fs-mistrust", + "futures-util", + "group", + "hex", + "http-body-util", + "hyper", + "hyper-util", + "incrementalmerkletree", + "memuse", + "nonempty", + "orchard", + "pasta_curves", + "percent-encoding", + "prost", + "rand 0.8.5", + "rand_core 0.6.4", + "rayon", + "rust_decimal", + "sapling-crypto", + "secrecy", + "serde", + "serde_json", + "shardtree", + "subtle", + "time", + "time-core", + "tokio", + "tokio-rustls", + "tonic 0.13.1", + "tonic-build 0.13.1", + "tor-rtcompat", + "tower 0.5.2", + "tracing", + "trait-variant", + "webpki-roots 0.26.11", + "which 7.0.3", + "zcash_address 0.8.0", + "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_keys 0.9.0", + "zcash_note_encryption", + "zcash_primitives 0.23.1", + "zcash_protocol 0.5.4", + "zcash_transparent 0.3.0", + "zip32", + "zip321 0.4.0", +] + +[[package]] +name = "zcash_client_backend" +version = "0.19.1" +source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" dependencies = [ "arti-client", "base64", @@ -8512,13 +8740,12 @@ dependencies = [ "orchard", "pasta_curves", "percent-encoding", - "prost 0.13.5", + "prost", "rand 0.8.5", "rand_core 0.6.4", "rayon", "rust_decimal", "sapling-crypto", - "secp256k1", "secrecy", "serde", "serde_json", @@ -8536,16 +8763,15 @@ dependencies = [ "trait-variant", "webpki-roots 0.26.11", "which 7.0.3", - "zcash_address 0.10.0", - "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=3ba772c9b8)", - "zcash_keys 0.12.0", + "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_encoding 0.3.0 
(git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_keys 0.10.0", "zcash_note_encryption", - "zcash_primitives 0.26.0", - "zcash_protocol 0.7.0", - "zcash_script", - "zcash_transparent 0.6.0", + "zcash_primitives 0.24.0", + "zcash_protocol 0.6.1", + "zcash_transparent 0.4.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", "zip32", - "zip321", + "zip321 0.5.0", ] [[package]] @@ -8561,7 +8787,7 @@ dependencies = [ [[package]] name = "zcash_encoding" version = "0.3.0" -source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" +source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" dependencies = [ "core2 0.3.3", "nonempty", @@ -8580,11 +8806,12 @@ dependencies = [ [[package]] name = "zcash_keys" -version = "0.10.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6c8d3d5a08a66f76264c72172e692ec362218b091181cda30c04d00a4561cd8" +checksum = "20664d96a0e4de98f41b6b7a3b40a527e5f5428ca7f34758a084e60778d3b824" dependencies = [ "bech32", + "bip32", "blake2b_simd", "bls12_381", "bs58", @@ -8593,21 +8820,23 @@ dependencies = [ "group", "memuse", "nonempty", + "orchard", "rand_core 0.6.4", + "sapling-crypto", "secrecy", "subtle", "tracing", - "zcash_address 0.9.0", + "zcash_address 0.8.0", "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_protocol 0.6.2", - "zcash_transparent 0.4.0", + "zcash_protocol 0.5.4", + "zcash_transparent 0.3.0", "zip32", ] [[package]] name = "zcash_keys" -version = "0.12.0" -source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" +version = "0.10.0" +source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" 
dependencies = [ "bech32", "bip32", @@ -8626,40 +8855,43 @@ dependencies = [ "secrecy", "subtle", "tracing", - "zcash_address 0.10.0", - "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=3ba772c9b8)", - "zcash_protocol 0.7.0", - "zcash_transparent 0.6.0", + "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_protocol 0.6.1", + "zcash_transparent 0.4.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", "zip32", ] [[package]] -name = "zcash_local_net" -version = "0.1.0" -source = "git+https://github.com/zingolabs/infrastructure.git?branch=dev#c0897685d653faf484ee5ade2694b323a2dc9866" +name = "zcash_keys" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6c8d3d5a08a66f76264c72172e692ec362218b091181cda30c04d00a4561cd8" dependencies = [ - "getset", - "hex", - "http", - "json", - "portpicker", - "serde_json", - "tempfile", - "thiserror 1.0.69", - "tokio", + "bech32", + "blake2b_simd", + "bls12_381", + "bs58", + "core2 0.3.3", + "document-features", + "group", + "memuse", + "nonempty", + "rand_core 0.6.4", + "secrecy", + "subtle", "tracing", - "zcash_protocol 0.7.0", - "zebra-chain 3.1.0", - "zebra-node-services", - "zebra-rpc", - "zingo_common_components 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?branch=dev)", - "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?branch=dev)", + "zcash_address 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_protocol 0.6.2", + "zcash_transparent 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zip32", ] [[package]] name = "zcash_local_net" version = "0.1.0" -source = 
"git+https://github.com/zingolabs/infrastructure.git?rev=20f3c206321825952fdf2d9d84947f5c9d2a1cf4#20f3c206321825952fdf2d9d84947f5c9d2a1cf4" +source = "git+https://github.com/zingolabs/infrastructure.git?tag=zcash_local_net_v0.1.0#9f479fe8610ac15bb67f06d9e65ee37bcfebd228" dependencies = [ "getset", "hex", @@ -8672,12 +8904,11 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", - "zcash_protocol 0.7.0", - "zebra-chain 3.1.0", + "zcash_protocol 0.6.1", + "zebra-chain", "zebra-node-services", "zebra-rpc", - "zingo_common_components 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?branch=dev)", - "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?rev=20f3c206321825952fdf2d9d84947f5c9d2a1cf4)", + "zingo_common_components 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?rev=23814ee904ee64913585c0b8f871c6dbd94504c6)", ] [[package]] @@ -8695,9 +8926,9 @@ dependencies = [ [[package]] name = "zcash_primitives" -version = "0.24.1" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76362b79e432bde2f22b3defcb6919d4fb50446985997169da3cc3ae4035a6d9" +checksum = "c0ff99bb0d3aa7558bbaf51a3d4c072733afe364b45df9b3cc31118dcb644a51" dependencies = [ "bip32", "blake2b_simd", @@ -8726,19 +8957,19 @@ dependencies = [ "sha2 0.10.9", "subtle", "tracing", - "zcash_address 0.9.0", + "zcash_address 0.8.0", "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "zcash_note_encryption", - "zcash_protocol 0.6.2", + "zcash_protocol 0.5.4", "zcash_spec", - "zcash_transparent 0.4.0", + "zcash_transparent 0.3.0", "zip32", ] [[package]] name = "zcash_primitives" -version = "0.26.0" -source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" +version = "0.24.0" +source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" dependencies = [ "bip32", 
"blake2b_simd", @@ -8747,7 +8978,7 @@ dependencies = [ "core2 0.3.3", "crypto-common 0.2.0-rc.1", "document-features", - "equihash 0.2.2 (git+https://github.com/zcash/librustzcash?rev=3ba772c9b8)", + "equihash 0.2.2 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", "ff", "fpe", "getset", @@ -8767,21 +8998,84 @@ dependencies = [ "sha2 0.10.9", "subtle", "tracing", - "zcash_address 0.10.0", - "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=3ba772c9b8)", + "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", "zcash_note_encryption", - "zcash_protocol 0.7.0", - "zcash_script", + "zcash_protocol 0.6.1", "zcash_spec", - "zcash_transparent 0.6.0", + "zcash_transparent 0.4.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zip32", +] + +[[package]] +name = "zcash_primitives" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76362b79e432bde2f22b3defcb6919d4fb50446985997169da3cc3ae4035a6d9" +dependencies = [ + "bip32", + "blake2b_simd", + "block-buffer 0.11.0-rc.3", + "bs58", + "core2 0.3.3", + "crypto-common 0.2.0-rc.1", + "document-features", + "equihash 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ff", + "fpe", + "getset", + "group", + "hex", + "incrementalmerkletree", + "jubjub", + "memuse", + "nonempty", + "orchard", + "rand 0.8.5", + "rand_core 0.6.4", + "redjubjub", + "ripemd 0.1.3", + "sapling-crypto", + "secp256k1", + "sha2 0.10.9", + "subtle", + "tracing", + "zcash_address 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_note_encryption", + "zcash_protocol 0.6.2", + "zcash_spec", + "zcash_transparent 0.4.0 
(registry+https://github.com/rust-lang/crates.io-index)", "zip32", ] [[package]] name = "zcash_proofs" -version = "0.26.1" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a2c13bb673d542608a0e6502ac5494136e7ce4ce97e92dd239489b2523eed9" +checksum = "9f90d9521161f7308c2fe6bddf771947f1a0fcd01b9e8a3b624c30a5661ad945" +dependencies = [ + "bellman", + "blake2b_simd", + "bls12_381", + "document-features", + "group", + "home", + "jubjub", + "known-folders", + "lazy_static", + "rand_core 0.6.4", + "redjubjub", + "sapling-crypto", + "tracing", + "xdg", + "zcash_primitives 0.24.1", +] + +[[package]] +name = "zcash_proofs" +version = "0.24.0" +source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" dependencies = [ "bellman", "blake2b_simd", @@ -8797,16 +9091,15 @@ dependencies = [ "redjubjub", "sapling-crypto", "tracing", - "wagyu-zcash-parameters", "xdg", - "zcash_primitives 0.26.0", + "zcash_primitives 0.24.0", ] [[package]] name = "zcash_protocol" -version = "0.6.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cc76dd1f77be473e5829dbd34890bcd36d08b1e8dde2da0aea355c812a8f28" +checksum = "42344f5735237d6e0eedd3680f1c92f64e9c4144045d7b5c82f4867c2cbc0a02" dependencies = [ "core2 0.3.3", "document-features", @@ -8816,8 +9109,20 @@ dependencies = [ [[package]] name = "zcash_protocol" -version = "0.7.0" -source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" +version = "0.6.1" +source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" +dependencies = [ + "core2 0.3.3", + "document-features", + "hex", + "memuse", +] + +[[package]] +name = "zcash_protocol" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"12cc76dd1f77be473e5829dbd34890bcd36d08b1e8dde2da0aea355c812a8f28" dependencies = [ "core2 0.3.3", "document-features", @@ -8827,17 +9132,20 @@ dependencies = [ [[package]] name = "zcash_script" -version = "0.4.2" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bed6cf5b2b4361105d4ea06b2752f0c8af4641756c7fbc9858a80af186c234f" +checksum = "caf6e76f310bb2d3cc233086a97c1710ba1de7ffbbf8198b8113407d0f427dfc" dependencies = [ - "bitflags 2.10.0", - "bounded-vec", + "bindgen 0.72.1", + "bitflags 2.9.4", + "cc", + "enum_primitive", "ripemd 0.1.3", "secp256k1", - "sha1", + "sha-1", "sha2 0.10.9", "thiserror 2.0.17", + "tracing", ] [[package]] @@ -8851,32 +9159,32 @@ dependencies = [ [[package]] name = "zcash_transparent" -version = "0.4.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a7c162a8aa6f708e842503ed5157032465dadfb1d7f63adf9db2d45213a0b11" +checksum = "5b1302cf726e88326c2c6e9bbd2634064bb344df7740e0b6bacf2245abd1eebe" dependencies = [ "bip32", "blake2b_simd", "bs58", "core2 0.3.3", - "document-features", "getset", "hex", "ripemd 0.1.3", "secp256k1", "sha2 0.10.9", "subtle", - "zcash_address 0.9.0", + "zcash_address 0.8.0", "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_protocol 0.6.2", + "zcash_protocol 0.5.4", "zcash_spec", "zip32", ] [[package]] name = "zcash_transparent" -version = "0.6.0" -source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a7c162a8aa6f708e842503ed5157032465dadfb1d7f63adf9db2d45213a0b11" dependencies = [ "bip32", "blake2b_simd", @@ -8889,79 +9197,44 @@ dependencies = [ "secp256k1", "sha2 0.10.9", "subtle", - "zcash_address 0.10.0", - "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=3ba772c9b8)", - "zcash_protocol 0.7.0", - 
"zcash_script", + "zcash_address 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_protocol 0.6.2", "zcash_spec", "zip32", ] [[package]] -name = "zebra-chain" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17a86ec712da2f25d3edc7e5cf0b1d15ef41ab35305e253f0f7cd9cecc0f1939" +name = "zcash_transparent" +version = "0.4.0" +source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" dependencies = [ - "bech32", - "bitflags 2.10.0", - "bitflags-serde-legacy", - "bitvec", + "bip32", "blake2b_simd", - "blake2s_simd", "bs58", - "byteorder", - "chrono", - "dirs", - "ed25519-zebra", - "equihash 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "futures", - "group", - "halo2_proofs", + "core2 0.3.3", + "document-features", + "getset", "hex", - "humantime", - "incrementalmerkletree", - "itertools 0.14.0", - "jubjub", - "lazy_static", - "num-integer", - "orchard", - "primitive-types 0.12.2", - "rand_core 0.6.4", - "rayon", - "reddsa", - "redjubjub", "ripemd 0.1.3", - "sapling-crypto", "secp256k1", - "serde", - "serde-big-array", - "serde_with", "sha2 0.10.9", - "sinsemilla", - "static_assertions", - "tempfile", - "thiserror 2.0.17", - "tracing", - "uint 0.10.0", - "x25519-dalek", - "zcash_address 0.9.0", - "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_history", - "zcash_note_encryption", - "zcash_primitives 0.24.1", - "zcash_protocol 0.6.2", - "zcash_transparent 0.4.0", + "subtle", + "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_protocol 0.6.1", + "zcash_spec", + "zip32", ] [[package]] name = 
"zebra-chain" -version = "3.1.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4aa7e85afd7bdf159e8c9a973d32bfc410be42ce82c2396690ae1208933bb8" +checksum = "17a86ec712da2f25d3edc7e5cf0b1d15ef41ab35305e253f0f7cd9cecc0f1939" dependencies = [ "bech32", - "bitflags 2.10.0", + "bitflags 2.9.4", "bitflags-serde-legacy", "bitvec", "blake2b_simd", @@ -8969,7 +9242,6 @@ dependencies = [ "bs58", "byteorder", "chrono", - "derive-getters", "dirs", "ed25519-zebra", "equihash 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -8985,6 +9257,10 @@ dependencies = [ "num-integer", "orchard", "primitive-types 0.12.2", + "proptest", + "proptest-derive", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_core 0.6.4", "rayon", "reddsa", @@ -9005,27 +9281,26 @@ dependencies = [ "tracing", "uint 0.10.0", "x25519-dalek", - "zcash_address 0.10.0", + "zcash_address 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "zcash_history", "zcash_note_encryption", - "zcash_primitives 0.26.0", - "zcash_protocol 0.7.0", - "zcash_script", - "zcash_transparent 0.6.0", + "zcash_primitives 0.24.1", + "zcash_protocol 0.6.2", + "zcash_transparent 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zebra-test", ] [[package]] name = "zebra-consensus" -version = "3.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770efa97a22262ee80290b0ffa251cddb34d7c50351c4e5255ed80f644d035e0" +checksum = "19a44698a96b007f00da9a2e4c4cdee6c5adfc22996bdeb06ccc781b96d597c0" dependencies = [ "bellman", "blake2b_simd", "bls12_381", "chrono", - "derive-getters", "futures", "futures-util", "halo2_proofs", @@ -9046,9 +9321,9 @@ dependencies = [ "tower-fallback", "tracing", "tracing-futures", - "zcash_proofs", - "zcash_protocol 0.7.0", - "zebra-chain 3.1.0", + "wagyu-zcash-parameters", + "zcash_proofs 0.24.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "zebra-chain", "zebra-node-services", "zebra-script", "zebra-state", @@ -9056,11 +9331,11 @@ dependencies = [ [[package]] name = "zebra-network" -version = "2.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3edeb353c33962fb5f9012745ddb44d33ee90acb8c9410669bf54d72488b8cf" +checksum = "0230d1e515518e0ef33ca668f3abb98e364485f384f7ee101506ec64bca7e7d3" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "byteorder", "bytes", "chrono", @@ -9068,7 +9343,7 @@ dependencies = [ "futures", "hex", "humantime-serde", - "indexmap 2.12.1", + "indexmap 2.11.4", "itertools 0.14.0", "lazy_static", "metrics", @@ -9088,14 +9363,14 @@ dependencies = [ "tracing", "tracing-error", "tracing-futures", - "zebra-chain 3.1.0", + "zebra-chain", ] [[package]] name = "zebra-node-services" -version = "2.1.1" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "863ddd987ed6373872b20629a471c535ae2ba114a193415227635840ec570ccd" +checksum = "c808614a9d245ae8d6d3177c06a8c78c2a8566219b090d6a85dc46f4364eadad" dependencies = [ "color-eyre", "jsonrpsee-types", @@ -9103,15 +9378,14 @@ dependencies = [ "serde", "serde_json", "tokio", - "tower 0.4.13", - "zebra-chain 3.1.0", + "zebra-chain", ] [[package]] name = "zebra-rpc" -version = "3.1.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c81f52b33b83f5a7f9b0377981843242c2657027053a406f01b06862dddbf747" +checksum = "b1ae56eb3c668366a751621f40e1c0569c32ae92467ec1f8bac5b755db308126" dependencies = [ "base64", "chrono", @@ -9122,34 +9396,31 @@ dependencies = [ "hex", "http-body-util", "hyper", - "indexmap 2.12.1", + "indexmap 2.11.4", "jsonrpsee", "jsonrpsee-proc-macros", "jsonrpsee-types", "nix", - "prost 0.14.1", + "prost", "rand 0.8.5", - "sapling-crypto", "semver", "serde", "serde_json", "serde_with", "tokio", "tokio-stream", - "tonic 0.14.2", - 
"tonic-prost", - "tonic-prost-build", + "tonic 0.12.3", + "tonic-build 0.12.3", "tonic-reflection", "tower 0.4.13", "tracing", "which 8.0.0", - "zcash_address 0.10.0", - "zcash_keys 0.12.0", - "zcash_primitives 0.26.0", - "zcash_protocol 0.7.0", - "zcash_script", - "zcash_transparent 0.6.0", - "zebra-chain 3.1.0", + "zcash_address 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_keys 0.10.1", + "zcash_primitives 0.24.1", + "zcash_protocol 0.6.2", + "zcash_transparent 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zebra-chain", "zebra-consensus", "zebra-network", "zebra-node-services", @@ -9159,35 +9430,32 @@ dependencies = [ [[package]] name = "zebra-script" -version = "3.0.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11acd2a4f9077e3698bbc51a3f04c247381f7bf7f77aacc408e5760d3246993b" +checksum = "a76a2e972e414caa3635b8c2d21f20c21a71c69f76b37bf7419d97ed0c2277e7" dependencies = [ - "libzcash_script", "thiserror 2.0.17", - "zcash_primitives 0.26.0", + "zcash_primitives 0.24.1", "zcash_script", - "zebra-chain 3.1.0", + "zebra-chain", ] [[package]] name = "zebra-state" -version = "3.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b47250eaaa047bebde853a54184ab00ab19f47ed451f35b7c1ae8fe17004d87a" +checksum = "129b32692f22207719dd1c5ddcbae59b96a322e2329664787f6247acef78c7f3" dependencies = [ "bincode", "chrono", "crossbeam-channel", - "derive-getters", - "derive-new", "dirs", "futures", "hex", "hex-literal", "human_bytes", "humantime-serde", - "indexmap 2.12.1", + "indexmap 2.11.4", "itertools 0.14.0", "lazy_static", "metrics", @@ -9196,7 +9464,6 @@ dependencies = [ "regex", "rlimit", "rocksdb", - "sapling-crypto", "semver", "serde", "tempfile", @@ -9204,28 +9471,56 @@ dependencies = [ "tokio", "tower 0.4.13", "tracing", - "zebra-chain 3.1.0", - "zebra-node-services", + "zebra-chain", +] + +[[package]] +name = "zebra-test" 
+version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97c615cfa095226c8a0db20cb734181b4b63d36241a40492a0fa359ffc6ebc50" +dependencies = [ + "color-eyre", + "futures", + "hex", + "humantime", + "indexmap 2.11.4", + "insta", + "itertools 0.14.0", + "lazy_static", + "once_cell", + "owo-colors", + "proptest", + "rand 0.8.5", + "regex", + "spandoc", + "thiserror 2.0.17", + "tinyvec", + "tokio", + "tower 0.4.13", + "tracing", + "tracing-error", + "tracing-subscriber", ] [[package]] name = "zerocopy" -version = "0.8.31" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.31" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -9245,7 +9540,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", "synstructure", ] @@ -9266,14 +9561,14 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] name = "zerotrie" -version = "0.2.3" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" dependencies = [ "displaydoc", "yoke", @@ -9282,11 +9577,10 @@ dependencies = [ [[package]] name = 
"zerovec" -version = "0.11.5" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" dependencies = [ - "serde", "yoke", "zerofrom", "zerovec-derive", @@ -9294,31 +9588,56 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.11.2" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", +] + +[[package]] +name = "zingo-infra-services" +version = "0.1.0" +source = "git+https://github.com/zingolabs/infrastructure.git?tag=zingo-infra-services-0.4.0#8986db77041172aa3ed896d1fd37bef5723c8b87" +dependencies = [ + "getset", + "hex", + "http", + "json", + "portpicker", + "reqwest", + "serde_json", + "sha2 0.10.9", + "tempfile", + "thiserror 1.0.69", + "tokio", + "tracing", + "tracing-subscriber", + "zcash_primitives 0.24.0", + "zcash_protocol 0.6.1", + "zebra-chain", + "zebra-node-services", + "zebra-rpc", ] [[package]] name = "zingo-memo" version = "0.0.1" -source = "git+https://github.com/zingolabs/zingolib.git?branch=dev#a94c2e338cbddc76db5903bdc5c0760a0dea3e56" +source = "git+https://github.com/zingolabs/zingolib.git?rev=f88e1d76ea244d6cc48d7fd4c3a609c6598318dc#f88e1d76ea244d6cc48d7fd4c3a609c6598318dc" dependencies = [ - "zcash_address 0.10.0", - "zcash_client_backend", - "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_keys 0.12.0", - "zcash_primitives 0.26.0", + "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_client_backend 0.19.1 
(git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_keys 0.10.0", + "zcash_primitives 0.24.0", ] [[package]] name = "zingo-price" version = "0.0.1" -source = "git+https://github.com/zingolabs/zingolib.git?branch=dev#a94c2e338cbddc76db5903bdc5c0760a0dea3e56" +source = "git+https://github.com/zingolabs/zingolib.git?rev=f88e1d76ea244d6cc48d7fd4c3a609c6598318dc#f88e1d76ea244d6cc48d7fd4c3a609c6598318dc" dependencies = [ "byteorder", "reqwest", @@ -9327,17 +9646,17 @@ dependencies = [ "serde", "serde_json", "thiserror 2.0.17", - "zcash_client_backend", - "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_client_backend 0.19.1 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", ] [[package]] name = "zingo-status" version = "0.0.1" -source = "git+https://github.com/zingolabs/zingolib.git?branch=dev#a94c2e338cbddc76db5903bdc5c0760a0dea3e56" +source = "git+https://github.com/zingolabs/zingolib.git?rev=f88e1d76ea244d6cc48d7fd4c3a609c6598318dc#f88e1d76ea244d6cc48d7fd4c3a609c6598318dc" dependencies = [ "byteorder", - "zcash_primitives 0.26.0", + "zcash_primitives 0.24.0", ] [[package]] @@ -9345,21 +9664,21 @@ name = "zingo_common_components" version = "0.1.0" source = "git+https://github.com/zingolabs/zingo-common.git?tag=zingo_common_components_v0.1.0#7407028a9c561561d174740e70170c4c20529bcd" dependencies = [ - "zebra-chain 2.0.0", + "zebra-chain", ] [[package]] name = "zingo_common_components" version = "0.1.0" -source = "git+https://github.com/zingolabs/zingo-common.git?branch=dev#b64dfd6d6a2a597a5456d1cc7b2bc9b649328187" +source = 
"git+https://github.com/zingolabs/zingo-common.git?rev=23814ee904ee64913585c0b8f871c6dbd94504c6#23814ee904ee64913585c0b8f871c6dbd94504c6" dependencies = [ - "zebra-chain 3.1.0", + "zebra-chain", ] [[package]] name = "zingo_netutils" version = "0.1.0" -source = "git+https://github.com/zingolabs/zingo-common.git?branch=dev#b64dfd6d6a2a597a5456d1cc7b2bc9b649328187" +source = "git+https://github.com/zingolabs/zingo-common.git?tag=zingo_common_components_v0.1.0#7407028a9c561561d174740e70170c4c20529bcd" dependencies = [ "http", "http-body", @@ -9371,31 +9690,35 @@ dependencies = [ "tonic 0.13.1", "tower 0.5.2", "webpki-roots 0.25.4", - "zcash_client_backend", + "zcash_client_backend 0.19.1 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", ] [[package]] name = "zingo_netutils" version = "0.1.0" -source = "git+https://github.com/zingolabs/zingo-common.git?rev=b64dfd6d6a2a597a5456d1cc7b2bc9b649328187#b64dfd6d6a2a597a5456d1cc7b2bc9b649328187" +source = "git+https://github.com/zingolabs/infrastructure.git?rev=cc2af1ac80cb6b7a7f0a0b8f331f7b0873d667bf#cc2af1ac80cb6b7a7f0a0b8f331f7b0873d667bf" dependencies = [ "http", "http-body", + "http-body-util", "hyper", "hyper-rustls", "hyper-util", + "prost", "thiserror 1.0.69", + "time", + "time-core", "tokio-rustls", "tonic 0.13.1", "tower 0.5.2", "webpki-roots 0.25.4", - "zcash_client_backend", + "zcash_client_backend 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "zingo_test_vectors" version = "0.0.1" -source = "git+https://github.com/zingolabs/infrastructure.git?branch=dev#c0897685d653faf484ee5ade2694b323a2dc9866" +source = "git+https://github.com/zingolabs/infrastructure.git?tag=zcash_local_net_v0.1.0#9f479fe8610ac15bb67f06d9e65ee37bcfebd228" dependencies = [ "bip0039", ] @@ -9403,7 +9726,7 @@ dependencies = [ [[package]] name = "zingo_test_vectors" version = "0.0.1" -source = 
"git+https://github.com/zingolabs/infrastructure.git?rev=20f3c206321825952fdf2d9d84947f5c9d2a1cf4#20f3c206321825952fdf2d9d84947f5c9d2a1cf4" +source = "git+https://github.com/zingolabs/infrastructure.git?rev=89e0b665967a0dd1950855ad37ce18d9c5a14709#89e0b665967a0dd1950855ad37ce18d9c5a14709" dependencies = [ "bip0039", ] @@ -9411,7 +9734,7 @@ dependencies = [ [[package]] name = "zingolib" version = "0.0.1" -source = "git+https://github.com/zingolabs/zingolib.git?branch=dev#a94c2e338cbddc76db5903bdc5c0760a0dea3e56" +source = "git+https://github.com/zingolabs/zingolib.git?rev=f88e1d76ea244d6cc48d7fd4c3a609c6598318dc#f88e1d76ea244d6cc48d7fd4c3a609c6598318dc" dependencies = [ "append-only-vec", "bech32", @@ -9436,11 +9759,11 @@ dependencies = [ "orchard", "pepper-sync", "portpicker", - "prost 0.13.5", + "prost", "rand 0.8.5", "ring 0.17.14", "rust-embed", - "rustls 0.23.35", + "rustls 0.23.32", "sapling-crypto", "secp256k1", "secrecy", @@ -9456,40 +9779,21 @@ dependencies = [ "tracing", "tracing-subscriber", "webpki-roots 0.25.4", - "zcash_address 0.10.0", - "zcash_client_backend", - "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_keys 0.12.0", - "zcash_primitives 0.26.0", - "zcash_proofs", - "zcash_protocol 0.7.0", - "zcash_transparent 0.6.0", - "zebra-chain 3.1.0", + "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_client_backend 0.19.1 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_keys 0.10.0", + "zcash_primitives 0.24.0", + "zcash_proofs 0.24.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_protocol 0.6.1", + "zcash_transparent 0.4.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zebra-chain", 
+ "zingo-infra-services", "zingo-memo", "zingo-price", "zingo-status", - "zingo_common_components 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?branch=dev)", - "zingo_netutils 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?branch=dev)", - "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?branch=dev)", - "zip32", -] - -[[package]] -name = "zingolib_testutils" -version = "0.1.0" -source = "git+https://github.com/zingolabs/zingolib.git?branch=dev#a94c2e338cbddc76db5903bdc5c0760a0dea3e56" -dependencies = [ - "bip0039", - "http", - "pepper-sync", - "portpicker", - "tempfile", - "zcash_local_net 0.1.0 (git+https://github.com/zingolabs/infrastructure.git?branch=dev)", - "zcash_protocol 0.7.0", - "zebra-chain 3.1.0", - "zingo_common_components 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?branch=dev)", - "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?branch=dev)", - "zingolib", + "zingo_netutils 0.1.0 (git+https://github.com/zingolabs/infrastructure.git?rev=cc2af1ac80cb6b7a7f0a0b8f331f7b0873d667bf)", + "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?rev=89e0b665967a0dd1950855ad37ce18d9c5a14709)", "zip32", ] @@ -9508,14 +9812,27 @@ dependencies = [ [[package]] name = "zip321" -version = "0.6.0" -source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41f92f290c86ae1bdcdc4c41ce67fdabf8cd2a75bc89be8235cd5b1354efae06" +dependencies = [ + "base64", + "nom", + "percent-encoding", + "zcash_address 0.8.0", + "zcash_protocol 0.5.4", +] + +[[package]] +name = "zip321" +version = "0.5.0" +source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" dependencies = [ "base64", "nom", "percent-encoding", - "zcash_address 0.10.0", - 
"zcash_protocol 0.7.0", + "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_protocol 0.6.1", ] [[package]] diff --git a/zaino-state/Cargo.toml b/zaino-state/Cargo.toml index b3f3d332c..8822284ec 100644 --- a/zaino-state/Cargo.toml +++ b/zaino-state/Cargo.toml @@ -66,6 +66,9 @@ sapling-crypto = "0.5.0" tempfile = { workspace = true } tracing-subscriber = { workspace = true } once_cell = { workspace = true } +zebra-chain = { workspace = true, features = ["proptest-impl"] } +proptest.workspace = true +incrementalmerkletree = "*" [build-dependencies] whoami = { workspace = true } diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 61d73690e..e64f3092e 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -11,7 +11,7 @@ use zebra_chain::{ parameters::GENESIS_PREVIOUS_BLOCK_HASH, LedgerState, }; -use zebra_state::HashOrHeight; +use zebra_state::{FromDisk, HashOrHeight, IntoDisk as _}; use crate::{ chain_index::source::BlockchainSourceResult, BlockHash, BlockchainSource, TransactionHash, @@ -46,6 +46,69 @@ struct ProptestMockchain { branching_segments: Vec>>>, } +impl ProptestMockchain { + fn best_branch(&self) -> SummaryDebug>> { + let mut best_branch_and_work = None; + for branch in self.branching_segments.clone() { + let branch_chainwork: u128 = branch + .iter() + .map(|block| { + block + .header + .difficulty_threshold + .to_work() + .unwrap() + .as_u128() + }) + .sum(); + match best_branch_and_work { + Some((ref _b, w)) => { + if w < branch_chainwork { + best_branch_and_work = Some((branch, branch_chainwork)) + } + } + None => best_branch_and_work = Some((branch, branch_chainwork)), + } + } + best_branch_and_work.unwrap().0 + } + + fn all_blocks_arb_branch_order(&self) -> impl Iterator> { + self.genesis_segment.iter().chain( + 
self.branching_segments + .iter() + .flat_map(|branch| branch.iter()), + ) + } + + fn get_block_and_all_preceeding( + &self, + // This probably doesn't need to allow FnMut closures (Fn should suffice) + // but there's no cost to allowing it + mut block_identifier: impl FnMut(&zebra_chain::block::Block) -> bool, + ) -> std::option::Option>> { + let mut blocks = Vec::new(); + for block in self.genesis_segment.iter() { + blocks.push(block); + if block_identifier(block) { + return Some(blocks); + } + } + for branch in self.branching_segments.iter() { + let mut branch_blocks = Vec::new(); + for block in branch.iter() { + branch_blocks.push(block); + if block_identifier(block) { + blocks.extend_from_slice(&branch_blocks); + return Some(blocks); + } + } + } + + None + } +} + #[async_trait] impl BlockchainSource for ProptestMockchain { /// Returns the block by hash or height @@ -53,7 +116,32 @@ impl BlockchainSource for ProptestMockchain { &self, id: HashOrHeight, ) -> BlockchainSourceResult>> { - todo!() + match id { + HashOrHeight::Hash(hash) => { + let matches_hash = |block: &&Arc| block.hash() == hash; + Ok(self + .genesis_segment + .iter() + .find(matches_hash) + .or_else(|| { + self.branching_segments + .iter() + .flat_map(|vec| vec.iter()) + .find(matches_hash) + }) + .cloned()) + } + HashOrHeight::Height(height) => Ok(self + .genesis_segment + .iter() + .find(|block| block.coinbase_height().unwrap() == height) + .cloned() + .or_else(|| { + self.best_branch() + .into_iter() + .find(|block| block.coinbase_height().unwrap() == height) + })), + } } /// Returns the block commitment tree data by hash @@ -64,7 +152,61 @@ impl BlockchainSource for ProptestMockchain { Option<(zebra_chain::sapling::tree::Root, u64)>, Option<(zebra_chain::orchard::tree::Root, u64)>, )> { - todo!() + let Some(chain_up_to_block) = + self.get_block_and_all_preceeding(|block| block.hash().0 == id.0) + else { + return Ok((None, None)); + }; + + let (sapling, orchard) = + chain_up_to_block + 
.iter() + .fold((None, None), |(mut sapling, mut orchard), block| { + for transaction in &block.transactions { + for sap_commitment in transaction.sapling_note_commitments() { + let sap_commitment = zebra_chain::sapling::tree::Node::from_bytes( + sap_commitment.to_bytes(), + ); + + sapling = Some(sapling.unwrap_or_else(|| { + incrementalmerkletree::frontier::Frontier::<_, 32>::empty() + })); + + sapling = sapling.map(|mut tree| { + tree.append(sap_commitment); + tree + }); + } + for orc_commitment in transaction.orchard_note_commitments() { + let orc_commitment = + zebra_chain::orchard::tree::Node::from(*orc_commitment); + + orchard = Some(orchard.unwrap_or_else(|| { + incrementalmerkletree::frontier::Frontier::<_, 32>::empty() + })); + + orchard = orchard.map(|mut tree| { + tree.append(orc_commitment); + tree + }); + } + } + (sapling, orchard) + }); + Ok(( + sapling.map(|sap_front| { + ( + zebra_chain::sapling::tree::Root::from_bytes(sap_front.root().as_ref()), + sap_front.tree_size(), + ) + }), + orchard.map(|orc_front| { + ( + zebra_chain::orchard::tree::Root::from_bytes(orc_front.root().as_bytes()), + orc_front.tree_size(), + ) + }), + )) } /// Returns the sapling and orchard treestate by hash @@ -72,14 +214,15 @@ impl BlockchainSource for ProptestMockchain { &self, id: BlockHash, ) -> BlockchainSourceResult<(Option>, Option>)> { - todo!() + // I don't think this is used for sync? + unimplemented!() } /// Returns the complete list of txids currently in the mempool. async fn get_mempool_txids( &self, ) -> BlockchainSourceResult>> { - todo!() + Ok(None) } /// Returns the transaction by txid @@ -87,14 +230,20 @@ impl BlockchainSource for ProptestMockchain { &self, txid: TransactionHash, ) -> BlockchainSourceResult>> { - todo!() + Ok(self.all_blocks_arb_branch_order().find_map(|block| { + block + .transactions + .iter() + .find(|transaction| transaction.hash() == txid.into()) + .cloned() + })) } /// Returns the hash of the block at the tip of the best chain. 
async fn get_best_block_hash( &self, ) -> BlockchainSourceResult> { - todo!() + Ok(Some(self.best_branch().last().unwrap().hash())) } /// Get a listener for new nonfinalized blocks, @@ -107,7 +256,16 @@ impl BlockchainSource for ProptestMockchain { >, Box, > { - todo!() + let (sender, receiver) = tokio::sync::mpsc::channel(1_000); + let self_clone = self.clone(); + tokio::task::spawn((|| async move { + for block in self_clone.all_blocks_arb_branch_order() { + sender.send((block.hash(), block.clone())).await.unwrap() + } + })()) + .await + .unwrap(); + Ok(Some(receiver)) } } From 86feb70233a4b8f7ca95fd4c48f6826cf2e2c637 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Mon, 20 Oct 2025 19:07:49 -0300 Subject: [PATCH 005/114] some part of the block commitment hack is failing --- zaino-state/src/chain_index/mempool.rs | 1 + .../src/chain_index/non_finalised_state.rs | 7 +++ .../chain_index/tests/proptest_blockgen.rs | 59 ++++++++++++------- 3 files changed, 47 insertions(+), 20 deletions(-) diff --git a/zaino-state/src/chain_index/mempool.rs b/zaino-state/src/chain_index/mempool.rs index 607528a13..95f63c9a2 100644 --- a/zaino-state/src/chain_index/mempool.rs +++ b/zaino-state/src/chain_index/mempool.rs @@ -69,6 +69,7 @@ impl Mempool { } } + println!("get block hash"); let best_block_hash: BlockHash = match fetcher.get_best_block_hash().await { Ok(block_hash_opt) => match block_hash_opt { Some(hash) => hash.into(), diff --git a/zaino-state/src/chain_index/non_finalised_state.rs b/zaino-state/src/chain_index/non_finalised_state.rs index e312cf4c2..59cb2fe52 100644 --- a/zaino-state/src/chain_index/non_finalised_state.rs +++ b/zaino-state/src/chain_index/non_finalised_state.rs @@ -339,9 +339,11 @@ impl NonFinalizedState { ))) })? 
{ + dbg!("got block", block.coinbase_height().unwrap()); let parent_hash = BlockHash::from(block.header.previous_block_hash); if parent_hash == best_tip.blockhash { // Normal chain progression + dbg!("normal chain"); let prev_block = match new_blocks.last() { Some(block) => block, None => initial_state @@ -360,6 +362,7 @@ impl NonFinalizedState { })?, }; let chainblock = self.block_to_chainblock(prev_block, &block).await?; + dbg!("created chainblock"); info!( "syncing block {} at height {}", &chainblock.index().hash(), @@ -673,11 +676,13 @@ impl NonFinalizedState { .get_tree_roots_from_source(block.hash().into()) .await .map_err(|e| { + dbg!(&e); SyncError::ZebradConnectionError(NodeConnectionError::UnrecoverableError(Box::new( InvalidData(format!("{}", e)), ))) })?; + dbg!("making block"); Self::create_indexed_block_with_optional_roots( block, &tree_roots, @@ -685,6 +690,7 @@ impl NonFinalizedState { self.network.clone(), ) .map_err(|e| { + dbg!(&e); SyncError::ZebradConnectionError(NodeConnectionError::UnrecoverableError(Box::new( InvalidData(e), ))) @@ -698,6 +704,7 @@ impl NonFinalizedState { ) -> Result { let (sapling_root_and_len, orchard_root_and_len) = self.source.get_commitment_tree_roots(block_hash).await?; + dbg!("got roots"); Ok(TreeRootData { sapling: sapling_root_and_len, diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index e64f3092e..3e3df07e1 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -1,42 +1,61 @@ -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use proptest::{ prelude::{Arbitrary as _, BoxedStrategy, Just}, strategy::Strategy, }; use tonic::async_trait; +use zaino_common::{network::ActivationHeights, DatabaseConfig, Network, StorageConfig}; use zebra_chain::{ block::arbitrary::{self, LedgerStateOverride}, fmt::SummaryDebug, - parameters::GENESIS_PREVIOUS_BLOCK_HASH, 
LedgerState, }; use zebra_state::{FromDisk, HashOrHeight, IntoDisk as _}; use crate::{ - chain_index::source::BlockchainSourceResult, BlockHash, BlockchainSource, TransactionHash, + chain_index::{source::BlockchainSourceResult, tests::init_tracing, NonFinalizedSnapshot}, + BlockCacheConfig, BlockHash, BlockchainSource, ChainIndex, NodeBackedChainIndex, + TransactionHash, }; #[test] fn make_chain() { + init_tracing(); proptest::proptest!(|(segments in make_branching_chain(2, 12))| { - let (genesis_segment, branch_segments) = segments; - let mut prev_hash = GENESIS_PREVIOUS_BLOCK_HASH; - for block in genesis_segment { - assert_eq!(block.header.previous_block_hash, prev_hash); - println!("pre-divergence: {:?}", block.coinbase_height()); - prev_hash = block.hash(); + let runtime = tokio::runtime::Builder::new_multi_thread().worker_threads(2).enable_time().build().unwrap(); + runtime.block_on(async { + let (genesis_segment, branching_segments) = segments; + let mockchain = ProptestMockchain { + genesis_segment, + branching_segments, + }; + let temp_dir: tempfile::TempDir = tempfile::tempdir().unwrap(); + let db_path: std::path::PathBuf = temp_dir.path().to_path_buf(); - } - let hash_atop_shared_chain = prev_hash; - for branch_segment in branch_segments { - for block in branch_segment { - assert_eq!(block.header.previous_block_hash, prev_hash); - println!("post-divergence: {:?}", block.coinbase_height()); - prev_hash = block.hash(); - } - prev_hash = hash_atop_shared_chain; - } + let config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path, + ..Default::default() + }, + ..Default::default() + }, + db_version: 1, + network: Network::Regtest(ActivationHeights::default()), + + no_sync: false, + no_db: false, + }; + + let indexer = NodeBackedChainIndex::new(mockchain.clone(), config) + .await + .unwrap(); + tokio::time::sleep(Duration::from_secs(10)).await; + let index_reader = indexer.subscriber().await; + let snapshot = 
index_reader.snapshot_nonfinalized_state(); + dbg!(snapshot.best_chaintip()); + }); }); } @@ -222,7 +241,7 @@ impl BlockchainSource for ProptestMockchain { async fn get_mempool_txids( &self, ) -> BlockchainSourceResult>> { - Ok(None) + Ok(Some(Vec::new())) } /// Returns the transaction by txid From f6d50b9e896cc67d278457c3f7efae41a00238c2 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Wed, 22 Oct 2025 11:34:10 -0300 Subject: [PATCH 006/114] wip --- zaino-state/src/chain_index/tests/proptest_blockgen.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 3e3df07e1..6b9e548a2 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -53,6 +53,7 @@ fn make_chain() { .unwrap(); tokio::time::sleep(Duration::from_secs(10)).await; let index_reader = indexer.subscriber().await; + dbg!(index_reader.status()); let snapshot = index_reader.snapshot_nonfinalized_state(); dbg!(snapshot.best_chaintip()); }); @@ -301,7 +302,7 @@ fn make_branching_chain( ledger, chain_size, arbitrary::allow_all_transparent_coinbase_spends, - false, + true, ) }) .prop_flat_map(|segment| { @@ -325,7 +326,7 @@ fn make_branching_chain( ledger.clone(), chain_size, arbitrary::allow_all_transparent_coinbase_spends, - false, + true, ) }) .take(num_branches) From dc1d00c9675f23faf9c4b1a6da98746e0e101431 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Wed, 22 Oct 2025 15:11:11 -0300 Subject: [PATCH 007/114] sync loop failure now triggers shutdown --- zaino-state/src/chain_index.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index db7358da8..c81a3f751 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -477,10 +477,15 @@ impl NodeBackedChainIndex { if status.load() 
== StatusType::Closing { break; } + let handle_error = |e| { + tracing::error!("Sync failure: {e:?}. Shutting down."); + status.store(StatusType::CriticalError); + e + }; status.store(StatusType::Syncing); // Sync nfs to chain tip, trimming blocks to finalized tip. - nfs.sync(fs.clone()).await?; + nfs.sync(fs.clone()).await.map_err(handle_error)?; // Sync fs to chain tip - 100. { @@ -490,7 +495,7 @@ impl NodeBackedChainIndex { .to_reader() .db_height() .await - .map_err(|_e| SyncError::CannotReadFinalizedState)? + .map_err(|_e| handle_error(SyncError::CannotReadFinalizedState))? .unwrap_or(types::Height(0)) .0 + 100) @@ -499,7 +504,7 @@ impl NodeBackedChainIndex { .to_reader() .db_height() .await - .map_err(|_e| SyncError::CannotReadFinalizedState)? + .map_err(|_e| handle_error(SyncError::CannotReadFinalizedState))? .map(|height| height + 1) .unwrap_or(types::Height(0)); let next_finalized_block = snapshot @@ -510,11 +515,11 @@ impl NodeBackedChainIndex { .get(&(next_finalized_height)) .ok_or(SyncError::CompetingSyncProcess)?, ) - .ok_or(SyncError::CompetingSyncProcess)?; + .ok_or_else(|| handle_error(SyncError::CompetingSyncProcess))?; // TODO: Handle write errors better (fix db and continue) fs.write_block(next_finalized_block.clone()) .await - .map_err(|_e| SyncError::CompetingSyncProcess)?; + .map_err(|_e| handle_error(SyncError::CompetingSyncProcess))?; } } status.store(StatusType::Ready); From 971f4184328c2402dcbfb737b089ec1c43d533b8 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Wed, 22 Oct 2025 16:09:57 -0300 Subject: [PATCH 008/114] test now apparently functions! Next step: actually assserting things... 
--- .../src/chain_index/non_finalised_state.rs | 5 - .../chain_index/tests/proptest_blockgen.rs | 149 +++++++++++++----- 2 files changed, 112 insertions(+), 42 deletions(-) diff --git a/zaino-state/src/chain_index/non_finalised_state.rs b/zaino-state/src/chain_index/non_finalised_state.rs index 59cb2fe52..b5b84eb6e 100644 --- a/zaino-state/src/chain_index/non_finalised_state.rs +++ b/zaino-state/src/chain_index/non_finalised_state.rs @@ -339,11 +339,9 @@ impl NonFinalizedState { ))) })? { - dbg!("got block", block.coinbase_height().unwrap()); let parent_hash = BlockHash::from(block.header.previous_block_hash); if parent_hash == best_tip.blockhash { // Normal chain progression - dbg!("normal chain"); let prev_block = match new_blocks.last() { Some(block) => block, None => initial_state @@ -362,7 +360,6 @@ impl NonFinalizedState { })?, }; let chainblock = self.block_to_chainblock(prev_block, &block).await?; - dbg!("created chainblock"); info!( "syncing block {} at height {}", &chainblock.index().hash(), @@ -682,7 +679,6 @@ impl NonFinalizedState { ))) })?; - dbg!("making block"); Self::create_indexed_block_with_optional_roots( block, &tree_roots, @@ -704,7 +700,6 @@ impl NonFinalizedState { ) -> Result { let (sapling_root_and_len, orchard_root_and_len) = self.source.get_commitment_tree_roots(block_hash).await?; - dbg!("got roots"); Ok(TreeRootData { sapling: sapling_root_and_len, diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 6b9e548a2..38bc40472 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -8,13 +8,17 @@ use tonic::async_trait; use zaino_common::{network::ActivationHeights, DatabaseConfig, Network, StorageConfig}; use zebra_chain::{ block::arbitrary::{self, LedgerStateOverride}, - fmt::SummaryDebug, + fmt::{HexDebug, SummaryDebug}, + parameters::{NetworkUpgrade, GENESIS_PREVIOUS_BLOCK_HASH}, 
LedgerState, }; use zebra_state::{FromDisk, HashOrHeight, IntoDisk as _}; use crate::{ - chain_index::{source::BlockchainSourceResult, tests::init_tracing, NonFinalizedSnapshot}, + chain_index::{ + source::BlockchainSourceResult, tests::init_tracing, types::GENESIS_HEIGHT, + NonFinalizedSnapshot, + }, BlockCacheConfig, BlockHash, BlockchainSource, ChainIndex, NodeBackedChainIndex, TransactionHash, }; @@ -22,10 +26,16 @@ use crate::{ #[test] fn make_chain() { init_tracing(); - proptest::proptest!(|(segments in make_branching_chain(2, 12))| { + // default is 256. As each case takes multiple seconds, this seems too many. + proptest::proptest!(proptest::test_runner::Config::with_cases(32), |(segments in make_branching_chain(2, 12))| { let runtime = tokio::runtime::Builder::new_multi_thread().worker_threads(2).enable_time().build().unwrap(); runtime.block_on(async { let (genesis_segment, branching_segments) = segments; + for block in genesis_segment.clone() { + dbg!(block.coinbase_height()); + dbg!(block.commitment(&Network::Regtest(ActivationHeights::default()).to_zebra_network())); + + } let mockchain = ProptestMockchain { genesis_segment, branching_segments, @@ -51,7 +61,7 @@ fn make_chain() { let indexer = NodeBackedChainIndex::new(mockchain.clone(), config) .await .unwrap(); - tokio::time::sleep(Duration::from_secs(10)).await; + tokio::time::sleep(Duration::from_secs(2)).await; let index_reader = indexer.subscriber().await; dbg!(index_reader.status()); let snapshot = index_reader.snapshot_nonfinalized_state(); @@ -282,6 +292,8 @@ impl BlockchainSource for ProptestMockchain { for block in self_clone.all_blocks_arb_branch_order() { sender.send((block.hash(), block.clone())).await.unwrap() } + // don't drop the sender + std::mem::forget(sender); })()) .await .unwrap(); @@ -296,42 +308,105 @@ fn make_branching_chain( SummaryDebug>>, Vec>>>, )> { - arbitrary::LedgerState::genesis_strategy(None, None, true) - .prop_flat_map(move |ledger| { + 
arbitrary::LedgerState::arbitrary_with(LedgerStateOverride { + height_override: Some(GENESIS_HEIGHT.into()), + previous_block_hash_override: Some(GENESIS_PREVIOUS_BLOCK_HASH), + network_upgrade_override: Some(NetworkUpgrade::Genesis), + transaction_version_override: None, + transaction_has_valid_network_upgrade: true, + always_has_coinbase: true, + }) + .prop_flat_map(move |ledger| { + zebra_chain::block::Block::partial_chain_strategy( + ledger, + 1, + arbitrary::allow_all_transparent_coinbase_spends, + true, + ) + }) + .prop_flat_map(|segment| { + ( + Just(segment.clone()), + LedgerState::arbitrary_with(LedgerStateOverride { + height_override: segment.last().unwrap().coinbase_height().unwrap() + 1, + previous_block_hash_override: Some(segment.last().unwrap().hash()), + network_upgrade_override: Some(NetworkUpgrade::Canopy), + transaction_version_override: None, + transaction_has_valid_network_upgrade: true, + always_has_coinbase: true, + }), + ) + }) + .prop_flat_map(move |(segment, ledger)| { + ( + Just(segment), + zebra_chain::block::Block::partial_chain_strategy( + ledger, + 1, + arbitrary::allow_all_transparent_coinbase_spends, + true, + ), + ) + }) + .prop_flat_map(|(mut segment, mut segment2)| { + // We need to manually set the commitment to ChainHistoryActivationReserved + // as arbitrary block generation doesn'r enforce this + Arc::get_mut(&mut Arc::get_mut(segment2.first_mut().unwrap()).unwrap().header) + .unwrap() + .commitment_bytes = HexDebug([0; 32]); + + segment.extend_from_slice(&segment2); + ( + Just(segment.clone()), + LedgerState::arbitrary_with(LedgerStateOverride { + height_override: segment.last().unwrap().coinbase_height().unwrap() + 1, + previous_block_hash_override: Some(segment.last().unwrap().hash()), + network_upgrade_override: Some(NetworkUpgrade::Nu6), + transaction_version_override: None, + transaction_has_valid_network_upgrade: true, + always_has_coinbase: true, + }), + ) + }) + .prop_flat_map(move |(segment, ledger)| { + ( + 
Just(segment), zebra_chain::block::Block::partial_chain_strategy( ledger, chain_size, arbitrary::allow_all_transparent_coinbase_spends, true, - ) - }) - .prop_flat_map(|segment| { - ( - Just(segment.clone()), - LedgerState::arbitrary_with(LedgerStateOverride { - height_override: segment.last().unwrap().coinbase_height().unwrap() + 1, - previous_block_hash_override: Some(segment.last().unwrap().hash()), - network_upgrade_override: None, - transaction_version_override: None, - transaction_has_valid_network_upgrade: true, - always_has_coinbase: true, - }), - ) - }) - .prop_flat_map(move |(segment, ledger)| { - ( - Just(segment), - std::iter::repeat_with(|| { - zebra_chain::block::Block::partial_chain_strategy( - ledger.clone(), - chain_size, - arbitrary::allow_all_transparent_coinbase_spends, - true, - ) - }) - .take(num_branches) - .collect::>(), - ) - }) - .boxed() + ), + ) + }) + .prop_flat_map(|(mut segment1, segment2)| { + segment1.extend_from_slice(&segment2); + ( + Just(segment1.clone()), + LedgerState::arbitrary_with(LedgerStateOverride { + height_override: segment1.last().unwrap().coinbase_height().unwrap() + 1, + previous_block_hash_override: Some(segment1.last().unwrap().hash()), + network_upgrade_override: Some(NetworkUpgrade::Nu6), + transaction_version_override: None, + transaction_has_valid_network_upgrade: true, + always_has_coinbase: true, + }), + ) + }) + .prop_flat_map(move |(segment, ledger)| { + ( + Just(segment), + std::iter::repeat_with(|| { + zebra_chain::block::Block::partial_chain_strategy( + ledger.clone(), + chain_size, + arbitrary::allow_all_transparent_coinbase_spends, + true, + ) + }) + .take(num_branches) + .collect::>(), + ) + }) + .boxed() } From 08359edf0607f5760a9d2e2667971d490295ef10 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Thu, 23 Oct 2025 14:23:13 -0300 Subject: [PATCH 009/114] tidy make_chain --- .../chain_index/tests/proptest_blockgen.rs | 207 +++++++++--------- 1 file changed, 101 insertions(+), 106 deletions(-) diff 
--git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 38bc40472..694b77710 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -8,15 +8,16 @@ use tonic::async_trait; use zaino_common::{network::ActivationHeights, DatabaseConfig, Network, StorageConfig}; use zebra_chain::{ block::arbitrary::{self, LedgerStateOverride}, - fmt::{HexDebug, SummaryDebug}, - parameters::{NetworkUpgrade, GENESIS_PREVIOUS_BLOCK_HASH}, + fmt::SummaryDebug, + parameters::NetworkUpgrade, LedgerState, }; use zebra_state::{FromDisk, HashOrHeight, IntoDisk as _}; use crate::{ chain_index::{ - source::BlockchainSourceResult, tests::init_tracing, types::GENESIS_HEIGHT, + source::BlockchainSourceResult, + tests::{init_tracing, proptest_blockgen::proptest_helpers::add_segment}, NonFinalizedSnapshot, }, BlockCacheConfig, BlockHash, BlockchainSource, ChainIndex, NodeBackedChainIndex, @@ -31,11 +32,6 @@ fn make_chain() { let runtime = tokio::runtime::Builder::new_multi_thread().worker_threads(2).enable_time().build().unwrap(); runtime.block_on(async { let (genesis_segment, branching_segments) = segments; - for block in genesis_segment.clone() { - dbg!(block.coinbase_height()); - dbg!(block.commitment(&Network::Regtest(ActivationHeights::default()).to_zebra_network())); - - } let mockchain = ProptestMockchain { genesis_segment, branching_segments, @@ -63,7 +59,6 @@ fn make_chain() { .unwrap(); tokio::time::sleep(Duration::from_secs(2)).await; let index_reader = indexer.subscriber().await; - dbg!(index_reader.status()); let snapshot = index_reader.snapshot_nonfinalized_state(); dbg!(snapshot.best_chaintip()); }); @@ -308,105 +303,105 @@ fn make_branching_chain( SummaryDebug>>, Vec>>>, )> { - arbitrary::LedgerState::arbitrary_with(LedgerStateOverride { - height_override: Some(GENESIS_HEIGHT.into()), - previous_block_hash_override: 
Some(GENESIS_PREVIOUS_BLOCK_HASH), - network_upgrade_override: Some(NetworkUpgrade::Genesis), - transaction_version_override: None, - transaction_has_valid_network_upgrade: true, - always_has_coinbase: true, - }) - .prop_flat_map(move |ledger| { - zebra_chain::block::Block::partial_chain_strategy( - ledger, - 1, - arbitrary::allow_all_transparent_coinbase_spends, - true, - ) - }) - .prop_flat_map(|segment| { - ( - Just(segment.clone()), - LedgerState::arbitrary_with(LedgerStateOverride { - height_override: segment.last().unwrap().coinbase_height().unwrap() + 1, - previous_block_hash_override: Some(segment.last().unwrap().hash()), - network_upgrade_override: Some(NetworkUpgrade::Canopy), - transaction_version_override: None, - transaction_has_valid_network_upgrade: true, - always_has_coinbase: true, - }), - ) - }) - .prop_flat_map(move |(segment, ledger)| { - ( - Just(segment), - zebra_chain::block::Block::partial_chain_strategy( - ledger, - 1, - arbitrary::allow_all_transparent_coinbase_spends, - true, - ), - ) - }) - .prop_flat_map(|(mut segment, mut segment2)| { - // We need to manually set the commitment to ChainHistoryActivationReserved - // as arbitrary block generation doesn'r enforce this - Arc::get_mut(&mut Arc::get_mut(segment2.first_mut().unwrap()).unwrap().header) - .unwrap() - .commitment_bytes = HexDebug([0; 32]); + add_segment(SummaryDebug(Vec::new()), NetworkUpgrade::Genesis, 1) + .prop_flat_map(|segment| add_segment(segment, NetworkUpgrade::Canopy, 1)) + .prop_flat_map(move |segment| add_segment(segment, NetworkUpgrade::Nu6, chain_size)) + .prop_flat_map(|segment| { + ( + Just(segment.clone()), + LedgerState::arbitrary_with(LedgerStateOverride { + height_override: segment.last().unwrap().coinbase_height().unwrap() + 1, + previous_block_hash_override: Some(segment.last().unwrap().hash()), + network_upgrade_override: Some(NetworkUpgrade::Nu6), + transaction_version_override: None, + transaction_has_valid_network_upgrade: true, + always_has_coinbase: 
true, + }), + ) + }) + .prop_flat_map(move |(segment, ledger)| { + ( + Just(segment), + std::iter::repeat_with(|| { + zebra_chain::block::Block::partial_chain_strategy( + ledger.clone(), + chain_size, + arbitrary::allow_all_transparent_coinbase_spends, + true, + ) + }) + .take(num_branches) + .collect::>(), + ) + }) + .boxed() +} - segment.extend_from_slice(&segment2); - ( - Just(segment.clone()), - LedgerState::arbitrary_with(LedgerStateOverride { - height_override: segment.last().unwrap().coinbase_height().unwrap() + 1, - previous_block_hash_override: Some(segment.last().unwrap().hash()), - network_upgrade_override: Some(NetworkUpgrade::Nu6), - transaction_version_override: None, - transaction_has_valid_network_upgrade: true, - always_has_coinbase: true, - }), - ) - }) - .prop_flat_map(move |(segment, ledger)| { - ( - Just(segment), - zebra_chain::block::Block::partial_chain_strategy( +type ProptestChainSegment = SummaryDebug>>; + +mod proptest_helpers { + use std::sync::Arc; + + use proptest::prelude::{Arbitrary, BoxedStrategy, Strategy}; + use zebra_chain::{ + block::{ + arbitrary::{allow_all_transparent_coinbase_spends, LedgerStateOverride}, + Block, Height, + }, + fmt::HexDebug, + parameters::{NetworkUpgrade, GENESIS_PREVIOUS_BLOCK_HASH}, + LedgerState, + }; + + use super::ProptestChainSegment; + + pub(super) fn add_segment( + previous_chain: ProptestChainSegment, + start_nu: NetworkUpgrade, + segment_length: usize, + ) -> BoxedStrategy { + LedgerState::arbitrary_with(LedgerStateOverride { + height_override: Some( + previous_chain + .last() + .map(|block| (block.coinbase_height().unwrap() + 1).unwrap()) + .unwrap_or(Height(0)), + ), + previous_block_hash_override: Some( + previous_chain + .last() + .map(|block| block.hash()) + .unwrap_or(GENESIS_PREVIOUS_BLOCK_HASH), + ), + network_upgrade_override: Some(start_nu), + transaction_version_override: None, + transaction_has_valid_network_upgrade: true, + always_has_coinbase: true, + }) + .prop_flat_map(move 
|ledger| { + Block::partial_chain_strategy( ledger, - chain_size, - arbitrary::allow_all_transparent_coinbase_spends, + segment_length, + allow_all_transparent_coinbase_spends, true, - ), - ) - }) - .prop_flat_map(|(mut segment1, segment2)| { - segment1.extend_from_slice(&segment2); - ( - Just(segment1.clone()), - LedgerState::arbitrary_with(LedgerStateOverride { - height_override: segment1.last().unwrap().coinbase_height().unwrap() + 1, - previous_block_hash_override: Some(segment1.last().unwrap().hash()), - network_upgrade_override: Some(NetworkUpgrade::Nu6), - transaction_version_override: None, - transaction_has_valid_network_upgrade: true, - always_has_coinbase: true, - }), - ) - }) - .prop_flat_map(move |(segment, ledger)| { - ( - Just(segment), - std::iter::repeat_with(|| { - zebra_chain::block::Block::partial_chain_strategy( - ledger.clone(), - chain_size, - arbitrary::allow_all_transparent_coinbase_spends, - true, + ) + }) + .prop_map(move |mut new_segment| { + if start_nu == NetworkUpgrade::Canopy { + // We need to manually set the commitment to ChainHistoryActivationReserved + // as arbitrary block generation doesn'r enforce this + Arc::get_mut( + &mut Arc::get_mut(new_segment.first_mut().unwrap()) + .unwrap() + .header, ) - }) - .take(num_branches) - .collect::>(), - ) - }) - .boxed() + .unwrap() + .commitment_bytes = HexDebug([0; 32]); + } + let mut full_chain = previous_chain.clone(); + full_chain.extend_from_slice(&new_segment); + full_chain + }) + .boxed() + } } From 71d0c27ff309e2737fcc3068d7fa83e1ff1c2a1f Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Tue, 9 Dec 2025 16:08:02 -0400 Subject: [PATCH 010/114] get compiling after zebra_3 update --- .../chain_index/tests/proptest_blockgen.rs | 56 ++++++++----------- 1 file changed, 24 insertions(+), 32 deletions(-) diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 694b77710..2608d0e56 100644 --- 
a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -9,7 +9,6 @@ use zaino_common::{network::ActivationHeights, DatabaseConfig, Network, StorageC use zebra_chain::{ block::arbitrary::{self, LedgerStateOverride}, fmt::SummaryDebug, - parameters::NetworkUpgrade, LedgerState, }; use zebra_state::{FromDisk, HashOrHeight, IntoDisk as _}; @@ -27,8 +26,9 @@ use crate::{ #[test] fn make_chain() { init_tracing(); + let network = Network::Regtest(ActivationHeights::default()); // default is 256. As each case takes multiple seconds, this seems too many. - proptest::proptest!(proptest::test_runner::Config::with_cases(32), |(segments in make_branching_chain(2, 12))| { + proptest::proptest!(proptest::test_runner::Config::with_cases(32), |(segments in make_branching_chain(2, 12, network))| { let runtime = tokio::runtime::Builder::new_multi_thread().worker_threads(2).enable_time().build().unwrap(); runtime.block_on(async { let (genesis_segment, branching_segments) = segments; @@ -48,10 +48,8 @@ fn make_chain() { ..Default::default() }, db_version: 1, - network: Network::Regtest(ActivationHeights::default()), + network , - no_sync: false, - no_db: false, }; let indexer = NodeBackedChainIndex::new(mockchain.clone(), config) @@ -189,9 +187,9 @@ impl BlockchainSource for ProptestMockchain { .fold((None, None), |(mut sapling, mut orchard), block| { for transaction in &block.transactions { for sap_commitment in transaction.sapling_note_commitments() { - let sap_commitment = zebra_chain::sapling::tree::Node::from_bytes( - sap_commitment.to_bytes(), - ); + let sap_commitment = + sapling_crypto::Node::from_bytes(sap_commitment.to_bytes()) + .unwrap(); sapling = Some(sapling.unwrap_or_else(|| { incrementalmerkletree::frontier::Frontier::<_, 32>::empty() @@ -221,7 +219,7 @@ impl BlockchainSource for ProptestMockchain { Ok(( sapling.map(|sap_front| { ( - 
zebra_chain::sapling::tree::Root::from_bytes(sap_front.root().as_ref()), + zebra_chain::sapling::tree::Root::from_bytes(sap_front.root().to_bytes()), sap_front.tree_size(), ) }), @@ -237,7 +235,7 @@ impl BlockchainSource for ProptestMockchain { /// Returns the sapling and orchard treestate by hash async fn get_treestate( &self, - id: BlockHash, + _id: BlockHash, ) -> BlockchainSourceResult<(Option>, Option>)> { // I don't think this is used for sync? unimplemented!() @@ -299,23 +297,29 @@ impl BlockchainSource for ProptestMockchain { fn make_branching_chain( num_branches: usize, chain_size: usize, + network_override: Network, ) -> BoxedStrategy<( SummaryDebug>>, Vec>>>, )> { - add_segment(SummaryDebug(Vec::new()), NetworkUpgrade::Genesis, 1) - .prop_flat_map(|segment| add_segment(segment, NetworkUpgrade::Canopy, 1)) - .prop_flat_map(move |segment| add_segment(segment, NetworkUpgrade::Nu6, chain_size)) - .prop_flat_map(|segment| { + let network_override = Some(network_override.to_zebra_network()); + // these feel like they shouldn't be needed. 
The closure lifetimes are fighting me + let n_o_clone = network_override.clone(); + let n_o_clone_2 = network_override.clone(); + add_segment(SummaryDebug(Vec::new()), network_override.clone(), 1) + .prop_flat_map(move |segment| add_segment(segment, n_o_clone.clone(), 1)) + .prop_flat_map(move |segment| add_segment(segment, n_o_clone_2.clone(), chain_size)) + .prop_flat_map(move |segment| { ( Just(segment.clone()), LedgerState::arbitrary_with(LedgerStateOverride { height_override: segment.last().unwrap().coinbase_height().unwrap() + 1, previous_block_hash_override: Some(segment.last().unwrap().hash()), - network_upgrade_override: Some(NetworkUpgrade::Nu6), + network_upgrade_override: None, transaction_version_override: None, transaction_has_valid_network_upgrade: true, always_has_coinbase: true, + network_override: network_override.clone(), }), ) }) @@ -340,7 +344,6 @@ fn make_branching_chain( type ProptestChainSegment = SummaryDebug>>; mod proptest_helpers { - use std::sync::Arc; use proptest::prelude::{Arbitrary, BoxedStrategy, Strategy}; use zebra_chain::{ @@ -348,8 +351,7 @@ mod proptest_helpers { arbitrary::{allow_all_transparent_coinbase_spends, LedgerStateOverride}, Block, Height, }, - fmt::HexDebug, - parameters::{NetworkUpgrade, GENESIS_PREVIOUS_BLOCK_HASH}, + parameters::{Network, GENESIS_PREVIOUS_BLOCK_HASH}, LedgerState, }; @@ -357,7 +359,7 @@ mod proptest_helpers { pub(super) fn add_segment( previous_chain: ProptestChainSegment, - start_nu: NetworkUpgrade, + network_override: Option, segment_length: usize, ) -> BoxedStrategy { LedgerState::arbitrary_with(LedgerStateOverride { @@ -373,10 +375,11 @@ mod proptest_helpers { .map(|block| block.hash()) .unwrap_or(GENESIS_PREVIOUS_BLOCK_HASH), ), - network_upgrade_override: Some(start_nu), + network_upgrade_override: None, transaction_version_override: None, transaction_has_valid_network_upgrade: true, always_has_coinbase: true, + network_override, }) .prop_flat_map(move |ledger| { 
Block::partial_chain_strategy( @@ -386,18 +389,7 @@ mod proptest_helpers { true, ) }) - .prop_map(move |mut new_segment| { - if start_nu == NetworkUpgrade::Canopy { - // We need to manually set the commitment to ChainHistoryActivationReserved - // as arbitrary block generation doesn'r enforce this - Arc::get_mut( - &mut Arc::get_mut(new_segment.first_mut().unwrap()) - .unwrap() - .header, - ) - .unwrap() - .commitment_bytes = HexDebug([0; 32]); - } + .prop_map(move |new_segment| { let mut full_chain = previous_chain.clone(); full_chain.extend_from_slice(&new_segment); full_chain From 45c9b0f4b4026fcb240e395ab88c9fc4467034c6 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Wed, 10 Dec 2025 13:32:57 -0400 Subject: [PATCH 011/114] bit more info --- zaino-state/src/chain_index/tests/proptest_blockgen.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 2608d0e56..aac41e9df 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -28,7 +28,8 @@ fn make_chain() { init_tracing(); let network = Network::Regtest(ActivationHeights::default()); // default is 256. As each case takes multiple seconds, this seems too many. - proptest::proptest!(proptest::test_runner::Config::with_cases(32), |(segments in make_branching_chain(2, 12, network))| { + // TODO: this should be higher than 1. 
Currently set to 1 for ease of iteration + proptest::proptest!(proptest::test_runner::Config::with_cases(1), |(segments in make_branching_chain(2, 12, network))| { let runtime = tokio::runtime::Builder::new_multi_thread().worker_threads(2).enable_time().build().unwrap(); runtime.block_on(async { let (genesis_segment, branching_segments) = segments; @@ -59,6 +60,8 @@ fn make_chain() { let index_reader = indexer.subscriber().await; let snapshot = index_reader.snapshot_nonfinalized_state(); dbg!(snapshot.best_chaintip()); + dbg!(snapshot.blocks.len()); + dbg!(snapshot.heights_to_hashes.len()); }); }); } From 5b3a5900fcfbd06bdef2bb8ccf4b8fbb2233fe03 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Mon, 15 Dec 2025 12:32:11 -0400 Subject: [PATCH 012/114] fix after cherry-pick --- Cargo.lock | 1200 ++++++++++++++++++++++------------------------------ Cargo.toml | 2 +- 2 files changed, 516 insertions(+), 686 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 556cac1c1..3ae2e86e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -274,7 +274,7 @@ dependencies = [ "asn1-rs-impl", "displaydoc", "nom", - "num-traits 0.2.19", + "num-traits", "rusticata-macros", "thiserror 2.0.17", ] @@ -313,7 +313,6 @@ dependencies = [ "futures-core", "futures-io", "pin-project-lite", - "tokio", ] [[package]] @@ -529,7 +528,7 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -801,6 +800,15 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "bounded-vec" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09dc0086e469182132244e9b8d313a0742e1132da43a08c24b9dd3c18e0faf3a" +dependencies = [ + "thiserror 2.0.17", +] + [[package]] name = "bounded-vec-deque" version = "0.1.1" @@ -988,10 +996,10 @@ checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ "iana-time-zone", "js-sys", - "num-traits 0.2.19", + "num-traits", "serde", 
"wasm-bindgen", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -1214,16 +1222,6 @@ dependencies = [ "url", ] -[[package]] -name = "core-foundation" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "core-foundation" version = "0.10.1" @@ -1504,7 +1502,7 @@ dependencies = [ "cookie-factory", "displaydoc", "nom", - "num-traits 0.2.19", + "num-traits", "rusticata-macros", ] @@ -1868,15 +1866,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" -[[package]] -name = "encoding_rs" -version = "0.8.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" -dependencies = [ - "cfg-if", -] - [[package]] name = "enum-ordinalize" version = "3.1.15" @@ -1884,21 +1873,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bf1fa3f06bbff1ea5b1a9c7b14aa992a39657db60a2759457328d7e058f49ee" dependencies = [ "num-bigint", - "num-traits 0.2.19", + "num-traits", "proc-macro2", "quote", "syn 2.0.106", ] -[[package]] -name = "enum_primitive" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4551092f4d519593039259a9ed8daedf0da12e5109c5280338073eaeb81180" -dependencies = [ - "num-traits 0.1.43", -] - [[package]] name = "env_home" version = "0.1.0" @@ -1929,7 +1909,7 @@ dependencies = [ [[package]] name = "equihash" version = "0.2.2" -source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" +source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" dependencies = [ "blake2b_simd", 
"core2 0.3.3", @@ -1984,7 +1964,7 @@ dependencies = [ [[package]] name = "f4jumble" version = "0.1.1" -source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" +source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" dependencies = [ "blake2b_simd", ] @@ -2104,21 +2084,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.2.2" @@ -2139,7 +2104,7 @@ dependencies = [ "libm", "num-bigint", "num-integer", - "num-traits 0.2.19", + "num-traits", ] [[package]] @@ -2690,22 +2655,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "hyper-tls" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" -dependencies = [ - "bytes", - "http-body-util", - "hyper", - "hyper-util", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", -] - [[package]] name = "hyper-util" version = "0.1.17" @@ -2725,11 +2674,9 @@ dependencies = [ "percent-encoding", "pin-project-lite", "socket2 0.6.1", - "system-configuration", "tokio", "tower-service", "tracing", - "windows-registry", ] [[package]] @@ -2989,7 +2936,7 @@ dependencies = [ "anyhow", "core2 0.4.0", "futures", - "prost", + "prost 0.13.5", "serde_json", "tempfile", 
"tokio", @@ -2999,9 +2946,12 @@ dependencies = [ "zaino-proto", "zaino-state", "zaino-testutils", - "zebra-chain", + "zainod", + "zcash_local_net 0.1.0 (git+https://github.com/zingolabs/infrastructure.git?rev=20f3c206321825952fdf2d9d84947f5c9d2a1cf4)", + "zebra-chain 3.1.0", "zebra-rpc", "zebra-state", + "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?rev=20f3c206321825952fdf2d9d84947f5c9d2a1cf4)", "zip32", ] @@ -3264,7 +3214,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -3341,6 +3291,19 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "libzcash_script" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f8ce05b56f3cbc65ec7d0908adb308ed91281e022f61c8c3a0c9388b5380b17" +dependencies = [ + "bindgen 0.72.1", + "cc", + "thiserror 2.0.17", + "tracing", + "zcash_script", +] + [[package]] name = "linux-raw-sys" version = "0.4.15" @@ -3593,23 +3556,6 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" -[[package]] -name = "native-tls" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" -dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework 2.11.1", - "security-framework-sys", - "tempfile", -] - [[package]] name = "nix" version = "0.29.0" @@ -3677,7 +3623,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", - "num-traits 0.2.19", + "num-traits", ] [[package]] @@ -3691,7 +3637,7 @@ dependencies = [ 
"libm", "num-integer", "num-iter", - "num-traits 0.2.19", + "num-traits", "rand 0.8.5", "smallvec", "zeroize", @@ -3709,7 +3655,7 @@ version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "num-traits 0.2.19", + "num-traits", ] [[package]] @@ -3720,16 +3666,7 @@ checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", - "num-traits 0.2.19", -] - -[[package]] -name = "num-traits" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" -dependencies = [ - "num-traits 0.2.19", + "num-traits", ] [[package]] @@ -3810,50 +3747,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" -[[package]] -name = "openssl" -version = "0.10.74" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24ad14dd45412269e1a30f52ad8f0664f0f4f4a89ee8fe28c3b3527021ebb654" -dependencies = [ - "bitflags 2.9.4", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", -] - [[package]] name = "openssl-probe" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" -[[package]] -name = "openssl-sys" -version = "0.9.110" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a9f0075ba3c21b09f8e8b2026584b1d18d49388648f2fbbf3c97ea8deced8e2" 
-dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "option-ext" version = "0.2.0" @@ -3901,7 +3800,7 @@ version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" dependencies = [ - "num-traits 0.2.19", + "num-traits", ] [[package]] @@ -4030,7 +3929,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -4110,7 +4009,7 @@ dependencies = [ [[package]] name = "pepper-sync" version = "0.0.1" -source = "git+https://github.com/zingolabs/zingolib.git?rev=f88e1d76ea244d6cc48d7fd4c3a609c6598318dc#f88e1d76ea244d6cc48d7fd4c3a609c6598318dc" +source = "git+https://github.com/zingolabs/zingolib.git?branch=dev#a94c2e338cbddc76db5903bdc5c0760a0dea3e56" dependencies = [ "bip32", "byteorder", @@ -4124,22 +4023,23 @@ dependencies = [ "rayon", "sapling-crypto", "shardtree", + "simple-mermaid", "subtle", "thiserror 2.0.17", "tokio", "tonic 0.13.1", "tracing", - "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_client_backend 0.19.1 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_keys 0.10.0", + "zcash_address 0.10.0", + "zcash_client_backend", + "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_keys 0.12.0", "zcash_note_encryption", - "zcash_primitives 0.24.0", - "zcash_protocol 0.6.1", - "zcash_transparent 0.4.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_primitives 0.26.0", + "zcash_protocol 0.7.0", + "zcash_transparent 0.6.0", "zingo-memo", "zingo-status", - "zingo_netutils 0.1.0 
(git+https://github.com/zingolabs/infrastructure.git?rev=cc2af1ac80cb6b7a7f0a0b8f331f7b0873d667bf)", + "zingo_netutils 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?branch=dev)", "zip32", ] @@ -4440,7 +4340,7 @@ dependencies = [ "bit-vec", "bitflags 2.9.4", "lazy_static", - "num-traits 0.2.19", + "num-traits", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", @@ -4468,7 +4368,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" dependencies = [ "bytes", - "prost-derive", + "prost-derive 0.13.5", +] + +[[package]] +name = "prost" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" +dependencies = [ + "bytes", + "prost-derive 0.14.1", ] [[package]] @@ -4484,8 +4394,30 @@ dependencies = [ "once_cell", "petgraph", "prettyplease", - "prost", - "prost-types", + "prost 0.13.5", + "prost-types 0.13.5", + "regex", + "syn 2.0.106", + "tempfile", +] + +[[package]] +name = "prost-build" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" +dependencies = [ + "heck", + "itertools 0.14.0", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost 0.14.1", + "prost-types 0.14.1", + "pulldown-cmark", + "pulldown-cmark-to-cmark", "regex", "syn 2.0.106", "tempfile", @@ -4504,13 +4436,35 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "prost-derive" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "prost-types" version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" dependencies = [ - "prost", + "prost 0.13.5", +] + +[[package]] +name = "prost-types" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" +dependencies = [ + "prost 0.14.1", ] [[package]] @@ -4549,6 +4503,26 @@ dependencies = [ "psl-types", ] +[[package]] +name = "pulldown-cmark" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" +dependencies = [ + "bitflags 2.9.4", + "memchr", + "unicase", +] + +[[package]] +name = "pulldown-cmark-to-cmark" +version = "21.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8246feae3db61428fd0bb94285c690b460e4517d83152377543ca802357785f1" +dependencies = [ + "pulldown-cmark", +] + [[package]] name = "pwd-grp" version = "1.0.0" @@ -4910,26 +4884,19 @@ version = "0.12.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" dependencies = [ - "async-compression", "base64", "bytes", "cookie", "cookie_store", - "encoding_rs", "futures-core", - "futures-util", - "h2", "http", "http-body", "http-body-util", "hyper", "hyper-rustls", - "hyper-tls", "hyper-util", "js-sys", "log", - "mime", - "native-tls", "percent-encoding", "pin-project-lite", "quinn", @@ -4940,9 +4907,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper", "tokio", - "tokio-native-tls", "tokio-rustls", - "tokio-util", "tower 0.5.2", "tower-http", "tower-service", @@ -5080,7 +5045,7 @@ dependencies = [ "digest 0.10.7", "num-bigint-dig", "num-integer", - "num-traits 0.2.19", + "num-traits", "pkcs1", "pkcs8", "rand_core 0.6.4", @@ -5149,7 +5114,7 @@ dependencies = [ "arrayvec", "borsh", "bytes", - "num-traits 0.2.19", + "num-traits", "rand 0.8.5", 
"rkyv", "serde", @@ -5261,7 +5226,7 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.5.1", + "security-framework", ] [[package]] @@ -5490,19 +5455,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "security-framework" -version = "2.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" -dependencies = [ - "bitflags 2.9.4", - "core-foundation 0.9.4", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - [[package]] name = "security-framework" version = "3.5.1" @@ -5510,7 +5462,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" dependencies = [ "bitflags 2.9.4", - "core-foundation 0.10.1", + "core-foundation", "core-foundation-sys", "libc", "security-framework-sys", @@ -5674,17 +5626,6 @@ dependencies = [ "unsafe-libyaml", ] -[[package]] -name = "sha-1" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest 0.10.7", -] - [[package]] name = "sha1" version = "0.10.6" @@ -5803,6 +5744,12 @@ version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" +[[package]] +name = "simple-mermaid" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589144a964b4b30fe3a83b4bb1a09e2475aac194ec832a046a23e75bddf9eb29" + [[package]] name = "sinsemilla" version = "0.1.0" @@ -6091,27 +6038,6 @@ dependencies = [ "syn 2.0.106", ] -[[package]] -name = "system-configuration" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" -dependencies = [ - "bitflags 2.9.4", - "core-foundation 0.9.4", - "system-configuration-sys", -] - -[[package]] -name = "system-configuration-sys" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "tap" version = "1.0.1" @@ -6275,16 +6201,6 @@ dependencies = [ "syn 2.0.106", ] -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.26.4" @@ -6421,7 +6337,8 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project", - "prost", + "prost 0.13.5", + "rustls-native-certs", "rustls-pemfile", "socket2 0.5.10", "tokio", @@ -6452,7 +6369,7 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project", - "prost", + "prost 0.13.5", "socket2 0.5.10", "tokio", "tokio-rustls", @@ -6464,6 +6381,35 @@ dependencies = [ "webpki-roots 0.26.11", ] +[[package]] +name = "tonic" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" +dependencies = [ + "async-trait", + "axum 0.8.6", + "base64", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "socket2 0.6.1", + "sync_wrapper", + "tokio", + "tokio-stream", + "tower 0.5.2", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tonic-build" version = "0.12.3" @@ -6472,8 +6418,8 @@ checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" dependencies = [ "prettyplease", "proc-macro2", 
- "prost-build", - "prost-types", + "prost-build 0.13.5", + "prost-types 0.13.5", "quote", "syn 2.0.106", ] @@ -6486,36 +6432,76 @@ checksum = "eac6f67be712d12f0b41328db3137e0d0757645d8904b4cb7d51cd9c2279e847" dependencies = [ "prettyplease", "proc-macro2", - "prost-build", - "prost-types", + "prost-build 0.13.5", + "prost-types 0.13.5", "quote", "syn 2.0.106", ] [[package]] -name = "tonic-reflection" -version = "0.12.3" +name = "tonic-build" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "878d81f52e7fcfd80026b7fdb6a9b578b3c3653ba987f87f0dce4b64043cba27" +checksum = "4c40aaccc9f9eccf2cd82ebc111adc13030d23e887244bc9cfa5d1d636049de3" dependencies = [ - "prost", - "prost-types", - "tokio", - "tokio-stream", - "tonic 0.12.3", + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.106", ] [[package]] -name = "tor-async-utils" -version = "0.28.0" +name = "tonic-prost" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5294c85610f52bcbe36fddde04a3a994c4ec382ceed455cfdc8252be7046008" +checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" dependencies = [ - "derive-deftly 1.0.1", - "educe", - "futures", - "oneshot-fused-workaround", - "pin-project", + "bytes", + "prost 0.14.1", + "tonic 0.14.2", +] + +[[package]] +name = "tonic-prost-build" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4a16cba4043dc3ff43fcb3f96b4c5c154c64cbd18ca8dce2ab2c6a451d058a2" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build 0.14.1", + "prost-types 0.14.1", + "quote", + "syn 2.0.106", + "tempfile", + "tonic-build 0.14.2", +] + +[[package]] +name = "tonic-reflection" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34da53e8387581d66db16ff01f98a70b426b091fdf76856e289d5c1bd386ed7b" +dependencies = [ + "prost 0.14.1", + "prost-types 0.14.1", + "tokio", + 
"tokio-stream", + "tonic 0.14.2", + "tonic-prost", +] + +[[package]] +name = "tor-async-utils" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5294c85610f52bcbe36fddde04a3a994c4ec382ceed455cfdc8252be7046008" +dependencies = [ + "derive-deftly 1.0.1", + "educe", + "futures", + "oneshot-fused-workaround", + "pin-project", "postage", "thiserror 2.0.17", "void", @@ -7398,9 +7384,9 @@ dependencies = [ [[package]] name = "tower-batch-control" -version = "0.2.41" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6823ca72ad0d8ebf40ddfe11c104f0ccb242befb7fd3bc20c33b6798a31eba" +checksum = "6e6cf52578f98b4da47335c26c4f883f7993b1a9b9d2f5420eb8dbfd5dd19a28" dependencies = [ "futures", "futures-core", @@ -7636,6 +7622,12 @@ dependencies = [ "version_check", ] +[[package]] +name = "unicase" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" + [[package]] name = "unicode-ident" version = "1.0.19" @@ -8090,9 +8082,9 @@ checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", - "windows-link 0.2.1", - "windows-result 0.4.1", - "windows-strings 0.5.1", + "windows-link", + "windows-result", + "windows-strings", ] [[package]] @@ -8117,54 +8109,19 @@ dependencies = [ "syn 2.0.106", ] -[[package]] -name = "windows-link" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" - [[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" -[[package]] -name = "windows-registry" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" -dependencies = [ - "windows-link 0.1.3", - "windows-result 0.3.4", - "windows-strings 0.4.2", -] - -[[package]] -name = "windows-result" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" -dependencies = [ - "windows-link 0.1.3", -] - [[package]] name = "windows-result" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ - "windows-link 0.2.1", -] - -[[package]] -name = "windows-strings" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" -dependencies = [ - "windows-link 0.1.3", + "windows-link", ] [[package]] @@ -8173,7 +8130,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -8209,7 +8166,7 @@ version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -8234,7 +8191,7 @@ version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows-link 0.2.1", + "windows-link", "windows_aarch64_gnullvm 0.53.1", "windows_aarch64_msvc 0.53.1", "windows_i686_gnu 0.53.1", @@ -8441,7 +8398,7 @@ version = "0.1.2" dependencies = [ "serde", "thiserror 1.0.69", - "zebra-chain", + "zebra-chain 3.1.0", ] [[package]] @@ -8455,7 +8412,7 @@ dependencies = [ "http", "indexmap 
2.11.4", "jsonrpsee-types", - "prost", + "prost 0.13.5", "reqwest", "serde", "serde_json", @@ -8467,7 +8424,7 @@ dependencies = [ "url", "zaino-proto", "zaino-testvectors", - "zebra-chain", + "zebra-chain 3.1.0", "zebra-rpc", ] @@ -8475,7 +8432,7 @@ dependencies = [ name = "zaino-proto" version = "0.1.2" dependencies = [ - "prost", + "prost 0.13.5", "tonic 0.12.3", "tonic-build 0.12.3", "which 4.4.2", @@ -8487,6 +8444,7 @@ version = "0.1.2" dependencies = [ "futures", "jsonrpsee", + "serde", "thiserror 1.0.69", "tokio", "tonic 0.12.3", @@ -8496,7 +8454,7 @@ dependencies = [ "zaino-fetch", "zaino-proto", "zaino-state", - "zebra-chain", + "zebra-chain 3.1.0", "zebra-rpc", ] @@ -8524,8 +8482,9 @@ dependencies = [ "once_cell", "primitive-types 0.13.1", "proptest", - "prost", + "prost 0.13.5", "reqwest", + "sapling-crypto", "serde", "serde_json", "sha2 0.10.9", @@ -8541,13 +8500,12 @@ dependencies = [ "zaino-common", "zaino-fetch", "zaino-proto", - "zcash_address 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_address 0.9.0", "zcash_keys 0.10.1", - "zcash_local_net", - "zcash_primitives 0.24.1", + "zcash_primitives 0.26.0", "zcash_protocol 0.6.2", - "zcash_transparent 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zebra-chain", + "zcash_transparent 0.4.0", + "zebra-chain 3.1.0", "zebra-rpc", "zebra-state", ] @@ -8563,20 +8521,25 @@ dependencies = [ "proptest", "tempfile", "tokio", - "tonic 0.12.3", + "tonic 0.13.1", + "tracing", "tracing-subscriber", "zaino-common", + "zaino-proto", + "zaino-serve", "zaino-state", "zaino-testvectors", "zainod", - "zcash_client_backend 0.19.1 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_local_net", - "zcash_protocol 0.6.1", - "zebra-chain", + "zcash_client_backend", + "zcash_local_net 0.1.0 (git+https://github.com/zingolabs/infrastructure.git?rev=20f3c206321825952fdf2d9d84947f5c9d2a1cf4)", + "zcash_protocol 0.7.0", + "zebra-chain 3.1.0", + 
"zebra-state", "zingo_common_components 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?tag=zingo_common_components_v0.1.0)", - "zingo_netutils 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?tag=zingo_common_components_v0.1.0)", - "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?tag=zcash_local_net_v0.1.0)", + "zingo_netutils 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?rev=b64dfd6d6a2a597a5456d1cc7b2bc9b649328187)", + "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?rev=20f3c206321825952fdf2d9d84947f5c9d2a1cf4)", "zingolib", + "zingolib_testutils", "zip32", ] @@ -8604,24 +8567,10 @@ dependencies = [ "zaino-fetch", "zaino-serve", "zaino-state", - "zebra-chain", + "zebra-chain 3.1.0", "zebra-state", ] -[[package]] -name = "zcash_address" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c020e943fc2df6303d22b2bcbb3c0fd25f9d2419cbec508d13e66dcd77e354a6" -dependencies = [ - "bech32", - "bs58", - "core2 0.3.3", - "f4jumble 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_protocol 0.5.4", -] - [[package]] name = "zcash_address" version = "0.9.0" @@ -8638,83 +8587,21 @@ dependencies = [ [[package]] name = "zcash_address" -version = "0.9.0" -source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" +version = "0.10.0" +source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" dependencies = [ "bech32", "bs58", "core2 0.3.3", - "f4jumble 0.1.1 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_protocol 0.6.1", + "f4jumble 0.1.1 
(git+https://github.com/zcash/librustzcash?rev=3ba772c9b8)", + "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=3ba772c9b8)", + "zcash_protocol 0.7.0", ] [[package]] name = "zcash_client_backend" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bed6bdec743667d0c3b69fa46aaebcce28a3897ab7317165cd1b719c59a732a0" -dependencies = [ - "arti-client", - "base64", - "bech32", - "bip32", - "bls12_381", - "bs58", - "crossbeam-channel", - "document-features", - "dynosaur", - "fs-mistrust", - "futures-util", - "group", - "hex", - "http-body-util", - "hyper", - "hyper-util", - "incrementalmerkletree", - "memuse", - "nonempty", - "orchard", - "pasta_curves", - "percent-encoding", - "prost", - "rand 0.8.5", - "rand_core 0.6.4", - "rayon", - "rust_decimal", - "sapling-crypto", - "secrecy", - "serde", - "serde_json", - "shardtree", - "subtle", - "time", - "time-core", - "tokio", - "tokio-rustls", - "tonic 0.13.1", - "tonic-build 0.13.1", - "tor-rtcompat", - "tower 0.5.2", - "tracing", - "trait-variant", - "webpki-roots 0.26.11", - "which 7.0.3", - "zcash_address 0.8.0", - "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_keys 0.9.0", - "zcash_note_encryption", - "zcash_primitives 0.23.1", - "zcash_protocol 0.5.4", - "zcash_transparent 0.3.0", - "zip32", - "zip321 0.4.0", -] - -[[package]] -name = "zcash_client_backend" -version = "0.19.1" -source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" +version = "0.20.0" +source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" dependencies = [ "arti-client", "base64", @@ -8740,12 +8627,13 @@ dependencies = [ "orchard", "pasta_curves", "percent-encoding", - "prost", + "prost 0.13.5", "rand 0.8.5", "rand_core 0.6.4", "rayon", "rust_decimal", "sapling-crypto", + "secp256k1", "secrecy", "serde", 
"serde_json", @@ -8763,15 +8651,16 @@ dependencies = [ "trait-variant", "webpki-roots 0.26.11", "which 7.0.3", - "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_keys 0.10.0", + "zcash_address 0.10.0", + "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=3ba772c9b8)", + "zcash_keys 0.12.0", "zcash_note_encryption", - "zcash_primitives 0.24.0", - "zcash_protocol 0.6.1", - "zcash_transparent 0.4.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_primitives 0.26.0", + "zcash_protocol 0.7.0", + "zcash_script", + "zcash_transparent 0.6.0", "zip32", - "zip321 0.5.0", + "zip321", ] [[package]] @@ -8787,7 +8676,7 @@ dependencies = [ [[package]] name = "zcash_encoding" version = "0.3.0" -source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" +source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" dependencies = [ "core2 0.3.3", "nonempty", @@ -8806,12 +8695,11 @@ dependencies = [ [[package]] name = "zcash_keys" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20664d96a0e4de98f41b6b7a3b40a527e5f5428ca7f34758a084e60778d3b824" +checksum = "c6c8d3d5a08a66f76264c72172e692ec362218b091181cda30c04d00a4561cd8" dependencies = [ "bech32", - "bip32", "blake2b_simd", "bls12_381", "bs58", @@ -8820,23 +8708,21 @@ dependencies = [ "group", "memuse", "nonempty", - "orchard", "rand_core 0.6.4", - "sapling-crypto", "secrecy", "subtle", "tracing", - "zcash_address 0.8.0", + "zcash_address 0.9.0", "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_protocol 0.5.4", - "zcash_transparent 0.3.0", + 
"zcash_protocol 0.6.2", + "zcash_transparent 0.4.0", "zip32", ] [[package]] name = "zcash_keys" -version = "0.10.0" -source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" +version = "0.12.0" +source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" dependencies = [ "bech32", "bip32", @@ -8855,43 +8741,40 @@ dependencies = [ "secrecy", "subtle", "tracing", - "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_protocol 0.6.1", - "zcash_transparent 0.4.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_address 0.10.0", + "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=3ba772c9b8)", + "zcash_protocol 0.7.0", + "zcash_transparent 0.6.0", "zip32", ] [[package]] -name = "zcash_keys" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6c8d3d5a08a66f76264c72172e692ec362218b091181cda30c04d00a4561cd8" +name = "zcash_local_net" +version = "0.1.0" +source = "git+https://github.com/zingolabs/infrastructure.git?branch=dev#c0897685d653faf484ee5ade2694b323a2dc9866" dependencies = [ - "bech32", - "blake2b_simd", - "bls12_381", - "bs58", - "core2 0.3.3", - "document-features", - "group", - "memuse", - "nonempty", - "rand_core 0.6.4", - "secrecy", - "subtle", + "getset", + "hex", + "http", + "json", + "portpicker", + "serde_json", + "tempfile", + "thiserror 1.0.69", + "tokio", "tracing", - "zcash_address 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_protocol 0.6.2", - "zcash_transparent 0.4.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "zip32", + "zcash_protocol 0.7.0", + "zebra-chain 3.1.0", + "zebra-node-services", + "zebra-rpc", + "zingo_common_components 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?branch=dev)", + "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?branch=dev)", ] [[package]] name = "zcash_local_net" version = "0.1.0" -source = "git+https://github.com/zingolabs/infrastructure.git?tag=zcash_local_net_v0.1.0#9f479fe8610ac15bb67f06d9e65ee37bcfebd228" +source = "git+https://github.com/zingolabs/infrastructure.git?rev=20f3c206321825952fdf2d9d84947f5c9d2a1cf4#20f3c206321825952fdf2d9d84947f5c9d2a1cf4" dependencies = [ "getset", "hex", @@ -8904,11 +8787,12 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", - "zcash_protocol 0.6.1", - "zebra-chain", + "zcash_protocol 0.7.0", + "zebra-chain 3.1.0", "zebra-node-services", "zebra-rpc", - "zingo_common_components 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?rev=23814ee904ee64913585c0b8f871c6dbd94504c6)", + "zingo_common_components 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?branch=dev)", + "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?rev=20f3c206321825952fdf2d9d84947f5c9d2a1cf4)", ] [[package]] @@ -8926,9 +8810,9 @@ dependencies = [ [[package]] name = "zcash_primitives" -version = "0.23.1" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff99bb0d3aa7558bbaf51a3d4c072733afe364b45df9b3cc31118dcb644a51" +checksum = "76362b79e432bde2f22b3defcb6919d4fb50446985997169da3cc3ae4035a6d9" dependencies = [ "bip32", "blake2b_simd", @@ -8957,61 +8841,19 @@ dependencies = [ "sha2 0.10.9", "subtle", "tracing", - "zcash_address 0.8.0", + "zcash_address 0.9.0", "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "zcash_note_encryption", - "zcash_protocol 0.5.4", - "zcash_spec", - "zcash_transparent 0.3.0", - 
"zip32", -] - -[[package]] -name = "zcash_primitives" -version = "0.24.0" -source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" -dependencies = [ - "bip32", - "blake2b_simd", - "block-buffer 0.11.0-rc.3", - "bs58", - "core2 0.3.3", - "crypto-common 0.2.0-rc.1", - "document-features", - "equihash 0.2.2 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "ff", - "fpe", - "getset", - "group", - "hex", - "incrementalmerkletree", - "jubjub", - "memuse", - "nonempty", - "orchard", - "rand 0.8.5", - "rand_core 0.6.4", - "redjubjub", - "ripemd 0.1.3", - "sapling-crypto", - "secp256k1", - "sha2 0.10.9", - "subtle", - "tracing", - "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_note_encryption", - "zcash_protocol 0.6.1", + "zcash_protocol 0.6.2", "zcash_spec", - "zcash_transparent 0.4.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_transparent 0.4.0", "zip32", ] [[package]] name = "zcash_primitives" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76362b79e432bde2f22b3defcb6919d4fb50446985997169da3cc3ae4035a6d9" +version = "0.26.0" +source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" dependencies = [ "bip32", "blake2b_simd", @@ -9020,7 +8862,7 @@ dependencies = [ "core2 0.3.3", "crypto-common 0.2.0-rc.1", "document-features", - "equihash 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "equihash 0.2.2 (git+https://github.com/zcash/librustzcash?rev=3ba772c9b8)", "ff", "fpe", "getset", @@ -9040,42 +8882,21 @@ dependencies = [ "sha2 0.10.9", "subtle", "tracing", - "zcash_address 0.9.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_address 0.10.0", + "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=3ba772c9b8)", "zcash_note_encryption", - "zcash_protocol 0.6.2", + "zcash_protocol 0.7.0", + "zcash_script", "zcash_spec", - "zcash_transparent 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_transparent 0.6.0", "zip32", ] [[package]] name = "zcash_proofs" -version = "0.24.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f90d9521161f7308c2fe6bddf771947f1a0fcd01b9e8a3b624c30a5661ad945" -dependencies = [ - "bellman", - "blake2b_simd", - "bls12_381", - "document-features", - "group", - "home", - "jubjub", - "known-folders", - "lazy_static", - "rand_core 0.6.4", - "redjubjub", - "sapling-crypto", - "tracing", - "xdg", - "zcash_primitives 0.24.1", -] - -[[package]] -name = "zcash_proofs" -version = "0.24.0" -source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" +checksum = "43a2c13bb673d542608a0e6502ac5494136e7ce4ce97e92dd239489b2523eed9" dependencies = [ "bellman", "blake2b_simd", @@ -9091,26 +8912,16 @@ dependencies = [ "redjubjub", "sapling-crypto", "tracing", + "wagyu-zcash-parameters", "xdg", - "zcash_primitives 0.24.0", + "zcash_primitives 0.26.0", ] [[package]] name = "zcash_protocol" -version = "0.5.4" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42344f5735237d6e0eedd3680f1c92f64e9c4144045d7b5c82f4867c2cbc0a02" -dependencies = [ - "core2 0.3.3", - "document-features", - "hex", - "memuse", -] - -[[package]] -name = "zcash_protocol" -version = "0.6.1" -source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" +checksum = 
"12cc76dd1f77be473e5829dbd34890bcd36d08b1e8dde2da0aea355c812a8f28" dependencies = [ "core2 0.3.3", "document-features", @@ -9120,9 +8931,8 @@ dependencies = [ [[package]] name = "zcash_protocol" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cc76dd1f77be473e5829dbd34890bcd36d08b1e8dde2da0aea355c812a8f28" +version = "0.7.0" +source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" dependencies = [ "core2 0.3.3", "document-features", @@ -9132,20 +8942,17 @@ dependencies = [ [[package]] name = "zcash_script" -version = "0.3.2" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf6e76f310bb2d3cc233086a97c1710ba1de7ffbbf8198b8113407d0f427dfc" +checksum = "9bed6cf5b2b4361105d4ea06b2752f0c8af4641756c7fbc9858a80af186c234f" dependencies = [ - "bindgen 0.72.1", "bitflags 2.9.4", - "cc", - "enum_primitive", + "bounded-vec", "ripemd 0.1.3", "secp256k1", - "sha-1", + "sha1", "sha2 0.10.9", "thiserror 2.0.17", - "tracing", ] [[package]] @@ -9159,32 +8966,32 @@ dependencies = [ [[package]] name = "zcash_transparent" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b1302cf726e88326c2c6e9bbd2634064bb344df7740e0b6bacf2245abd1eebe" +checksum = "3a7c162a8aa6f708e842503ed5157032465dadfb1d7f63adf9db2d45213a0b11" dependencies = [ "bip32", "blake2b_simd", "bs58", "core2 0.3.3", + "document-features", "getset", "hex", "ripemd 0.1.3", "secp256k1", "sha2 0.10.9", "subtle", - "zcash_address 0.8.0", + "zcash_address 0.9.0", "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_protocol 0.5.4", + "zcash_protocol 0.6.2", "zcash_spec", "zip32", ] [[package]] name = "zcash_transparent" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a7c162a8aa6f708e842503ed5157032465dadfb1d7f63adf9db2d45213a0b11" 
+version = "0.6.0" +source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" dependencies = [ "bip32", "blake2b_simd", @@ -9197,41 +9004,76 @@ dependencies = [ "secp256k1", "sha2 0.10.9", "subtle", - "zcash_address 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_protocol 0.6.2", + "zcash_address 0.10.0", + "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=3ba772c9b8)", + "zcash_protocol 0.7.0", + "zcash_script", "zcash_spec", "zip32", ] [[package]] -name = "zcash_transparent" -version = "0.4.0" -source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" +name = "zebra-chain" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17a86ec712da2f25d3edc7e5cf0b1d15ef41ab35305e253f0f7cd9cecc0f1939" dependencies = [ - "bip32", + "bech32", + "bitflags 2.9.4", + "bitflags-serde-legacy", + "bitvec", "blake2b_simd", + "blake2s_simd", "bs58", - "core2 0.3.3", - "document-features", - "getset", + "byteorder", + "chrono", + "dirs", + "ed25519-zebra", + "equihash 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "futures", + "group", + "halo2_proofs", "hex", + "humantime", + "incrementalmerkletree", + "itertools 0.14.0", + "jubjub", + "lazy_static", + "num-integer", + "orchard", + "primitive-types 0.12.2", + "rand_core 0.6.4", + "rayon", + "reddsa", + "redjubjub", "ripemd 0.1.3", + "sapling-crypto", "secp256k1", + "serde", + "serde-big-array", + "serde_with", "sha2 0.10.9", - "subtle", - "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_protocol 0.6.1", - "zcash_spec", - "zip32", + 
"sinsemilla", + "static_assertions", + "tempfile", + "thiserror 2.0.17", + "tracing", + "uint 0.10.0", + "x25519-dalek", + "zcash_address 0.9.0", + "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_history", + "zcash_note_encryption", + "zcash_primitives 0.24.1", + "zcash_protocol 0.6.2", + "zcash_transparent 0.4.0", ] [[package]] name = "zebra-chain" -version = "2.0.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17a86ec712da2f25d3edc7e5cf0b1d15ef41ab35305e253f0f7cd9cecc0f1939" +checksum = "6b4aa7e85afd7bdf159e8c9a973d32bfc410be42ce82c2396690ae1208933bb8" dependencies = [ "bech32", "bitflags 2.9.4", @@ -9242,6 +9084,7 @@ dependencies = [ "bs58", "byteorder", "chrono", + "derive-getters", "dirs", "ed25519-zebra", "equihash 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -9281,26 +9124,28 @@ dependencies = [ "tracing", "uint 0.10.0", "x25519-dalek", - "zcash_address 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_address 0.10.0", "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "zcash_history", "zcash_note_encryption", - "zcash_primitives 0.24.1", - "zcash_protocol 0.6.2", - "zcash_transparent 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_primitives 0.26.0", + "zcash_protocol 0.7.0", + "zcash_script", + "zcash_transparent 0.6.0", "zebra-test", ] [[package]] name = "zebra-consensus" -version = "2.0.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19a44698a96b007f00da9a2e4c4cdee6c5adfc22996bdeb06ccc781b96d597c0" +checksum = "770efa97a22262ee80290b0ffa251cddb34d7c50351c4e5255ed80f644d035e0" dependencies = [ "bellman", "blake2b_simd", "bls12_381", "chrono", + "derive-getters", "futures", "futures-util", "halo2_proofs", @@ -9321,9 +9166,9 @@ dependencies = [ "tower-fallback", "tracing", "tracing-futures", - 
"wagyu-zcash-parameters", - "zcash_proofs 0.24.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zebra-chain", + "zcash_proofs", + "zcash_protocol 0.7.0", + "zebra-chain 3.1.0", "zebra-node-services", "zebra-script", "zebra-state", @@ -9331,9 +9176,9 @@ dependencies = [ [[package]] name = "zebra-network" -version = "1.1.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0230d1e515518e0ef33ca668f3abb98e364485f384f7ee101506ec64bca7e7d3" +checksum = "c3edeb353c33962fb5f9012745ddb44d33ee90acb8c9410669bf54d72488b8cf" dependencies = [ "bitflags 2.9.4", "byteorder", @@ -9363,14 +9208,14 @@ dependencies = [ "tracing", "tracing-error", "tracing-futures", - "zebra-chain", + "zebra-chain 3.1.0", ] [[package]] name = "zebra-node-services" -version = "1.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c808614a9d245ae8d6d3177c06a8c78c2a8566219b090d6a85dc46f4364eadad" +checksum = "863ddd987ed6373872b20629a471c535ae2ba114a193415227635840ec570ccd" dependencies = [ "color-eyre", "jsonrpsee-types", @@ -9378,14 +9223,15 @@ dependencies = [ "serde", "serde_json", "tokio", - "zebra-chain", + "tower 0.4.13", + "zebra-chain 3.1.0", ] [[package]] name = "zebra-rpc" -version = "2.0.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1ae56eb3c668366a751621f40e1c0569c32ae92467ec1f8bac5b755db308126" +checksum = "c81f52b33b83f5a7f9b0377981843242c2657027053a406f01b06862dddbf747" dependencies = [ "base64", "chrono", @@ -9401,26 +9247,29 @@ dependencies = [ "jsonrpsee-proc-macros", "jsonrpsee-types", "nix", - "prost", + "prost 0.14.1", "rand 0.8.5", + "sapling-crypto", "semver", "serde", "serde_json", "serde_with", "tokio", "tokio-stream", - "tonic 0.12.3", - "tonic-build 0.12.3", + "tonic 0.14.2", + "tonic-prost", + "tonic-prost-build", "tonic-reflection", "tower 0.4.13", "tracing", "which 8.0.0", - "zcash_address 0.9.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "zcash_keys 0.10.1", - "zcash_primitives 0.24.1", - "zcash_protocol 0.6.2", - "zcash_transparent 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zebra-chain", + "zcash_address 0.10.0", + "zcash_keys 0.12.0", + "zcash_primitives 0.26.0", + "zcash_protocol 0.7.0", + "zcash_script", + "zcash_transparent 0.6.0", + "zebra-chain 3.1.0", "zebra-consensus", "zebra-network", "zebra-node-services", @@ -9430,25 +9279,28 @@ dependencies = [ [[package]] name = "zebra-script" -version = "2.0.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76a2e972e414caa3635b8c2d21f20c21a71c69f76b37bf7419d97ed0c2277e7" +checksum = "11acd2a4f9077e3698bbc51a3f04c247381f7bf7f77aacc408e5760d3246993b" dependencies = [ + "libzcash_script", "thiserror 2.0.17", - "zcash_primitives 0.24.1", + "zcash_primitives 0.26.0", "zcash_script", - "zebra-chain", + "zebra-chain 3.1.0", ] [[package]] name = "zebra-state" -version = "2.0.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "129b32692f22207719dd1c5ddcbae59b96a322e2329664787f6247acef78c7f3" +checksum = "b47250eaaa047bebde853a54184ab00ab19f47ed451f35b7c1ae8fe17004d87a" dependencies = [ "bincode", "chrono", "crossbeam-channel", + "derive-getters", + "derive-new", "dirs", "futures", "hex", @@ -9464,6 +9316,7 @@ dependencies = [ "regex", "rlimit", "rocksdb", + "sapling-crypto", "semver", "serde", "tempfile", @@ -9471,14 +9324,15 @@ dependencies = [ "tokio", "tower 0.4.13", "tracing", - "zebra-chain", + "zebra-chain 3.1.0", + "zebra-node-services", ] [[package]] name = "zebra-test" -version = "1.0.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97c615cfa095226c8a0db20cb734181b4b63d36241a40492a0fa359ffc6ebc50" +checksum = "6a7e1a3cc12e6a0cea765b16012f3c7dfc5394ba3e0a8fcaf10cd0fa3b57d8c0" dependencies = [ "color-eyre", "futures", @@ 
-9495,7 +9349,6 @@ dependencies = [ "regex", "spandoc", "thiserror 2.0.17", - "tinyvec", "tokio", "tower 0.4.13", "tracing", @@ -9597,47 +9450,22 @@ dependencies = [ "syn 2.0.106", ] -[[package]] -name = "zingo-infra-services" -version = "0.1.0" -source = "git+https://github.com/zingolabs/infrastructure.git?tag=zingo-infra-services-0.4.0#8986db77041172aa3ed896d1fd37bef5723c8b87" -dependencies = [ - "getset", - "hex", - "http", - "json", - "portpicker", - "reqwest", - "serde_json", - "sha2 0.10.9", - "tempfile", - "thiserror 1.0.69", - "tokio", - "tracing", - "tracing-subscriber", - "zcash_primitives 0.24.0", - "zcash_protocol 0.6.1", - "zebra-chain", - "zebra-node-services", - "zebra-rpc", -] - [[package]] name = "zingo-memo" version = "0.0.1" -source = "git+https://github.com/zingolabs/zingolib.git?rev=f88e1d76ea244d6cc48d7fd4c3a609c6598318dc#f88e1d76ea244d6cc48d7fd4c3a609c6598318dc" +source = "git+https://github.com/zingolabs/zingolib.git?branch=dev#a94c2e338cbddc76db5903bdc5c0760a0dea3e56" dependencies = [ - "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_client_backend 0.19.1 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_keys 0.10.0", - "zcash_primitives 0.24.0", + "zcash_address 0.10.0", + "zcash_client_backend", + "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_keys 0.12.0", + "zcash_primitives 0.26.0", ] [[package]] name = "zingo-price" version = "0.0.1" -source = "git+https://github.com/zingolabs/zingolib.git?rev=f88e1d76ea244d6cc48d7fd4c3a609c6598318dc#f88e1d76ea244d6cc48d7fd4c3a609c6598318dc" +source = "git+https://github.com/zingolabs/zingolib.git?branch=dev#a94c2e338cbddc76db5903bdc5c0760a0dea3e56" dependencies = [ "byteorder", "reqwest", @@ -9646,17 +9474,17 @@ dependencies 
= [ "serde", "serde_json", "thiserror 2.0.17", - "zcash_client_backend 0.19.1 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_client_backend", + "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "zingo-status" version = "0.0.1" -source = "git+https://github.com/zingolabs/zingolib.git?rev=f88e1d76ea244d6cc48d7fd4c3a609c6598318dc#f88e1d76ea244d6cc48d7fd4c3a609c6598318dc" +source = "git+https://github.com/zingolabs/zingolib.git?branch=dev#a94c2e338cbddc76db5903bdc5c0760a0dea3e56" dependencies = [ "byteorder", - "zcash_primitives 0.24.0", + "zcash_primitives 0.26.0", ] [[package]] @@ -9664,21 +9492,21 @@ name = "zingo_common_components" version = "0.1.0" source = "git+https://github.com/zingolabs/zingo-common.git?tag=zingo_common_components_v0.1.0#7407028a9c561561d174740e70170c4c20529bcd" dependencies = [ - "zebra-chain", + "zebra-chain 2.0.0", ] [[package]] name = "zingo_common_components" version = "0.1.0" -source = "git+https://github.com/zingolabs/zingo-common.git?rev=23814ee904ee64913585c0b8f871c6dbd94504c6#23814ee904ee64913585c0b8f871c6dbd94504c6" +source = "git+https://github.com/zingolabs/zingo-common.git?branch=dev#b64dfd6d6a2a597a5456d1cc7b2bc9b649328187" dependencies = [ - "zebra-chain", + "zebra-chain 3.1.0", ] [[package]] name = "zingo_netutils" version = "0.1.0" -source = "git+https://github.com/zingolabs/zingo-common.git?tag=zingo_common_components_v0.1.0#7407028a9c561561d174740e70170c4c20529bcd" +source = "git+https://github.com/zingolabs/zingo-common.git?branch=dev#b64dfd6d6a2a597a5456d1cc7b2bc9b649328187" dependencies = [ "http", "http-body", @@ -9690,35 +9518,31 @@ dependencies = [ "tonic 0.13.1", "tower 0.5.2", "webpki-roots 0.25.4", - "zcash_client_backend 0.19.1 
(git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", + "zcash_client_backend", ] [[package]] name = "zingo_netutils" version = "0.1.0" -source = "git+https://github.com/zingolabs/infrastructure.git?rev=cc2af1ac80cb6b7a7f0a0b8f331f7b0873d667bf#cc2af1ac80cb6b7a7f0a0b8f331f7b0873d667bf" +source = "git+https://github.com/zingolabs/zingo-common.git?rev=b64dfd6d6a2a597a5456d1cc7b2bc9b649328187#b64dfd6d6a2a597a5456d1cc7b2bc9b649328187" dependencies = [ "http", "http-body", - "http-body-util", "hyper", "hyper-rustls", "hyper-util", - "prost", "thiserror 1.0.69", - "time", - "time-core", "tokio-rustls", "tonic 0.13.1", "tower 0.5.2", "webpki-roots 0.25.4", - "zcash_client_backend 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_client_backend", ] [[package]] name = "zingo_test_vectors" version = "0.0.1" -source = "git+https://github.com/zingolabs/infrastructure.git?tag=zcash_local_net_v0.1.0#9f479fe8610ac15bb67f06d9e65ee37bcfebd228" +source = "git+https://github.com/zingolabs/infrastructure.git?branch=dev#c0897685d653faf484ee5ade2694b323a2dc9866" dependencies = [ "bip0039", ] @@ -9726,7 +9550,7 @@ dependencies = [ [[package]] name = "zingo_test_vectors" version = "0.0.1" -source = "git+https://github.com/zingolabs/infrastructure.git?rev=89e0b665967a0dd1950855ad37ce18d9c5a14709#89e0b665967a0dd1950855ad37ce18d9c5a14709" +source = "git+https://github.com/zingolabs/infrastructure.git?rev=20f3c206321825952fdf2d9d84947f5c9d2a1cf4#20f3c206321825952fdf2d9d84947f5c9d2a1cf4" dependencies = [ "bip0039", ] @@ -9734,7 +9558,7 @@ dependencies = [ [[package]] name = "zingolib" version = "0.0.1" -source = "git+https://github.com/zingolabs/zingolib.git?rev=f88e1d76ea244d6cc48d7fd4c3a609c6598318dc#f88e1d76ea244d6cc48d7fd4c3a609c6598318dc" +source = "git+https://github.com/zingolabs/zingolib.git?branch=dev#a94c2e338cbddc76db5903bdc5c0760a0dea3e56" dependencies = [ "append-only-vec", "bech32", @@ -9759,7 +9583,7 @@ dependencies = [ 
"orchard", "pepper-sync", "portpicker", - "prost", + "prost 0.13.5", "rand 0.8.5", "ring 0.17.14", "rust-embed", @@ -9779,21 +9603,40 @@ dependencies = [ "tracing", "tracing-subscriber", "webpki-roots 0.25.4", - "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_client_backend 0.19.1 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_encoding 0.3.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_keys 0.10.0", - "zcash_primitives 0.24.0", - "zcash_proofs 0.24.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_protocol 0.6.1", - "zcash_transparent 0.4.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zebra-chain", - "zingo-infra-services", + "zcash_address 0.10.0", + "zcash_client_backend", + "zcash_encoding 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_keys 0.12.0", + "zcash_primitives 0.26.0", + "zcash_proofs", + "zcash_protocol 0.7.0", + "zcash_transparent 0.6.0", + "zebra-chain 3.1.0", "zingo-memo", "zingo-price", "zingo-status", - "zingo_netutils 0.1.0 (git+https://github.com/zingolabs/infrastructure.git?rev=cc2af1ac80cb6b7a7f0a0b8f331f7b0873d667bf)", - "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?rev=89e0b665967a0dd1950855ad37ce18d9c5a14709)", + "zingo_common_components 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?branch=dev)", + "zingo_netutils 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?branch=dev)", + "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?branch=dev)", + "zip32", +] + +[[package]] +name = "zingolib_testutils" +version = "0.1.0" +source = "git+https://github.com/zingolabs/zingolib.git?branch=dev#a94c2e338cbddc76db5903bdc5c0760a0dea3e56" +dependencies = [ + "bip0039", + 
"http", + "pepper-sync", + "portpicker", + "tempfile", + "zcash_local_net 0.1.0 (git+https://github.com/zingolabs/infrastructure.git?branch=dev)", + "zcash_protocol 0.7.0", + "zebra-chain 3.1.0", + "zingo_common_components 0.1.0 (git+https://github.com/zingolabs/zingo-common.git?branch=dev)", + "zingo_test_vectors 0.0.1 (git+https://github.com/zingolabs/infrastructure.git?branch=dev)", + "zingolib", "zip32", ] @@ -9812,27 +9655,14 @@ dependencies = [ [[package]] name = "zip321" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41f92f290c86ae1bdcdc4c41ce67fdabf8cd2a75bc89be8235cd5b1354efae06" -dependencies = [ - "base64", - "nom", - "percent-encoding", - "zcash_address 0.8.0", - "zcash_protocol 0.5.4", -] - -[[package]] -name = "zip321" -version = "0.5.0" -source = "git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b#d387aed7e04e881dbe30c6ff8b26a96c834c094b" +version = "0.6.0" +source = "git+https://github.com/zcash/librustzcash?rev=3ba772c9b8#3ba772c9b85c7d781a37c5a7dd5465727b583b7a" dependencies = [ "base64", "nom", "percent-encoding", - "zcash_address 0.9.0 (git+https://github.com/zcash/librustzcash?rev=d387aed7e04e881dbe30c6ff8b26a96c834c094b)", - "zcash_protocol 0.6.1", + "zcash_address 0.10.0", + "zcash_protocol 0.7.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index db39ae9b0..c2eea5f7b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -117,7 +117,7 @@ zaino-testvectors = { path = "zaino-testvectors" } zainod = { path = "zainod" } figment = "0.10" nonempty = "0.11.0" -proptest = "~1.2" +proptest = "~1.6" zip32 = "0.2.1" # Patch for vulnerable dependency From e5851524280aae9df19f3902cf2d2618708ffe4b Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Mon, 15 Dec 2025 13:27:46 -0400 Subject: [PATCH 013/114] Unify ChainSegment type --- zaino-state/src/chain_index/tests.rs | 1 + .../src/chain_index/tests/proptest_blockgen.rs | 18 +++++++----------- 2 files changed, 8 
insertions(+), 11 deletions(-) diff --git a/zaino-state/src/chain_index/tests.rs b/zaino-state/src/chain_index/tests.rs index 026482906..8cd258023 100644 --- a/zaino-state/src/chain_index/tests.rs +++ b/zaino-state/src/chain_index/tests.rs @@ -2,6 +2,7 @@ pub(crate) mod finalised_state; pub(crate) mod mempool; +mod proptest_blockgen; pub(crate) mod vectors; pub(crate) fn init_tracing() { diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index aac41e9df..ee5a72b4b 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -68,8 +68,8 @@ fn make_chain() { #[derive(Clone)] struct ProptestMockchain { - genesis_segment: SummaryDebug>>, - branching_segments: Vec>>>, + genesis_segment: ChainSegment, + branching_segments: Vec, } impl ProptestMockchain { @@ -297,14 +297,12 @@ impl BlockchainSource for ProptestMockchain { } } +type ChainSegment = SummaryDebug>>; fn make_branching_chain( num_branches: usize, chain_size: usize, network_override: Network, -) -> BoxedStrategy<( - SummaryDebug>>, - Vec>>>, -)> { +) -> BoxedStrategy<(ChainSegment, Vec)> { let network_override = Some(network_override.to_zebra_network()); // these feel like they shouldn't be needed. 
The closure lifetimes are fighting me let n_o_clone = network_override.clone(); @@ -344,8 +342,6 @@ fn make_branching_chain( .boxed() } -type ProptestChainSegment = SummaryDebug>>; - mod proptest_helpers { use proptest::prelude::{Arbitrary, BoxedStrategy, Strategy}; @@ -358,13 +354,13 @@ mod proptest_helpers { LedgerState, }; - use super::ProptestChainSegment; + use super::ChainSegment; pub(super) fn add_segment( - previous_chain: ProptestChainSegment, + previous_chain: ChainSegment, network_override: Option, segment_length: usize, - ) -> BoxedStrategy { + ) -> BoxedStrategy { LedgerState::arbitrary_with(LedgerStateOverride { height_override: Some( previous_chain From 713b224b7061b7676e49ac3a64768b0788f0e81a Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Mon, 15 Dec 2025 14:09:21 -0400 Subject: [PATCH 014/114] test currently shows with a 50% chance of a reorg on any request for a block by height, zaino does not sync in the current waiting period --- Cargo.lock | 1 + zaino-state/Cargo.toml | 1 + .../chain_index/tests/proptest_blockgen.rs | 36 +++++++++++++++---- 3 files changed, 31 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3ae2e86e5..89cced7b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8483,6 +8483,7 @@ dependencies = [ "primitive-types 0.13.1", "proptest", "prost 0.13.5", + "rand 0.8.5", "reqwest", "sapling-crypto", "serde", diff --git a/zaino-state/Cargo.toml b/zaino-state/Cargo.toml index 8822284ec..a6a360b91 100644 --- a/zaino-state/Cargo.toml +++ b/zaino-state/Cargo.toml @@ -69,6 +69,7 @@ once_cell = { workspace = true } zebra-chain = { workspace = true, features = ["proptest-impl"] } proptest.workspace = true incrementalmerkletree = "*" +rand = "0.8.5" [build-dependencies] whoami = { workspace = true } diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index ee5a72b4b..3d45f3d54 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ 
b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -4,6 +4,7 @@ use proptest::{ prelude::{Arbitrary as _, BoxedStrategy, Just}, strategy::Strategy, }; +use rand::seq::SliceRandom as _; use tonic::async_trait; use zaino_common::{network::ActivationHeights, DatabaseConfig, Network, StorageConfig}; use zebra_chain::{ @@ -25,11 +26,19 @@ use crate::{ #[test] fn make_chain() { - init_tracing(); + // init_tracing(); let network = Network::Regtest(ActivationHeights::default()); + // The length of the initial segment, and of the branches + // TODO: it would be useful to allow branches of different lengths. + let segment_length = 12; + + // The number of separate branches, after the branching point at the tip + // of the initial segment. + let branch_count = 2; + // default is 256. As each case takes multiple seconds, this seems too many. // TODO: this should be higher than 1. Currently set to 1 for ease of iteration - proptest::proptest!(proptest::test_runner::Config::with_cases(1), |(segments in make_branching_chain(2, 12, network))| { + proptest::proptest!(proptest::test_runner::Config::with_cases(30), |(segments in make_branching_chain(2, segment_length, network))| { let runtime = tokio::runtime::Builder::new_multi_thread().worker_threads(2).enable_time().build().unwrap(); runtime.block_on(async { let (genesis_segment, branching_segments) = segments; @@ -59,9 +68,17 @@ fn make_chain() { tokio::time::sleep(Duration::from_secs(2)).await; let index_reader = indexer.subscriber().await; let snapshot = index_reader.snapshot_nonfinalized_state(); - dbg!(snapshot.best_chaintip()); - dbg!(snapshot.blocks.len()); - dbg!(snapshot.heights_to_hashes.len()); + let best_tip_hash = snapshot.best_chaintip().blockhash; + let best_tip_block = snapshot + .get_chainblock_by_hash(&best_tip_hash) + .unwrap(); + for (hash, block) in &snapshot.blocks { + if hash != &best_tip_hash { + assert!(block.chainwork().to_u256() <= best_tip_block.chainwork().to_u256()); + } + } + 
assert_eq!(snapshot.heights_to_hashes.len(), segment_length * 2); + assert_eq!(snapshot.heights_to_hashes.len(), segment_length * (branch_count + 1)); }); }); } @@ -157,15 +174,20 @@ impl BlockchainSource for ProptestMockchain { }) .cloned()) } + // This implementation selects a block from a random branch instead + // of the best branch. This is intended to simulate reorgs HashOrHeight::Height(height) => Ok(self .genesis_segment .iter() .find(|block| block.coinbase_height().unwrap() == height) .cloned() .or_else(|| { - self.best_branch() - .into_iter() + self.branching_segments + .choose(&mut rand::thread_rng()) + .unwrap() + .iter() .find(|block| block.coinbase_height().unwrap() == height) + .cloned() })), } } From 2d3fc5e2a46cff5ea2bf7c0e3b11a680eb973f63 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Mon, 15 Dec 2025 15:56:01 -0400 Subject: [PATCH 015/114] zaino attempting to reorg below genesis: investigate --- .../chain_index/tests/proptest_blockgen.rs | 110 ++++++++++++++---- 1 file changed, 89 insertions(+), 21 deletions(-) diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 3d45f3d54..8d9faca17 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -1,10 +1,14 @@ -use std::{sync::Arc, time::Duration}; +use std::{ + sync::{Arc, Mutex}, + time::Duration, +}; +use primitive_types::U256; use proptest::{ prelude::{Arbitrary as _, BoxedStrategy, Just}, strategy::Strategy, }; -use rand::seq::SliceRandom as _; +use rand::{seq::SliceRandom, thread_rng}; use tonic::async_trait; use zaino_common::{network::ActivationHeights, DatabaseConfig, Network, StorageConfig}; use zebra_chain::{ @@ -26,7 +30,7 @@ use crate::{ #[test] fn make_chain() { - // init_tracing(); + init_tracing(); let network = Network::Regtest(ActivationHeights::default()); // The length of the initial segment, and of the branches // TODO: it would 
be useful to allow branches of different lengths. @@ -38,11 +42,12 @@ fn make_chain() { // default is 256. As each case takes multiple seconds, this seems too many. // TODO: this should be higher than 1. Currently set to 1 for ease of iteration - proptest::proptest!(proptest::test_runner::Config::with_cases(30), |(segments in make_branching_chain(2, segment_length, network))| { + proptest::proptest!(proptest::test_runner::Config::with_cases(1), |(segments in make_branching_chain(2, segment_length, network))| { let runtime = tokio::runtime::Builder::new_multi_thread().worker_threads(2).enable_time().build().unwrap(); runtime.block_on(async { let (genesis_segment, branching_segments) = segments; let mockchain = ProptestMockchain { + best_block: Mutex::new(genesis_segment.first().unwrap().hash()), genesis_segment, branching_segments, }; @@ -65,7 +70,7 @@ fn make_chain() { let indexer = NodeBackedChainIndex::new(mockchain.clone(), config) .await .unwrap(); - tokio::time::sleep(Duration::from_secs(2)).await; + tokio::time::sleep(Duration::from_secs(5)).await; let index_reader = indexer.subscriber().await; let snapshot = index_reader.snapshot_nonfinalized_state(); let best_tip_hash = snapshot.best_chaintip().blockhash; @@ -83,10 +88,22 @@ fn make_chain() { }); } -#[derive(Clone)] struct ProptestMockchain { genesis_segment: ChainSegment, branching_segments: Vec, + // Updated each time we simulate a reorg, to keep us + // from reorging indefinitely + best_block: Mutex, +} + +impl Clone for ProptestMockchain { + fn clone(&self) -> Self { + Self { + genesis_segment: self.genesis_segment.clone(), + branching_segments: self.branching_segments.clone(), + best_block: Mutex::new(*self.best_block.lock().unwrap()), + } + } } impl ProptestMockchain { @@ -174,21 +191,72 @@ impl BlockchainSource for ProptestMockchain { }) .cloned()) } - // This implementation selects a block from a random branch instead - // of the best branch. 
This is intended to simulate reorgs - HashOrHeight::Height(height) => Ok(self - .genesis_segment - .iter() - .find(|block| block.coinbase_height().unwrap() == height) - .cloned() - .or_else(|| { - self.branching_segments - .choose(&mut rand::thread_rng()) - .unwrap() - .iter() - .find(|block| block.coinbase_height().unwrap() == height) - .cloned() - })), + // This implementation: + // a) Picks a random block in the branching section with + // chainwork >= the current 'best_block' + // b) sets that block as the new best block + // c) returns the block at the provided height, on the best_block's chain + HashOrHeight::Height(height) => Ok({ + let chainwork = self + .get_block_and_all_preceeding(|block| { + block.hash() == *self.best_block.lock().unwrap() + }) + .unwrap() + .iter() + .map(|block| { + block + .header + .difficulty_threshold + .to_work() + .unwrap() + .as_u128() + }) + .sum(); + let better_blocks: Vec<_> = self + .branching_segments + .iter() + .flat_map(|segment| segment.iter()) + .filter(|block| { + self.get_block_and_all_preceeding(|b| b.hash() == block.hash()) + .unwrap() + .iter() + .map(|block| { + block + .header + .difficulty_threshold + .to_work() + .unwrap() + .as_u128() + }) + .sum::() + >= chainwork + }) + .collect(); + println!("prev best chainwork: {chainwork}"); + *self.best_block.lock().unwrap() = better_blocks + .choose(&mut rand::thread_rng()) + .unwrap() + .hash(); + + self.genesis_segment + .iter() + .find(|block| block.coinbase_height().unwrap() == height) + .cloned() + .or_else(|| { + self.branching_segments + .iter() + .find(|branch| { + branch + .iter() + .find(|block| block.hash() == *self.best_block.lock().unwrap()) + .is_some() + }) + .unwrap() + .iter() + .find(|block| block.coinbase_height().unwrap() == height) + .cloned() + }) + }), } } From f0861c98d7c5b2538b8cdaf402606ca3ec19629f Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Tue, 16 Dec 2025 14:10:04 -0400 Subject: [PATCH 016/114] sync differently. 
Main chain path complete-ish, reorg path todoed --- .../src/chain_index/non_finalised_state.rs | 84 ++++++++++--------- .../chain_index/tests/proptest_blockgen.rs | 10 +-- 2 files changed, 48 insertions(+), 46 deletions(-) diff --git a/zaino-state/src/chain_index/non_finalised_state.rs b/zaino-state/src/chain_index/non_finalised_state.rs index b5b84eb6e..144618bb0 100644 --- a/zaino-state/src/chain_index/non_finalised_state.rs +++ b/zaino-state/src/chain_index/non_finalised_state.rs @@ -10,7 +10,7 @@ use primitive_types::U256; use std::{collections::HashMap, mem, sync::Arc}; use tokio::sync::mpsc; use tracing::{info, warn}; -use zebra_chain::parameters::Network; +use zebra_chain::{parameters::Network, serialization::BytesInDisplayOrder}; use zebra_state::HashOrHeight; /// Holds the block cache @@ -44,7 +44,7 @@ pub struct BestTip { pub blockhash: BlockHash, } -#[derive(Debug)] +#[derive(Debug, Clone)] /// A snapshot of the nonfinalized state as it existed when this was created. pub struct NonfinalizedBlockCacheSnapshot { /// the set of all known blocks < 100 blocks old @@ -176,6 +176,16 @@ impl NonfinalizedBlockCacheSnapshot { best_tip, }) } + + fn add_block_at_tip(&mut self, block: IndexedBlock) { + self.best_tip = BestTip { + height: self.best_tip.height + 1, + blockhash: *block.hash(), + }; + self.heights_to_hashes + .insert(block.height().expect("block to have height"), *block.hash()); + self.blocks.insert(*block.hash(), block); + } } impl NonFinalizedState { @@ -287,16 +297,11 @@ impl NonFinalizedState { /// sync to the top of the chain, trimming to the finalised tip. 
pub(super) async fn sync(&self, finalized_db: Arc) -> Result<(), SyncError> { - let initial_state = self.get_snapshot(); + let mut working_snapshot = self.get_snapshot().as_ref().clone(); let mut nonbest_blocks = HashMap::new(); // Fetch main chain blocks and handle reorgs - let new_blocks = self - .fetch_main_chain_blocks(&initial_state, &mut nonbest_blocks) - .await?; - - // Stage and update new blocks - self.stage_new_blocks(new_blocks, &finalized_db).await?; + let new_blocks = self.fetch_main_chain_blocks(&mut working_snapshot).await?; // Handle non-finalized change listener self.handle_nfs_change_listener(&mut nonbest_blocks).await?; @@ -314,11 +319,9 @@ impl NonFinalizedState { /// Fetch main chain blocks and handle reorgs async fn fetch_main_chain_blocks( &self, - initial_state: &NonfinalizedBlockCacheSnapshot, - nonbest_blocks: &mut HashMap>, - ) -> Result, SyncError> { - let mut new_blocks = Vec::new(); - let mut best_tip = initial_state.best_tip; + working_snapshot: &mut NonfinalizedBlockCacheSnapshot, + ) -> Result<(), SyncError> { + let mut best_tip = working_snapshot.best_tip; // currently this only gets main-chain blocks // once readstateservice supports serving sidechain data, this @@ -342,43 +345,42 @@ impl NonFinalizedState { let parent_hash = BlockHash::from(block.header.previous_block_hash); if parent_hash == best_tip.blockhash { // Normal chain progression - let prev_block = match new_blocks.last() { - Some(block) => block, - None => initial_state - .blocks - .get(&best_tip.blockhash) - .ok_or_else(|| { - SyncError::ReorgFailure(format!( - "found blocks {:?}, expected block {:?}", - initial_state - .blocks - .values() - .map(|block| (block.index().hash(), block.index().height())) - .collect::>(), - best_tip - )) - })?, - }; + let prev_block = working_snapshot + .blocks + .get(&best_tip.blockhash) + .ok_or_else(|| { + SyncError::ReorgFailure(format!( + "found blocks {:?}, expected block {:?}", + working_snapshot + .blocks + .values() + 
.map(|block| (block.index().hash(), block.index().height())) + .collect::>(), + best_tip + )) + })?; let chainblock = self.block_to_chainblock(prev_block, &block).await?; info!( "syncing block {} at height {}", &chainblock.index().hash(), best_tip.height + 1 ); - best_tip = BestTip { - height: best_tip.height + 1, - blockhash: *chainblock.hash(), - }; - new_blocks.push(chainblock.clone()); + working_snapshot.add_block_at_tip(chainblock); } else { - // Handle reorg - info!("Reorg detected at height {}", best_tip.height + 1); - best_tip = self.handle_reorg(initial_state, best_tip)?; - nonbest_blocks.insert(block.hash(), block); + match working_snapshot.blocks.values().find(|b| { + b.hash().bytes_in_display_order() + == block.header.previous_block_hash.bytes_in_display_order() + }) { + Some(mut prev_block) => loop {}, + None => todo!( + "Keep fetching parent blocks from source \ + until we know the parent block" + ), + } } } - Ok(new_blocks) + Ok(()) } /// Handle a blockchain reorg by finding the common ancestor diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 8d9faca17..5dd4162ab 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -229,14 +229,14 @@ impl BlockchainSource for ProptestMockchain { .as_u128() }) .sum::() - >= chainwork + > chainwork }) .collect(); println!("prev best chainwork: {chainwork}"); - *self.best_block.lock().unwrap() = better_blocks - .choose(&mut rand::thread_rng()) - .unwrap() - .hash(); + + if let Some(block) = better_blocks.choose(&mut rand::thread_rng()) { + *self.best_block.lock().unwrap() = block.hash() + } self.genesis_segment .iter() From 0576cea508061b64a161d1a1754969512615da96 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Wed, 17 Dec 2025 15:20:38 -0400 Subject: [PATCH 017/114] cleanup before reorg handling --- .../src/chain_index/non_finalised_state.rs | 245 
+++--------------- 1 file changed, 42 insertions(+), 203 deletions(-) diff --git a/zaino-state/src/chain_index/non_finalised_state.rs b/zaino-state/src/chain_index/non_finalised_state.rs index 144618bb0..c48e935d1 100644 --- a/zaino-state/src/chain_index/non_finalised_state.rs +++ b/zaino-state/src/chain_index/non_finalised_state.rs @@ -7,7 +7,7 @@ use crate::{ use arc_swap::ArcSwap; use futures::lock::Mutex; use primitive_types::U256; -use std::{collections::HashMap, mem, sync::Arc}; +use std::{collections::HashMap, sync::Arc}; use tokio::sync::mpsc; use tracing::{info, warn}; use zebra_chain::{parameters::Network, serialization::BytesInDisplayOrder}; @@ -18,8 +18,6 @@ pub struct NonFinalizedState { /// We need access to the validator's best block hash, as well /// as a source of blocks pub(super) source: Source, - staged: Mutex>, - staging_sender: mpsc::Sender, /// This lock should not be exposed to consumers. Rather, /// clone the Arc and offer that. This means we can overwrite the arc /// without interfering with readers, who will hold a stale copy @@ -130,23 +128,6 @@ pub enum InitError { InitalBlockMissingHeight, } -/// Staging infrastructure for block processing -struct StagingChannel { - receiver: Mutex>, - sender: mpsc::Sender, -} - -impl StagingChannel { - /// Create new staging channel with the given buffer size - fn new(buffer_size: usize) -> Self { - let (sender, receiver) = mpsc::channel(buffer_size); - Self { - receiver: Mutex::new(receiver), - sender, - } - } -} - /// This is the core of the concurrent block cache. 
impl BestTip { /// Create a BestTip from an IndexedBlock @@ -186,6 +167,10 @@ impl NonfinalizedBlockCacheSnapshot { .insert(block.height().expect("block to have height"), *block.hash()); self.blocks.insert(*block.hash(), block); } + + fn remove_finalized_blocks(&self, finalized_height: Height) { + todo!() + } } impl NonFinalizedState { @@ -201,9 +186,6 @@ impl NonFinalizedState { ) -> Result { info!("Initialising non-finalised state."); - // Set up staging channel for block processing - let staging_channel = StagingChannel::new(100); - // Resolve the initial block (provided or genesis) let initial_block = Self::resolve_initial_block(&source, &network, start_block).await?; @@ -215,8 +197,6 @@ impl NonFinalizedState { Ok(Self { source, - staged: staging_channel.receiver, - staging_sender: staging_channel.sender, current: ArcSwap::new(Arc::new(snapshot)), network, nfs_change_listener, @@ -297,32 +277,32 @@ impl NonFinalizedState { /// sync to the top of the chain, trimming to the finalised tip. 
pub(super) async fn sync(&self, finalized_db: Arc) -> Result<(), SyncError> { - let mut working_snapshot = self.get_snapshot().as_ref().clone(); + let initial_state = self.get_snapshot(); + let working_snapshot = initial_state.as_ref().clone(); let mut nonbest_blocks = HashMap::new(); // Fetch main chain blocks and handle reorgs - let new_blocks = self.fetch_main_chain_blocks(&mut working_snapshot).await?; + let new_blocks = self + .fetch_main_chain_blocks( + finalized_db.clone(), + initial_state.clone(), + working_snapshot, + ) + .await?; // Handle non-finalized change listener self.handle_nfs_change_listener(&mut nonbest_blocks).await?; - // Update finalized state - self.update(finalized_db.clone()).await?; - - // Process non-best chain blocks - self.process_nonbest_blocks(nonbest_blocks, &finalized_db) - .await?; - Ok(()) } /// Fetch main chain blocks and handle reorgs async fn fetch_main_chain_blocks( &self, - working_snapshot: &mut NonfinalizedBlockCacheSnapshot, + finalized_db: Arc, + mut initial_state: Arc, + mut working_snapshot: NonfinalizedBlockCacheSnapshot, ) -> Result<(), SyncError> { - let mut best_tip = working_snapshot.best_tip; - // currently this only gets main-chain blocks // once readstateservice supports serving sidechain data, this // must be rewritten to match @@ -332,7 +312,7 @@ impl NonFinalizedState { while let Some(block) = self .source .get_block(HashOrHeight::Height(zebra_chain::block::Height( - u32::from(best_tip.height) + 1, + u32::from(working_snapshot.best_tip.height) + 1, ))) .await .map_err(|e| { @@ -343,11 +323,11 @@ impl NonFinalizedState { })? 
{ let parent_hash = BlockHash::from(block.header.previous_block_hash); - if parent_hash == best_tip.blockhash { + if parent_hash == working_snapshot.best_tip.blockhash { // Normal chain progression let prev_block = working_snapshot .blocks - .get(&best_tip.blockhash) + .get(&working_snapshot.best_tip.blockhash) .ok_or_else(|| { SyncError::ReorgFailure(format!( "found blocks {:?}, expected block {:?}", @@ -356,14 +336,14 @@ impl NonFinalizedState { .values() .map(|block| (block.index().hash(), block.index().height())) .collect::>(), - best_tip + working_snapshot.best_tip )) })?; let chainblock = self.block_to_chainblock(prev_block, &block).await?; info!( "syncing block {} at height {}", &chainblock.index().hash(), - best_tip.height + 1 + working_snapshot.best_tip.height + 1 ); working_snapshot.add_block_at_tip(chainblock); } else { @@ -378,8 +358,17 @@ impl NonFinalizedState { ), } } + if initial_state.best_tip.height + 100 < working_snapshot.best_tip.height { + self.update(finalized_db.clone(), initial_state, working_snapshot) + .await?; + initial_state = self.current.load_full(); + working_snapshot = initial_state.as_ref().clone(); + } } + self.update(finalized_db.clone(), initial_state, working_snapshot) + .await?; + Ok(()) } @@ -416,23 +405,6 @@ impl NonFinalizedState { }) } - /// Stage new blocks and update the cache - async fn stage_new_blocks( - &self, - new_blocks: Vec, - finalized_db: &Arc, - ) -> Result<(), SyncError> { - for block in new_blocks { - if let Err(e) = self - .sync_stage_update_loop(block, finalized_db.clone()) - .await - { - return Err(e.into()); - } - } - Ok(()) - } - /// Handle non-finalized change listener events async fn handle_nfs_change_listener( &self, @@ -470,119 +442,13 @@ impl NonFinalizedState { Ok(()) } - /// Process non-best chain blocks iteratively - async fn process_nonbest_blocks( - &self, - mut nonbest_blocks: HashMap>, - finalized_db: &Arc, - ) -> Result<(), SyncError> { - let mut nonbest_chainblocks = HashMap::new(); - - 
loop { - let (next_up, later): (Vec<_>, Vec<_>) = nonbest_blocks - .into_iter() - .map(|(hash, block)| { - let prev_hash = - crate::chain_index::types::BlockHash(block.header.previous_block_hash.0); - ( - hash, - block, - self.current - .load() - .blocks - .get(&prev_hash) - .or_else(|| nonbest_chainblocks.get(&prev_hash)) - .cloned(), - ) - }) - .partition(|(_hash, _block, prev_block)| prev_block.is_some()); - - if next_up.is_empty() { - // Only store non-best chain blocks - // if we have a path from them - // to the chain - break; - } - - for (_hash, block, parent_block) in next_up { - let chainblock = self - .block_to_chainblock( - &parent_block.expect("partitioned, known to be some"), - &block, - ) - .await?; - nonbest_chainblocks.insert(*chainblock.hash(), chainblock); - } - nonbest_blocks = later - .into_iter() - .map(|(hash, block, _parent_block)| (hash, block)) - .collect(); - } - - for block in nonbest_chainblocks.into_values() { - if let Err(e) = self - .sync_stage_update_loop(block, finalized_db.clone()) - .await - { - return Err(e.into()); - } - } - Ok(()) - } - - async fn sync_stage_update_loop( + /// Add all blocks from the staging area, and save a new cache snapshot, trimming block below the finalised tip. 
+ async fn update( &self, - block: IndexedBlock, finalized_db: Arc, + initial_state: Arc, + new_snapshot: NonfinalizedBlockCacheSnapshot, ) -> Result<(), UpdateError> { - if let Err(e) = self.stage(block.clone()) { - match *e { - mpsc::error::TrySendError::Full(_) => { - self.update(finalized_db.clone()).await?; - Box::pin(self.sync_stage_update_loop(block, finalized_db)).await?; - } - mpsc::error::TrySendError::Closed(_block) => { - return Err(UpdateError::ReceiverDisconnected) - } - } - } - Ok(()) - } - - /// Stage a block - fn stage( - &self, - block: IndexedBlock, - ) -> Result<(), Box>> { - self.staging_sender.try_send(block).map_err(Box::new) - } - - /// Add all blocks from the staging area, and save a new cache snapshot, trimming block below the finalised tip. - async fn update(&self, finalized_db: Arc) -> Result<(), UpdateError> { - let mut new = HashMap::::new(); - let mut staged = self.staged.lock().await; - loop { - match staged.try_recv() { - Ok(chain_block) => { - new.insert(*chain_block.index().hash(), chain_block); - } - Err(mpsc::error::TryRecvError::Empty) => break, - Err(mpsc::error::TryRecvError::Disconnected) => { - return Err(UpdateError::ReceiverDisconnected) - } - } - } - // at this point, we've collected everything in the staging area - // we can drop the stage lock, and more blocks can be staged while we finish setting current - mem::drop(staged); - let snapshot = self.get_snapshot(); - new.extend( - snapshot - .blocks - .iter() - .map(|(hash, block)| (*hash, block.clone())), - ); - let finalized_height = finalized_db .to_reader() .db_height() @@ -590,43 +456,16 @@ impl NonFinalizedState { .map_err(|_e| UpdateError::FinalizedStateCorruption)? 
.unwrap_or(Height(0)); - let (_finalized_blocks, blocks): (HashMap<_, _>, HashMap) = new - .into_iter() - .partition(|(_hash, block)| match block.index().height() { - Some(height) => height < finalized_height, - None => false, - }); - - let best_tip = blocks.iter().fold(snapshot.best_tip, |acc, (hash, block)| { - match block.index().height() { - Some(working_height) if working_height > acc.height => BestTip { - height: working_height, - blockhash: *hash, - }, - _ => acc, - } - }); - - let heights_to_hashes = blocks - .iter() - .filter_map(|(hash, chainblock)| { - chainblock.index().height().map(|height| (height, *hash)) - }) - .collect(); + new_snapshot.remove_finalized_blocks(finalized_height); // Need to get best hash at some point in this process - let stored = self.current.compare_and_swap( - &snapshot, - Arc::new(NonfinalizedBlockCacheSnapshot { - blocks, - heights_to_hashes, - best_tip, - }), - ); + let stored = self + .current + .compare_and_swap(&initial_state, Arc::new(new_snapshot)); - if Arc::ptr_eq(&stored, &snapshot) { - let stale_best_tip = snapshot.best_tip; - let new_best_tip = best_tip; + if Arc::ptr_eq(&stored, &initial_state) { + let stale_best_tip = initial_state.best_tip; + let new_best_tip = stored.best_tip; // Log chain tip change if new_best_tip != stale_best_tip { From 32c7fdb5544abbae85de90c6bfc753a854848c1e Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Wed, 17 Dec 2025 16:37:00 -0400 Subject: [PATCH 018/114] start reimplementing handle reorg --- .../src/chain_index/non_finalised_state.rs | 91 +++++++++++-------- 1 file changed, 54 insertions(+), 37 deletions(-) diff --git a/zaino-state/src/chain_index/non_finalised_state.rs b/zaino-state/src/chain_index/non_finalised_state.rs index c48e935d1..3d9345d76 100644 --- a/zaino-state/src/chain_index/non_finalised_state.rs +++ b/zaino-state/src/chain_index/non_finalised_state.rs @@ -347,16 +347,10 @@ impl NonFinalizedState { ); working_snapshot.add_block_at_tip(chainblock); } else { - match 
working_snapshot.blocks.values().find(|b| { - b.hash().bytes_in_display_order() - == block.header.previous_block_hash.bytes_in_display_order() - }) { - Some(mut prev_block) => loop {}, - None => todo!( - "Keep fetching parent blocks from source \ - until we know the parent block" - ), - } + self.handle_reorg(&mut working_snapshot, block.as_ref())? + // There's been a reorg. The fresh block is the new chaintip + // we need to work backwards from it and update heights_to_hashes + // with it and all its parents. } if initial_state.best_tip.height + 100 < working_snapshot.best_tip.height { self.update(finalized_db.clone(), initial_state, working_snapshot) @@ -375,34 +369,33 @@ impl NonFinalizedState { /// Handle a blockchain reorg by finding the common ancestor fn handle_reorg( &self, - initial_state: &NonfinalizedBlockCacheSnapshot, - current_tip: BestTip, - ) -> Result { - let mut next_height_down = current_tip.height - 1; - - let prev_hash = loop { - if next_height_down == Height(0) { - return Err(SyncError::ReorgFailure( - "attempted to reorg below chain genesis".to_string(), - )); - } - match initial_state - .blocks - .values() - .find(|block| block.height() == Some(next_height_down)) - .map(IndexedBlock::hash) - { - Some(hash) => break hash, - // There is a hole in our database. - // TODO: An error return may be more appropriate here - None => next_height_down = next_height_down - 1, + working_snapshot: &mut NonfinalizedBlockCacheSnapshot, + block: &impl Block, + ) -> Result<(), SyncError> { + match working_snapshot + .blocks + .values() + .find(|b| b.hash_bytes_serialized_order() == block.prev_hash_bytes_serialized_order()) + .cloned() + { + Some(prev_block) => { + if working_snapshot + .heights_to_hashes + .values() + .find(|hash| *hash == prev_block.hash()) + .is_some() + // The parent hash is in the best chain. 
We've hit the bottom + { + todo!() + } else { + self.handle_reorg(working_snapshot, &prev_block) + } } - }; - - Ok(BestTip { - height: next_height_down, - blockhash: *prev_hash, - }) + None => todo!( + "Keep fetching parent blocks from source \ + until we know the parent block" + ), + } } /// Handle non-finalized change listener events @@ -588,3 +581,27 @@ pub enum UpdateError { /// state. A full rebuild is likely needed FinalizedStateCorruption, } + +trait Block { + fn hash_bytes_serialized_order(&self) -> [u8; 32]; + fn prev_hash_bytes_serialized_order(&self) -> [u8; 32]; +} + +impl Block for IndexedBlock { + fn hash_bytes_serialized_order(&self) -> [u8; 32] { + self.hash().0 + } + + fn prev_hash_bytes_serialized_order(&self) -> [u8; 32] { + self.index.parent_hash.0 + } +} +impl Block for zebra_chain::block::Block { + fn hash_bytes_serialized_order(&self) -> [u8; 32] { + self.hash().bytes_in_serialized_order() + } + + fn prev_hash_bytes_serialized_order(&self) -> [u8; 32] { + self.header.previous_block_hash.bytes_in_serialized_order() + } +} From 4b6aa229f2272c3efbc10a51216aafe014ac4b3d Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Thu, 18 Dec 2025 14:14:47 -0400 Subject: [PATCH 019/114] reorg management complete untested --- .../src/chain_index/non_finalised_state.rs | 99 ++++++++++++++----- 1 file changed, 77 insertions(+), 22 deletions(-) diff --git a/zaino-state/src/chain_index/non_finalised_state.rs b/zaino-state/src/chain_index/non_finalised_state.rs index 3d9345d76..ba088b982 100644 --- a/zaino-state/src/chain_index/non_finalised_state.rs +++ b/zaino-state/src/chain_index/non_finalised_state.rs @@ -7,6 +7,7 @@ use crate::{ use arc_swap::ArcSwap; use futures::lock::Mutex; use primitive_types::U256; +use serde::Serializer; use std::{collections::HashMap, sync::Arc}; use tokio::sync::mpsc; use tracing::{info, warn}; @@ -72,6 +73,17 @@ pub enum NodeConnectionError { UnrecoverableError(Box), } +#[derive(Debug)] +struct MissingBlockError(String); + +impl 
std::fmt::Display for MissingBlockError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "missing block: {}", self.0) + } +} + +impl std::error::Error for MissingBlockError {} + #[derive(Debug)] /// An error occurred during sync of the NonFinalized State. pub enum SyncError { @@ -85,9 +97,7 @@ pub enum SyncError { /// Sync has been called multiple times in parallel, or another process has /// written to the block snapshot. CompetingSyncProcess, - /// Sync attempted a reorg, and something went wrong. Currently, this - /// only happens when we attempt to reorg below the start of the chain, - /// indicating an entirely separate regtest/testnet chain to what we expected + /// Sync attempted a reorg, and something went wrong. ReorgFailure(String), /// UnrecoverableFinalizedStateError CannotReadFinalizedState, @@ -158,9 +168,9 @@ impl NonfinalizedBlockCacheSnapshot { }) } - fn add_block_at_tip(&mut self, block: IndexedBlock) { + fn add_block_new_chaintip(&mut self, block: IndexedBlock) { self.best_tip = BestTip { - height: self.best_tip.height + 1, + height: block.height().expect("all blocks have height"), blockhash: *block.hash(), }; self.heights_to_hashes @@ -168,6 +178,12 @@ impl NonfinalizedBlockCacheSnapshot { self.blocks.insert(*block.hash(), block); } + fn get_block_by_hash_bytes_in_serialized_order(&self, hash: [u8; 32]) -> Option<&IndexedBlock> { + self.blocks + .values() + .find(|block| block.hash_bytes_serialized_order() == hash) + } + fn remove_finalized_blocks(&self, finalized_height: Height) { todo!() } @@ -345,9 +361,10 @@ impl NonFinalizedState { &chainblock.index().hash(), working_snapshot.best_tip.height + 1 ); - working_snapshot.add_block_at_tip(chainblock); + working_snapshot.add_block_new_chaintip(chainblock); } else { - self.handle_reorg(&mut working_snapshot, block.as_ref())? + self.handle_reorg(&mut working_snapshot, block.as_ref()) + .await?; // There's been a reorg. 
The fresh block is the new chaintip // we need to work backwards from it and update heights_to_hashes // with it and all its parents. @@ -367,15 +384,13 @@ impl NonFinalizedState { } /// Handle a blockchain reorg by finding the common ancestor - fn handle_reorg( + async fn handle_reorg( &self, working_snapshot: &mut NonfinalizedBlockCacheSnapshot, block: &impl Block, - ) -> Result<(), SyncError> { - match working_snapshot - .blocks - .values() - .find(|b| b.hash_bytes_serialized_order() == block.prev_hash_bytes_serialized_order()) + ) -> Result { + let prev_block = match working_snapshot + .get_block_by_hash_bytes_in_serialized_order(block.prev_hash_bytes_serialized_order()) .cloned() { Some(prev_block) => { @@ -383,19 +398,38 @@ impl NonFinalizedState { .heights_to_hashes .values() .find(|hash| *hash == prev_block.hash()) - .is_some() - // The parent hash is in the best chain. We've hit the bottom + .is_none() { - todo!() + Box::pin(self.handle_reorg(working_snapshot, &prev_block)).await? } else { - self.handle_reorg(working_snapshot, &prev_block) + prev_block } } - None => todo!( - "Keep fetching parent blocks from source \ - until we know the parent block" - ), - } + None => { + let prev_block = self + .source + .get_block(HashOrHeight::Hash( + zebra_chain::block::Hash::from_bytes_in_serialized_order( + block.prev_hash_bytes_serialized_order(), + ), + )) + .await + .map_err(|e| { + SyncError::ZebradConnectionError(NodeConnectionError::UnrecoverableError( + Box::new(e), + )) + })? + .ok_or(SyncError::ZebradConnectionError( + NodeConnectionError::UnrecoverableError(Box::new(MissingBlockError( + "zebrad missing block in best chain".to_string(), + ))), + ))?; + Box::pin(self.handle_reorg(working_snapshot, &*prev_block)).await? 
+ } + }; + let indexed_block = block.to_indexed_block(&prev_block, self).await?; + working_snapshot.add_block_new_chaintip(indexed_block.clone()); + Ok(indexed_block) } /// Handle non-finalized change listener events @@ -585,6 +619,11 @@ pub enum UpdateError { trait Block { fn hash_bytes_serialized_order(&self) -> [u8; 32]; fn prev_hash_bytes_serialized_order(&self) -> [u8; 32]; + async fn to_indexed_block( + &self, + prev_block: &IndexedBlock, + nfs: &NonFinalizedState, + ) -> Result; } impl Block for IndexedBlock { @@ -595,6 +634,14 @@ impl Block for IndexedBlock { fn prev_hash_bytes_serialized_order(&self) -> [u8; 32] { self.index.parent_hash.0 } + + async fn to_indexed_block( + &self, + _prev_block: &IndexedBlock, + _nfs: &NonFinalizedState, + ) -> Result { + Ok(self.clone()) + } } impl Block for zebra_chain::block::Block { fn hash_bytes_serialized_order(&self) -> [u8; 32] { @@ -604,4 +651,12 @@ impl Block for zebra_chain::block::Block { fn prev_hash_bytes_serialized_order(&self) -> [u8; 32] { self.header.previous_block_hash.bytes_in_serialized_order() } + + async fn to_indexed_block( + &self, + prev_block: &IndexedBlock, + nfs: &NonFinalizedState, + ) -> Result { + nfs.block_to_chainblock(prev_block, self).await + } } From 312dc9dd40cf87dc20e76831b86b6db1551f0e93 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Thu, 18 Dec 2025 14:55:07 -0400 Subject: [PATCH 020/114] revert to previous proptest reorg logic, finish nfs sync --- .../src/chain_index/non_finalised_state.rs | 93 ++++++++++------ .../chain_index/tests/proptest_blockgen.rs | 103 ++++-------------- 2 files changed, 80 insertions(+), 116 deletions(-) diff --git a/zaino-state/src/chain_index/non_finalised_state.rs b/zaino-state/src/chain_index/non_finalised_state.rs index ba088b982..b2d8e1727 100644 --- a/zaino-state/src/chain_index/non_finalised_state.rs +++ b/zaino-state/src/chain_index/non_finalised_state.rs @@ -7,7 +7,6 @@ use crate::{ use arc_swap::ArcSwap; use futures::lock::Mutex; use 
primitive_types::U256; -use serde::Serializer; use std::{collections::HashMap, sync::Arc}; use tokio::sync::mpsc; use tracing::{info, warn}; @@ -173,9 +172,7 @@ impl NonfinalizedBlockCacheSnapshot { height: block.height().expect("all blocks have height"), blockhash: *block.hash(), }; - self.heights_to_hashes - .insert(block.height().expect("block to have height"), *block.hash()); - self.blocks.insert(*block.hash(), block); + self.add_block(block) } fn get_block_by_hash_bytes_in_serialized_order(&self, hash: [u8; 32]) -> Option<&IndexedBlock> { @@ -184,8 +181,19 @@ impl NonfinalizedBlockCacheSnapshot { .find(|block| block.hash_bytes_serialized_order() == hash) } - fn remove_finalized_blocks(&self, finalized_height: Height) { - todo!() + fn remove_finalized_blocks(&mut self, finalized_height: Height) { + // Keep the last finalized block. This means we don't have to check + // the finalized state when the entire non-finalized state is reorged away. + self.blocks + .retain(|_hash, block| block.height().unwrap() >= finalized_height); + self.heights_to_hashes + .retain(|height, _hash| height >= &finalized_height); + } + + fn add_block(&mut self, block: IndexedBlock) { + self.heights_to_hashes + .insert(block.height().expect("block to have height"), *block.hash()); + self.blocks.insert(*block.hash(), block); } } @@ -293,32 +301,9 @@ impl NonFinalizedState { /// sync to the top of the chain, trimming to the finalised tip. 
pub(super) async fn sync(&self, finalized_db: Arc) -> Result<(), SyncError> { - let initial_state = self.get_snapshot(); - let working_snapshot = initial_state.as_ref().clone(); - let mut nonbest_blocks = HashMap::new(); - - // Fetch main chain blocks and handle reorgs - let new_blocks = self - .fetch_main_chain_blocks( - finalized_db.clone(), - initial_state.clone(), - working_snapshot, - ) - .await?; + let mut initial_state = self.get_snapshot(); + let mut working_snapshot = initial_state.as_ref().clone(); - // Handle non-finalized change listener - self.handle_nfs_change_listener(&mut nonbest_blocks).await?; - - Ok(()) - } - - /// Fetch main chain blocks and handle reorgs - async fn fetch_main_chain_blocks( - &self, - finalized_db: Arc, - mut initial_state: Arc, - mut working_snapshot: NonfinalizedBlockCacheSnapshot, - ) -> Result<(), SyncError> { // currently this only gets main-chain blocks // once readstateservice supports serving sidechain data, this // must be rewritten to match @@ -376,6 +361,9 @@ impl NonFinalizedState { working_snapshot = initial_state.as_ref().clone(); } } + // Handle non-finalized change listener + self.handle_nfs_change_listener(&mut working_snapshot) + .await?; self.update(finalized_db.clone(), initial_state, working_snapshot) .await?; @@ -435,7 +423,7 @@ impl NonFinalizedState { /// Handle non-finalized change listener events async fn handle_nfs_change_listener( &self, - nonbest_blocks: &mut HashMap>, + working_snapshot: &mut NonfinalizedBlockCacheSnapshot, ) -> Result<(), SyncError> { let Some(ref listener) = self.nfs_change_listener else { return Ok(()); @@ -455,7 +443,7 @@ impl NonFinalizedState { .blocks .contains_key(&types::BlockHash(hash.0)) { - nonbest_blocks.insert(block.hash(), block); + self.add_nonbest_block(working_snapshot, &*block).await?; } } Err(mpsc::error::TryRecvError::Empty) => break, @@ -474,7 +462,7 @@ impl NonFinalizedState { &self, finalized_db: Arc, initial_state: Arc, - new_snapshot: 
NonfinalizedBlockCacheSnapshot, + mut new_snapshot: NonfinalizedBlockCacheSnapshot, ) -> Result<(), UpdateError> { let finalized_height = finalized_db .to_reader() @@ -601,6 +589,43 @@ impl NonFinalizedState { let block_with_metadata = BlockWithMetadata::new(block, metadata); IndexedBlock::try_from(block_with_metadata) } + + async fn add_nonbest_block( + &self, + working_snapshot: &mut NonfinalizedBlockCacheSnapshot, + block: &impl Block, + ) -> Result { + let prev_block = match working_snapshot + .get_block_by_hash_bytes_in_serialized_order(block.prev_hash_bytes_serialized_order()) + .cloned() + { + Some(block) => block, + None => { + let prev_block = self + .source + .get_block(HashOrHeight::Hash( + zebra_chain::block::Hash::from_bytes_in_serialized_order( + block.prev_hash_bytes_serialized_order(), + ), + )) + .await + .map_err(|e| { + SyncError::ZebradConnectionError(NodeConnectionError::UnrecoverableError( + Box::new(e), + )) + })? + .ok_or(SyncError::ZebradConnectionError( + NodeConnectionError::UnrecoverableError(Box::new(MissingBlockError( + "zebrad missing block".to_string(), + ))), + ))?; + Box::pin(self.add_nonbest_block(working_snapshot, &*prev_block)).await? 
+ } + }; + let indexed_block = block.to_indexed_block(&prev_block, self).await?; + working_snapshot.add_block(indexed_block.clone()); + Ok(indexed_block) + } } /// Errors that occur during a snapshot update diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 5dd4162ab..2c7876575 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -47,7 +47,6 @@ fn make_chain() { runtime.block_on(async { let (genesis_segment, branching_segments) = segments; let mockchain = ProptestMockchain { - best_block: Mutex::new(genesis_segment.first().unwrap().hash()), genesis_segment, branching_segments, }; @@ -82,28 +81,19 @@ fn make_chain() { assert!(block.chainwork().to_u256() <= best_tip_block.chainwork().to_u256()); } } - assert_eq!(snapshot.heights_to_hashes.len(), segment_length * 2); - assert_eq!(snapshot.heights_to_hashes.len(), segment_length * (branch_count + 1)); + assert_eq!(snapshot.heights_to_hashes.len(), (segment_length * 2) + 2); + assert_eq!( + snapshot.blocks.len(), + (segment_length * (branch_count + 1)) + 2 + ); }); }); } +#[derive(Clone)] struct ProptestMockchain { genesis_segment: ChainSegment, branching_segments: Vec, - // Updated each time we simulate a reorg, to keep us - // from reorging indefinitely - best_block: Mutex, -} - -impl Clone for ProptestMockchain { - fn clone(&self) -> Self { - Self { - genesis_segment: self.genesis_segment.clone(), - branching_segments: self.branching_segments.clone(), - best_block: Mutex::new(*self.best_block.lock().unwrap()), - } - } } impl ProptestMockchain { @@ -191,72 +181,21 @@ impl BlockchainSource for ProptestMockchain { }) .cloned()) } - // This implementation: - // a) Picks a random block in the branching section with - // chainwork >= the current 'best_block' - // b) sets that block as the new best block - // c) returns the block at the provided height, on the 
best_block's chain - HashOrHeight::Height(height) => Ok({ - let chainwork = self - .get_block_and_all_preceeding(|block| { - block.hash() == *self.best_block.lock().unwrap() - }) - .unwrap() - .iter() - .map(|block| { - block - .header - .difficulty_threshold - .to_work() - .unwrap() - .as_u128() - }) - .sum(); - let better_blocks: Vec<_> = self - .branching_segments - .iter() - .flat_map(|segment| segment.iter()) - .filter(|block| { - self.get_block_and_all_preceeding(|b| b.hash() == block.hash()) - .unwrap() - .iter() - .map(|block| { - block - .header - .difficulty_threshold - .to_work() - .unwrap() - .as_u128() - }) - .sum::() - > chainwork - }) - .collect(); - println!("prev best chainwork: {chainwork}"); - - if let Some(block) = better_blocks.choose(&mut rand::thread_rng()) { - *self.best_block.lock().unwrap() = block.hash() - } - - self.genesis_segment - .iter() - .find(|block| block.coinbase_height().unwrap() == height) - .cloned() - .or_else(|| { - self.branching_segments - .iter() - .find(|branch| { - branch - .iter() - .find(|block| block.hash() == *self.best_block.lock().unwrap()) - .is_some() - }) - .unwrap() - .iter() - .find(|block| block.coinbase_height().unwrap() == height) - .cloned() - }) - }), + // This implementation selects a block from a random branch instead + // of the best branch. 
This is intended to simulate reorgs + HashOrHeight::Height(height) => Ok(self + .genesis_segment + .iter() + .find(|block| block.coinbase_height().unwrap() == height) + .cloned() + .or_else(|| { + self.branching_segments + .choose(&mut rand::thread_rng()) + .unwrap() + .iter() + .find(|block| block.coinbase_height().unwrap() == height) + .cloned() + })), } } From 5e0b9acf0df704beb8bbcfb55ad698cf6127ae89 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Fri, 19 Dec 2025 14:02:31 -0400 Subject: [PATCH 021/114] remove unused imports --- zaino-state/src/chain_index/tests/proptest_blockgen.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 2c7876575..1825c0121 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -1,14 +1,10 @@ -use std::{ - sync::{Arc, Mutex}, - time::Duration, -}; +use std::{sync::Arc, time::Duration}; -use primitive_types::U256; use proptest::{ prelude::{Arbitrary as _, BoxedStrategy, Just}, strategy::Strategy, }; -use rand::{seq::SliceRandom, thread_rng}; +use rand::seq::SliceRandom; use tonic::async_trait; use zaino_common::{network::ActivationHeights, DatabaseConfig, Network, StorageConfig}; use zebra_chain::{ @@ -62,7 +58,7 @@ fn make_chain() { ..Default::default() }, db_version: 1, - network , + network, }; From 00fc05df2eadd65e12ab1813299d5933e27a407f Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Fri, 19 Dec 2025 14:03:29 -0400 Subject: [PATCH 022/114] add minimal find fork point assertion --- zaino-state/src/chain_index/tests/proptest_blockgen.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 8d9faca17..2721dfb6a 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ 
b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -80,6 +80,11 @@ fn make_chain() { for (hash, block) in &snapshot.blocks { if hash != &best_tip_hash { assert!(block.chainwork().to_u256() <= best_tip_block.chainwork().to_u256()); + if snapshot.heights_to_hashes.values().find(|h| block.hash() == *h).is_some() { + assert_eq!(index_reader.find_fork_point(&snapshot, hash).unwrap().unwrap().0, *hash); + } else { + assert_ne!(index_reader.find_fork_point(&snapshot, hash).unwrap().unwrap().0, *hash); + } } } assert_eq!(snapshot.heights_to_hashes.len(), segment_length * 2); From 1d2e4e97dc4375c69f66236616f6fe75237f4dba Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Fri, 19 Dec 2025 14:04:18 -0400 Subject: [PATCH 023/114] ignore failing test --- zaino-state/src/chain_index/tests/proptest_blockgen.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 2721dfb6a..cb7388c04 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -29,6 +29,7 @@ use crate::{ }; #[test] +#[ignore = "Failing due to sync reorg bugs"] fn make_chain() { init_tracing(); let network = Network::Regtest(ActivationHeights::default()); From 96aaefe43112814e16cb5bd2e1a78b79123b1320 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Fri, 19 Dec 2025 18:35:19 +0000 Subject: [PATCH 024/114] feat(config): add Docker DNS hostname resolution support - Add `zaino-common::net` module with `resolve_socket_addr()` and `try_resolve_address()` for hostname:port resolution - Change validator address config fields from `SocketAddr` to `String` - Add `AddressResolution` enum to distinguish between resolved addresses, valid hostnames pending DNS, and invalid formats - Reject malformed addresses (missing port, invalid format) at config time - Defer DNS resolution for valid hostnames (Docker DNS) to connection time - 
Update `From` to `TryFrom` for proper error handling - Mark DNS-dependent tests with `#[ignore]` to prevent CI flakiness --- Cargo.lock | 1 + integration-tests/tests/chain_cache.rs | 52 ++- integration-tests/tests/fetch_service.rs | 18 +- integration-tests/tests/json_server.rs | 4 +- integration-tests/tests/local_cache.rs | 2 +- integration-tests/tests/state_service.rs | 6 +- .../tests/wallet_to_validator.rs | 18 +- zaino-common/src/config/validator.rs | 7 +- zaino-common/src/lib.rs | 4 + zaino-common/src/net.rs | 329 ++++++++++++++++++ zaino-fetch/Cargo.toml | 1 + zaino-fetch/src/jsonrpsee/connector.rs | 38 +- zaino-state/src/backends/fetch.rs | 2 +- zaino-state/src/backends/state.rs | 2 +- zaino-state/src/config.rs | 14 +- zaino-testutils/src/lib.rs | 26 +- zainod/src/config.rs | 140 +++++--- zainod/src/indexer.rs | 2 +- 18 files changed, 542 insertions(+), 124 deletions(-) create mode 100644 zaino-common/src/net.rs diff --git a/Cargo.lock b/Cargo.lock index f44b2306d..b861d3e2f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8309,6 +8309,7 @@ dependencies = [ "tonic 0.12.3", "tracing", "url", + "zaino-common", "zaino-proto", "zaino-testvectors", "zebra-chain 3.1.0", diff --git a/integration-tests/tests/chain_cache.rs b/integration-tests/tests/chain_cache.rs index 6fc6b795a..27b274756 100644 --- a/integration-tests/tests/chain_cache.rs +++ b/integration-tests/tests/chain_cache.rs @@ -15,7 +15,10 @@ async fn create_test_manager_and_connector( ) -> (TestManager, JsonRpSeeConnector) where T: ValidatorExt, - Service: zaino_state::ZcashService> + Send + Sync + 'static, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let test_manager = TestManager::::launch( @@ -32,7 +35,7 @@ where let json_service = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, 
Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -68,10 +71,7 @@ mod chain_query_interface { }; use zcash_local_net::validator::{zcashd::Zcashd, zebrad::Zebrad}; use zebra_chain::{ - parameters::{ - testnet::{ConfiguredActivationHeights, RegtestParameters}, - NetworkKind, - }, + parameters::{testnet::RegtestParameters, NetworkKind}, serialization::{ZcashDeserialize, ZcashDeserializeInto}, }; @@ -92,7 +92,10 @@ mod chain_query_interface { ) where C: ValidatorExt, - Service: zaino_state::ZcashService> + Send + Sync + 'static, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let (test_manager, json_service) = create_test_manager_and_connector::( @@ -111,11 +114,11 @@ mod chain_query_interface { None => test_manager.data_dir.clone(), }; let network = match test_manager.network { - NetworkKind::Regtest => zebra_chain::parameters::Network::new_regtest( - RegtestParameters::from(ConfiguredActivationHeights::from( + NetworkKind::Regtest => { + zebra_chain::parameters::Network::new_regtest(RegtestParameters::from( test_manager.local_net.get_activation_heights().await, - )), - ), + )) + } NetworkKind::Testnet => zebra_chain::parameters::Network::new_default_testnet(), NetworkKind::Mainnet => zebra_chain::parameters::Network::Mainnet, @@ -130,7 +133,7 @@ mod chain_query_interface { // todo: does this matter? 
should_backup_non_finalized_state: true, }, - test_manager.full_node_rpc_listen_address, + test_manager.full_node_rpc_listen_address.to_string(), test_manager.full_node_grpc_listen_address, false, None, @@ -229,7 +232,10 @@ mod chain_query_interface { async fn get_block_range(validator: &ValidatorKind) where C: ValidatorExt, - Service: zaino_state::ZcashService> + Send + Sync + 'static, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let (test_manager, _json_service, _option_state_service, _chain_index, indexer) = @@ -279,7 +285,10 @@ mod chain_query_interface { async fn find_fork_point(validator: &ValidatorKind) where C: ValidatorExt, - Service: zaino_state::ZcashService> + Send + Sync + 'static, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let (test_manager, _json_service, _option_state_service, _chain_index, indexer) = @@ -319,7 +328,10 @@ mod chain_query_interface { async fn get_raw_transaction(validator: &ValidatorKind) where C: ValidatorExt, - Service: zaino_state::ZcashService> + Send + Sync + 'static, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let (test_manager, _json_service, _option_state_service, _chain_index, indexer) = @@ -381,7 +393,10 @@ mod chain_query_interface { async fn get_transaction_status(validator: &ValidatorKind) where C: ValidatorExt, - Service: zaino_state::ZcashService> + Send + Sync + 'static, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let (test_manager, _json_service, _option_state_service, _chain_index, indexer) = @@ -427,7 +442,10 @@ mod chain_query_interface { async fn sync_large_chain(validator: &ValidatorKind) where C: ValidatorExt, - Service: zaino_state::ZcashService> + Send + Sync + 'static, + 
Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let (test_manager, json_service, option_state_service, _chain_index, indexer) = diff --git a/integration-tests/tests/fetch_service.rs b/integration-tests/tests/fetch_service.rs index 36e038d15..0b2645217 100644 --- a/integration-tests/tests/fetch_service.rs +++ b/integration-tests/tests/fetch_service.rs @@ -187,7 +187,7 @@ async fn fetch_service_get_raw_mempool(validator: &ValidatorKin let json_service = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -618,7 +618,7 @@ async fn fetch_service_get_latest_block(validator: &ValidatorKi let json_service = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -662,7 +662,7 @@ async fn assert_fetch_service_difficulty_matches_rpc(validator: let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -691,7 +691,7 @@ async fn assert_fetch_service_mininginfo_matches_rpc(validator: let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -720,7 +720,7 @@ async fn assert_fetch_service_peerinfo_matches_rpc(validator: & let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + 
&test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -774,7 +774,7 @@ async fn fetch_service_get_block_subsidy(validator: &ValidatorK let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -857,7 +857,7 @@ async fn fetch_service_get_block_header(validator: &ValidatorKi let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -1746,7 +1746,7 @@ async fn assert_fetch_service_getnetworksols_matches_rpc( let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -1908,7 +1908,7 @@ mod zcashd { let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), diff --git a/integration-tests/tests/json_server.rs b/integration-tests/tests/json_server.rs index c349b600c..f727dd024 100644 --- a/integration-tests/tests/json_server.rs +++ b/integration-tests/tests/json_server.rs @@ -43,7 +43,7 @@ async fn create_zcashd_test_manager_and_fetch_services( println!("Launching zcashd fetch service.."); let zcashd_fetch_service = FetchService::spawn(FetchServiceConfig::new( - test_manager.full_node_rpc_listen_address, + test_manager.full_node_rpc_listen_address.to_string(), None, None, None, @@ -70,7 +70,7 @@ async fn 
create_zcashd_test_manager_and_fetch_services( println!("Launching zaino fetch service.."); let zaino_fetch_service = FetchService::spawn(FetchServiceConfig::new( - test_manager.full_node_rpc_listen_address, + test_manager.full_node_rpc_listen_address.to_string(), test_manager.json_server_cookie_dir.clone(), None, None, diff --git a/integration-tests/tests/local_cache.rs b/integration-tests/tests/local_cache.rs index 8a49e7d84..b37f819fb 100644 --- a/integration-tests/tests/local_cache.rs +++ b/integration-tests/tests/local_cache.rs @@ -40,7 +40,7 @@ async fn create_test_manager_and_block_cache( let json_service = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index 7b189df1b..85a6ce05f 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -78,7 +78,7 @@ async fn create_test_manager_and_services( test_manager.local_net.print_stdout(); let fetch_service = FetchService::spawn(FetchServiceConfig::new( - test_manager.full_node_rpc_listen_address, + test_manager.full_node_rpc_listen_address.to_string(), None, None, None, @@ -116,7 +116,7 @@ async fn create_test_manager_and_services( debug_validity_check_interval: None, should_backup_non_finalized_state: false, }, - test_manager.full_node_rpc_listen_address, + test_manager.full_node_rpc_listen_address.to_string(), test_manager.full_node_grpc_listen_address, false, None, @@ -162,7 +162,7 @@ async fn generate_blocks_and_poll_all_chain_indexes( ) where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { test_manager.generate_blocks_and_poll(n).await; diff --git 
a/integration-tests/tests/wallet_to_validator.rs b/integration-tests/tests/wallet_to_validator.rs index f4f10faec..615203162 100644 --- a/integration-tests/tests/wallet_to_validator.rs +++ b/integration-tests/tests/wallet_to_validator.rs @@ -18,7 +18,7 @@ async fn connect_to_node_get_info_for_validator(validator: &Validato where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let mut test_manager = @@ -40,7 +40,7 @@ async fn send_to_orchard(validator: &ValidatorKind) where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let mut test_manager = @@ -88,7 +88,7 @@ async fn send_to_sapling(validator: &ValidatorKind) where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let mut test_manager = @@ -136,7 +136,7 @@ async fn send_to_transparent(validator: &ValidatorKind) where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let mut test_manager = @@ -167,7 +167,7 @@ where let fetch_service = zaino_fetch::jsonrpsee::connector::JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -234,7 +234,7 @@ async fn send_to_all(validator: &ValidatorKind) where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let mut test_manager = @@ -320,7 
+320,7 @@ async fn shield_for_validator(validator: &ValidatorKind) where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let mut test_manager = @@ -388,7 +388,7 @@ async fn monitor_unverified_mempool_for_validator(validator: &Valida where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let mut test_manager = @@ -443,7 +443,7 @@ where let fetch_service = zaino_fetch::jsonrpsee::connector::JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), diff --git a/zaino-common/src/config/validator.rs b/zaino-common/src/config/validator.rs index a5acc0b10..1e57c0cb5 100644 --- a/zaino-common/src/config/validator.rs +++ b/zaino-common/src/config/validator.rs @@ -2,16 +2,15 @@ // use serde::{Deserialize, Serialize}; // use zebra_chain::parameters::testnet::ConfiguredActivationHeights; -use std::net::SocketAddr; use std::path::PathBuf; /// Validator (full-node) type for Zaino configuration. #[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize, serde::Serialize)] pub struct ValidatorConfig { /// Full node / validator gprc listen port. Only exists for zebra - pub validator_grpc_listen_address: Option, - /// Full node / validator listen port. - pub validator_jsonrpc_listen_address: SocketAddr, + pub validator_grpc_listen_address: Option, + /// Full node / validator listen address (supports hostname:port or ip:port format). + pub validator_jsonrpc_listen_address: String, /// Path to the validator cookie file. Enable validator rpc cookie authentication with Some. pub validator_cookie_path: Option, /// Full node / validator Username. 
diff --git a/zaino-common/src/lib.rs b/zaino-common/src/lib.rs index d4ced5081..10621c052 100644 --- a/zaino-common/src/lib.rs +++ b/zaino-common/src/lib.rs @@ -4,6 +4,10 @@ //! and common utilities used across the Zaino blockchain indexer ecosystem. pub mod config; +pub mod net; + +// Re-export network utilities +pub use net::{resolve_socket_addr, try_resolve_address, AddressResolution}; // Re-export commonly used config types at crate root for backward compatibility. // This allows existing code using `use zaino_common::Network` to continue working. diff --git a/zaino-common/src/net.rs b/zaino-common/src/net.rs new file mode 100644 index 000000000..d53912b14 --- /dev/null +++ b/zaino-common/src/net.rs @@ -0,0 +1,329 @@ +//! Network utilities for Zaino. + +use std::net::{SocketAddr, ToSocketAddrs}; + +/// Result of attempting to resolve an address string. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum AddressResolution { + /// Successfully resolved to a socket address. + Resolved(SocketAddr), + /// Address appears to be a valid hostname:port format but DNS lookup failed. + /// This is acceptable for deferred resolution (e.g., Docker DNS). + UnresolvedHostname { + /// The original address string. + address: String, + /// The DNS error message. + error: String, + }, + /// Address format is invalid (missing port, garbage input, etc.). + /// This should always be treated as an error. + InvalidFormat { + /// The original address string. + address: String, + /// Description of what's wrong with the format. + reason: String, + }, +} + +impl AddressResolution { + /// Returns the resolved address if available. + pub fn resolved(&self) -> Option { + match self { + AddressResolution::Resolved(addr) => Some(*addr), + _ => None, + } + } + + /// Returns true if the address was successfully resolved. + pub fn is_resolved(&self) -> bool { + matches!(self, AddressResolution::Resolved(_)) + } + + /// Returns true if the address has a valid format but couldn't be resolved. 
+ /// This is acceptable for deferred resolution scenarios like Docker DNS. + pub fn is_unresolved_hostname(&self) -> bool { + matches!(self, AddressResolution::UnresolvedHostname { .. }) + } + + /// Returns true if the address format is invalid. + pub fn is_invalid_format(&self) -> bool { + matches!(self, AddressResolution::InvalidFormat { .. }) + } +} + +/// Validates that an address string has a valid format (host:port). +/// +/// This performs basic format validation without DNS lookup: +/// - Must contain exactly one `:` separator (or be IPv6 format `[...]:port`) +/// - Port must be a valid number +/// - Host part must not be empty +fn validate_address_format(address: &str) -> Result<(), String> { + let address = address.trim(); + + if address.is_empty() { + return Err("Address cannot be empty".to_string()); + } + + // Handle IPv6 format: [::1]:port + if address.starts_with('[') { + let Some(bracket_end) = address.find(']') else { + return Err("IPv6 address missing closing bracket".to_string()); + }; + + if bracket_end + 1 >= address.len() { + return Err("Missing port after IPv6 address".to_string()); + } + + let after_bracket = &address[bracket_end + 1..]; + if !after_bracket.starts_with(':') { + return Err("Expected ':' after IPv6 address bracket".to_string()); + } + + let port_str = &after_bracket[1..]; + port_str + .parse::() + .map_err(|_| format!("Invalid port number: '{port_str}'"))?; + + return Ok(()); + } + + // Handle IPv4/hostname format: host:port + let parts: Vec<&str> = address.rsplitn(2, ':').collect(); + if parts.len() != 2 { + return Err("Missing port (expected format: 'host:port')".to_string()); + } + + let port_str = parts[0]; + let host = parts[1]; + + if host.is_empty() { + return Err("Host cannot be empty".to_string()); + } + + port_str + .parse::() + .map_err(|_| format!("Invalid port number: '{port_str}'"))?; + + Ok(()) +} + +/// Attempts to resolve an address string, returning detailed information about the result. 
+/// +/// This function distinguishes between: +/// - Successfully resolved addresses +/// - Valid hostname:port format that failed DNS lookup (acceptable for Docker DNS) +/// - Invalid address format (always an error) +/// +/// # Examples +/// +/// ``` +/// use zaino_common::net::{try_resolve_address, AddressResolution}; +/// +/// // IP:port format resolves immediately +/// let result = try_resolve_address("127.0.0.1:8080"); +/// assert!(result.is_resolved()); +/// +/// // Invalid format is detected +/// let result = try_resolve_address("no-port-here"); +/// assert!(result.is_invalid_format()); +/// ``` +pub fn try_resolve_address(address: &str) -> AddressResolution { + // First validate the format + if let Err(reason) = validate_address_format(address) { + return AddressResolution::InvalidFormat { + address: address.to_string(), + reason, + }; + } + + // Try parsing as SocketAddr first (handles ip:port format directly) + if let Ok(addr) = address.parse::() { + return AddressResolution::Resolved(addr); + } + + // Fall back to DNS resolution for hostname:port format + match address.to_socket_addrs() { + Ok(mut addrs) => { + let addrs_vec: Vec = addrs.by_ref().collect(); + + // Prefer IPv4 if available (more compatible, especially in Docker) + if let Some(ipv4_addr) = addrs_vec.iter().find(|addr| addr.is_ipv4()) { + AddressResolution::Resolved(*ipv4_addr) + } else if let Some(addr) = addrs_vec.into_iter().next() { + AddressResolution::Resolved(addr) + } else { + AddressResolution::UnresolvedHostname { + address: address.to_string(), + error: "DNS returned no addresses".to_string(), + } + } + } + Err(e) => AddressResolution::UnresolvedHostname { + address: address.to_string(), + error: e.to_string(), + }, + } +} + +/// Resolves an address string to a [`SocketAddr`]. +/// +/// Accepts both IP:port format (e.g., "127.0.0.1:8080") and hostname:port format +/// (e.g., "zebra:18232" for Docker DNS resolution). 
+/// +/// When both IPv4 and IPv6 addresses are available, IPv4 is preferred. +/// +/// # Examples +/// +/// ``` +/// use zaino_common::net::resolve_socket_addr; +/// +/// // IP:port format +/// let addr = resolve_socket_addr("127.0.0.1:8080").unwrap(); +/// assert_eq!(addr.port(), 8080); +/// +/// // Hostname resolution (localhost) +/// let addr = resolve_socket_addr("localhost:8080").unwrap(); +/// assert!(addr.ip().is_loopback()); +/// ``` +/// +/// # Errors +/// +/// Returns an error if: +/// - The address format is invalid (missing port, invalid IP, etc.) +/// - The hostname cannot be resolved (DNS lookup failure) +/// - No addresses are returned from resolution +pub fn resolve_socket_addr(address: &str) -> Result { + match try_resolve_address(address) { + AddressResolution::Resolved(addr) => Ok(addr), + AddressResolution::UnresolvedHostname { address, error } => Err(std::io::Error::new( + std::io::ErrorKind::NotFound, + format!("Cannot resolve hostname '{address}': {error}"), + )), + AddressResolution::InvalidFormat { address, reason } => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + format!("Invalid address format '{address}': {reason}"), + )), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::net::Ipv4Addr; + + // === Format validation tests (no DNS, always reliable) === + + #[test] + fn test_resolve_ipv4_address() { + let result = resolve_socket_addr("127.0.0.1:8080"); + assert!(result.is_ok()); + let addr = result.unwrap(); + assert_eq!(addr.ip().to_string(), "127.0.0.1"); + assert_eq!(addr.port(), 8080); + } + + #[test] + fn test_resolve_ipv4_any_address() { + let result = resolve_socket_addr("0.0.0.0:18232"); + assert!(result.is_ok()); + let addr = result.unwrap(); + assert_eq!(addr.ip(), Ipv4Addr::UNSPECIFIED); + assert_eq!(addr.port(), 18232); + } + + #[test] + fn test_resolve_ipv6_localhost() { + let result = resolve_socket_addr("[::1]:8080"); + assert!(result.is_ok()); + let addr = result.unwrap(); + 
assert!(addr.is_ipv6()); + assert_eq!(addr.port(), 8080); + } + + #[test] + fn test_resolve_missing_port() { + let result = try_resolve_address("127.0.0.1"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_empty_string() { + let result = try_resolve_address(""); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_invalid_port() { + let result = try_resolve_address("127.0.0.1:invalid"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_port_too_large() { + let result = try_resolve_address("127.0.0.1:99999"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_empty_host() { + let result = try_resolve_address(":8080"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_ipv6_missing_port() { + let result = try_resolve_address("[::1]"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_ipv6_missing_bracket() { + let result = try_resolve_address("[::1:8080"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_valid_hostname_format() { + // This hostname has valid format but won't resolve + let result = try_resolve_address("nonexistent-host.invalid:8080"); + // Should be unresolved hostname, not invalid format + assert!( + result.is_unresolved_hostname(), + "Expected UnresolvedHostname, got {:?}", + result + ); + } + + #[test] + fn test_docker_style_hostname_format() { + // Docker-style hostnames have valid format + let result = try_resolve_address("zebra:18232"); + // Can't resolve in unit tests, but format is valid + assert!( + result.is_unresolved_hostname(), + "Expected UnresolvedHostname for Docker-style hostname, got {:?}", + result + ); + } + + // === DNS-dependent tests (may be flaky in CI) === + + #[test] + #[ignore = "DNS-dependent: may be flaky in CI environments without reliable DNS"] + fn test_resolve_hostname_localhost() { + // "localhost" should resolve to 127.0.0.1 or ::1 + let result = 
resolve_socket_addr("localhost:8080"); + assert!(result.is_ok()); + let addr = result.unwrap(); + assert_eq!(addr.port(), 8080); + assert!(addr.ip().is_loopback()); + } + + #[test] + #[ignore = "DNS-dependent: behavior varies by system DNS configuration"] + fn test_resolve_invalid_hostname_dns() { + // This test verifies DNS lookup failure for truly invalid hostnames + let result = resolve_socket_addr("this-hostname-does-not-exist.invalid:8080"); + assert!(result.is_err()); + } +} diff --git a/zaino-fetch/Cargo.toml b/zaino-fetch/Cargo.toml index 3185e0eb3..3b2fcac76 100644 --- a/zaino-fetch/Cargo.toml +++ b/zaino-fetch/Cargo.toml @@ -9,6 +9,7 @@ license = { workspace = true } version = { workspace = true } [dependencies] +zaino-common = { workspace = true } zaino-proto = { workspace = true } # Zebra diff --git a/zaino-fetch/src/jsonrpsee/connector.rs b/zaino-fetch/src/jsonrpsee/connector.rs index 8c7148ccc..0f268a805 100644 --- a/zaino-fetch/src/jsonrpsee/connector.rs +++ b/zaino-fetch/src/jsonrpsee/connector.rs @@ -229,9 +229,10 @@ impl JsonRpSeeConnector { }) } - /// Helper function to create from parts of a StateServiceConfig or FetchServiceConfig + /// Helper function to create from parts of a StateServiceConfig or FetchServiceConfig. + /// Accepts both hostname:port (e.g., "zebra:18232") and ip:port (e.g., "127.0.0.1:18232") formats. pub async fn new_from_config_parts( - validator_rpc_address: SocketAddr, + validator_rpc_address: &str, validator_rpc_user: String, validator_rpc_password: String, validator_cookie_path: Option, @@ -871,13 +872,22 @@ async fn test_node_connection(url: Url, auth_method: AuthMethod) -> Result<(), T Ok(()) } -/// Tries to connect to zebrad/zcashd using the provided SocketAddr and returns the correct URL. +/// Resolves an address string (hostname:port or ip:port) to a SocketAddr. 
+fn resolve_address(address: &str) -> Result { + zaino_common::net::resolve_socket_addr(address) + .map_err(|e| TransportError::BadNodeData(Box::new(e), "address resolution")) +} + +/// Tries to connect to zebrad/zcashd using the provided address and returns the correct URL. +/// Accepts both hostname:port (e.g., "zebra:18232") and ip:port (e.g., "127.0.0.1:18232") formats. pub async fn test_node_and_return_url( - addr: SocketAddr, + address: &str, cookie_path: Option, user: Option, password: Option, ) -> Result { + let addr = resolve_address(address)?; + let auth_method = match cookie_path.is_some() { true => { let cookie_file_path_str = cookie_path.expect("validator rpc cookie path missing"); @@ -914,3 +924,23 @@ pub async fn test_node_and_return_url( error!("Error: Could not establish connection with node. Please check config and confirm node is listening at the correct address and the correct authorisation details have been entered. Exiting.."); std::process::exit(1); } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_resolve_address_wraps_common_function() { + // Verify the wrapper correctly converts io::Error to TransportError + let result = resolve_address("127.0.0.1:8080"); + assert!(result.is_ok()); + assert_eq!(result.unwrap().port(), 8080); + + let result = resolve_address("invalid"); + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + TransportError::BadNodeData(_, "address resolution") + )); + } +} diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index 5c5b7f519..2da3b9d9a 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -109,7 +109,7 @@ impl ZcashService for FetchService { info!("Launching Chain Fetch Service.."); let fetcher = JsonRpSeeConnector::new_from_config_parts( - config.validator_rpc_address, + &config.validator_rpc_address, config.validator_rpc_user.clone(), config.validator_rpc_password.clone(), 
config.validator_cookie_path.clone(), diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index de7e70ec5..655e8c5df 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -184,7 +184,7 @@ impl ZcashService for StateService { info!("Spawning State Service.."); let rpc_client = JsonRpSeeConnector::new_from_config_parts( - config.validator_rpc_address, + &config.validator_rpc_address, config.validator_rpc_user.clone(), config.validator_rpc_password.clone(), config.validator_cookie_path.clone(), diff --git a/zaino-state/src/config.rs b/zaino-state/src/config.rs index 1250b350f..e35a0e446 100644 --- a/zaino-state/src/config.rs +++ b/zaino-state/src/config.rs @@ -29,9 +29,9 @@ pub enum BackendConfig { pub struct StateServiceConfig { /// Zebra [`zebra_state::ReadStateService`] config data pub validator_state_config: zebra_state::Config, - /// Validator JsonRPC address. - pub validator_rpc_address: std::net::SocketAddr, - /// Validator gRPC address. + /// Validator JsonRPC address (supports hostname:port or ip:port format). + pub validator_rpc_address: String, + /// Validator gRPC address (requires ip:port format for Zebra state sync). pub validator_grpc_address: std::net::SocketAddr, /// Validator cookie auth. pub validator_cookie_auth: bool, @@ -56,7 +56,7 @@ impl StateServiceConfig { // TODO: replace with struct-literal init only? pub fn new( validator_state_config: zebra_state::Config, - validator_rpc_address: std::net::SocketAddr, + validator_rpc_address: String, validator_grpc_address: std::net::SocketAddr, validator_cookie_auth: bool, validator_cookie_path: Option, @@ -89,8 +89,8 @@ impl StateServiceConfig { #[derive(Debug, Clone)] #[deprecated] pub struct FetchServiceConfig { - /// Validator JsonRPC address. - pub validator_rpc_address: std::net::SocketAddr, + /// Validator JsonRPC address (supports hostname:port or ip:port format). 
+ pub validator_rpc_address: String, /// Enable validator rpc cookie authentification with Some: path to the validator cookie file. pub validator_cookie_path: Option, /// Validator JsonRPC user. @@ -110,7 +110,7 @@ impl FetchServiceConfig { /// Returns a new instance of [`FetchServiceConfig`]. #[allow(clippy::too_many_arguments)] pub fn new( - validator_rpc_address: std::net::SocketAddr, + validator_rpc_address: String, validator_cookie_path: Option, validator_rpc_user: Option, validator_rpc_password: Option, diff --git a/zaino-testutils/src/lib.rs b/zaino-testutils/src/lib.rs index 6a89d32f4..ad4c607e2 100644 --- a/zaino-testutils/src/lib.rs +++ b/zaino-testutils/src/lib.rs @@ -217,13 +217,15 @@ impl ValidatorExt for Zebrad { ) -> Result<(Self, ValidatorConfig), LaunchError> { let zebrad = Zebrad::launch(config).await?; let validator_config = ValidatorConfig { - validator_jsonrpc_listen_address: SocketAddr::new( - IpAddr::V4(Ipv4Addr::LOCALHOST), - zebrad.rpc_listen_port(), + validator_jsonrpc_listen_address: format!( + "{}:{}", + Ipv4Addr::LOCALHOST, + zebrad.rpc_listen_port() ), - validator_grpc_listen_address: Some(SocketAddr::new( - IpAddr::V4(Ipv4Addr::LOCALHOST), - zebrad.indexer_listen_port(), + validator_grpc_listen_address: Some(format!( + "{}:{}", + Ipv4Addr::LOCALHOST, + zebrad.indexer_listen_port() )), validator_cookie_path: None, validator_user: Some("xxxxxx".to_string()), @@ -239,10 +241,7 @@ impl ValidatorExt for Zcashd { ) -> Result<(Self, ValidatorConfig), LaunchError> { let zcashd = Zcashd::launch(config).await?; let validator_config = ValidatorConfig { - validator_jsonrpc_listen_address: SocketAddr::new( - IpAddr::V4(Ipv4Addr::LOCALHOST), - zcashd.port(), - ), + validator_jsonrpc_listen_address: format!("{}:{}", Ipv4Addr::LOCALHOST, zcashd.port()), validator_grpc_listen_address: None, validator_cookie_path: None, validator_user: Some("xxxxxx".to_string()), @@ -256,7 +255,7 @@ impl TestManager where C: ValidatorExt, Service: LightWalletService + 
Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { /// Launches zcash-local-net. @@ -378,7 +377,8 @@ where }; let (handle, service_subscriber) = Indexer::::launch_inner( - Service::Config::from(indexer_config.clone()), + Service::Config::try_from(indexer_config.clone()) + .expect("Failed to convert ZainodConfig to service config"), indexer_config, ) .await @@ -438,6 +438,8 @@ where full_node_rpc_listen_address, full_node_grpc_listen_address: validator_settings .validator_grpc_listen_address + .as_ref() + .and_then(|addr| addr.parse().ok()) .unwrap_or(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0)), zaino_handle, zaino_json_rpc_listen_address: zaino_json_listen_address, diff --git a/zainod/src/config.rs b/zainod/src/config.rs index 004bcf825..be58dbd03 100644 --- a/zainod/src/config.rs +++ b/zainod/src/config.rs @@ -4,7 +4,7 @@ use figment::{ Figment, }; use std::{ - net::{IpAddr, SocketAddr, ToSocketAddrs}, + net::{IpAddr, SocketAddr}, path::PathBuf, }; // Added for Serde deserialization helpers @@ -17,8 +17,8 @@ use serde::{ use tracing::warn; use tracing::{error, info}; use zaino_common::{ - CacheConfig, DatabaseConfig, DatabaseSize, Network, ServiceConfig, StorageConfig, - ValidatorConfig, + try_resolve_address, AddressResolution, CacheConfig, DatabaseConfig, DatabaseSize, Network, + ServiceConfig, StorageConfig, ValidatorConfig, }; use zaino_serve::server::config::{GrpcServerConfig, JsonRpcServerConfig}; @@ -117,18 +117,49 @@ impl ZainodConfig { let grpc_addr = fetch_socket_addr_from_hostname(&self.grpc_settings.listen_address.to_string())?; - let validator_addr = fetch_socket_addr_from_hostname( - &self - .validator_settings - .validator_jsonrpc_listen_address - .to_string(), - )?; + // Validate the validator address using the richer result type that distinguishes + // between format errors (always fail) and DNS lookup failures (can defer for Docker). 
+ let validator_addr_result = + try_resolve_address(&self.validator_settings.validator_jsonrpc_listen_address); + + match validator_addr_result { + AddressResolution::Resolved(validator_addr) => { + // Successfully resolved - perform full IP-based validation. + if !is_private_listen_addr(&validator_addr) { + return Err(IndexerError::ConfigError( + "Zaino may only connect to Zebra with private IP addresses.".to_string(), + )); + } - // Ensure validator listen address is private. - if !is_private_listen_addr(&validator_addr) { - return Err(IndexerError::ConfigError( - "Zaino may only connect to Zebra with private IP addresses.".to_string(), - )); + #[cfg(not(feature = "no_tls_use_unencrypted_traffic"))] + { + // Require cookie auth for non-loopback addresses. + if !is_loopback_listen_addr(&validator_addr) + && self.validator_settings.validator_cookie_path.is_none() + { + return Err(IndexerError::ConfigError( + "Validator listen address is not loopback, so cookie authentication must be enabled." + .to_string(), + )); + } + } + } + AddressResolution::UnresolvedHostname { ref address, .. } => { + // Valid hostname format but DNS lookup failed (e.g., Docker DNS). + // Allow this - IP validation will happen at connection time. + info!( + "Validator address '{}' is a hostname that cannot be resolved at config time. \ + IP validation deferred to connection time.", + address + ); + } + AddressResolution::InvalidFormat { address, reason } => { + // Invalid address format - always fail immediately. + return Err(IndexerError::ConfigError(format!( + "Invalid validator address '{}': {}", + address, reason + ))); + } } #[cfg(not(feature = "no_tls_use_unencrypted_traffic"))] @@ -139,16 +170,6 @@ impl ZainodConfig { "TLS required when connecting to external addresses.".to_string(), )); } - - // Ensure validator rpc cookie authentication is used when connecting to non-loopback addresses. 
- if !is_loopback_listen_addr(&validator_addr) - && self.validator_settings.validator_cookie_path.is_none() - { - return Err(IndexerError::ConfigError( - "Validator listen address is not loopback, so cookie authentication must be enabled." - .to_string(), - )); - } } #[cfg(feature = "no_tls_use_unencrypted_traffic")] @@ -191,8 +212,8 @@ impl Default for ZainodConfig { tls: None, }, validator_settings: ValidatorConfig { - validator_grpc_listen_address: Some("127.0.0.1:18230".parse().unwrap()), - validator_jsonrpc_listen_address: "127.0.0.1:18232".parse().unwrap(), + validator_grpc_listen_address: Some("127.0.0.1:18230".to_string()), + validator_jsonrpc_listen_address: "127.0.0.1:18232".to_string(), validator_cookie_path: None, validator_user: Some("xxxxxx".to_string()), validator_password: Some("xxxxxx".to_string()), @@ -240,19 +261,8 @@ pub fn default_zebra_db_path() -> Result { /// Resolves a hostname to a SocketAddr. fn fetch_socket_addr_from_hostname(address: &str) -> Result { - address.parse::().or_else(|_| { - let addrs: Vec<_> = address - .to_socket_addrs() - .map_err(|e| IndexerError::ConfigError(format!("Invalid address '{address}': {e}")))? 
- .collect(); - if let Some(ipv4_addr) = addrs.iter().find(|addr| addr.is_ipv4()) { - Ok(*ipv4_addr) - } else { - addrs.into_iter().next().ok_or_else(|| { - IndexerError::ConfigError(format!("Unable to resolve address '{address}'")) - }) - } - }) + zaino_common::net::resolve_socket_addr(address) + .map_err(|e| IndexerError::ConfigError(format!("Invalid address '{address}': {e}"))) } /// Validates that the configured `address` is either: @@ -343,19 +353,41 @@ impl TryFrom for BackendConfig { fn try_from(cfg: ZainodConfig) -> Result { match cfg.backend { zaino_state::BackendType::State => { - Ok(BackendConfig::State(StateServiceConfig::from(cfg))) + Ok(BackendConfig::State(StateServiceConfig::try_from(cfg)?)) } zaino_state::BackendType::Fetch => { - Ok(BackendConfig::Fetch(FetchServiceConfig::from(cfg))) + Ok(BackendConfig::Fetch(FetchServiceConfig::try_from(cfg)?)) } } } } #[allow(deprecated)] -impl From for StateServiceConfig { - fn from(cfg: ZainodConfig) -> Self { - StateServiceConfig { +impl TryFrom for StateServiceConfig { + type Error = IndexerError; + + fn try_from(cfg: ZainodConfig) -> Result { + let grpc_listen_address = cfg + .validator_settings + .validator_grpc_listen_address + .as_ref() + .ok_or_else(|| { + IndexerError::ConfigError( + "Missing validator_grpc_listen_address in configuration".to_string(), + ) + })?; + let validator_grpc_address = + fetch_socket_addr_from_hostname(grpc_listen_address).map_err(|e| { + let msg = match e { + IndexerError::ConfigError(msg) => msg, + other => other.to_string(), + }; + IndexerError::ConfigError(format!( + "Invalid validator_grpc_listen_address '{grpc_listen_address}': {msg}" + )) + })?; + + Ok(StateServiceConfig { validator_state_config: zebra_state::Config { cache_dir: cfg.zebra_db_path.clone(), ephemeral: false, @@ -364,11 +396,11 @@ impl From for StateServiceConfig { debug_validity_check_interval: None, should_backup_non_finalized_state: true, }, - validator_rpc_address: 
cfg.validator_settings.validator_jsonrpc_listen_address, - validator_grpc_address: cfg + validator_rpc_address: cfg .validator_settings - .validator_grpc_listen_address - .expect("Zebra config with no grpc_listen_address"), + .validator_jsonrpc_listen_address + .clone(), + validator_grpc_address, validator_cookie_auth: cfg.validator_settings.validator_cookie_path.is_some(), validator_cookie_path: cfg.validator_settings.validator_cookie_path, validator_rpc_user: cfg @@ -382,14 +414,16 @@ impl From for StateServiceConfig { service: cfg.service, storage: cfg.storage, network: cfg.network, - } + }) } } #[allow(deprecated)] -impl From for FetchServiceConfig { - fn from(cfg: ZainodConfig) -> Self { - FetchServiceConfig { +impl TryFrom for FetchServiceConfig { + type Error = IndexerError; + + fn try_from(cfg: ZainodConfig) -> Result { + Ok(FetchServiceConfig { validator_rpc_address: cfg.validator_settings.validator_jsonrpc_listen_address, validator_cookie_path: cfg.validator_settings.validator_cookie_path, validator_rpc_user: cfg @@ -403,7 +437,7 @@ impl From for FetchServiceConfig { service: cfg.service, storage: cfg.storage, network: cfg.network, - } + }) } } diff --git a/zainod/src/indexer.rs b/zainod/src/indexer.rs index 81b7caff7..5b18bb5a5 100644 --- a/zainod/src/indexer.rs +++ b/zainod/src/indexer.rs @@ -45,7 +45,7 @@ pub async fn spawn_indexer( config.check_config()?; info!("Checking connection with node.."); let zebrad_uri = test_node_and_return_url( - config.validator_settings.validator_jsonrpc_listen_address, + &config.validator_settings.validator_jsonrpc_listen_address, config.validator_settings.validator_cookie_path.clone(), config.validator_settings.validator_user.clone(), config.validator_settings.validator_password.clone(), From 828e0f342ee1134dc9bc5daea3b1adef234d6ddc Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Fri, 19 Dec 2025 14:43:32 -0400 Subject: [PATCH 025/114] clippify --- integration-tests/tests/chain_cache.rs | 6 ++---- 
.../src/chain_index/tests/proptest_blockgen.rs | 12 +++++------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/integration-tests/tests/chain_cache.rs b/integration-tests/tests/chain_cache.rs index 6fc6b795a..17c40a422 100644 --- a/integration-tests/tests/chain_cache.rs +++ b/integration-tests/tests/chain_cache.rs @@ -69,7 +69,7 @@ mod chain_query_interface { use zcash_local_net::validator::{zcashd::Zcashd, zebrad::Zebrad}; use zebra_chain::{ parameters::{ - testnet::{ConfiguredActivationHeights, RegtestParameters}, + testnet::RegtestParameters, NetworkKind, }, serialization::{ZcashDeserialize, ZcashDeserializeInto}, @@ -112,9 +112,7 @@ mod chain_query_interface { }; let network = match test_manager.network { NetworkKind::Regtest => zebra_chain::parameters::Network::new_regtest( - RegtestParameters::from(ConfiguredActivationHeights::from( - test_manager.local_net.get_activation_heights().await, - )), + RegtestParameters::from(test_manager.local_net.get_activation_heights().await ), ), NetworkKind::Testnet => zebra_chain::parameters::Network::new_default_testnet(), diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index cb7388c04..038b51966 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -3,12 +3,11 @@ use std::{ time::Duration, }; -use primitive_types::U256; use proptest::{ prelude::{Arbitrary as _, BoxedStrategy, Just}, strategy::Strategy, }; -use rand::{seq::SliceRandom, thread_rng}; +use rand::seq::SliceRandom; use tonic::async_trait; use zaino_common::{network::ActivationHeights, DatabaseConfig, Network, StorageConfig}; use zebra_chain::{ @@ -81,7 +80,7 @@ fn make_chain() { for (hash, block) in &snapshot.blocks { if hash != &best_tip_hash { assert!(block.chainwork().to_u256() <= best_tip_block.chainwork().to_u256()); - if snapshot.heights_to_hashes.values().find(|h| block.hash() == 
*h).is_some() { + if snapshot.heights_to_hashes.values().any(|h| block.hash() == h) { assert_eq!(index_reader.find_fork_point(&snapshot, hash).unwrap().unwrap().0, *hash); } else { assert_ne!(index_reader.find_fork_point(&snapshot, hash).unwrap().unwrap().0, *hash); @@ -254,8 +253,7 @@ impl BlockchainSource for ProptestMockchain { .find(|branch| { branch .iter() - .find(|block| block.hash() == *self.best_block.lock().unwrap()) - .is_some() + .any(|block| block.hash() == *self.best_block.lock().unwrap()) }) .unwrap() .iter() @@ -380,13 +378,13 @@ impl BlockchainSource for ProptestMockchain { > { let (sender, receiver) = tokio::sync::mpsc::channel(1_000); let self_clone = self.clone(); - tokio::task::spawn((|| async move { + tokio::task::spawn(async move { for block in self_clone.all_blocks_arb_branch_order() { sender.send((block.hash(), block.clone())).await.unwrap() } // don't drop the sender std::mem::forget(sender); - })()) + }) .await .unwrap(); Ok(Some(receiver)) From 4b36bd3db2da971a813c53e2cff1b73589045fd9 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Fri, 19 Dec 2025 17:17:24 -0400 Subject: [PATCH 026/114] make block height non-optional, and use alternate means to determine presence on best chain --- integration-tests/tests/chain_cache.rs | 19 ++++---- zaino-state/src/chain_index.rs | 26 +++++++---- .../src/chain_index/finalised_state/db/v0.rs | 24 ++--------- .../src/chain_index/finalised_state/db/v1.rs | 43 +++++-------------- .../src/chain_index/non_finalised_state.rs | 24 ++++++++--- .../chain_index/tests/finalised_state/v1.rs | 8 ++-- .../chain_index/tests/proptest_blockgen.rs | 2 +- .../src/chain_index/types/db/legacy.rs | 23 ++++++---- zaino-state/src/chain_index/types/helpers.rs | 30 +++---------- 9 files changed, 84 insertions(+), 115 deletions(-) diff --git a/integration-tests/tests/chain_cache.rs b/integration-tests/tests/chain_cache.rs index 17c40a422..7f2049513 100644 --- a/integration-tests/tests/chain_cache.rs +++ 
b/integration-tests/tests/chain_cache.rs @@ -68,10 +68,7 @@ mod chain_query_interface { }; use zcash_local_net::validator::{zcashd::Zcashd, zebrad::Zebrad}; use zebra_chain::{ - parameters::{ - testnet::RegtestParameters, - NetworkKind, - }, + parameters::{testnet::RegtestParameters, NetworkKind}, serialization::{ZcashDeserialize, ZcashDeserializeInto}, }; @@ -111,9 +108,11 @@ mod chain_query_interface { None => test_manager.data_dir.clone(), }; let network = match test_manager.network { - NetworkKind::Regtest => zebra_chain::parameters::Network::new_regtest( - RegtestParameters::from(test_manager.local_net.get_activation_heights().await ), - ), + NetworkKind::Regtest => { + zebra_chain::parameters::Network::new_regtest(RegtestParameters::from( + test_manager.local_net.get_activation_heights().await, + )) + } NetworkKind::Testnet => zebra_chain::parameters::Network::new_default_testnet(), NetworkKind::Mainnet => zebra_chain::parameters::Network::Mainnet, @@ -345,9 +344,9 @@ mod chain_query_interface { assert_eq!( branch_id, - if height == Some(chain_index::types::GENESIS_HEIGHT) { + if height == chain_index::types::GENESIS_HEIGHT { None - } else if height == Some(Height::try_from(1).unwrap()) { + } else if height == Height::try_from(1).unwrap() { zebra_chain::parameters::NetworkUpgrade::Canopy .branch_id() .map(u32::from) @@ -404,7 +403,7 @@ mod chain_query_interface { .unwrap(); assert_eq!( transaction_status_best_chain.unwrap(), - BestChainLocation::Block(*block_hash, height.unwrap()) + BestChainLocation::Block(*block_hash, height) ); assert!(transaction_status_nonbest_chain.is_empty()); } diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index c81a3f751..d4464976a 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -639,7 +639,12 @@ impl ChainIndex for NodeBackedChainIndexSubscriber Result, Self::Error> { match nonfinalized_snapshot.blocks.get(&hash).cloned() { - Some(block) => 
Ok(block.index().height()), + Some(block) => Ok(nonfinalized_snapshot + .heights_to_hashes + .values() + .find(|h| **h == hash) + // Canonical height is None for blocks not on the best chain + .map(|_| block.index().height())), None => match self.finalized_state.get_block_height(hash).await { Ok(height) => Ok(height), Err(_e) => Err(ChainIndexError::database_hole(hash)), @@ -712,8 +717,8 @@ impl ChainIndex for NodeBackedChainIndexSubscriber ChainIndex for NodeBackedChainIndexSubscriber ChainIndex for NodeBackedChainIndexSubscriber>(); let mut best_chain_block = blocks_containing_transaction .iter() - .find_map(|block| BestChainLocation::try_from(block).ok()); + .find(|block| snapshot.heights_to_hashes.get(&block.height()) == Some(block.hash())) + .map(|block| BestChainLocation::Block(*block.hash(), block.height())); let mut non_best_chain_blocks: HashSet = blocks_containing_transaction .iter() - .filter_map(|block| NonBestChainLocation::try_from(block).ok()) + .filter(|block| { + snapshot.heights_to_hashes.get(&block.height()) != Some(block.hash()) + }) + .map(|block| NonBestChainLocation::Block(*block.hash(), block.height())) .collect(); let in_mempool = self .mempool @@ -860,12 +869,11 @@ impl ChainIndex for NodeBackedChainIndexSubscriber { - let block_height = block - .index() - .height() - .expect("height always some in finalised state") - .0; + let block_height = block.index().height().0; let last_height = DbHeight::from_be_bytes( last_height_bytes.expect("Height is always some in the finalised state"), @@ -549,11 +537,7 @@ impl DbV0 { &self, block: &IndexedBlock, ) -> Result<(), FinalisedStateError> { - let zebra_height: ZebraHeight = block - .index() - .height() - .expect("height always some in the finalised state") - .into(); + let zebra_height: ZebraHeight = block.index().height().into(); let zebra_hash: ZebraHash = zebra_chain::block::Hash::from(*block.index().hash()); let height_key = DbHeight(zebra_height).to_be_bytes(); diff --git 
a/zaino-state/src/chain_index/finalised_state/db/v1.rs b/zaino-state/src/chain_index/finalised_state/db/v1.rs index 43fd80ed3..785940209 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v1.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v1.rs @@ -843,9 +843,7 @@ impl DbV1 { self.status.store(StatusType::Syncing); let block_hash = *block.index().hash(); let block_hash_bytes = block_hash.to_bytes()?; - let block_height = block.index().height().ok_or(FinalisedStateError::Custom( - "finalised state received non finalised block".to_string(), - ))?; + let block_height = block.index().height(); let block_height_bytes = block_height.to_bytes()?; // check this is the *next* block in the chain. @@ -883,12 +881,7 @@ impl DbV1 { })?; // Build DBHeight - let height_entry = StoredEntryFixed::new( - &block_hash_bytes, - block.index().height().ok_or(FinalisedStateError::Custom( - "finalised state received non finalised block".to_string(), - ))?, - ); + let height_entry = StoredEntryFixed::new(&block_hash_bytes, block.index().height()); // Build header let header_entry = StoredEntryVar::new( @@ -1014,7 +1007,7 @@ impl DbV1 { ); } else { return Err(FinalisedStateError::InvalidBlock { - height: block.height().expect("already checked height is some").0, + height: block.height().0, hash: *block.hash(), reason: "Invalid block data: invalid transparent input.".to_string(), }); @@ -1206,10 +1199,7 @@ impl DbV1 { info!( "Successfully committed block {} at height {} to ZainoDB.", &block.index().hash(), - &block - .index() - .height() - .expect("height always some in the finalised state") + &block.index().height() ); Ok(()) @@ -1308,19 +1298,12 @@ impl DbV1 { block: &IndexedBlock, ) -> Result<(), FinalisedStateError> { // Check block height and hash - let block_height = block - .index() - .height() - .ok_or(FinalisedStateError::InvalidBlock { - height: 0, - hash: *block.hash(), - reason: "Invalid block data: Block does not contain finalised height".to_string(), - })?; + 
let block_height = block.index().height(); let block_height_bytes = block_height .to_bytes() .map_err(|_| FinalisedStateError::InvalidBlock { - height: block.height().expect("already checked height is some").0, + height: block.height().0, hash: *block.hash(), reason: "Corrupt block data: failed to serialise hash".to_string(), })?; @@ -1330,7 +1313,7 @@ impl DbV1 { block_hash .to_bytes() .map_err(|_| FinalisedStateError::InvalidBlock { - height: block.height().expect("already checked height is some").0, + height: block.height().0, hash: *block.hash(), reason: "Corrupt block data: failed to serialise hash".to_string(), })?; @@ -1416,12 +1399,12 @@ impl DbV1 { *prev_outpoint.prev_txid(), )) .map_err(|e| FinalisedStateError::InvalidBlock { - height: block.height().expect("already checked height is some").0, + height: block.height().0, hash: *block.hash(), reason: e.to_string(), })? .ok_or_else(|| FinalisedStateError::InvalidBlock { - height: block.height().expect("already checked height is some").0, + height: block.height().0, hash: *block.hash(), reason: "Invalid block data: invalid txid data.".to_string(), })?; @@ -1439,7 +1422,7 @@ impl DbV1 { ); } else { return Err(FinalisedStateError::InvalidBlock { - height: block.height().expect("already checked height is some").0, + height: block.height().0, hash: *block.hash(), reason: "Invalid block data: invalid transparent input.".to_string(), }); @@ -3117,11 +3100,7 @@ impl DbV1 { // Construct CompactBlock Ok(zaino_proto::proto::compact_formats::CompactBlock { proto_version: 4, - height: header - .index() - .height() - .expect("height always present in finalised state.") - .0 as u64, + height: header.index().height().0 as u64, hash: header.index().hash().0.to_vec(), prev_hash: header.index().parent_hash().0.to_vec(), // Is this safe? 
diff --git a/zaino-state/src/chain_index/non_finalised_state.rs b/zaino-state/src/chain_index/non_finalised_state.rs index b2d8e1727..55b574e3a 100644 --- a/zaino-state/src/chain_index/non_finalised_state.rs +++ b/zaino-state/src/chain_index/non_finalised_state.rs @@ -108,6 +108,9 @@ impl From for SyncError { UpdateError::ReceiverDisconnected => SyncError::StagingChannelClosed, UpdateError::StaleSnapshot => SyncError::CompetingSyncProcess, UpdateError::FinalizedStateCorruption => SyncError::CannotReadFinalizedState, + UpdateError::DatabaseHole => { + SyncError::ReorgFailure(String::from("could not determine best chain")) + } } } } @@ -141,7 +144,7 @@ pub enum InitError { impl BestTip { /// Create a BestTip from an IndexedBlock fn from_block(block: &IndexedBlock) -> Result { - let height = block.height().ok_or(InitError::InitalBlockMissingHeight)?; + let height = block.height(); let blockhash = *block.hash(); Ok(Self { height, blockhash }) } @@ -169,7 +172,7 @@ impl NonfinalizedBlockCacheSnapshot { fn add_block_new_chaintip(&mut self, block: IndexedBlock) { self.best_tip = BestTip { - height: block.height().expect("all blocks have height"), + height: block.height(), blockhash: *block.hash(), }; self.add_block(block) @@ -185,14 +188,13 @@ impl NonfinalizedBlockCacheSnapshot { // Keep the last finalized block. This means we don't have to check // the finalized state when the entire non-finalized state is reorged away. 
self.blocks - .retain(|_hash, block| block.height().unwrap() >= finalized_height); + .retain(|_hash, block| block.height() >= finalized_height); self.heights_to_hashes .retain(|height, _hash| height >= &finalized_height); } fn add_block(&mut self, block: IndexedBlock) { - self.heights_to_hashes - .insert(block.height().expect("block to have height"), *block.hash()); + self.heights_to_hashes.insert(block.height(), *block.hash()); self.blocks.insert(*block.hash(), block); } } @@ -472,6 +474,15 @@ impl NonFinalizedState { .unwrap_or(Height(0)); new_snapshot.remove_finalized_blocks(finalized_height); + let best_block = &new_snapshot + .blocks + .values() + .max_by_key(|block| block.chainwork()) + .cloned() + .expect("empty snapshot impossible"); + self.handle_reorg(&mut new_snapshot, best_block) + .await + .map_err(|_e| UpdateError::DatabaseHole)?; // Need to get best hash at some point in this process let stored = self @@ -639,6 +650,9 @@ pub enum UpdateError { /// Something has gone unrecoverably wrong in the finalized /// state. 
A full rebuild is likely needed FinalizedStateCorruption, + + /// A block in the snapshot is missing + DatabaseHole, } trait Block { diff --git a/zaino-state/src/chain_index/tests/finalised_state/v1.rs b/zaino-state/src/chain_index/tests/finalised_state/v1.rs index 4ca42a83b..4b41d6315 100644 --- a/zaino-state/src/chain_index/tests/finalised_state/v1.rs +++ b/zaino-state/src/chain_index/tests/finalised_state/v1.rs @@ -236,7 +236,7 @@ async fn load_db_backend_from_file() { assert_eq!(prev_hash, block.index().parent_hash); } prev_hash = Some(block.index().hash); - assert_eq!(block.index.height, Some(Height(height))); + assert_eq!(block.index.height, Height(height)); } assert!(finalized_state_backend .get_chain_block(Height(101)) @@ -281,7 +281,7 @@ async fn try_write_invalid_block() { let mut chain_block = IndexedBlock::try_from(BlockWithMetadata::new(&zebra_block, metadata)).unwrap(); - chain_block.index.height = Some(crate::Height(height + 1)); + chain_block.index.height = crate::Height(height + 1); dbg!(chain_block.index.height); let db_err = dbg!(zaino_db.write_block(chain_block).await); @@ -912,7 +912,7 @@ async fn check_faucet_spent_map() { .find(|tx| { let (block_height, tx_idx) = (spender_index.block_height(), spender_index.tx_index()); - chain_block.index().height() == Some(Height(block_height)) + chain_block.index().height() == Height(block_height) && tx.index() == tx_idx as u64 }) .cloned() @@ -1081,7 +1081,7 @@ async fn check_recipient_spent_map() { .find(|tx| { let (block_height, tx_idx) = (spender_index.block_height(), spender_index.tx_index()); - chain_block.index().height() == Some(Height(block_height)) + chain_block.index().height() == Height(block_height) && tx.index() == tx_idx as u64 }) .cloned() diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 987c7f42f..60bfd080e 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ 
b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -76,7 +76,7 @@ fn make_chain() { for (hash, block) in &snapshot.blocks { if hash != &best_tip_hash { assert!(block.chainwork().to_u256() <= best_tip_block.chainwork().to_u256()); - if snapshot.heights_to_hashes.values().any(|h| block.hash() == h) { + if snapshot.heights_to_hashes.get(&block.height()) == Some(block.hash()) { assert_eq!(index_reader.find_fork_point(&snapshot, hash).unwrap().unwrap().0, *hash); } else { assert_ne!(index_reader.find_fork_point(&snapshot, hash).unwrap().unwrap().0, *hash); diff --git a/zaino-state/src/chain_index/types/db/legacy.rs b/zaino-state/src/chain_index/types/db/legacy.rs index 59ceb5a3b..d692abba2 100644 --- a/zaino-state/src/chain_index/types/db/legacy.rs +++ b/zaino-state/src/chain_index/types/db/legacy.rs @@ -634,8 +634,8 @@ pub struct BlockIndex { pub parent_hash: BlockHash, /// The cumulative proof-of-work of the blockchain up to this block, used for chain selection. pub chainwork: ChainWork, - /// The height of this block if it's in the current best chain. None if it's part of a fork. - pub height: Option, + /// The height of this block. + pub height: Height, } impl BlockIndex { @@ -644,7 +644,7 @@ impl BlockIndex { hash: BlockHash, parent_hash: BlockHash, chainwork: ChainWork, - height: Option, + height: Height, ) -> Self { Self { hash, @@ -670,7 +670,7 @@ impl BlockIndex { } /// Returns the height of this block if it’s part of the best chain. 
- pub fn height(&self) -> Option { + pub fn height(&self) -> Height { self.height } } @@ -685,7 +685,7 @@ impl ZainoVersionedSerde for BlockIndex { self.parent_hash.serialize(&mut w)?; self.chainwork.serialize(&mut w)?; - write_option(&mut w, &self.height, |w, h| h.serialize(w)) + write_option(&mut w, &Some(self.height), |w, h| h.serialize(w)) } fn decode_latest(r: &mut R) -> io::Result { @@ -699,7 +699,12 @@ impl ZainoVersionedSerde for BlockIndex { let chainwork = ChainWork::deserialize(&mut r)?; let height = read_option(&mut r, |r| Height::deserialize(r))?; - Ok(BlockIndex::new(hash, parent_hash, chainwork, height)) + Ok(BlockIndex::new( + hash, + parent_hash, + chainwork, + height.expect("blocks always have height"), + )) } } @@ -1132,7 +1137,7 @@ impl IndexedBlock { } /// Returns the block height if available. - pub fn height(&self) -> Option { + pub fn height(&self) -> Height { self.index.height() } @@ -1149,7 +1154,7 @@ impl IndexedBlock { /// Converts this `IndexedBlock` into a CompactBlock protobuf message using proto v4 format. pub fn to_compact_block(&self) -> zaino_proto::proto::compact_formats::CompactBlock { // NOTE: Returns u64::MAX if the block is not in the best chain. 
- let height: u64 = self.height().map(|h| h.0.into()).unwrap_or(u64::MAX); + let height: u64 = self.height().0.into(); let hash = self.hash().0.to_vec(); let prev_hash = self.index().parent_hash().0.to_vec(); @@ -1344,7 +1349,7 @@ impl BlockHash::from(hash), BlockHash::from(parent_hash), chainwork, - Some(height), + height, ); Ok(IndexedBlock::new( diff --git a/zaino-state/src/chain_index/types/helpers.rs b/zaino-state/src/chain_index/types/helpers.rs index b1f31f4a6..99c88caff 100644 --- a/zaino-state/src/chain_index/types/helpers.rs +++ b/zaino-state/src/chain_index/types/helpers.rs @@ -32,9 +32,7 @@ pub enum BestChainLocation { #[derive(Debug, PartialEq, Eq, Hash)] pub enum NonBestChainLocation { /// the block containing the transaction - // TODO: in this case, returning a consensus branch - // ID would be useful - Block(BlockHash), + Block(BlockHash, Height), /// if the transaction is in the mempool /// but the mempool does not match the /// snapshot's chaintip, return the target height if known @@ -44,27 +42,6 @@ pub enum NonBestChainLocation { Mempool(Option), } -impl TryFrom<&IndexedBlock> for NonBestChainLocation { - type Error = (); - - fn try_from(value: &IndexedBlock) -> Result { - match value.height() { - Some(_) => Err(()), - None => Ok(NonBestChainLocation::Block(*value.hash())), - } - } -} -impl TryFrom<&IndexedBlock> for BestChainLocation { - type Error = (); - - fn try_from(value: &IndexedBlock) -> Result { - match value.height() { - None => Err(()), - Some(height) => Ok(BestChainLocation::Block(*value.hash(), height)), - } - } -} - /// Wrapper for optional commitment tree roots from blockchain source #[derive(Clone)] pub struct TreeRootData { @@ -295,7 +272,10 @@ impl<'a> BlockWithMetadata<'a> { let block = self.block; let hash = BlockHash::from(block.hash()); let parent_hash = BlockHash::from(block.header.previous_block_hash); - let height = block.coinbase_height().map(|height| Height(height.0)); + let height = block + .coinbase_height() + 
.map(|height| Height(height.0)) + .ok_or_else(|| String::from("Any valid block has a coinbase height"))?; let block_work = block.header.difficulty_threshold.to_work().ok_or_else(|| { "Failed to calculate block work from difficulty threshold".to_string() From c6ec85a81c35c5519b92569d6c94397077d9e177 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Fri, 19 Dec 2025 17:19:51 -0400 Subject: [PATCH 027/114] cleanup printlns --- zaino-state/src/chain_index/mempool.rs | 1 - zaino-state/src/chain_index/non_finalised_state.rs | 2 -- 2 files changed, 3 deletions(-) diff --git a/zaino-state/src/chain_index/mempool.rs b/zaino-state/src/chain_index/mempool.rs index 95f63c9a2..607528a13 100644 --- a/zaino-state/src/chain_index/mempool.rs +++ b/zaino-state/src/chain_index/mempool.rs @@ -69,7 +69,6 @@ impl Mempool { } } - println!("get block hash"); let best_block_hash: BlockHash = match fetcher.get_best_block_hash().await { Ok(block_hash_opt) => match block_hash_opt { Some(hash) => hash.into(), diff --git a/zaino-state/src/chain_index/non_finalised_state.rs b/zaino-state/src/chain_index/non_finalised_state.rs index b5b84eb6e..e312cf4c2 100644 --- a/zaino-state/src/chain_index/non_finalised_state.rs +++ b/zaino-state/src/chain_index/non_finalised_state.rs @@ -673,7 +673,6 @@ impl NonFinalizedState { .get_tree_roots_from_source(block.hash().into()) .await .map_err(|e| { - dbg!(&e); SyncError::ZebradConnectionError(NodeConnectionError::UnrecoverableError(Box::new( InvalidData(format!("{}", e)), ))) @@ -686,7 +685,6 @@ impl NonFinalizedState { self.network.clone(), ) .map_err(|e| { - dbg!(&e); SyncError::ZebradConnectionError(NodeConnectionError::UnrecoverableError(Box::new( InvalidData(e), ))) From 246fc798b608cbc8aeb7e80ad410be9b83acb2ed Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Fri, 19 Dec 2025 17:23:10 -0400 Subject: [PATCH 028/114] clippify --- zaino-state/src/chain_index/non_finalised_state.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git 
a/zaino-state/src/chain_index/non_finalised_state.rs b/zaino-state/src/chain_index/non_finalised_state.rs index 40037b696..143f508ce 100644 --- a/zaino-state/src/chain_index/non_finalised_state.rs +++ b/zaino-state/src/chain_index/non_finalised_state.rs @@ -384,11 +384,9 @@ impl NonFinalizedState { .cloned() { Some(prev_block) => { - if working_snapshot + if !working_snapshot .heights_to_hashes - .values() - .find(|hash| *hash == prev_block.hash()) - .is_none() + .values().any(|hash| hash == prev_block.hash()) { Box::pin(self.handle_reorg(working_snapshot, &prev_block)).await? } else { From 89a103a314865cab92be2e9d6b56ed2cb8d68dc3 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Fri, 19 Dec 2025 17:27:30 -0400 Subject: [PATCH 029/114] don't ignore make_chain now that it passes --- zaino-state/src/chain_index/tests/proptest_blockgen.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 60bfd080e..795cbc925 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -25,7 +25,6 @@ use crate::{ }; #[test] -#[ignore = "Failing due to sync reorg bugs"] fn make_chain() { init_tracing(); let network = Network::Regtest(ActivationHeights::default()); From 4978f484f3109beb010cd2173c3c364944b87590 Mon Sep 17 00:00:00 2001 From: Hazel OHearn Date: Tue, 6 Jan 2026 10:25:38 -0400 Subject: [PATCH 030/114] fix get_transaction_status --- zaino-state/src/chain_index.rs | 7 ++++++- zaino-state/src/chain_index/tests.rs | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index d4464976a..12fe201c2 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -828,15 +828,20 @@ impl ChainIndex for NodeBackedChainIndexSubscriber>(); + let start_of_nonfinalized = 
snapshot.heights_to_hashes.keys().min().unwrap(); let mut best_chain_block = blocks_containing_transaction .iter() - .find(|block| snapshot.heights_to_hashes.get(&block.height()) == Some(block.hash())) + .find(|block| { + snapshot.heights_to_hashes.get(&block.height()) == Some(block.hash()) + || block.height() < *start_of_nonfinalized + }) .map(|block| BestChainLocation::Block(*block.hash(), block.height())); let mut non_best_chain_blocks: HashSet = blocks_containing_transaction .iter() .filter(|block| { snapshot.heights_to_hashes.get(&block.height()) != Some(block.hash()) + && block.height() >= *start_of_nonfinalized }) .map(|block| NonBestChainLocation::Block(*block.hash(), block.height())) .collect(); diff --git a/zaino-state/src/chain_index/tests.rs b/zaino-state/src/chain_index/tests.rs index 8cd258023..4c6215888 100644 --- a/zaino-state/src/chain_index/tests.rs +++ b/zaino-state/src/chain_index/tests.rs @@ -196,6 +196,7 @@ mod mockchain_tests { ) .await .unwrap(); + assert!(transaction_status_nonbest_chain.is_empty()); assert_eq!( transaction_status_best_chain.unwrap(), BestChainLocation::Block( @@ -203,7 +204,6 @@ mod mockchain_tests { crate::Height(block_height.unwrap().0) ) ); - assert!(transaction_status_nonbest_chain.is_empty()); } } From 9f72074ef5d2a315f91a0d1c97361dcdb19d2045 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 7 Jan 2026 11:37:37 +0000 Subject: [PATCH 031/114] refactor(config): make cookie auth config-based, not address-type-based Remove forced cookie authentication requirement for non-loopback IPs. Cookie auth is now determined solely by whether validator_cookie_path is configured, giving operators control over their security choices in trusted network environments. 
- Remove is_loopback_listen_addr function (no longer needed) - Simplify hostname resolution log message - Add tests for symmetric behavior across address types --- zainod/src/config.rs | 109 +++++++++++++++++++++++++++++++------------ 1 file changed, 79 insertions(+), 30 deletions(-) diff --git a/zainod/src/config.rs b/zainod/src/config.rs index be58dbd03..7fb33da6b 100644 --- a/zainod/src/config.rs +++ b/zainod/src/config.rs @@ -122,34 +122,21 @@ impl ZainodConfig { let validator_addr_result = try_resolve_address(&self.validator_settings.validator_jsonrpc_listen_address); + // Validator address validation: + // - Resolved IPs: must be private (RFC1918/ULA) + // - Hostnames: validated at connection time (supports Docker/K8s service discovery) + // - Cookie auth: determined by validator_cookie_path config, not enforced by address type match validator_addr_result { AddressResolution::Resolved(validator_addr) => { - // Successfully resolved - perform full IP-based validation. if !is_private_listen_addr(&validator_addr) { return Err(IndexerError::ConfigError( "Zaino may only connect to Zebra with private IP addresses.".to_string(), )); } - - #[cfg(not(feature = "no_tls_use_unencrypted_traffic"))] - { - // Require cookie auth for non-loopback addresses. - if !is_loopback_listen_addr(&validator_addr) - && self.validator_settings.validator_cookie_path.is_none() - { - return Err(IndexerError::ConfigError( - "Validator listen address is not loopback, so cookie authentication must be enabled." - .to_string(), - )); - } - } } AddressResolution::UnresolvedHostname { ref address, .. } => { - // Valid hostname format but DNS lookup failed (e.g., Docker DNS). - // Allow this - IP validation will happen at connection time. info!( - "Validator address '{}' is a hostname that cannot be resolved at config time. 
\ - IP validation deferred to connection time.", + "Validator address '{}' cannot be resolved at config time.", address ); } @@ -278,18 +265,6 @@ pub(crate) fn is_private_listen_addr(addr: &SocketAddr) -> bool { } } -/// Validates that the configured `address` is a loopback address. -/// -/// Returns `Ok(BindAddress)` if valid. -#[cfg_attr(feature = "no_tls_use_unencrypted_traffic", allow(dead_code))] -pub(crate) fn is_loopback_listen_addr(addr: &SocketAddr) -> bool { - let ip = addr.ip(); - match ip { - IpAddr::V4(ipv4) => ipv4.is_loopback(), - IpAddr::V6(ipv6) => ipv6.is_loopback(), - } -} - /// Attempts to load config data from a TOML file at the specified path. /// /// If the file cannot be read, or if its contents cannot be parsed into `ZainodConfig`, @@ -1019,4 +994,78 @@ mod test { Ok(()) }); } + + #[test] + /// Validates that cookie authentication is config-based, not address-type-based. + /// Non-loopback private IPs should work without cookie auth (operator's choice). + pub(crate) fn test_cookie_auth_not_forced_for_non_loopback_ip() { + Jail::expect_with(|jail| { + // Non-loopback private IP (192.168.x.x) WITHOUT cookie auth should succeed + let toml_str = r#" + backend = "fetch" + network = "Testnet" + + [validator_settings] + validator_jsonrpc_listen_address = "192.168.1.10:18232" + # Note: NO validator_cookie_path - this is intentional + + [grpc_settings] + listen_address = "127.0.0.1:8137" + "#; + let temp_toml_path = jail.directory().join("no_cookie_auth.toml"); + jail.create_file(&temp_toml_path, toml_str)?; + + let config_result = load_config(&temp_toml_path); + assert!( + config_result.is_ok(), + "Non-loopback IP without cookie auth should succeed. \ + Cookie auth is config-based, not address-type-based. 
Error: {:?}", + config_result.err() + ); + + let config = config_result.unwrap(); + assert!( + config.validator_settings.validator_cookie_path.is_none(), + "Cookie path should be None as configured" + ); + + Ok(()) + }); + } + + #[test] + /// Validates symmetric behavior: both IP and hostname addresses respect configuration. + /// Public IPs should still be rejected (private IP requirement remains). + pub(crate) fn test_public_ip_still_rejected() { + Jail::expect_with(|jail| { + // Public IP should be rejected regardless of cookie auth + let toml_str = r#" + backend = "fetch" + network = "Testnet" + + [validator_settings] + validator_jsonrpc_listen_address = "8.8.8.8:18232" + + [grpc_settings] + listen_address = "127.0.0.1:8137" + "#; + let temp_toml_path = jail.directory().join("public_ip.toml"); + jail.create_file(&temp_toml_path, toml_str)?; + + let config_result = load_config(&temp_toml_path); + assert!( + config_result.is_err(), + "Public IP should be rejected - private IP requirement still applies" + ); + + if let Err(IndexerError::ConfigError(msg)) = config_result { + assert!( + msg.contains("private IP"), + "Error should mention private IP requirement. 
Got: {msg}" + ); + } + + Ok(()) + }); + } } From 9153e75819723aa63e325bef80325a710c74c692 Mon Sep 17 00:00:00 2001 From: fluidvanadium Date: Fri, 9 Jan 2026 18:58:01 +0000 Subject: [PATCH 032/114] added proptest-regressions to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 7c79ed51a..592f8877e 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ docker_cargo/**/* container-target/ .local/ .failed-tests +**/proptest-regressions/** From 895c285ffaed2c4e204fc8eca23e7489bacd34c2 Mon Sep 17 00:00:00 2001 From: fluidvanadium Date: Fri, 9 Jan 2026 18:58:40 +0000 Subject: [PATCH 033/114] added clarifying comment --- zaino-state/src/chain_index/non_finalised_state.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/zaino-state/src/chain_index/non_finalised_state.rs b/zaino-state/src/chain_index/non_finalised_state.rs index 143f508ce..9baccfeae 100644 --- a/zaino-state/src/chain_index/non_finalised_state.rs +++ b/zaino-state/src/chain_index/non_finalised_state.rs @@ -51,6 +51,7 @@ pub struct NonfinalizedBlockCacheSnapshot { /// removed by a reorg. Blocks reorged away have no height. pub blocks: HashMap, /// hashes indexed by height + /// Hashes in this map are part of the best chain. pub heights_to_hashes: HashMap, // Do we need height here? /// The highest known block @@ -386,7 +387,8 @@ impl NonFinalizedState { Some(prev_block) => { if !working_snapshot .heights_to_hashes - .values().any(|hash| hash == prev_block.hash()) + .values() + .any(|hash| hash == prev_block.hash()) { Box::pin(self.handle_reorg(working_snapshot, &prev_block)).await? 
} else { From f1469f1a509f54537fc2debbfadef04bf34d6e80 Mon Sep 17 00:00:00 2001 From: fluidvanadium Date: Fri, 9 Jan 2026 19:04:35 +0000 Subject: [PATCH 034/114] removed dbg --- zaino-state/src/chain_index.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index 12fe201c2..2557082ad 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -799,7 +799,6 @@ impl ChainIndex for NodeBackedChainIndexSubscriber Date: Fri, 9 Jan 2026 19:34:39 +0000 Subject: [PATCH 035/114] Added explanation of a function. --- zaino-state/src/chain_index.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index 2557082ad..c2808e55f 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -576,6 +576,13 @@ impl NodeBackedChainIndexSubscriber { .transpose() } + /** + Searches finalized and non-finalized chains for any blocks containing the transaction. + Ordered with non-finalized first. + Warning: there might be multiple blocks containing the transaction. + In one case, diverging non-finalized chains might each confirm the transaction. + An uncertain case is if there is a gap that would allow a chain to confirm a block into finalized state, but this function is called before the invalidated chain is removed from the ``NonfinalizedBlockCacheSnapshot``. + */ async fn blocks_containing_transaction<'snapshot, 'self_lt, 'iter>( &'self_lt self, snapshot: &'snapshot NonfinalizedBlockCacheSnapshot, From 0abfaea8f31ee3b48214a422641ed28d79853ed0 Mon Sep 17 00:00:00 2001 From: fluidvanadium Date: Fri, 9 Jan 2026 19:39:24 +0000 Subject: [PATCH 036/114] Swapped order of blocks in blocks_containing_transaction to prioritize finalized. 
--- zaino-state/src/chain_index.rs | 46 ++++++++++++++++------------------ 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index c2808e55f..c0d4cd3c8 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -578,7 +578,8 @@ impl NodeBackedChainIndexSubscriber { /** Searches finalized and non-finalized chains for any blocks containing the transaction. - Ordered with non-finalized first. + Ordered with finalized blocks first. + Warning: there might be multiple blocks containing the transaction. In one case, diverging non-finalized chains might each confirm the transaction. An uncertain case is if there is a gap that would allow a chain to confirm a block into finalized state, but this function is called before the invalidated chain is removed from the ``NonfinalizedBlockCacheSnapshot``. @@ -592,35 +593,32 @@ impl NodeBackedChainIndexSubscriber { 'snapshot: 'iter, 'self_lt: 'iter, { - Ok(snapshot - .blocks - .values() - .filter_map(move |block| { + let finalized_blocks_containing_transaction = match self + .finalized_state + .get_tx_location(&types::TransactionHash(txid)) + .await? + { + Some(tx_location) => { + self.finalized_state + .get_chain_block(crate::Height(tx_location.block_height())) + .await? + } + + None => None, + } + .into_iter(); + let non_finalized_blocks_containing_transaction = + snapshot.blocks.values().filter_map(move |block| { block.transactions().iter().find_map(|transaction| { if transaction.txid().0 == txid { - Some(block) + Some(block.clone()) } else { None } }) - }) - .cloned() - .chain( - match self - .finalized_state - .get_tx_location(&types::TransactionHash(txid)) - .await? - { - Some(tx_location) => { - self.finalized_state - .get_chain_block(crate::Height(tx_location.block_height())) - .await? 
- } - - None => None, - } - .into_iter(), - )) + }); + Ok(finalized_blocks_containing_transaction + .chain(non_finalized_blocks_containing_transaction)) + } } From 185a8e4680b55cd8f68ba94d432e3b97053e5397 Mon Sep 17 00:00:00 2001 From: fluidvanadium Date: Fri, 9 Jan 2026 19:44:25 +0000 Subject: [PATCH 037/114] Added clarifying comment. --- zaino-state/src/chain_index.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index c0d4cd3c8..9f724d5a2 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -838,6 +838,7 @@ impl ChainIndex for NodeBackedChainIndexSubscriber = From b50ea7d5175fff39216db4b93a5252d4e93b1ed4 Mon Sep 17 00:00:00 2001 From: fluidvanadium Date: Fri, 9 Jan 2026 20:04:18 +0000 Subject: [PATCH 038/114] Added some comments to a function. They contain -? because I think they need to be answered in the comments right there, but I don't know the answers. --- zaino-state/src/chain_index.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index 9f724d5a2..97d84fce4 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -820,8 +820,8 @@ impl ChainIndex for NodeBackedChainIndexSubscriber ChainIndex for NodeBackedChainIndexSubscriber Date: Fri, 9 Jan 2026 20:10:06 +0000 Subject: [PATCH 039/114] Clarifying comment. 
--- zaino-state/src/chain_index.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index 97d84fce4..34534dc96 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -882,6 +882,7 @@ impl ChainIndex for NodeBackedChainIndexSubscriber Date: Mon, 12 Jan 2026 19:17:18 +0000 Subject: [PATCH 040/114] clarify comment --- zaino-state/src/chain_index.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index 34534dc96..eabffcd08 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -582,7 +582,7 @@ impl NodeBackedChainIndexSubscriber { Warning: there might be multiple blocks containing the transaction. In one case, diverging non-finalized chains might each confirm the transaction. - An uncertain case is if there is a gap that would allow a chain to confirm a block into finalized state, but this function is called before the invalidated chain is removed from the ``NonfinalizedBlockCacheSnapshot``. + An uncertain case: If a transaction, which is already on a NonBest chain, becomes Finalized, it might show up in both places in a single return of this function. */ async fn blocks_containing_transaction<'snapshot, 'self_lt, 'iter>( &'self_lt self, From c555901b620ada82afd3faa7d823d5ce56181058 Mon Sep 17 00:00:00 2001 From: fluidvanadium Date: Mon, 12 Jan 2026 19:22:35 +0000 Subject: [PATCH 041/114] remove debugs --- zaino-state/src/chain_index.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index eabffcd08..c16958d18 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -801,8 +801,7 @@ impl ChainIndex for NodeBackedChainIndexSubscriber Date: Mon, 12 Jan 2026 19:38:16 +0000 Subject: [PATCH 042/114] Clarified comments. 
--- zaino-state/src/chain_index.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index c16958d18..90eeed8d8 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -871,11 +871,11 @@ impl ChainIndex for NodeBackedChainIndexSubscriber Date: Wed, 14 Jan 2026 16:54:38 +0000 Subject: [PATCH 043/114] updated finalised state documentation --- .../src/chain_index/finalised_state.rs | 376 +++++++++++++-- .../chain_index/finalised_state/capability.rs | 454 +++++++++++++++--- .../src/chain_index/finalised_state/db.rs | 144 +++++- .../src/chain_index/finalised_state/db/v0.rs | 249 ++++++++-- .../src/chain_index/finalised_state/db/v1.rs | 282 +++++++++-- .../src/chain_index/finalised_state/entry.rs | 193 ++++++-- .../chain_index/finalised_state/migrations.rs | 211 +++++++- .../src/chain_index/finalised_state/reader.rs | 89 +++- .../src/chain_index/finalised_state/router.rs | 252 +++++++++- 9 files changed, 1990 insertions(+), 260 deletions(-) diff --git a/zaino-state/src/chain_index/finalised_state.rs b/zaino-state/src/chain_index/finalised_state.rs index ab7466794..c1aa3d297 100644 --- a/zaino-state/src/chain_index/finalised_state.rs +++ b/zaino-state/src/chain_index/finalised_state.rs @@ -1,4 +1,171 @@ -//! Holds the Finalised portion of the chain index on disk. +//! Finalised ChainIndex database (ZainoDB) +//! +//! This module provides `ZainoDB`, the **on-disk** backing store for the *finalised* portion of the +//! chain index. +//! +//! “Finalised” in this context means: All but the top 100 blocks in the blockchain. This follows +//! Zebra's model where a reorg of depth greater than 100 would require a complete network restart. +//! +//! `ZainoDB` is a facade around a versioned LMDB-backed database implementation. It is responsible +//! for: +//! - opening or creating the correct on-disk database version, +//! 
- coordinating **database version migrations** when the on-disk version is older than the configured +//! target, +//! - exposing a small set of core read/write operations to the rest of `chain_index`, +//! - and providing a read-only handle (`DbReader`) that should be used for all chain fetches. +//! +//! # Code layout (submodules) +//! +//! The finalised-state subsystem is split into the following files: +//! +//! - [`capability`] +//! - Defines the *capability model* used to represent which features a given DB version supports. +//! - Defines the core DB traits (`DbRead`, `DbWrite`, `DbCore`) and extension traits +//! (`BlockCoreExt`, `TransparentHistExt`, etc.). +//! - Defines versioned metadata (`DbMetadata`, `DbVersion`, `MigrationStatus`) persisted on disk. +//! +//! - [`db`] +//! - Houses concrete DB implementations by **major** version (`db::v0`, `db::v1`) and the +//! version-erased facade enum [`db::DbBackend`] that implements the capability traits. +//! +//! - [`router`] +//! - Implements [`router::Router`], a capability router that can direct calls to either the +//! primary DB or a shadow DB during major migrations. +//! +//! - [`migrations`] +//! - Implements migration orchestration (`MigrationManager`) and concrete migration steps. +//! +//! - [`reader`] +//! - Defines [`reader::DbReader`], a read-only view that routes each query through the router +//! using the appropriate capability request. +//! +//! - [`entry`] +//! - Defines integrity-preserving wrappers (`StoredEntryFixed`, `StoredEntryVar`) used by +//! versioned DB implementations for checksummed key/value storage. +//! +//! # Architecture overview +//! +//! At runtime the layering is: +//! +//! ```text +//! ZainoDB (facade; owns config; exposes simple methods) +//! └─ Router (capability-based routing; primary + optional shadow) +//! └─ DbBackend (enum; V0 / V1; implements core + extension traits) +//! ├─ db::v0::DbV0 (legacy schema; compact-block streamer) +//! 
└─ db::v1::DbV1 (current schema; full indices incl. transparent history indexing) +//! ``` +//! +//! Consumers should avoid depending on the concrete DB version; they should prefer `DbReader`, +//! which automatically routes each read to a backend that actually supports the requested feature. +//! +//! # Database types and serialization strategy +//! +//! The finalised database stores **only** types that are explicitly designed for persistence. +//! Concretely, values written into LMDB are composed from the database-serializable types in +//! [`crate::chain_index::types::db`] (re-exported via [`crate::chain_index::types`]). +//! +//! All persisted types implement [`crate::chain_index::encoding::ZainoVersionedSerde`], which +//! defines Zaino’s on-disk wire format: +//! - a **one-byte version tag** (`encoding::version::V1`, `V2`, …), +//! - followed by a version-specific body (little-endian unless stated otherwise). +//! +//! This “version-tagged value” model allows individual record layouts to evolve while keeping +//! backward compatibility via `decode_vN` implementations. Any incompatible change to persisted +//! types must be coordinated with the database schema versioning in this module (see +//! [`capability::DbVersion`]) and, where required, accompanied by a migration (see [`migrations`]). +//! +//! Database implementations additionally use the integrity wrappers in [`entry`] to store values +//! with a BLAKE2b-256 checksum bound to the encoded key (`key || encoded_value`), providing early +//! detection of corruption or key/value mismatches. +//! +//! # On-disk layout and version detection +//! +//! Database discovery is intentionally conservative: `try_find_current_db_version` returns the +//! **oldest** detected version, because the process may have been terminated mid-migration, leaving +//! multiple version directories on disk. +//! +//! The current logic recognises two layouts: +//! +//! 
- **Legacy v0 layout:** network directories `live/`, `test/`, `local/` containing LMDB +//! `data.mdb` + `lock.mdb`. +//! - **Versioned v1+ layout:** network directories `mainnet/`, `testnet/`, `regtest/` containing +//! version subdirectories enumerated by [`db::VERSION_DIRS`] (e.g. `v1/`). +//! +//! # Versioning and migration strategy +//! +//! `ZainoDB::spawn` selects a **target version** from `BlockCacheConfig::db_version` and compares it +//! against the **current on-disk version** read from `DbMetadata`. +//! +//! - If no database exists, a new DB is created at the configured target version. +//! - If a database exists and `current_version < target_version`, the [`migrations::MigrationManager`] +//! is invoked to migrate the database. +//! +//! Major migrations are designed to be low-downtime and disk-conscious: +//! - a *shadow* DB of the new version is built in parallel, +//! - the router continues serving from the primary DB until the shadow is complete, +//! - then the shadow is promoted to primary, and the old DB is deleted once all handles are dropped. +//! +//! Migration progress is tracked via `DbMetadata::migration_status` (see [`capability::MigrationStatus`]) +//! to support resumption after crashes. +//! +//! **Downgrades are not supported.** If a higher version exists on disk than the configured target, +//! the code currently opens the on-disk DB as-is; do not rely on “forcing” an older version via +//! config. +//! +//! # Core API and invariants +//! +//! `ZainoDB` provides: +//! +//! - Lifecycle: +//! - [`ZainoDB::spawn`], [`ZainoDB::shutdown`], [`ZainoDB::status`], [`ZainoDB::wait_until_ready`] +//! +//! - Writes: +//! - [`ZainoDB::write_block`]: append-only; **must** write `db_tip + 1` +//! - [`ZainoDB::delete_block_at_height`]/[`ZainoDB::delete_block`]: pop-only; **must** delete tip +//! - [`ZainoDB::sync_to_height`]: convenience sync loop that fetches blocks from a `BlockchainSource` +//! +//! - Reads: +//! 
- `db_height`, `get_block_height`, `get_block_hash`, `get_metadata` +//! +//! **Write invariants** matter for correctness across all DB versions: +//! - `write_block` must be called in strictly increasing height order and must not skip heights. +//! - `delete_block*` must only remove the current tip, and must keep all secondary indices consistent. +//! +//! # Usage (recommended pattern) +//! +//! - Construct the DB once at startup. +//! - Await readiness. +//! - Hand out `DbReader` handles for all read/query operations. +//! +//! ```rust,no_run +//! use std::sync::Arc; +//! +//! let db = Arc::new(crate::chain_index::finalised_state::ZainoDB::spawn(cfg, source).await?); +//! db.wait_until_ready().await; +//! +//! let reader = db.to_reader(); +//! let tip = reader.db_height().await?; +//! ``` +//! +//! # Development: extending the finalised DB safely +//! +//! Common tasks and where they belong: +//! +//! - **Add a new query/index:** implement it in the latest DB version (e.g. `db::v1`), then expose it +//! via a capability extension trait in [`capability`], route it via [`reader`], and gate it via +//! `Capability` / `DbVersion::capability`. +//! +//! - **Add a new DB major version (v2):** +//! 1. Add `db::v2` module and `DbV2` implementation. +//! 2. Extend [`db::DbBackend`] with a `V2(DbV2)` variant and delegate trait impls. +//! 3. Append `"v2"` to [`db::VERSION_DIRS`] (no gaps; order matters for discovery). +//! 4. Extend `ZainoDB::spawn` config mapping to accept `cfg.db_version == 2`. +//! 5. Update [`capability::DbVersion::capability`] for `(2, 0)`. +//! 6. Add a migration step in [`migrations`] and register it in `MigrationManager::get_migration`. +//! +//! - **Change an on-disk encoding:** treat it as a schema change. Either implement a migration or +//! bump the DB major version and rebuild in shadow. +//! // TODO / FIX - REMOVE THIS ONCE CHAININDEX LANDS! 
#![allow(dead_code)] @@ -30,17 +197,75 @@ use tokio::time::{interval, MissedTickBehavior}; use super::source::BlockchainSource; +/// Handle to the finalised on-disk chain index. +/// +/// `ZainoDB` is the owner-facing facade for the finalised portion of the ChainIndex: +/// - it opens or creates the appropriate on-disk database version, +/// - it coordinates migrations when `current_version < target_version`, +/// - and it exposes a small set of lifecycle, write, and core read methods. +/// +/// ## Concurrency model +/// Internally, `ZainoDB` holds an [`Arc`] to a [`Router`]. The router provides lock-free routing +/// between a primary database and (during major migrations) an optional shadow database. +/// +/// Query paths should not call `ZainoDB` methods directly. Instead, construct a [`DbReader`] using +/// [`ZainoDB::to_reader`] and perform all reads via that read-only API. This ensures capability- +/// correct routing (especially during migrations). +/// +/// ## Configuration +/// `ZainoDB` stores the [`BlockCacheConfig`] used to: +/// - determine network-specific on-disk paths, +/// - select a target database version (`cfg.db_version`), +/// - and compute per-block metadata (e.g., network selection for `BlockMetadata`). pub(crate) struct ZainoDB { + /// Capability router for the active database backend(s). + /// + /// - In steady state, all requests route to the primary backend. + /// - During a major migration, some or all capabilities may route to a shadow backend until + /// promotion completes. db: Arc, + + /// Immutable configuration snapshot used for sync and metadata construction. cfg: BlockCacheConfig, } +/// Lifecycle, migration control, and core read/write API for the finalised database. 
+/// +/// This `impl` intentionally stays small and policy heavy: +/// - version selection and migration orchestration lives in [`ZainoDB::spawn`], +/// - the storage engine details are encapsulated behind [`DbBackend`] and the capability traits, +/// - higher-level query routing is provided by [`DbReader`]. impl ZainoDB { // ***** DB control ***** - /// Spawns a ZainoDB, opens an existing database if a path is given in the config else creates a new db. + /// Spawns a `ZainoDB` instance. + /// + /// This method: + /// 1. Detects the on-disk database version (if any) using [`ZainoDB::try_find_current_db_version`]. + /// 2. Selects a target schema version from `cfg.db_version`. + /// 3. Opens the existing database at the detected version, or creates a new database at the + /// target version. + /// 4. If an existing database is older than the target (`current_version < target_version`), + /// runs migrations using [`migrations::MigrationManager`]. + /// + /// ## Version selection rules + /// - `cfg.db_version == 0` targets `DbVersion { 0, 0, 0 }` (legacy layout). + /// - `cfg.db_version == 1` targets `DbVersion { 1, 0, 0 }` (current layout). + /// - Any other value returns an error. /// - /// Peeks at the db metadata store to load correct database version. + /// ## Migrations + /// Migrations are invoked only when a database already exists on disk and the opened database + /// reports a lower version than the configured target. + /// + /// Migrations may require access to chain data to rebuild indices. For that reason, a + /// [`BlockchainSource`] is provided here and passed into the migration manager. + /// + /// ## Errors + /// Returns [`FinalisedStateError`] if: + /// - the configured target version is unsupported, + /// - the on-disk database version is unsupported, + /// - opening or creating the database fails, + /// - or any migration step fails. 
pub(crate) async fn spawn( cfg: BlockCacheConfig, source: T, @@ -116,17 +341,35 @@ impl ZainoDB { Ok(Self { db: router, cfg }) } - /// Gracefully shuts down the running ZainoDB, closing all child processes. + /// Gracefully shuts down the running database backend(s). + /// + /// This delegates to the router, which shuts down: + /// - the primary backend, and + /// - any shadow backend currently present (during migrations). + /// + /// After this call returns `Ok(())`, database files may still remain on disk; shutdown does not + /// delete data. (Deletion of old versions is handled by migrations when applicable.) pub(crate) async fn shutdown(&self) -> Result<(), FinalisedStateError> { self.db.shutdown().await } - /// Returns the status of the running ZainoDB. + /// Returns the runtime status of the serving database. + /// + /// This status is provided by the backend implementing [`capability::DbCore::status`]. During + /// migrations, the router determines which backend serves `READ_CORE`, and the status reflects + /// that routing decision. pub(crate) fn status(&self) -> StatusType { self.db.status() } - /// Waits until the ZainoDB returns a Ready status. + /// Waits until the database reports [`StatusType::Ready`]. + /// + /// This polls the router at a fixed interval (100ms) using a Tokio timer. The polling loop uses + /// `MissedTickBehavior::Delay` to avoid catch-up bursts under load or when the runtime is + /// stalled. + /// + /// Call this after [`ZainoDB::spawn`] if downstream services require the database to be fully + /// initialised before handling requests. pub(crate) async fn wait_until_ready(&self) { let mut ticker = interval(Duration::from_millis(100)); ticker.set_missed_tick_behavior(MissedTickBehavior::Delay); @@ -138,21 +381,38 @@ impl ZainoDB { } } - /// Creates a read-only viewer onto the running ZainoDB. + /// Creates a read-only view onto the running database. 
/// - /// NOTE: **ALL** chain fetch should use DbReader instead of directly using ZainoDB. + /// All chain fetches should be performed through [`DbReader`] rather than calling read methods + /// directly on `ZainoDB`. pub(crate) fn to_reader(self: &Arc) -> DbReader { DbReader { inner: Arc::clone(self), } } - /// Look for known dirs to find current db version. + /// Attempts to detect the current on-disk database version from the filesystem layout. + /// + /// The detection is intentionally conservative: it returns the **oldest** detected version, + /// because the process may have been terminated mid-migration, leaving both an older primary + /// and a newer shadow directory on disk. + /// + /// ## Recognised layouts + /// + /// - **Legacy v0 layout** + /// - Network directories: `live/`, `test/`, `local/` + /// - Presence check: both `data.mdb` and `lock.mdb` exist + /// - Reported version: `Some(0)` /// - /// The oldest version is returned as the database may have been closed mid migration. + /// - **Versioned v1+ layout** + /// - Network directories: `mainnet/`, `testnet/`, `regtest/` + /// - Version subdirectories: enumerated by [`db::VERSION_DIRS`] (e.g. `"v1"`) + /// - Presence check: both `data.mdb` and `lock.mdb` exist within a version directory + /// - Reported version: `Some(i + 1)` where `i` is the index in `VERSION_DIRS` /// - /// * `Some(version)` – DB exists, version returned. - /// * `None` – directory or key is missing -> fresh DB. + /// Returns: + /// - `Some(version)` if a compatible database directory is found, + /// - `None` if no database is detected (fresh DB creation case). async fn try_find_current_db_version(cfg: &BlockCacheConfig) -> Option { let legacy_dir = match cfg.network.to_zebra_network().kind() { NetworkKind::Mainnet => "live", @@ -185,9 +445,15 @@ impl ZainoDB { None } - /// Returns the internal db backend for the given db capability. + /// Returns the database backend that should serve the requested capability. 
/// - /// Used by DbReader to route calls to the correct database during major migrations. + /// This is used by [`DbReader`] to route calls to the correct database during major migrations. + /// The router may return either the primary or shadow backend depending on the current routing + /// masks. + /// + /// ## Errors + /// Returns [`FinalisedStateError::FeatureUnavailable`] if neither backend currently serves the + /// requested capability. #[inline] pub(crate) fn backend_for_cap( &self, @@ -198,7 +464,32 @@ impl ZainoDB { // ***** Db Core Write ***** - /// Sync the database to the given height using the given BlockchainSource. + /// Sync the database up to and including `height` using a [`BlockchainSource`]. + /// + /// This method is a convenience ingestion loop that: + /// - determines the current database tip height, + /// - fetches each missing block from the source, + /// - fetches Sapling and Orchard commitment tree roots for each block, + /// - constructs [`BlockMetadata`] and an [`IndexedBlock`], + /// - and appends the block via [`ZainoDB::write_block`]. + /// + /// ## Chainwork handling + /// For database versions that expose [`capability::BlockCoreExt`], chainwork is retrieved from + /// stored header data and threaded through `BlockMetadata`. + /// + /// Legacy v0 databases do not expose header/chainwork APIs; in that case, chainwork is set to + /// zero. This is safe only insofar as v0 consumers do not rely on chainwork-dependent features. + /// + /// ## Invariants + /// - Blocks are written strictly in height order. + /// - This method assumes the source provides consistent block and commitment tree data. + /// + /// ## Errors + /// Returns [`FinalisedStateError`] if: + /// - a block is missing from the source at a required height, + /// - commitment tree roots are missing for Sapling or Orchard, + /// - constructing an [`IndexedBlock`] fails, + /// - or any underlying database write fails. 
pub(crate) async fn sync_to_height( &self, height: Height, @@ -300,20 +591,27 @@ impl ZainoDB { Ok(()) } - /// Writes a block to the database. + /// Appends a single fully constructed [`IndexedBlock`] to the database. + /// + /// This **must** be the next block after the current database tip (`db_tip_height + 1`). + /// Database implementations may assume append-only semantics to maintain secondary index + /// consistency. /// - /// This **MUST** be the *next* block in the chain (db_tip_height + 1). + /// For reorg handling, callers should delete tip blocks using [`ZainoDB::delete_block_at_height`] + /// or [`ZainoDB::delete_block`] before re-appending. pub(crate) async fn write_block(&self, b: IndexedBlock) -> Result<(), FinalisedStateError> { self.db.write_block(b).await } - /// Deletes a block from the database by height. + /// Deletes the block at height `h` from the database. /// - /// This **MUST** be the *top* block in the db. + /// This **must** be the current database tip. Deleting non-tip blocks is not supported because + /// it would require re-writing dependent indices for all higher blocks. /// - /// Uses `delete_block` internally, fails if the block to be deleted cannot be correctly built. - /// If this happens, the block to be deleted must be fetched from the validator and given to `delete_block` - /// to ensure the block has been completely wiped from the database. + /// This method delegates to the backend’s `delete_block_at_height` implementation. If that + /// deletion cannot be completed correctly (for example, if the backend cannot reconstruct all + /// derived index entries needed for deletion), callers must fall back to [`ZainoDB::delete_block`] + /// using an [`IndexedBlock`] fetched from the validator/source to ensure a complete wipe. pub(crate) async fn delete_block_at_height( &self, h: Height, @@ -321,21 +619,33 @@ impl ZainoDB { self.db.delete_block_at_height(h).await } - /// Deletes a given block from the database. 
+ /// Deletes the provided block from the database. /// - /// This **MUST** be the *top* block in the db. + /// This **must** be the current database tip. The provided [`IndexedBlock`] is used to ensure + /// all derived indices created by that block can be removed deterministically. + /// + /// Prefer [`ZainoDB::delete_block_at_height`] when possible; use this method when the backend + /// requires full block contents to correctly reverse all indices. pub(crate) async fn delete_block(&self, b: &IndexedBlock) -> Result<(), FinalisedStateError> { self.db.delete_block(b).await } // ***** DB Core Read ***** - /// Returns the highest block height held in the database. + /// Returns the highest block height stored in the finalised database. + /// + /// Returns: + /// - `Ok(Some(height))` if at least one block is present, + /// - `Ok(None)` if the database is empty. pub(crate) async fn db_height(&self) -> Result, FinalisedStateError> { self.db.db_height().await } - /// Returns the block height for the given block hash *if* present in the finalised state. + /// Returns the main-chain height for `hash` if the block is present in the finalised database. + /// + /// Returns: + /// - `Ok(Some(height))` if the hash is indexed, + /// - `Ok(None)` if the hash is not present (not an error). pub(crate) async fn get_block_height( &self, hash: BlockHash, @@ -343,7 +653,11 @@ impl ZainoDB { self.db.get_block_height(hash).await } - /// Returns the block block hash for the given block height *if* present in the finlaised state. + /// Returns the main-chain block hash for `height` if the block is present in the finalised database. + /// + /// Returns: + /// - `Ok(Some(hash))` if the height is indexed, + /// - `Ok(None)` if the height is not present (not an error). pub(crate) async fn get_block_hash( &self, height: Height, @@ -351,11 +665,17 @@ impl ZainoDB { self.db.get_block_hash(height).await } - /// Returns metadata for the running ZainoDB. 
+ /// Returns the persisted database metadata. + /// + /// See [`capability::DbMetadata`] for the precise fields and on-disk encoding. pub(crate) async fn get_metadata(&self) -> Result { self.db.get_metadata().await } + /// Returns the internal router (test-only). + /// + /// This is intended for unit/integration tests that need to observe or manipulate routing state + /// during migrations. Production code should not depend on the router directly. #[cfg(test)] pub(crate) fn router(&self) -> &Router { &self.db diff --git a/zaino-state/src/chain_index/finalised_state/capability.rs b/zaino-state/src/chain_index/finalised_state/capability.rs index 8318b3a73..f85e48bfd 100644 --- a/zaino-state/src/chain_index/finalised_state/capability.rs +++ b/zaino-state/src/chain_index/finalised_state/capability.rs @@ -1,4 +1,79 @@ -//! Holds ZainoDB capability traits and bitmaps. +//! Capability model, versioned metadata, and DB trait surface +//! +//! This file defines the **capability- and version-aware interface** that all `ZainoDB` database +//! implementations must conform to. +//! +//! The core idea is: +//! - Each concrete DB major version (e.g. `DbV0`, `DbV1`) implements a common set of traits. +//! - A `Capability` bitmap declares which parts of that trait surface are actually supported. +//! - The router (`Router`) and reader (`DbReader`) use *single-feature* requests +//! (`CapabilityRequest`) to route a call to a backend that is guaranteed to support it. +//! +//! This design enables: +//! - running mixed-version configurations during major migrations (primary + shadow), +//! - serving old data while building new indices, +//! - and gating API features cleanly when a backend does not support an extension. +//! +//! # What’s in this file +//! +//! ## Capability / routing types +//! - [`Capability`]: bitflags describing what an *open* database instance can serve. +//! - [`CapabilityRequest`]: a single-feature request (non-composite) used for routing. +//! +//! 
## Versioned metadata +//! - [`DbVersion`]: schema version triple (major/minor/patch) plus a mapping to supported capabilities. +//! - [`DbMetadata`]: persisted singleton stored under the fixed key `"metadata"` in the LMDB +//! metadata database; includes: +//! - `version: DbVersion` +//! - `schema_hash: [u8; 32]` (BLAKE2b-256 of schema definition/contract) +//! - `migration_status: MigrationStatus` +//! - [`MigrationStatus`]: persisted migration progress marker to support resuming after shutdown. +//! +//! All metadata types in this file implement `ZainoVersionedSerde` and therefore have explicit +//! on-disk encoding versions. +//! +//! ## Trait surface +//! This file defines: +//! +//! - **Core traits** implemented by every DB version: +//! - [`DbRead`], [`DbWrite`], and [`DbCore`] +//! +//! - **Extension traits** implemented by *some* versions: +//! - [`BlockCoreExt`], [`BlockTransparentExt`], [`BlockShieldedExt`] +//! - [`CompactBlockExt`] +//! - [`IndexedBlockExt`] +//! - [`TransparentHistExt`] +//! +//! Extension traits must be capability-gated: if a DB does not advertise the corresponding capability +//! bit, routing must not hand that backend out for that request. +//! +//! # Versioning strategy (practical guidance) +//! +//! - `DbVersion::major` is the primary compatibility boundary: +//! - v0 is a legacy compact-block streamer. +//! - v1 adds richer indices (chain block data + transparent history). +//! +//! - `minor`/`patch` can be used for additive or compatible changes, but only if on-disk encodings +//! remain readable and all invariants remain satisfied. +//! +//! - `DbVersion::capability()` must remain conservative: +//! - only advertise capabilities that are fully correct for that on-disk schema. +//! +//! # Development: adding or changing features safely +//! +//! When adding a new feature/query that requires new persistent data: +//! +//! 1. Add a new capability bit to [`Capability`]. +//! 2. 
Add a corresponding variant to [`CapabilityRequest`] and map it in: +//! - `as_capability()` +//! - `name()` +//! 3. Add a new extension trait (or extend an existing one) that expresses the required operations. +//! 4. Implement the extension trait for the latest DB version(s). +//! 5. Update `DbVersion::capability()` for the version(s) that support it. +//! 6. Route it through `DbReader` by requesting the new `CapabilityRequest`. +//! +//! When changing persisted metadata formats, bump the `ZainoVersionedSerde::VERSION` for that type +//! and provide a decoding path in `decode_latest()`. use core::fmt; @@ -18,39 +93,73 @@ use core2::io::{self, Read, Write}; // ***** Capability definition structs ***** bitflags! { - /// Represents what an **open** ZainoDB can provide. + /// Capability bitmap describing what an **open** database instance can serve. /// - /// The façade (`ZainoDB`) sets these flags **once** at open-time from the - /// on-disk `SchemaVersion`, then consults them to decide which helper - /// (`writer()`, `block_core()`, …) it may expose. + /// A capability is an *implementation promise*: if a backend advertises a capability bit, then + /// the corresponding trait surface must be fully and correctly implemented for that backend’s + /// on-disk schema. /// - /// Each flag corresponds 1-for-1 with an extension trait. + /// ## How capabilities are used + /// - [`DbVersion::capability`] maps a persisted schema version to a conservative capability set. + /// - [`crate::chain_index::finalised_state::router::Router`] holds a primary and optional shadow + /// backend and uses masks to decide which backend may serve a given feature. + /// - [`crate::chain_index::finalised_state::reader::DbReader`] requests capabilities via + /// [`CapabilityRequest`] (single-feature requests) and therefore obtains a backend that is + /// guaranteed to support the requested operation. 
+ /// + /// ## Extension trait mapping + /// Each bit corresponds 1-for-1 with a trait surface: + /// - `READ_CORE` / `WRITE_CORE` correspond to [`DbRead`] / [`DbWrite`] + /// - all other bits correspond to extension traits (e.g. [`BlockCoreExt`], [`TransparentHistExt`]) #[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Hash, Default)] pub(crate) struct Capability: u32 { /* ------ core database functionality ------ */ - /// Implements `DbRead`. + + /// Backend implements [`DbRead`]. + /// + /// This includes: + /// - tip height (`db_height`) + /// - hash↔height lookups + /// - reading the persisted metadata singleton. const READ_CORE = 0b0000_0001; - /// Implements `DbWrite`. + + /// Backend implements [`DbWrite`]. + /// + /// This includes: + /// - appending tip blocks, + /// - deleting tip blocks, + /// - and updating the metadata singleton. const WRITE_CORE = 0b0000_0010; /* ---------- database extensions ---------- */ - /// Implements `BlockCoreExt`. + + /// Backend implements [`BlockCoreExt`] (header/txid and tx-index lookups). const BLOCK_CORE_EXT = 0b0000_0100; - /// Implements `BlockTransparentExt`. + + /// Backend implements [`BlockTransparentExt`] (transparent per-block/per-tx data). const BLOCK_TRANSPARENT_EXT = 0b0000_1000; - /// Implements `BlockShieldedExt`. + + /// Backend implements [`BlockShieldedExt`] (sapling/orchard per-block/per-tx data). const BLOCK_SHIELDED_EXT = 0b0001_0000; - /// Implements `CompactBlockExt`. + + /// Backend implements [`CompactBlockExt`] (CompactBlock materialization). const COMPACT_BLOCK_EXT = 0b0010_0000; - /// Implements `IndexedBlockExt`. + + /// Backend implements [`IndexedBlockExt`] (full `IndexedBlock` materialization). const CHAIN_BLOCK_EXT = 0b0100_0000; - /// Implements `TransparentHistExt`. + + /// Backend implements [`TransparentHistExt`] (transparent address history indices). const TRANSPARENT_HIST_EXT = 0b1000_0000; } } impl Capability { - /// All features supported by a **fresh v1** database. 
+ /// Capability set supported by a **fresh** database at the latest major schema supported by this build. + /// + /// This value is used as the “expected modern baseline” for new DB instances. It must remain in + /// sync with: + /// - the latest on-disk schema (`DbV1` today, `DbV2` in the future), + /// - and [`DbVersion::capability`] for that schema. pub(crate) const LATEST: Capability = Capability::READ_CORE .union(Capability::WRITE_CORE) .union(Capability::BLOCK_CORE_EXT) @@ -60,28 +169,56 @@ impl Capability { .union(Capability::CHAIN_BLOCK_EXT) .union(Capability::TRANSPARENT_HIST_EXT); - /// Checks for the given capability. + /// Returns `true` if `self` includes **all** bits from `other`. + /// + /// This is primarily used for feature gating and routing assertions. #[inline] pub(crate) const fn has(self, other: Capability) -> bool { self.contains(other) } } -// A single-feature request type (cannot be composite). +/// A *single-feature* capability request used for routing. +/// +/// `CapabilityRequest` values are intentionally non-composite: each variant maps to exactly one +/// [`Capability`] bit. This keeps routing and error reporting unambiguous. +/// +/// The router uses the request to select a backend that advertises the requested capability. +/// If no backend advertises the capability, the call must fail with +/// [`FinalisedStateError::FeatureUnavailable`]. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub(crate) enum CapabilityRequest { + /// Request the [`DbRead`] core surface. ReadCore, + + /// Request the [`DbWrite`] core surface. WriteCore, + + /// Request the [`BlockCoreExt`] extension surface. BlockCoreExt, + + /// Request the [`BlockTransparentExt`] extension surface. BlockTransparentExt, + + /// Request the [`BlockShieldedExt`] extension surface. BlockShieldedExt, + + /// Request the [`CompactBlockExt`] extension surface. CompactBlockExt, + + /// Request the [`IndexedBlockExt`] extension surface. 
IndexedBlockExt, + + /// Request the [`TransparentHistExt`] extension surface. TransparentHistExt, } impl CapabilityRequest { - /// Map to the corresponding single-bit `Capability`. + /// Maps this request to the corresponding single-bit [`Capability`]. + /// + /// This mapping must remain 1-for-1 with: + /// - the definitions in [`Capability`], and + /// - the human-readable names returned by [`CapabilityRequest::name`]. #[inline] pub(crate) const fn as_capability(self) -> Capability { match self { @@ -96,7 +233,10 @@ impl CapabilityRequest { } } - /// Human-friendly feature name for errors and logs. + /// Returns a stable human-friendly feature name for errors and logs. + /// + /// This value is used in [`FinalisedStateError::FeatureUnavailable`] and must remain stable + /// across refactors to avoid confusing diagnostics. #[inline] pub(crate) const fn name(self) -> &'static str { match self { @@ -112,7 +252,7 @@ impl CapabilityRequest { } } -// Optional convenience conversions. +/// Convenience conversion from a routing request to its single-bit capability. impl From for Capability { #[inline] fn from(req: CapabilityRequest) -> Self { @@ -120,22 +260,43 @@ impl From for Capability { } } -/// Top-level database metadata entry, storing the current schema version. +// ***** Database metadata structs ***** + +/// Persisted database metadata singleton. /// -/// Stored under the fixed key `"metadata"` in the LMDB metadata database. +/// This record is stored under the fixed key `"metadata"` in the LMDB metadata database and is used to: +/// - identify the schema version currently on disk, +/// - bind the database to an explicit schema contract (`schema_hash`), +/// - and persist migration progress (`migration_status`) for crash-safe resumption. +/// +/// ## Encoding +/// `DbMetadata` implements [`ZainoVersionedSerde`]. The encoded body is: +/// - one versioned [`DbVersion`], +/// - a fixed 32-byte schema hash, +/// - one versioned [`MigrationStatus`]. 
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Hash, Default)] #[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] pub(crate) struct DbMetadata { - /// Encodes the version and schema hash. + /// Schema version triple for the on-disk database. pub(crate) version: DbVersion, - /// BLAKE2b-256 hash of the schema definition (includes struct layout, types, etc.) + + /// BLAKE2b-256 hash of the schema definition/contract. + /// + /// This hash is intended to detect accidental schema drift (layout/type changes) across builds. + /// It is not a security boundary; it is a correctness and operator-safety signal. pub(crate) schema_hash: [u8; 32], - /// Migration status of the database, `Empty` outside of migrations. + + /// Persisted migration state, used to resume safely after shutdown/crash. + /// + /// Outside of migrations this should be [`MigrationStatus::Empty`]. pub(crate) migration_status: MigrationStatus, } impl DbMetadata { - /// Creates a new DbMetadata. + /// Constructs a new metadata record. + /// + /// Callers should ensure `schema_hash` matches the schema contract for `version`, and that + /// `migration_status` is set conservatively (typically `Empty` unless actively migrating). pub(crate) fn new( version: DbVersion, schema_hash: [u8; 32], @@ -148,22 +309,28 @@ impl DbMetadata { } } - /// Returns the version data. + /// Returns the persisted schema version. pub(crate) fn version(&self) -> DbVersion { self.version } - /// Returns the version schema hash. + /// Returns the schema contract hash. pub(crate) fn schema(&self) -> [u8; 32] { self.schema_hash } - /// Returns the migration status of the database. + /// Returns the persisted migration status. pub(crate) fn migration_status(&self) -> MigrationStatus { self.migration_status } } +/// Versioned on-disk encoding for the metadata singleton. +/// +/// Body layout (after the `ZainoVersionedSerde` tag byte): +/// 1. `DbVersion` (versioned, includes its own tag) +/// 2. 
`[u8; 32]` schema hash +/// 3. `MigrationStatus` (versioned, includes its own tag) impl ZainoVersionedSerde for DbMetadata { const VERSION: u8 = version::V1; @@ -189,12 +356,17 @@ impl ZainoVersionedSerde for DbMetadata { } } -// DbMetadata: its body is one *versioned* DbVersion (12 + 1 tag) + 32-byte schema hash -// + one *versioned* MigrationStatus (1 + 1 tag) = 47 bytes +/// `DbMetadata` has a fixed encoded body length. +/// +/// Body length = `DbVersion::VERSIONED_LEN` (12 + 1) + 32-byte schema hash +/// + `MigrationStatus::VERSIONED_LEN` (1 + 1) = 47 bytes. impl FixedEncodedLen for DbMetadata { const ENCODED_LEN: usize = DbVersion::VERSIONED_LEN + 32 + MigrationStatus::VERSIONED_LEN; } +/// Human-readable summary for logs. +/// +/// The schema hash is abbreviated to the first 4 bytes for readability. impl core::fmt::Display for DbMetadata { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!( @@ -213,9 +385,20 @@ impl core::fmt::Display for DbMetadata { } } -/// Database schema version information. +/// Database schema version triple. +/// +/// The version is interpreted as `{major}.{minor}.{patch}` and is used to: +/// - select a database backend implementation, +/// - determine supported capabilities for routing, +/// - and enforce safe upgrades via migrations. /// -/// This is used for schema migration safety and compatibility checks. +/// ## Compatibility model +/// - `major` is the primary compatibility boundary (schema family). +/// - `minor` and `patch` may be used for compatible changes, but only if all persisted record +/// encodings remain readable and correctness invariants are preserved. +/// +/// The authoritative capability mapping is provided by [`DbVersion::capability`], and must remain +/// conservative: only advertise features that are correct for the given on-disk schema. 
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Hash, Default)] #[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] pub(crate) struct DbVersion { @@ -228,7 +411,7 @@ pub(crate) struct DbVersion { } impl DbVersion { - /// creates a new DbVersion. + /// Construct a new DbVersion. pub(crate) fn new(major: u32, minor: u32, patch: u32) -> Self { Self { major, @@ -252,6 +435,13 @@ impl DbVersion { self.patch } + /// Returns the conservative capability set for this schema version. + /// + /// Routing relies on this mapping for safety: if a capability is not included here, callers + /// must not assume the corresponding trait surface is available. + /// + /// If a schema version is unknown to this build, this returns [`Capability::empty`], ensuring + /// the router will reject feature requests rather than serving incorrect data. pub(crate) fn capability(&self) -> Capability { match (self.major, self.minor) { // V0: legacy compact block streamer. @@ -277,6 +467,10 @@ impl DbVersion { } } +/// Versioned on-disk encoding for database versions. +/// +/// Body layout (after the tag byte): three little-endian `u32` values: +/// `major`, `minor`, `patch`. impl ZainoVersionedSerde for DbVersion { const VERSION: u8 = version::V1; @@ -302,36 +496,53 @@ impl ZainoVersionedSerde for DbVersion { } } -/* DbVersion: body = 3*(4-byte u32) - 12 bytes */ +// DbVersion: body = 3*(4-byte u32) - 12 bytes impl FixedEncodedLen for DbVersion { const ENCODED_LEN: usize = 4 + 4 + 4; } +/// Formats as `{major}.{minor}.{patch}` for logs and diagnostics. impl core::fmt::Display for DbVersion { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "{}.{}.{}", self.major, self.minor, self.patch) } } -/// Holds migration data. +/// Persisted migration progress marker. /// -/// This is used when the database is shutdown mid-migration to ensure migration correctness. +/// This value exists to make migrations crash-resumable. 
A migration may: +/// - build a shadow database incrementally, +/// - optionally perform partial rebuild phases to limit disk amplification, +/// - and finally promote the shadow to primary. /// -/// NOTE: Some migrations run a partial database rebuild before the final build process. -/// This is done to minimise disk requirements during migrations, -/// enabling the deletion of the old database before the the database is rebuilt in full. +/// Database implementations and the migration manager must treat this value conservatively: +/// if the process is interrupted, the next startup should be able to determine the correct +/// resumption behavior from this status and the on-disk state. #[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Hash)] #[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] #[derive(Default)] pub(crate) enum MigrationStatus { + /// No migration is in progress. #[default] Empty, + + /// A partial build phase is currently in progress. + /// + /// Some migrations split work into phases to limit disk usage (for example, deleting the old + /// database before rebuilding the new one in full). PartialBuidInProgress, + + /// The partial build phase completed successfully. PartialBuildComplete, + + /// The final build phase is currently in progress. FinalBuildInProgress, + + /// Migration work is complete and the database is ready for promotion/steady-state operation. Complete, } +/// Human-readable migration status for logs and diagnostics. impl fmt::Display for MigrationStatus { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let status_str = match self { @@ -345,6 +556,10 @@ impl fmt::Display for MigrationStatus { } } +/// Versioned on-disk encoding for migration status. +/// +/// Body layout (after the tag byte): one `u8` discriminator. +/// Unknown tags must fail decoding. 
impl ZainoVersionedSerde for MigrationStatus { const VERSION: u8 = version::V1; @@ -378,67 +593,115 @@ impl ZainoVersionedSerde for MigrationStatus { } } +/// `MigrationStatus` has a fixed 1-byte encoded body (discriminator). impl FixedEncodedLen for MigrationStatus { const ENCODED_LEN: usize = 1; } // ***** Core Database functionality ***** -/// Read-only operations that *every* ZainoDB version must support. +/// Core read-only operations that *every* database schema version must support. +/// +/// These operations form the minimum required surface for: +/// - determining the chain tip stored on disk, +/// - mapping hashes to heights and vice versa, +/// - and reading the persisted schema metadata. +/// +/// All methods must be consistent with the database’s *finalised* chain view. #[async_trait] pub trait DbRead: Send + Sync { - /// Highest block height stored (or `None` if DB empty). + /// Returns the highest block height stored, or `None` if the database is empty. + /// + /// Implementations must treat the stored height as the authoritative tip for all other core + /// lookups. async fn db_height(&self) -> Result, FinalisedStateError>; - /// Lookup height of a block by its hash. + /// Returns the height for `hash` if present. + /// + /// Returns: + /// - `Ok(Some(height))` if indexed, + /// - `Ok(None)` if not present (not an error). async fn get_block_height( &self, hash: BlockHash, ) -> Result, FinalisedStateError>; - /// Lookup hash of a block by its height. + /// Returns the hash for `height` if present. + /// + /// Returns: + /// - `Ok(Some(hash))` if indexed, + /// - `Ok(None)` if not present (not an error). async fn get_block_hash( &self, height: Height, ) -> Result, FinalisedStateError>; - /// Return the persisted `DbMetadata` singleton. + /// Returns the persisted metadata singleton. + /// + /// This must reflect the schema actually used by the backend instance. 
async fn get_metadata(&self) -> Result; } -/// Write operations that *every* ZainoDB version must support. +/// Core write operations that *every* database schema version must support. +/// +/// The finalised database is updated using *stack semantics*: +/// - blocks are appended at the tip (`write_block`), +/// - and removed only from the tip (`delete_block_at_height` / `delete_block`). +/// +/// Implementations must keep all secondary indices internally consistent with these operations. #[async_trait] pub trait DbWrite: Send + Sync { - /// Persist a fully-validated block to the database. + /// Appends a fully-validated block to the database. + /// + /// Invariant: `block` must be the next height after the current tip (no gaps, no rewrites). async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError>; - /// Deletes a block identified height from every finalised table. + /// Deletes the tip block identified by `height` from every finalised table. + /// + /// Invariant: `height` must be the current database tip height. async fn delete_block_at_height(&self, height: Height) -> Result<(), FinalisedStateError>; - /// Wipe the given block data from every finalised table. + /// Deletes the provided tip block from every finalised table. /// - /// Takes a IndexedBlock as input and ensures all data from this block is wiped from the database. + /// This is the “full-information” deletion path: it takes an [`IndexedBlock`] so the backend + /// can deterministically remove all derived index entries even if reconstructing them from + /// height alone is not possible. /// - /// Used as a backup when delete_block_at_height fails. + /// Invariant: `block` must be the current database tip block. async fn delete_block(&self, block: &IndexedBlock) -> Result<(), FinalisedStateError>; - /// Update the metadata store with the given DbMetadata + /// Replaces the persisted metadata singleton with `metadata`. 
+ /// + /// Implementations must ensure this update is atomic with respect to readers (within the + /// backend’s concurrency model). async fn update_metadata(&self, metadata: DbMetadata) -> Result<(), FinalisedStateError>; } -/// Core database functionality that *every* ZainoDB version must support. +/// Core runtime surface implemented by every backend instance. +/// +/// This trait binds together: +/// - the core read/write operations, and +/// - lifecycle and status reporting for background tasks. +/// +/// In practice, [`crate::chain_index::finalised_state::router::Router`] implements this by +/// delegating to the currently routed core backend(s). #[async_trait] pub trait DbCore: DbRead + DbWrite + Send + Sync { /// Returns the current runtime status (`Starting`, `Syncing`, `Ready`, …). fn status(&self) -> StatusType; - /// Stops background tasks, syncs, etc. + /// Initiates a graceful shutdown of background tasks and closes database resources. async fn shutdown(&self) -> Result<(), FinalisedStateError>; } // ***** Database Extension traits ***** -/// Core block data extension. +/// Core block indexing extension. +/// +/// This extension covers header and txid range fetches plus transaction indexing by [`TxLocation`]. +/// +/// Capability gating: +/// - Backends must only be routed for this surface if they advertise [`Capability::BLOCK_CORE_EXT`]. #[async_trait] pub trait BlockCoreExt: Send + Sync { /// Return block header data by height. @@ -447,7 +710,9 @@ pub trait BlockCoreExt: Send + Sync { height: Height, ) -> Result; - /// Return block headers for the given height range. + /// Returns block headers for the inclusive range `[start, end]`. + /// + /// Callers should ensure `start <= end`. async fn get_block_range_headers( &self, start: Height, @@ -458,41 +723,59 @@ pub trait BlockCoreExt: Send + Sync { async fn get_block_txids(&self, height: Height) -> Result; /// Return block txids for the given height range. 
+ /// + /// Callers should ensure `start <= end`. async fn get_block_range_txids( &self, start: Height, end: Height, ) -> Result, FinalisedStateError>; - /// Fetch the txid bytes for a given TxLocation. + /// Returns the transaction hash for the given [`TxLocation`]. + /// + /// `TxLocation` is the internal transaction index key used by the database. async fn get_txid( &self, tx_location: TxLocation, ) -> Result; - /// Fetch the TxLocation for the given txid, transaction data is indexed by TxLocation internally. + /// Returns the [`TxLocation`] for `txid` if the transaction is indexed. + /// + /// Returns: + /// - `Ok(Some(location))` if indexed, + /// - `Ok(None)` if not present (not an error). + /// + /// NOTE: transaction data is indexed by TxLocation internally. async fn get_tx_location( &self, txid: &TransactionHash, ) -> Result, FinalisedStateError>; } -/// Transparent block data extension. +/// Transparent transaction indexing extension. +/// +/// Capability gating: +/// - Backends must only be routed for this surface if they advertise +/// [`Capability::BLOCK_TRANSPARENT_EXT`]. #[async_trait] pub trait BlockTransparentExt: Send + Sync { - /// Fetch the serialized TransparentCompactTx for the given TxLocation, if present. + /// Returns the serialized [`TransparentCompactTx`] for `tx_location`, if present. + /// + /// Returns: + /// - `Ok(Some(tx))` if present, + /// - `Ok(None)` if not present (not an error). async fn get_transparent( &self, tx_location: TxLocation, ) -> Result, FinalisedStateError>; - /// Fetch block transparent transaction data by height. + /// Fetch block transparent transaction data for given block height. async fn get_block_transparent( &self, height: Height, ) -> Result; - /// Fetches block transparent tx data for the given height range. + /// Returns transparent transaction tx data for the inclusive block height range `[start, end]`. 
async fn get_block_range_transparent( &self, start: Height, @@ -500,7 +783,11 @@ pub trait BlockTransparentExt: Send + Sync { ) -> Result, FinalisedStateError>; } -/// Transparent block data extension. +/// Shielded transaction indexing extension (Sapling + Orchard + commitment tree data). +/// +/// Capability gating: +/// - Backends must only be routed for this surface if they advertise +/// [`Capability::BLOCK_SHIELDED_EXT`]. #[async_trait] pub trait BlockShieldedExt: Send + Sync { /// Fetch the serialized SaplingCompactTx for the given TxLocation, if present. @@ -513,7 +800,7 @@ pub trait BlockShieldedExt: Send + Sync { async fn get_block_sapling(&self, height: Height) -> Result; - /// Fetches block sapling tx data for the given height range. + /// Fetches block sapling tx data for the given (inclusive) height range. async fn get_block_range_sapling( &self, start: Height, @@ -530,7 +817,7 @@ pub trait BlockShieldedExt: Send + Sync { async fn get_block_orchard(&self, height: Height) -> Result; - /// Fetches block orchard tx data for the given height range. + /// Fetches block orchard tx data for the given (inclusive) height range. async fn get_block_range_orchard( &self, start: Height, @@ -543,7 +830,7 @@ pub trait BlockShieldedExt: Send + Sync { height: Height, ) -> Result; - /// Fetches block commitment tree data for the given height range. + /// Fetches block commitment tree data for the given (inclusive) height range. async fn get_block_range_commitment_tree_data( &self, start: Height, @@ -551,31 +838,54 @@ pub trait BlockShieldedExt: Send + Sync { ) -> Result, FinalisedStateError>; } -/// CompactBlock extension. +/// CompactBlock materialization extension. +/// +/// Capability gating: +/// - Backends must only be routed for this surface if they advertise +/// [`Capability::COMPACT_BLOCK_EXT`]. #[async_trait] pub trait CompactBlockExt: Send + Sync { /// Returns the CompactBlock for the given Height. /// - /// TODO: Add separate range fetch method! 
+ /// TODO: Add separate range fetch method as this method is slow for fetching large ranges! async fn get_compact_block( &self, height: Height, ) -> Result; } -/// IndexedBlock v1 extension. +/// `IndexedBlock` materialization extension. +/// +/// Capability gating: +/// - Backends must only be routed for this surface if they advertise +/// [`Capability::CHAIN_BLOCK_EXT`]. #[async_trait] pub trait IndexedBlockExt: Send + Sync { - /// Returns the IndexedBlock for the given Height. + /// Returns the [`IndexedBlock`] for `height`, if present. /// - /// TODO: Add separate range fetch method! + /// Returns: + /// - `Ok(Some(block))` if present, + /// - `Ok(None)` if not present (not an error). + /// + /// TODO: Add separate range fetch method as this method is slow for fetching large ranges! async fn get_chain_block( &self, height: Height, ) -> Result, FinalisedStateError>; } -/// IndexedBlock v1 extension. +/// Transparent address history indexing extension. +/// +/// This extension provides address-scoped queries backed by persisted indices built from the +/// transparent transaction graph (outputs, spends, and derived address events). +/// +/// Capability gating: +/// - Backends must only be routed for this surface if they advertise +/// [`Capability::TRANSPARENT_HIST_EXT`]. +/// +/// Range semantics: +/// - Methods that accept `start_height` and `end_height` interpret the range as inclusive: +/// `[start_height, end_height]`. #[async_trait] pub trait TransparentHistExt: Send + Sync { /// Fetch all address history records for a given transparent address. diff --git a/zaino-state/src/chain_index/finalised_state/db.rs b/zaino-state/src/chain_index/finalised_state/db.rs index 6b1b84d72..57922590f 100644 --- a/zaino-state/src/chain_index/finalised_state/db.rs +++ b/zaino-state/src/chain_index/finalised_state/db.rs @@ -1,4 +1,57 @@ -//! Holds Database implementations by *major* version. +//! Versioned database backends (DbBackend) and major-version dispatch +//! +//! 
This file defines the major-version split for the on-disk finalised database and provides +//! [`DbBackend`], a version-erased enum used throughout the finalised-state subsystem. +//! +//! Concrete database implementations live in: +//! - [`v0`]: legacy schema (compact-block streamer) +//! - [`v1`]: current schema (expanded indices and query surface) +//! +//! `DbBackend` delegates the core DB traits (`DbCore`, `DbRead`, `DbWrite`) and all extension traits +//! to the appropriate concrete implementation. +//! +//! # Capability model integration +//! +//! Each `DbBackend` instance declares its supported [`Capability`] set via `DbBackend::capability()`. +//! This must remain consistent with: +//! - [`capability::DbVersion::capability()`] (schema version → capability mapping), and +//! - the extension trait impls in this file (unsupported methods must return `FeatureUnavailable`). +//! +//! In particular: +//! - v0 supports READ/WRITE core + `CompactBlockExt`. +//! - v1 supports the full current capability set (`Capability::LATEST`), including: +//! - block header/txid/location indexing, +//! - transparent + shielded compact tx access, +//! - indexed block retrieval, +//! - transparent address history indices. +//! +//! # On-disk directory layout (v1+) +//! +//! [`VERSION_DIRS`] enumerates the version subdirectory names used for versioned layouts under the +//! per-network directory (`mainnet/`, `testnet/`, `regtest/`). +//! +//! **Important:** new versions must be appended to `VERSION_DIRS` in order, with no gaps, because +//! discovery code assumes index+1 corresponds to the version number. +//! +//! # Adding a new major version (v2) — checklist +//! +//! 1. Create `db::v2` and implement `DbV2::spawn(cfg)`. +//! 2. Add `V2(DbV2)` variant to [`DbBackend`]. +//! 3. Add `spawn_v2` constructor. +//! 4. Append `"v2"` to [`VERSION_DIRS`]. +//! 5. Extend all trait delegation `match` arms in this file. +//! 6. 
Update `DbBackend::capability()` and `DbVersion::capability()` for the new version. +//! 7. Add a migration step in `migrations.rs` and register it with `MigrationManager`. +//! +//! # Development: adding new indices/queries +//! +//! Prefer implementing new indices in the latest DB version first (e.g. `v1`) and exposing them via: +//! - a capability bit + extension trait in `capability.rs`, +//! - routing via `DbReader` and `Router`, +//! - and a migration/rebuild plan if the index requires historical backfill. +//! +//! Keep unsupported methods explicit: if a DB version does not provide a feature, return +//! `FinalisedStateError::FeatureUnavailable(...)` rather than silently degrading semantics. pub(crate) mod v0; pub(crate) mod v1; @@ -27,29 +80,63 @@ use tokio::time::{interval, MissedTickBehavior}; use super::capability::Capability; -/// New versions must be also be appended to this list and there must be no missing versions for correct functionality. +/// Version subdirectory names for versioned on-disk layouts. +/// +/// This list defines the supported major-version directory names under a per-network directory. +/// For example, a v1 database is stored under `/v1/`. +/// +/// Invariants: +/// - New versions must be appended to this list in order. +/// - There must be no missing versions between entries. +/// - Discovery code assumes `VERSION_DIRS[index]` corresponds to major version `index + 1`. pub(super) const VERSION_DIRS: [&str; 1] = ["v1"]; -/// All concrete database implementations. +/// Version-erased database backend. +/// +/// This enum is the central dispatch point for the finalised-state database: +/// - It is constructed by spawning a concrete backend (for example, v0 or v1). +/// - It implements the core database traits (`DbCore`, `DbRead`, `DbWrite`). +/// - It implements capability extension traits by delegating to the concrete implementation, or by +/// returning [`FinalisedStateError::FeatureUnavailable`] when unsupported. 
+/// +/// Capability reporting is provided by [`DbBackend::capability`] and must match the methods that +/// successfully dispatch in the extension trait implementations below. pub(crate) enum DbBackend { + /// Legacy schema backend. V0(DbV0), + + /// Current schema backend. V1(DbV1), } // ***** Core database functionality ***** impl DbBackend { - /// Spawn a v0 database. + /// Spawn a v0 database backend. + /// + /// This constructs and initializes the legacy schema implementation and returns it wrapped in + /// [`DbBackend::V0`]. pub(crate) async fn spawn_v0(cfg: &BlockCacheConfig) -> Result { Ok(Self::V0(DbV0::spawn(cfg).await?)) } - /// Spawn a v1 database. + /// Spawn a v1 database backend. + /// + /// This constructs and initializes the current schema implementation and returns it wrapped in + /// [`DbBackend::V1`]. pub(crate) async fn spawn_v1(cfg: &BlockCacheConfig) -> Result { Ok(Self::V1(DbV1::spawn(cfg).await?)) } - /// Waits until the ZainoDB returns a Ready status. + /// Wait until the database backend reports [`StatusType::Ready`]. + /// + /// This polls `DbCore::status()` on a fixed interval. It is intended for startup sequencing in + /// components that require the database to be fully initialized before accepting requests. + /// + /// Notes: + /// - This method does not return an error. If the database never becomes ready, it will loop. + /// - The polling interval is intentionally small and uses `MissedTickBehavior::Delay` to avoid + /// burst catch-up behavior under load. pub(crate) async fn wait_until_ready(&self) { let mut ticker = interval(Duration::from_millis(100)); ticker.set_missed_tick_behavior(MissedTickBehavior::Delay); @@ -62,7 +149,10 @@ impl DbBackend { } } - /// Returns the capabilities supported by this database instance. + /// Return the capabilities supported by this database instance. 
+ /// + /// This is the authoritative runtime capability set for this backend and must remain consistent + /// with the dispatch behavior in the extension trait implementations below. pub(crate) fn capability(&self) -> Capability { match self { Self::V0(_) => { @@ -74,12 +164,14 @@ impl DbBackend { } impl From for DbBackend { + /// Wrap an already-constructed v0 database backend. fn from(value: DbV0) -> Self { Self::V0(value) } } impl From for DbBackend { + /// Wrap an already-constructed v1 database backend. fn from(value: DbV1) -> Self { Self::V1(value) } @@ -87,14 +179,19 @@ impl From for DbBackend { #[async_trait] impl DbCore for DbBackend { + /// Return the current status of the backend. + /// + /// This is a thin delegation wrapper over the concrete implementation. fn status(&self) -> StatusType { match self { - // TODO private Self::V0(db) => db.status(), Self::V1(db) => db.status(), } } + /// Shut down the backend and release associated resources. + /// + /// This is a thin delegation wrapper over the concrete implementation. async fn shutdown(&self) -> Result<(), FinalisedStateError> { match self { Self::V0(db) => db.shutdown().await, @@ -105,6 +202,9 @@ impl DbCore for DbBackend { #[async_trait] impl DbRead for DbBackend { + /// Return the highest stored height in the database, if present. + /// + /// This is a thin delegation wrapper over the concrete implementation. async fn db_height(&self) -> Result, FinalisedStateError> { match self { Self::V0(db) => db.db_height().await, @@ -112,6 +212,9 @@ impl DbRead for DbBackend { } } + /// Resolve a block hash to its stored height, if present. + /// + /// This is a thin delegation wrapper over the concrete implementation. async fn get_block_height( &self, hash: BlockHash, @@ -122,6 +225,9 @@ impl DbRead for DbBackend { } } + /// Resolve a block height to its stored block hash, if present. + /// + /// This is a thin delegation wrapper over the concrete implementation. 
async fn get_block_hash( &self, height: Height, @@ -132,6 +238,10 @@ impl DbRead for DbBackend { } } + /// Read the database metadata record. + /// + /// This includes versioning and migration status and is used by the migration manager and + /// compatibility checks. async fn get_metadata(&self) -> Result { match self { Self::V0(db) => db.get_metadata().await, @@ -142,6 +252,9 @@ impl DbRead for DbBackend { #[async_trait] impl DbWrite for DbBackend { + /// Write a fully-indexed block into the database. + /// + /// This is a thin delegation wrapper over the concrete implementation. async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError> { match self { Self::V0(db) => db.write_block(block).await, @@ -149,6 +262,9 @@ impl DbWrite for DbBackend { } } + /// Delete the block at a given height, if present. + /// + /// This is a thin delegation wrapper over the concrete implementation. async fn delete_block_at_height(&self, height: Height) -> Result<(), FinalisedStateError> { match self { Self::V0(db) => db.delete_block_at_height(height).await, @@ -156,6 +272,9 @@ impl DbWrite for DbBackend { } } + /// Delete a specific indexed block from the database. + /// + /// This is a thin delegation wrapper over the concrete implementation. async fn delete_block(&self, block: &IndexedBlock) -> Result<(), FinalisedStateError> { match self { Self::V0(db) => db.delete_block(block).await, @@ -163,6 +282,9 @@ impl DbWrite for DbBackend { } } + /// Update the database metadata record. + /// + /// This is used by migrations and schema management logic. async fn update_metadata(&self, metadata: DbMetadata) -> Result<(), FinalisedStateError> { match self { Self::V0(db) => db.update_metadata(metadata).await, @@ -172,6 +294,12 @@ impl DbWrite for DbBackend { } // ***** Database capability extension traits ***** +// +// Each extension trait corresponds to a distinct capability group. 
The dispatch rules are: +// - If the backend supports the capability, delegate to the concrete implementation. +// - If unsupported, return `FinalisedStateError::FeatureUnavailable("")`. +// +// These names must remain consistent with the capability wiring in `capability.rs`. #[async_trait] impl BlockCoreExt for DbBackend { diff --git a/zaino-state/src/chain_index/finalised_state/db/v0.rs b/zaino-state/src/chain_index/finalised_state/db/v0.rs index 4552e32c2..3704dbf10 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v0.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v0.rs @@ -2,8 +2,43 @@ //! //! WARNING: This is a legacy development database and should not be used in production environments. //! -//! NOTE: This database version was implemented before zaino's `ZainoVersionedSerde` was defined, -//! for this reason ZainoDB-V0 does not use the standard serialisation schema used elswhere in Zaino. +//! This module implements the original “v0” finalised-state database backend. It exists primarily +//! for backward compatibility and for development/testing scenarios where the historical v0 +//! on-disk layout must be opened. +//! +//! ## Important constraints +//! +//! - **Not schema-versioned in the modern sense:** this database version predates Zaino’s +//! `ZainoVersionedSerde` wire format, therefore it does not store version-tagged records and does +//! not participate in fine-grained schema evolution. +//! - **Legacy encoding strategy:** +//! - keys and values are stored as JSON via `serde_json` for most types, +//! - `CompactBlock` values are encoded as raw Prost bytes via a custom `Serialize`/`Deserialize` +//! wrapper (`DbCompactBlock`) so they can still flow through `serde_json`. +//! - **Limited feature surface:** v0 only supports the core height/hash mapping and compact block +//! retrieval. It does not provide the richer indices introduced in v1 (header data, transaction +//! locations, transparent history indexing, etc.). +//! +//! 
## On-disk layout +//! +//! The v0 database uses the legacy network directory names: +//! - mainnet: `live/` +//! - testnet: `test/` +//! - regtest: `local/` +//! +//! Each network directory contains an LMDB environment with (at minimum) these tables: +//! - `heights_to_hashes`: `` +//! - `hashes_to_blocks`: `` (where the compact block is stored +//! as raw Prost bytes wrapped by JSON) +//! +//! ## Runtime model +//! +//! `DbV0` spawns a lightweight background maintenance task that: +//! - publishes `StatusType::Ready` once spawned, +//! - periodically calls `clean_trailing()` to reclaim stale LMDB reader slots. +//! +//! This backend uses `tokio::task::block_in_place` / `tokio::task::spawn_blocking` around LMDB +//! operations to avoid blocking the async runtime. use crate::{ chain_index::{ @@ -35,12 +70,21 @@ use tracing::{info, warn}; // ───────────────────────── ZainoDb v0 Capabilities ───────────────────────── +/// `DbRead` implementation for the legacy v0 backend. +/// +/// Note: v0 exposes only a minimal read surface. Missing data is mapped to `Ok(None)` where the +/// core trait expects optional results. #[async_trait] impl DbRead for DbV0 { + /// Returns the database tip height (`None` if empty). async fn db_height(&self) -> Result, FinalisedStateError> { self.tip_height().await } + /// Returns the block height for a given block hash, if known. + /// + /// For v0, absence is represented as either `DataUnavailable` or `FeatureUnavailable` from the + /// legacy helper; both are mapped to `Ok(None)` here. async fn get_block_height( &self, hash: crate::BlockHash, @@ -55,6 +99,10 @@ impl DbRead for DbV0 { } } + /// Returns the block hash for a given block height, if known. + /// + /// For v0, absence is represented as either `DataUnavailable` or `FeatureUnavailable` from the + /// legacy helper; both are mapped to `Ok(None)` here. 
async fn get_block_hash( &self, height: crate::Height, @@ -69,17 +117,27 @@ impl DbRead for DbV0 { } } + /// Returns synthetic metadata for v0. + /// + /// v0 does not persist `DbMetadata` on disk; this returns a constructed value describing + /// version `0.0.0` and a default schema hash. async fn get_metadata(&self) -> Result { self.get_metadata().await } } +/// `DbWrite` implementation for the legacy v0 backend. +/// +/// v0 supports append-only writes and pop-only deletes at the tip, enforced by explicit checks in +/// the legacy methods. #[async_trait] impl DbWrite for DbV0 { + /// Writes a fully-validated finalised block, enforcing strict height monotonicity. async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError> { self.write_block(block).await } + /// Deletes a block at the given height, enforcing that it is the current tip. async fn delete_block_at_height( &self, height: crate::Height, @@ -87,22 +145,37 @@ impl DbWrite for DbV0 { self.delete_block_at_height(height).await } + /// Deletes a block by explicit content. + /// + /// This is a fallback path used when tip-based deletion cannot safely determine the full set of + /// keys to delete (for example, when corruption is suspected). async fn delete_block(&self, block: &IndexedBlock) -> Result<(), FinalisedStateError> { self.delete_block(block).await } - /// NOTE: V0 does not hold metadata! + /// Updates the metadata singleton. + /// + /// NOTE: v0 does not persist metadata on disk; this is a no-op to satisfy the trait. async fn update_metadata(&self, _metadata: DbMetadata) -> Result<(), FinalisedStateError> { Ok(()) } } +/// `DbCore` implementation for the legacy v0 backend. +/// +/// The core lifecycle API is implemented in terms of a status flag and a lightweight background +/// maintenance task. #[async_trait] impl DbCore for DbV0 { + /// Returns the current runtime status published by this backend. 
fn status(&self) -> StatusType { self.status.load() } + /// Requests shutdown of background tasks and syncs the LMDB environment before returning. + /// + /// This method is best-effort: background tasks are aborted after a timeout and the LMDB + /// environment is fsync’d before exit. async fn shutdown(&self) -> Result<(), FinalisedStateError> { self.status.store(StatusType::Closing); @@ -120,8 +193,12 @@ impl DbCore for DbV0 { } } +/// `CompactBlockExt` implementation for v0. +/// +/// v0’s primary purpose is serving compact blocks (as used by lightwallet protocols). #[async_trait] impl CompactBlockExt for DbV0 { + /// Fetches the compact block at the given height. async fn get_compact_block( &self, height: Height, @@ -130,34 +207,57 @@ impl CompactBlockExt for DbV0 { } } -/// Finalised part of the chain, held in an LMDB database. +/// Finalised part of the chain, held in an LMDB database (legacy v0). +/// +/// `DbV0` maintains two simple indices: +/// - height → hash +/// - hash → compact block +/// +/// It does **not** implement the richer v1 indices (header data, tx location maps, address history, +/// commitment tree tables, etc.). #[derive(Debug)] pub struct DbV0 { - /// LMDB Database Environmant. + /// LMDB database environment handle. + /// + /// The environment is shared between tasks using `Arc` and is configured for high read + /// concurrency (`max_readers`) and reduced I/O overhead (`NO_READAHEAD`). env: Arc, - /// LMDB Databas containing ``. + /// LMDB database containing ``. + /// + /// Heights are stored as 4-byte big-endian keys for correct lexicographic ordering. heights_to_hashes: Database, - /// LMDB Databas containing ``. + /// LMDB database containing ``. + /// + /// The compact block is stored via the `DbCompactBlock` wrapper: raw Prost bytes embedded in a + /// JSON payload. hashes_to_blocks: Database, - /// Database handler task handle. + /// Background maintenance task handle. 
+ /// + /// This task periodically performs housekeeping (currently reader-slot cleanup). db_handler: Option>, - /// Non-finalised state status. + /// Backend lifecycle status. status: AtomicStatus, - /// BlockCache config data. + + /// Configuration snapshot used for path/network selection and sizing parameters. config: BlockCacheConfig, } impl DbV0 { - /// Spawns a new [`DbV0`] and syncs the FinalisedState to the servers finalised state. + /// Spawns a new [`DbV0`] backend. /// - /// Uses ReadStateService to fetch chain data if given else uses JsonRPC client. + /// This: + /// - derives the v0 network directory name (`live` / `test` / `local`), + /// - opens or creates the LMDB environment and required databases, + /// - configures LMDB reader concurrency based on CPU count, + /// - spawns a background maintenance task, + /// - and returns the opened backend. /// - /// Inputs: - /// - config: ChainIndexConfig. + /// # Errors + /// Returns `FinalisedStateError` on any filesystem, LMDB, or task-spawn failure. pub(crate) async fn spawn(config: &BlockCacheConfig) -> Result { info!("Launching ZainoDB"); @@ -214,7 +314,13 @@ impl DbV0 { Ok(zaino_db) } - /// Try graceful shutdown, fall back to abort after a timeout. + /// Attempts a graceful shutdown and falls back to aborting the maintenance task after a timeout. + /// + /// This is a legacy lifecycle method retained for v0 compatibility. Newer backends should + /// implement shutdown via the `DbCore` trait. + /// + /// # Errors + /// Returns `FinalisedStateError` if LMDB cleanup or sync fails. pub(crate) async fn close(&mut self) -> Result<(), FinalisedStateError> { self.status.store(StatusType::Closing); @@ -244,12 +350,15 @@ impl DbV0 { Ok(()) } - /// Returns the status of ZainoDB. + /// Returns the current backend status. pub(crate) fn status(&self) -> StatusType { self.status.load() } - /// Awaits until the DB returns a Ready status. + /// Blocks until the backend reports `StatusType::Ready`. 
+ /// + /// This is primarily used during startup sequencing so callers do not issue reads before the + /// backend is ready to serve queries. pub(crate) async fn wait_until_ready(&self) { let mut ticker = interval(Duration::from_millis(100)); ticker.set_missed_tick_behavior(MissedTickBehavior::Delay); @@ -264,13 +373,15 @@ impl DbV0 { // *** Internal Control Methods *** - /// Spawns the background validator / maintenance task. + /// Spawns the background maintenance task. + /// + /// The v0 maintenance task is intentionally minimal: + /// - publishes `StatusType::Ready` after spawning, + /// - periodically calls `clean_trailing()` to purge stale LMDB reader slots, + /// - exits when status transitions to `StatusType::Closing`. /// - /// * **Startup** – runs a full‐DB validation pass (`initial_root_scan` → - /// `initial_block_scan`). - /// * **Steady-state** – every 5 s tries to validate the next block that - /// appeared after the current `validated_tip`. - /// Every 60 s it also calls `clean_trailing()` to purge stale reader slots. + /// Note: historical comments refer to validation passes; the current implementation only + /// performs maintenance and does not validate chain contents. async fn spawn_handler(&mut self) -> Result<(), FinalisedStateError> { // Clone everything the task needs so we can move it into the async block. let zaino_db = Self { @@ -306,6 +417,10 @@ impl DbV0 { } /// Helper method to wait for the next loop iteration or perform maintenance. + /// + /// This selects between: + /// - a short sleep (steady-state pacing), and + /// - the maintenance tick (currently reader-slot cleanup). async fn zaino_db_handler_sleep(&self, maintenance: &mut tokio::time::Interval) { tokio::select! { _ = tokio::time::sleep(Duration::from_secs(5)) => {}, @@ -317,14 +432,19 @@ impl DbV0 { } } - /// Clears stale reader slots by opening and closing a read transaction. + /// Clears stale LMDB reader slots by opening and closing a read transaction. 
+ /// + /// LMDB only reclaims reader slots when transactions are closed; this method is a cheap and safe + /// way to encourage reclamation in long-running services. async fn clean_trailing(&self) -> Result<(), FinalisedStateError> { let txn = self.env.begin_ro_txn()?; drop(txn); Ok(()) } - /// Opens an lmdb database if present else creates a new one. + /// Opens an LMDB database if present, otherwise creates it. + /// + /// v0 uses this helper for all tables to make environment creation idempotent across restarts. async fn open_or_create_db( env: &Environment, name: &str, @@ -342,7 +462,18 @@ impl DbV0 { // *** DB write / delete methods *** // These should only ever be used in a single DB control task. - /// Writes a given (finalised) [`IndexedBlock`] to ZainoDB. + /// Writes a given (finalised) [`IndexedBlock`] to the v0 database. + /// + /// This method enforces the v0 write invariant: + /// - if the database is non-empty, the new block height must equal `current_tip + 1`, + /// - if the database is empty, the first write must be genesis (`GENESIS_HEIGHT`). + /// + /// The following records are written atomically in a single LMDB write transaction: + /// - `heights_to_hashes[height_be] = hash_json` + /// - `hashes_to_blocks[hash_json] = compact_block_json` + /// + /// On failure, the method attempts to delete the partially-written block (best effort) and + /// returns an `InvalidBlock` error that includes the height/hash context. pub(crate) async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError> { self.status.store(StatusType::Syncing); @@ -462,7 +593,14 @@ impl DbV0 { } } - /// Deletes a block identified height from every finalised table. + /// Deletes the block at `height` from every v0 table. + /// + /// This method enforces the v0 delete invariant: + /// - the requested height must equal the current database tip. 
+ /// + /// The method determines the tip hash from `heights_to_hashes`, then deletes: + /// - `heights_to_hashes[height_be]` + /// - `hashes_to_blocks[hash_json]` pub(crate) async fn delete_block_at_height( &self, height: crate::Height, @@ -534,7 +672,9 @@ impl DbV0 { Ok(()) } - /// This is used as a backup when delete_block_at_height fails. + /// Deletes the provided block’s entries from every v0 table. + /// + /// This is used as a backup when `delete_block_at_height` fails. /// /// Takes a IndexedBlock as input and ensures all data from this block is wiped from the database. /// @@ -592,8 +732,10 @@ impl DbV0 { // ***** DB fetch methods ***** - // Returns the greatest `Height` stored in `headers` - /// (`None` if the DB is still empty). + /// Returns the greatest `Height` stored in `heights_to_hashes` (`None` if empty). + /// + /// Heights are stored as big-endian keys, so the LMDB `MDB_LAST` cursor position corresponds to + /// the maximum height. pub(crate) async fn tip_height(&self) -> Result, FinalisedStateError> { tokio::task::block_in_place(|| { let ro = self.env.begin_ro_txn()?; @@ -616,7 +758,10 @@ impl DbV0 { }) } - /// Fetch the block height in the main chain for a given block hash. + /// Fetches the block height for a given block hash. + /// + /// v0 resolves hash → compact block via `hashes_to_blocks` and then reads the embedded height + /// from the compact block message. async fn get_block_height_by_hash( &self, hash: crate::BlockHash, @@ -635,6 +780,9 @@ impl DbV0 { }) } + /// Fetches the block hash for a given block height. + /// + /// v0 resolves height → hash via `heights_to_hashes`. async fn get_block_hash_by_height( &self, height: crate::Height, @@ -652,6 +800,12 @@ impl DbV0 { }) } + /// Returns constructed metadata for v0. + /// + /// v0 does not persist real metadata. This method returns: + /// - version `0.0.0`, + /// - a zero schema hash, + /// - `MigrationStatus::Complete` (v0 does not participate in resumable migrations). 
async fn get_metadata(&self) -> Result { Ok(DbMetadata { version: DbVersion { @@ -665,6 +819,10 @@ impl DbV0 { }) } + /// Fetches the compact block for a given height. + /// + /// This resolves height → hash via `heights_to_hashes`, then hash → compact block via + /// `hashes_to_blocks`. async fn get_compact_block( &self, height: crate::Height, @@ -683,18 +841,25 @@ impl DbV0 { } } -/// Wrapper for `Height`. +/// Wrapper for `ZebraHeight` used for key encoding. +/// +/// v0 stores heights as 4-byte **big-endian** keys to preserve numeric ordering under LMDB’s +/// lexicographic key ordering. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] struct DbHeight(pub ZebraHeight); impl DbHeight { - /// Converts `[DbHeight]` to 4-byte **big-endian** bytes. - /// Used when storing as an LMDB key. + /// Converts this height to 4-byte **big-endian** bytes. + /// + /// This is used when storing heights as LMDB keys so that increasing heights sort correctly. fn to_be_bytes(self) -> [u8; 4] { self.0 .0.to_be_bytes() } - /// Parse a 4-byte **big-endian** array into a `[DbHeight]`. + /// Parses a 4-byte **big-endian** key into a `DbHeight`. + /// + /// # Errors + /// Returns an error if the key is not exactly 4 bytes long. fn from_be_bytes(bytes: &[u8]) -> Result { let arr: [u8; 4] = bytes .try_into() @@ -703,15 +868,23 @@ impl DbHeight { } } -/// Wrapper for `Hash`. +/// Wrapper for `ZebraHash` so it can be JSON-serialized as an LMDB value/key payload. +/// +/// v0 stores hashes using `serde_json` rather than Zaino’s versioned binary encoding. #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] struct DbHash(pub ZebraHash); -/// Wrapper for `CompactBlock`. +/// Wrapper for `CompactBlock` for JSON storage. +/// +/// `CompactBlock` is a Prost message; v0 stores it by encoding to raw bytes and embedding those +/// bytes inside a serde payload. 
#[derive(Debug, Clone, PartialEq)] struct DbCompactBlock(pub CompactBlock); /// Custom `Serialize` implementation using Prost's `encode_to_vec()`. +/// +/// This serializes the compact block as raw bytes so it can be stored via `serde_json` as a byte +/// array payload. impl Serialize for DbCompactBlock { fn serialize(&self, serializer: S) -> Result where @@ -723,6 +896,8 @@ impl Serialize for DbCompactBlock { } /// Custom `Deserialize` implementation using Prost's `decode()`. +/// +/// This reverses the `Serialize` strategy by decoding the stored raw bytes into a `CompactBlock`. impl<'de> Deserialize<'de> for DbCompactBlock { fn deserialize(deserializer: D) -> Result where diff --git a/zaino-state/src/chain_index/finalised_state/db/v1.rs b/zaino-state/src/chain_index/finalised_state/db/v1.rs index 43fd80ed3..3c2c8e171 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v1.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v1.rs @@ -1,4 +1,28 @@ -//! ZainoDB V1 Implementation +//! ZainoDB Finalised State (Schema V1) +//! +//! This module provides the **V1** implementation of Zaino’s LMDB-backed finalised-state database. +//! It stores a validated, append-only view of the best chain and exposes a set of capability traits +//! (read, write, metadata, block-range fetchers, compact-block generation, and transparent history). +//! +//! ## On-disk layout +//! The V1 on-disk layout is described by an ASCII schema file that is embedded into the binary at +//! compile time (`db_schema_v1_0.txt`). A fixed 32-byte BLAKE2b checksum of that schema description +//! is stored in / compared against the database metadata to detect accidental schema drift. +//! +//! ## Validation model +//! The database maintains a monotonically increasing **validated tip** (`validated_tip`) and a set +//! of validated heights above that tip (`validated_set`) to support out-of-order validation. Reads +//! 
that require correctness use `resolve_validated_hash_or_height()` to ensure the requested height +//! is validated (performing on-demand validation if required). +//! +//! A background task performs: +//! - an initial full scan of the stored data for checksum / structural correctness, then +//! - steady-state incremental validation of newly appended blocks. +//! +//! ## Concurrency model +//! LMDB supports many concurrent readers and a single writer per environment. This implementation +//! uses `tokio::task::block_in_place` / `spawn_blocking` for LMDB operations to avoid blocking the +//! async runtime, and configures `max_readers` to support high read concurrency. use crate::{ chain_index::{ @@ -46,8 +70,11 @@ use tracing::{error, info, warn}; // ───────────────────────── Schema v1 constants ───────────────────────── /// Full V1 schema text file. -// 1. Bring the *exact* ASCII description of the on-disk layout into the binary -// at compile-time. The path is relative to this source file. +/// +/// This is the exact ASCII description of the V1 on-disk layout embedded into the binary at +/// compile-time. The path is relative to this source file. +/// +/// 1. Bring the *exact* ASCII description of the on-disk layout into the binary at compile-time. pub(crate) const DB_SCHEMA_V1_TEXT: &str = include_str!("db_schema_v1_0.txt"); /* @@ -73,6 +100,9 @@ pub(crate) const DB_SCHEMA_V1_TEXT: &str = include_str!("db_schema_v1_0.txt"); */ /// *Current* database V1 schema hash, used for version validation. +/// +/// This value is compared against the schema hash stored in the metadata record to detect schema +/// drift without a corresponding version bump. 
pub(crate) const DB_SCHEMA_V1_HASH: [u8; 32] = [ 0xbc, 0x13, 0x52, 0x47, 0xb4, 0x6b, 0xb4, 0x6a, 0x4a, 0x97, 0x1e, 0x4c, 0x27, 0x07, 0x82, 0x6f, 0x80, 0x95, 0xe6, 0x62, 0xb6, 0x91, 0x9d, 0x28, 0x87, 0x2c, 0x71, 0xb6, 0xbd, 0x67, 0x65, 0x93, @@ -87,6 +117,10 @@ pub(crate) const DB_VERSION_V1: DbVersion = DbVersion { // ───────────────────────── ZainoDb v1 Capabilities ───────────────────────── +/// [`DbRead`] capability implementation for [`DbV1`]. +/// +/// This trait is the read-only surface used by higher layers. Methods typically delegate to +/// inherent async helpers that enforce validated reads where required. #[async_trait] impl DbRead for DbV1 { async fn db_height(&self) -> Result, FinalisedStateError> { @@ -126,6 +160,10 @@ impl DbRead for DbV1 { } } +/// [`DbWrite`] capability implementation for [`DbV1`]. +/// +/// This trait represents the mutating surface (append / delete tip / update metadata). Writes are +/// performed via LMDB write transactions and validated before becoming visible as “known-good”. #[async_trait] impl DbWrite for DbV1 { async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError> { @@ -145,6 +183,9 @@ impl DbWrite for DbV1 { } } +/// [`DbCore`] capability implementation for [`DbV1`]. +/// +/// This trait exposes lifecycle operations and a high-level status indicator. #[async_trait] impl DbCore for DbV1 { fn status(&self) -> StatusType { @@ -168,6 +209,9 @@ impl DbCore for DbV1 { } } +/// [`BlockCoreExt`] capability implementation for [`DbV1`]. +/// +/// Provides access to block headers, txid lists, and transaction location mapping. #[async_trait] impl BlockCoreExt for DbV1 { async fn get_block_header( @@ -212,6 +256,10 @@ impl BlockCoreExt for DbV1 { } } +/// [`BlockTransparentExt`] capability implementation for [`DbV1`]. +/// +/// Provides access to transparent compact transaction data at both per-transaction and per-block +/// granularity. 
#[async_trait] impl BlockTransparentExt for DbV1 { async fn get_transparent( @@ -237,6 +285,10 @@ impl BlockTransparentExt for DbV1 { } } +/// [`BlockShieldedExt`] capability implementation for [`DbV1`]. +/// +/// Provides access to Sapling / Orchard compact transaction data and per-block commitment tree +/// metadata. #[async_trait] impl BlockShieldedExt for DbV1 { async fn get_sapling( @@ -299,6 +351,10 @@ impl BlockShieldedExt for DbV1 { } } +/// [`CompactBlockExt`] capability implementation for [`DbV1`]. +/// +/// Exposes `zcash_client_backend`-compatible compact blocks derived from stored header + shielded +/// transaction data. #[async_trait] impl CompactBlockExt for DbV1 { async fn get_compact_block( @@ -309,6 +365,9 @@ impl CompactBlockExt for DbV1 { } } +/// [`IndexedBlockExt`] capability implementation for [`DbV1`]. +/// +/// Exposes reconstructed [`IndexedBlock`] values from stored per-height entries. #[async_trait] impl IndexedBlockExt for DbV1 { async fn get_chain_block( @@ -319,6 +378,10 @@ impl IndexedBlockExt for DbV1 { } } +/// [`TransparentHistExt`] capability implementation for [`DbV1`]. +/// +/// Provides address history queries built over the LMDB `DUP_SORT`/`DUP_FIXED` address-history +/// database. #[async_trait] impl TransparentHistExt for DbV1 { async fn addr_records( @@ -383,49 +446,67 @@ impl TransparentHistExt for DbV1 { // ───────────────────────── ZainoDb v1 Implementation ───────────────────────── -/// Zaino’s Finalised state database V1. -/// Implements a persistent LMDB-backed chain index for fast read access and verified data. +/// Zaino’s Finalised State database V1. +/// +/// This type owns an LMDB [`Environment`] and a fixed set of named databases representing the V1 +/// schema. It implements the capability traits used by the rest of the chain indexer. +/// +/// Data is stored per-height in “best chain” order and is validated (checksums and continuity) +/// before being treated as reliable for downstream reads. 
pub(crate) struct DbV1 { /// Shared LMDB environment. env: Arc, - /// Block headers: `Height` -> `StoredEntry` + /// Block headers: `Height` -> `StoredEntryVar` /// /// Stored per-block, in order. headers: Database, - /// Txids: `Height` -> `StoredEntry` + + /// Txids: `Height` -> `StoredEntryVar` /// /// Stored per-block, in order. txids: Database, - /// Transparent: `Height` -> `StoredEntry>` + + /// Transparent: `Height` -> `StoredEntryVar>` /// /// Stored per-block, in order. transparent: Database, - /// Sapling: `Height` -> `StoredEntry>` + + /// Sapling: `Height` -> `StoredEntryVar>` /// /// Stored per-block, in order. sapling: Database, - /// Orchard: `Height` -> `StoredEntry>` + + /// Orchard: `Height` -> `StoredEntryVar>` /// /// Stored per-block, in order. orchard: Database, - /// Block commitment tree data: `Height` -> `StoredEntry>` + + /// Block commitment tree data: `Height` -> `StoredEntryFixed>` /// /// Stored per-block, in order. commitment_tree_data: Database, - /// Heights: `Hash` -> `StoredEntry` + + /// Heights: `Hash` -> `StoredEntryFixed` /// /// Used for hash based fetch of the best chain (and random access). heights: Database, - /// Spent outpoints: `Outpoint` -> `StoredEntry>` + + /// Spent outpoints: `Outpoint` -> `StoredEntryFixed>` /// /// Used to check spent status of given outpoints, retuning spending tx. spent: Database, - /// Transparent address history: `AddrScript` -> `StoredEntry` + + /// Transparent address history: `AddrScript` -> duplicate values of `StoredEntryFixed`. + /// + /// Stored as an LMDB `DUP_SORT | DUP_FIXED` database keyed by address script bytes. Each duplicate + /// value is a fixed-size entry encoding one address event (mined output or spending input), + /// including flags and checksum. 
/// /// Used to search all transparent address indexes (txids, utxos, balances, deltas) address_history: Database, - /// Metadata: singleton entry "metadata" -> `StoredEntry` + + /// Metadata: singleton entry "metadata" -> `StoredEntryFixed` metadata: Database, /// Contiguous **water-mark**: every height ≤ `validated_tip` is known-good. @@ -433,6 +514,7 @@ pub(crate) struct DbV1 { /// Wrapped in an `Arc` so the background validator and any foreground tasks /// all see (and update) the **same** atomic. validated_tip: Arc, + /// Heights **above** the tip that have also been validated. /// /// Whenever the next consecutive height is inserted we pop it @@ -440,7 +522,7 @@ pub(crate) struct DbV1 { /// grows beyond the number of “holes” in the sequence. validated_set: DashSet, - /// Database handler task handle. + /// Background validator / maintenance task handle. db_handler: Option>, /// ZainoDB status. @@ -450,13 +532,23 @@ pub(crate) struct DbV1 { config: BlockCacheConfig, } +/// Inherent implementation for [`DbV1`]. +/// +/// This block contains: +/// - environment / database setup (`spawn`, `open_or_create_db`, schema checks), +/// - background validation task management, +/// - write/delete operations for finalised blocks, +/// - validated read fetchers used by the capability trait implementations, and +/// - internal validation / indexing helpers. impl DbV1 { - /// Spawns a new [`DbV1`] and syncs the FinalisedState to the servers finalised state. + /// Spawns a new [`DbV1`] and opens (or creates) the LMDB environment for the configured network. /// - /// Uses ReadStateService to fetch chain data if given else uses JsonRPC client. - /// - /// Inputs: - /// - config: ChainIndexConfig. 
+ /// This method: + /// - chooses a versioned path suffix (`...//v1`), + /// - configures LMDB map size and reader slots, + /// - opens or creates all V1 named databases, + /// - validates or initializes the `"metadata"` record (schema hash + version), and + /// - spawns the background validator / maintenance task. pub(crate) async fn spawn(config: &BlockCacheConfig) -> Result { info!("Launching ZainoDB"); @@ -578,7 +670,9 @@ impl DbV1 { self.status.load() } - /// Awaits until the DB returns a Ready status. + /// Waits until the DB reaches [`StatusType::Ready`]. + /// + /// NOTE: This does not currently backpressure on LMDB reader availability. /// /// TODO: check db for free readers and wait if busy. pub(crate) async fn wait_until_ready(&self) { @@ -597,11 +691,11 @@ impl DbV1 { /// Spawns the background validator / maintenance task. /// - /// * **Startup** – runs a full‐DB validation pass (`initial_root_scan` → - /// `initial_block_scan`). - /// * **Steady-state** – every 5 s tries to validate the next block that - /// appeared after the current `validated_tip`. - /// Every 60 s it also calls `clean_trailing()` to purge stale reader slots. + /// The task runs: + /// - **Startup:** full validation passes (`initial_spent_scan`, `initial_address_history_scan`, + /// `initial_block_scan`). + /// - **Steady state:** periodically attempts to validate the next height after `validated_tip`. + /// Separately, it performs periodic trailing-reader cleanup via `clean_trailing()`. async fn spawn_handler(&mut self) -> Result<(), FinalisedStateError> { // Clone everything the task needs so we can move it into the async block. let zaino_db = Self { @@ -718,7 +812,7 @@ impl DbV1 { } } - /// Validate every stored `TxLocation`. + /// Validates every stored spent-outpoint entry (`Outpoint` -> `TxLocation`) by checksum. 
async fn initial_spent_scan(&self) -> Result<(), FinalisedStateError> { let env = self.env.clone(); let spent = self.spent; @@ -745,7 +839,7 @@ impl DbV1 { .map_err(|e| FinalisedStateError::Custom(format!("Tokio task error: {e}")))? } - /// Validate every stored `AddrEventBytes`. + /// Validates every stored address-history record (`AddrScript` duplicates of `AddrEventBytes`) by checksum. async fn initial_address_history_scan(&self) -> Result<(), FinalisedStateError> { let env = self.env.clone(); let address_history = self.address_history; @@ -773,7 +867,7 @@ impl DbV1 { .map_err(|e| FinalisedStateError::Custom(format!("spawn_blocking failed: {e}")))? } - /// Scan the whole finalised chain once at start-up and validate every block. + /// Scans the whole finalised chain once at start-up and validates every block by checksum and continuity. async fn initial_block_scan(&self) -> Result<(), FinalisedStateError> { let zaino_db = Self { env: Arc::clone(&self.env), @@ -3155,20 +3249,67 @@ impl DbV1 { } // *** Internal DB validation / varification *** - - /// Return `true` if *height* is already known-good. + // + // The finalised-state database supports **incremental, concurrency-safe validation** of blocks that + // have already been written to LMDB. + // + // Validation is tracked using two structures: + // + // - `validated_tip` (atomic u32): every height `<= validated_tip` is known-good (contiguous prefix). + // - `validated_set` (DashSet): a sparse set of individually validated heights `> validated_tip` + // (i.e., “holes” validated out-of-order). + // + // This scheme provides: + // - O(1) fast-path for the common case (`height <= validated_tip`), + // - O(1) expected membership tests above the tip, + // - and an efficient “coalescing” step that advances `validated_tip` when gaps are filled. 
+ // + // IMPORTANT: + // - Validation here is *structural / integrity* validation of stored records plus basic chain + // continuity checks (parent hash, header merkle root vs txids). + // - It is intentionally “lightweight” and does **not** attempt full consensus verification. + // - NOTE: It is planned to add basic shielded tx data validation using the "block_commitments" + // field in [`BlockData`] however this is currently unimplemented. + + /// Return `true` if `height` is already known-good. /// - /// O(1) look-ups: we check the tip first (fast) and only hit the DashSet - /// when `h > tip`. + /// Semantics: + /// - `height <= validated_tip` is always validated (contiguous prefix). + /// - For `height > validated_tip`, membership is tracked in `validated_set`. + /// + /// Performance: + /// - O(1) in the fast-path (`height <= validated_tip`). + /// - O(1) expected for DashSet membership checks when `height > validated_tip`. + /// + /// Concurrency: + /// - `validated_tip` is read with `Acquire` so subsequent reads of dependent state in the same + /// thread are not reordered before the tip read. fn is_validated(&self, h: u32) -> bool { let tip = self.validated_tip.load(Ordering::Acquire); h <= tip || self.validated_set.contains(&h) } - /// Mark *height* as validated and coalesce contiguous ranges. + /// Mark `height` as validated and coalesce contiguous ranges into `validated_tip`. + /// + /// This method maintains the invariant: + /// - After completion, all heights `<= validated_tip` are validated. + /// - All validated heights `> validated_tip` remain represented in `validated_set`. + /// + /// Algorithm: + /// 1. If `height == validated_tip + 1`, attempt to atomically advance `validated_tip`. + /// 2. If that succeeds, repeatedly consume `validated_tip + 1` from `validated_set` and advance + /// `validated_tip` until the next height is not present. + /// 3. 
If `height > validated_tip + 1`, record it as an out-of-order validated “hole” in + /// `validated_set`. + /// 4. If `height <= validated_tip`, it is already covered by the contiguous prefix; no action. /// - /// 1. Insert it into the DashSet (if it was a “hole”). - /// 2. While `validated_tip + 1` is now present, pop it and advance the tip. + /// Concurrency: + /// - Uses CAS to ensure only one thread advances `validated_tip` at a time. + /// - Stores after successful coalescing use `Release` so other threads observing the new tip do not + /// see older state re-ordered after the tip update. + /// + /// NOTE: + /// - This function is intentionally tolerant of races: redundant inserts / removals are benign. fn mark_validated(&self, h: u32) { let mut next = h; loop { @@ -3205,9 +3346,40 @@ impl DbV1 { /// Lightweight per-block validation. /// - /// *Confirms the checksum* in each of the three per-block tables. + /// This validates the internal consistency of the LMDB-backed records for the specified + /// `(height, hash)` pair and marks the height as validated on success. /// - /// WARNING: This is a blocking function and **MUST** be called within a blocking thread / task. 
+ /// Validations performed: + /// - Per-height tables: checksum + deserialization integrity for: + /// - `headers` (BlockHeaderData) + /// - `txids` (TxidList) + /// - `transparent` (TransparentTxList) + /// - `sapling` (SaplingTxList) + /// - `orchard` (OrchardTxList) + /// - `commitment_tree_data` (CommitmentTreeData; fixed entry) + /// - Hash→height mapping: + /// - checksum integrity under `hash_key` + /// - mapped height equals the requested `height` + /// - Chain continuity: + /// - for `height > 1`, the block header `parent_hash` equals the stored hash at `height - 1` + /// - Header merkle root: + /// - merkle root computed from `txids` matches the header’s merkle root + /// - Transparent indices / histories: + /// - each non-coinbase transparent input must have a `spent` record pointing at this tx + /// - each transparent output must have an addrhist mined record + /// - each non-coinbase transparent input must have an addrhist input record + /// + /// Fast-path: + /// - If `height` is already known validated (`is_validated`), this is a no-op. + /// + /// Error semantics: + /// - Returns `FinalisedStateError::InvalidBlock { .. }` when any integrity/continuity check fails. + /// - Returns LMDB errors for underlying storage failures (e.g., missing keys), which are then + /// typically mapped by callers into `DataUnavailable` where appropriate. + /// + /// WARNING: + /// - This is a blocking function and **MUST** be called from a blocking context + /// (`tokio::task::block_in_place` or `spawn_blocking`). fn validate_block_blocking( &self, height: Height, @@ -3455,7 +3627,9 @@ impl DbV1 { Ok(()) } - /// Double‑SHA‑256 (SHA256d) as used by Bitcoin/Zcash headers and Merkle nodes. + /// Double-SHA-256 (SHA256d), as used by Bitcoin/Zcash headers and merkle nodes. + /// + /// Input and output are raw bytes (no endianness conversions are performed here). 
fn sha256d(data: &[u8]) -> [u8; 32] { let mut hasher = Sha256::new(); Digest::update(&mut hasher, data); // first pass @@ -3468,8 +3642,16 @@ impl DbV1 { out } - /// Compute the Merkle root of a non‑empty slice of 32‑byte transaction IDs. - /// `txids` must be in block order and already in internal (little‑endian) byte order. + /// Compute the merkle root of a non-empty slice of 32-byte transaction IDs. + /// + /// Requirements: + /// - `txids` must be in block order. + /// - `txids` must already be in the internal byte order (little endian) expected by the header merkle root + /// comparison performed by this module (no byte order transforms are applied here). + /// + /// Behavior: + /// - Duplicates the final element when the layer width is odd, matching Bitcoin/Zcash merkle rules. + /// - Uses SHA256d over 64-byte concatenated pairs at each layer. fn calculate_block_merkle_root(txids: &[[u8; 32]]) -> [u8; 32] { assert!( !txids.is_empty(), @@ -3503,10 +3685,20 @@ impl DbV1 { layer[0] } - /// Validate a contiguous range of block heights `[start, end]` inclusive. + /// Validate a contiguous inclusive range of block heights `[start, end]`. + /// + /// This method is optimized to skip heights already known validated via `validated_tip` / + /// `validated_set`. + /// + /// Semantics: + /// - If `end < start`, returns an error. + /// - If the entire range is already validated, returns `(start, end)` without touching LMDB. + /// - Otherwise, validates each missing height in ascending order using `validate_block_blocking`. /// - /// Optimized to skip blocks already known to be validated. - /// Returns the full requested `(start, end)` range on success. + /// WARNING: + /// - This uses `tokio::task::block_in_place` internally and performs LMDB reads; callers should + /// avoid invoking it from latency-sensitive async paths unless they explicitly intend to + /// validate on-demand. 
async fn validate_block_range( &self, start: Height, diff --git a/zaino-state/src/chain_index/finalised_state/entry.rs b/zaino-state/src/chain_index/finalised_state/entry.rs index eea461c49..73183f376 100644 --- a/zaino-state/src/chain_index/finalised_state/entry.rs +++ b/zaino-state/src/chain_index/finalised_state/entry.rs @@ -1,4 +1,69 @@ -//! DB stored data wrappers structs. +//! Checksummed database entry wrappers (fixed and variable length) +//! +//! This file defines small wrapper types used by concrete DB versions for storing values in +//! LMDB with an **integrity checksum**. +//! +//! Each wrapper stores: +//! - the inner *versioned* record `T: ZainoVersionedSerde`, and +//! - a BLAKE2b-256 checksum computed over `key || encoded_item`. +//! +//! The checksum is intended to: +//! - detect corruption or partial writes, +//! - detect accidental key/value mismatches (e.g., writing under the wrong key encoding), +//! - and provide a cheap integrity check during migrations or debugging. +//! +//! ## Integrity model (scope) +//! +//! The checksum is a **corruption and correctness** signal, not a cryptographic authentication +//! mechanism. It helps detect accidental corruption, partial writes, or key/value mismatches, but +//! it does not provide authenticity against a malicious database writer, this must be ensured in +//! actual database implementations by validating block data on startup and on block writes. +//! +//! # Two wrapper forms +//! +//! - [`StoredEntryFixed`] for fixed-length values: +//! - requires `T: FixedEncodedLen` so that the total encoded value length is constant. +//! - important when LMDB uses `DUP_SORT` and/or `DUP_FIXED` flags where record sizing matters. +//! +//! - [`StoredEntryVar`] for variable-length values: +//! - prefixes the serialized record with a CompactSize length so decoding is bounded and safe. +//! +//! Both wrappers are themselves versioned (`ZainoVersionedSerde`), which means their outer layout can +//! 
evolve in a controlled way if required. +//! +//! # Encoding contract (conceptual) +//! +//! `StoredEntryFixed` encodes as: +//! - StoredEntry version tag +//! - `T::serialize()` bytes (which include `T`'s own record version tag) +//! - 32-byte checksum +//! +//! `StoredEntryVar` encodes as: +//! - StoredEntry version tag +//! - CompactSize(length of `T::serialize()` bytes) +//! - `T::serialize()` bytes +//! - 32-byte checksum +//! +//! # Usage guidelines +//! +//! - Always compute the checksum using the **exact bytes** used as the DB key (i.e. the encoded key). +//! - On read, verify the checksum before trusting decoded contents. +//! - Treat checksum mismatch as a corruption/incompatibility signal: +//! - return a hard error, +//! - or trigger a rebuild path, depending on the calling context. +//! +//! # Development: when to pick fixed vs var +//! +//! - Use `StoredEntryFixed` when: +//! - `T` has a stable, fixed-size encoding and you want predictable sizing, or +//! - the LMDB table relies on fixed-size duplicates. +//! +//! - Use `StoredEntryVar` when: +//! - `T` naturally contains variable-length payloads (vectors, scripts, etc.), or +//! - the value size may grow over time and you want to avoid schema churn. +//! +//! If you change the wrapper layout itself, bump the wrapper’s `ZainoVersionedSerde::VERSION` and +//! maintain a decode path (or bump the DB major version and migrate). use crate::{ read_fixed_le, version, write_fixed_le, CompactSize, FixedEncodedLen, ZainoVersionedSerde, @@ -10,25 +75,47 @@ use blake2::{ }; use core2::io::{self, Read, Write}; -/// A fixed length database entry. -/// This is an important distinction for correct usage of DUP_SORT and DUP_FIXED -/// LMDB database flags. +/// Fixed-length checksummed database value wrapper. /// -/// Encoded Format: +/// This wrapper is designed for LMDB tables that rely on fixed-size value records, including those +/// configured with `DUP_SORT` and/or `DUP_FIXED`. 
/// -/// ┌─────── byte 0 ───────┬───── byte 1 ─────┬───── T::raw_len() bytes ──────┬─── 32 bytes ────┐ -/// │ StoredEntry version │ Record version │ Body │ B2B256 hash │ -/// └──────────────────────┴──────────────────┴───────────────────────────────┴─────────────────┘ +/// The wrapper stores: +/// - a versioned record `T` (encoded via [`ZainoVersionedSerde`]), and +/// - a 32-byte BLAKE2b-256 checksum computed over `encoded_key || encoded_item`. +/// +/// ## Invariants +/// - `T` must have a fixed encoded length (including its own version tag), enforced by +/// [`FixedEncodedLen`]. +/// - The checksum must be computed using the **exact key bytes** used in LMDB for this entry. +/// - On read, callers should verify the checksum before trusting decoded contents. +/// +/// ## Encoded format (conceptual) +/// +/// ┌─────── byte 0 ───────┬────────────── T::serialize() bytes ──────────────┬─── 32 bytes ────┐ +/// │ StoredEntry version │ (includes T's own record version tag + body) │ B2B256 checksum │ +/// └──────────────────────┴──────────────────────────────────────────────────┴─────────────────┘ +/// +/// Where the checksum is: +/// `blake2b256(encoded_key || encoded_item_bytes)`. #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct StoredEntryFixed { - /// Inner record + /// The inner record stored in this entry. pub(crate) item: T, - /// Entry checksum + + /// BLAKE2b-256 checksum of `encoded_key || encoded_item_bytes`. pub(crate) checksum: [u8; 32], } impl StoredEntryFixed { - /// Create a new entry, hashing `key || encoded_item`. + /// Constructs a new checksummed entry for `item` under `key`. + /// + /// The checksum is computed as: + /// `blake2b256(encoded_key || item.serialize())`. + /// + /// # Key requirements + /// `key` must be the exact byte encoding used as the LMDB key for this record. If the caller + /// hashes a different key encoding than what is used for storage, verification will fail. 
pub(crate) fn new>(key: K, item: T) -> Self { let body = { let mut v = Vec::with_capacity(T::VERSIONED_LEN); @@ -39,8 +126,17 @@ impl StoredEntryFixed { Self { item, checksum } } - /// Verify checksum given the DB key. - /// Returns `true` if `self.checksum == blake2b256(key || item.serialize())`. + /// Verifies the checksum for this entry under `key`. + /// + /// Returns `true` if and only if: + /// `self.checksum == blake2b256(encoded_key || item.serialize())`. + /// + /// # Key requirements + /// `key` must be the exact byte encoding used as the LMDB key for this record. + /// + /// # Usage + /// Callers should treat a checksum mismatch as a corruption or incompatibility signal and + /// return a hard error (or trigger a rebuild path), depending on context. pub(crate) fn verify>(&self, key: K) -> bool { let body = { let mut v = Vec::with_capacity(T::VERSIONED_LEN); @@ -51,12 +147,14 @@ impl StoredEntryFixed { candidate == self.checksum } - /// Returns a reference to the inner item. + /// Returns a reference to the inner record. pub(crate) fn inner(&self) -> &T { &self.item } - /// Computes a BLAKE2b-256 checksum. + /// Computes a BLAKE2b-256 checksum over `data`. + /// + /// This is the hashing primitive used by both wrappers. The checksum is not keyed. pub(crate) fn blake2b256(data: &[u8]) -> [u8; 32] { let mut hasher = Blake2bVar::new(32).expect("Failed to create hasher"); hasher.update(data); @@ -68,6 +166,13 @@ impl StoredEntryFixed { } } +/// Versioned on-disk encoding for fixed-length checksummed entries. +/// +/// Body layout (after the `StoredEntryFixed` version tag): +/// 1. `T::serialize()` bytes (fixed length: `T::VERSIONED_LEN`) +/// 2. 32-byte checksum +/// +/// Note: `T::serialize()` includes `T`’s own version tag and body. impl ZainoVersionedSerde for StoredEntryFixed { const VERSION: u8 = version::V1; @@ -90,26 +195,46 @@ impl ZainoVersionedSerde for StoredEnt } } +/// `StoredEntryFixed` has a fixed encoded body length. 
+/// +/// Body length = `T::VERSIONED_LEN` + 32 bytes checksum. impl FixedEncodedLen for StoredEntryFixed { const ENCODED_LEN: usize = T::VERSIONED_LEN + 32; } -/// Variable-length database value. -/// Layout (little-endian unless noted): +/// Variable-length checksummed database value wrapper. +/// +/// This wrapper is used for values whose serialized representation is not fixed-size. It stores: +/// - a versioned record `T` (encoded via [`ZainoVersionedSerde`]), +/// - a CompactSize length prefix for the serialized record, +/// - and a 32-byte BLAKE2b-256 checksum computed over `encoded_key || encoded_item`. +/// +/// The length prefix allows decoding to be bounded and avoids reading untrusted trailing bytes. +/// +/// ## Encoded format (conceptual) /// -/// ┌────── byte 0 ───────┬─────── CompactSize(len) ─────┬──── 1 byte ────┬── len - 1 bytes ───┬─ 32 bytes ─┐ -/// │ StoredEntry version │ (length of item.serialize()) │ Record version │ Body │ Hash │ -/// └─────────────────────┴──────────────────────────────┴────────────────┴────────────────────┴────────────┘ +/// ┌────── byte 0 ───────┬────── CompactSize(len) ──────┬────── len bytes ──────┬─ 32 bytes ─┐ +/// │ StoredEntry version │ len = item.serialize().len() │ T::serialize() bytes │ checksum │ +/// └─────────────────────┴──────────────────────────────┴───────────────────────┴────────────┘ +/// +/// Where the checksum is: +/// `blake2b256(encoded_key || encoded_item_bytes)`. #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct StoredEntryVar { - /// Inner record + /// The inner record stored in this entry. pub(crate) item: T, - /// Entry checksum + /// BLAKE2b-256 checksum of `encoded_key || encoded_item_bytes`. pub(crate) checksum: [u8; 32], } impl StoredEntryVar { - /// Create a new entry, hashing `encoded_key || encoded_item`. + /// Constructs a new checksummed entry for `item` under `key`. + /// + /// The checksum is computed as: + /// `blake2b256(encoded_key || item.serialize())`. 
+ /// + /// # Key requirements + /// `key` must be the exact byte encoding used as the LMDB key for this record. pub(crate) fn new>(key: K, item: T) -> Self { let body = { let mut v = Vec::new(); @@ -120,8 +245,13 @@ impl StoredEntryVar { Self { item, checksum } } - /// Verify checksum given the DB key. - /// Returns `true` if `self.checksum == blake2b256(key || item.serialize())`. + /// Verifies the checksum for this entry under `key`. + /// + /// Returns `true` if and only if: + /// `self.checksum == blake2b256(encoded_key || item.serialize())`. + /// + /// # Key requirements + /// `key` must be the exact byte encoding used as the LMDB key for this record. pub(crate) fn verify>(&self, key: K) -> bool { let mut body = Vec::new(); self.item.serialize(&mut body).unwrap(); @@ -129,12 +259,12 @@ impl StoredEntryVar { candidate == self.checksum } - /// Returns a reference to the inner item. + /// Returns a reference to the inner record. pub(crate) fn inner(&self) -> &T { &self.item } - /// Computes a BLAKE2b-256 checksum. + /// Computes a BLAKE2b-256 checksum over `data`. pub(crate) fn blake2b256(data: &[u8]) -> [u8; 32] { let mut hasher = Blake2bVar::new(32).expect("Failed to create hasher"); hasher.update(data); @@ -146,6 +276,15 @@ impl StoredEntryVar { } } +/// Versioned on-disk encoding for variable-length checksummed entries. +/// +/// Body layout (after the `StoredEntryVar` version tag): +/// 1. CompactSize `len` (the length of `T::serialize()` bytes) +/// 2. `len` bytes of `T::serialize()` (includes `T`’s own version tag and body) +/// 3. 32-byte checksum +/// +/// Implementations must ensure the length prefix matches the exact serialized record bytes written, +/// otherwise decoding will fail or misalign. 
impl ZainoVersionedSerde for StoredEntryVar { const VERSION: u8 = version::V1; diff --git a/zaino-state/src/chain_index/finalised_state/migrations.rs b/zaino-state/src/chain_index/finalised_state/migrations.rs index e2ba64078..24be5f635 100644 --- a/zaino-state/src/chain_index/finalised_state/migrations.rs +++ b/zaino-state/src/chain_index/finalised_state/migrations.rs @@ -1,4 +1,110 @@ -//! Migration management and implementations. +//! Database version migration framework and implementations +//! +//! This file defines how `ZainoDB` migrates on-disk databases between database versions. +//! +//! Migrations are orchestrated by [`MigrationManager`], which is invoked from `ZainoDB::spawn` when +//! `current_version < target_version`. +//! +//! The migration model is **stepwise**: +//! - each migration maps one concrete `DbVersion` to the next supported `DbVersion`, +//! - the manager iteratively applies steps until the target is reached. +//! +//! # Key concepts +//! +//! - [`Migration`] trait: +//! - declares `CURRENT_VERSION` and `TO_VERSION` constants, +//! - provides an async `migrate(...)` entry point. +//! +//! - [`MigrationManager`]: +//! - holds the router, config, current and target versions, and a `BlockchainSource`, +//! - repeatedly selects and runs the next migration via `get_migration()`. +//! +//! - [`capability::MigrationStatus`]: +//! - stored in `DbMetadata` and used to resume work safely after shutdown. +//! +//! # How major migrations work in this codebase +//! +//! This module is designed around the router’s **primary + shadow** model: +//! +//! - The *primary* DB continues serving read/write traffic. +//! - A *shadow* DB (new schema version) is created and built in parallel. +//! - Once the shadow DB is fully built and marked complete, it is promoted to primary. +//! - The old primary DB is shut down and deleted from disk once all handles are dropped. +//! +//! 
This minimises downtime and allows migrations that require a full rebuild (rather than an +//! in-place rewrite) without duplicating the entire DB indefinitely. +//! +//! It is also possible (if migration allows) to partially build the new database version, switch +//! specific functionality to the shadow, and partially delete the old database version, rather than +//! building the new database in full. This enables developers to minimise transient disk usage +//! during migrations. +//! +//! # Implemented migrations +//! +//! ## v0.0.0 → v1.0.0 +//! +//! `Migration0_0_0To1_0_0` performs a **full shadow rebuild from genesis**. +//! +//! Rationale (as enforced by code/comments): +//! - The legacy v0 DB is a lightwallet-specific store that only builds compact blocks from Sapling +//! activation onwards. +//! - v1 requires data from genesis (notably for transparent address history indices), therefore a +//! partial “continue from Sapling” build is insufficient. +//! +//! Mechanics: +//! - Spawn v1 as a shadow backend. +//! - Determine the current shadow tip (to resume if interrupted). +//! - Fetch blocks and commitment tree roots from the `BlockchainSource` starting at either genesis +//! or `shadow_tip + 1`, building `BlockMetadata` and `IndexedBlock`. +//! - Keep building until the shadow catches up to the primary tip (looping because the primary can +//! advance during the build). +//! - Mark `migration_status = Complete` in shadow metadata. +//! - Promote shadow to primary via `router.promote_shadow()`. +//! - Delete the old v0 directory asynchronously once all strong references are dropped. +//! +//! # Development: adding a new migration step +//! +//! 1. Introduce a new `struct MigrationX_Y_ZToA_B_C;` and implement `Migration`. +//! 2. Register it in `MigrationManager::get_migration()` by matching on the *current* version. +//! 3. Ensure the migration is: +//! - deterministic, +//! - resumable (use `DbMetadata::migration_status` and/or shadow tip), +//! 
- crash-safe (never leaves a partially promoted DB). +//! 4. Add tests/fixtures for: +//! - starting from the old version, +//! - resuming mid-build if applicable, +//! - validating the promoted DB serves required capabilities. +//! +//! # Notes on MigrationType +//! Database versioning (and migration) is split into three distinct types, dependent on the severity +//! of changes being made to the database: +//! - Major versions / migrations: +//! - Major schema / capability changes, notably changes that require refetching the complete +//! blockchain from the backing validator / finaliser to build / update database indices. +//! - Migrations should follow the "primary" database / "shadow" database model. The legacy database +//! should be spawned as the "primary" and set to carry on serving data during migration. The new +//! database version is then spawned as the "shadow" and built in a background process. Once the +//! "shadow" is built to "primary" db tip height it is promoted to primary, taking over serving +//! data from the legacy database, the demoted database can then be safely removed from disk. It is +//! also possible to partially build the new database version, promote specific database capability, +//! and delete specific tables from the legacy database, reducing transient disk usage. +//! - Minor versions / migrations: +//! - Updates involving minor schema / capability changes, notably changes that can be rebuilt in place +//! (changes that do not require fetching new data from the backing validator / finaliser) or that can +//! rely on updates to the versioned serialisation / deserialisation of database structures. +//! - Migrations for minor patch bumps can follow several paths. If the database table being updated +//! holds variable length items, and the actual data being held is not changed (only format changes +//! being applied) then it may be possible to rely on serialisation / deserialisation updates to the +//! 
items being changed, with the database table holding a mix of serialisation versions. However, +//! if the table being updated is of fixed length items, or the actual data held is being updated, +//! then it will be necessary to rebuild that table in full, possibly requiring database downtime for +//! the migration. Since this only involves moving data already held in the database (rather than +//! fetching new data from the backing validator) migration should be quick and short downtimes are +//! accepted. +//! - Patch versions / migrations: +//! - Changes to database code that do not touch the database schema, these include bug fixes, +//! performance improvements etc. +//! - Migrations for patch updates only need to handle updating the stored DbMetadata singleton. use super::{ capability::{ @@ -20,26 +126,70 @@ use std::sync::Arc; use tracing::info; use zebra_chain::parameters::NetworkKind; +/// Broad categorisation of migration severity. +/// +/// This enum exists as a design aid to communicate intent and constraints: +/// - **Patch**: code-only changes; schema is unchanged; typically only `DbMetadata` needs updating. +/// - **Minor**: compatible schema / encoding evolution; may require in-place rebuilds of selected tables. +/// - **Major**: capability or schema changes that require rebuilding indices from the backing validator, +/// typically using the router’s primary/shadow model. +/// +/// Note: this enum is not currently used to dispatch behaviour in this file; concrete steps are +/// selected by [`MigrationManager::get_migration`]. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum MigrationType { + /// Patch-level changes: no schema change; metadata updates only. Patch, + + /// Minor-level changes: compatible schema/encoding changes; may require in-place table rebuild. Minor, + + /// Major-level changes: new schema/capabilities; usually requires shadow rebuild and promotion. 
Major, } +/// A single migration step from one concrete on-disk version to the next. +/// +/// Migrations are designed to be **composable** and **stepwise**: each implementation should map a +/// specific `CURRENT_VERSION` to a specific `TO_VERSION`. The manager then iterates until the target +/// version is reached. +/// +/// ## Resumability and crash-safety +/// Migration implementations are expected to be resumable where practical. In this codebase, major +/// migrations typically use: +/// - a shadow database that can be incrementally built, +/// - the shadow tip height as an implicit progress marker, +/// - and [`MigrationStatus`] in `DbMetadata` as an explicit progress marker. +/// +/// Implementations must never promote a partially-correct database to primary. #[async_trait] pub trait Migration { + /// The exact on-disk version this step migrates *from*. const CURRENT_VERSION: DbVersion; + + /// The exact on-disk version this step migrates *to*. const TO_VERSION: DbVersion; + /// Returns the version this step migrates *from*. fn current_version(&self) -> DbVersion { Self::CURRENT_VERSION } + /// Returns the version this step migrates *to*. fn to_version(&self) -> DbVersion { Self::TO_VERSION } + /// Performs the migration step. + /// + /// Implementations may: + /// - spawn a shadow backend, + /// - build or rebuild indices, + /// - update metadata and migration status, + /// - and promote the shadow backend to primary via the router. + /// + /// # Errors + /// Returns `FinalisedStateError` if the migration cannot proceed safely or deterministically. async fn migrate( &self, router: Arc, @@ -48,16 +198,43 @@ pub trait Migration { ) -> Result<(), FinalisedStateError>; } +/// Orchestrates a sequence of migration steps until `target_version` is reached. +/// +/// `MigrationManager` is constructed by `ZainoDB::spawn` when it detects that the on-disk database +/// is older than the configured target version. 
+/// +/// The manager: +/// - selects the next step based on the current version, +/// - runs it, +/// - then advances `current_version` to the step’s `TO_VERSION` and repeats. +/// +/// The router is shared so that migration steps can use the primary/shadow routing model. pub(super) struct MigrationManager { + /// Router controlling primary/shadow backends and capability routing. pub(super) router: Arc, + + /// Block-cache configuration (paths, network, configured target DB version, etc.). pub(super) cfg: BlockCacheConfig, + + /// The on-disk version currently detected/opened. pub(super) current_version: DbVersion, + + /// The configured target version to migrate to. pub(super) target_version: DbVersion, + + /// Backing data source used to fetch blocks / tree roots for rebuild-style migrations. pub(super) source: T, } impl MigrationManager { /// Iteratively performs each migration step from current version to target version. + /// + /// The manager applies steps in order, where each step maps one specific `DbVersion` to the next. + /// The loop terminates once `current_version >= target_version`. + /// + /// # Errors + /// Returns an error if a migration step is missing for the current version, or if any migration + /// step fails. pub(super) async fn migrate(&mut self) -> Result<(), FinalisedStateError> { while self.current_version < self.target_version { let migration = self.get_migration()?; @@ -74,7 +251,10 @@ impl MigrationManager { Ok(()) } - /// Return the next migration for the current version. + /// Returns the next migration step for the current on-disk version. + /// + /// This must be updated whenever a new supported DB version is introduced. The match is strict: + /// if a step is missing, migration is aborted rather than attempting an unsafe fallback. fn get_migration(&self) -> Result, FinalisedStateError> { match ( self.current_version.major, @@ -92,6 +272,13 @@ impl MigrationManager { // ***** Migrations ***** +/// Major migration: v0.0.0 → v1.0.0. 
+/// +/// This migration performs a shadow rebuild of the v1 database from genesis, then promotes the +/// completed shadow to primary and schedules deletion of the old v0 database directory once all +/// handles are dropped. +/// +/// See the module-level documentation for the detailed rationale and mechanics. struct Migration0_0_0To1_0_0; #[async_trait] @@ -107,11 +294,21 @@ impl Migration for Migration0_0_0To1_0_0 { patch: 0, }; - /// The V0 database that we are migrating from was a lightwallet specific database - /// that only built compact block data from sapling activation onwards. - /// DbV1 is required to be built from genasis to correctly build the transparent address indexes. - /// For this reason we do not do any partial builds in the V0 to V1 migration. - /// We just run V0 as primary until V1 is fully built in shadow, then switch primary, deleting V0. + /// Performs the v0 → v1 major migration using the router’s primary/shadow model. + /// + /// The legacy v0 database only supports compact block data from Sapling activation onwards. + /// DbV1 requires a complete rebuild from genesis to correctly build indices (notably transparent + /// address history). For this reason, this migration does not attempt partial incremental builds + /// from Sapling; it rebuilds v1 in full in a shadow backend, then promotes it. + /// + /// ## Resumption behaviour + /// If the process is shut down mid-migration: + /// - the v1 shadow DB directory may already exist, + /// - shadow tip height is used to resume from `shadow_tip + 1`, + /// - and `MigrationStatus` is used as a coarse progress marker. + /// + /// Promotion occurs only after the v1 build loop has caught up to the primary tip and the shadow + /// metadata is marked `Complete`. 
async fn migrate( &self, router: Arc, diff --git a/zaino-state/src/chain_index/finalised_state/reader.rs b/zaino-state/src/chain_index/finalised_state/reader.rs index 163b6c98c..575e70ef1 100644 --- a/zaino-state/src/chain_index/finalised_state/reader.rs +++ b/zaino-state/src/chain_index/finalised_state/reader.rs @@ -1,6 +1,48 @@ -//! ZainoDbReader: Read only view onto a running ZainoDB +//! Read-only view onto a running `ZainoDB` (DbReader) //! -//! This should be used to fetch chain data in *all* cases. +//! This file defines [`DbReader`], the **read-only** interface that should be used for *all* chain +//! data fetches from the finalised database. +//! +//! `DbReader` exists for two reasons: +//! +//! 1. **API hygiene:** it narrows the surface to reads and discourages accidental use of write APIs +//! from query paths. +//! 2. **Migration safety:** it routes each call through [`Router`](super::router::Router) using a +//! [`CapabilityRequest`](crate::chain_index::finalised_state::capability::CapabilityRequest), +//! ensuring the underlying backend supports the requested feature (especially important during +//! major migrations where different DB versions may coexist). +//! +//! # How routing works +//! +//! Each method in `DbReader` requests a specific capability (e.g. `BlockCoreExt`, `TransparentHistExt`). +//! Internally, `DbReader::db(cap)` calls `ZainoDB::backend_for_cap(cap)`, which consults the router. +//! +//! - If the capability is currently served by the shadow DB (shadow mask contains the bit), the +//! query runs against shadow. +//! - Otherwise, it runs against primary if primary supports it. +//! - If neither backend supports it, the call returns `FinalisedStateError::FeatureUnavailable(...)`. +//! +//! # Version constraints and error handling +//! +//! Some queries are only available in newer DB versions (notably most v1 extension traits). +//! Callers should either: +//! 
- require a minimum DB version (via configuration and/or metadata checks), or +//! - handle `FeatureUnavailable` errors gracefully when operating against legacy databases. +//! +//! # Development: adding a new read method +//! +//! 1. Decide whether the new query belongs under an existing extension trait or needs a new one. +//! 2. If a new capability is required: +//! - add a new `Capability` bit and `CapabilityRequest` variant in `capability.rs`, +//! - implement the corresponding extension trait for supported DB versions, +//! - delegate through `DbBackend` and route via the router. +//! 3. Add the new method on `DbReader` that requests the corresponding `CapabilityRequest` and calls +//! into the backend. +//! +//! # Usage pattern +//! +//! `DbReader` is created from an `Arc` using [`ZainoDB::to_reader`](super::ZainoDB::to_reader). +//! Prefer passing `DbReader` through query layers rather than passing `ZainoDB` directly. use crate::{ chain_index::{ @@ -24,45 +66,66 @@ use super::{ use std::sync::Arc; -/// Immutable view onto an already-running [`ZainoDB`]. +/// Read-only, capability-routed handle to the finalised database. /// -/// Carries a plain reference with the same lifetime as the parent DB +/// `DbReader` is the preferred entry point for serving chain queries: +/// - it exposes only read APIs, +/// - it routes each operation via [`CapabilityRequest`] to ensure the selected backend supports the +/// requested feature, +/// - and it remains stable across major migrations because routing is handled internally by the +/// [`Router`](super::router::Router). +/// +/// ## Cloning and sharing +/// `DbReader` is cheap to clone; clones share the underlying `Arc`. #[derive(Clone)] pub(crate) struct DbReader { - /// Immutable read-only view onto the running ZainoDB + /// Shared handle to the running `ZainoDB` instance. pub(crate) inner: Arc, } impl DbReader { - /// Returns the internal db backend for the given db capability. 
+ /// Resolves the backend that should serve `cap` right now. + /// + /// This is the single routing choke-point for all `DbReader` methods. It delegates to + /// `ZainoDB::backend_for_cap`, which consults the router’s primary/shadow masks. + /// + /// # Errors + /// Returns `FinalisedStateError::FeatureUnavailable(...)` if no currently-open backend + /// advertises the requested capability. #[inline(always)] fn db(&self, cap: CapabilityRequest) -> Result, FinalisedStateError> { self.inner.backend_for_cap(cap) } + // ***** DB Core Read ***** - /// Returns the status of the serving ZainoDB. + /// Returns the current runtime status of the serving database. + /// + /// This reflects the status of the backend currently serving `READ_CORE`, which is the minimum + /// capability required for basic chain queries. pub(crate) fn status(&self) -> StatusType { self.inner.status() } - /// Returns the greatest block `Height` stored in the db - /// (`None` if the DB is still empty). + /// Returns the greatest block `Height` stored in the database, or `None` if the DB is empty. pub(crate) async fn db_height(&self) -> Result, FinalisedStateError> { self.inner.db_height().await } - /// Fetch database metadata. + /// Fetches the persisted database metadata singleton (`DbMetadata`). pub(crate) async fn get_metadata(&self) -> Result { self.inner.get_metadata().await } - /// Awaits untile the DB returns a Ready status. + /// Waits until the database reports [`StatusType::Ready`]. + /// + /// This is a convenience wrapper around `ZainoDB::wait_until_ready` and should typically be + /// awaited once during startup before serving queries. pub(crate) async fn wait_until_ready(&self) { self.inner.wait_until_ready().await } - /// Fetch the block height in the main chain for a given block hash. + /// Fetches the main-chain height for a given block hash, if present in finalised state. 
pub(crate) async fn get_block_height( &self, hash: BlockHash, @@ -70,7 +133,7 @@ impl DbReader { self.inner.get_block_height(hash).await } - /// Fetch the block hash in the main chain for a given block height. + /// Fetches the main-chain block hash for a given block height, if present in finalised state. pub(crate) async fn get_block_hash( &self, height: Height, diff --git a/zaino-state/src/chain_index/finalised_state/router.rs b/zaino-state/src/chain_index/finalised_state/router.rs index 0010f0a23..b34628d42 100644 --- a/zaino-state/src/chain_index/finalised_state/router.rs +++ b/zaino-state/src/chain_index/finalised_state/router.rs @@ -1,8 +1,69 @@ -//! Implements the ZainoDB Router, used to selectively route database capabilities during major migrations. +//! Capability-based database router (primary + shadow) //! -//! The Router allows incremental database migrations by splitting read and write capability groups between primary and shadow databases. -//! This design enables partial migrations without duplicating the entire chain database, -//! greatly reducing disk usage and ensuring minimal downtime. +//! This file implements [`Router`], which allows `ZainoDB` to selectively route operations to one of +//! two database backends: +//! - a **primary** (active) DB, and +//! - an optional **shadow** DB used during major migrations. +//! +//! The router is designed to support incremental and low-downtime migrations by splitting the DB +//! feature set into capability groups. Each capability group can be served by either backend, +//! controlled by atomic bitmasks. +//! +//! # Why a router exists +//! +//! Major schema upgrades are often most safely implemented as a rebuild into a new DB rather than an +//! in-place rewrite. The router enables that by allowing the system to: +//! - keep serving requests from the old DB while building the new one, +//! - optionally move specific read capabilities to the shadow DB once they are correct there, +//! 
- then atomically promote the shadow DB to primary at the end. +//! +//! # Concurrency and atomicity model +//! +//! The router uses `ArcSwap` / `ArcSwapOption` for lock-free backend swapping and `AtomicU32` masks +//! for capability routing. +//! +//! - Backend selection (`backend(...)`) is wait-free and based on the current masks. +//! - Promotion (`promote_shadow`) swaps the primary Arc atomically; existing in-flight operations +//! remain valid because they hold an `Arc`. +//! +//! Memory ordering is explicit (`Acquire`/`Release`/`AcqRel`) to ensure mask updates are observed +//! consistently relative to backend pointer updates. +//! +//! # Capability routing semantics +//! +//! `Router::backend(req)` resolves as: +//! 1. If `shadow_mask` contains the requested bit and shadow exists → return shadow. +//! 2. Else if `primary_mask` contains the requested bit → return primary. +//! 3. Else → return `FinalisedStateError::FeatureUnavailable`. +//! +//! # Shadow lifecycle (migration-only API) +//! +//! The following methods are intended to be called **only** by the migration manager: +//! - `set_shadow(...)` +//! - `extend_shadow_caps(...)` +//! - `promote_shadow()` +//! +//! Promotion performs: +//! - shadow → primary swap, +//! - resets shadow and shadow mask, +//! - updates the primary mask from the promoted backend’s declared capabilities, +//! - returns the old primary backend so the migration can shut it down and delete its files safely. +//! +//! # Trait impls +//! +//! `Router` implements the core DB traits (`DbCore`, `DbRead`, `DbWrite`) by routing READ_CORE/WRITE_CORE +//! to whichever backend currently serves those capabilities. +//! +//! # Development notes +//! +//! - If you introduce a new capability bit, ensure it is: +//! - added to `CapabilityRequest`, +//! - implemented by the relevant DB version(s), +//! - and considered in migration routing policy (whether it can move to shadow incrementally). +//! +//! 
- When implementing incremental migrations (moving caps before final promotion), ensure the shadow +//! backend is kept consistent with the primary for those capabilities (or restrict such caps to +//! read-only queries that can tolerate lag with explicit semantics). use super::{ capability::{Capability, DbCore, DbMetadata, DbRead, DbWrite}, @@ -21,26 +82,85 @@ use std::sync::{ Arc, }; +/// Capability-based database router. +/// +/// `Router` is the internal dispatch layer used by `ZainoDB` to route operations to either: +/// - a **primary** database backend (the active DB), or +/// - an optional **shadow** backend used during major version migrations. +/// +/// Routing is driven by per-backend **capability bitmasks**: +/// - If a requested capability bit is set in the shadow mask and a shadow backend exists, the call +/// is routed to shadow. +/// - Otherwise, if the bit is set in the primary mask, the call is routed to primary. +/// - Otherwise, the feature is reported as unavailable. +/// +/// ## Concurrency model +/// - Backend pointers are stored using `ArcSwap` / `ArcSwapOption` to allow atomic, lock-free swaps. +/// - Capability masks are stored in `AtomicU32` and read using `Acquire` ordering in the hot path. +/// - Promoting shadow to primary is atomic and safe for in-flight calls because callers hold +/// `Arc` clones. +/// +/// ## Intended usage +/// The shadow-related APIs (`set_shadow`, `extend_shadow_caps`, `promote_shadow`) are intended to be +/// used only by the migration manager to support low-downtime rebuild-style migrations. pub(crate) struct Router { - /// Primary active database. + /// Primary active database backend. + /// + /// This is the default backend used for any capability bit that is not explicitly routed to the + /// shadow backend via [`Router::shadow_mask`]. + /// + /// Stored behind [`ArcSwap`] so it can be replaced atomically during promotion without locking. 
primary: ArcSwap, - /// Shadow database, new version to be built during major migration. + + /// Shadow database backend (optional). + /// + /// During a major migration, a new-version backend is built and installed here. Individual + /// capability groups can be routed to the shadow by setting bits in [`Router::shadow_mask`]. + /// + /// Outside of migrations this should remain `None`. shadow: ArcSwapOption, - /// Capability mask for primary database. + + /// Capability mask for the primary backend. + /// + /// A bit being set means “this capability may be served by the primary backend”. + /// + /// The mask is initialized from `primary.capability()` and can be restricted/extended during + /// migrations to ensure that requests are only routed to backends that can satisfy them. primary_mask: AtomicU32, - /// Capability mask dictating what database capalility (if any) should be served by the shadow. + + /// Capability mask for the shadow backend. + /// + /// A bit being set means “this capability should be served by the shadow backend (if present)”. + /// + /// Routing precedence is: + /// 1. shadow if the bit is set and shadow exists, + /// 2. else primary if the bit is set, + /// 3. else feature unavailable. shadow_mask: AtomicU32, } /// Database version router. /// -/// Routes database capability to the correct database during major migrations. +/// Routes database capabilities to either a primary backend or (during major migrations) an optional +/// shadow backend. +/// +/// ## Routing guarantees +/// - The router only returns a backend if the corresponding capability bit is enabled in the +/// backend’s active mask. +/// - Backend selection is lock-free and safe for concurrent use. +/// - Promotion swaps the primary backend atomically; in-flight operations remain valid because they +/// hold their own `Arc` clones. impl Router { // ***** Router creation ***** - /// Creatues a new database router, setting primary the given database. 
+ /// Creates a new [`Router`] with `primary` installed as the active backend. + /// + /// The primary capability mask is initialized from `primary.capability()`. The shadow backend is + /// initially unset and must only be configured during major migrations. /// - /// Shadow is spawned as none and should only be set to some during major database migrations. + /// ## Notes + /// - The router does not validate that `primary.capability()` matches the masks that may later be + /// set by migration code; migration orchestration must keep the masks conservative. pub(crate) fn new(primary: Arc) -> Self { let cap = primary.capability(); Self { @@ -53,7 +173,18 @@ impl Router { // ***** Capability router ***** - /// Return the database backend for a given capability, or an error if none is available. + /// Returns the database backend that should serve `cap`. + /// + /// Routing order: + /// 1. If the shadow mask contains the requested bit *and* a shadow backend exists, return shadow. + /// 2. Else if the primary mask contains the requested bit, return primary. + /// 3. Otherwise return [`FinalisedStateError::FeatureUnavailable`]. + /// + /// ## Correctness contract + /// The masks are the source of truth for routing. If migration code enables a bit on the shadow + /// backend before the corresponding data/index is correct there, callers may observe incorrect + /// results. Therefore, migrations must only route a capability to shadow once it is complete and + /// consistent for that capability’s semantics. #[inline] pub(crate) fn backend( &self, @@ -77,27 +208,48 @@ impl Router { // // These methods should only ever be used by the migration manager. - /// Sets the shadow to the given database. + /// Installs `shadow` as the current shadow backend and sets its routed capability mask to `caps`. 
+ /// + /// This is the entry point for starting a major migration: + /// - spawn/open the new-version backend, + /// - call `set_shadow(new_backend, initial_caps)`, + /// - optionally expand shadow routing incrementally with [`Router::extend_shadow_caps`]. + /// + /// ## Ordering + /// The shadow backend pointer is stored first, then the shadow mask is published with `Release` + /// ordering. Readers use `Acquire` to observe both consistently. pub(crate) fn set_shadow(&self, shadow: Arc, caps: Capability) { self.shadow.store(Some(shadow)); self.shadow_mask.store(caps.bits(), Ordering::Release); } - /// Move additional capability bits to the *current* shadow. + /// Adds additional capabilities to the shadow routing mask. + /// + /// This enables incremental migrations where certain read capabilities can move to the shadow + /// backend once the corresponding indices are complete there. + /// + /// ## Notes + /// - This only changes routing; it does not validate the shadow backend’s correctness. + /// - Use conservative routing policies: prefer moving read-only capabilities first. pub(crate) fn extend_shadow_caps(&self, caps: Capability) { self.shadow_mask.fetch_or(caps.bits(), Ordering::AcqRel); } - /// Promotes the shadow database to primary, resets shadow, - /// and updates the primary capability mask from the new backend. + /// Promotes the current shadow backend to become the new primary backend. /// - /// Used at the end of major migrations to move the active database to the new version. + /// Promotion performs the following steps: + /// - Removes the shadow backend (`shadow = None`). + /// - Sets `primary_mask` to the promoted backend’s declared capabilities. + /// - Clears `shadow_mask`. + /// - Atomically swaps the `primary` backend pointer to the promoted backend. /// - /// Returns the initial primary value. 
+ /// Returns the old primary backend so the caller (migration manager) can: + /// - wait for all outstanding `Arc` clones to drop, + /// - shut it down, + /// - and finally remove the old on-disk directory safely. /// - /// # Error - /// - /// Returns a critical error if the shadow is not found. + /// # Errors + /// Returns [`FinalisedStateError::Critical`] if no shadow backend is currently installed. pub(crate) fn promote_shadow(&self) -> Result, FinalisedStateError> { let Some(new_primary) = self.shadow.swap(None) else { return Err(FinalisedStateError::Critical( @@ -114,17 +266,29 @@ impl Router { // ***** Primary database capability control ***** - /// Disables specific capabilities on the primary backend. + /// Disables specific capabilities on the primary backend by clearing bits in `primary_mask`. + /// + /// This is primarily used during migrations to prevent routing particular operations to the old + /// backend once the migration wants them served elsewhere. + /// + /// ## Safety + /// This only affects routing. It does not stop in-flight operations already holding an + /// `Arc` clone. pub(crate) fn limit_primary_caps(&self, caps: Capability) { self.primary_mask.fetch_and(!caps.bits(), Ordering::AcqRel); } - /// Enables specific capabilities on the primary backend. + /// Enables specific capabilities on the primary backend by setting bits in `primary_mask`. + /// + /// This can be used to restore routing to the primary backend after temporarily restricting it. pub(crate) fn extend_primary_caps(&self, caps: Capability) { self.primary_mask.fetch_or(caps.bits(), Ordering::AcqRel); } /// Overwrites the entire primary capability mask. + /// + /// This is a sharp tool intended for migration orchestration. Prefer incremental helpers + /// (`limit_primary_caps`, `extend_primary_caps`) unless a full reset is required. 
pub(crate) fn set_primary_mask(&self, new_mask: Capability) { self.primary_mask.store(new_mask.bits(), Ordering::Release); } @@ -132,8 +296,18 @@ impl Router { // ***** Core DB functionality ***** +/// Core database façade implementation for the router. +/// +/// `DbCore` methods are routed via capability selection: +/// - `status()` consults the backend that currently serves `READ_CORE`. +/// - `shutdown()` attempts to shut down both primary and shadow backends (if present). #[async_trait] impl DbCore for Router { + /// Returns the runtime status of the database system. + /// + /// This is derived from whichever backend currently serves `READ_CORE`. If `READ_CORE` is not + /// available (misconfiguration or partial migration state), this returns [`StatusType::Busy`] + /// as a conservative fallback. fn status(&self) -> StatusType { match self.backend(CapabilityRequest::ReadCore) { Ok(backend) => backend.status(), @@ -141,6 +315,15 @@ impl DbCore for Router { } } + /// Shuts down both the primary and shadow backends (if any). + /// + /// Shutdown is attempted for the primary first, then the shadow. If primary shutdown fails, the + /// error is returned immediately (the shadow shutdown result is not returned in that case). + /// + /// ## Migration note + /// During major migrations, the old primary backend may need to stay alive until all outstanding + /// handles are dropped. That waiting logic lives outside the router (typically in the migration + /// manager). async fn shutdown(&self) -> Result<(), FinalisedStateError> { let primary_shutdown_result = self.primary.load_full().shutdown().await; @@ -155,26 +338,37 @@ impl DbCore for Router { } } +/// Core write surface routed through `WRITE_CORE`. +/// +/// All writes are delegated to the backend currently selected for [`CapabilityRequest::WriteCore`]. 
+/// During migrations this allows writers to remain on the old backend until the new backend is ready +/// (or to be switched deliberately by migration orchestration). #[async_trait] impl DbWrite for Router { + /// Writes a block via the backend currently serving `WRITE_CORE`. async fn write_block(&self, blk: IndexedBlock) -> Result<(), FinalisedStateError> { self.backend(CapabilityRequest::WriteCore)? .write_block(blk) .await } + /// Deletes the block at height `h` via the backend currently serving `WRITE_CORE`. async fn delete_block_at_height(&self, h: Height) -> Result<(), FinalisedStateError> { self.backend(CapabilityRequest::WriteCore)? .delete_block_at_height(h) .await } + /// Deletes the provided block via the backend currently serving `WRITE_CORE`. async fn delete_block(&self, blk: &IndexedBlock) -> Result<(), FinalisedStateError> { self.backend(CapabilityRequest::WriteCore)? .delete_block(blk) .await } + /// Updates the persisted metadata singleton via the backend currently serving `WRITE_CORE`. + /// + /// This is used by migrations to record progress and completion status. async fn update_metadata(&self, metadata: DbMetadata) -> Result<(), FinalisedStateError> { self.backend(CapabilityRequest::WriteCore)? .update_metadata(metadata) @@ -182,12 +376,19 @@ impl DbWrite for Router { } } +/// Core read surface routed through `READ_CORE`. +/// +/// All reads are delegated to the backend currently selected for [`CapabilityRequest::ReadCore`]. +/// During migrations this allows reads to continue from the old backend unless/until explicitly +/// moved. #[async_trait] impl DbRead for Router { + /// Returns the database tip height via the backend currently serving `READ_CORE`. async fn db_height(&self) -> Result, FinalisedStateError> { self.backend(CapabilityRequest::ReadCore)?.db_height().await } + /// Returns the height for `hash` via the backend currently serving `READ_CORE`. 
async fn get_block_height( &self, hash: BlockHash, @@ -197,12 +398,17 @@ impl DbRead for Router { .await } + /// Returns the hash for `h` via the backend currently serving `READ_CORE`. async fn get_block_hash(&self, h: Height) -> Result, FinalisedStateError> { self.backend(CapabilityRequest::ReadCore)? .get_block_hash(h) .await } + /// Returns database metadata via the backend currently serving `READ_CORE`. + /// + /// During migrations, callers should expect `DbMetadata::migration_status` to reflect the state + /// of the active backend selected by routing. async fn get_metadata(&self) -> Result { self.backend(CapabilityRequest::ReadCore)? .get_metadata() From 5420ce14049e08bd26f91f54cf73da782147808d Mon Sep 17 00:00:00 2001 From: fluidvanadium Date: Wed, 14 Jan 2026 20:00:57 +0000 Subject: [PATCH 044/114] Updated comments as per Github discussion. --- zaino-state/src/chain_index.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index 90eeed8d8..1211ba61e 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -580,9 +580,7 @@ impl NodeBackedChainIndexSubscriber { Searches finalized and non-finalized chains for any blocks containing the transaction. Ordered with finalized blocks first. - Warning: there might be multiple blocks containing the transaction. - In one case, diverging non-finalized chains might each confirm the transaction. - An uncertain case: If a transaction, which is already on a NonBest chain, becomes Finalized, it might show up in both places in a single return of this function. + WARNING: there might be multiple chains, each containing a block with the transaction. 
*/ async fn blocks_containing_transaction<'snapshot, 'self_lt, 'iter>( &'self_lt self, @@ -872,7 +870,7 @@ impl ChainIndex for NodeBackedChainIndexSubscriber Date: Sun, 18 Jan 2026 17:31:15 +0000 Subject: [PATCH 045/114] improved error message --- zaino-fetch/src/jsonrpsee/connector.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zaino-fetch/src/jsonrpsee/connector.rs b/zaino-fetch/src/jsonrpsee/connector.rs index 8c7148ccc..8b9d17dcb 100644 --- a/zaino-fetch/src/jsonrpsee/connector.rs +++ b/zaino-fetch/src/jsonrpsee/connector.rs @@ -911,6 +911,6 @@ pub async fn test_node_and_return_url( } interval.tick().await; } - error!("Error: Could not establish connection with node. Please check config and confirm node is listening at the correct address and the correct authorisation details have been entered. Exiting.."); + error!("Error: Could not establish connection with node. Please check config and confirm node is listening at {url} and the correct authorisation details have been entered. Exiting.."); std::process::exit(1); } From d1f5a60869ee98c6f41a08a5b2da3c19ead9885c Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 27 Jan 2026 10:46:50 +0000 Subject: [PATCH 046/114] refactor(config): migrate from figment to config-rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace figment with config-rs for configuration management, following the 3-layer hierarchy: defaults → TOML → environment variables. 
Key changes: - Add sensitive key protection that errors on env vars ending with password, secret, token, cookie, or private_key - Support nested config via ZAINO_ prefix with __ separator - Add #[serde(deny_unknown_fields)] for strict TOML validation - Add #[serde(default)] to structs for partial config support - Add serde alias for backwards compatibility with grpc_listen_address - Simplify BackendType matching in indexer (remove BackendConfig wrapper) - Convert tests from Figment Jail to EnvGuard pattern Security: Environment variables for sensitive fields are blocked with clear error messages, requiring use of config files instead. --- Cargo.lock | 98 +- Cargo.toml | 2 +- zaino-common/src/config/storage.rs | 2 + zaino-common/src/config/validator.rs | 14 + zaino-serve/src/server/config.rs | 1 + zaino-state/src/config.rs | 3 +- zainod/Cargo.toml | 3 +- zainod/src/config.rs | 1364 ++++++++++++-------------- zainod/src/indexer.rs | 21 +- 9 files changed, 709 insertions(+), 799 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cb8bd233f..b9e23b9fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1131,6 +1131,18 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "config" +version = "0.15.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b30fa8254caad766fc03cb0ccae691e14bf3bd72bfff27f72802ce729551b3d6" +dependencies = [ + "pathdiff", + "serde_core", + "toml 0.9.8", + "winnow", +] + [[package]] name = "console" version = "0.15.11" @@ -2011,10 +2023,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" dependencies = [ "atomic 0.6.1", - "parking_lot", - "pear", "serde", - "tempfile", "toml 0.8.23", "uncased", "version_check", @@ -2883,12 +2892,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "inlinable_string" -version = "0.1.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" - [[package]] name = "inotify" version = "0.11.0" @@ -3964,6 +3967,12 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +[[package]] +name = "pathdiff" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" + [[package]] name = "pbkdf2" version = "0.12.2" @@ -3974,29 +3983,6 @@ dependencies = [ "password-hash", ] -[[package]] -name = "pear" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdeeaa00ce488657faba8ebf44ab9361f9365a97bd39ffb8a60663f57ff4b467" -dependencies = [ - "inlinable_string", - "pear_codegen", - "yansi", -] - -[[package]] -name = "pear_codegen" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bab5b985dc082b345f812b7df84e1bef27e7207b39e448439ba8bd69c93f147" -dependencies = [ - "proc-macro2", - "proc-macro2-diagnostics", - "quote", - "syn 2.0.106", -] - [[package]] name = "pem-rfc7468" version = "0.7.0" @@ -4317,19 +4303,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "proc-macro2-diagnostics" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.106", - "version_check", - "yansi", -] - [[package]] name = "proptest" version = "1.6.0" @@ -5570,6 +5543,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" +dependencies = [ + "serde_core", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -6253,11 
+6235,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", - "serde_spanned", + "serde_spanned 0.6.9", "toml_datetime 0.6.11", "toml_edit 0.22.27", ] +[[package]] +name = "toml" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" +dependencies = [ + "serde_core", + "serde_spanned 1.0.4", + "toml_datetime 0.7.3", + "toml_parser", + "winnow", +] + [[package]] name = "toml_datetime" version = "0.6.11" @@ -6284,7 +6279,7 @@ checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ "indexmap 2.11.4", "serde", - "serde_spanned", + "serde_spanned 0.6.9", "toml_datetime 0.6.11", "toml_write", "winnow", @@ -8362,12 +8357,6 @@ version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213b7324336b53d2414b2db8537e56544d981803139155afa84f76eeebb7a546" -[[package]] -name = "yansi" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" - [[package]] name = "yoke" version = "0.8.0" @@ -8557,9 +8546,10 @@ name = "zainod" version = "0.1.2" dependencies = [ "clap", - "figment", + "config", "http", "serde", + "tempfile", "thiserror 1.0.69", "tokio", "toml 0.5.11", diff --git a/Cargo.toml b/Cargo.toml index c2eea5f7b..2de396592 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -115,7 +115,7 @@ zaino-common.path = "zaino-common" zaino-testutils = { path = "zaino-testutils" } zaino-testvectors = { path = "zaino-testvectors" } zainod = { path = "zainod" } -figment = "0.10" +config = { version = "0.15", default-features = false, features = ["toml"] } nonempty = "0.11.0" proptest = "~1.6" zip32 = "0.2.1" diff --git a/zaino-common/src/config/storage.rs b/zaino-common/src/config/storage.rs 
index ccfde0aa3..11a539298 100644 --- a/zaino-common/src/config/storage.rs +++ b/zaino-common/src/config/storage.rs @@ -4,6 +4,7 @@ use std::path::PathBuf; /// Cache configuration for DashMaps. #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] +#[serde(default)] pub struct CacheConfig { /// Capacity of the DashMaps used for caching pub capacity: usize, @@ -69,6 +70,7 @@ impl DatabaseSize { /// Configures the file path and size limits for persistent storage /// used by Zaino services. #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] +#[serde(default)] pub struct DatabaseConfig { /// Database file path. pub path: PathBuf, diff --git a/zaino-common/src/config/validator.rs b/zaino-common/src/config/validator.rs index 1e57c0cb5..c6bbe72d8 100644 --- a/zaino-common/src/config/validator.rs +++ b/zaino-common/src/config/validator.rs @@ -6,6 +6,7 @@ use std::path::PathBuf; /// Validator (full-node) type for Zaino configuration. #[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize, serde::Serialize)] +#[serde(default)] pub struct ValidatorConfig { /// Full node / validator gprc listen port. Only exists for zebra pub validator_grpc_listen_address: Option, @@ -18,3 +19,16 @@ pub struct ValidatorConfig { /// full node / validator Password. pub validator_password: Option, } + +/// Required by `#[serde(default)]` to fill missing fields when deserializing partial TOML configs. 
+impl Default for ValidatorConfig { + fn default() -> Self { + Self { + validator_grpc_listen_address: Some("127.0.0.1:18230".to_string()), + validator_jsonrpc_listen_address: "127.0.0.1:18232".to_string(), + validator_cookie_path: None, + validator_user: Some("xxxxxx".to_string()), + validator_password: Some("xxxxxx".to_string()), + } + } +} diff --git a/zaino-serve/src/server/config.rs b/zaino-serve/src/server/config.rs index 03a1e244a..31661dd99 100644 --- a/zaino-serve/src/server/config.rs +++ b/zaino-serve/src/server/config.rs @@ -19,6 +19,7 @@ pub struct GrpcTls { #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] pub struct GrpcServerConfig { /// gRPC server bind addr. + #[serde(alias = "grpc_listen_address")] pub listen_address: SocketAddr, /// Enables TLS. pub tls: Option, diff --git a/zaino-state/src/config.rs b/zaino-state/src/config.rs index e35a0e446..ed2b7376c 100644 --- a/zaino-state/src/config.rs +++ b/zaino-state/src/config.rs @@ -3,13 +3,14 @@ use std::path::PathBuf; use zaino_common::{Network, ServiceConfig, StorageConfig}; -#[derive(Debug, Clone, serde::Deserialize, PartialEq, Copy)] +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, serde::Deserialize, serde::Serialize)] #[serde(rename_all = "lowercase")] /// Type of backend to be used. pub enum BackendType { /// Uses ReadStateService (Zebrad) State, /// Uses JsonRPC client (Zcashd. Zainod) + #[default] Fetch, } diff --git a/zainod/Cargo.toml b/zainod/Cargo.toml index cdcef6c51..66d3eb804 100644 --- a/zainod/Cargo.toml +++ b/zainod/Cargo.toml @@ -49,4 +49,5 @@ thiserror = { workspace = true } # Formats toml = { workspace = true } -figment= { workspace = true, features = ["toml", "env", "test"] } +config = { workspace = true } +tempfile = { workspace = true } diff --git a/zainod/src/config.rs b/zainod/src/config.rs index 7fb33da6b..dcee228e4 100644 --- a/zainod/src/config.rs +++ b/zainod/src/config.rs @@ -1,58 +1,42 @@ //! Zaino config. 
-use figment::{ - providers::{Format, Serialized, Toml}, - Figment, -}; + use std::{ net::{IpAddr, SocketAddr}, path::PathBuf, }; -// Added for Serde deserialization helpers -use crate::error::IndexerError; -use serde::{ - de::{self, Deserializer}, - Deserialize, Serialize, -}; + +use serde::{Deserialize, Serialize}; +use tracing::info; #[cfg(feature = "no_tls_use_unencrypted_traffic")] use tracing::warn; -use tracing::{error, info}; + +use crate::error::IndexerError; use zaino_common::{ try_resolve_address, AddressResolution, CacheConfig, DatabaseConfig, DatabaseSize, Network, ServiceConfig, StorageConfig, ValidatorConfig, }; use zaino_serve::server::config::{GrpcServerConfig, JsonRpcServerConfig}; - #[allow(deprecated)] -use zaino_state::{BackendConfig, FetchServiceConfig, StateServiceConfig}; - -/// Custom deserialization function for `BackendType` from a String. -/// Used by Serde's `deserialize_with`. -fn deserialize_backendtype_from_string<'de, D>( - deserializer: D, -) -> Result -where - D: Deserializer<'de>, -{ - let s = String::deserialize(deserializer)?; - match s.to_lowercase().as_str() { - "state" => Ok(zaino_state::BackendType::State), - "fetch" => Ok(zaino_state::BackendType::Fetch), - _ => Err(de::Error::custom(format!( - "Invalid backend type '{s}', valid options are 'state' or 'fetch'" - ))), - } +use zaino_state::{BackendType, FetchServiceConfig, StateServiceConfig}; + +/// Sensitive key suffixes that should not be set via environment variables. +const SENSITIVE_KEY_SUFFIXES: [&str; 5] = ["password", "secret", "token", "cookie", "private_key"]; + +/// Checks if a key is sensitive and should not be set via environment variables. +fn is_sensitive_leaf_key(leaf_key: &str) -> bool { + let key = leaf_key.to_ascii_lowercase(); + SENSITIVE_KEY_SUFFIXES + .iter() + .any(|suffix| key.ends_with(suffix)) } /// Config information required for Zaino. 
#[derive(Debug, Clone, Deserialize, Serialize)] -#[serde(default)] +#[serde(deny_unknown_fields, default)] pub struct ZainodConfig { /// Type of backend to be used. - #[serde(deserialize_with = "deserialize_backendtype_from_string")] - #[serde(serialize_with = "serialize_backendtype_to_string")] - pub backend: zaino_state::BackendType, + pub backend: BackendType, /// Enable JsonRPC server with a valid Some value. - #[serde(default)] pub json_server_settings: Option, /// gRPC server settings including listen addr, tls status, key and cert. pub grpc_settings: GrpcServerConfig, @@ -62,9 +46,7 @@ pub struct ZainodConfig { pub service: ServiceConfig, /// Storage configuration (cache and database). pub storage: StorageConfig, - /// Block Cache database file path. - /// - /// ZebraDB location. + /// Block Cache database file path (ZebraDB location). pub zebra_db_path: PathBuf, /// Network chain type. pub network: Network, @@ -73,33 +55,21 @@ pub struct ZainodConfig { impl ZainodConfig { /// Performs checks on config data. pub(crate) fn check_config(&self) -> Result<(), IndexerError> { - // Network type is validated at the type level via Network enum. // Check TLS settings. 
if self.grpc_settings.tls.is_some() { - // then check if cert path exists or return error - let c_path = &self - .grpc_settings - .tls - .as_ref() - .expect("to be Some") - .cert_path; - if !std::path::Path::new(&c_path).exists() { + let tls = self.grpc_settings.tls.as_ref().expect("to be Some"); + + if !std::path::Path::new(&tls.cert_path).exists() { return Err(IndexerError::ConfigError(format!( "TLS is enabled, but certificate path {:?} does not exist.", - c_path + tls.cert_path ))); } - let k_path = &self - .grpc_settings - .tls - .as_ref() - .expect("to be Some") - .key_path; - if !std::path::Path::new(&k_path).exists() { + if !std::path::Path::new(&tls.key_path).exists() { return Err(IndexerError::ConfigError(format!( "TLS is enabled, but key path {:?} does not exist.", - k_path + tls.key_path ))); } } @@ -107,9 +77,10 @@ impl ZainodConfig { // Check validator cookie authentication settings if let Some(ref cookie_path) = self.validator_settings.validator_cookie_path { if !std::path::Path::new(cookie_path).exists() { - return Err(IndexerError::ConfigError( - format!("Validator cookie authentication is enabled, but cookie path '{:?}' does not exist.", cookie_path), - )); + return Err(IndexerError::ConfigError(format!( + "Validator cookie authentication is enabled, but cookie path '{:?}' does not exist.", + cookie_path + ))); } } @@ -167,17 +138,13 @@ impl ZainodConfig { } // Check gRPC and JsonRPC server are not listening on the same address. 
- if self.json_server_settings.is_some() - && self - .json_server_settings - .as_ref() - .expect("json_server_settings to be Some") - .json_rpc_listen_address - == self.grpc_settings.listen_address - { - return Err(IndexerError::ConfigError( - "gRPC server and JsonRPC server must listen on different addresses.".to_string(), - )); + if let Some(ref json_settings) = self.json_server_settings { + if json_settings.json_rpc_listen_address == self.grpc_settings.listen_address { + return Err(IndexerError::ConfigError( + "gRPC server and JsonRPC server must listen on different addresses." + .to_string(), + )); + } } Ok(()) @@ -192,7 +159,7 @@ impl ZainodConfig { impl Default for ZainodConfig { fn default() -> Self { Self { - backend: zaino_state::BackendType::Fetch, + backend: BackendType::default(), json_server_settings: None, grpc_settings: GrpcServerConfig { listen_address: "127.0.0.1:8137".parse().unwrap(), @@ -254,9 +221,7 @@ fn fetch_socket_addr_from_hostname(address: &str) -> Result bool { let ip = addr.ip(); match ip { @@ -265,76 +230,84 @@ pub(crate) fn is_private_listen_addr(addr: &SocketAddr) -> bool { } } -/// Attempts to load config data from a TOML file at the specified path. +/// Loads configuration from a TOML file with optional environment variable overrides. /// -/// If the file cannot be read, or if its contents cannot be parsed into `ZainodConfig`, -/// a warning is logged, and a default configuration is returned. -/// Finally, there is an override of the config using environmental variables. -/// The loaded or default configuration undergoes further checks and finalization. -pub fn load_config(file_path: &PathBuf) -> Result { - // Configuration sources are layered: Env > TOML > Defaults. - let figment = Figment::new() - // 1. Base defaults from `ZainodConfig::default()`. - .merge(Serialized::defaults(ZainodConfig::default())) - // 2. Override with values from the TOML configuration file. - .merge(Toml::file(file_path)) - // 3. 
Override with values from environment variables prefixed with "ZAINO_". - .merge(figment::providers::Env::prefixed("ZAINO_").split("__")); - - match figment.extract::() { - Ok(mut parsed_config) => { - if parsed_config - .json_server_settings - .clone() - .is_some_and(|json_settings| { - json_settings.cookie_dir.is_some() - && json_settings - .cookie_dir - .expect("cookie_dir to be Some") - .as_os_str() - // if the assigned pathbuf is empty (cookies enabled but no path defined). - .is_empty() - }) - { - if let Some(ref mut json_config) = parsed_config.json_server_settings { - json_config.cookie_dir = Some(default_ephemeral_cookie_path()); - } - }; +/// Configuration is layered: Defaults → TOML file → Environment variables (prefix: ZAINO_). +/// Sensitive keys (password, secret, token, cookie, private_key) are blocked from env vars. +pub fn load_config(file_path: &std::path::Path) -> Result { + load_config_with_env(file_path, "ZAINO") +} - parsed_config.check_config()?; - info!( - "Successfully loaded and validated config. Base TOML file checked: '{}'", - file_path.display() - ); - Ok(parsed_config) - } - Err(figment_error) => { - error!( - "Failed to extract configuration using figment: {}", - figment_error - ); - Err(IndexerError::ConfigError(format!( - "Zaino configuration loading failed during figment extract '{}' (could be TOML file or environment variables). Details: {}", - file_path.display(), figment_error - ))) +/// Loads configuration with a custom environment variable prefix. 
+pub fn load_config_with_env( + file_path: &std::path::Path, + env_prefix: &str, +) -> Result { + // Check for sensitive keys in environment variables before loading + let required_prefix = format!("{}_", env_prefix); + for (key, _) in std::env::vars() { + if let Some(without_prefix) = key.strip_prefix(&required_prefix) { + if let Some(leaf) = without_prefix.split("__").last() { + if is_sensitive_leaf_key(leaf) { + return Err(IndexerError::ConfigError(format!( + "Environment variable '{}' contains sensitive key '{}' - use config file instead", + key, leaf + ))); + } + } } } -} - -impl TryFrom for BackendConfig { - type Error = IndexerError; - #[allow(deprecated)] - fn try_from(cfg: ZainodConfig) -> Result { - match cfg.backend { - zaino_state::BackendType::State => { - Ok(BackendConfig::State(StateServiceConfig::try_from(cfg)?)) - } - zaino_state::BackendType::Fetch => { - Ok(BackendConfig::Fetch(FetchServiceConfig::try_from(cfg)?)) - } + let mut builder = config::Config::builder() + .set_default("backend", "fetch") + .map_err(|e| IndexerError::ConfigError(e.to_string()))?; + + // Add TOML file source + builder = builder.add_source( + config::File::from(file_path) + .format(config::FileFormat::Toml) + .required(true), + ); + + // Add environment variable source with ZAINO_ prefix and __ separator for nesting + // Note: config-rs lowercases all env var keys after stripping the prefix + builder = builder.add_source( + config::Environment::with_prefix(env_prefix) + .prefix_separator("_") + .separator("__") + .try_parsing(true), + ); + + let settings = builder + .build() + .map_err(|e| IndexerError::ConfigError(format!("Configuration loading failed: {}", e)))?; + + let mut parsed_config: ZainodConfig = settings + .try_deserialize() + .map_err(|e| IndexerError::ConfigError(format!("Configuration parsing failed: {}", e)))?; + + // Handle empty cookie_dir: if json_server_settings exists with empty cookie_dir, set default + if parsed_config + .json_server_settings + 
.as_ref() + .is_some_and(|json_settings| { + json_settings + .cookie_dir + .as_ref() + .is_some_and(|dir| dir.as_os_str().is_empty()) + }) + { + if let Some(ref mut json_config) = parsed_config.json_server_settings { + json_config.cookie_dir = Some(default_ephemeral_cookie_path()); } } + + parsed_config.check_config()?; + info!( + "Successfully loaded and validated config. Base TOML file checked: '{}'", + file_path.display() + ); + Ok(parsed_config) } #[allow(deprecated)] @@ -351,6 +324,7 @@ impl TryFrom for StateServiceConfig { "Missing validator_grpc_listen_address in configuration".to_string(), ) })?; + let validator_grpc_address = fetch_socket_addr_from_hostname(grpc_listen_address).map_err(|e| { let msg = match e { @@ -416,656 +390,582 @@ impl TryFrom for FetchServiceConfig { } } -/// Custom serializer for BackendType -fn serialize_backendtype_to_string( - backend_type: &zaino_state::BackendType, - serializer: S, -) -> Result -where - S: serde::Serializer, -{ - serializer.serialize_str(match backend_type { - zaino_state::BackendType::State => "state", - zaino_state::BackendType::Fetch => "fetch", - }) -} #[cfg(test)] -mod test { - use crate::error::IndexerError; +mod tests { + use super::*; + use std::{env, sync::Mutex}; + use tempfile::TempDir; + + const ZAINO_ENV_PREFIX: &str = "ZAINO_"; + static TEST_MUTEX: Mutex<()> = Mutex::new(()); + + /// RAII guard for managing environment variables in tests. + /// Ensures test isolation by clearing ZAINO_* vars before tests + /// and restoring original values after. 
+ struct EnvGuard { + _guard: std::sync::MutexGuard<'static, ()>, + original_vars: Vec<(String, String)>, + } - use super::ZainodConfig; + impl EnvGuard { + fn new() -> Self { + let guard = TEST_MUTEX.lock().unwrap_or_else(|e| e.into_inner()); + let original_vars: Vec<_> = env::vars() + .filter(|(k, _)| k.starts_with(ZAINO_ENV_PREFIX)) + .collect(); + // Clear all ZAINO_* vars for test isolation + for (key, _) in &original_vars { + env::remove_var(key); + } + Self { + _guard: guard, + original_vars, + } + } - use super::load_config; + fn set_var(&self, key: &str, value: &str) { + env::set_var(key, value); + } + } - use figment::Jail; + impl Drop for EnvGuard { + fn drop(&mut self) { + // Clear test vars + for (k, _) in env::vars().filter(|(k, _)| k.starts_with(ZAINO_ENV_PREFIX)) { + env::remove_var(&k); + } + // Restore originals + for (k, v) in &self.original_vars { + env::set_var(k, v); + } + } + } - use std::path::PathBuf; + fn create_test_config_file(dir: &TempDir, content: &str, filename: &str) -> PathBuf { + let path = dir.path().join(filename); + std::fs::write(&path, content).unwrap(); + path + } - use zaino_common::{DatabaseSize, Network}; + #[test] + fn test_deserialize_full_valid_config() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + // Create mock files + let cert_file = temp_dir.path().join("test_cert.pem"); + let key_file = temp_dir.path().join("test_key.pem"); + let validator_cookie_file = temp_dir.path().join("validator.cookie"); + let zaino_cookie_dir = temp_dir.path().join("zaino_cookies_dir"); + let zaino_db_dir = temp_dir.path().join("zaino_db_dir"); + let zebra_db_dir = temp_dir.path().join("zebra_db_dir"); + + std::fs::write(&cert_file, "mock cert content").unwrap(); + std::fs::write(&key_file, "mock key content").unwrap(); + std::fs::write(&validator_cookie_file, "mock validator cookie content").unwrap(); + std::fs::create_dir_all(&zaino_cookie_dir).unwrap(); + std::fs::create_dir_all(&zaino_db_dir).unwrap(); 
+ std::fs::create_dir_all(&zebra_db_dir).unwrap(); + + let toml_content = format!( + r#" +backend = "fetch" +zebra_db_path = "{}" +network = "Mainnet" + +[storage.database] +path = "{}" + +[validator_settings] +validator_jsonrpc_listen_address = "192.168.1.10:18232" +validator_cookie_path = "{}" +validator_user = "user" +validator_password = "password" + +[json_server_settings] +json_rpc_listen_address = "127.0.0.1:8000" +cookie_dir = "{}" + +[grpc_settings] +listen_address = "0.0.0.0:9000" + +[grpc_settings.tls] +cert_path = "{}" +key_path = "{}" +"#, + zebra_db_dir.display(), + zaino_db_dir.display(), + validator_cookie_file.display(), + zaino_cookie_dir.display(), + cert_file.display(), + key_file.display(), + ); + + let config_path = create_test_config_file(&temp_dir, &toml_content, "full_config.toml"); + let config = load_config(&config_path).expect("load_config failed"); + + assert_eq!(config.backend, BackendType::Fetch); + assert!(config.json_server_settings.is_some()); + assert_eq!( + config + .json_server_settings + .as_ref() + .unwrap() + .json_rpc_listen_address, + "127.0.0.1:8000".parse().unwrap() + ); + assert_eq!(config.network, Network::Mainnet); + assert_eq!( + config.grpc_settings.listen_address, + "0.0.0.0:9000".parse().unwrap() + ); + assert!(config.grpc_settings.tls.is_some()); + assert_eq!( + config.validator_settings.validator_user, + Some("user".to_string()) + ); + assert_eq!( + config.validator_settings.validator_password, + Some("password".to_string()) + ); + } - // Use the explicit library name `zainodlib` as defined in Cargo.toml [lib] name. 
+ #[test] + fn test_deserialize_optional_fields_missing() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +backend = "state" +network = "Testnet" +zebra_db_path = "/opt/zebra/data" + +[storage.database] +path = "/opt/zaino/data" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "optional_missing.toml"); + let config = load_config(&config_path).expect("load_config failed"); + let default_values = ZainodConfig::default(); + + assert_eq!(config.backend, BackendType::State); + assert!(config.json_server_settings.is_none()); + assert_eq!( + config.validator_settings.validator_user, + default_values.validator_settings.validator_user + ); + assert_eq!( + config.storage.cache.capacity, + default_values.storage.cache.capacity + ); + } - // If BackendType is used directly in assertions beyond what IndexerConfig holds: - use zaino_state::BackendType as ZainoBackendType; + #[test] + fn test_cookie_dir_logic() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + // Scenario 1: auth enabled, cookie_dir empty (should use default ephemeral path) + let toml_content = r#" +backend = "fetch" +network = "Testnet" +zebra_db_path = "/zebra/db" + +[storage.database] +path = "/zaino/db" + +[json_server_settings] +json_rpc_listen_address = "127.0.0.1:8237" +cookie_dir = "" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "s1.toml"); + let config = load_config(&config_path).expect("Config S1 failed"); + assert!(config.json_server_settings.is_some()); + assert!(config + .json_server_settings + .as_ref() + .unwrap() + .cookie_dir + .is_some()); + + // Scenario 2: auth enabled, cookie_dir specified + 
let toml_content2 = r#" +backend = "fetch" +network = "Testnet" +zebra_db_path = "/zebra/db" + +[storage.database] +path = "/zaino/db" + +[json_server_settings] +json_rpc_listen_address = "127.0.0.1:8237" +cookie_dir = "/my/cookie/path" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path2 = create_test_config_file(&temp_dir, toml_content2, "s2.toml"); + let config2 = load_config(&config_path2).expect("Config S2 failed"); + assert_eq!( + config2.json_server_settings.as_ref().unwrap().cookie_dir, + Some(PathBuf::from("/my/cookie/path")) + ); + + // Scenario 3: cookie_dir not specified (should be None) + let toml_content3 = r#" +backend = "fetch" +network = "Testnet" +zebra_db_path = "/zebra/db" + +[storage.database] +path = "/zaino/db" + +[json_server_settings] +json_rpc_listen_address = "127.0.0.1:8237" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path3 = create_test_config_file(&temp_dir, toml_content3, "s3.toml"); + let config3 = load_config(&config_path3).expect("Config S3 failed"); + assert!(config3.json_server_settings.unwrap().cookie_dir.is_none()); + } #[test] - // Validates loading a valid configuration via `load_config`, - // ensuring fields are parsed and `check_config` passes with mocked prerequisite files. 
- pub(crate) fn test_deserialize_full_valid_config() { - Jail::expect_with(|jail| { - // Define RELATIVE paths/filenames for use within the jail - let cert_file_name = "test_cert.pem"; - let key_file_name = "test_key.pem"; - let validator_cookie_file_name = "validator.cookie"; - let zaino_cookie_dir_name = "zaino_cookies_dir"; - let zaino_db_dir_name = "zaino_db_dir"; - let zebra_db_dir_name = "zebra_db_dir"; - - // Create the directories within the jail FIRST - jail.create_dir(zaino_cookie_dir_name)?; - jail.create_dir(zaino_db_dir_name)?; - jail.create_dir(zebra_db_dir_name)?; - - // Use relative paths in the TOML string - let toml_str = format!( - r#" - backend = "fetch" - storage.database.path = "{zaino_db_dir_name}" - zebra_db_path = "{zebra_db_dir_name}" - db_size = 100 - network = "Mainnet" - no_db = false - slow_sync = false - - [validator_settings] - validator_jsonrpc_listen_address = "192.168.1.10:18232" - validator_cookie_path = "{validator_cookie_file_name}" - validator_user = "user" - validator_password = "password" - - [json_server_settings] - json_rpc_listen_address = "127.0.0.1:8000" - cookie_dir = "{zaino_cookie_dir_name}" - - [grpc_settings] - listen_address = "0.0.0.0:9000" - - [grpc_settings.tls] - cert_path = "{cert_file_name}" - key_path = "{key_file_name}" - "# - ); + fn test_deserialize_empty_string_yields_default() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + // Minimal valid config + let toml_content = r#" +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "empty.toml"); + let config = load_config(&config_path).expect("Empty TOML load failed"); + let default_config = ZainodConfig::default(); + + assert_eq!(config.network, default_config.network); + assert_eq!(config.backend, default_config.backend); + 
assert_eq!( + config.storage.cache.capacity, + default_config.storage.cache.capacity + ); + } - let temp_toml_path = jail.directory().join("full_config.toml"); - jail.create_file(&temp_toml_path, &toml_str)?; + #[test] + fn test_deserialize_invalid_backend_type() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); - // Create the actual mock files within the jail using the relative names - jail.create_file(cert_file_name, "mock cert content")?; - jail.create_file(key_file_name, "mock key content")?; - jail.create_file(validator_cookie_file_name, "mock validator cookie content")?; + let toml_content = r#" +backend = "invalid_type" - let config_result = load_config(&temp_toml_path); - assert!( - config_result.is_ok(), - "load_config failed: {:?}", - config_result.err() - ); - let finalized_config = config_result.unwrap(); - - assert_eq!(finalized_config.backend, ZainoBackendType::Fetch); - assert!(finalized_config.json_server_settings.is_some()); - assert_eq!( - finalized_config - .json_server_settings - .as_ref() - .expect("json settings to be Some") - .json_rpc_listen_address, - "127.0.0.1:8000".parse().unwrap() - ); - assert_eq!( - finalized_config - .json_server_settings - .as_ref() - .expect("json settings to be Some") - .cookie_dir, - Some(PathBuf::from(zaino_cookie_dir_name)) - ); - assert_eq!( - finalized_config - .clone() - .grpc_settings - .tls - .expect("tls to be Some in finalized conifg") - .cert_path, - PathBuf::from(cert_file_name) - ); - assert_eq!( - finalized_config - .clone() - .grpc_settings - .tls - .expect("tls to be Some in finalized_conifg") - .key_path, - PathBuf::from(key_file_name) - ); - assert_eq!( - finalized_config.validator_settings.validator_cookie_path, - Some(PathBuf::from(validator_cookie_file_name)) - ); - assert_eq!( - finalized_config.storage.database.path, - PathBuf::from(zaino_db_dir_name) - ); - assert_eq!( - finalized_config.zebra_db_path, - PathBuf::from(zebra_db_dir_name) - ); - 
assert_eq!(finalized_config.network, Network::Mainnet); - assert_eq!( - finalized_config.grpc_settings.listen_address, - "0.0.0.0:9000".parse().unwrap() - ); - assert!(finalized_config.grpc_settings.tls.is_some()); - assert_eq!( - finalized_config.validator_settings.validator_user, - Some("user".to_string()) - ); - assert_eq!( - finalized_config.validator_settings.validator_password, - Some("password".to_string()) - ); - assert_eq!(finalized_config.storage.cache.capacity, 10000); - assert_eq!(finalized_config.storage.cache.shard_count(), 16); - assert_eq!( - finalized_config.storage.database.size.to_byte_count(), - 128 * 1024 * 1024 * 1024 - ); - assert!(match finalized_config.storage.database.size { - DatabaseSize::Gb(0) => false, - DatabaseSize::Gb(_) => true, - }); +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" - Ok(()) - }); - } +[storage.database] +path = "/zaino/db" - #[test] - // Verifies that when optional fields are omitted from TOML, `load_config` ensures they correctly adopt default values. 
- pub(crate) fn test_deserialize_optional_fields_missing() { - Jail::expect_with(|jail| { - let toml_str = r#" - backend = "state" - json_rpc_listen_address = "127.0.0.1:8237" - grpc_listen_address = "127.0.0.1:8137" - validator_listen_address = "127.0.0.1:18232" - zaino_db_path = "/opt/zaino/data" - zebra_db_path = "/opt/zebra/data" - network = "Testnet" - "#; - let temp_toml_path = jail.directory().join("optional_missing.toml"); - jail.create_file(&temp_toml_path, toml_str)?; - - let config = load_config(&temp_toml_path).expect("load_config failed"); - let default_values = ZainodConfig::default(); - - assert_eq!(config.backend, ZainoBackendType::State); - assert_eq!( - config.json_server_settings.is_some(), - default_values.json_server_settings.is_some() - ); - assert_eq!( - config.validator_settings.validator_user, - default_values.validator_settings.validator_user - ); - assert_eq!( - config.validator_settings.validator_password, - default_values.validator_settings.validator_password - ); - assert_eq!( - config.storage.cache.capacity, - default_values.storage.cache.capacity - ); - assert_eq!( - config.storage.cache.shard_count(), - default_values.storage.cache.shard_count(), - ); - assert_eq!( - config.storage.database.size, - default_values.storage.database.size - ); - assert_eq!( - config.storage.database.size.to_byte_count(), - default_values.storage.database.size.to_byte_count() - ); - Ok(()) - }); - } +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; - #[test] - // Tests the logic (via `load_config` and its internal call to `finalize_config_logic`) - // for setting `cookie_dir` based on `enable_cookie_auth`. 
- pub(crate) fn test_cookie_dir_logic() { - Jail::expect_with(|jail| { - // Scenario 1: auth enabled, cookie_dir missing (should use default ephemeral path) - let s1_path = jail.directory().join("s1.toml"); - jail.create_file( - &s1_path, - r#" - backend = "fetch" - - [json_server_settings] - json_rpc_listen_address = "127.0.0.1:8237" - cookie_dir = "" - - grpc_listen_address = "127.0.0.1:8137" - validator_listen_address = "127.0.0.1:18232" - zaino_db_path = "/zaino/db" - zebra_db_path = "/zebra/db" - network = "Testnet" - "#, - )?; - - let config1 = load_config(&s1_path).expect("Config S1 failed"); - assert!(config1.json_server_settings.is_some()); - assert!(config1 - .json_server_settings - .as_ref() - .expect("json settings is Some") - .cookie_dir - .is_some()); - - // Scenario 2: auth enabled, cookie_dir specified - let s2_path = jail.directory().join("s2.toml"); - jail.create_file( - &s2_path, - r#" - backend = "fetch" - - [json_server_settings] - json_rpc_listen_address = "127.0.0.1:8237" - cookie_dir = "/my/cookie/path" - - grpc_listen_address = "127.0.0.1:8137" - validator_listen_address = "127.0.0.1:18232" - zaino_db_path = "/zaino/db" - zebra_db_path = "/zebra/db" - network = "Testnet" - "#, - )?; - let config2 = load_config(&s2_path).expect("Config S2 failed"); - assert!(config2.json_server_settings.is_some()); - assert_eq!( - config2 - .json_server_settings - .as_ref() - .expect("json settings to be Some") - .cookie_dir, - Some(PathBuf::from("/my/cookie/path")) + let config_path = create_test_config_file(&temp_dir, toml_content, "invalid_backend.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); + if let Err(IndexerError::ConfigError(msg)) = result { + assert!( + msg.contains("unknown variant") || msg.contains("invalid_type"), + "Unexpected error message: {}", + msg ); - let s3_path = jail.directory().join("s3.toml"); - jail.create_file( - &s3_path, - r#" - backend = "fetch" - - [json_server_settings] - json_rpc_listen_address 
= "127.0.0.1:8237" - - grpc_listen_address = "127.0.0.1:8137" - validator_listen_address = "127.0.0.1:18232" - zaino_db_path = "/zaino/db" - zebra_db_path = "/zebra/db" - network = "Testnet" - "#, - )?; - let config3 = load_config(&s3_path).expect("Config S3 failed"); - assert!(config3 - .json_server_settings - .expect("json server settings to unwrap in config S3") - .cookie_dir - .is_none()); - Ok(()) - }); + } } #[test] - pub(crate) fn test_string_none_as_path_for_cookie_dir() { - Jail::expect_with(|jail| { - let toml_auth_enabled_path = jail.directory().join("auth_enabled.toml"); - // cookie auth on but no dir assigned - jail.create_file( - &toml_auth_enabled_path, - r#" - backend = "fetch" - grpc_listen_address = "127.0.0.1:8137" - validator_listen_address = "127.0.0.1:18232" - zaino_db_path = "/zaino/db" - zebra_db_path = "/zebra/db" - network = "Testnet" - - [json_server_settings] - json_rpc_listen_address = "127.0.0.1:8237" - cookie_dir = "" - "#, - )?; - let config_auth_enabled = - load_config(&toml_auth_enabled_path).expect("Auth enabled failed"); - assert!(config_auth_enabled.json_server_settings.is_some()); - assert!(config_auth_enabled - .json_server_settings - .as_ref() - .expect("json settings to be Some") - .cookie_dir - .is_some()); - - // omitting cookie_dir will set it to None - let toml_auth_disabled_path = jail.directory().join("auth_disabled.toml"); - jail.create_file( - &toml_auth_disabled_path, - r#" - backend = "fetch" - - [json_server_settings] - json_rpc_listen_address = "127.0.0.1:8237" - - grpc_listen_address = "127.0.0.1:8137" - validator_listen_address = "127.0.0.1:18232" - zaino_db_path = "/zaino/db" - zebra_db_path = "/zebra/db" - network = "Testnet" - "#, - )?; - let config_auth_disabled = - load_config(&toml_auth_disabled_path).expect("Auth disabled failed"); - assert!(config_auth_disabled.json_server_settings.is_some()); - assert_eq!( - config_auth_disabled - .json_server_settings - .as_ref() - .expect("json settings to be Some") 
- .cookie_dir, - None - ); - Ok(()) - }); + fn test_deserialize_invalid_socket_address() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +[json_server_settings] +json_rpc_listen_address = "not-a-valid-address" +cookie_dir = "" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "invalid_socket.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); } #[test] - // Checks that `load_config` with an empty TOML string results in the default `IndexerConfig` values. - pub(crate) fn test_deserialize_empty_string_yields_default() { - Jail::expect_with(|jail| { - let empty_toml_path = jail.directory().join("empty.toml"); - jail.create_file(&empty_toml_path, "")?; - let config = load_config(&empty_toml_path).expect("Empty TOML load failed"); - let default_config = ZainodConfig::default(); - // Compare relevant fields that should come from default - assert_eq!(config.network, default_config.network); - assert_eq!(config.backend, default_config.backend); - assert_eq!( - config.json_server_settings.is_some(), - default_config.json_server_settings.is_some() - ); - assert_eq!( - config.validator_settings.validator_user, - default_config.validator_settings.validator_user - ); - assert_eq!( - config.validator_settings.validator_password, - default_config.validator_settings.validator_password - ); - assert_eq!( - config.storage.cache.capacity, - default_config.storage.cache.capacity - ); - assert_eq!( - config.storage.cache.shard_count(), - default_config.storage.cache.shard_count() - ); - assert_eq!( - config.storage.database.size, - default_config.storage.database.size - ); - assert_eq!( - config.storage.database.size.to_byte_count(), - default_config.storage.database.size.to_byte_count() - ); - Ok(()) - }); + 
fn test_parse_zindexer_toml_integration() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + let zindexer_toml_content = include_str!("../zindexer.toml"); + + let config_path = + create_test_config_file(&temp_dir, zindexer_toml_content, "zindexer_test.toml"); + let config = load_config(&config_path).expect("load_config failed to parse zindexer.toml"); + let defaults = ZainodConfig::default(); + + assert_eq!(config.backend, BackendType::Fetch); + assert_eq!( + config.validator_settings.validator_user, + defaults.validator_settings.validator_user + ); } #[test] - // Ensures `load_config` returns an error for an invalid `backend` type string in TOML. - pub(crate) fn test_deserialize_invalid_backend_type() { - Jail::expect_with(|jail| { - let invalid_toml_path = jail.directory().join("invalid_backend.toml"); - jail.create_file(&invalid_toml_path, r#"backend = "invalid_type""#)?; - let result = load_config(&invalid_toml_path); - assert!(result.is_err()); - if let Err(IndexerError::ConfigError(msg)) = result { - assert!(msg.contains("Invalid backend type")); - } - Ok(()) - }); + fn test_env_override_toml_and_defaults() { + let guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +network = "Testnet" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + guard.set_var("ZAINO_NETWORK", "Mainnet"); + guard.set_var( + "ZAINO_JSON_SERVER_SETTINGS__JSON_RPC_LISTEN_ADDRESS", + "127.0.0.1:0", + ); + guard.set_var("ZAINO_JSON_SERVER_SETTINGS__COOKIE_DIR", "/env/cookie/path"); + guard.set_var("ZAINO_STORAGE__CACHE__CAPACITY", "12345"); + + let config_path = create_test_config_file(&temp_dir, toml_content, "test_config.toml"); + let config = load_config(&config_path).expect("load_config should succeed"); + + assert_eq!(config.network, Network::Mainnet); + 
assert_eq!(config.storage.cache.capacity, 12345); + assert!(config.json_server_settings.is_some()); + assert_eq!( + config.json_server_settings.as_ref().unwrap().cookie_dir, + Some(PathBuf::from("/env/cookie/path")) + ); } #[test] - // Ensures `load_config` returns an error for an invalid socket address string in TOML. - pub(crate) fn test_deserialize_invalid_socket_address() { - Jail::expect_with(|jail| { - let invalid_toml_path = jail.directory().join("invalid_socket.toml"); - jail.create_file( - &invalid_toml_path, - r#" - [json_server_settings] - json_rpc_listen_address = "not-a-valid-address" - cookie_dir = "" - "#, - )?; - let result = load_config(&invalid_toml_path); - assert!(result.is_err()); - if let Err(IndexerError::ConfigError(msg)) = result { - assert!(msg.contains("invalid socket address syntax")); - } - Ok(()) - }); + fn test_toml_overrides_defaults() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + // json_server_settings without a listening address is forbidden + let toml_content = r#" +network = "Regtest" + +[json_server_settings] +json_rpc_listen_address = "" +cookie_dir = "" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "test_config.toml"); + assert!(load_config(&config_path).is_err()); } #[test] - // Validates that the actual zindexer.toml file (with optional values commented out) - // is parsed correctly by `load_config`, applying defaults for missing optional fields. 
- pub(crate) fn test_parse_zindexer_toml_integration() { - let zindexer_toml_content = include_str!("../zindexer.toml"); + fn test_invalid_env_var_type() { + let guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); - Jail::expect_with(|jail| { - let temp_toml_path = jail.directory().join("zindexer_test.toml"); - jail.create_file(&temp_toml_path, zindexer_toml_content)?; + let toml_content = r#" +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" - let config_result = load_config(&temp_toml_path); - assert!( - config_result.is_ok(), - "load_config failed to parse zindexer.toml: {:?}", - config_result.err() - ); - let config = config_result.unwrap(); - let defaults = ZainodConfig::default(); +[storage.database] +path = "/zaino/db" - assert_eq!(config.backend, ZainoBackendType::Fetch); - assert_eq!( - config.validator_settings.validator_user, - defaults.validator_settings.validator_user - ); +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; - Ok(()) - }); + guard.set_var("ZAINO_STORAGE__CACHE__CAPACITY", "not_a_number"); + + let config_path = create_test_config_file(&temp_dir, toml_content, "test_config.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); } - // Figment-specific tests below are generally self-descriptive by name #[test] - pub(crate) fn test_figment_env_override_toml_and_defaults() { - Jail::expect_with(|jail| { - jail.create_file( - "test_config.toml", - r#" - network = "Testnet" - "#, - )?; - jail.set_env("ZAINO_NETWORK", "Mainnet"); - jail.set_env( - "ZAINO_JSON_SERVER_SETTINGS__JSON_RPC_LISTEN_ADDRESS", - "127.0.0.1:0", - ); - jail.set_env("ZAINO_JSON_SERVER_SETTINGS__COOKIE_DIR", "/env/cookie/path"); - jail.set_env("ZAINO_STORAGE__CACHE__CAPACITY", "12345"); - - let temp_toml_path = jail.directory().join("test_config.toml"); - let config = load_config(&temp_toml_path).expect("load_config should succeed"); - - assert_eq!(config.network, Network::Mainnet); - 
assert_eq!(config.storage.cache.capacity, 12345); - assert!(config.json_server_settings.is_some()); - assert_eq!( - config - .json_server_settings - .as_ref() - .expect("json settings to be Some") - .cookie_dir, - Some(PathBuf::from("/env/cookie/path")) - ); - assert!(config.grpc_settings.tls.is_none()); - Ok(()) - }); + fn test_cookie_auth_not_forced_for_non_loopback_ip() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +backend = "fetch" +network = "Testnet" + +[validator_settings] +validator_jsonrpc_listen_address = "192.168.1.10:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "no_cookie_auth.toml"); + let config_result = load_config(&config_path); + assert!( + config_result.is_ok(), + "Non-loopback IP without cookie auth should succeed. Error: {:?}", + config_result.err() + ); + + let config = config_result.unwrap(); + assert!(config.validator_settings.validator_cookie_path.is_none()); } #[test] - pub(crate) fn test_figment_toml_overrides_defaults() { - Jail::expect_with(|jail| { - jail.create_file( - "test_config.toml", - r#" - network = "Regtest" - - [json_server_settings] - json_rpc_listen_address = "" - cookie_dir = "" - "#, - )?; - let temp_toml_path = jail.directory().join("test_config.toml"); - // a json_server_setting without a listening address is forbidden - assert!(load_config(&temp_toml_path).is_err()); - Ok(()) - }); + fn test_public_ip_still_rejected() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +backend = "fetch" +network = "Testnet" + +[validator_settings] +validator_jsonrpc_listen_address = "8.8.8.8:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "public_ip.toml"); + let 
result = load_config(&config_path); + assert!(result.is_err()); + + if let Err(IndexerError::ConfigError(msg)) = result { + assert!(msg.contains("private IP")); + } } #[test] - pub(crate) fn test_figment_all_defaults() { - Jail::expect_with(|jail| { - jail.create_file("empty_config.toml", "")?; - let temp_toml_path = jail.directory().join("empty_config.toml"); - let config = - load_config(&temp_toml_path).expect("load_config should succeed with empty toml"); - let defaults = ZainodConfig::default(); - assert_eq!(config.network, defaults.network); - assert_eq!( - config.json_server_settings.is_some(), - defaults.json_server_settings.is_some() - ); - assert_eq!( - config.storage.cache.capacity, - defaults.storage.cache.capacity - ); - Ok(()) - }); + fn test_sensitive_env_var_blocked() { + let guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + guard.set_var("ZAINO_VALIDATOR_SETTINGS__VALIDATOR_PASSWORD", "secret123"); + + let config_path = + create_test_config_file(&temp_dir, toml_content, "sensitive_env_test.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); + + if let Err(IndexerError::ConfigError(msg)) = result { + assert!(msg.contains("sensitive key")); + assert!(msg.contains("VALIDATOR_PASSWORD")); + } } #[test] - pub(crate) fn test_figment_invalid_env_var_type() { - Jail::expect_with(|jail| { - jail.create_file("test_config.toml", "")?; - jail.set_env("ZAINO_STORAGE__CACHE__CAPACITY", "not_a_number"); - let temp_toml_path = jail.directory().join("test_config.toml"); - let result = load_config(&temp_toml_path); - assert!(result.is_err()); - if let Err(IndexerError::ConfigError(msg)) = result { - assert!(msg.to_lowercase().contains("storage.cache.capacity") && msg.contains("invalid type"), - "Error message should 
mention 'map_capacity' (case-insensitive) and 'invalid type'. Got: {msg}"); - } else { - panic!("Expected ConfigError, got {result:?}"); - } - Ok(()) - }); + fn test_sensitive_key_detection() { + assert!(is_sensitive_leaf_key("password")); + assert!(is_sensitive_leaf_key("PASSWORD")); + assert!(is_sensitive_leaf_key("validator_password")); + assert!(is_sensitive_leaf_key("VALIDATOR_PASSWORD")); + assert!(is_sensitive_leaf_key("secret")); + assert!(is_sensitive_leaf_key("api_token")); + assert!(is_sensitive_leaf_key("cookie")); + assert!(is_sensitive_leaf_key("private_key")); + + assert!(!is_sensitive_leaf_key("username")); + assert!(!is_sensitive_leaf_key("address")); + assert!(!is_sensitive_leaf_key("network")); } #[test] - /// Validates that cookie authentication is config-based, not address-type-based. - /// Non-loopback private IPs should work without cookie auth (operator's choice). - pub(crate) fn test_cookie_auth_not_forced_for_non_loopback_ip() { - Jail::expect_with(|jail| { - // Non-loopback private IP (192.168.x.x) WITHOUT cookie auth should succeed - let toml_str = r#" - backend = "fetch" - network = "Testnet" - - [validator_settings] - validator_jsonrpc_listen_address = "192.168.1.10:18232" - # Note: NO validator_cookie_path - this is intentional - - [grpc_settings] - listen_address = "127.0.0.1:8137" - "#; - let temp_toml_path = jail.directory().join("no_cookie_auth.toml"); - jail.create_file(&temp_toml_path, toml_str)?; - - let config_result = load_config(&temp_toml_path); - assert!( - config_result.is_ok(), - "Non-loopback IP without cookie auth should succeed. \ - Cookie auth is config-based, not address-type-based. 
Error: {:?}", - config_result.err() - ); + fn test_unknown_fields_rejected() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); - let config = config_result.unwrap(); - assert!( - config.validator_settings.validator_cookie_path.is_none(), - "Cookie path should be None as configured" - ); + let toml_content = r#" +unknown_field = "value" - Ok(()) - }); - } +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" - #[test] - /// Validates symmetric behavior: both IP and hostname addresses respect configuration. - /// Public IPs should still be rejected (private IP requirement remains). - pub(crate) fn test_public_ip_still_rejected() { - Jail::expect_with(|jail| { - // Public IP should be rejected regardless of cookie auth - let toml_str = r#" - backend = "fetch" - network = "Testnet" - - [validator_settings] - validator_jsonrpc_listen_address = "8.8.8.8:18232" - - [grpc_settings] - listen_address = "127.0.0.1:8137" - "#; - let temp_toml_path = jail.directory().join("public_ip.toml"); - jail.create_file(&temp_toml_path, toml_str)?; - - let config_result = load_config(&temp_toml_path); - assert!( - config_result.is_err(), - "Public IP should be rejected - private IP requirement still applies" - ); +[storage.database] +path = "/zaino/db" - if let Err(IndexerError::ConfigError(msg)) = config_result { - assert!( - msg.contains("private IP"), - "Error should mention private IP requirement. 
Got: {msg}" - ); - } +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; - Ok(()) - }); + let config_path = create_test_config_file(&temp_dir, toml_content, "unknown_fields.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); } } diff --git a/zainod/src/indexer.rs b/zainod/src/indexer.rs index 5b18bb5a5..91329fe9d 100644 --- a/zainod/src/indexer.rs +++ b/zainod/src/indexer.rs @@ -5,11 +5,10 @@ use tracing::info; use zaino_fetch::jsonrpsee::connector::test_node_and_return_url; use zaino_serve::server::{config::GrpcServerConfig, grpc::TonicServer, jsonrpc::JsonRpcServer}; - #[allow(deprecated)] use zaino_state::{ - BackendConfig, FetchService, IndexerService, LightWalletService, StateService, StatusType, - ZcashIndexer, ZcashService, + BackendType, FetchService, FetchServiceConfig, IndexerService, LightWalletService, + StateService, StateServiceConfig, StatusType, ZcashIndexer, ZcashService, }; use crate::{config::ZainodConfig, error::IndexerError}; @@ -38,7 +37,6 @@ pub async fn start_indexer( } /// Spawns a new Indexer server. 
-#[allow(deprecated)] pub async fn spawn_indexer( config: ZainodConfig, ) -> Result>, IndexerError> { @@ -56,18 +54,21 @@ pub async fn spawn_indexer( " - Connected to node using JsonRPSee at address {}.", zebrad_uri ); - match BackendConfig::try_from(config.clone()) { - Ok(BackendConfig::State(state_service_config)) => { - Indexer::::launch_inner(state_service_config, config) + + #[allow(deprecated)] + match config.backend { + BackendType::State => { + let state_config = StateServiceConfig::try_from(config.clone())?; + Indexer::::launch_inner(state_config, config) .await .map(|res| res.0) } - Ok(BackendConfig::Fetch(fetch_service_config)) => { - Indexer::::launch_inner(fetch_service_config, config) + BackendType::Fetch => { + let fetch_config = FetchServiceConfig::try_from(config.clone())?; + Indexer::::launch_inner(fetch_config, config) .await .map(|res| res.0) } - Err(e) => Err(e), } } From 5a6a525ac0e5ddf1cc6d0ebb291890c1754abee8 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 27 Jan 2026 11:01:26 +0000 Subject: [PATCH 047/114] docs(config): fix outdated comment about cookie auth requirement The comment incorrectly stated cookie auth was required for non-localhost addresses. Update to reflect actual behavior: authentication is recommended but not enforced. --- zainod/zindexer.toml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/zainod/zindexer.toml b/zainod/zindexer.toml index e856aa6a7..24c1cb210 100644 --- a/zainod/zindexer.toml +++ b/zainod/zindexer.toml @@ -40,13 +40,11 @@ backend = "fetch" # Validator config: # Required for valid zainod config. [validator_settings] - # Full node / validator listen address. + # Full node / validator gRPC listen address (Zebra only). # - # Must be a "private" address as defined in [IETF RFC 1918] for ipv4 addreses and [IETF RFC 4193] for ipv6 addreses. - # - # Must use validator rpc cookie authentication when connecting to non localhost addresses. 
- # Required - validator_grpc_listen_address = "127.0.0.1:18232" + # Must be a "private" address as defined in [IETF RFC 1918] for IPv4 or [IETF RFC 4193] for IPv6. + # Cookie or user/password authentication is recommended for non-localhost addresses. + validator_grpc_listen_address = "127.0.0.1:18232" # SocketAddr, Required. validator_jsonrpc_listen_address = "127.0.0.1:18230" From 67c5789d280573b5530854377040e7c399430501 Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Sun, 28 Dec 2025 17:08:45 -0300 Subject: [PATCH 048/114] Squashed 'zaino-proto/lightwallet-protocol/' changes from b314868d..23f0768e 23f0768e Release lightwallet-protocol v0.4.0 41156c76 Merge pull request #11 from zcash/feature/get_mempool_tx_pools 7c130e88 Add `lightwalletProtocolVersion` field to `LightdInfo` struct. edbb726d Apply suggestion from code review 38fddd73 Apply suggestions from code review 0250f272 Add pool type filtering to `GetMempoolTx` argument. 54ccaadd Change semantics of pool-based pruning of compact transactions from "may prune" to "must prune". b0667ec9 Merge pull request #9 from zcash/2025-11-doc-TransparentAddressBlockFilter f3fea7bd doc: TransparentAddressBlockFilter doesn't include mempool a67dd323 Merge pull request #8 from zcash/2025-11-lightdinfo-upgrade-info 11da4b7e add next upgrade info to LightdInfo structure (GetLightdInfo) 42cd8f72 Transparent data docs update (#7) c0cf957a Merge pull request #5 from zcash/2025-11-comments 912fc360 Minor clarification in GetBlockRange documentation. 6b03f2cc Documentation (comments) only d978256a Merge pull request #1 from zcash/compact_tx_transparent 7eeb82e7 Merge pull request #4 from zcash/add_changelog a95359dc Apply suggestions from code review 592b637a Add transparent data to the `CompactBlock` format. 9d1fb2c4 Add a CHANGELOG.md that documents the evolution of the light client protocol. 
180717df Merge pull request #3 from zcash/merge_librustzcash_history 450bd418 Merge the history of the .proto files from `librustzcash` for complete history preservation. a4859d11 Move protobuf files into place for use in `zcash/lightwallet-protocol` 2e66cdd9 Update zcash_client_backend/proto/service.proto eda01251 fix comment f838d10a Add gRPC LightdInfo Donation Address db12c041 Merge pull request #1473 from nuttycom/wallet/enrichment_queue 698feba9 Apply suggestions from code review 20ce57ab zcash_client_backend: Add `block_height` argument to `decrypt_and_store_transaction` a6dea1da Merge pull request #1482 from zancas/doc_tweak 4d2d45fc fix incorrect doc-comment e826f474 update CompactBlock doc-comment, to cover non-Sapling shielded notes, and addresses e9a6c00b Various documentation improvements 988bc721 Merge pull request #872 from nuttycom/feature/pre_dag_sync-suggest_scan_ranges 58d07d46 Implement `suggest_scan_ranges` and `update_chain_tip` a9222b33 Address comments from code review. e2031085 Rename proto::compact::{BlockMetadata => ChainMetadata} ac63418c Reorganize Sapling and Orchard note commitment tree sizes in CompactBlock. 0fdca14f zcash_client_backend: Add note commitment tree sizes to `CompactBlock` serialization. 
2a0c2b8b zcash_client_backend: Add gRPC bindings behind feature flag 1342f048 zcash_client_backend: Address compact_formats.proto comments 68aa4e01 zcash_client_backend: Bring in latest `compact_formats.proto` e712eb1b Add prevHash field to CompactBlock 440384c3 Build protobufs for compact formats git-subtree-dir: zaino-proto/lightwallet-protocol git-subtree-split: 23f0768ea4471b63285f3c0e9b6fbb361674aa2b --- CHANGELOG.md | 171 ++++++++++++++++++ .../walletrpc/compact_formats.proto | 73 ++++++-- .../walletrpc/service.proto | 114 +++++++++--- 3 files changed, 314 insertions(+), 44 deletions(-) create mode 100644 CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..59a13060f --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,171 @@ +# Changelog +All notable changes to this library will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this library adheres to Rust's notion of +[Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## Unreleased + +## [v0.4.0] - 2025-12-03 + +### Added +- `compact_formats.CompactTxIn` +- `compact_formats.TxOut` +- `service.PoolType` +- `service.LightdInfo` has added fields `upgradeName`, `upgradeHeight`, and + `lightwalletProtocolVersion` +- `compact_formats.CompactTx` has added fields `vin` and `vout`, + which may be used to represent transparent transaction input and output data. +- `service.BlockRange` has added field `poolTypes`, which allows + the caller of service methods that take this type as input to cause returned + data to be filtered to include information only for the specified protocols. + For backwards compatibility, when this field is set the default (empty) value, + servers should return Sapling and Orchard data. This field is to be ignored + when the type is used as part of a `service.TransparentAddressBlockFilter`. + +### Changed +- The `hash` field of `compact_formats.CompactTx` has been renamed to `txid`. 
+ This is a serialization-compatible clarification, as the index of this field + in the .proto type does not change. +- `service.Exclude` has been renamed to `service.GetMempoolTxRequest` and has + an added `poolTypes` field, which allows the caller of this method to specify + which pools the resulting `CompactTx` values should contain data for. + +### Deprecated +- `service.CompactTxStreamer`: + - The `GetBlockNullifiers` and `GetBlockRangeNullifiers` methods are + deprecated. + +## [v0.3.6] - 2025-05-20 + +### Added +- `service.LightdInfo` has added field `donationAddress` +- `service.CompactTxStreamer.GetTaddressTransactions`. This duplicates + the `GetTaddressTxids` method, but is more accurately named. + +### Deprecated +- `service.CompactTxStreamer.GetTaddressTxids`. Use `GetTaddressTransactions` + instead. + +## [v0.3.5] - 2023-07-03 + +### Added +- `compact_formats.ChainMetadata` +- `service.ShieldedProtocol` +- `service.GetSubtreeRootsArg` +- `service.SubtreeRoot` +- `service.CompactTxStreamer.GetBlockNullifiers` +- `service.CompactTxStreamer.GetBlockRangeNullifiers` +- `service.CompactTxStreamer.SubtreeRoots` + +### Changed +- `compact_formats.CompactBlock` has added field `chainMetadata` +- `compact_formats.CompactSaplingOutput.epk` has been renamed to `ephemeralKey` + +## [v0.3.4] - UNKNOWN + +### Added +- `service.CompactTxStreamer.GetLatestTreeState` + +## [v0.3.3] - 2022-04-02 + +### Added +- `service.TreeState` has added field `orchardTree` + +### Changed +- `service.TreeState.tree` has been renamed to `saplingTree` + +## [v0.3.2] - 2021-12-09 + +### Changed +- `compact_formats.CompactOrchardAction.encCiphertext` has been renamed to + `CompactOrchardAction.ciphertext` + +## [v0.3.1] - 2021-12-09 + +### Added +- `compact_formats.CompactOrchardAction` +- `service.CompactTxStreamer.GetMempoolTx` (removed in 0.3.0) has been reintroduced. +- `service.Exclude` (removed in 0.3.0) has been reintroduced. 
+ +### Changed +- `compact_formats.CompactSpend` has been renamed `CompactSaplingSpend` +- `compact_formats.CompactOutput` has been renamed `CompactSaplingOutput` + +## [v0.3.0] - 2021-07-23 + +### Added +- `service.CompactTxStreamer.GetMempoolStream` + +### Removed +- `service.CompactTxStreamer.GetMempoolTx` has been replaced by `GetMempoolStream` +- `service.Exclude` has been removed as it is now unused. + +## [v0.2.4] - 2021-01-14 + +### Changed +- `service.GetAddressUtxosArg.address` has been replaced by the + repeated field `addresses`. This is a [conditionally-safe](https://protobuf.dev/programming-guides/proto3/#conditionally-safe-changes) + format change. +- `service.GetAddressUtxosReply` has added field `address` + +## [v0.2.3] - 2021-01-14 + +### Added +- `service.LightdInfo` has added fields: + - `estimatedHeight` + - `zcashdBuild` + - `zcashdSubversion` + +## [v0.2.2] - 2020-10-22 + +### Added +- `service.TreeState` +- `service.GetAddressUtxosArg` +- `service.GetAddressUtxosReply` +- `service.GetAddressUtxosReplyList` +- `service.CompactTxStreamer.GetTreeState` +- `service.CompactTxStreamer.GetAddressUtxos` +- `service.CompactTxStreamer.GetAddressUtxosStream` + +## [v0.2.1] - 2020-10-06 + +### Added +- `service.Address` +- `service.AddressList` +- `service.Balance` +- `service.Exclude` +- `service.CompactTxStreamer.GetTaddressBalance` +- `service.CompactTxStreamer.GetTaddressBalanceStream` +- `service.CompactTxStreamer.GetMempoolTx` +- `service.LightdInfo` has added fields: + - `gitCommit` + - `branch` + - `buildDate` + - `buildUser` + +## [v0.2.0] - 2020-04-24 + +### Added +- `service.Duration` +- `service.PingResponse` +- `service.CompactTxStreamer.Ping` + +### Removed +- `service.TransparentAddress` was removed (it was unused in any service API). 
+ +## [v0.1.1] - 2019-11-27 + +### Added +- `service.Empty` +- `service.LightdInfo` +- `service.TransparentAddress` +- `service.TransparentAddressBlockFilter` +- `service.CompactTxStreamer.GetTaddressTxids` +- `service.CompactTxStreamer.GetLightdInfo` +- `service.RawTransaction` has added field `height` + +## [v0.1.0] - 2019-09-19 + +Initial release diff --git a/zaino-proto/lightwallet-protocol/walletrpc/compact_formats.proto b/zaino-proto/lightwallet-protocol/walletrpc/compact_formats.proto index c799448ce..c62c7acbb 100644 --- a/zaino-proto/lightwallet-protocol/walletrpc/compact_formats.proto +++ b/zaino-proto/lightwallet-protocol/walletrpc/compact_formats.proto @@ -7,8 +7,8 @@ package cash.z.wallet.sdk.rpc; option go_package = "lightwalletd/walletrpc"; option swift_prefix = ""; -// Remember that proto3 fields are all optional. A field that is not present will be set to its zero value. -// bytes fields of hashes are in canonical little-endian format. +// REMINDER: proto3 fields are all optional. A field that is not present will be set to its zero/false/empty +// value. // Information about the state of the chain as of a given block. message ChainMetadata { @@ -16,34 +16,40 @@ message ChainMetadata { uint32 orchardCommitmentTreeSize = 2; // the size of the Orchard note commitment tree as of the end of this block } -// A compact representation of the shielded data in a Zcash block. +// A compact representation of a Zcash block. // // CompactBlock is a packaging of ONLY the data from a block that's needed to: // 1. Detect a payment to your Shielded address // 2. Detect a spend of your Shielded notes // 3. Update your witnesses to generate new spend proofs. +// 4. Spend UTXOs associated to t-addresses of your wallet. 
message CompactBlock { uint32 protoVersion = 1; // the version of this wire format, for storage uint64 height = 2; // the height of this block bytes hash = 3; // the ID (hash) of this block, same as in block explorers bytes prevHash = 4; // the ID (hash) of this block's predecessor uint32 time = 5; // Unix epoch time when the block was mined - bytes header = 6; // (hash, prevHash, and time) OR (full header) + bytes header = 6; // full header (as returned by the getblock RPC) repeated CompactTx vtx = 7; // zero or more compact transactions from this block ChainMetadata chainMetadata = 8; // information about the state of the chain as of this block } -// A compact representation of the shielded data in a Zcash transaction. +// A compact representation of a Zcash transaction. // // CompactTx contains the minimum information for a wallet to know if this transaction -// is relevant to it (either pays to it or spends from it) via shielded elements -// only. This message will not encode a transparent-to-transparent transaction. +// is relevant to it (either pays to it or spends from it) via shielded elements. Additionally, +// it can optionally include the minimum necessary data to detect payments to transparent addresses +// related to your wallet. message CompactTx { - // Index and hash will allow the receiver to call out to chain - // explorers or other data structures to retrieve more information - // about this transaction. - uint64 index = 1; // the index within the full block - bytes hash = 2; // the ID (hash) of this transaction, same as in block explorers + // The index of the transaction within the block. + uint64 index = 1; + + // The id of the transaction as defined in + // [§ 7.1.1 ‘Transaction Identifiers’](https://zips.z.cash/protocol/protocol.pdf#txnidentifiers) + // This byte array MUST be in protocol order and MUST NOT be reversed + // or hex-encoded; the byte-reversed and hex-encoded representation is + // exclusively a textual representation of a txid. 
+ bytes txid = 2; // The transaction fee: present if server can provide. In the case of a // stateless server and a transaction with transparent inputs, this will be @@ -55,6 +61,41 @@ message CompactTx { repeated CompactSaplingSpend spends = 4; repeated CompactSaplingOutput outputs = 5; repeated CompactOrchardAction actions = 6; + + // `CompactTxIn` values corresponding to the `vin` entries of the full transaction. + // + // Note: the single null-outpoint input for coinbase transactions is omitted. Light + // clients can test `CompactTx.index == 0` to determine whether a `CompactTx` + // represents a coinbase transaction, as the coinbase transaction is always the + // first transaction in any block. + repeated CompactTxIn vin = 7; + + // A sequence of transparent outputs being created by the transaction. + repeated TxOut vout = 8; +} + +// A compact representation of a transparent transaction input. +message CompactTxIn { + // The id of the transaction that generated the output being spent. This + // byte array must be in protocol order and MUST NOT be reversed or + // hex-encoded. + bytes prevoutTxid = 1; + + // The index of the output being spent in the `vout` array of the + // transaction referred to by `prevoutTxid`. + uint32 prevoutIndex = 2; +} + +// A transparent output being created by the transaction. +// +// This contains identical data to the `TxOut` type in the transaction itself, and +// thus it is not "compact". +message TxOut { + // The value of the output, in Zatoshis. + uint64 value = 1; + + // The script pubkey that must be satisfied in order to spend this output. + bytes scriptPubKey = 2; } // A compact representation of a [Sapling Spend](https://zips.z.cash/protocol/protocol.pdf#spendencodingandconsensus). @@ -62,7 +103,7 @@ message CompactTx { // CompactSaplingSpend is a Sapling Spend Description as described in 7.3 of the Zcash // protocol specification. 
message CompactSaplingSpend { - bytes nf = 1; // nullifier (see the Zcash protocol specification) + bytes nf = 1; // Nullifier (see the Zcash protocol specification) } // A compact representation of a [Sapling Output](https://zips.z.cash/protocol/protocol.pdf#outputencodingandconsensus). @@ -70,9 +111,9 @@ message CompactSaplingSpend { // It encodes the `cmu` field, `ephemeralKey` field, and a 52-byte prefix of the // `encCiphertext` field of a Sapling Output Description. Total size is 116 bytes. message CompactSaplingOutput { - bytes cmu = 1; // note commitment u-coordinate - bytes ephemeralKey = 2; // ephemeral public key - bytes ciphertext = 3; // first 52 bytes of ciphertext + bytes cmu = 1; // Note commitment u-coordinate. + bytes ephemeralKey = 2; // Ephemeral public key. + bytes ciphertext = 3; // First 52 bytes of ciphertext. } // A compact representation of an [Orchard Action](https://zips.z.cash/protocol/protocol.pdf#actionencodingandconsensus). diff --git a/zaino-proto/lightwallet-protocol/walletrpc/service.proto b/zaino-proto/lightwallet-protocol/walletrpc/service.proto index 0a0989c7d..d3dc8ba04 100644 --- a/zaino-proto/lightwallet-protocol/walletrpc/service.proto +++ b/zaino-proto/lightwallet-protocol/walletrpc/service.proto @@ -8,18 +8,35 @@ option go_package = "lightwalletd/walletrpc"; option swift_prefix = ""; import "compact_formats.proto"; +// An identifier for a Zcash value pool. +enum PoolType { + POOL_TYPE_INVALID = 0; + TRANSPARENT = 1; + SAPLING = 2; + ORCHARD = 3; +} + // A BlockID message contains identifiers to select a block: a height or a // hash. Specification by hash is not implemented, but may be in the future. message BlockID { - uint64 height = 1; - bytes hash = 2; + uint64 height = 1; + bytes hash = 2; } // BlockRange specifies a series of blocks from start to end inclusive. // Both BlockIDs must be heights; specification by hash is not yet supported. 
+//
+// If no pool types are specified, the server should default to the legacy
+// behavior of returning only data relevant to the shielded (Sapling and
+// Orchard) pools; otherwise, the server should prune `CompactBlocks` returned
+// to include only data relevant to the requested pool types. Clients MUST
+// verify that the version of the server they are connected to is capable
+// of returning pruned and/or transparent data before setting `poolTypes`
+// to a non-empty value.
 message BlockRange {
     BlockID start = 1;
     BlockID end = 2;
+    repeated PoolType poolTypes = 3;
 }
 
 // A TxFilter contains the information needed to identify a particular
@@ -93,13 +110,21 @@ message LightdInfo {
     string zcashdBuild = 13;           // example: "v4.1.1-877212414"
     string zcashdSubversion = 14;      // example: "/MagicBean:4.1.1/"
     string donationAddress = 15;       // Zcash donation UA address
+    string upgradeName = 16;           // name of next pending network upgrade, empty if none scheduled
+    uint64 upgradeHeight = 17;         // height of next pending upgrade, zero if none is scheduled
+    string lightwalletProtocolVersion = 18; // version of https://github.com/zcash/lightwallet-protocol served by this server
 }
 
-// TransparentAddressBlockFilter restricts the results to the given address
-// or block range.
+// TransparentAddressBlockFilter restricts the results of the GRPC methods that
+// use it to the transactions that involve the given address and were mined in
+// the specified block range. Non-default values for both the address and the
+// block range must be specified. Mempool transactions are not included.
+//
+// The `poolTypes` field of the `range` argument should be ignored.
+// Implementations MAY consider it an error if any pool types are specified.
message TransparentAddressBlockFilter { string address = 1; // t-address - BlockRange range = 2; // start, end heights + BlockRange range = 2; // start, end heights only } // Duration is currently used only for testing, so that the Ping rpc @@ -127,10 +152,23 @@ message Balance { int64 valueZat = 1; } -// The a shortened transaction ID is the prefix in big-endian (hex) format -// (then converted to binary). -message Exclude { - repeated bytes txid = 1; +// Request parameters for the `GetMempoolTx` RPC. +message GetMempoolTxRequest { + // A list of transaction ID byte string suffixes that should be excluded + // from the response. These suffixes may be produced either directly from + // the underlying txid bytes, or, if the source values are encoded txid + // strings, by truncating the hexadecimal representation of each + // transaction ID to an even number of characters, and then hex-decoding + // and then byte-reversing this value to obtain the byte representation. + repeated bytes exclude_txid_suffixes = 1; + // We reserve field number 2 for a potential future `exclude_txid_prefixes` + // field. + reserved 2; + // The server must prune `CompactTx`s returned to include only data + // relevant to the requested pool types. If no pool types are specified, + // the server should default to the legacy behavior of returning only data + // relevant to the shielded (Sapling and Orchard) pools. + repeated PoolType poolTypes = 3; } // The TreeState is derived from the Zcash z_gettreestate rpc. @@ -181,44 +219,63 @@ message GetAddressUtxosReplyList { service CompactTxStreamer { // Return the BlockID of the block at the tip of the best chain rpc GetLatestBlock(ChainSpec) returns (BlockID) {} + // Return the compact block corresponding to the given block identifier rpc GetBlock(BlockID) returns (CompactBlock) {} - // Same as GetBlock except actions contain only nullifiers + + // Same as GetBlock except the returned CompactBlock value contains only + // nullifiers. 
+    //
+    // Note: this method is deprecated. Implementations should ignore any
+    // `PoolType::TRANSPARENT` member of the `poolTypes` argument.
     rpc GetBlockNullifiers(BlockID) returns (CompactBlock) {}
-    // Return a list of consecutive compact blocks
+
+    // Return a list of consecutive compact blocks in the specified range,
+    // which is inclusive of `range.end`.
+    //
+    // If range.start <= range.end, blocks are returned in increasing height order;
+    // otherwise blocks are returned in decreasing height order.
     rpc GetBlockRange(BlockRange) returns (stream CompactBlock) {}
-    // Same as GetBlockRange except actions contain only nullifiers
+
+    // Same as GetBlockRange except the returned CompactBlock values contain
+    // only nullifiers.
+    //
+    // Note: this method is deprecated. Implementations should ignore any
+    // `PoolType::TRANSPARENT` member of the `poolTypes` argument.
     rpc GetBlockRangeNullifiers(BlockRange) returns (stream CompactBlock) {}
 
     // Return the requested full (not compact) transaction (as from zcashd)
     rpc GetTransaction(TxFilter) returns (RawTransaction) {}
+
     // Submit the given transaction to the Zcash network
     rpc SendTransaction(RawTransaction) returns (SendResponse) {}
 
-    // Return the transactions corresponding to the given t-address within the given block range
-    // NB - this method is misnamed, it returns transactions, not transaction IDs.
+    // Return RawTransactions that match the given transparent address filter.
+    //
+    // Note: This function is misnamed, it returns complete `RawTransaction` values, not TxIds.
    // NOTE: this method is deprecated, please use GetTaddressTransactions instead.
     rpc GetTaddressTxids(TransparentAddressBlockFilter) returns (stream RawTransaction) {}
 
-    // Return the transactions corresponding to the given t-address within the given block range
+    // Return the transactions corresponding to the given t-address within the given block range.
+    // Mempool transactions are not included in the results.
rpc GetTaddressTransactions(TransparentAddressBlockFilter) returns (stream RawTransaction) {} rpc GetTaddressBalance(AddressList) returns (Balance) {} rpc GetTaddressBalanceStream(stream Address) returns (Balance) {} - // Return the compact transactions currently in the mempool; the results - // can be a few seconds out of date. If the Exclude list is empty, return - // all transactions; otherwise return all *except* those in the Exclude list - // (if any); this allows the client to avoid receiving transactions that it - // already has (from an earlier call to this rpc). The transaction IDs in the - // Exclude list can be shortened to any number of bytes to make the request - // more bandwidth-efficient; if two or more transactions in the mempool - // match a shortened txid, they are all sent (none is excluded). Transactions - // in the exclude list that don't exist in the mempool are ignored. - // - // The a shortened transaction ID is the prefix in big-endian (hex) format - // (then converted to binary). See smoke-test.bash for examples. - rpc GetMempoolTx(Exclude) returns (stream CompactTx) {} + // Returns a stream of the compact transaction representation for transactions + // currently in the mempool. The results of this operation may be a few + // seconds out of date. If the `exclude_txid_suffixes` list is empty, + // return all transactions; otherwise return all *except* those in the + // `exclude_txid_suffixes` list (if any); this allows the client to avoid + // receiving transactions that it already has (from an earlier call to this + // RPC). The transaction IDs in the `exclude_txid_suffixes` list can be + // shortened to any number of bytes to make the request more + // bandwidth-efficient; if two or more transactions in the mempool match a + // txid suffix, none of the matching transactions are excluded. Txid + // suffixes in the exclude list that don't match any transactions in the + // mempool are ignored. 
+ rpc GetMempoolTx(GetMempoolTxRequest) returns (stream CompactTx) {} // Return a stream of current Mempool transactions. This will keep the output stream open while // there are mempool transactions. It will close the returned stream when a new block is mined. @@ -240,6 +297,7 @@ service CompactTxStreamer { // Return information about this lightwalletd instance and the blockchain rpc GetLightdInfo(Empty) returns (LightdInfo) {} + // Testing-only, requires lightwalletd --ping-very-insecure (do not enable in production) rpc Ping(Duration) returns (PingResponse) {} } From 19371c5947cc99ad05a823525bd8b7142ead04f4 Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Sun, 28 Dec 2025 17:26:18 -0300 Subject: [PATCH 049/114] Enable build.rs --- zaino-proto/build.rs | 86 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 zaino-proto/build.rs diff --git a/zaino-proto/build.rs b/zaino-proto/build.rs new file mode 100644 index 000000000..f60dade06 --- /dev/null +++ b/zaino-proto/build.rs @@ -0,0 +1,86 @@ +use std::env; +use std::fs; +use std::io; +use std::path::{Path, PathBuf}; + +const COMPACT_FORMATS_PROTO: &str = "proto/compact_formats.proto"; +const PROPOSAL_PROTO: &str = "proto/proposal.proto"; +const SERVICE_PROTO: &str = "proto/service.proto"; + +fn main() -> io::Result<()> { + // Check and compile proto files if needed + if Path::new(COMPACT_FORMATS_PROTO).exists() + && env::var_os("PROTOC") + .map(PathBuf::from) + .or_else(|| which::which("protoc").ok()) + .is_some() + { + build()?; + } + + Ok(()) +} + +fn build() -> io::Result<()> { + let out: PathBuf = env::var_os("OUT_DIR") + .expect("Cannot find OUT_DIR environment variable") + .into(); + + // Build the compact format types. + tonic_build::compile_protos(COMPACT_FORMATS_PROTO)?; + + // Copy the generated types into the source tree so changes can be committed. 
+ fs::copy( + out.join("cash.z.wallet.sdk.rpc.rs"), + "src/proto/compact_formats.rs", + )?; + + // Build the gRPC types and client. + tonic_build::configure() + .build_server(true) + // .client_mod_attribute( + // "cash.z.wallet.sdk.rpc", + // r#"#[cfg(feature = "lightwalletd-tonic")]"#, + // ) + .extern_path( + ".cash.z.wallet.sdk.rpc.ChainMetadata", + "crate::proto::compact_formats::ChainMetadata", + ) + .extern_path( + ".cash.z.wallet.sdk.rpc.CompactBlock", + "crate::proto::compact_formats::CompactBlock", + ) + .extern_path( + ".cash.z.wallet.sdk.rpc.CompactTx", + "crate::proto::compact_formats::CompactTx", + ) + .extern_path( + ".cash.z.wallet.sdk.rpc.CompactSaplingSpend", + "crate::proto::compact_formats::CompactSaplingSpend", + ) + .extern_path( + ".cash.z.wallet.sdk.rpc.CompactSaplingOutput", + "crate::proto::compact_formats::CompactSaplingOutput", + ) + .extern_path( + ".cash.z.wallet.sdk.rpc.CompactOrchardAction", + "crate::proto::compact_formats::CompactOrchardAction", + ) + .compile_protos(&[SERVICE_PROTO], &["proto/"])?; + + // Build the proposal types. + tonic_build::compile_protos(PROPOSAL_PROTO)?; + + // Copy the generated types into the source tree so changes can be committed. + fs::copy( + out.join("cash.z.wallet.sdk.ffi.rs"), + "src/proto/proposal.rs", + )?; + + // Copy the generated types into the source tree so changes can be committed. The + // file has the same name as for the compact format types because they have the + // same package, but we've set things up so this only contains the service types. 
+ fs::copy(out.join("cash.z.wallet.sdk.rpc.rs"), "src/proto/service.rs")?; + + Ok(()) +} From ce8a418680bec52d5788499aaa26db2b5188d6d8 Mon Sep 17 00:00:00 2001 From: Pacu Date: Tue, 28 Oct 2025 20:57:07 -0300 Subject: [PATCH 050/114] WIP - Add `vin` and `vout` to `CompactTx` this starts to implement this feature while asking questions on implementation details to discuss with the rest of the Zaino team --- zaino-fetch/src/chain/transaction.rs | 40 ++++++++++++++++++- zaino-state/src/backends/fetch.rs | 1 + zaino-state/src/backends/state.rs | 4 +- .../src/chain_index/finalised_state/db/v1.rs | 15 +++++++ .../src/chain_index/types/db/legacy.rs | 32 +++++++++++++++ 5 files changed, 90 insertions(+), 2 deletions(-) diff --git a/zaino-fetch/src/chain/transaction.rs b/zaino-fetch/src/chain/transaction.rs index fe2bd370b..194f2b1cf 100644 --- a/zaino-fetch/src/chain/transaction.rs +++ b/zaino-fetch/src/chain/transaction.rs @@ -6,7 +6,8 @@ use crate::chain::{ }; use std::io::Cursor; use zaino_proto::proto::compact_formats::{ - CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, CompactTx, + CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, CompactTx, CompactTxIn, + OutPoint, TxOut as CompactTxOut, }; /// Txin format as described in @@ -27,6 +28,12 @@ impl TxIn { fn into_inner(self) -> (Vec, u32, Vec) { (self.prev_txid, self.prev_index, self.script_sig) } + + /// Returns `true` if this `OutPoint` is "null" in the Bitcoin sense: it has txid set to + /// all-zeroes and output index set to `u32::MAX`. 
+ fn is_null(&self) -> bool { + self.prev_txid.as_slice() == [0u8; 32] && self.prev_index == u32::MAX + } } impl ParseFromSlice for TxIn { @@ -1160,6 +1167,35 @@ impl FullTransaction { }) .collect(); + let vout = self + .raw_transaction + .transparent_outputs + .iter() + .map(|t_out| CompactTxOut { + value: t_out.value, + script_pub_key: t_out.script_hash.clone(), + }) + .collect(); + + let vin = self + .raw_transaction + .transparent_inputs + .iter() + .map(|t_in| { + if t_in.is_null() { + None + } else { + Some(CompactTxIn { + prevout: Some(OutPoint { + txid: t_in.prev_txid.clone(), + index: t_in.prev_index, + }), + }) + } + }) + .filter_map(|t_in| t_in) + .collect(); + Ok(CompactTx { index, hash, @@ -1167,6 +1203,8 @@ impl FullTransaction { spends, outputs, actions, + vin, + vout, }) } diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index 2da3b9d9a..df5049a26 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -1644,6 +1644,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { estimated_height: blockchain_info.estimated_height().0 as u64, zcashd_build: self.data.zebra_build(), zcashd_subversion: self.data.zebra_subversion(), + donation_address: "".to_string(), }) } diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index 655e8c5df..b3efcef91 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -2538,7 +2538,9 @@ impl LightWalletIndexer for StateServiceSubscriber { build_user: self.data.build_info().build_user(), estimated_height: blockchain_info.estimated_height().0 as u64, zcashd_build: self.data.zebra_build(), - zcashd_subversion: self.data.zebra_subversion(), + zcashd_subversion: self.data.zebra_subversion(), + // TODO: support donation addresses see https://github.com/zingolabs/zaino/issues/626 + donation_address: "".to_string(), }) } diff --git a/zaino-state/src/chain_index/finalised_state/db/v1.rs 
b/zaino-state/src/chain_index/finalised_state/db/v1.rs index 82b4097d8..5b3cc5b19 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v1.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v1.rs @@ -3041,6 +3041,21 @@ impl DbV1 { }) } + + // NOTE: @AloeAreV: can you take a look at this? there seems to be a method that + // transforms an Indexed block into a compact block already. IDK why the `get_compact_blocks` + // method doesn't use it. + async fn get_compact_block2( + &self, + height: Height, + + ) -> Result { + let block = self.get_chain_block(height) + .await? + .unwrap(); // FIX + + Ok(block.to_compact_block()) + } /// Returns the CompactBlock for the given Height. /// /// TODO: Add separate range fetch method! diff --git a/zaino-state/src/chain_index/types/db/legacy.rs b/zaino-state/src/chain_index/types/db/legacy.rs index d692abba2..ee91912ee 100644 --- a/zaino-state/src/chain_index/types/db/legacy.rs +++ b/zaino-state/src/chain_index/types/db/legacy.rs @@ -34,6 +34,7 @@ use hex::{FromHex, ToHex}; use primitive_types::U256; use std::{fmt, io::Cursor}; use zebra_chain::serialization::BytesInDisplayOrder as _; +use zaino_proto::proto::compact_formats::{CompactTxIn, OutPoint, TxOut}; use crate::chain_index::encoding::{ read_fixed_le, read_i64_le, read_option, read_u16_be, read_u32_be, read_u32_le, read_u64_le, @@ -1470,6 +1471,35 @@ impl CompactTxData { ) .collect(); + let vout = self + .transparent + .vout + .iter() + .map(|tx_out| TxOut { + value: tx_out.value, + script_pub_key: tx_out.script_hash.to_vec(), + }) + .collect(); + + let vin = self + .transparent + .vin + .iter() + .map(|t_in| { + if t_in.is_null_prevout() { + None + } else { + Some(CompactTxIn { + prevout: Some(OutPoint { + txid: t_in.prevout_txid.to_vec(), + index: t_in.prevout_index, + }), + }) + } + }) + .filter_map(|t_in| t_in) + .collect(); + zaino_proto::proto::compact_formats::CompactTx { index: self.index(), hash: self.txid().0.to_vec(), @@ -1477,6 +1507,8 @@ impl 
CompactTxData { spends, outputs, actions, + vin, + vout, } } } From eff6206e59f3f86a7c440e3d507b21ddba4121e9 Mon Sep 17 00:00:00 2001 From: Pacu Date: Sat, 1 Nov 2025 17:58:28 -0300 Subject: [PATCH 051/114] Fix compiler errors from updated proto files --- integration-tests/tests/fetch_service.rs | 21 ++- integration-tests/tests/state_service.rs | 15 +- zaino-fetch/src/chain/transaction.rs | 16 +- zaino-serve/src/rpc/grpc/service.rs | 6 + zaino-state/src/backends/fetch.rs | 8 + zaino-state/src/backends/state.rs | 12 +- .../src/chain_index/finalised_state/db/v1.rs | 174 +----------------- .../src/chain_index/types/db/legacy.rs | 10 +- zaino-state/src/indexer.rs | 6 + 9 files changed, 75 insertions(+), 193 deletions(-) diff --git a/integration-tests/tests/fetch_service.rs b/integration-tests/tests/fetch_service.rs index 0b2645217..637b481d8 100644 --- a/integration-tests/tests/fetch_service.rs +++ b/integration-tests/tests/fetch_service.rs @@ -3,7 +3,7 @@ use futures::StreamExt as _; use zaino_fetch::jsonrpsee::connector::{test_node_and_return_url, JsonRpSeeConnector}; use zaino_proto::proto::service::{ - AddressList, BlockId, BlockRange, Exclude, GetAddressUtxosArg, GetSubtreeRootsArg, + AddressList, BlockId, BlockRange, Exclude, GetAddressUtxosArg, GetSubtreeRootsArg, PoolType, TransparentAddressBlockFilter, TxFilter, }; use zaino_state::FetchServiceSubscriber; @@ -1040,6 +1040,11 @@ async fn fetch_service_get_block_range(validator: &ValidatorKin height: 10, hash: Vec::new(), }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], }; let fetch_service_stream = fetch_service_subscriber @@ -1080,6 +1085,11 @@ async fn fetch_service_get_block_range_nullifiers(validator: &V height: 10, hash: Vec::new(), }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], }; let fetch_service_stream = fetch_service_subscriber @@ -1263,6 +1273,11 @@ async fn 
fetch_service_get_taddress_txids(validator: &Validator height: chain_height as u64, hash: Vec::new(), }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], }), }; @@ -1412,7 +1427,7 @@ async fn fetch_service_get_mempool_tx(validator: &ValidatorKind .collect(); let mut sorted_fetch_mempool_tx = fetch_mempool_tx.clone(); - sorted_fetch_mempool_tx.sort_by_key(|tx| tx.hash.clone()); + sorted_fetch_mempool_tx.sort_by_key(|tx| tx.txid.clone()); let tx1_bytes = *tx_1.first().as_ref(); let tx2_bytes = *tx_2.first().as_ref(); @@ -1439,7 +1454,7 @@ async fn fetch_service_get_mempool_tx(validator: &ValidatorKind .collect(); let mut sorted_exclude_fetch_mempool_tx = exclude_fetch_mempool_tx.clone(); - sorted_exclude_fetch_mempool_tx.sort_by_key(|tx| tx.hash.clone()); + sorted_exclude_fetch_mempool_tx.sort_by_key(|tx| tx.txid.clone()); assert_eq!(sorted_exclude_fetch_mempool_tx[0].hash, sorted_txids[1]); assert_eq!(sorted_exclude_fetch_mempool_tx.len(), 1); diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index 85a6ce05f..505b916de 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -1827,7 +1827,8 @@ mod zebra { pub(crate) mod lightwallet_indexer { use futures::StreamExt as _; use zaino_proto::proto::service::{ - AddressList, BlockId, BlockRange, GetAddressUtxosArg, GetSubtreeRootsArg, TxFilter, + AddressList, BlockId, BlockRange, GetAddressUtxosArg, GetSubtreeRootsArg, PoolType, + TxFilter, }; use zebra_rpc::methods::{GetAddressTxIdsRequest, GetBlock}; @@ -2121,7 +2122,15 @@ mod zebra { height: 5, hash: vec![], }); - let request = BlockRange { start, end }; + let request = BlockRange { + start, + end, + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], + }; if nullifiers_only { let fetch_service_get_block_range = fetch_service_subscriber 
.get_block_range_nullifiers(request.clone()) @@ -2215,7 +2224,7 @@ mod zebra { .await .unwrap(); let coinbase_tx = state_service_block_by_height.vtx.first().unwrap(); - let hash = coinbase_tx.hash.clone(); + let hash = coinbase_tx.txid.clone(); let request = TxFilter { block: None, index: 0, diff --git a/zaino-fetch/src/chain/transaction.rs b/zaino-fetch/src/chain/transaction.rs index 194f2b1cf..b2d84bc5a 100644 --- a/zaino-fetch/src/chain/transaction.rs +++ b/zaino-fetch/src/chain/transaction.rs @@ -7,7 +7,7 @@ use crate::chain::{ use std::io::Cursor; use zaino_proto::proto::compact_formats::{ CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, CompactTx, CompactTxIn, - OutPoint, TxOut as CompactTxOut, + TxOut as CompactTxOut, }; /// Txin format as described in @@ -1130,9 +1130,11 @@ impl FullTransaction { /// Converts a zcash full transaction into a compact transaction. pub fn to_compact(self, index: u64) -> Result { - let hash = self.tx_id; + let hash = self.tx_id(); - // NOTE: LightWalletD currently does not return a fee and is not currently priority here. Please open an Issue or PR at the Zingo-Indexer github (https://github.com/zingolabs/zingo-indexer) if you require this functionality. + // NOTE: LightWalletD currently does not return a fee and is not currently priority here. + // Please open an Issue or PR at the Zingo-Indexer github (https://github.com/zingolabs/zingo-indexer) + // if you require this functionality. 
let fee = 0; let spends = self @@ -1186,10 +1188,8 @@ impl FullTransaction { None } else { Some(CompactTxIn { - prevout: Some(OutPoint { - txid: t_in.prev_txid.clone(), - index: t_in.prev_index, - }), + prevout_txid: t_in.prev_txid.clone(), + prevout_index: t_in.prev_index, }) } }) @@ -1198,7 +1198,7 @@ impl FullTransaction { Ok(CompactTx { index, - hash, + txid: hash, fee, spends, outputs, diff --git a/zaino-serve/src/rpc/grpc/service.rs b/zaino-serve/src/rpc/grpc/service.rs index 6e34e78dd..2f180413d 100644 --- a/zaino-serve/src/rpc/grpc/service.rs +++ b/zaino-serve/src/rpc/grpc/service.rs @@ -142,6 +142,8 @@ where get_transaction(TxFilter) -> RawTransaction, "submit the given transaction to the zcash network." send_transaction(RawTransaction) -> SendResponse, + "Return the transactions corresponding to the given t-address within the given block range" + get_taddress_transactions(TransparentAddressBlockFilter) -> Self::GetTaddressTransactionsStream as streaming, "This name is misleading, returns the full transactions that have either inputs or outputs connected to the given transparent address." get_taddress_txids(TransparentAddressBlockFilter) -> Self::GetTaddressTxidsStream as streaming, "Returns the total balance for a list of taddrs" @@ -199,6 +201,10 @@ where #[doc = " Server streaming response type for the GetBlockRangeNullifiers method."] type GetBlockRangeNullifiersStream = std::pin::Pin>; + /// Server streaming response type for the GetTaddressTransactions method. + #[doc = "Server streaming response type for the GetTaddressTransactions method."] + type GetTaddressTransactionsStream = std::pin::Pin>; + /// Server streaming response type for the GetTaddressTxids method. 
#[doc = "Server streaming response type for the GetTaddressTxids method."] type GetTaddressTxidsStream = std::pin::Pin>; diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index df5049a26..ea52d1e19 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -1041,6 +1041,14 @@ impl LightWalletIndexer for FetchServiceSubscriber { }) } + // Return the transactions corresponding to the given t-address within the given block range + async fn get_taddress_transactions( + &self, + request: TransparentAddressBlockFilter, + ) -> Result { + todo!("Implement this method") + } + /// Return the txids corresponding to the given t-address within the given block range #[allow(deprecated)] async fn get_taddress_txids( diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index b3efcef91..05d83ced1 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -2039,6 +2039,14 @@ impl LightWalletIndexer for StateServiceSubscriber { }) } + /// Return the transactions corresponding to the given t-address within the given block range + async fn get_taddress_transactions( + &self, + request: TransparentAddressBlockFilter, + ) -> Result { + todo!() + } + /// Return the txids corresponding to the given t-address within the given block range async fn get_taddress_txids( &self, @@ -2538,9 +2546,9 @@ impl LightWalletIndexer for StateServiceSubscriber { build_user: self.data.build_info().build_user(), estimated_height: blockchain_info.estimated_height().0 as u64, zcashd_build: self.data.zebra_build(), - zcashd_subversion: self.data.zebra_subversion(), + zcashd_subversion: self.data.zebra_subversion(), // TODO: support donation addresses see https://github.com/zingolabs/zaino/issues/626 - donation_address: "".to_string(), + donation_address: "".to_string(), }) } diff --git a/zaino-state/src/chain_index/finalised_state/db/v1.rs 
b/zaino-state/src/chain_index/finalised_state/db/v1.rs index 5b3cc5b19..3d09aa784 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v1.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v1.rs @@ -3041,21 +3041,6 @@ impl DbV1 { }) } - - // NOTE: @AloeAreV: can you take a look at this? there seems to be a method that - // transforms an Indexed block into a compact block already. IDK why the `get_compact_blocks` - // method doesn't use it. - async fn get_compact_block2( - &self, - height: Height, - - ) -> Result { - let block = self.get_chain_block(height) - .await? - .unwrap(); // FIX - - Ok(block.to_compact_block()) - } /// Returns the CompactBlock for the given Height. /// /// TODO: Add separate range fetch method! @@ -3063,162 +3048,9 @@ impl DbV1 { &self, height: Height, ) -> Result { - let validated_height = self - .resolve_validated_hash_or_height(HashOrHeight::Height(height.into())) - .await?; - let height_bytes = validated_height.to_bytes()?; - - tokio::task::block_in_place(|| { - let txn = self.env.begin_ro_txn()?; - - // Fetch header data - let raw = match txn.get(self.headers, &height_bytes) { - Ok(val) => val, - Err(lmdb::Error::NotFound) => { - return Err(FinalisedStateError::DataUnavailable( - "block data missing from db".into(), - )); - } - Err(e) => return Err(FinalisedStateError::LmdbError(e)), - }; - let header: BlockHeaderData = *StoredEntryVar::from_bytes(raw) - .map_err(|e| FinalisedStateError::Custom(format!("header decode error: {e}")))? - .inner(); - - // fetch transaction data - let raw = match txn.get(self.txids, &height_bytes) { - Ok(val) => val, - Err(lmdb::Error::NotFound) => { - return Err(FinalisedStateError::DataUnavailable( - "block data missing from db".into(), - )); - } - Err(e) => return Err(FinalisedStateError::LmdbError(e)), - }; - let txids_list = StoredEntryVar::::from_bytes(raw) - .map_err(|e| FinalisedStateError::Custom(format!("txids decode error: {e}")))? 
- .inner() - .clone(); - let txids = txids_list.txids(); - - let raw = match txn.get(self.sapling, &height_bytes) { - Ok(val) => val, - Err(lmdb::Error::NotFound) => { - return Err(FinalisedStateError::DataUnavailable( - "block data missing from db".into(), - )); - } - Err(e) => return Err(FinalisedStateError::LmdbError(e)), - }; - let sapling_list = StoredEntryVar::::from_bytes(raw) - .map_err(|e| FinalisedStateError::Custom(format!("sapling decode error: {e}")))? - .inner() - .clone(); - let sapling = sapling_list.tx(); - - let raw = match txn.get(self.orchard, &height_bytes) { - Ok(val) => val, - Err(lmdb::Error::NotFound) => { - return Err(FinalisedStateError::DataUnavailable( - "block data missing from db".into(), - )); - } - Err(e) => return Err(FinalisedStateError::LmdbError(e)), - }; - let orchard_list = StoredEntryVar::::from_bytes(raw) - .map_err(|e| FinalisedStateError::Custom(format!("orchard decode error: {e}")))? - .inner() - .clone(); - let orchard = orchard_list.tx(); - - let vtx: Vec = txids - .iter() - .enumerate() - .filter_map(|(i, txid)| { - let spends = sapling - .get(i) - .and_then(|opt| opt.as_ref()) - .map(|s| { - s.spends() - .iter() - .map(|sp| sp.into_compact()) - .collect::>() - }) - .unwrap_or_default(); - - let outputs = sapling - .get(i) - .and_then(|opt| opt.as_ref()) - .map(|s| { - s.outputs() - .iter() - .map(|o| o.into_compact()) - .collect::>() - }) - .unwrap_or_default(); - - let actions = orchard - .get(i) - .and_then(|opt| opt.as_ref()) - .map(|o| { - o.actions() - .iter() - .map(|a| a.into_compact()) - .collect::>() - }) - .unwrap_or_default(); - - // SKIP transparent-only txs: - if spends.is_empty() && outputs.is_empty() && actions.is_empty() { - return None; - } - - Some(zaino_proto::proto::compact_formats::CompactTx { - index: i as u64, - hash: txid.0.to_vec(), - fee: 0, - spends, - outputs, - actions, - }) - }) - .collect(); - - // fetch commitment tree data - let raw = match txn.get(self.commitment_tree_data, 
&height_bytes) { - Ok(val) => val, - Err(lmdb::Error::NotFound) => { - return Err(FinalisedStateError::DataUnavailable( - "block data missing from db".into(), - )); - } - Err(e) => return Err(FinalisedStateError::LmdbError(e)), - }; - - let commitment_tree_data: CommitmentTreeData = *StoredEntryFixed::from_bytes(raw) - .map_err(|e| { - FinalisedStateError::Custom(format!("commitment_tree decode error: {e}")) - })? - .inner(); - - let chain_metadata = zaino_proto::proto::compact_formats::ChainMetadata { - sapling_commitment_tree_size: commitment_tree_data.sizes().sapling(), - orchard_commitment_tree_size: commitment_tree_data.sizes().orchard(), - }; - - // Construct CompactBlock - Ok(zaino_proto::proto::compact_formats::CompactBlock { - proto_version: 4, - height: header.index().height().0 as u64, - hash: header.index().hash().0.to_vec(), - prev_hash: header.index().parent_hash().0.to_vec(), - // Is this safe? - time: header.data().time() as u32, - header: Vec::new(), - vtx, - chain_metadata: Some(chain_metadata), - }) - }) + let block = self.get_chain_block(height).await?.unwrap(); // FIX + + Ok(block.to_compact_block()) } /// Fetch database metadata. 
diff --git a/zaino-state/src/chain_index/types/db/legacy.rs b/zaino-state/src/chain_index/types/db/legacy.rs index ee91912ee..685990917 100644 --- a/zaino-state/src/chain_index/types/db/legacy.rs +++ b/zaino-state/src/chain_index/types/db/legacy.rs @@ -34,7 +34,7 @@ use hex::{FromHex, ToHex}; use primitive_types::U256; use std::{fmt, io::Cursor}; use zebra_chain::serialization::BytesInDisplayOrder as _; -use zaino_proto::proto::compact_formats::{CompactTxIn, OutPoint, TxOut}; +use zaino_proto::proto::compact_formats::{CompactTxIn, TxOut}; use crate::chain_index::encoding::{ read_fixed_le, read_i64_le, read_option, read_u16_be, read_u32_be, read_u32_le, read_u64_le, @@ -1490,10 +1490,8 @@ impl CompactTxData { None } else { Some(CompactTxIn { - prevout: Some(OutPoint { - txid: t_in.prevout_txid.to_vec(), - index: t_in.prevout_index, - }), + prevout_txid: t_in.prevout_txid.to_vec(), + prevout_index: t_in.prevout_index, }) } }) @@ -1502,7 +1500,7 @@ impl CompactTxData { zaino_proto::proto::compact_formats::CompactTx { index: self.index(), - hash: self.txid().0.to_vec(), + txid: self.txid().bytes_in_display_order().to_vec(), fee, spends, outputs, diff --git a/zaino-state/src/indexer.rs b/zaino-state/src/indexer.rs index 0c92b2283..c871a4454 100644 --- a/zaino-state/src/indexer.rs +++ b/zaino-state/src/indexer.rs @@ -603,6 +603,12 @@ pub trait LightWalletIndexer: Send + Sync + Clone + ZcashIndexer + 'static { /// Submit the given transaction to the Zcash network async fn send_transaction(&self, request: RawTransaction) -> Result; + /// Return the transactions corresponding to the given t-address within the given block range + async fn get_taddress_transactions( + &self, + request: TransparentAddressBlockFilter, + ) -> Result; + /// Return the txids corresponding to the given t-address within the given block range async fn get_taddress_txids( &self, From b27f28954adf46a41f34c81bb7461b39ad8d352e Mon Sep 17 00:00:00 2001 From: Pacu Date: Wed, 5 Nov 2025 20:18:54 -0300 
Subject: [PATCH 052/114] Deprecate get_address_txids for Light Clients --- integration-tests/tests/state_service.rs | 85 +++++++++++++++++++++++- zaino-state/src/backends/fetch.rs | 9 +++ zaino-state/src/backends/state.rs | 17 ++--- zaino-state/src/stream.rs | 1 + 4 files changed, 101 insertions(+), 11 deletions(-) diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index 505b916de..ead500422 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -1,7 +1,9 @@ +use futures::StreamExt; use zaino_common::network::ActivationHeights; use zaino_common::{DatabaseConfig, ServiceConfig, StorageConfig}; use zaino_fetch::jsonrpsee::response::address_deltas::GetAddressDeltasParams; -use zaino_state::{LightWalletService, ZcashService}; +use zaino_proto::proto::service::{BlockId, BlockRange, PoolType, TransparentAddressBlockFilter}; +use zaino_state::{LightWalletService, ZcashService, BackendType}; #[allow(deprecated)] use zaino_state::{ @@ -957,7 +959,79 @@ async fn state_service_get_raw_transaction_testnet() { test_manager.close().await; } -async fn state_service_get_address_tx_ids(validator: &ValidatorKind) { +async fn state_service_get_address_transactions_regtest(validator: &ValidatorKind) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager.local_net.generate_blocks(100).await.unwrap(); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + clients.faucet.sync_and_await().await.unwrap(); + 
clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager.local_net.generate_blocks(1).await.unwrap(); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let tx = from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + test_manager.local_net.generate_blocks(1).await.unwrap(); + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + + let chain_height = fetch_service_subscriber + .block_cache + .get_chain_height() + .await + .unwrap() + .0; + dbg!(&chain_height); + + let state_service_txids = state_service_subscriber + .get_taddress_transactions(TransparentAddressBlockFilter { + address: recipient_taddr, + range: Some(BlockRange { + start: Some(BlockId { + height: (chain_height - 2) as u64, + hash: vec![], + }), + end: Some(BlockId { + height: chain_height as u64, + hash: vec![], + }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], + }), + }) + .await + .unwrap(); + + dbg!(&tx); + + dbg!(&state_service_txids); + assert!(state_service_txids.count().await > 0); + + test_manager.close().await; +} +async fn state_service_get_address_tx_ids(validator: &ValidatorKind) { let ( mut test_manager, _fetch_service, @@ -1359,7 +1433,12 @@ mod zebra { state_service_get_address_utxos_testnet().await; } - #[tokio::test(flavor = "multi_thread")] + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn taddress_transactions_regtest() { + state_service_get_address_transactions_regtest(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn address_tx_ids_regtest() { state_service_get_address_tx_ids::(&ValidatorKind::Zebrad).await; } diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index ea52d1e19..c257b601f 100644 --- 
a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -1096,6 +1096,15 @@ impl LightWalletIndexer for FetchServiceSubscriber { Ok(RawTransactionStream::new(receiver)) } + /// Return the txids corresponding to the given t-address within the given block range + /// this function is deprecated: use `get_taddress_transactions` + async fn get_taddress_txids( + &self, + request: TransparentAddressBlockFilter, + ) -> Result { + self.get_taddress_transactions(request).await + } + /// Returns the total balance for a list of taddrs async fn get_taddress_balance(&self, request: AddressList) -> Result { let taddrs = GetAddressBalanceRequest::new(request.addresses); diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index 05d83ced1..8357f0bcf 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -2043,14 +2043,6 @@ impl LightWalletIndexer for StateServiceSubscriber { async fn get_taddress_transactions( &self, request: TransparentAddressBlockFilter, - ) -> Result { - todo!() - } - - /// Return the txids corresponding to the given t-address within the given block range - async fn get_taddress_txids( - &self, - request: TransparentAddressBlockFilter, ) -> Result { let txids = self.get_taddress_txids_helper(request).await?; let chain_height = self.chain_height().await?; @@ -2092,6 +2084,15 @@ impl LightWalletIndexer for StateServiceSubscriber { Ok(RawTransactionStream::new(receiver)) } + /// Return the txids corresponding to the given t-address within the given block range + /// This function is deprecated. Use `get_taddress_transactions`. 
+ async fn get_taddress_txids( + &self, + request: TransparentAddressBlockFilter, + ) -> Result { + self.get_taddress_transactions(request).await + } + /// Returns the total balance for a list of taddrs async fn get_taddress_balance( &self, diff --git a/zaino-state/src/stream.rs b/zaino-state/src/stream.rs index 1cf5db873..7816f47fd 100644 --- a/zaino-state/src/stream.rs +++ b/zaino-state/src/stream.rs @@ -7,6 +7,7 @@ use zaino_proto::proto::{ }; /// Stream of RawTransactions, output type of get_taddress_txids. +#[derive(Debug)] pub struct RawTransactionStream { inner: ReceiverStream>, } From 370aa4f844b119b49a0d0e7b85e5ab8f395c493a Mon Sep 17 00:00:00 2001 From: Pacu Date: Thu, 6 Nov 2025 17:14:04 -0300 Subject: [PATCH 053/114] Prune CompactTx according to requested PoolType vector --- zaino-state/src/backends/state.rs | 41 ++++++++++++++++++++++++++++--- zaino-state/src/indexer.rs | 2 ++ zaino-state/src/local_cache.rs | 41 ++++++++++++++++++++++++++++++- 3 files changed, 80 insertions(+), 4 deletions(-) diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index 8357f0bcf..f70e99954 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -11,7 +11,10 @@ use crate::{ indexer::{ handle_raw_transaction, IndexerSubscriber, LightWalletIndexer, ZcashIndexer, ZcashService, }, - local_cache::{compact_block_to_nullifiers, BlockCache, BlockCacheSubscriber}, + local_cache::{ + compact_block_to_nullifiers, compact_block_with_pool_types, BlockCache, + BlockCacheSubscriber, + }, status::{AtomicStatus, StatusType}, stream::{ AddressStream, CompactBlockStream, CompactTransactionStream, RawTransactionStream, @@ -42,8 +45,8 @@ use zaino_proto::proto::{ compact_formats::CompactBlock, service::{ AddressList, Balance, BlockId, BlockRange, Exclude, GetAddressUtxosArg, - GetAddressUtxosReply, GetAddressUtxosReplyList, LightdInfo, PingResponse, RawTransaction, - SendResponse, TransparentAddressBlockFilter, TreeState, 
TxFilter, + GetAddressUtxosReply, GetAddressUtxosReplyList, LightdInfo, PingResponse, PoolType, + RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, }; @@ -594,6 +597,36 @@ impl StateServiceSubscriber { let fetch_service_clone = self.clone(); let service_timeout = self.config.service.timeout; let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); + + // TODO: make helper function for this + let pool_types = if request.pool_types.is_empty() { + vec![PoolType::Sapling, PoolType::Orchard] + } else { + let mut pool_types: Vec = vec![]; + + for pool in request.pool_types.iter() { + match PoolType::try_from(*pool) { + Ok(pool_type) => { + if pool_type == PoolType::Invalid { + return Err(StateServiceError::Custom(format!( + "Invalid PoolType {}. See proto::PoolType for valid pool types", + pool_type.as_str_name() + ))); + } else { + pool_types.push(pool_type); + } + } + Err(_) => { + return Err(StateServiceError::Custom(format!( + "Invalid PoolType. See proto::PoolType for valid pool types" + ))) + } + }; + } + + pool_types.clone() + }; + tokio::spawn(async move { let timeout = timeout( time::Duration::from_secs((service_timeout * 4) as u64), @@ -643,6 +676,8 @@ impl StateServiceSubscriber { Ok(mut block) => { if trim_non_nullifier { block = compact_block_to_nullifiers(block); + } else { + block = compact_block_with_pool_types(block, pool_types.clone()); } Ok(block) } diff --git a/zaino-state/src/indexer.rs b/zaino-state/src/indexer.rs index c871a4454..56a241e17 100644 --- a/zaino-state/src/indexer.rs +++ b/zaino-state/src/indexer.rs @@ -610,6 +610,8 @@ pub trait LightWalletIndexer: Send + Sync + Clone + ZcashIndexer + 'static { ) -> Result; /// Return the txids corresponding to the given t-address within the given block range + /// Note: This function is misnamed, it returns complete `RawTransaction` values, not TxIds. + /// Note: this method is deprecated, please use GetTaddressTransactions instead. 
 async fn get_taddress_txids( &self, request: TransparentAddressBlockFilter, diff --git a/zaino-state/src/local_cache.rs b/zaino-state/src/local_cache.rs index 58b52b655..e81079fba 100644 --- a/zaino-state/src/local_cache.rs +++ b/zaino-state/src/local_cache.rs @@ -21,7 +21,10 @@ use zaino_fetch::{ response::{GetBlockError, GetBlockResponse}, }, }, }; -use zaino_proto::proto::compact_formats::{ChainMetadata, CompactBlock, CompactOrchardAction}; +use zaino_proto::proto::{ + compact_formats::{ChainMetadata, CompactBlock, CompactOrchardAction}, + service::PoolType, +}; use zebra_chain::{ block::{Hash, Height}, parameters::Network, @@ -372,6 +375,42 @@ pub(crate) fn display_txids_to_server(txids: Vec) -> Result> .collect::>, _>>() } +/// prunes a compact block from transaction information related to pools not included in the +/// `pool_types` vector. +/// Note: for backwards compatibility an empty vector will return Sapling and Orchard Tx info. +pub(crate) fn compact_block_with_pool_types( + mut block: CompactBlock, + pool_types: Vec, +) -> CompactBlock { + if pool_types.is_empty() { + for compact_tx in &mut block.vtx { + // strip out transparent inputs if not requested + if !pool_types.contains(&PoolType::Transparent) { + compact_tx.vin = Vec::new(); + compact_tx.vout = Vec::new(); + } + } + } + + for compact_tx in &mut block.vtx { + // strip out transparent inputs if not requested + if !pool_types.contains(&PoolType::Transparent) { + compact_tx.vin = Vec::new(); + compact_tx.vout = Vec::new(); + } + // strip out sapling if not requested + if !pool_types.contains(&PoolType::Sapling) { + compact_tx.spends = Vec::new(); + compact_tx.outputs = Vec::new(); + } + // strip out orchard if not requested + if !pool_types.contains(&PoolType::Orchard) { + compact_tx.actions = Vec::new(); + } + } + + block +} /// Strips the ouputs and from all transactions, retains only /// the nullifier from all orcard actions, and clears the chain /// metadata from the block From 
7ba3dfe6301d6a662614066672075cf4c7f39a1f Mon Sep 17 00:00:00 2001 From: Pacu Date: Wed, 12 Nov 2025 16:28:08 -0300 Subject: [PATCH 054/114] Fix `get_compact_block` Optional unwrap --- zaino-state/src/chain_index/finalised_state/db/v1.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/zaino-state/src/chain_index/finalised_state/db/v1.rs b/zaino-state/src/chain_index/finalised_state/db/v1.rs index 3d09aa784..cd920d815 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v1.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v1.rs @@ -33,8 +33,7 @@ use crate::{ TransparentHistExt, }, entry::{StoredEntryFixed, StoredEntryVar}, - }, - types::{AddrEventBytes, TransactionHash, GENESIS_HEIGHT}, + }, source::BlockchainSourceError, types::{AddrEventBytes, TransactionHash, GENESIS_HEIGHT} }, config::BlockCacheConfig, error::FinalisedStateError, @@ -3048,9 +3047,12 @@ impl DbV1 { &self, height: Height, ) -> Result { - let block = self.get_chain_block(height).await?.unwrap(); // FIX - - Ok(block.to_compact_block()) + let block = self.get_chain_block(height).await?; + + match block { + Some(b) => Ok(b.to_compact_block()), + None => Err(FinalisedStateError::DataUnavailable(format!("Block {} not present in validator's state.", height))) + } } /// Fetch database metadata. 
From 48de13838d8249be531ffb8d08b28dd91a376def Mon Sep 17 00:00:00 2001 From: Pacu Date: Wed, 12 Nov 2025 16:50:05 -0300 Subject: [PATCH 055/114] use helper function to convert pool_types from `Vec` to type cargo fmt cargo clippy --all-features Fix `cargo clippy --all-features` issues --- zaino-fetch/src/chain/transaction.rs | 3 +- zaino-state/src/backends/state.rs | 76 ++++++++++++------- .../src/chain_index/finalised_state/db/v1.rs | 2 +- .../src/chain_index/types/db/legacy.rs | 3 +- 4 files changed, 52 insertions(+), 32 deletions(-) diff --git a/zaino-fetch/src/chain/transaction.rs b/zaino-fetch/src/chain/transaction.rs index b2d84bc5a..bbc65891f 100644 --- a/zaino-fetch/src/chain/transaction.rs +++ b/zaino-fetch/src/chain/transaction.rs @@ -1183,7 +1183,7 @@ impl FullTransaction { .raw_transaction .transparent_inputs .iter() - .map(|t_in| { + .filter_map(|t_in| { if t_in.is_null() { None } else { @@ -1193,7 +1193,6 @@ impl FullTransaction { }) } }) - .filter_map(|t_in| t_in) .collect(); Ok(CompactTx { diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index f70e99954..e2fd35547 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -23,7 +23,6 @@ use crate::{ utils::{blockid_to_hashorheight, get_build_info, ServiceMetadata}, BackendType, MempoolKey, }; - use nonempty::NonEmpty; use tokio_stream::StreamExt as _; use zaino_fetch::{ @@ -598,34 +597,21 @@ impl StateServiceSubscriber { let service_timeout = self.config.service.timeout; let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); - // TODO: make helper function for this - let pool_types = if request.pool_types.is_empty() { - vec![PoolType::Sapling, PoolType::Orchard] - } else { - let mut pool_types: Vec = vec![]; - - for pool in request.pool_types.iter() { - match PoolType::try_from(*pool) { - Ok(pool_type) => { - if pool_type == PoolType::Invalid { - return Err(StateServiceError::Custom(format!( - "Invalid 
PoolType {}. See proto::PoolType for valid pool types", - pool_type.as_str_name() - ))); - } else { - pool_types.push(pool_type); - } - } - Err(_) => { - return Err(StateServiceError::Custom(format!( - "Invalid PoolType. See proto::PoolType for valid pool types" - ))) + let pool_types = match pool_types_from_vector(&request.pool_types) { + Ok(p) => Ok(p), + Err(e) => { + Err( + match e { + PoolTypeError::InvalidPoolType => StateServiceError::UnhandledRpcError( + "PoolType::Invalid specified as argument in `BlockRange`.".to_string() + ), + PoolTypeError::UnknownPoolType(t) => StateServiceError::UnhandledRpcError( + format!("Unknown value specified in `BlockRange`. Value '{}' is not a known PoolType.", t) + ) } - }; + ) } - - pool_types.clone() - }; + }?; tokio::spawn(async move { let timeout = timeout( @@ -1053,6 +1039,42 @@ impl StateServiceSubscriber { } } +/// Errors that can arise when mapping `PoolType` from an `i32` value. +enum PoolTypeError { + /// Pool Type value was map to the enum `PoolType::Invalid`. + InvalidPoolType, + /// Pool Type value was mapped to value that can't be mapped to a known pool type. 
+ UnknownPoolType(i32), +} + +// Converts a vector of pool_types (i32) into its rich-type representation +// Returns `None` when invalid `pool_types` are found +fn pool_types_from_vector(pool_types: &[i32]) -> Result, PoolTypeError> { + let pools = if pool_types.is_empty() { + vec![PoolType::Sapling, PoolType::Orchard] + } else { + let mut pools: Vec = vec![]; + + for pool in pool_types.iter() { + match PoolType::try_from(*pool) { + Ok(pool_type) => { + if pool_type == PoolType::Invalid { + return Err(PoolTypeError::InvalidPoolType); + } else { + pools.push(pool_type); + } + } + Err(_) => { + return Err(PoolTypeError::UnknownPoolType(*pool)); + } + }; + } + + pools.clone() + }; + Ok(pools) +} + #[async_trait] #[allow(deprecated)] impl ZcashIndexer for StateServiceSubscriber { diff --git a/zaino-state/src/chain_index/finalised_state/db/v1.rs b/zaino-state/src/chain_index/finalised_state/db/v1.rs index cd920d815..24c1427d8 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v1.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v1.rs @@ -33,7 +33,7 @@ use crate::{ TransparentHistExt, }, entry::{StoredEntryFixed, StoredEntryVar}, - }, source::BlockchainSourceError, types::{AddrEventBytes, TransactionHash, GENESIS_HEIGHT} + }, types::{AddrEventBytes, TransactionHash, GENESIS_HEIGHT} }, config::BlockCacheConfig, error::FinalisedStateError, diff --git a/zaino-state/src/chain_index/types/db/legacy.rs b/zaino-state/src/chain_index/types/db/legacy.rs index 685990917..de84f1e33 100644 --- a/zaino-state/src/chain_index/types/db/legacy.rs +++ b/zaino-state/src/chain_index/types/db/legacy.rs @@ -1485,7 +1485,7 @@ impl CompactTxData { .transparent .vin .iter() - .map(|t_in| { + .filter_map(|t_in| { if t_in.is_null_prevout() { None } else { @@ -1495,7 +1495,6 @@ impl CompactTxData { }) } }) - .filter_map(|t_in| t_in) .collect(); zaino_proto::proto::compact_formats::CompactTx { From 8ac24631c98d13ea78d3bb5b7fd04bce2fd93233 Mon Sep 17 00:00:00 2001 From: Pacu Date: Wed, 
12 Nov 2025 20:39:26 -0300 Subject: [PATCH 056/114] fix rebase issues --- integration-tests/tests/fetch_service.rs | 6 +++--- integration-tests/tests/state_service.rs | 6 +++--- zaino-state/src/backends/fetch.rs | 12 ++---------- 3 files changed, 8 insertions(+), 16 deletions(-) diff --git a/integration-tests/tests/fetch_service.rs b/integration-tests/tests/fetch_service.rs index 637b481d8..834ac8959 100644 --- a/integration-tests/tests/fetch_service.rs +++ b/integration-tests/tests/fetch_service.rs @@ -1434,8 +1434,8 @@ async fn fetch_service_get_mempool_tx(validator: &ValidatorKind let mut sorted_txids = [tx1_bytes, tx2_bytes]; sorted_txids.sort_by_key(|hash| *hash); - assert_eq!(sorted_fetch_mempool_tx[0].hash, sorted_txids[0]); - assert_eq!(sorted_fetch_mempool_tx[1].hash, sorted_txids[1]); + assert_eq!(sorted_fetch_mempool_tx[0].txid, sorted_txids[0]); + assert_eq!(sorted_fetch_mempool_tx[1].txid, sorted_txids[1]); assert_eq!(sorted_fetch_mempool_tx.len(), 2); let exclude_list = Exclude { @@ -1456,7 +1456,7 @@ async fn fetch_service_get_mempool_tx(validator: &ValidatorKind let mut sorted_exclude_fetch_mempool_tx = exclude_fetch_mempool_tx.clone(); sorted_exclude_fetch_mempool_tx.sort_by_key(|tx| tx.txid.clone()); - assert_eq!(sorted_exclude_fetch_mempool_tx[0].hash, sorted_txids[1]); + assert_eq!(sorted_exclude_fetch_mempool_tx[0].txid, sorted_txids[1]); assert_eq!(sorted_exclude_fetch_mempool_tx.len(), 1); test_manager.close().await; diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index ead500422..73d2f3d74 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -3,7 +3,7 @@ use zaino_common::network::ActivationHeights; use zaino_common::{DatabaseConfig, ServiceConfig, StorageConfig}; use zaino_fetch::jsonrpsee::response::address_deltas::GetAddressDeltasParams; use zaino_proto::proto::service::{BlockId, BlockRange, PoolType, TransparentAddressBlockFilter}; -use 
zaino_state::{LightWalletService, ZcashService, BackendType}; +use zaino_state::{LightWalletService, ZcashService}; #[allow(deprecated)] use zaino_state::{ @@ -1031,7 +1031,7 @@ async fn state_service_get_address_transactions_regtest(validat test_manager.close().await; } -async fn state_service_get_address_tx_ids(validator: &ValidatorKind) { +async fn state_service_get_address_tx_ids(validator: &ValidatorKind) { let ( mut test_manager, _fetch_service, @@ -1435,7 +1435,7 @@ mod zebra { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn taddress_transactions_regtest() { - state_service_get_address_transactions_regtest(&ValidatorKind::Zebrad).await; + state_service_get_address_transactions_regtest::(&ValidatorKind::Zebrad).await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index c257b601f..52eea4430 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -1046,16 +1046,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { &self, request: TransparentAddressBlockFilter, ) -> Result { - todo!("Implement this method") - } - - /// Return the txids corresponding to the given t-address within the given block range - #[allow(deprecated)] - async fn get_taddress_txids( - &self, - request: TransparentAddressBlockFilter, - ) -> Result { - let chain_height = self.chain_height().await?; + let chain_height = self.chain_height().await?; let txids = self.get_taddress_txids_helper(request).await?; let fetch_service_clone = self.clone(); let service_timeout = self.config.service.timeout; @@ -1098,6 +1089,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { /// Return the txids corresponding to the given t-address within the given block range /// this function is deprecated: use `get_taddress_transactions` + #[allow(deprecated)] async fn get_taddress_txids( &self, request: TransparentAddressBlockFilter, From 
bf9c2b984431346159adcc137cec7d0d2674c1bd Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Sun, 28 Dec 2025 17:31:13 -0300 Subject: [PATCH 057/114] Create CHANGELOG.md file for zaino-proto --- zaino-proto/CHANGELOG.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 zaino-proto/CHANGELOG.md diff --git a/zaino-proto/CHANGELOG.md b/zaino-proto/CHANGELOG.md new file mode 100644 index 000000000..728527b2e --- /dev/null +++ b/zaino-proto/CHANGELOG.md @@ -0,0 +1,13 @@ +# Changelog +All notable changes to this library will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this library adheres to Rust's notion of +[Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + + +### Added +- utils submodule to handle `PoolType` conversions +- `PoolTypeError` defines conversion errors between i32 and known `PoolType` variants From ef57266a5d025ec2bea1061e993b2bf2579f3f79 Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Mon, 29 Dec 2025 11:00:10 -0300 Subject: [PATCH 058/114] Create a utils.rs file for helper methods --- zaino-proto/src/proto.rs | 1 + zaino-proto/src/proto/utils.rs | 42 +++++++++++++++++++++++++++++++ zaino-state/src/backends/state.rs | 39 +++------------------------- 3 files changed, 46 insertions(+), 36 deletions(-) create mode 100644 zaino-proto/src/proto/utils.rs diff --git a/zaino-proto/src/proto.rs b/zaino-proto/src/proto.rs index 7e04b9499..2ce891332 100644 --- a/zaino-proto/src/proto.rs +++ b/zaino-proto/src/proto.rs @@ -3,3 +3,4 @@ pub mod compact_formats; pub mod proposal; pub mod service; +pub mod utils; diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs new file mode 100644 index 000000000..d70926de1 --- /dev/null +++ b/zaino-proto/src/proto/utils.rs @@ -0,0 +1,42 @@ +use crate::proto::service::PoolType; + +/// Errors that can arise when mapping `PoolType` from an `i32` value. 
+pub enum PoolTypeError { + /// Pool Type value was mapped to the enum `PoolType::Invalid`. + InvalidPoolType, + /// Pool Type value was mapped to a value that can't be mapped to a known pool type. + UnknownPoolType(i32), +} + +// Converts a vector of pool_types (i32) into its rich-type representation +// Returns a `PoolTypeError` when invalid `pool_types` are found +pub fn pool_types_from_vector(pool_types: &[i32]) -> Result, PoolTypeError> { + let pools = if pool_types.is_empty() { + vec![PoolType::Sapling, PoolType::Orchard] + } else { + let mut pools: Vec = vec![]; + + for pool in pool_types.iter() { + match PoolType::try_from(*pool) { + Ok(pool_type) => { + if pool_type == PoolType::Invalid { + return Err(PoolTypeError::InvalidPoolType); + } else { + pools.push(pool_type); + } + } + Err(_) => { + return Err(PoolTypeError::UnknownPoolType(*pool)); + } + }; + } + + pools.clone() + }; + Ok(pools) +} + +/// Converts a `Vec` into a `Vec` +pub fn pool_types_into_i32_vec(pool_types: Vec) -> Vec { + pool_types.iter().map(|p| *p as i32).collect() +} diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index e2fd35547..bb4a8340e 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -47,6 +47,9 @@ use zaino_proto::proto::{ GetAddressUtxosReply, GetAddressUtxosReplyList, LightdInfo, PingResponse, PoolType, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, + utils::{ + pool_types_from_vector, PoolTypeError + } }; use zcash_protocol::consensus::NetworkType; @@ -1039,42 +1042,6 @@ impl StateServiceSubscriber { } } -/// Errors that can arise when mapping `PoolType` from an `i32` value. -enum PoolTypeError { - /// Pool Type value was map to the enum `PoolType::Invalid`. - InvalidPoolType, - /// Pool Type value was mapped to value that can't be mapped to a known pool type. 
- UnknownPoolType(i32), -} - -// Converts a vector of pool_types (i32) into its rich-type representation -// Returns `None` when invalid `pool_types` are found -fn pool_types_from_vector(pool_types: &[i32]) -> Result, PoolTypeError> { - let pools = if pool_types.is_empty() { - vec![PoolType::Sapling, PoolType::Orchard] - } else { - let mut pools: Vec = vec![]; - - for pool in pool_types.iter() { - match PoolType::try_from(*pool) { - Ok(pool_type) => { - if pool_type == PoolType::Invalid { - return Err(PoolTypeError::InvalidPoolType); - } else { - pools.push(pool_type); - } - } - Err(_) => { - return Err(PoolTypeError::UnknownPoolType(*pool)); - } - }; - } - - pools.clone() - }; - Ok(pools) -} - #[async_trait] #[allow(deprecated)] impl ZcashIndexer for StateServiceSubscriber { From d0e6b615839fe7ac20dc5ab9bcefec533034f266 Mon Sep 17 00:00:00 2001 From: Pacu Date: Sat, 22 Nov 2025 15:42:11 -0300 Subject: [PATCH 059/114] Create a GetBlockRange Error Enum --- zaino-proto/src/proto/utils.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs index d70926de1..b72413d72 100644 --- a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -40,3 +40,18 @@ pub fn pool_types_from_vector(pool_types: &[i32]) -> Result, PoolT pub fn pool_types_into_i32_vec(pool_types: Vec) -> Vec { pool_types.iter().map(|p| *p as i32).collect() } + +/// Errors that can be present in the request of the GetBlockRange RPC +pub enum GetBlockRangeError { + /// Error: No start height given. + NoStartHeightProvided, + /// Error: No end height given. + NoEndHeightProvided, + /// Start height out of range. Failed to convert to u32. + StartHeightOutOfRange, + + /// End height out of range. Failed to convert to u32. + EndHeightOutOfRange, + /// An invalid pool type request was provided. 
+ PoolTypArgumentError(PoolTypeError), +} From c5e67cc36f818c80dbbc5738f81d05c2caf45474 Mon Sep 17 00:00:00 2001 From: Pacu Date: Sat, 22 Nov 2025 15:55:44 -0300 Subject: [PATCH 060/114] add Scaffold for BlockRange validation --- zaino-proto/src/proto/utils.rs | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs index b72413d72..5e3beec17 100644 --- a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -1,4 +1,4 @@ -use crate::proto::service::PoolType; +use crate::proto::service::{BlockRange, PoolType}; /// Errors that can arise when mapping `PoolType` from an `i32` value. pub enum PoolTypeError { @@ -55,3 +55,27 @@ pub enum GetBlockRangeError { /// An invalid pool type request was provided. PoolTypArgumentError(PoolTypeError), } + +pub struct ValidatedBlockRangeRequest { + start: u32, + end: u32, + pool_types: Vec, +} + +impl ValidatedBlockRangeRequest { + /// validates a BlockRange in terms of the `GetBlockRange` RPC + pub fn validate_get_block_range_request( + request: &BlockRange, + ) -> Result { + Err(GetBlockRangeError::StartHeightOutOfRange) + } + + /// checks whether this request is specified in reversed order + pub fn is_reverse_ordered(&self) -> bool { + if self.start > self.end { + true + } else { + false + } + } +} From db7962630762015fbedf4bbce068acec77590963 Mon Sep 17 00:00:00 2001 From: Pacu Date: Sat, 22 Nov 2025 16:27:47 -0300 Subject: [PATCH 061/114] Impl ValidatedGetBlockRangeRequest --- zaino-proto/src/proto/utils.rs | 51 ++++++++++++++++++++++++++++++++-- 1 file changed, 49 insertions(+), 2 deletions(-) diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs index 5e3beec17..6ad15c09e 100644 --- a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -64,10 +64,52 @@ pub struct ValidatedBlockRangeRequest { impl ValidatedBlockRangeRequest { /// validates a BlockRange in terms of the 
`GetBlockRange` RPC - pub fn validate_get_block_range_request( + pub fn new_from_block_range( request: &BlockRange, ) -> Result { - Err(GetBlockRangeError::StartHeightOutOfRange) + let start: u32 = match &request.start { + Some(block_id) => match block_id.height.try_into() { + Ok(height) => height, + Err(_) => { + return Err(GetBlockRangeError::StartHeightOutOfRange); + } + }, + None => { + return Err(GetBlockRangeError::NoStartHeightProvided); + } + }; + let end: u32 = match &request.end { + Some(block_id) => match block_id.height.try_into() { + Ok(height) => height, + Err(_) => { + return Err(GetBlockRangeError::EndHeightOutOfRange); + } + }, + None => { + return Err(GetBlockRangeError::NoEndHeightProvided); + } + }; + + let pool_types = pool_types_from_vector(&request.pool_types) + .map_err(|e| GetBlockRangeError::PoolTypArgumentError(e))?; + + Ok(ValidatedBlockRangeRequest { + start: start, + end: end, + pool_types: pool_types, + }) + } + + pub fn start(&self) -> u32 { + self.start + } + + pub fn end(&self) -> u32 { + self.end + } + + pub fn pool_types(&self) -> Vec { + self.pool_types.clone() } /// checks whether this request is specified in reversed order @@ -78,4 +120,9 @@ impl ValidatedBlockRangeRequest { false } } + + /// Reverses the order of this request + pub fn reverse(&mut self) { + (self.start, self.end) = (self.end, self.start); + } } From a6f1ec427fcac4b0b73cb57a93cdc3f650e6af87 Mon Sep 17 00:00:00 2001 From: Pacu Date: Sat, 22 Nov 2025 16:48:10 -0300 Subject: [PATCH 062/114] Refactor BlockRange request validation --- zaino-state/src/backends/fetch.rs | 58 +++++++++++++++---------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index 52eea4430..5659d88ff 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -43,7 +43,7 @@ use zaino_proto::proto::{ AddressList, Balance, BlockId, BlockRange, Duration, Exclude, 
GetAddressUtxosArg, GetAddressUtxosReply, GetAddressUtxosReplyList, LightdInfo, PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, - }, + }, utils::{GetBlockRangeError, ValidatedBlockRangeRequest}, }; use crate::TransactionHash; @@ -808,46 +808,46 @@ impl LightWalletIndexer for FetchServiceSubscriber { &self, request: BlockRange, ) -> Result { - let mut start: u32 = match request.start { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(FetchServiceError::TonicStatusError( + let mut validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) + .map_err(|e| { + match e { + GetBlockRangeError::StartHeightOutOfRange => FetchServiceError::TonicStatusError( tonic::Status::invalid_argument( "Error: Start height out of range. Failed to convert to u32.", ), - )); - } - }, - None => { - return Err(FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument("Error: No start height given."), - )); - } - }; - let mut end: u32 = match request.end { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(FetchServiceError::TonicStatusError( + ), + GetBlockRangeError::NoStartHeightProvided => FetchServiceError::TonicStatusError( + tonic::Status::invalid_argument( + "Error: Start height out of range. Failed to convert to u32.", + ), + ), + GetBlockRangeError::EndHeightOutOfRange => FetchServiceError::TonicStatusError( tonic::Status::invalid_argument( "Error: End height out of range. 
Failed to convert to u32.", ), - )); - } - }, - None => { - return Err(FetchServiceError::TonicStatusError( + ), + GetBlockRangeError::NoEndHeightProvided => FetchServiceError::TonicStatusError( tonic::Status::invalid_argument("Error: No start height given."), - )); + ), + GetBlockRangeError::PoolTypArgumentError(e) => FetchServiceError::TonicStatusError( + tonic::Status::invalid_argument("Error: No start height given."), + ), + } - }; - let rev_order = if start > end { - (start, end) = (end, start); + })?; + + + // FIXME: this should be changed but this logic is hard to understand and we lack tests. + // we will maintain the behaviour with less smelly code + let rev_order = if validated_request.is_reverse_ordered() { + validated_request.reverse(); true } else { false }; + let start = validated_request.start(); + let end = validated_request.end(); + let chain_height = self.block_cache.get_chain_height().await?.0; let fetch_service_clone = self.clone(); let service_timeout = self.config.service.timeout; From a17d467ded8e3953a5d91396e38784be4e33e2f5 Mon Sep 17 00:00:00 2001 From: Pacu Date: Sat, 22 Nov 2025 17:03:21 -0300 Subject: [PATCH 063/114] Refactor request validation for get_block_range_nullifiers --- zaino-state/src/backends/fetch.rs | 60 +++++++------------------------ zaino-state/src/error.rs | 29 +++++++++++++++ 2 files changed, 41 insertions(+), 48 deletions(-) diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index 5659d88ff..cba56a644 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -809,33 +809,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { request: BlockRange, ) -> Result { let mut validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) - .map_err(|e| { - match e { - GetBlockRangeError::StartHeightOutOfRange => FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument( - "Error: Start height out of range. 
Failed to convert to u32.", - ), - ), - GetBlockRangeError::NoStartHeightProvided => FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument( - "Error: Start height out of range. Failed to convert to u32.", - ), - ), - GetBlockRangeError::EndHeightOutOfRange => FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument( - "Error: End height out of range. Failed to convert to u32.", - ), - ), - GetBlockRangeError::NoEndHeightProvided => FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument("Error: No start height given."), - ), - GetBlockRangeError::PoolTypArgumentError(e) => FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument("Error: No start height given."), - ), - - } - })?; - + .map_err(FetchServiceError::from_get_block_change_error)?; // FIXME: this should be changed but this logic is hard to understand and we lack tests. // we will maintain the behaviour with less smelly code @@ -920,31 +894,21 @@ impl LightWalletIndexer for FetchServiceSubscriber { async fn get_block_range_nullifiers( &self, request: BlockRange, - ) -> Result { - let tonic_status_error = - |err| FetchServiceError::TonicStatusError(tonic::Status::invalid_argument(err)); - let mut start = match request.start { - Some(block_id) => match u32::try_from(block_id.height) { - Ok(height) => Ok(height), - Err(_) => Err("Error: Start height out of range. Failed to convert to u32."), - }, - None => Err("Error: No start height given."), - } - .map_err(tonic_status_error)?; - let mut end = match request.end { - Some(block_id) => match u32::try_from(block_id.height) { - Ok(height) => Ok(height), - Err(_) => Err("Error: End height out of range. 
Failed to convert to u32."), - }, - None => Err("Error: No start height given."), - } - .map_err(tonic_status_error)?; - let rev_order = if start > end { - (start, end) = (end, start); + ) -> Result { + let mut validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) + .map_err(FetchServiceError::from_get_block_change_error)?; + + // FIXME: this should be changed but this logic is hard to understand and we lack tests. + // we will maintain the behaviour with less smelly code + let rev_order = if validated_request.is_reverse_ordered() { + validated_request.reverse(); true } else { false }; + + let start = validated_request.start(); + let end = validated_request.end(); let chain_height = self.block_cache.get_chain_height().await?.0; let fetch_service_clone = self.clone(); let service_timeout = self.config.service.timeout; diff --git a/zaino-state/src/error.rs b/zaino-state/src/error.rs index 7b999e7d7..778049999 100644 --- a/zaino-state/src/error.rs +++ b/zaino-state/src/error.rs @@ -8,6 +8,7 @@ use crate::BlockHash; use std::{any::type_name, fmt::Display}; use zaino_fetch::jsonrpsee::connector::RpcRequestError; +use zaino_proto::proto::utils::GetBlockRangeError; impl From> for StateServiceError { fn from(value: RpcRequestError) -> Self { @@ -199,6 +200,34 @@ pub enum FetchServiceError { SerializationError(#[from] zebra_chain::serialization::SerializationError), } +impl FetchServiceError { + pub(crate) fn from_get_block_change_error(error: GetBlockRangeError) -> Self { + match error { + GetBlockRangeError::StartHeightOutOfRange => { + FetchServiceError::TonicStatusError(tonic::Status::invalid_argument( + "Error: Start height out of range. Failed to convert to u32.", + )) + } + GetBlockRangeError::NoStartHeightProvided => { + FetchServiceError::TonicStatusError(tonic::Status::invalid_argument( + "Error: Start height out of range. 
Failed to convert to u32.", + )) + } + GetBlockRangeError::EndHeightOutOfRange => { + FetchServiceError::TonicStatusError(tonic::Status::invalid_argument( + "Error: End height out of range. Failed to convert to u32.", + )) + } + GetBlockRangeError::NoEndHeightProvided => FetchServiceError::TonicStatusError( + tonic::Status::invalid_argument("Error: No start height given."), + ), + GetBlockRangeError::PoolTypArgumentError(e) => FetchServiceError::TonicStatusError( + tonic::Status::invalid_argument("Error: No start height given."), + ), + } + } +} + #[allow(deprecated)] impl From for tonic::Status { fn from(error: FetchServiceError) -> Self { From 42f5fc7668dc258253c6f89a57eda0cb72a63859 Mon Sep 17 00:00:00 2001 From: Pacu Date: Mon, 24 Nov 2025 13:27:13 -0300 Subject: [PATCH 064/114] refactor state service `get_block_range` to use a validated request also cargo fmt --- integration-tests/tests/state_service.rs | 2 +- zaino-proto/src/proto/compact_formats.rs | 78 +- zaino-proto/src/proto/proposal.rs | 34 +- zaino-proto/src/proto/service.rs | 1349 +++++++++++++++------- zaino-state/src/backends/fetch.rs | 7 +- zaino-state/src/backends/state.rs | 54 +- 6 files changed, 997 insertions(+), 527 deletions(-) diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index 73d2f3d74..b8468be8c 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -2218,7 +2218,7 @@ mod zebra { .map(Result::unwrap) .collect::>() .await; - let state_service_get_block_range = state_service_subscriber + let state_serviget_block_range = state_service_subscriber .get_block_range_nullifiers(request) .await .unwrap() diff --git a/zaino-proto/src/proto/compact_formats.rs b/zaino-proto/src/proto/compact_formats.rs index 44455378f..82c2eea51 100644 --- a/zaino-proto/src/proto/compact_formats.rs +++ b/zaino-proto/src/proto/compact_formats.rs @@ -1,6 +1,6 @@ +// This file is @generated by prost-build. 
/// Information about the state of the chain as of a given block. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ChainMetadata { /// the size of the Sapling note commitment tree as of the end of this block #[prost(uint32, tag = "1")] @@ -9,13 +9,13 @@ pub struct ChainMetadata { #[prost(uint32, tag = "2")] pub orchard_commitment_tree_size: u32, } -/// A compact representation of the shielded data in a Zcash block. +/// A compact representation of a Zcash block. /// /// CompactBlock is a packaging of ONLY the data from a block that's needed to: -/// 1. Detect a payment to your shielded Sapling address -/// 2. Detect a spend of your shielded Sapling notes -/// 3. Update your witnesses to generate new Sapling spend proofs. -#[allow(clippy::derive_partial_eq_without_eq)] +/// 1. Detect a payment to your Shielded address +/// 2. Detect a spend of your Shielded notes +/// 3. Update your witnesses to generate new spend proofs. +/// 4. Spend UTXOs associated to t-addresses of your wallet. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CompactBlock { /// the version of this wire format, for storage @@ -33,7 +33,7 @@ pub struct CompactBlock { /// Unix epoch time when the block was mined #[prost(uint32, tag = "5")] pub time: u32, - /// (hash, prevHash, and time) OR (full header) + /// full header (as returned by the getblock RPC) #[prost(bytes = "vec", tag = "6")] pub header: ::prost::alloc::vec::Vec, /// zero or more compact transactions from this block @@ -43,24 +43,24 @@ pub struct CompactBlock { #[prost(message, optional, tag = "8")] pub chain_metadata: ::core::option::Option, } -/// A compact representation of the shielded data in a Zcash transaction. +/// A compact representation of a Zcash transaction. 
/// /// CompactTx contains the minimum information for a wallet to know if this transaction -/// is relevant to it (either pays to it or spends from it) via shielded elements -/// only. This message will not encode a transparent-to-transparent transaction. -#[allow(clippy::derive_partial_eq_without_eq)] +/// is relevant to it (either pays to it or spends from it) via shielded elements. Additionally, +/// it can optionally include the minimum necessary data to detect payments to transparent addresses +/// related to your wallet. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CompactTx { - /// Index and hash will allow the receiver to call out to chain - /// explorers or other data structures to retrieve more information - /// about this transaction. - /// - /// the index within the full block + /// The index of the transaction within the block. #[prost(uint64, tag = "1")] pub index: u64, - /// the ID (hash) of this transaction, same as in block explorers + /// The id of the transaction as defined in + /// [§ 7.1.1 ‘Transaction Identifiers’]() + /// This byte array MUST be in protocol order and MUST NOT be reversed + /// or hex-encoded; the byte-reversed and hex-encoded representation is + /// exclusively a textual representation of a txid. #[prost(bytes = "vec", tag = "2")] - pub hash: ::prost::alloc::vec::Vec, + pub txid: ::prost::alloc::vec::Vec, /// The transaction fee: present if server can provide. In the case of a /// stateless server and a transaction with transparent inputs, this will be /// unset because the calculation requires reference to prior transactions. @@ -74,12 +74,48 @@ pub struct CompactTx { pub outputs: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "6")] pub actions: ::prost::alloc::vec::Vec, + /// `CompactTxIn` values corresponding to the `vin` entries of the full transaction. + /// + /// Note: the single null-outpoint input for coinbase transactions is omitted. 
Light + /// clients can test `CompactTx.index == 0` to determine whether a `CompactTx` + /// represents a coinbase transaction, as the coinbase transaction is always the + /// first transaction in any block. + #[prost(message, repeated, tag = "7")] + pub vin: ::prost::alloc::vec::Vec, + /// A sequence of transparent outputs being created by the transaction. + #[prost(message, repeated, tag = "8")] + pub vout: ::prost::alloc::vec::Vec, +} +/// A compact representation of a transparent transaction input. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CompactTxIn { + /// The id of the transaction that generated the output being spent. This + /// byte array must be in protocol order and MUST NOT be reversed or + /// hex-encoded. + #[prost(bytes = "vec", tag = "1")] + pub prevout_txid: ::prost::alloc::vec::Vec, + /// The index of the output being spent in the `vout` array of the + /// transaction referred to by `prevoutTxid`. + #[prost(uint32, tag = "2")] + pub prevout_index: u32, +} +/// A transparent output being created by the transaction. +/// +/// This contains identical data to the `TxOut` type in the transaction itself, and +/// thus it is not "compact". +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxOut { + /// The value of the output, in Zatoshis. + #[prost(uint64, tag = "1")] + pub value: u64, + /// The script pubkey that must be satisfied in order to spend this output. + #[prost(bytes = "vec", tag = "2")] + pub script_pub_key: ::prost::alloc::vec::Vec, } /// A compact representation of a [Sapling Spend](). /// /// CompactSaplingSpend is a Sapling Spend Description as described in 7.3 of the Zcash /// protocol specification. 
-#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CompactSaplingSpend { /// Nullifier (see the Zcash protocol specification) @@ -90,7 +126,6 @@ pub struct CompactSaplingSpend { /// /// It encodes the `cmu` field, `ephemeralKey` field, and a 52-byte prefix of the /// `encCiphertext` field of a Sapling Output Description. Total size is 116 bytes. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CompactSaplingOutput { /// Note commitment u-coordinate. @@ -104,7 +139,6 @@ pub struct CompactSaplingOutput { pub ciphertext: ::prost::alloc::vec::Vec, } /// A compact representation of an [Orchard Action](). -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CompactOrchardAction { /// \[32\] The nullifier of the input note diff --git a/zaino-proto/src/proto/proposal.rs b/zaino-proto/src/proto/proposal.rs index 1ea321afc..eed2b14a7 100644 --- a/zaino-proto/src/proto/proposal.rs +++ b/zaino-proto/src/proto/proposal.rs @@ -1,5 +1,5 @@ +// This file is @generated by prost-build. /// A data structure that describes a series of transactions to be created. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Proposal { /// The version of this serialization format. @@ -20,7 +20,6 @@ pub struct Proposal { } /// A data structure that describes the inputs to be consumed and outputs to /// be produced in a proposed transaction. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ProposalStep { /// ZIP 321 serialized transaction request @@ -50,8 +49,7 @@ pub struct ProposalStep { /// A mapping from ZIP 321 payment index to the output pool that has been chosen /// for that payment, based upon the payment address and the selected inputs to /// the transaction. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct PaymentOutputPool { #[prost(uint32, tag = "1")] pub payment_index: u32, @@ -60,7 +58,6 @@ pub struct PaymentOutputPool { } /// The unique identifier and value for each proposed input that does not /// require a back-reference to a prior step of the proposal. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReceivedOutput { #[prost(bytes = "vec", tag = "1")] @@ -74,8 +71,7 @@ pub struct ReceivedOutput { } /// A reference to a payment in a prior step of the proposal. This payment must /// belong to the wallet. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct PriorStepOutput { #[prost(uint32, tag = "1")] pub step_index: u32, @@ -83,8 +79,7 @@ pub struct PriorStepOutput { pub payment_index: u32, } /// A reference to a change or ephemeral output from a prior step of the proposal. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct PriorStepChange { #[prost(uint32, tag = "1")] pub step_index: u32, @@ -92,7 +87,6 @@ pub struct PriorStepChange { pub change_index: u32, } /// The unique identifier and value for an input to be used in the transaction. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ProposedInput { #[prost(oneof = "proposed_input::Value", tags = "1, 2, 3")] @@ -100,7 +94,6 @@ pub struct ProposedInput { } /// Nested message and enum types in `ProposedInput`. 
pub mod proposed_input { - #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Value { #[prost(message, tag = "1")] @@ -112,7 +105,6 @@ pub mod proposed_input { } } /// The proposed change outputs and fee value. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionBalance { /// A list of change or ephemeral output values. @@ -129,7 +121,6 @@ pub struct TransactionBalance { /// an ephemeral output, which must be spent by a subsequent step. This is /// only supported for transparent outputs. Each ephemeral output will be /// given a unique t-address. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ChangeValue { /// The value of a change or ephemeral output to be created, in zatoshis. @@ -148,7 +139,6 @@ pub struct ChangeValue { } /// An object wrapper for memo bytes, to facilitate representing the /// `change_memo == None` case. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MemoBytes { #[prost(bytes = "vec", tag = "1")] @@ -176,10 +166,10 @@ impl ValuePool { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - ValuePool::PoolNotSpecified => "PoolNotSpecified", - ValuePool::Transparent => "Transparent", - ValuePool::Sapling => "Sapling", - ValuePool::Orchard => "Orchard", + Self::PoolNotSpecified => "PoolNotSpecified", + Self::Transparent => "Transparent", + Self::Sapling => "Sapling", + Self::Orchard => "Orchard", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -216,10 +206,10 @@ impl FeeRule { /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
pub fn as_str_name(&self) -> &'static str { match self { - FeeRule::NotSpecified => "FeeRuleNotSpecified", - FeeRule::PreZip313 => "PreZip313", - FeeRule::Zip313 => "Zip313", - FeeRule::Zip317 => "Zip317", + Self::NotSpecified => "FeeRuleNotSpecified", + Self::PreZip313 => "PreZip313", + Self::Zip313 => "Zip313", + Self::Zip317 => "Zip317", } } /// Creates an enum from field names used in the ProtoBuf definition. diff --git a/zaino-proto/src/proto/service.rs b/zaino-proto/src/proto/service.rs index 36834c1e2..2441bc93f 100644 --- a/zaino-proto/src/proto/service.rs +++ b/zaino-proto/src/proto/service.rs @@ -1,6 +1,32 @@ +// This file is @generated by prost-build. +/// A compact representation of a transparent transaction input. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CompactTxIn { + /// The id of the transaction that generated the output being spent. This + /// byte array must be in protocol order and MUST NOT be reversed or + /// hex-encoded. + #[prost(bytes = "vec", tag = "1")] + pub prevout_txid: ::prost::alloc::vec::Vec, + /// The index of the output being spent in the `vout` array of the + /// transaction referred to by `prevoutTxid`. + #[prost(uint32, tag = "2")] + pub prevout_index: u32, +} +/// A transparent output being created by the transaction. +/// +/// This contains identical data to the `TxOut` type in the transaction itself, and +/// thus it is not "compact". +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxOut { + /// The value of the output, in Zatoshis. + #[prost(uint64, tag = "1")] + pub value: u64, + /// The script pubkey that must be satisfied in order to spend this output. + #[prost(bytes = "vec", tag = "2")] + pub script_pub_key: ::prost::alloc::vec::Vec, +} /// A BlockID message contains identifiers to select a block: a height or a /// hash. Specification by hash is not implemented, but may be in the future. 
-#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockId { #[prost(uint64, tag = "1")] @@ -10,18 +36,26 @@ pub struct BlockId { } /// BlockRange specifies a series of blocks from start to end inclusive. /// Both BlockIDs must be heights; specification by hash is not yet supported. -#[allow(clippy::derive_partial_eq_without_eq)] +/// +/// If no pool types are specified, the server should default to the legacy +/// behavior of returning only data relevant to the shielded (Sapling and +/// Orchard) pools; otherwise, the server should prune `CompactBlocks` returned +/// to include only data relevant to the requested pool types. Clients MUST +/// verify that the version of the server they are connected to are capable +/// of returning pruned and/or transparent data before setting `poolTypes` +/// to a non-empty value. #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockRange { #[prost(message, optional, tag = "1")] pub start: ::core::option::Option, #[prost(message, optional, tag = "2")] pub end: ::core::option::Option, + #[prost(enumeration = "PoolType", repeated, tag = "3")] + pub pool_types: ::prost::alloc::vec::Vec, } /// A TxFilter contains the information needed to identify a particular /// transaction: either a block and an index, or a direct transaction hash. /// Currently, only specification by hash is supported. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TxFilter { /// block identifier, height or hash @@ -37,20 +71,38 @@ pub struct TxFilter { /// RawTransaction contains the complete transaction data. It also optionally includes /// the block height in which the transaction was included, or, when returned /// by GetMempoolStream(), the latest block height. -#[allow(clippy::derive_partial_eq_without_eq)] +/// +/// FIXME: the documentation here about mempool status contradicts the documentation +/// for the `height` field. 
See #[derive(Clone, PartialEq, ::prost::Message)] pub struct RawTransaction { - /// exact data returned by Zcash 'getrawtransaction' + /// The serialized representation of the Zcash transaction. #[prost(bytes = "vec", tag = "1")] pub data: ::prost::alloc::vec::Vec, - /// height that the transaction was mined (or -1) + /// The height at which the transaction is mined, or a sentinel value. + /// + /// Due to an error in the original protobuf definition, it is necessary to + /// reinterpret the result of the `getrawtransaction` RPC call. Zcashd will + /// return the int64 value `-1` for the height of transactions that appear + /// in the block index, but which are not mined in the main chain. Here, the + /// height field of `RawTransaction` was erroneously created as a `uint64`, + /// and as such we must map the response from the zcashd RPC API to be + /// representable within this space. Additionally, the `height` field will + /// be absent for transactions in the mempool, resulting in the default + /// value of `0` being set. Therefore, the meanings of the `height` field of + /// the `RawTransaction` type are as follows: + /// + /// * height 0: the transaction is in the mempool + /// * height 0xffffffffffffffff: the transaction has been mined on a fork that + /// is not currently the main chain + /// * any other height: the transaction has been mined in the main chain at the + /// given height #[prost(uint64, tag = "2")] pub height: u64, } /// A SendResponse encodes an error code and a string. It is currently used /// only by SendTransaction(). If error code is zero, the operation was /// successful; if non-zero, it and the message specify the failure. 
-#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SendResponse { #[prost(int32, tag = "1")] @@ -59,16 +111,13 @@ pub struct SendResponse { pub error_message: ::prost::alloc::string::String, } /// Chainspec is a placeholder to allow specification of a particular chain fork. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ChainSpec {} /// Empty is for gRPCs that take no arguments, currently only GetLightdInfo. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Empty {} /// LightdInfo returns various information about this lightwalletd instance /// and the state of the blockchain. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct LightdInfo { #[prost(string, tag = "1")] @@ -107,24 +156,39 @@ pub struct LightdInfo { /// example: "/MagicBean:4.1.1/" #[prost(string, tag = "14")] pub zcashd_subversion: ::prost::alloc::string::String, + /// Zcash donation UA address + #[prost(string, tag = "15")] + pub donation_address: ::prost::alloc::string::String, + /// name of next pending network upgrade, empty if none scheduled + #[prost(string, tag = "16")] + pub upgrade_name: ::prost::alloc::string::String, + /// height of next pending upgrade, zero if none is scheduled + #[prost(uint64, tag = "17")] + pub upgrade_height: u64, + /// version of served by this server + #[prost(string, tag = "18")] + pub lightwallet_protocol_version: ::prost::alloc::string::String, } -/// TransparentAddressBlockFilter restricts the results to the given address -/// or block range. 
-#[allow(clippy::derive_partial_eq_without_eq)] +/// TransparentAddressBlockFilter restricts the results of the GRPC methods that +/// use it to the transactions that involve the given address and were mined in +/// the specified block range. Non-default values for both the address and the +/// block range must be specified. Mempool transactions are not included. +/// +/// The `poolTypes` field of the `range` argument should be ignored. +/// Implementations MAY consider it an error if any pool types are specified. #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransparentAddressBlockFilter { /// t-address #[prost(string, tag = "1")] pub address: ::prost::alloc::string::String, - /// start, end heights + /// start, end heights only #[prost(message, optional, tag = "2")] pub range: ::core::option::Option, } /// Duration is currently used only for testing, so that the Ping rpc /// can simulate a delay, to create many simultaneous connections. Units /// are microseconds. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Duration { #[prost(int64, tag = "1")] pub interval_us: i64, @@ -132,40 +196,47 @@ pub struct Duration { /// PingResponse is used to indicate concurrency, how many Ping rpcs /// are executing upon entry and upon exit (after the delay). /// This rpc is used for testing only. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct PingResponse { #[prost(int64, tag = "1")] pub entry: i64, #[prost(int64, tag = "2")] pub exit: i64, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Address { #[prost(string, tag = "1")] pub address: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AddressList { #[prost(string, repeated, tag = "1")] pub addresses: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Balance { #[prost(int64, tag = "1")] pub value_zat: i64, } -#[allow(clippy::derive_partial_eq_without_eq)] +/// Request parameters for the `GetMempoolTx` RPC. #[derive(Clone, PartialEq, ::prost::Message)] -pub struct Exclude { +pub struct GetMempoolTxRequest { + /// A list of transaction ID byte string suffixes that should be excluded + /// from the response. These suffixes may be produced either directly from + /// the underlying txid bytes, or, if the source values are encoded txid + /// strings, by truncating the hexadecimal representation of each + /// transaction ID to an even number of characters, and then hex-decoding + /// and then byte-reversing this value to obtain the byte representation. #[prost(bytes = "vec", repeated, tag = "1")] - pub txid: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + pub exclude_txid_suffixes: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + /// The server must prune `CompactTx`s returned to include only data + /// relevant to the requested pool types. 
If no pool types are specified, + /// the server should default to the legacy behavior of returning only data + /// relevant to the shielded (Sapling and Orchard) pools. + #[prost(enumeration = "PoolType", repeated, tag = "3")] + pub pool_types: ::prost::alloc::vec::Vec, } /// The TreeState is derived from the Zcash z_gettreestate rpc. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TreeState { /// "main" or "test" @@ -187,8 +258,7 @@ pub struct TreeState { #[prost(string, tag = "6")] pub orchard_tree: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct GetSubtreeRootsArg { /// Index identifying where to start returning subtree roots #[prost(uint32, tag = "1")] @@ -200,7 +270,6 @@ pub struct GetSubtreeRootsArg { #[prost(uint32, tag = "3")] pub max_entries: u32, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SubtreeRoot { /// The 32-byte Merkle root of the subtree. @@ -215,7 +284,6 @@ pub struct SubtreeRoot { } /// Results are sorted by height, which makes it easy to issue another /// request that picks up from where the previous left off. 
-#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetAddressUtxosArg { #[prost(string, repeated, tag = "1")] @@ -226,7 +294,6 @@ pub struct GetAddressUtxosArg { #[prost(uint32, tag = "3")] pub max_entries: u32, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetAddressUtxosReply { #[prost(string, tag = "6")] @@ -242,12 +309,44 @@ pub struct GetAddressUtxosReply { #[prost(uint64, tag = "5")] pub height: u64, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetAddressUtxosReplyList { #[prost(message, repeated, tag = "1")] pub address_utxos: ::prost::alloc::vec::Vec, } +/// An identifier for a Zcash value pool. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum PoolType { + Invalid = 0, + Transparent = 1, + Sapling = 2, + Orchard = 3, +} +impl PoolType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Invalid => "POOL_TYPE_INVALID", + Self::Transparent => "TRANSPARENT", + Self::Sapling => "SAPLING", + Self::Orchard => "ORCHARD", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "POOL_TYPE_INVALID" => Some(Self::Invalid), + "TRANSPARENT" => Some(Self::Transparent), + "SAPLING" => Some(Self::Sapling), + "ORCHARD" => Some(Self::Orchard), + _ => None, + } + } +} #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum ShieldedProtocol { @@ -261,8 +360,8 @@ impl ShieldedProtocol { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - ShieldedProtocol::Sapling => "sapling", - ShieldedProtocol::Orchard => "orchard", + Self::Sapling => "sapling", + Self::Orchard => "orchard", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -276,9 +375,15 @@ impl ShieldedProtocol { } /// Generated client implementations. pub mod compact_tx_streamer_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::http::Uri; + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct CompactTxStreamerClient { inner: tonic::client::Grpc, @@ -298,8 +403,8 @@ pub mod compact_tx_streamer_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -322,8 +427,9 @@ pub mod compact_tx_streamer_client { >::ResponseBody, >, >, - >>::Error: - Into + Send + Sync, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, { CompactTxStreamerClient::new(InterceptedService::new(inner, interceptor)) } @@ -358,26 +464,31 @@ pub mod compact_tx_streamer_client { self.inner = 
self.inner.max_encoding_message_size(limit); self } - /// Return the height of the tip of the best chain + /// Return the BlockID of the block at the tip of the best chain pub async fn get_latest_block( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLatestBlock", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLatestBlock", + ), + ); self.inner.unary(req, path, codec).await } /// Return the compact block corresponding to the given block identifier @@ -388,24 +499,33 @@ pub mod compact_tx_streamer_client { tonic::Response, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlock", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlock", + ), + ); self.inner.unary(req, path, codec).await } - /// Same as 
GetBlock except actions contain only nullifiers + /// Same as GetBlock except the returned CompactBlock value contains only + /// nullifiers. + /// + /// Note: this method is deprecated. Implementations should ignore any + /// `PoolType::TRANSPARENT` member of the `poolTypes` argument. pub async fn get_block_nullifiers( &mut self, request: impl tonic::IntoRequest, @@ -413,71 +533,98 @@ pub mod compact_tx_streamer_client { tonic::Response, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockNullifiers", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockNullifiers", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockNullifiers", + ), + ); self.inner.unary(req, path, codec).await } - /// Return a list of consecutive compact blocks + /// Return a list of consecutive compact blocks in the specified range, + /// which is inclusive of `range.end`. + /// + /// If range.start <= range.end, blocks are returned increasing height order; + /// otherwise blocks are returned in decreasing height order. 
pub async fn get_block_range( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response>, + tonic::Response< + tonic::codec::Streaming, + >, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRange", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockRange", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockRange", + ), + ); self.inner.server_streaming(req, path, codec).await } - /// Same as GetBlockRange except actions contain only nullifiers + /// Same as GetBlockRange except the returned CompactBlock values contain + /// only nullifiers. + /// + /// Note: this method is deprecated. Implementations should ignore any + /// `PoolType::TRANSPARENT` member of the `poolTypes` argument. 
pub async fn get_block_range_nullifiers( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response>, + tonic::Response< + tonic::codec::Streaming, + >, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRangeNullifiers", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockRangeNullifiers", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockRangeNullifiers", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Return the requested full (not compact) transaction (as from zcashd) @@ -485,21 +632,26 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTransaction", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTransaction", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTransaction", + ), + ); self.inner.unary(req, path, 
codec).await } /// Submit the given transaction to the Zcash network @@ -507,24 +659,32 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/SendTransaction", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "SendTransaction", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "SendTransaction", + ), + ); self.inner.unary(req, path, codec).await } - /// Return the txids corresponding to the given t-address within the given block range + /// Return RawTransactions that match the given transparent address filter. + /// + /// Note: This function is misnamed, it returns complete `RawTransaction` values, not TxIds. + /// NOTE: this method is deprecated, please use GetTaddressTransactions instead. 
pub async fn get_taddress_txids( &mut self, request: impl tonic::IntoRequest, @@ -532,96 +692,152 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTxids", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressTxids", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressTxids", + ), + ); + self.inner.server_streaming(req, path, codec).await + } + /// Return the transactions corresponding to the given t-address within the given block range. + /// Mempool transactions are not included in the results. 
+ pub async fn get_taddress_transactions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTransactions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressTransactions", + ), + ); self.inner.server_streaming(req, path, codec).await } pub async fn get_taddress_balance( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalance", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressBalance", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressBalance", + ), + ); self.inner.unary(req, path, codec).await } pub async fn get_taddress_balance_stream( &mut self, request: impl tonic::IntoStreamingRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was 
not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalanceStream", ); let mut req = request.into_streaming_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressBalanceStream", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressBalanceStream", + ), + ); self.inner.client_streaming(req, path, codec).await } - /// Return the compact transactions currently in the mempool; the results - /// can be a few seconds out of date. If the Exclude list is empty, return - /// all transactions; otherwise return all *except* those in the Exclude list - /// (if any); this allows the client to avoid receiving transactions that it - /// already has (from an earlier call to this rpc). The transaction IDs in the - /// Exclude list can be shortened to any number of bytes to make the request - /// more bandwidth-efficient; if two or more transactions in the mempool - /// match a shortened txid, they are all sent (none is excluded). Transactions - /// in the exclude list that don't exist in the mempool are ignored. + /// Returns a stream of the compact transaction representation for transactions + /// currently in the mempool. The results of this operation may be a few + /// seconds out of date. If the `exclude_txid_suffixes` list is empty, + /// return all transactions; otherwise return all *except* those in the + /// `exclude_txid_suffixes` list (if any); this allows the client to avoid + /// receiving transactions that it already has (from an earlier call to this + /// RPC). 
The transaction IDs in the `exclude_txid_suffixes` list can be + /// shortened to any number of bytes to make the request more + /// bandwidth-efficient; if two or more transactions in the mempool match a + /// txid suffix, none of the matching transactions are excluded. Txid + /// suffixes in the exclude list that don't match any transactions in the + /// mempool are ignored. pub async fn get_mempool_tx( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response>, + tonic::Response< + tonic::codec::Streaming, + >, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolTx", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetMempoolTx", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetMempoolTx", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Return a stream of current Mempool transactions. 
This will keep the output stream open while @@ -633,21 +849,26 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolStream", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetMempoolStream", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetMempoolStream", + ), + ); self.inner.server_streaming(req, path, codec).await } /// GetTreeState returns the note commitment tree state corresponding to the given block. 
@@ -658,46 +879,56 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTreeState", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTreeState", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTreeState", + ), + ); self.inner.unary(req, path, codec).await } pub async fn get_latest_tree_state( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestTreeState", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLatestTreeState", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLatestTreeState", + ), + ); self.inner.unary(req, path, codec).await } - /// Returns a stream of information about roots of subtrees of the Sapling and Orchard - /// note commitment trees. 
+ /// Returns a stream of information about roots of subtrees of the note commitment tree + /// for the specified shielded protocol (Sapling or Orchard). pub async fn get_subtree_roots( &mut self, request: impl tonic::IntoRequest, @@ -705,43 +936,55 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetSubtreeRoots", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetSubtreeRoots", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetSubtreeRoots", + ), + ); self.inner.server_streaming(req, path, codec).await } pub async fn get_address_utxos( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxos", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetAddressUtxos", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + 
"cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetAddressUtxos", + ), + ); self.inner.unary(req, path, codec).await } pub async fn get_address_utxos_stream( @@ -751,21 +994,26 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxosStream", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetAddressUtxosStream", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetAddressUtxosStream", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Return information about this lightwalletd instance and the blockchain @@ -773,21 +1021,26 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLightdInfo", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLightdInfo", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLightdInfo", + ), 
+ ); self.inner.unary(req, path, codec).await } /// Testing-only, requires lightwalletd --ping-very-insecure (do not enable in production) @@ -795,33 +1048,41 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/Ping", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "Ping", - )); + req.extensions_mut() + .insert( + GrpcMethod::new("cash.z.wallet.sdk.rpc.CompactTxStreamer", "Ping"), + ); self.inner.unary(req, path, codec).await } } } /// Generated server implementations. pub mod compact_tx_streamer_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with CompactTxStreamerServer. 
#[async_trait] - pub trait CompactTxStreamer: Send + Sync + 'static { - /// Return the height of the tip of the best chain + pub trait CompactTxStreamer: std::marker::Send + std::marker::Sync + 'static { + /// Return the BlockID of the block at the tip of the best chain async fn get_latest_block( &self, request: tonic::Request, @@ -834,7 +1095,11 @@ pub mod compact_tx_streamer_server { tonic::Response, tonic::Status, >; - /// Same as GetBlock except actions contain only nullifiers + /// Same as GetBlock except the returned CompactBlock value contains only + /// nullifiers. + /// + /// Note: this method is deprecated. Implementations should ignore any + /// `PoolType::TRANSPARENT` member of the `poolTypes` argument. async fn get_block_nullifiers( &self, request: tonic::Request, @@ -848,26 +1113,42 @@ pub mod compact_tx_streamer_server { crate::proto::compact_formats::CompactBlock, tonic::Status, >, - > + Send + > + + std::marker::Send + 'static; - /// Return a list of consecutive compact blocks + /// Return a list of consecutive compact blocks in the specified range, + /// which is inclusive of `range.end`. + /// + /// If range.start <= range.end, blocks are returned increasing height order; + /// otherwise blocks are returned in decreasing height order. async fn get_block_range( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the GetBlockRangeNullifiers method. type GetBlockRangeNullifiersStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result< crate::proto::compact_formats::CompactBlock, tonic::Status, >, - > + Send + > + + std::marker::Send + 'static; - /// Same as GetBlockRange except actions contain only nullifiers + /// Same as GetBlockRange except the returned CompactBlock values contain + /// only nullifiers. + /// + /// Note: this method is deprecated. 
Implementations should ignore any + /// `PoolType::TRANSPARENT` member of the `poolTypes` argument. async fn get_block_range_nullifiers( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Return the requested full (not compact) transaction (as from zcashd) async fn get_transaction( &self, @@ -881,13 +1162,35 @@ pub mod compact_tx_streamer_server { /// Server streaming response type for the GetTaddressTxids method. type GetTaddressTxidsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + Send + > + + std::marker::Send + 'static; - /// Return the txids corresponding to the given t-address within the given block range + /// Return RawTransactions that match the given transparent address filter. + /// + /// Note: This function is misnamed, it returns complete `RawTransaction` values, not TxIds. + /// NOTE: this method is deprecated, please use GetTaddressTransactions instead. async fn get_taddress_txids( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Server streaming response type for the GetTaddressTransactions method. + type GetTaddressTransactionsStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + + std::marker::Send + + 'static; + /// Return the transactions corresponding to the given t-address within the given block range. + /// Mempool transactions are not included in the results. + async fn get_taddress_transactions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_taddress_balance( &self, request: tonic::Request, @@ -898,33 +1201,47 @@ pub mod compact_tx_streamer_server { ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the GetMempoolTx method. 
type GetMempoolTxStream: tonic::codegen::tokio_stream::Stream< - Item = std::result::Result, - > + Send + Item = std::result::Result< + crate::proto::compact_formats::CompactTx, + tonic::Status, + >, + > + + std::marker::Send + 'static; - /// Return the compact transactions currently in the mempool; the results - /// can be a few seconds out of date. If the Exclude list is empty, return - /// all transactions; otherwise return all *except* those in the Exclude list - /// (if any); this allows the client to avoid receiving transactions that it - /// already has (from an earlier call to this rpc). The transaction IDs in the - /// Exclude list can be shortened to any number of bytes to make the request - /// more bandwidth-efficient; if two or more transactions in the mempool - /// match a shortened txid, they are all sent (none is excluded). Transactions - /// in the exclude list that don't exist in the mempool are ignored. + /// Returns a stream of the compact transaction representation for transactions + /// currently in the mempool. The results of this operation may be a few + /// seconds out of date. If the `exclude_txid_suffixes` list is empty, + /// return all transactions; otherwise return all *except* those in the + /// `exclude_txid_suffixes` list (if any); this allows the client to avoid + /// receiving transactions that it already has (from an earlier call to this + /// RPC). The transaction IDs in the `exclude_txid_suffixes` list can be + /// shortened to any number of bytes to make the request more + /// bandwidth-efficient; if two or more transactions in the mempool match a + /// txid suffix, none of the matching transactions are excluded. Txid + /// suffixes in the exclude list that don't match any transactions in the + /// mempool are ignored. 
async fn get_mempool_tx( &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the GetMempoolStream method. type GetMempoolStreamStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + Send + > + + std::marker::Send + 'static; /// Return a stream of current Mempool transactions. This will keep the output stream open while /// there are mempool transactions. It will close the returned stream when a new block is mined. async fn get_mempool_stream( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// GetTreeState returns the note commitment tree state corresponding to the given block. /// See section 3.7 of the Zcash protocol specification. It returns several other useful /// values also (even though they can be obtained using GetBlock). @@ -940,27 +1257,38 @@ pub mod compact_tx_streamer_server { /// Server streaming response type for the GetSubtreeRoots method. type GetSubtreeRootsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + Send + > + + std::marker::Send + 'static; - /// Returns a stream of information about roots of subtrees of the Sapling and Orchard - /// note commitment trees. + /// Returns a stream of information about roots of subtrees of the note commitment tree + /// for the specified shielded protocol (Sapling or Orchard). async fn get_subtree_roots( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_address_utxos( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the GetAddressUtxosStream method. 
type GetAddressUtxosStreamStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + Send + > + + std::marker::Send + 'static; async fn get_address_utxos_stream( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Return information about this lightwalletd instance and the blockchain async fn get_lightd_info( &self, @@ -973,20 +1301,18 @@ pub mod compact_tx_streamer_server { ) -> std::result::Result, tonic::Status>; } #[derive(Debug)] - pub struct CompactTxStreamerServer { - inner: _Inner, + pub struct CompactTxStreamerServer { + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); - impl CompactTxStreamerServer { + impl CompactTxStreamerServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -995,7 +1321,10 @@ pub mod compact_tx_streamer_server { max_encoding_message_size: None, } } - pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService where F: tonic::service::Interceptor, { @@ -1033,8 +1362,8 @@ pub mod compact_tx_streamer_server { impl tonic::codegen::Service> for CompactTxStreamerServer where T: CompactTxStreamer, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; @@ -1046,21 +1375,27 @@ pub mod compact_tx_streamer_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { 
"/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock" => { #[allow(non_camel_case_types)] struct GetLatestBlockSvc(pub Arc); - impl tonic::server::UnaryService for GetLatestBlockSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetLatestBlockSvc { type Response = super::BlockId; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_latest_block(&inner, request).await + ::get_latest_block(&inner, request) + .await }; Box::pin(fut) } @@ -1071,7 +1406,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetLatestBlockSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1091,9 +1425,14 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock" => { #[allow(non_camel_case_types)] struct GetBlockSvc(pub Arc); - impl tonic::server::UnaryService for GetBlockSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService for GetBlockSvc { type Response = crate::proto::compact_formats::CompactBlock; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -1111,7 +1450,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetBlockSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1131,18 +1469,25 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockNullifiers" => { 
#[allow(non_camel_case_types)] struct GetBlockNullifiersSvc(pub Arc); - impl tonic::server::UnaryService - for GetBlockNullifiersSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetBlockNullifiersSvc { type Response = crate::proto::compact_formats::CompactBlock; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_block_nullifiers(&inner, request) + ::get_block_nullifiers( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1154,7 +1499,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetBlockNullifiersSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1174,21 +1518,24 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRange" => { #[allow(non_camel_case_types)] struct GetBlockRangeSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetBlockRangeSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetBlockRangeSvc { type Response = crate::proto::compact_formats::CompactBlock; type ResponseStream = T::GetBlockRangeStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_block_range(&inner, request).await + ::get_block_range(&inner, request) + .await }; Box::pin(fut) } @@ -1199,7 +1546,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let 
method = GetBlockRangeSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1219,14 +1565,16 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRangeNullifiers" => { #[allow(non_camel_case_types)] struct GetBlockRangeNullifiersSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetBlockRangeNullifiersSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetBlockRangeNullifiersSvc { type Response = crate::proto::compact_formats::CompactBlock; type ResponseStream = T::GetBlockRangeNullifiersStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -1234,9 +1582,10 @@ pub mod compact_tx_streamer_server { let inner = Arc::clone(&self.0); let fut = async move { ::get_block_range_nullifiers( - &inner, request, - ) - .await + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1247,7 +1596,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetBlockRangeNullifiersSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1267,16 +1615,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTransaction" => { #[allow(non_camel_case_types)] struct GetTransactionSvc(pub Arc); - impl tonic::server::UnaryService for GetTransactionSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetTransactionSvc { type Response = super::RawTransaction; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - 
::get_transaction(&inner, request).await + ::get_transaction(&inner, request) + .await }; Box::pin(fut) } @@ -1287,7 +1642,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTransactionSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1307,18 +1661,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/SendTransaction" => { #[allow(non_camel_case_types)] struct SendTransactionSvc(pub Arc); - impl tonic::server::UnaryService - for SendTransactionSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for SendTransactionSvc { type Response = super::SendResponse; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::send_transaction(&inner, request).await + ::send_transaction(&inner, request) + .await }; Box::pin(fut) } @@ -1329,7 +1688,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = SendTransactionSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1349,21 +1707,28 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTxids" => { #[allow(non_camel_case_types)] struct GetTaddressTxidsSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetTaddressTxidsSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService< + super::TransparentAddressBlockFilter, + > for GetTaddressTxidsSvc { type Response = super::RawTransaction; type ResponseStream = 
T::GetTaddressTxidsStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_taddress_txids(&inner, request).await + ::get_taddress_txids( + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1374,7 +1739,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTaddressTxidsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1391,21 +1755,79 @@ pub mod compact_tx_streamer_server { }; Box::pin(fut) } + "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTransactions" => { + #[allow(non_camel_case_types)] + struct GetTaddressTransactionsSvc(pub Arc); + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService< + super::TransparentAddressBlockFilter, + > for GetTaddressTransactionsSvc { + type Response = super::RawTransaction; + type ResponseStream = T::GetTaddressTransactionsStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_taddress_transactions( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetTaddressTransactionsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + 
.apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalance" => { #[allow(non_camel_case_types)] struct GetTaddressBalanceSvc(pub Arc); - impl tonic::server::UnaryService - for GetTaddressBalanceSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetTaddressBalanceSvc { type Response = super::Balance; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_taddress_balance(&inner, request) + ::get_taddress_balance( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1417,7 +1839,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTaddressBalanceSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1437,11 +1858,15 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalanceStream" => { #[allow(non_camel_case_types)] struct GetTaddressBalanceStreamSvc(pub Arc); - impl tonic::server::ClientStreamingService - for GetTaddressBalanceStreamSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ClientStreamingService + for GetTaddressBalanceStreamSvc { type Response = super::Balance; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request>, @@ -1449,9 +1874,10 @@ pub mod compact_tx_streamer_server { let inner = Arc::clone(&self.0); let fut = async 
move { ::get_taddress_balance_stream( - &inner, request, - ) - .await + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1462,7 +1888,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTaddressBalanceStreamSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1482,20 +1907,24 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolTx" => { #[allow(non_camel_case_types)] struct GetMempoolTxSvc(pub Arc); - impl tonic::server::ServerStreamingService - for GetMempoolTxSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetMempoolTxSvc { type Response = crate::proto::compact_formats::CompactTx; type ResponseStream = T::GetMempoolTxStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_mempool_tx(&inner, request).await + ::get_mempool_tx(&inner, request) + .await }; Box::pin(fut) } @@ -1506,7 +1935,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetMempoolTxSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1526,17 +1954,27 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolStream" => { #[allow(non_camel_case_types)] struct GetMempoolStreamSvc(pub Arc); - impl tonic::server::ServerStreamingService - for GetMempoolStreamSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetMempoolStreamSvc 
{ type Response = super::RawTransaction; type ResponseStream = T::GetMempoolStreamStream; - type Future = - BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_mempool_stream(&inner, request).await + ::get_mempool_stream( + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1547,7 +1985,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetMempoolStreamSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1567,16 +2004,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTreeState" => { #[allow(non_camel_case_types)] struct GetTreeStateSvc(pub Arc); - impl tonic::server::UnaryService for GetTreeStateSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetTreeStateSvc { type Response = super::TreeState; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_tree_state(&inner, request).await + ::get_tree_state(&inner, request) + .await }; Box::pin(fut) } @@ -1587,7 +2031,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTreeStateSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1607,13 +2050,23 @@ pub mod compact_tx_streamer_server { 
"/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestTreeState" => { #[allow(non_camel_case_types)] struct GetLatestTreeStateSvc(pub Arc); - impl tonic::server::UnaryService for GetLatestTreeStateSvc { + impl tonic::server::UnaryService + for GetLatestTreeStateSvc { type Response = super::TreeState; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_latest_tree_state(&inner, request) + ::get_latest_tree_state( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1625,7 +2078,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetLatestTreeStateSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1645,21 +2097,24 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetSubtreeRoots" => { #[allow(non_camel_case_types)] struct GetSubtreeRootsSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetSubtreeRootsSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetSubtreeRootsSvc { type Response = super::SubtreeRoot; type ResponseStream = T::GetSubtreeRootsStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_subtree_roots(&inner, request).await + ::get_subtree_roots(&inner, request) + .await }; Box::pin(fut) } @@ -1670,7 +2125,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = 
self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetSubtreeRootsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1690,19 +2144,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxos" => { #[allow(non_camel_case_types)] struct GetAddressUtxosSvc(pub Arc); - impl - tonic::server::UnaryService - for GetAddressUtxosSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetAddressUtxosSvc { type Response = super::GetAddressUtxosReplyList; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_address_utxos(&inner, request).await + ::get_address_utxos(&inner, request) + .await }; Box::pin(fut) } @@ -1713,7 +2171,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetAddressUtxosSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1733,21 +2190,26 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxosStream" => { #[allow(non_camel_case_types)] struct GetAddressUtxosStreamSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetAddressUtxosStreamSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetAddressUtxosStreamSvc { type Response = super::GetAddressUtxosReply; type ResponseStream = T::GetAddressUtxosStreamStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async 
move { - ::get_address_utxos_stream(&inner, request) + ::get_address_utxos_stream( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1759,7 +2221,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetAddressUtxosStreamSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1779,13 +2240,21 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLightdInfo" => { #[allow(non_camel_case_types)] struct GetLightdInfoSvc(pub Arc); - impl tonic::server::UnaryService for GetLightdInfoSvc { + impl tonic::server::UnaryService + for GetLightdInfoSvc { type Response = super::LightdInfo; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_lightd_info(&inner, request).await + ::get_lightd_info(&inner, request) + .await }; Box::pin(fut) } @@ -1796,7 +2265,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetLightdInfoSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1816,9 +2284,14 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/Ping" => { #[allow(non_camel_case_types)] struct PingSvc(pub Arc); - impl tonic::server::UnaryService for PingSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService for PingSvc { type Response = super::PingResponse; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + 
tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -1836,7 +2309,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = PingSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1853,18 +2325,27 @@ pub mod compact_tx_streamer_server { }; Box::pin(fut) } - _ => Box::pin(async move { - Ok(http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap()) - }), + _ => { + Box::pin(async move { + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } } } } - impl Clone for CompactTxStreamerServer { + impl Clone for CompactTxStreamerServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -1876,17 +2357,9 @@ pub mod compact_tx_streamer_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService for CompactTxStreamerServer { - const NAME: &'static str = "cash.z.wallet.sdk.rpc.CompactTxStreamer"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "cash.z.wallet.sdk.rpc.CompactTxStreamer"; + impl tonic::server::NamedService for CompactTxStreamerServer { + const NAME: &'static str = SERVICE_NAME; } } diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index cba56a644..d419425ea 100644 --- a/zaino-state/src/backends/fetch.rs +++ 
b/zaino-state/src/backends/fetch.rs @@ -43,7 +43,8 @@ use zaino_proto::proto::{ AddressList, Balance, BlockId, BlockRange, Duration, Exclude, GetAddressUtxosArg, GetAddressUtxosReply, GetAddressUtxosReplyList, LightdInfo, PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, - }, utils::{GetBlockRangeError, ValidatedBlockRangeRequest}, + }, + utils::{GetBlockRangeError, ValidatedBlockRangeRequest}, }; use crate::TransactionHash; @@ -894,7 +895,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { async fn get_block_range_nullifiers( &self, request: BlockRange, - ) -> Result { + ) -> Result { let mut validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) .map_err(FetchServiceError::from_get_block_change_error)?; @@ -906,7 +907,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { } else { false }; - + let start = validated_request.start(); let end = validated_request.end(); let chain_height = self.block_cache.get_chain_height().await?.0; diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index bb4a8340e..bcfb4cc9b 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -44,12 +44,10 @@ use zaino_proto::proto::{ compact_formats::CompactBlock, service::{ AddressList, Balance, BlockId, BlockRange, Exclude, GetAddressUtxosArg, - GetAddressUtxosReply, GetAddressUtxosReplyList, LightdInfo, PingResponse, PoolType, - RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, + GetAddressUtxosReply, GetAddressUtxosReplyList, LightdInfo, PingResponse, RawTransaction, + SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, - utils::{ - pool_types_from_vector, PoolTypeError - } + utils::{pool_types_from_vector, PoolTypeError, ValidatedBlockRangeRequest}, }; use zcash_protocol::consensus::NetworkType; @@ -555,46 +553,20 @@ impl StateServiceSubscriber { request: BlockRange, trim_non_nullifier: bool, ) -> 
Result { - let mut start: u32 = match request.start { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(StateServiceError::TonicStatusError( - tonic::Status::invalid_argument( - "Error: Start height out of range. Failed to convert to u32.", - ), - )); - } - }, - None => { - return Err(StateServiceError::TonicStatusError( - tonic::Status::invalid_argument("Error: No start height given."), - )); - } - }; - let mut end: u32 = match request.end { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(StateServiceError::TonicStatusError( - tonic::Status::invalid_argument( - "Error: End height out of range. Failed to convert to u32.", - ), - )); - } - }, - None => { - return Err(StateServiceError::TonicStatusError( - tonic::Status::invalid_argument("Error: No start height given."), - )); - } - }; - let lowest_to_highest = if start > end { - (start, end) = (end, start); + let mut validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) + .map_err(|_| StateServiceError::Custom("fixme".to_string()))?; + + // FIXME: this should be changed but this logic is hard to understand and we lack tests. 
+ // we will maintain the behaviour with less smelly code + let lowest_to_highest = if validated_request.is_reverse_ordered() { + validated_request.reverse(); false } else { true }; + + let start = validated_request.start(); + let end = validated_request.end(); let chain_height = self.block_cache.get_chain_height().await?.0; let fetch_service_clone = self.clone(); let service_timeout = self.config.service.timeout; From 2eba5cb12eb7d219bb869c1f5bf150c6d2450339 Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Tue, 2 Dec 2025 21:09:50 -0300 Subject: [PATCH 065/114] Document ValidatedBlockRangeRequest --- zaino-proto/src/proto/utils.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs index 6ad15c09e..571bfabd5 100644 --- a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -56,9 +56,11 @@ pub enum GetBlockRangeError { PoolTypArgumentError(PoolTypeError), } +/// `BlockRange` request that has been validated in terms of the semantics +/// of `GetBlockRange` RPC. 
pub struct ValidatedBlockRangeRequest { - start: u32, - end: u32, + start: u64, + end: u64, pool_types: Vec, } @@ -67,7 +69,7 @@ impl ValidatedBlockRangeRequest { pub fn new_from_block_range( request: &BlockRange, ) -> Result { - let start: u32 = match &request.start { + let start = match &request.start { Some(block_id) => match block_id.height.try_into() { Ok(height) => height, Err(_) => { @@ -78,7 +80,7 @@ impl ValidatedBlockRangeRequest { return Err(GetBlockRangeError::NoStartHeightProvided); } }; - let end: u32 = match &request.end { + let end = match &request.end { Some(block_id) => match block_id.height.try_into() { Ok(height) => height, Err(_) => { @@ -100,14 +102,17 @@ impl ValidatedBlockRangeRequest { }) } - pub fn start(&self) -> u32 { + /// Start Height of the BlockRange Request + pub fn start(&self) -> u64 { self.start } - pub fn end(&self) -> u32 { + /// End Height of the BlockRange Request + pub fn end(&self) -> u64 { self.end } + /// Pool Types of the BlockRange request pub fn pool_types(&self) -> Vec { self.pool_types.clone() } From 372cb2706fb9fcda0d1afdce05bcef933f7caa01 Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Tue, 2 Dec 2025 21:13:07 -0300 Subject: [PATCH 066/114] Add changes to zaino-proto's CHANGELOG --- zaino-proto/CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/zaino-proto/CHANGELOG.md b/zaino-proto/CHANGELOG.md index 728527b2e..9292cdb02 100644 --- a/zaino-proto/CHANGELOG.md +++ b/zaino-proto/CHANGELOG.md @@ -9,5 +9,7 @@ and this library adheres to Rust's notion of ### Added +- `ValidatedBlockRangeRequest` type that encapsulates validations of the + `GetBlockRange` RPC request - utils submodule to handle `PoolType` conversions - `PoolTypeError` defines conversion errors between i32 and known `PoolType` variants From bf5c6db67236e4d3ffa591e732058e260f7b02f4 Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Sun, 28 Dec 2025 17:50:47 -0300 Subject: [PATCH 067/114] Adopt changes from Lightclient-protocol v0.4.0 
--- zaino-serve/src/rpc/grpc/service.rs | 29 +++++++------- zaino-state/src/backends/fetch.rs | 62 +++++++++++++++++++---------- zaino-state/src/backends/state.rs | 39 +++++++++++++----- zaino-state/src/indexer.rs | 25 +++++++----- 4 files changed, 99 insertions(+), 56 deletions(-) diff --git a/zaino-serve/src/rpc/grpc/service.rs b/zaino-serve/src/rpc/grpc/service.rs index 2f180413d..a4ad9807c 100644 --- a/zaino-serve/src/rpc/grpc/service.rs +++ b/zaino-serve/src/rpc/grpc/service.rs @@ -7,10 +7,7 @@ use crate::rpc::GrpcClient; use zaino_proto::proto::{ compact_formats::CompactBlock, service::{ - compact_tx_streamer_server::CompactTxStreamer, Address, AddressList, Balance, BlockId, - BlockRange, ChainSpec, Duration, Empty, Exclude, GetAddressUtxosArg, - GetAddressUtxosReplyList, GetSubtreeRootsArg, LightdInfo, PingResponse, RawTransaction, - SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, + compact_tx_streamer_server::CompactTxStreamer, Address, AddressList, Balance, BlockId, BlockRange, ChainSpec, Duration, Empty, GetAddressUtxosArg, GetAddressUtxosReplyList, GetMempoolTxRequest, GetSubtreeRootsArg, LightdInfo, PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter }, }; use zaino_state::{ @@ -148,16 +145,20 @@ where get_taddress_txids(TransparentAddressBlockFilter) -> Self::GetTaddressTxidsStream as streaming, "Returns the total balance for a list of taddrs" get_taddress_balance(AddressList) -> Balance, - "Return the compact transactions currently in the mempool; the results \ - can be a few seconds out of date. If the Exclude list is empty, return \ - all transactions; otherwise return all *except* those in the Exclude list \ - (if any); this allows the client to avoid receiving transactions that it \ - already has (from an earlier call to this rpc). 
The transaction IDs in the \ - Exclude list can be shortened to any number of bytes to make the request \ - more bandwidth-efficient; if two or more transactions in the mempool \ - match a shortened txid, they are all sent (none is excluded). Transactions \ - in the exclude list that don't exist in the mempool are ignored." - get_mempool_tx(Exclude) -> Self::GetMempoolTxStream as streaming, + + "Returns a stream of the compact transaction representation for transactions \ + currently in the mempool. The results of this operation may be a few \ + seconds out of date. If the `exclude_txid_suffixes` list is empty, \ + return all transactions; otherwise return all *except* those in the \ + `exclude_txid_suffixes` list (if any); this allows the client to avoid \ + receiving transactions that it already has (from an earlier call to this \ + RPC). The transaction IDs in the `exclude_txid_suffixes` list can be \ + shortened to any number of bytes to make the request more \ + bandwidth-efficient; if two or more transactions in the mempool match a \ + txid suffix, none of the matching transactions are excluded. Txid \ + suffixes in the exclude list that don't match any transactions in the \ + mempool are ignored." + get_mempool_tx(GetMempoolTxRequest) -> Self::GetMempoolTxStream as streaming, "GetTreeState returns the note commitment tree state corresponding to the given block. \ See section 3.7 of the Zcash protocol specification. It returns several other useful \ values also (even though they can be obtained using GetBlock). 
diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index d419425ea..e1ffdd0a0 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -40,7 +40,7 @@ use zaino_fetch::{ use zaino_proto::proto::{ compact_formats::CompactBlock, service::{ - AddressList, Balance, BlockId, BlockRange, Duration, Exclude, GetAddressUtxosArg, + AddressList, Balance, BlockId, BlockRange, Duration, GetMempoolTxRequest, GetAddressUtxosArg, GetAddressUtxosReply, GetAddressUtxosReplyList, LightdInfo, PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, @@ -1177,30 +1177,39 @@ impl LightWalletIndexer for FetchServiceSubscriber { } } - /// Return the compact transactions currently in the mempool; the results - /// can be a few seconds out of date. If the Exclude list is empty, return - /// all transactions; otherwise return all *except* those in the Exclude list - /// (if any); this allows the client to avoid receiving transactions that it - /// already has (from an earlier call to this rpc). The transaction IDs in the - /// Exclude list can be shortened to any number of bytes to make the request - /// more bandwidth-efficient; if two or more transactions in the mempool - /// match a shortened txid, they are all sent (none is excluded). Transactions - /// in the exclude list that don't exist in the mempool are ignored. + /// Returns a stream of the compact transaction representation for transactions + /// currently in the mempool. The results of this operation may be a few + /// seconds out of date. If the `exclude_txid_suffixes` list is empty, + /// return all transactions; otherwise return all *except* those in the + /// `exclude_txid_suffixes` list (if any); this allows the client to avoid + /// receiving transactions that it already has (from an earlier call to this + /// RPC). 
The transaction IDs in the `exclude_txid_suffixes` list can be + /// shortened to any number of bytes to make the request more + /// bandwidth-efficient; if two or more transactions in the mempool match a + /// txid suffix, none of the matching transactions are excluded. Txid + /// suffixes in the exclude list that don't match any transactions in the + /// mempool are ignored. #[allow(deprecated)] async fn get_mempool_tx( &self, - request: Exclude, + request: GetMempoolTxRequest, ) -> Result { - let exclude_txids: Vec = request - .txid - .iter() - .map(|txid_bytes| { - // NOTE: the TransactionHash methods cannot be used for this hex encoding as exclusions could be truncated to less than 32 bytes - let reversed_txid_bytes: Vec = txid_bytes.iter().cloned().rev().collect(); - hex::encode(&reversed_txid_bytes) - }) - .collect(); + let mut exclude_txids: Vec = vec![]; + + for (i, excluded_id) in request.exclude_txid_suffixes.iter().enumerate() { + if excluded_id.len() > 32 { + return Err(FetchServiceError::TonicStatusError(tonic::Status::invalid_argument( + format!("Error: excluded txid {} is larger than 32 bytes", i) + ))) + } + + // NOTE: the TransactionHash methods cannot be used for this hex encoding as exclusions could be truncated to less than 32 bytes + let reversed_txid_bytes: Vec = excluded_id.iter().cloned().rev().collect(); + let hex_string_txid: String = hex::encode(&reversed_txid_bytes); + exclude_txids.push(hex_string_txid); + } + let mempool = self.mempool.clone(); let service_timeout = self.config.service.timeout; let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); @@ -1593,6 +1602,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { })? 
.into(), ); + let sapling_activation_height = blockchain_info .upgrades() .get(&sapling_id) @@ -1603,6 +1613,15 @@ impl LightWalletIndexer for FetchServiceSubscriber { ) .to_string(); + let nu_info = blockchain_info.upgrades() + .last() + .expect("Expected validator to have a consenus activated.") + .1 + .into_parts(); + + let nu_name = nu_info.0; + let nu_height = nu_info.1; + Ok(LightdInfo { version: self.data.build_info().version(), vendor: "ZingoLabs ZainoD".to_string(), @@ -1619,6 +1638,9 @@ impl LightWalletIndexer for FetchServiceSubscriber { zcashd_build: self.data.zebra_build(), zcashd_subversion: self.data.zebra_subversion(), donation_address: "".to_string(), + upgrade_name: nu_name.to_string(), + upgrade_height: nu_height.0 as u64, + lightwallet_protocol_version: "v0.4.0".to_string(), }) } diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index bcfb4cc9b..dbc1245f5 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -43,7 +43,7 @@ use zaino_fetch::{ use zaino_proto::proto::{ compact_formats::CompactBlock, service::{ - AddressList, Balance, BlockId, BlockRange, Exclude, GetAddressUtxosArg, + AddressList, Balance, BlockId, BlockRange, GetMempoolTxRequest, GetAddressUtxosArg, GetAddressUtxosReply, GetAddressUtxosReplyList, LightdInfo, PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, @@ -2218,16 +2218,22 @@ impl LightWalletIndexer for StateServiceSubscriber { /// in the exclude list that don't exist in the mempool are ignored. 
async fn get_mempool_tx( &self, - request: Exclude, + request: GetMempoolTxRequest, ) -> Result { - let exclude_txids: Vec = request - .txid - .iter() - .map(|txid_bytes| { - let reversed_txid_bytes: Vec = txid_bytes.iter().cloned().rev().collect(); - hex::encode(&reversed_txid_bytes) - }) - .collect(); + let mut exclude_txids: Vec = vec![]; + + for (i, excluded_id) in request.exclude_txid_suffixes.iter().enumerate() { + if excluded_id.len() > 32 { + return Err(StateServiceError::TonicStatusError(tonic::Status::invalid_argument( + format!("Error: excluded txid {} is larger than 32 bytes", i) + ))) + } + + // NOTE: the TransactionHash methods cannot be used for this hex encoding as exclusions could be truncated to less than 32 bytes + let reversed_txid_bytes: Vec = excluded_id.iter().cloned().rev().collect(); + let hex_string_txid: String = hex::encode(&reversed_txid_bytes); + exclude_txids.push(hex_string_txid); + } let mempool = self.mempool.clone(); let service_timeout = self.config.service.timeout; @@ -2529,6 +2535,15 @@ impl LightWalletIndexer for StateServiceSubscriber { ) .to_string(); + let nu_info = blockchain_info.upgrades() + .last() + .expect("Expected validator to have a consenus activated.") + .1 + .into_parts(); + + let nu_name = nu_info.0; + let nu_height = nu_info.1; + Ok(LightdInfo { version: self.data.build_info().version(), vendor: "ZingoLabs ZainoD".to_string(), @@ -2544,8 +2559,10 @@ impl LightWalletIndexer for StateServiceSubscriber { estimated_height: blockchain_info.estimated_height().0 as u64, zcashd_build: self.data.zebra_build(), zcashd_subversion: self.data.zebra_subversion(), - // TODO: support donation addresses see https://github.com/zingolabs/zaino/issues/626 donation_address: "".to_string(), + upgrade_name: nu_name.to_string(), + upgrade_height: nu_height.0 as u64, + lightwallet_protocol_version: "v0.4.0".to_string(), }) } diff --git a/zaino-state/src/indexer.rs b/zaino-state/src/indexer.rs index 56a241e17..44837e478 100644 --- 
a/zaino-state/src/indexer.rs +++ b/zaino-state/src/indexer.rs @@ -16,7 +16,7 @@ use zaino_fetch::jsonrpsee::response::{ use zaino_proto::proto::{ compact_formats::CompactBlock, service::{ - AddressList, Balance, BlockId, BlockRange, Duration, Exclude, GetAddressUtxosArg, + AddressList, Balance, BlockId, BlockRange, Duration, GetMempoolTxRequest, GetAddressUtxosArg, GetAddressUtxosReplyList, GetSubtreeRootsArg, LightdInfo, PingResponse, RawTransaction, SendResponse, ShieldedProtocol, SubtreeRoot, TransparentAddressBlockFilter, TreeState, TxFilter, @@ -628,18 +628,21 @@ pub trait LightWalletIndexer: Send + Sync + Clone + ZcashIndexer + 'static { request: AddressStream, ) -> Result; - /// Return the compact transactions currently in the mempool; the results - /// can be a few seconds out of date. If the Exclude list is empty, return - /// all transactions; otherwise return all *except* those in the Exclude list - /// (if any); this allows the client to avoid receiving transactions that it - /// already has (from an earlier call to this rpc). The transaction IDs in the - /// Exclude list can be shortened to any number of bytes to make the request - /// more bandwidth-efficient; if two or more transactions in the mempool - /// match a shortened txid, they are all sent (none is excluded). Transactions - /// in the exclude list that don't exist in the mempool are ignored. + /// Returns a stream of the compact transaction representation for transactions + /// currently in the mempool. The results of this operation may be a few + /// seconds out of date. If the `exclude_txid_suffixes` list is empty, + /// return all transactions; otherwise return all *except* those in the + /// `exclude_txid_suffixes` list (if any); this allows the client to avoid + /// receiving transactions that it already has (from an earlier call to this + /// RPC). 
The transaction IDs in the `exclude_txid_suffixes` list can be + /// shortened to any number of bytes to make the request more + /// bandwidth-efficient; if two or more transactions in the mempool match a + /// txid suffix, none of the matching transactions are excluded. Txid + /// suffixes in the exclude list that don't match any transactions in the + /// mempool are ignored. async fn get_mempool_tx( &self, - request: Exclude, + request: GetMempoolTxRequest, ) -> Result; /// Return a stream of current Mempool transactions. This will keep the output stream open while From 7a8e129ed2480808c7b4ddb8522ebcc05a8c7754 Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Thu, 11 Dec 2025 22:36:30 -0300 Subject: [PATCH 068/114] cargo fmt --- zaino-proto/src/proto/service.rs | 906 +++++++++------------------- zaino-serve/src/rpc/grpc/service.rs | 5 +- zaino-state/src/backends/fetch.rs | 36 +- zaino-state/src/backends/state.rs | 28 +- zaino-state/src/indexer.rs | 8 +- 5 files changed, 333 insertions(+), 650 deletions(-) diff --git a/zaino-proto/src/proto/service.rs b/zaino-proto/src/proto/service.rs index 2441bc93f..cbe8a5d0f 100644 --- a/zaino-proto/src/proto/service.rs +++ b/zaino-proto/src/proto/service.rs @@ -380,10 +380,10 @@ pub mod compact_tx_streamer_client { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value, + clippy::let_unit_value )] - use tonic::codegen::*; use tonic::codegen::http::Uri; + use tonic::codegen::*; #[derive(Debug, Clone)] pub struct CompactTxStreamerClient { inner: tonic::client::Grpc, @@ -427,9 +427,8 @@ pub mod compact_tx_streamer_client { >::ResponseBody, >, >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, + >>::Error: + Into + std::marker::Send + std::marker::Sync, { CompactTxStreamerClient::new(InterceptedService::new(inner, interceptor)) } @@ -469,26 +468,18 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - 
.ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLatestBlock", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLatestBlock", + )); self.inner.unary(req, path, codec).await } /// Return the compact block corresponding to the given block identifier @@ -499,26 +490,18 @@ pub mod compact_tx_streamer_client { tonic::Response, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlock", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlock", + )); self.inner.unary(req, path, codec).await } /// Same as GetBlock except the returned CompactBlock value contains only @@ -533,26 +516,18 @@ pub mod compact_tx_streamer_client { tonic::Response, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + 
tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockNullifiers", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockNullifiers", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockNullifiers", + )); self.inner.unary(req, path, codec).await } /// Return a list of consecutive compact blocks in the specified range, @@ -564,31 +539,21 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response< - tonic::codec::Streaming, - >, + tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRange", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockRange", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockRange", + )); self.inner.server_streaming(req, path, codec).await } /// Same as GetBlockRange except the returned CompactBlock values contain @@ -600,31 +565,21 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response< - tonic::codec::Streaming, - >, + tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - 
format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRangeNullifiers", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockRangeNullifiers", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockRangeNullifiers", + )); self.inner.server_streaming(req, path, codec).await } /// Return the requested full (not compact) transaction (as from zcashd) @@ -632,26 +587,18 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTransaction", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTransaction", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTransaction", + )); self.inner.unary(req, path, codec).await } /// Submit the given transaction to the Zcash network @@ -659,26 +606,18 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - 
) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/SendTransaction", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "SendTransaction", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "SendTransaction", + )); self.inner.unary(req, path, codec).await } /// Return RawTransactions that match the given transparent address filter. @@ -692,26 +631,18 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTxids", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressTxids", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressTxids", + )); self.inner.server_streaming(req, path, codec).await } /// Return the transactions corresponding to the given t-address within the given block range. 
@@ -723,78 +654,54 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTransactions", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressTransactions", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressTransactions", + )); self.inner.server_streaming(req, path, codec).await } pub async fn get_taddress_balance( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalance", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressBalance", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressBalance", + )); self.inner.unary(req, path, codec).await } pub async fn get_taddress_balance_stream( &mut self, request: impl tonic::IntoStreamingRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was 
not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalanceStream", ); let mut req = request.into_streaming_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressBalanceStream", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressBalanceStream", + )); self.inner.client_streaming(req, path, codec).await } /// Returns a stream of the compact transaction representation for transactions @@ -813,31 +720,21 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response< - tonic::codec::Streaming, - >, + tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolTx", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetMempoolTx", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetMempoolTx", + )); self.inner.server_streaming(req, path, codec).await } /// Return a stream of current Mempool transactions. 
This will keep the output stream open while @@ -849,26 +746,18 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolStream", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetMempoolStream", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetMempoolStream", + )); self.inner.server_streaming(req, path, codec).await } /// GetTreeState returns the note commitment tree state corresponding to the given block. @@ -879,52 +768,36 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTreeState", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTreeState", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTreeState", + )); self.inner.unary(req, path, codec).await } pub async fn get_latest_tree_state( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> 
{ - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestTreeState", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLatestTreeState", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLatestTreeState", + )); self.inner.unary(req, path, codec).await } /// Returns a stream of information about roots of subtrees of the note commitment tree @@ -936,55 +809,37 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetSubtreeRoots", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetSubtreeRoots", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetSubtreeRoots", + )); self.inner.server_streaming(req, path, codec).await } pub async fn get_address_utxos( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> 
std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxos", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetAddressUtxos", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetAddressUtxos", + )); self.inner.unary(req, path, codec).await } pub async fn get_address_utxos_stream( @@ -994,26 +849,18 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxosStream", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetAddressUtxosStream", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetAddressUtxosStream", + )); self.inner.server_streaming(req, path, codec).await } /// Return information about this lightwalletd instance and the blockchain @@ -1021,26 +868,18 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + 
tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLightdInfo", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLightdInfo", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLightdInfo", + )); self.inner.unary(req, path, codec).await } /// Testing-only, requires lightwalletd --ping-very-insecure (do not enable in production) @@ -1048,23 +887,18 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/Ping", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("cash.z.wallet.sdk.rpc.CompactTxStreamer", "Ping"), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "Ping", + )); self.inner.unary(req, path, codec).await } } @@ -1076,7 +910,7 @@ pub mod compact_tx_streamer_server { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value, + clippy::let_unit_value )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with CompactTxStreamerServer. 
@@ -1113,8 +947,7 @@ pub mod compact_tx_streamer_server { crate::proto::compact_formats::CompactBlock, tonic::Status, >, - > - + std::marker::Send + > + std::marker::Send + 'static; /// Return a list of consecutive compact blocks in the specified range, /// which is inclusive of `range.end`. @@ -1124,18 +957,14 @@ pub mod compact_tx_streamer_server { async fn get_block_range( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the GetBlockRangeNullifiers method. type GetBlockRangeNullifiersStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result< crate::proto::compact_formats::CompactBlock, tonic::Status, >, - > - + std::marker::Send + > + std::marker::Send + 'static; /// Same as GetBlockRange except the returned CompactBlock values contain /// only nullifiers. @@ -1145,10 +974,7 @@ pub mod compact_tx_streamer_server { async fn get_block_range_nullifiers( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; /// Return the requested full (not compact) transaction (as from zcashd) async fn get_transaction( &self, @@ -1162,8 +988,7 @@ pub mod compact_tx_streamer_server { /// Server streaming response type for the GetTaddressTxids method. type GetTaddressTxidsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > - + std::marker::Send + > + std::marker::Send + 'static; /// Return RawTransactions that match the given transparent address filter. /// @@ -1172,25 +997,18 @@ pub mod compact_tx_streamer_server { async fn get_taddress_txids( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the GetTaddressTransactions method. 
type GetTaddressTransactionsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > - + std::marker::Send + > + std::marker::Send + 'static; /// Return the transactions corresponding to the given t-address within the given block range. /// Mempool transactions are not included in the results. async fn get_taddress_transactions( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn get_taddress_balance( &self, request: tonic::Request, @@ -1201,12 +1019,8 @@ pub mod compact_tx_streamer_server { ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the GetMempoolTx method. type GetMempoolTxStream: tonic::codegen::tokio_stream::Stream< - Item = std::result::Result< - crate::proto::compact_formats::CompactTx, - tonic::Status, - >, - > - + std::marker::Send + Item = std::result::Result, + > + std::marker::Send + 'static; /// Returns a stream of the compact transaction representation for transactions /// currently in the mempool. The results of this operation may be a few @@ -1223,25 +1037,18 @@ pub mod compact_tx_streamer_server { async fn get_mempool_tx( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the GetMempoolStream method. type GetMempoolStreamStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > - + std::marker::Send + > + std::marker::Send + 'static; /// Return a stream of current Mempool transactions. This will keep the output stream open while /// there are mempool transactions. It will close the returned stream when a new block is mined. 
async fn get_mempool_stream( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; /// GetTreeState returns the note commitment tree state corresponding to the given block. /// See section 3.7 of the Zcash protocol specification. It returns several other useful /// values also (even though they can be obtained using GetBlock). @@ -1257,38 +1064,27 @@ pub mod compact_tx_streamer_server { /// Server streaming response type for the GetSubtreeRoots method. type GetSubtreeRootsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > - + std::marker::Send + > + std::marker::Send + 'static; /// Returns a stream of information about roots of subtrees of the note commitment tree /// for the specified shielded protocol (Sapling or Orchard). async fn get_subtree_roots( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn get_address_utxos( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the GetAddressUtxosStream method. 
type GetAddressUtxosStreamStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > - + std::marker::Send + > + std::marker::Send + 'static; async fn get_address_utxos_stream( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; /// Return information about this lightwalletd instance and the blockchain async fn get_lightd_info( &self, @@ -1321,10 +1117,7 @@ pub mod compact_tx_streamer_server { max_encoding_message_size: None, } } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService + pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService where F: tonic::service::Interceptor, { @@ -1379,23 +1172,16 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock" => { #[allow(non_camel_case_types)] struct GetLatestBlockSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService - for GetLatestBlockSvc { + impl tonic::server::UnaryService for GetLatestBlockSvc { type Response = super::BlockId; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_latest_block(&inner, request) - .await + ::get_latest_block(&inner, request).await }; Box::pin(fut) } @@ -1425,14 +1211,9 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock" => { #[allow(non_camel_case_types)] struct GetBlockSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService for GetBlockSvc { + impl tonic::server::UnaryService for GetBlockSvc { type Response = crate::proto::compact_formats::CompactBlock; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, @@ -1469,25 
+1250,18 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockNullifiers" => { #[allow(non_camel_case_types)] struct GetBlockNullifiersSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService - for GetBlockNullifiersSvc { + impl tonic::server::UnaryService + for GetBlockNullifiersSvc + { type Response = crate::proto::compact_formats::CompactBlock; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_block_nullifiers( - &inner, - request, - ) + ::get_block_nullifiers(&inner, request) .await }; Box::pin(fut) @@ -1518,24 +1292,21 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRange" => { #[allow(non_camel_case_types)] struct GetBlockRangeSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService - for GetBlockRangeSvc { + impl + tonic::server::ServerStreamingService + for GetBlockRangeSvc + { type Response = crate::proto::compact_formats::CompactBlock; type ResponseStream = T::GetBlockRangeStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = + BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_block_range(&inner, request) - .await + ::get_block_range(&inner, request).await }; Box::pin(fut) } @@ -1565,16 +1336,14 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRangeNullifiers" => { #[allow(non_camel_case_types)] struct GetBlockRangeNullifiersSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService - for GetBlockRangeNullifiersSvc { + impl + tonic::server::ServerStreamingService + for GetBlockRangeNullifiersSvc + { type Response = 
crate::proto::compact_formats::CompactBlock; type ResponseStream = T::GetBlockRangeNullifiersStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = + BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, @@ -1582,10 +1351,9 @@ pub mod compact_tx_streamer_server { let inner = Arc::clone(&self.0); let fut = async move { ::get_block_range_nullifiers( - &inner, - request, - ) - .await + &inner, request, + ) + .await }; Box::pin(fut) } @@ -1615,23 +1383,16 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTransaction" => { #[allow(non_camel_case_types)] struct GetTransactionSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService - for GetTransactionSvc { + impl tonic::server::UnaryService for GetTransactionSvc { type Response = super::RawTransaction; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_transaction(&inner, request) - .await + ::get_transaction(&inner, request).await }; Box::pin(fut) } @@ -1661,23 +1422,18 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/SendTransaction" => { #[allow(non_camel_case_types)] struct SendTransactionSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService - for SendTransactionSvc { + impl tonic::server::UnaryService + for SendTransactionSvc + { type Response = super::SendResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::send_transaction(&inner, request) - .await + ::send_transaction(&inner, request).await }; Box::pin(fut) } @@ -1707,28 +1463,21 @@ pub mod compact_tx_streamer_server { 
"/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTxids" => { #[allow(non_camel_case_types)] struct GetTaddressTxidsSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService< - super::TransparentAddressBlockFilter, - > for GetTaddressTxidsSvc { + impl + tonic::server::ServerStreamingService + for GetTaddressTxidsSvc + { type Response = super::RawTransaction; type ResponseStream = T::GetTaddressTxidsStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = + BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_taddress_txids( - &inner, - request, - ) - .await + ::get_taddress_txids(&inner, request).await }; Box::pin(fut) } @@ -1758,27 +1507,21 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTransactions" => { #[allow(non_camel_case_types)] struct GetTaddressTransactionsSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService< - super::TransparentAddressBlockFilter, - > for GetTaddressTransactionsSvc { + impl + tonic::server::ServerStreamingService + for GetTaddressTransactionsSvc + { type Response = super::RawTransaction; type ResponseStream = T::GetTaddressTransactionsStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = + BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_taddress_transactions( - &inner, - request, - ) + ::get_taddress_transactions(&inner, request) .await }; Box::pin(fut) @@ -1809,25 +1552,18 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalance" => { #[allow(non_camel_case_types)] struct GetTaddressBalanceSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService - for GetTaddressBalanceSvc { + impl 
tonic::server::UnaryService + for GetTaddressBalanceSvc + { type Response = super::Balance; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_taddress_balance( - &inner, - request, - ) + ::get_taddress_balance(&inner, request) .await }; Box::pin(fut) @@ -1858,15 +1594,11 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalanceStream" => { #[allow(non_camel_case_types)] struct GetTaddressBalanceStreamSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ClientStreamingService - for GetTaddressBalanceStreamSvc { + impl tonic::server::ClientStreamingService + for GetTaddressBalanceStreamSvc + { type Response = super::Balance; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request>, @@ -1874,10 +1606,9 @@ pub mod compact_tx_streamer_server { let inner = Arc::clone(&self.0); let fut = async move { ::get_taddress_balance_stream( - &inner, - request, - ) - .await + &inner, request, + ) + .await }; Box::pin(fut) } @@ -1907,24 +1638,21 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolTx" => { #[allow(non_camel_case_types)] struct GetMempoolTxSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService - for GetMempoolTxSvc { + impl + tonic::server::ServerStreamingService + for GetMempoolTxSvc + { type Response = crate::proto::compact_formats::CompactTx; type ResponseStream = T::GetMempoolTxStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = + BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_mempool_tx(&inner, request) - 
.await + ::get_mempool_tx(&inner, request).await }; Box::pin(fut) } @@ -1954,27 +1682,17 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolStream" => { #[allow(non_camel_case_types)] struct GetMempoolStreamSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService - for GetMempoolStreamSvc { + impl tonic::server::ServerStreamingService + for GetMempoolStreamSvc + { type Response = super::RawTransaction; type ResponseStream = T::GetMempoolStreamStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = + BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_mempool_stream( - &inner, - request, - ) - .await + ::get_mempool_stream(&inner, request).await }; Box::pin(fut) } @@ -2004,23 +1722,16 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTreeState" => { #[allow(non_camel_case_types)] struct GetTreeStateSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService - for GetTreeStateSvc { + impl tonic::server::UnaryService for GetTreeStateSvc { type Response = super::TreeState; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_tree_state(&inner, request) - .await + ::get_tree_state(&inner, request).await }; Box::pin(fut) } @@ -2050,23 +1761,13 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestTreeState" => { #[allow(non_camel_case_types)] struct GetLatestTreeStateSvc(pub Arc); - impl tonic::server::UnaryService - for GetLatestTreeStateSvc { + impl tonic::server::UnaryService for GetLatestTreeStateSvc { type Response 
= super::TreeState; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_latest_tree_state( - &inner, - request, - ) + ::get_latest_tree_state(&inner, request) .await }; Box::pin(fut) @@ -2097,24 +1798,21 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetSubtreeRoots" => { #[allow(non_camel_case_types)] struct GetSubtreeRootsSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService - for GetSubtreeRootsSvc { + impl + tonic::server::ServerStreamingService + for GetSubtreeRootsSvc + { type Response = super::SubtreeRoot; type ResponseStream = T::GetSubtreeRootsStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = + BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_subtree_roots(&inner, request) - .await + ::get_subtree_roots(&inner, request).await }; Box::pin(fut) } @@ -2144,23 +1842,19 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxos" => { #[allow(non_camel_case_types)] struct GetAddressUtxosSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService - for GetAddressUtxosSvc { + impl + tonic::server::UnaryService + for GetAddressUtxosSvc + { type Response = super::GetAddressUtxosReplyList; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_address_utxos(&inner, request) - .await + ::get_address_utxos(&inner, request).await }; Box::pin(fut) } @@ -2190,26 
+1884,21 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxosStream" => { #[allow(non_camel_case_types)] struct GetAddressUtxosStreamSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService - for GetAddressUtxosStreamSvc { + impl + tonic::server::ServerStreamingService + for GetAddressUtxosStreamSvc + { type Response = super::GetAddressUtxosReply; type ResponseStream = T::GetAddressUtxosStreamStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = + BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_address_utxos_stream( - &inner, - request, - ) + ::get_address_utxos_stream(&inner, request) .await }; Box::pin(fut) @@ -2240,21 +1929,13 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLightdInfo" => { #[allow(non_camel_case_types)] struct GetLightdInfoSvc(pub Arc); - impl tonic::server::UnaryService - for GetLightdInfoSvc { + impl tonic::server::UnaryService for GetLightdInfoSvc { type Response = super::LightdInfo; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_lightd_info(&inner, request) - .await + ::get_lightd_info(&inner, request).await }; Box::pin(fut) } @@ -2284,14 +1965,9 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/Ping" => { #[allow(non_camel_case_types)] struct PingSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService for PingSvc { + impl tonic::server::UnaryService for PingSvc { type Response = super::PingResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type 
Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, @@ -2325,23 +2001,19 @@ pub mod compact_tx_streamer_server { }; Box::pin(fut) } - _ => { - Box::pin(async move { - let mut response = http::Response::new(empty_body()); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } + _ => Box::pin(async move { + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers.insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers.insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }), } } } diff --git a/zaino-serve/src/rpc/grpc/service.rs b/zaino-serve/src/rpc/grpc/service.rs index a4ad9807c..cf723095b 100644 --- a/zaino-serve/src/rpc/grpc/service.rs +++ b/zaino-serve/src/rpc/grpc/service.rs @@ -7,7 +7,10 @@ use crate::rpc::GrpcClient; use zaino_proto::proto::{ compact_formats::CompactBlock, service::{ - compact_tx_streamer_server::CompactTxStreamer, Address, AddressList, Balance, BlockId, BlockRange, ChainSpec, Duration, Empty, GetAddressUtxosArg, GetAddressUtxosReplyList, GetMempoolTxRequest, GetSubtreeRootsArg, LightdInfo, PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter + compact_tx_streamer_server::CompactTxStreamer, Address, AddressList, Balance, BlockId, + BlockRange, ChainSpec, Duration, Empty, GetAddressUtxosArg, GetAddressUtxosReplyList, + GetMempoolTxRequest, GetSubtreeRootsArg, LightdInfo, PingResponse, RawTransaction, + SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, }; use zaino_state::{ diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index e1ffdd0a0..5c4299a8e 100644 --- 
a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -40,9 +40,10 @@ use zaino_fetch::{ use zaino_proto::proto::{ compact_formats::CompactBlock, service::{ - AddressList, Balance, BlockId, BlockRange, Duration, GetMempoolTxRequest, GetAddressUtxosArg, - GetAddressUtxosReply, GetAddressUtxosReplyList, LightdInfo, PingResponse, RawTransaction, - SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, + AddressList, Balance, BlockId, BlockRange, Duration, GetAddressUtxosArg, + GetAddressUtxosReply, GetAddressUtxosReplyList, GetMempoolTxRequest, LightdInfo, + PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, + TxFilter, }, utils::{GetBlockRangeError, ValidatedBlockRangeRequest}, }; @@ -1194,14 +1195,16 @@ impl LightWalletIndexer for FetchServiceSubscriber { &self, request: GetMempoolTxRequest, ) -> Result { - let mut exclude_txids: Vec = vec![]; - + for (i, excluded_id) in request.exclude_txid_suffixes.iter().enumerate() { if excluded_id.len() > 32 { - return Err(FetchServiceError::TonicStatusError(tonic::Status::invalid_argument( - format!("Error: excluded txid {} is larger than 32 bytes", i) - ))) + return Err(FetchServiceError::TonicStatusError( + tonic::Status::invalid_argument(format!( + "Error: excluded txid {} is larger than 32 bytes", + i + )), + )); } // NOTE: the TransactionHash methods cannot be used for this hex encoding as exclusions could be truncated to less than 32 bytes @@ -1209,7 +1212,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { let hex_string_txid: String = hex::encode(&reversed_txid_bytes); exclude_txids.push(hex_string_txid); } - + let mempool = self.mempool.clone(); let service_timeout = self.config.service.timeout; let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); @@ -1602,7 +1605,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { })? 
.into(), ); - + let sapling_activation_height = blockchain_info .upgrades() .get(&sapling_id) @@ -1613,15 +1616,16 @@ impl LightWalletIndexer for FetchServiceSubscriber { ) .to_string(); - let nu_info = blockchain_info.upgrades() - .last() - .expect("Expected validator to have a consenus activated.") - .1 - .into_parts(); + let nu_info = blockchain_info + .upgrades() + .last() + .expect("Expected validator to have a consenus activated.") + .1 + .into_parts(); let nu_name = nu_info.0; let nu_height = nu_info.1; - + Ok(LightdInfo { version: self.data.build_info().version(), vendor: "ZingoLabs ZainoD".to_string(), diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index dbc1245f5..c47046885 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -43,8 +43,8 @@ use zaino_fetch::{ use zaino_proto::proto::{ compact_formats::CompactBlock, service::{ - AddressList, Balance, BlockId, BlockRange, GetMempoolTxRequest, GetAddressUtxosArg, - GetAddressUtxosReply, GetAddressUtxosReplyList, LightdInfo, PingResponse, RawTransaction, + AddressList, Balance, BlockId, BlockRange, GetAddressUtxosArg, GetAddressUtxosReply, + GetAddressUtxosReplyList, GetMempoolTxRequest, LightdInfo, PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, utils::{pool_types_from_vector, PoolTypeError, ValidatedBlockRangeRequest}, @@ -2221,12 +2221,15 @@ impl LightWalletIndexer for StateServiceSubscriber { request: GetMempoolTxRequest, ) -> Result { let mut exclude_txids: Vec = vec![]; - + for (i, excluded_id) in request.exclude_txid_suffixes.iter().enumerate() { if excluded_id.len() > 32 { - return Err(StateServiceError::TonicStatusError(tonic::Status::invalid_argument( - format!("Error: excluded txid {} is larger than 32 bytes", i) - ))) + return Err(StateServiceError::TonicStatusError( + tonic::Status::invalid_argument(format!( + "Error: excluded txid {} is larger than 32 bytes", + i + )), + 
)); } // NOTE: the TransactionHash methods cannot be used for this hex encoding as exclusions could be truncated to less than 32 bytes @@ -2535,15 +2538,16 @@ impl LightWalletIndexer for StateServiceSubscriber { ) .to_string(); - let nu_info = blockchain_info.upgrades() - .last() - .expect("Expected validator to have a consenus activated.") - .1 - .into_parts(); + let nu_info = blockchain_info + .upgrades() + .last() + .expect("Expected validator to have a consenus activated.") + .1 + .into_parts(); let nu_name = nu_info.0; let nu_height = nu_info.1; - + Ok(LightdInfo { version: self.data.build_info().version(), vendor: "ZingoLabs ZainoD".to_string(), diff --git a/zaino-state/src/indexer.rs b/zaino-state/src/indexer.rs index 44837e478..bfa1ac6e2 100644 --- a/zaino-state/src/indexer.rs +++ b/zaino-state/src/indexer.rs @@ -16,10 +16,10 @@ use zaino_fetch::jsonrpsee::response::{ use zaino_proto::proto::{ compact_formats::CompactBlock, service::{ - AddressList, Balance, BlockId, BlockRange, Duration, GetMempoolTxRequest, GetAddressUtxosArg, - GetAddressUtxosReplyList, GetSubtreeRootsArg, LightdInfo, PingResponse, RawTransaction, - SendResponse, ShieldedProtocol, SubtreeRoot, TransparentAddressBlockFilter, TreeState, - TxFilter, + AddressList, Balance, BlockId, BlockRange, Duration, GetAddressUtxosArg, + GetAddressUtxosReplyList, GetMempoolTxRequest, GetSubtreeRootsArg, LightdInfo, + PingResponse, RawTransaction, SendResponse, ShieldedProtocol, SubtreeRoot, + TransparentAddressBlockFilter, TreeState, TxFilter, }, }; use zebra_chain::{ From 2b94987e24478f3f060e2a9bec6b13ea307b553e Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Fri, 12 Dec 2025 17:47:32 -0300 Subject: [PATCH 069/114] Fix compiler errors --- integration-tests/tests/fetch_service.rs | 11 +- zaino-proto/src/proto/service.rs | 906 +++++++++++++++-------- 2 files changed, 623 insertions(+), 294 deletions(-) diff --git a/integration-tests/tests/fetch_service.rs b/integration-tests/tests/fetch_service.rs 
index 834ac8959..3011a61b2 100644 --- a/integration-tests/tests/fetch_service.rs +++ b/integration-tests/tests/fetch_service.rs @@ -3,7 +3,7 @@ use futures::StreamExt as _; use zaino_fetch::jsonrpsee::connector::{test_node_and_return_url, JsonRpSeeConnector}; use zaino_proto::proto::service::{ - AddressList, BlockId, BlockRange, Exclude, GetAddressUtxosArg, GetSubtreeRootsArg, PoolType, + AddressList, BlockId, BlockRange, GetMempoolTxRequest, GetAddressUtxosArg, GetSubtreeRootsArg, PoolType, TransparentAddressBlockFilter, TxFilter, }; use zaino_state::FetchServiceSubscriber; @@ -1413,8 +1413,8 @@ async fn fetch_service_get_mempool_tx(validator: &ValidatorKind tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - let exclude_list_empty = Exclude { txid: Vec::new() }; - + let exclude_list_empty = GetMempoolTxRequest { exclude_txid_suffixes: Vec::new(), pool_types: Vec::new() }; + let fetch_service_stream = fetch_service_subscriber .get_mempool_tx(exclude_list_empty.clone()) .await @@ -1438,8 +1438,9 @@ async fn fetch_service_get_mempool_tx(validator: &ValidatorKind assert_eq!(sorted_fetch_mempool_tx[1].txid, sorted_txids[1]); assert_eq!(sorted_fetch_mempool_tx.len(), 2); - let exclude_list = Exclude { - txid: vec![sorted_txids[0][8..].to_vec()], + let exclude_list = GetMempoolTxRequest { + exclude_txid_suffixes: vec![sorted_txids[0][8..].to_vec()], + pool_types: vec![], }; let exclude_fetch_service_stream = fetch_service_subscriber diff --git a/zaino-proto/src/proto/service.rs b/zaino-proto/src/proto/service.rs index cbe8a5d0f..2441bc93f 100644 --- a/zaino-proto/src/proto/service.rs +++ b/zaino-proto/src/proto/service.rs @@ -380,10 +380,10 @@ pub mod compact_tx_streamer_client { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value + clippy::let_unit_value, )] - use tonic::codegen::http::Uri; use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct CompactTxStreamerClient { inner: tonic::client::Grpc, 
@@ -427,8 +427,9 @@ pub mod compact_tx_streamer_client { >::ResponseBody, >, >, - >>::Error: - Into + std::marker::Send + std::marker::Sync, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, { CompactTxStreamerClient::new(InterceptedService::new(inner, interceptor)) } @@ -468,18 +469,26 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLatestBlock", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLatestBlock", + ), + ); self.inner.unary(req, path, codec).await } /// Return the compact block corresponding to the given block identifier @@ -490,18 +499,26 @@ pub mod compact_tx_streamer_client { tonic::Response, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlock", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + 
"cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlock", + ), + ); self.inner.unary(req, path, codec).await } /// Same as GetBlock except the returned CompactBlock value contains only @@ -516,18 +533,26 @@ pub mod compact_tx_streamer_client { tonic::Response, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockNullifiers", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockNullifiers", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockNullifiers", + ), + ); self.inner.unary(req, path, codec).await } /// Return a list of consecutive compact blocks in the specified range, @@ -539,21 +564,31 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response>, + tonic::Response< + tonic::codec::Streaming, + >, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRange", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockRange", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + 
"GetBlockRange", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Same as GetBlockRange except the returned CompactBlock values contain @@ -565,21 +600,31 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response>, + tonic::Response< + tonic::codec::Streaming, + >, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRangeNullifiers", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockRangeNullifiers", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockRangeNullifiers", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Return the requested full (not compact) transaction (as from zcashd) @@ -587,18 +632,26 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTransaction", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTransaction", - )); + req.extensions_mut() + .insert( + 
GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTransaction", + ), + ); self.inner.unary(req, path, codec).await } /// Submit the given transaction to the Zcash network @@ -606,18 +659,26 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/SendTransaction", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "SendTransaction", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "SendTransaction", + ), + ); self.inner.unary(req, path, codec).await } /// Return RawTransactions that match the given transparent address filter. 
@@ -631,18 +692,26 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTxids", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressTxids", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressTxids", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Return the transactions corresponding to the given t-address within the given block range. @@ -654,54 +723,78 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTransactions", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressTransactions", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressTransactions", + ), + ); self.inner.server_streaming(req, path, codec).await } pub async fn get_taddress_balance( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - 
tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalance", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressBalance", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressBalance", + ), + ); self.inner.unary(req, path, codec).await } pub async fn get_taddress_balance_stream( &mut self, request: impl tonic::IntoStreamingRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalanceStream", ); let mut req = request.into_streaming_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressBalanceStream", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressBalanceStream", + ), + ); self.inner.client_streaming(req, path, codec).await } /// Returns a stream of the compact transaction representation for transactions @@ -720,21 +813,31 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response>, + tonic::Response< + tonic::codec::Streaming, + >, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - 
tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolTx", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetMempoolTx", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetMempoolTx", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Return a stream of current Mempool transactions. This will keep the output stream open while @@ -746,18 +849,26 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolStream", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetMempoolStream", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetMempoolStream", + ), + ); self.inner.server_streaming(req, path, codec).await } /// GetTreeState returns the note commitment tree state corresponding to the given block. 
@@ -768,36 +879,52 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTreeState", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTreeState", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTreeState", + ), + ); self.inner.unary(req, path, codec).await } pub async fn get_latest_tree_state( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestTreeState", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLatestTreeState", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLatestTreeState", + ), + ); self.inner.unary(req, path, codec).await } /// Returns a stream of information about roots of subtrees of the note commitment tree @@ -809,37 +936,55 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - 
tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetSubtreeRoots", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetSubtreeRoots", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetSubtreeRoots", + ), + ); self.inner.server_streaming(req, path, codec).await } pub async fn get_address_utxos( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxos", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetAddressUtxos", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetAddressUtxos", + ), + ); self.inner.unary(req, path, codec).await } pub async fn get_address_utxos_stream( @@ -849,18 +994,26 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + 
format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxosStream", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetAddressUtxosStream", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetAddressUtxosStream", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Return information about this lightwalletd instance and the blockchain @@ -868,18 +1021,26 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLightdInfo", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLightdInfo", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLightdInfo", + ), + ); self.inner.unary(req, path, codec).await } /// Testing-only, requires lightwalletd --ping-very-insecure (do not enable in production) @@ -887,18 +1048,23 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service 
was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/Ping", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "Ping", - )); + req.extensions_mut() + .insert( + GrpcMethod::new("cash.z.wallet.sdk.rpc.CompactTxStreamer", "Ping"), + ); self.inner.unary(req, path, codec).await } } @@ -910,7 +1076,7 @@ pub mod compact_tx_streamer_server { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value + clippy::let_unit_value, )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with CompactTxStreamerServer. @@ -947,7 +1113,8 @@ pub mod compact_tx_streamer_server { crate::proto::compact_formats::CompactBlock, tonic::Status, >, - > + std::marker::Send + > + + std::marker::Send + 'static; /// Return a list of consecutive compact blocks in the specified range, /// which is inclusive of `range.end`. @@ -957,14 +1124,18 @@ pub mod compact_tx_streamer_server { async fn get_block_range( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the GetBlockRangeNullifiers method. type GetBlockRangeNullifiersStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result< crate::proto::compact_formats::CompactBlock, tonic::Status, >, - > + std::marker::Send + > + + std::marker::Send + 'static; /// Same as GetBlockRange except the returned CompactBlock values contain /// only nullifiers. 
@@ -974,7 +1145,10 @@ pub mod compact_tx_streamer_server { async fn get_block_range_nullifiers( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Return the requested full (not compact) transaction (as from zcashd) async fn get_transaction( &self, @@ -988,7 +1162,8 @@ pub mod compact_tx_streamer_server { /// Server streaming response type for the GetTaddressTxids method. type GetTaddressTxidsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + std::marker::Send + > + + std::marker::Send + 'static; /// Return RawTransactions that match the given transparent address filter. /// @@ -997,18 +1172,25 @@ pub mod compact_tx_streamer_server { async fn get_taddress_txids( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the GetTaddressTransactions method. type GetTaddressTransactionsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + std::marker::Send + > + + std::marker::Send + 'static; /// Return the transactions corresponding to the given t-address within the given block range. /// Mempool transactions are not included in the results. async fn get_taddress_transactions( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_taddress_balance( &self, request: tonic::Request, @@ -1019,8 +1201,12 @@ pub mod compact_tx_streamer_server { ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the GetMempoolTx method. 
type GetMempoolTxStream: tonic::codegen::tokio_stream::Stream< - Item = std::result::Result, - > + std::marker::Send + Item = std::result::Result< + crate::proto::compact_formats::CompactTx, + tonic::Status, + >, + > + + std::marker::Send + 'static; /// Returns a stream of the compact transaction representation for transactions /// currently in the mempool. The results of this operation may be a few @@ -1037,18 +1223,25 @@ pub mod compact_tx_streamer_server { async fn get_mempool_tx( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the GetMempoolStream method. type GetMempoolStreamStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + std::marker::Send + > + + std::marker::Send + 'static; /// Return a stream of current Mempool transactions. This will keep the output stream open while /// there are mempool transactions. It will close the returned stream when a new block is mined. async fn get_mempool_stream( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// GetTreeState returns the note commitment tree state corresponding to the given block. /// See section 3.7 of the Zcash protocol specification. It returns several other useful /// values also (even though they can be obtained using GetBlock). @@ -1064,27 +1257,38 @@ pub mod compact_tx_streamer_server { /// Server streaming response type for the GetSubtreeRoots method. type GetSubtreeRootsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + std::marker::Send + > + + std::marker::Send + 'static; /// Returns a stream of information about roots of subtrees of the note commitment tree /// for the specified shielded protocol (Sapling or Orchard). 
async fn get_subtree_roots( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_address_utxos( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the GetAddressUtxosStream method. type GetAddressUtxosStreamStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + std::marker::Send + > + + std::marker::Send + 'static; async fn get_address_utxos_stream( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Return information about this lightwalletd instance and the blockchain async fn get_lightd_info( &self, @@ -1117,7 +1321,10 @@ pub mod compact_tx_streamer_server { max_encoding_message_size: None, } } - pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService where F: tonic::service::Interceptor, { @@ -1172,16 +1379,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock" => { #[allow(non_camel_case_types)] struct GetLatestBlockSvc(pub Arc); - impl tonic::server::UnaryService for GetLatestBlockSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetLatestBlockSvc { type Response = super::BlockId; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_latest_block(&inner, request).await + ::get_latest_block(&inner, request) + .await }; Box::pin(fut) } @@ -1211,9 +1425,14 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock" => { 
#[allow(non_camel_case_types)] struct GetBlockSvc(pub Arc); - impl tonic::server::UnaryService for GetBlockSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService for GetBlockSvc { type Response = crate::proto::compact_formats::CompactBlock; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -1250,18 +1469,25 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockNullifiers" => { #[allow(non_camel_case_types)] struct GetBlockNullifiersSvc(pub Arc); - impl tonic::server::UnaryService - for GetBlockNullifiersSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetBlockNullifiersSvc { type Response = crate::proto::compact_formats::CompactBlock; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_block_nullifiers(&inner, request) + ::get_block_nullifiers( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1292,21 +1518,24 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRange" => { #[allow(non_camel_case_types)] struct GetBlockRangeSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetBlockRangeSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetBlockRangeSvc { type Response = crate::proto::compact_formats::CompactBlock; type ResponseStream = T::GetBlockRangeStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_block_range(&inner, request).await + ::get_block_range(&inner, request) + .await }; Box::pin(fut) } @@ 
-1336,14 +1565,16 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRangeNullifiers" => { #[allow(non_camel_case_types)] struct GetBlockRangeNullifiersSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetBlockRangeNullifiersSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetBlockRangeNullifiersSvc { type Response = crate::proto::compact_formats::CompactBlock; type ResponseStream = T::GetBlockRangeNullifiersStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -1351,9 +1582,10 @@ pub mod compact_tx_streamer_server { let inner = Arc::clone(&self.0); let fut = async move { ::get_block_range_nullifiers( - &inner, request, - ) - .await + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1383,16 +1615,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTransaction" => { #[allow(non_camel_case_types)] struct GetTransactionSvc(pub Arc); - impl tonic::server::UnaryService for GetTransactionSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetTransactionSvc { type Response = super::RawTransaction; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_transaction(&inner, request).await + ::get_transaction(&inner, request) + .await }; Box::pin(fut) } @@ -1422,18 +1661,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/SendTransaction" => { #[allow(non_camel_case_types)] struct SendTransactionSvc(pub Arc); - impl tonic::server::UnaryService - for SendTransactionSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for SendTransactionSvc { type Response = 
super::SendResponse; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::send_transaction(&inner, request).await + ::send_transaction(&inner, request) + .await }; Box::pin(fut) } @@ -1463,21 +1707,28 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTxids" => { #[allow(non_camel_case_types)] struct GetTaddressTxidsSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetTaddressTxidsSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService< + super::TransparentAddressBlockFilter, + > for GetTaddressTxidsSvc { type Response = super::RawTransaction; type ResponseStream = T::GetTaddressTxidsStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_taddress_txids(&inner, request).await + ::get_taddress_txids( + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1507,21 +1758,27 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTransactions" => { #[allow(non_camel_case_types)] struct GetTaddressTransactionsSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetTaddressTransactionsSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService< + super::TransparentAddressBlockFilter, + > for GetTaddressTransactionsSvc { type Response = super::RawTransaction; type ResponseStream = T::GetTaddressTransactionsStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - 
::get_taddress_transactions(&inner, request) + ::get_taddress_transactions( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1552,18 +1809,25 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalance" => { #[allow(non_camel_case_types)] struct GetTaddressBalanceSvc(pub Arc); - impl tonic::server::UnaryService - for GetTaddressBalanceSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetTaddressBalanceSvc { type Response = super::Balance; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_taddress_balance(&inner, request) + ::get_taddress_balance( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1594,11 +1858,15 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalanceStream" => { #[allow(non_camel_case_types)] struct GetTaddressBalanceStreamSvc(pub Arc); - impl tonic::server::ClientStreamingService - for GetTaddressBalanceStreamSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ClientStreamingService + for GetTaddressBalanceStreamSvc { type Response = super::Balance; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request>, @@ -1606,9 +1874,10 @@ pub mod compact_tx_streamer_server { let inner = Arc::clone(&self.0); let fut = async move { ::get_taddress_balance_stream( - &inner, request, - ) - .await + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1638,21 +1907,24 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolTx" => { #[allow(non_camel_case_types)] struct GetMempoolTxSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetMempoolTxSvc - { + impl< + T: CompactTxStreamer, + > 
tonic::server::ServerStreamingService + for GetMempoolTxSvc { type Response = crate::proto::compact_formats::CompactTx; type ResponseStream = T::GetMempoolTxStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_mempool_tx(&inner, request).await + ::get_mempool_tx(&inner, request) + .await }; Box::pin(fut) } @@ -1682,17 +1954,27 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolStream" => { #[allow(non_camel_case_types)] struct GetMempoolStreamSvc(pub Arc); - impl tonic::server::ServerStreamingService - for GetMempoolStreamSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetMempoolStreamSvc { type Response = super::RawTransaction; type ResponseStream = T::GetMempoolStreamStream; - type Future = - BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_mempool_stream(&inner, request).await + ::get_mempool_stream( + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1722,16 +2004,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTreeState" => { #[allow(non_camel_case_types)] struct GetTreeStateSvc(pub Arc); - impl tonic::server::UnaryService for GetTreeStateSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetTreeStateSvc { type Response = super::TreeState; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - 
::get_tree_state(&inner, request).await + ::get_tree_state(&inner, request) + .await }; Box::pin(fut) } @@ -1761,13 +2050,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestTreeState" => { #[allow(non_camel_case_types)] struct GetLatestTreeStateSvc(pub Arc); - impl tonic::server::UnaryService for GetLatestTreeStateSvc { + impl tonic::server::UnaryService + for GetLatestTreeStateSvc { type Response = super::TreeState; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_latest_tree_state(&inner, request) + ::get_latest_tree_state( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1798,21 +2097,24 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetSubtreeRoots" => { #[allow(non_camel_case_types)] struct GetSubtreeRootsSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetSubtreeRootsSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetSubtreeRootsSvc { type Response = super::SubtreeRoot; type ResponseStream = T::GetSubtreeRootsStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_subtree_roots(&inner, request).await + ::get_subtree_roots(&inner, request) + .await }; Box::pin(fut) } @@ -1842,19 +2144,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxos" => { #[allow(non_camel_case_types)] struct GetAddressUtxosSvc(pub Arc); - impl - tonic::server::UnaryService - for GetAddressUtxosSvc - { + impl< + T: CompactTxStreamer, + > 
tonic::server::UnaryService + for GetAddressUtxosSvc { type Response = super::GetAddressUtxosReplyList; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_address_utxos(&inner, request).await + ::get_address_utxos(&inner, request) + .await }; Box::pin(fut) } @@ -1884,21 +2190,26 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxosStream" => { #[allow(non_camel_case_types)] struct GetAddressUtxosStreamSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetAddressUtxosStreamSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetAddressUtxosStreamSvc { type Response = super::GetAddressUtxosReply; type ResponseStream = T::GetAddressUtxosStreamStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_address_utxos_stream(&inner, request) + ::get_address_utxos_stream( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1929,13 +2240,21 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLightdInfo" => { #[allow(non_camel_case_types)] struct GetLightdInfoSvc(pub Arc); - impl tonic::server::UnaryService for GetLightdInfoSvc { + impl tonic::server::UnaryService + for GetLightdInfoSvc { type Response = super::LightdInfo; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_lightd_info(&inner, request).await + 
::get_lightd_info(&inner, request) + .await }; Box::pin(fut) } @@ -1965,9 +2284,14 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/Ping" => { #[allow(non_camel_case_types)] struct PingSvc(pub Arc); - impl tonic::server::UnaryService for PingSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService for PingSvc { type Response = super::PingResponse; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -2001,19 +2325,23 @@ pub mod compact_tx_streamer_server { }; Box::pin(fut) } - _ => Box::pin(async move { - let mut response = http::Response::new(empty_body()); - let headers = response.headers_mut(); - headers.insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers.insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }), + _ => { + Box::pin(async move { + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } } } } From b30a70e5e46546c5ed20ceaf8c7e1fb159465c9f Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Fri, 12 Dec 2025 18:22:46 -0300 Subject: [PATCH 070/114] deprecate FullTransaction::to_compact() in favor of to_compact_tx --- CHANGELOG.md | 5 ++++- zaino-fetch/src/chain/transaction.rs | 24 ++++++++++++++++++++---- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 59a13060f..3695775c0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,7 +36,10 @@ and this library adheres to Rust's notion of - `service.CompactTxStreamer`: - The `GetBlockNullifiers` and `GetBlockRangeNullifiers` methods are deprecated. 
- +- `zaino_fetch::FullTransaction::to_compact` deprecated in favor of `to_compact_tx` which includes + an optional for index to explicitly specify that the transaction is in the mempool and has no + index and `Vec` to filter pool types according to the transparent data changes of + lightclient-protocol v0.4.0 ## [v0.3.6] - 2025-05-20 ### Added diff --git a/zaino-fetch/src/chain/transaction.rs b/zaino-fetch/src/chain/transaction.rs index bbc65891f..f375b4e10 100644 --- a/zaino-fetch/src/chain/transaction.rs +++ b/zaino-fetch/src/chain/transaction.rs @@ -5,9 +5,12 @@ use crate::chain::{ utils::{read_bytes, read_i64, read_u32, read_u64, skip_bytes, CompactSize, ParseFromSlice}, }; use std::io::Cursor; -use zaino_proto::proto::compact_formats::{ - CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, CompactTx, CompactTxIn, - TxOut as CompactTxOut, +use zaino_proto::proto::{ + compact_formats::{ + CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, CompactTx, CompactTxIn, + TxOut as CompactTxOut, + }, + service::PoolType, }; /// Txin format as described in @@ -1129,7 +1132,20 @@ impl FullTransaction { } /// Converts a zcash full transaction into a compact transaction. + #[deprecated] pub fn to_compact(self, index: u64) -> Result { + self.to_compact_tx(Some(index), vec![]) + } + + /// Converts a Zcash Transaction into a `CompactTx` of the Light wallet protocol. + /// if the transaction you want to convert is a mempool transaction you can specify `None`. + /// specify the `PoolType`s that the transaction should include in the `pool_type` argument. + /// a `vec![]` will default to `[PoolType::Sapling, PoolType::Orchard]`. + pub fn to_compact_tx( + self, + index: Option, + pool_types: Vec, + ) -> Result { let hash = self.tx_id(); // NOTE: LightWalletD currently does not return a fee and is not currently priority here. 
@@ -1196,7 +1212,7 @@ impl FullTransaction { .collect(); Ok(CompactTx { - index, + index: index.unwrap_or(0), // this assumes that mempool txs have a zeroed index txid: hash, fee, spends, From 31d961b2d7149abe9112a6734837c18146beac30 Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Tue, 16 Dec 2025 22:13:14 -0300 Subject: [PATCH 071/114] Deprecation of `to_compact` for Block and Transaction --- CHANGELOG.md | 7 ++ zaino-fetch/src/chain/block.rs | 30 +++++- zaino-fetch/src/chain/transaction.rs | 137 +++++++++++++----------- zaino-proto/src/proto/utils.rs | 151 +++++++++++++++++++++++++++ zaino-state/src/local_cache.rs | 5 +- 5 files changed, 262 insertions(+), 68 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3695775c0..13a3c1917 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,13 @@ and this library adheres to Rust's notion of an optional for index to explicitly specify that the transaction is in the mempool and has no index and `Vec` to filter pool types according to the transparent data changes of lightclient-protocol v0.4.0 +- `zaino_fetch::chain::Block::to_compact` deprecated in favor of `to_compact_block` allowing callers + to specify `PoolTypeFilter` to filter pools that are included into the compact block according to + lightclient-protocol v0.4.0 +- `zaino_fetch::chain::Transaction::to_compact` deprecated in favor of `to_compact_tx` allowing callers + to specify `PoolTypFilter` to filter pools that are included into the compact transaction according + to lightclient-protocol v0.4.0. 
+ ## [v0.3.6] - 2025-05-20 ### Added diff --git a/zaino-fetch/src/chain/block.rs b/zaino-fetch/src/chain/block.rs index a0a81d7f3..78b729cd9 100644 --- a/zaino-fetch/src/chain/block.rs +++ b/zaino-fetch/src/chain/block.rs @@ -7,7 +7,10 @@ use crate::chain::{ }; use sha2::{Digest, Sha256}; use std::io::Cursor; -use zaino_proto::proto::compact_formats::{ChainMetadata, CompactBlock}; +use zaino_proto::proto::{ + compact_formats::{ChainMetadata, CompactBlock}, + utils::PoolTypeFilter, +}; /// A block header, containing metadata about a block. /// @@ -362,17 +365,20 @@ impl FullBlock { return Err(ParseError::InvalidData(format!( "Error decoding full block - {} bytes of Remaining data. Compact Block Created: ({:?})", remaining_data.len(), - full_block.into_compact(0, 0) + full_block.into_compact_block(0, 0, PoolTypeFilter::default()) ))); } Ok(full_block) } - /// Converts a zcash full block into a compact block. - pub fn into_compact( + /// Turns this Block into a Compact Block according to the Lightclient protocol [ZIP-307](https://zips.z.cash/zip-0307) + /// callers can choose which pools to include in this compact block by specifying a + /// `PoolTypeFilter` accordingly. + pub fn into_compact_block( self, sapling_commitment_tree_size: u32, orchard_commitment_tree_size: u32, + pool_types: PoolTypeFilter, ) -> Result { let vtx = self .vtx @@ -380,7 +386,7 @@ impl FullBlock { .enumerate() .filter_map(|(index, tx)| { if tx.has_shielded_elements() { - Some(tx.to_compact(index as u64)) + Some(tx.to_compact_tx(Some(index as u64), pool_types.clone())) } else { None } @@ -408,6 +414,20 @@ impl FullBlock { Ok(compact_block) } + #[deprecated] + /// Converts a zcash full block into a compact block. 
+ pub fn into_compact( + self, + sapling_commitment_tree_size: u32, + orchard_commitment_tree_size: u32, + ) -> Result { + self.into_compact_block( + sapling_commitment_tree_size, + orchard_commitment_tree_size, + PoolTypeFilter::default(), + ) + } + /// Extracts the block height from the coinbase transaction. fn get_block_height(transactions: &[FullTransaction]) -> Result { let transparent_inputs = transactions[0].transparent_inputs(); diff --git a/zaino-fetch/src/chain/transaction.rs b/zaino-fetch/src/chain/transaction.rs index f375b4e10..81d6e1ced 100644 --- a/zaino-fetch/src/chain/transaction.rs +++ b/zaino-fetch/src/chain/transaction.rs @@ -10,7 +10,7 @@ use zaino_proto::proto::{ CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, CompactTx, CompactTxIn, TxOut as CompactTxOut, }, - service::PoolType, + utils::PoolTypeFilter, }; /// Txin format as described in @@ -1134,17 +1134,17 @@ impl FullTransaction { /// Converts a zcash full transaction into a compact transaction. #[deprecated] pub fn to_compact(self, index: u64) -> Result { - self.to_compact_tx(Some(index), vec![]) + self.to_compact_tx(Some(index), PoolTypeFilter::default()) } /// Converts a Zcash Transaction into a `CompactTx` of the Light wallet protocol. /// if the transaction you want to convert is a mempool transaction you can specify `None`. - /// specify the `PoolType`s that the transaction should include in the `pool_type` argument. - /// a `vec![]` will default to `[PoolType::Sapling, PoolType::Orchard]`. + /// specify the `PoolType`s that the transaction should include in the `pool_types` argument + /// with a `PoolTypeFilter` indicating which pools the compact block should include. pub fn to_compact_tx( self, index: Option, - pool_types: Vec, + pool_types: PoolTypeFilter, ) -> Result { let hash = self.tx_id(); @@ -1153,63 +1153,78 @@ impl FullTransaction { // if you require this functionality. 
let fee = 0; - let spends = self - .raw_transaction - .shielded_spends - .iter() - .map(|spend| CompactSaplingSpend { - nf: spend.nullifier.clone(), - }) - .collect(); + let spends = if pool_types.includes_sapling() { + self.raw_transaction + .shielded_spends + .iter() + .map(|spend| CompactSaplingSpend { + nf: spend.nullifier.clone(), + }) + .collect() + } else { + vec![] + }; - let outputs = self - .raw_transaction - .shielded_outputs - .iter() - .map(|output| CompactSaplingOutput { - cmu: output.cmu.clone(), - ephemeral_key: output.ephemeral_key.clone(), - ciphertext: output.enc_ciphertext[..52].to_vec(), - }) - .collect(); - - let actions = self - .raw_transaction - .orchard_actions - .iter() - .map(|action| CompactOrchardAction { - nullifier: action.nullifier.clone(), - cmx: action.cmx.clone(), - ephemeral_key: action.ephemeral_key.clone(), - ciphertext: action.enc_ciphertext[..52].to_vec(), - }) - .collect(); - - let vout = self - .raw_transaction - .transparent_outputs - .iter() - .map(|t_out| CompactTxOut { - value: t_out.value, - script_pub_key: t_out.script_hash.clone(), - }) - .collect(); - - let vin = self - .raw_transaction - .transparent_inputs - .iter() - .filter_map(|t_in| { - if t_in.is_null() { - None - } else { - Some(CompactTxIn { - prevout_txid: t_in.prev_txid.clone(), - prevout_index: t_in.prev_index, - }) - } - }) - .collect(); + let outputs = if pool_types.includes_sapling() { + self.raw_transaction + .shielded_outputs + .iter() + .map(|output| CompactSaplingOutput { + cmu: output.cmu.clone(), + ephemeral_key: output.ephemeral_key.clone(), + ciphertext: output.enc_ciphertext[..52].to_vec(), + }) + .collect() + } else { + vec![] + }; + + let actions = if pool_types.includes_orchard() { + self.raw_transaction + .orchard_actions + .iter() + .map(|action| CompactOrchardAction { + nullifier: action.nullifier.clone(), + cmx: action.cmx.clone(), + ephemeral_key: action.ephemeral_key.clone(), + ciphertext: action.enc_ciphertext[..52].to_vec(), + }) 
+ .collect() + } else { + vec![] + }; + + let vout = if pool_types.includes_tranparent() { + self.raw_transaction + .transparent_outputs + .iter() + .map(|t_out| CompactTxOut { + value: t_out.value, + script_pub_key: t_out.script_hash.clone(), + }) + .collect() + } else { + vec![] + }; + + let vin = if pool_types.includes_tranparent() { + self.raw_transaction + .transparent_inputs + .iter() + .filter_map(|t_in| { + if t_in.is_null() { + None + } else { + Some(CompactTxIn { + prevout_txid: t_in.prev_txid.clone(), + prevout_index: t_in.prev_index, + }) + } + }) + .collect() + } else { + vec![] + }; Ok(CompactTx { index: index.unwrap_or(0), // this assumes that mempool txs have a zeroed index diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs index 571bfabd5..08ad0bf66 100644 --- a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -131,3 +131,154 @@ impl ValidatedBlockRangeRequest { (self.start, self.end) = (self.end, self.start); } } +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct PoolTypeFilter { + include_transparent: bool, + include_sapling: bool, + include_orchard: bool, +} + +impl PoolTypeFilter { + /// By default PoolType includes `Sapling` and `Orchard` pools. + pub fn default() -> Self { + PoolTypeFilter { + include_transparent: false, + include_sapling: true, + include_orchard: true, + } + } + + /// create a `PoolTypeFilter` from a vector of `PoolType` + /// If the vector is empty it will return `Self::default()`. 
+ /// If the vector contains `PoolType::Invalid`, returns `None` + /// If the vector contains more than 3 elements, returns `None` + pub fn new_from_pool_types(pool_types: &Vec) -> Option { + if pool_types.len() > PoolType::Orchard as usize { + return None; + } + + if pool_types.is_empty() { + Some(Self::default()) + } else { + let mut filter = PoolTypeFilter::empty(); + + for pool_type in pool_types { + match pool_type { + PoolType::Invalid => return None, + PoolType::Transparent => filter.include_transparent = true, + PoolType::Sapling => filter.include_sapling = true, + PoolType::Orchard => filter.include_orchard = true, + } + } + + // guard against returning an invalid state this shouls never happen. + if filter.is_empty() { + return Some(Self::default()); + } else { + return Some(filter); + } + } + } + + /// only internal use. this in an invalid state. + fn empty() -> Self { + Self { + include_transparent: false, + include_sapling: false, + include_orchard: false, + } + } + + /// only internal use + fn is_empty(&self) -> bool { + !self.include_transparent && !self.include_sapling && !self.include_orchard + } + + /// retuns whether the filter includes transparent data + pub fn includes_tranparent(&self) -> bool { + self.include_transparent + } + + /// returns whether the filter includes orchard data + pub fn includes_sapling(&self) -> bool { + self.include_sapling + } + + // returnw whether the filter includes orchard data + pub fn includes_orchard(&self) -> bool { + self.include_orchard + } + + /// testing only + #[allow(dead_code)] + pub(crate) fn from_checked_parts( + include_transparent: bool, + include_sapling: bool, + include_orchard: bool, + ) -> Self { + PoolTypeFilter { + include_transparent, + include_sapling, + include_orchard, + } + } +} + +#[cfg(test)] +mod test { + use crate::proto::{service::PoolType, utils::PoolTypeFilter}; + + #[test] + fn test_pool_type_filter_none_when_invalid() { + let pools = [ + PoolType::Transparent, + PoolType::Sapling, 
+ PoolType::Orchard, + PoolType::Invalid, + ] + .to_vec(); + + assert_eq!(PoolTypeFilter::new_from_pool_types(&pools), None); + } + + #[test] + fn test_pool_type_filter_none_when_too_many_items() { + let pools = [ + PoolType::Transparent, + PoolType::Sapling, + PoolType::Orchard, + PoolType::Orchard, + ] + .to_vec(); + + assert_eq!(PoolTypeFilter::new_from_pool_types(&pools), None); + } + + #[test] + fn test_pool_type_filter_t_z_o() { + let pools = [PoolType::Transparent, PoolType::Sapling, PoolType::Orchard].to_vec(); + + assert_eq!( + PoolTypeFilter::new_from_pool_types(&pools), + Some(PoolTypeFilter::from_checked_parts(true, true, false)) + ); + } + + #[test] + fn test_pool_type_filter_t() { + let pools = [PoolType::Transparent].to_vec(); + + assert_eq!( + PoolTypeFilter::new_from_pool_types(&pools), + Some(PoolTypeFilter::from_checked_parts(true, false, false)) + ); + } + + #[test] + fn test_pool_type_filter_default() { + assert_eq!( + PoolTypeFilter::new_from_pool_types(&vec![]), + Some(PoolTypeFilter::default()) + ); + } +} diff --git a/zaino-state/src/local_cache.rs b/zaino-state/src/local_cache.rs index e81079fba..5fcd9c5c7 100644 --- a/zaino-state/src/local_cache.rs +++ b/zaino-state/src/local_cache.rs @@ -23,7 +23,7 @@ use zaino_fetch::{ }; use zaino_proto::proto::{ compact_formats::{ChainMetadata, CompactBlock, CompactOrchardAction}, - service::PoolType, + service::PoolType, utils::PoolTypeFilter, }; use zebra_chain::{ block::{Hash, Height}, @@ -334,7 +334,7 @@ async fn try_fetcher_path( type_name::(), )) })? 
- .into_compact( + .into_compact_block( u32::try_from(trees.sapling()).map_err(|e| { RpcRequestError::Transport(TransportError::BadNodeData( Box::new(e), @@ -347,6 +347,7 @@ async fn try_fetcher_path( type_name::(), )) })?, + PoolTypeFilter::default(), ) .map_err(|e| { RpcRequestError::Transport(TransportError::BadNodeData( From 4257eb416bf03e3d942fc973d72b9af9ce58ad68 Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Mon, 29 Dec 2025 11:00:36 -0300 Subject: [PATCH 072/114] Add a test that checks if transparent data is there when requested --- integration-tests/tests/state_service.rs | 79 +++++++++++++++++++++++- 1 file changed, 76 insertions(+), 3 deletions(-) diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index b8468be8c..86ded89fa 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -1905,10 +1905,10 @@ mod zebra { pub(crate) mod lightwallet_indexer { use futures::StreamExt as _; - use zaino_proto::proto::service::{ + use zaino_proto::proto::{service::{ AddressList, BlockId, BlockRange, GetAddressUtxosArg, GetSubtreeRootsArg, PoolType, TxFilter, - }; + }, utils::pool_types_into_i32_vec}; use zebra_rpc::methods::{GetAddressTxIdsRequest, GetBlock}; use super::*; @@ -2533,5 +2533,78 @@ mod zebra { state_service_taddress_balance ); } - } + + #[tokio::test(flavor = "multi_thread")] + async fn gat_transparent_data_from_compact_block_when_requested() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + true, + Some(NetworkKind::Regtest), + ) + .await; + + let clients = test_manager.clients.take().unwrap(); + let taddr = clients.get_faucet_address("transparent").await; + generate_blocks_and_poll_all_chain_indexes( + 5, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + 
.await; + + let state_service_taddress_balance = state_service_subscriber + .get_taddress_balance(AddressList { + addresses: vec![taddr.clone()], + }) + .await + .unwrap(); + let fetch_service_taddress_balance = fetch_service_subscriber + .get_taddress_balance(AddressList { + addresses: vec![taddr], + }) + .await + .unwrap(); + assert_eq!( + fetch_service_taddress_balance, + state_service_taddress_balance + ); + + let compact_block_range = state_service_subscriber.get_block_range( + BlockRange { + start: None, + end: None, + pool_types: pool_types_into_i32_vec( + [ + PoolType::Transparent, + PoolType::Sapling, + PoolType::Orchard + ].to_vec() + ) + } + ) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + for cb in compact_block_range.into_iter() { + for tx in cb.vtx { + // first transaction of a block is coinbase + assert!(tx.vin.first().unwrap().prevout_txid.is_empty()); + // script pub key of this transaction is not empty + assert!(!tx.vout.first().unwrap().script_pub_key.is_empty()); + } + } + } + } } From b5737148511c78b5d6da4897ff5a3144b9df845e Mon Sep 17 00:00:00 2001 From: Pacu Date: Tue, 25 Nov 2025 20:22:45 -0300 Subject: [PATCH 073/114] Add tests for fetch and state servico on default pool type requests note: I haven't run them yet --- integration-tests/tests/fetch_service.rs | 66 ++++++++++++++++- integration-tests/tests/state_service.rs | 94 +++++++++++++++++++++++- zaino-proto/src/proto/utils.rs | 5 +- zaino-state/src/backends/fetch.rs | 2 +- zaino-state/src/error.rs | 4 +- 5 files changed, 164 insertions(+), 7 deletions(-) diff --git a/integration-tests/tests/fetch_service.rs b/integration-tests/tests/fetch_service.rs index 3011a61b2..ff83a63d9 100644 --- a/integration-tests/tests/fetch_service.rs +++ b/integration-tests/tests/fetch_service.rs @@ -2,6 +2,7 @@ use futures::StreamExt as _; use zaino_fetch::jsonrpsee::connector::{test_node_and_return_url, JsonRpSeeConnector}; +use zaino_proto::proto::compact_formats::CompactBlock; 
use zaino_proto::proto::service::{ AddressList, BlockId, BlockRange, GetMempoolTxRequest, GetAddressUtxosArg, GetSubtreeRootsArg, PoolType, TransparentAddressBlockFilter, TxFilter, @@ -1063,6 +1064,59 @@ async fn fetch_service_get_block_range(validator: &ValidatorKin test_manager.close().await; } +#[allow(deprecated)] +async fn fetch_service_get_block_range_no_pools_returs_sapling_orchard(validator: &ValidatorKind) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(10, &fetch_service_subscriber) + .await; + + let block_range = BlockRange { + start: Some(BlockId { + height: 1, + hash: Vec::new(), + }), + end: Some(BlockId { + height: 10, + hash: Vec::new(), + }), + pool_types: vec![], + }; + + let fetch_service_stream = fetch_service_subscriber + .get_block_range(block_range.clone()) + .await + .unwrap(); + let fetch_service_compact_blocks: Vec<_> = fetch_service_stream.collect().await; + + let fetch_blocks: Vec<_> = fetch_service_compact_blocks + .into_iter() + .filter_map(|result| result.ok()) + .collect(); + + // no transparent data on outputs + for compact_block in fetch_blocks { + + let first_transaction = compact_block.vtx.first().unwrap(); + + // no transparent data for coinbase transaction + assert!(first_transaction.vin.is_empty()); + + for transaction in &compact_block.vtx[1..] 
{ + assert!(transaction.vin.is_empty(), "vin should be empty if transparent pool type not requested"); + assert!(transaction.vout.is_empty(), "vout should be empty if transparent pool type not requested"); + } + } + + test_manager.close().await; +} + #[allow(deprecated)] async fn fetch_service_get_block_range_nullifiers(validator: &ValidatorKind) { let mut test_manager = @@ -1098,7 +1152,7 @@ async fn fetch_service_get_block_range_nullifiers(validator: &V .unwrap(); let fetch_service_compact_blocks: Vec<_> = fetch_service_stream.collect().await; - let fetch_nullifiers: Vec<_> = fetch_service_compact_blocks + let fetch_nullifiers: Vec = fetch_service_compact_blocks .into_iter() .filter_map(|result| result.ok()) .collect(); @@ -1979,6 +2033,11 @@ mod zcashd { fetch_service_get_block_range::(&ValidatorKind::Zcashd).await; } + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range_no_pool_type_returns_sapling_orchard() { + fetch_service_get_block_range_no_pools_returs_sapling_orchard::(&ValidatorKind::Zcashd).await; + } + #[tokio::test(flavor = "multi_thread")] pub(crate) async fn block_range_nullifiers() { fetch_service_get_block_range_nullifiers::(&ValidatorKind::Zcashd).await; @@ -2154,6 +2213,11 @@ mod zebrad { pub(crate) async fn block() { fetch_service_get_block::(&ValidatorKind::Zebrad).await; } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range_no_pool_type_returns_sapling_orchard() { + fetch_service_get_block_range_no_pools_returs_sapling_orchard::(&ValidatorKind::Zebrad).await; + } #[tokio::test(flavor = "multi_thread")] pub(crate) async fn block_header() { diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index 86ded89fa..19d3a574f 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -636,6 +636,90 @@ async fn state_service_get_raw_mempool_testnet() { test_manager.close().await; } +async fn 
state_service_get_block_range_returns_default_pools(validator: &ValidatorKind) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua = clients.get_recipient_address("unified").await; + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) + .await + .unwrap(); + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let fetch_service_get_block_range = fetch_service_subscriber + .get_block_range(BlockRange { start: Some(BlockId { height: 101, hash: vec![]}), end: Some(BlockId { height: 102, hash: vec![]}), pool_types: vec![]}) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + + let state_service_get_block_range = state_service_subscriber + .get_block_range(BlockRange { start: Some(BlockId { height: 101, hash: vec![]}), end: Some(BlockId { height: 102, hash: vec![]}), pool_types: vec![]}) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + // check that the block range is the same + assert_eq!(fetch_service_get_block_range, state_service_get_block_range); + + 
let compact_block = state_service_get_block_range.first().unwrap(); + + assert_eq!(compact_block.height, 101); + + // the compact block has 2 transactions: coinbase and the quick_shield one + assert_eq!(compact_block.vtx.len(), 2); + + let coinbase_tx = compact_block.vtx.first().unwrap(); + assert_eq!(coinbase_tx.index, 0); + // tranparent data should not be present when no pool types are requested + assert_eq!(coinbase_tx.vin, vec![], "transparent data should not be present when no pool types are specified in the request."); + assert_eq!(coinbase_tx.vout, vec![], "transparent data should not be present when no pool types are specified in the request."); + test_manager.close().await; +} + + async fn state_service_z_get_treestate(validator: &ValidatorKind) { let ( mut test_manager, @@ -1719,6 +1803,11 @@ mod zebra { use super::*; + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn get_block_range_default_request_returns_no_t_data_regtest() { + state_service_get_block_range_returns_default_pools::(&ValidatorKind::Zebrad).await; + } + #[tokio::test(flavor = "multi_thread")] pub(crate) async fn subtrees_by_index_regtest() { state_service_z_get_subtrees_by_index::(&ValidatorKind::Zebrad).await; @@ -2218,7 +2307,7 @@ mod zebra { .map(Result::unwrap) .collect::>() .await; - let state_serviget_block_range = state_service_subscriber + let state_service_get_block_range = state_service_subscriber .get_block_range_nullifiers(request) .await .unwrap() @@ -2245,6 +2334,9 @@ mod zebra { } } + + + #[tokio::test(flavor = "multi_thread")] async fn get_block_range_full() { get_block_range_helper(false).await; diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs index 08ad0bf66..9e699cd48 100644 --- a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -1,5 +1,6 @@ use crate::proto::service::{BlockRange, PoolType}; +#[derive(Debug)] /// Errors that can arise when mapping `PoolType` from an `i32` value. 
pub enum PoolTypeError { /// Pool Type value was map to the enum `PoolType::Invalid`. @@ -53,7 +54,7 @@ pub enum GetBlockRangeError { /// End height out of range. Failed to convert to u32. EndHeightOutOfRange, /// An invalid pool type request was provided. - PoolTypArgumentError(PoolTypeError), + PoolTypeArgumentError(PoolTypeError), } /// `BlockRange` request that has been validated in terms of the semantics @@ -93,7 +94,7 @@ impl ValidatedBlockRangeRequest { }; let pool_types = pool_types_from_vector(&request.pool_types) - .map_err(|e| GetBlockRangeError::PoolTypArgumentError(e))?; + .map_err(|e| GetBlockRangeError::PoolTypeArgumentError(e))?; Ok(ValidatedBlockRangeRequest { start: start, diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index 5c4299a8e..79a7a49d7 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -45,7 +45,7 @@ use zaino_proto::proto::{ PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, - utils::{GetBlockRangeError, ValidatedBlockRangeRequest}, + utils::ValidatedBlockRangeRequest, }; use crate::TransactionHash; diff --git a/zaino-state/src/error.rs b/zaino-state/src/error.rs index 778049999..6ffbf8497 100644 --- a/zaino-state/src/error.rs +++ b/zaino-state/src/error.rs @@ -221,8 +221,8 @@ impl FetchServiceError { GetBlockRangeError::NoEndHeightProvided => FetchServiceError::TonicStatusError( tonic::Status::invalid_argument("Error: No start height given."), ), - GetBlockRangeError::PoolTypArgumentError(e) => FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument("Error: No start height given."), + GetBlockRangeError::PoolTypeArgumentError(_) => FetchServiceError::TonicStatusError( + tonic::Status::invalid_argument("Error: invalid pool type"), ), } } From 71842d592e285fbef736e50cb7642a6e7e27e722 Mon Sep 17 00:00:00 2001 From: Pacu Date: Thu, 27 Nov 2025 20:24:53 -0300 Subject: [PATCH 074/114] Add 
state_service_get_block_range_returns_xxxx_pools tests --- integration-tests/tests/fetch_service.rs | 17 +- integration-tests/tests/state_service.rs | 280 +++++++++++++++++++++-- zaino-proto/src/proto/utils.rs | 14 +- zaino-state/src/backends/fetch.rs | 9 +- zaino-state/src/backends/state.rs | 14 +- zaino-state/src/local_cache.rs | 36 ++- 6 files changed, 306 insertions(+), 64 deletions(-) diff --git a/integration-tests/tests/fetch_service.rs b/integration-tests/tests/fetch_service.rs index ff83a63d9..dad003209 100644 --- a/integration-tests/tests/fetch_service.rs +++ b/integration-tests/tests/fetch_service.rs @@ -1102,18 +1102,23 @@ async fn fetch_service_get_block_range_no_pools_returs_sapling_orchard(&ValidatorKind::Zebrad).await; } - + #[tokio::test(flavor = "multi_thread")] pub(crate) async fn block_range_no_pool_type_returns_sapling_orchard() { fetch_service_get_block_range_no_pools_returs_sapling_orchard::(&ValidatorKind::Zebrad).await; diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index 19d3a574f..1a8d3cc03 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -684,41 +684,285 @@ async fn state_service_get_block_range_returns_default_pools(val ) .await; + let start_height: u64 = 100; + let end_height: u64 = 103; + let fetch_service_get_block_range = fetch_service_subscriber - .get_block_range(BlockRange { start: Some(BlockId { height: 101, hash: vec![]}), end: Some(BlockId { height: 102, hash: vec![]}), pool_types: vec![]}) + .get_block_range(BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![], + }) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + let fetch_service_get_block_range_specifying_pools = fetch_service_subscriber + .get_block_range(BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], 
+ }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![PoolType::Sapling as i32, PoolType::Orchard as i32], + }) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + assert_eq!( + fetch_service_get_block_range, + fetch_service_get_block_range_specifying_pools + ); + + let state_service_get_block_range_specifying_pools = state_service_subscriber + .get_block_range(BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![PoolType::Sapling as i32, PoolType::Orchard as i32], + }) .await .unwrap() .map(Result::unwrap) .collect::>() .await; - let state_service_get_block_range = state_service_subscriber - .get_block_range(BlockRange { start: Some(BlockId { height: 101, hash: vec![]}), end: Some(BlockId { height: 102, hash: vec![]}), pool_types: vec![]}) + .get_block_range(BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![], + }) .await .unwrap() .map(Result::unwrap) .collect::>() .await; - // check that the block range is the same + assert_eq!( + state_service_get_block_range, + state_service_get_block_range_specifying_pools + ); + + // check that the block range is the same between fetch service and state service assert_eq!(fetch_service_get_block_range, state_service_get_block_range); - let compact_block = state_service_get_block_range.first().unwrap(); + let compact_block = state_service_get_block_range.last().unwrap(); + + assert_eq!(compact_block.height, end_height); - assert_eq!(compact_block.height, 101); + // the compact block has 1 transactions + assert_eq!(compact_block.vtx.len(), 1); - // the compact block has 2 transactions: coinbase and the quick_shield one - assert_eq!(compact_block.vtx.len(), 2); - - let coinbase_tx = compact_block.vtx.first().unwrap(); - 
assert_eq!(coinbase_tx.index, 0); + let shielded_tx = compact_block.vtx.first().unwrap(); + assert_eq!(shielded_tx.index, 1); // tranparent data should not be present when no pool types are requested - assert_eq!(coinbase_tx.vin, vec![], "transparent data should not be present when no pool types are specified in the request."); - assert_eq!(coinbase_tx.vout, vec![], "transparent data should not be present when no pool types are specified in the request."); + assert_eq!( + shielded_tx.vin, + vec![], + "transparent data should not be present when no pool types are specified in the request." + ); + assert_eq!( + shielded_tx.vout, + vec![], + "transparent data should not be present when no pool types are specified in the request." + ); test_manager.close().await; } +/// tests whether the `GetBlockRange` RPC returns all pools when requested +async fn state_service_get_block_range_returns_all_pools(validator: &ValidatorKind) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + for _ in 1..4 { + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + clients.faucet.sync_and_await().await.unwrap(); + } + }; + + let recipient_transparent = clients.get_recipient_address("transparent").await; + from_inputs::quick_send( + &mut clients.faucet, + 
vec![(&recipient_transparent, 250_000, None)], + ) + .await + .unwrap(); + + let recipient_sapling = clients.get_recipient_address("sapling").await; + from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_sapling, 250_000, None)], + ) + .await + .unwrap(); + + let recipient_ua = clients.get_recipient_address("unified").await; + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) + .await + .unwrap(); + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let start_height: u64 = 100; + let end_height: u64 = 106; + + let fetch_service_get_block_range = fetch_service_subscriber + .get_block_range(BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], + }) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + let state_service_get_block_range = state_service_subscriber + .get_block_range(BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], + }) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + // check that the block range is the same + assert_eq!(fetch_service_get_block_range, state_service_get_block_range); + + let compact_block = state_service_get_block_range.last().unwrap(); + + assert_eq!(compact_block.height, end_height); + + // the compact block has 3 transactions + assert_eq!(compact_block.vtx.len(), 3); + + let deshielding_tx = compact_block.vtx.first().unwrap(); + + dbg!("deshielding TX"); + + dbg!(deshielding_tx); + + assert_eq!(deshielding_tx.index, 1); + // 
tranparent data should not be present when no pool types are requested + assert!( + !deshielding_tx.vin.is_empty(), + "transparent data should be present when all pool types are specified in the request." + ); + assert!( + !deshielding_tx.vout.is_empty(), + "transparent data should not be present when no pool types are specified in the request." + ); + + let sapling_tx = compact_block.vtx[1].clone(); + assert_eq!(sapling_tx.index, 2); + + assert!( + !sapling_tx.spends.is_empty(), + "sapling data should be present when all pool types are specified in the request." + ); + assert!( + !sapling_tx.outputs.is_empty(), + "sapling data should be present when all pool types are specified in the request." + ); + + let sapling_tx = compact_block.vtx[1].clone(); + assert_eq!(sapling_tx.index, 2); + + assert!( + !sapling_tx.spends.is_empty(), + "sapling data should be present when all pool types are specified in the request." + ); + assert!( + !sapling_tx.outputs.is_empty(), + "sapling data should be present when all pool types are specified in the request." 
+ ); + + test_manager.close().await; +} async fn state_service_z_get_treestate(validator: &ValidatorKind) { let ( @@ -1807,7 +2051,12 @@ mod zebra { pub(crate) async fn get_block_range_default_request_returns_no_t_data_regtest() { state_service_get_block_range_returns_default_pools::(&ValidatorKind::Zebrad).await; } - + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn get_block_range_default_request_returns_all_pools_regtest() { + state_service_get_block_range_returns_all_pools::(&ValidatorKind::Zebrad).await; + } + #[tokio::test(flavor = "multi_thread")] pub(crate) async fn subtrees_by_index_regtest() { state_service_z_get_subtrees_by_index::(&ValidatorKind::Zebrad).await; @@ -2334,9 +2583,6 @@ mod zebra { } } - - - #[tokio::test(flavor = "multi_thread")] async fn get_block_range_full() { get_block_range_helper(false).await; diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs index 9e699cd48..29ecff965 100644 --- a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -71,23 +71,13 @@ impl ValidatedBlockRangeRequest { request: &BlockRange, ) -> Result { let start = match &request.start { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(GetBlockRangeError::StartHeightOutOfRange); - } - }, + Some(block_id) => block_id.height, None => { return Err(GetBlockRangeError::NoStartHeightProvided); } }; let end = match &request.end { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(GetBlockRangeError::EndHeightOutOfRange); - } - }, + Some(block_id) => block_id.height, None => { return Err(GetBlockRangeError::NoEndHeightProvided); } diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index 79a7a49d7..7b8b3c462 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -60,7 +60,7 @@ use crate::{ indexer::{ handle_raw_transaction, 
IndexerSubscriber, LightWalletIndexer, ZcashIndexer, ZcashService, }, - local_cache::{BlockCache, BlockCacheSubscriber}, + local_cache::{compact_block_with_pool_types, BlockCache, BlockCacheSubscriber}, status::StatusType, stream::{ AddressStream, CompactBlockStream, CompactTransactionStream, RawTransactionStream, @@ -824,7 +824,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { let start = validated_request.start(); let end = validated_request.end(); - let chain_height = self.block_cache.get_chain_height().await?.0; + let chain_height = self.block_cache.get_chain_height().await?.0 as u64; let fetch_service_clone = self.clone(); let service_timeout = self.config.service.timeout; let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); @@ -839,7 +839,8 @@ impl LightWalletIndexer for FetchServiceSubscriber { match fetch_service_clone.block_cache.get_compact_block( height.to_string(), ).await { - Ok(block) => { + Ok(mut block) => { + block = compact_block_with_pool_types(block, validated_request.pool_types()); if channel_tx.send(Ok(block)).await.is_err() { break; } @@ -911,7 +912,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { let start = validated_request.start(); let end = validated_request.end(); - let chain_height = self.block_cache.get_chain_height().await?.0; + let chain_height = self.block_cache.get_chain_height().await?.0 as u64; let fetch_service_clone = self.clone(); let service_timeout = self.config.service.timeout; let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index c47046885..d64622a63 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -567,7 +567,7 @@ impl StateServiceSubscriber { let start = validated_request.start(); let end = validated_request.end(); - let chain_height = self.block_cache.get_chain_height().await?.0; + let chain_height: u64 = 
self.block_cache.get_chain_height().await?.0 as u64; let fetch_service_clone = self.clone(); let service_timeout = self.config.service.timeout; let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); @@ -587,7 +587,7 @@ impl StateServiceSubscriber { ) } }?; - + // FIX: find out why there's repeated code fetching the chain tip and then the rest tokio::spawn(async move { let timeout = timeout( time::Duration::from_secs((service_timeout * 4) as u64), @@ -599,10 +599,12 @@ impl StateServiceSubscriber { .await { Ok(mut block) => { - if trim_non_nullifier { - block = compact_block_to_nullifiers(block); - } - Ok(block) + if trim_non_nullifier { + block = compact_block_to_nullifiers(block); + } else { + block = compact_block_with_pool_types(block, pool_types.clone()); + } + Ok(block) } Err(e) => { if end >= chain_height { diff --git a/zaino-state/src/local_cache.rs b/zaino-state/src/local_cache.rs index 5fcd9c5c7..76ec4b57c 100644 --- a/zaino-state/src/local_cache.rs +++ b/zaino-state/src/local_cache.rs @@ -384,29 +384,27 @@ pub(crate) fn compact_block_with_pool_types( pool_types: Vec, ) -> CompactBlock { if pool_types.is_empty() { + for compact_tx in &mut block.vtx { + // strip out transparent inputs if not Requested + compact_tx.vin.clear(); + compact_tx.vout.clear(); + } + } else { for compact_tx in &mut block.vtx { // strip out transparent inputs if not Requested if !pool_types.contains(&PoolType::Transparent) { - compact_tx.vin = Vec::new(); - compact_tx.vout = Vec::new(); + compact_tx.vin.clear(); + compact_tx.vout.clear(); + } + // strip out sapling if not requested + if !pool_types.contains(&PoolType::Sapling) { + compact_tx.spends.clear(); + compact_tx.outputs.clear(); + } + // strip out orchard if not requested + if !pool_types.contains(&PoolType::Orchard) { + compact_tx.actions.clear(); } - } - } - - for compact_tx in &mut block.vtx { - // strip out transparent inputs if not Requested - if 
!pool_types.contains(&PoolType::Transparent) { - compact_tx.vin = Vec::new(); - compact_tx.vout = Vec::new(); - } - // strip out sapling if not requested - if !pool_types.contains(&PoolType::Sapling) { - compact_tx.spends = Vec::new(); - compact_tx.outputs = Vec::new(); - } - // strip out orchard if not requested - if !pool_types.contains(&PoolType::Orchard) { - compact_tx.actions = Vec::new(); } } From ccca705ea15c6c29f2083e00e5958a304591f251 Mon Sep 17 00:00:00 2001 From: Pacu Date: Thu, 27 Nov 2025 20:38:37 -0300 Subject: [PATCH 075/114] Fix state_service_get_block_range_returns_all_pools test --- integration-tests/tests/state_service.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index 1a8d3cc03..76287002e 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -922,16 +922,8 @@ async fn state_service_get_block_range_returns_all_pools(valida let deshielding_tx = compact_block.vtx.first().unwrap(); - dbg!("deshielding TX"); - - dbg!(deshielding_tx); - assert_eq!(deshielding_tx.index, 1); // tranparent data should not be present when no pool types are requested - assert!( - !deshielding_tx.vin.is_empty(), - "transparent data should be present when all pool types are specified in the request." - ); assert!( !deshielding_tx.vout.is_empty(), "transparent data should not be present when no pool types are specified in the request." 
From d6efb828c9276ca4640b494b3d4c0f7e0fb52e08 Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Tue, 2 Dec 2025 19:43:41 -0300 Subject: [PATCH 076/114] Fix get_block_range_default_request_returns_all_pools_regtest cargo fmt cargo clippy --fix --- integration-tests/tests/state_service.rs | 56 +++++++++++++----------- zaino-proto/src/proto/utils.rs | 14 +++--- 2 files changed, 36 insertions(+), 34 deletions(-) diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index 76287002e..a412b493b 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -835,25 +835,29 @@ async fn state_service_get_block_range_returns_all_pools(valida }; let recipient_transparent = clients.get_recipient_address("transparent").await; - from_inputs::quick_send( + let deshielding_txid = from_inputs::quick_send( &mut clients.faucet, vec![(&recipient_transparent, 250_000, None)], ) .await - .unwrap(); + .unwrap() + .head; let recipient_sapling = clients.get_recipient_address("sapling").await; - from_inputs::quick_send( + let sapling_txid = from_inputs::quick_send( &mut clients.faucet, vec![(&recipient_sapling, 250_000, None)], ) .await - .unwrap(); + .unwrap() + .head; let recipient_ua = clients.get_recipient_address("unified").await; - from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) - .await - .unwrap(); + let orchard_txid = + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) + .await + .unwrap() + .head; generate_blocks_and_poll_all_chain_indexes( 1, @@ -920,37 +924,39 @@ async fn state_service_get_block_range_returns_all_pools(valida // the compact block has 3 transactions assert_eq!(compact_block.vtx.len(), 3); - let deshielding_tx = compact_block.vtx.first().unwrap(); + // transaction order is not guaranteed so it's necessary to look up for them by TXID + let deshielding_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == 
deshielding_txid.as_ref().to_vec()) + .unwrap(); - assert_eq!(deshielding_tx.index, 1); - // tranparent data should not be present when no pool types are requested assert!( !deshielding_tx.vout.is_empty(), - "transparent data should not be present when no pool types are specified in the request." + "transparent data should be present when transaparent pool type is specified in the request." ); - let sapling_tx = compact_block.vtx[1].clone(); - assert_eq!(sapling_tx.index, 2); + // transaction order is not guaranteed so it's necessary to look up for them by TXID + let sapling_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == sapling_txid.as_ref().to_vec()) + .unwrap(); - assert!( - !sapling_tx.spends.is_empty(), - "sapling data should be present when all pool types are specified in the request." - ); assert!( !sapling_tx.outputs.is_empty(), "sapling data should be present when all pool types are specified in the request." ); - let sapling_tx = compact_block.vtx[1].clone(); - assert_eq!(sapling_tx.index, 2); + let orchard_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == orchard_txid.as_ref().to_vec()) + .unwrap(); assert!( - !sapling_tx.spends.is_empty(), - "sapling data should be present when all pool types are specified in the request." - ); - assert!( - !sapling_tx.outputs.is_empty(), - "sapling data should be present when all pool types are specified in the request." + !orchard_tx.actions.is_empty(), + "orchard data should be present when all pool types are specified in the request." 
); test_manager.close().await; diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs index 29ecff965..e341254ac 100644 --- a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -84,12 +84,12 @@ impl ValidatedBlockRangeRequest { }; let pool_types = pool_types_from_vector(&request.pool_types) - .map_err(|e| GetBlockRangeError::PoolTypeArgumentError(e))?; + .map_err(GetBlockRangeError::PoolTypeArgumentError)?; Ok(ValidatedBlockRangeRequest { - start: start, - end: end, - pool_types: pool_types, + start, + end, + pool_types, }) } @@ -110,11 +110,7 @@ impl ValidatedBlockRangeRequest { /// checks whether this request is specified in reversed order pub fn is_reverse_ordered(&self) -> bool { - if self.start > self.end { - true - } else { - false - } + self.start > self.end } /// Reverses the order of this request From 7ce0dc193c0de6dbd6991b6e539cc43d5b13251d Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Tue, 2 Dec 2025 20:59:56 -0300 Subject: [PATCH 077/114] Implement transparent data tests for fetch service cargo fmt & cargo clippy --- integration-tests/tests/fetch_service.rs | 162 ++++++++++++++++++++++- 1 file changed, 156 insertions(+), 6 deletions(-) diff --git a/integration-tests/tests/fetch_service.rs b/integration-tests/tests/fetch_service.rs index dad003209..bd49e7ddb 100644 --- a/integration-tests/tests/fetch_service.rs +++ b/integration-tests/tests/fetch_service.rs @@ -1041,11 +1041,7 @@ async fn fetch_service_get_block_range(validator: &ValidatorKin height: 10, hash: Vec::new(), }), - pool_types: vec![ - PoolType::Transparent as i32, - PoolType::Sapling as i32, - PoolType::Orchard as i32, - ], + pool_types: vec![], }; let fetch_service_stream = fetch_service_subscriber @@ -1064,6 +1060,150 @@ async fn fetch_service_get_block_range(validator: &ValidatorKin test_manager.close().await; } +#[allow(deprecated)] +async fn fetch_service_get_block_range_returns_all_pools(validator: &ValidatorKind) { + let mut 
test_manager = + TestManager::::launch(validator, None, None, None, true, false, false) + .await + .unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + for _ in 1..4 { + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + } + } else { + // zcashd + test_manager + .generate_blocks_and_poll_indexer(11, &fetch_service_subscriber) + .await; + + clients.faucet.sync_and_await().await.unwrap(); + for _ in 1..4 { + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + } + } + + let recipient_transparent = clients.get_recipient_address("transparent").await; + let deshielding_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_transparent, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_sapling = clients.get_recipient_address("sapling").await; + let sapling_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_sapling, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_ua = clients.get_recipient_address("unified").await; + let orchard_txid = + zaino_testutils::from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) + .await + .unwrap() + .head; + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + let 
start_height: u64 = 100; + let end_height: u64 = 106; + + let fetch_service_get_block_range = fetch_service_subscriber + .get_block_range(BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], + }) + .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + let compact_block = fetch_service_get_block_range.last().unwrap(); + + assert_eq!(compact_block.height, end_height); + + // the compact block has 3 transactions + assert_eq!(compact_block.vtx.len(), 3); + + // transaction order is not guaranteed so it's necessary to look up for them by TXID + let deshielding_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == deshielding_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !deshielding_tx.vout.is_empty(), + "transparent data should be present when transaparent pool type is specified in the request." + ); + + // transaction order is not guaranteed so it's necessary to look up for them by TXID + let sapling_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == sapling_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !sapling_tx.outputs.is_empty(), + "sapling data should be present when all pool types are specified in the request." + ); + + let orchard_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == orchard_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !orchard_tx.actions.is_empty(), + "orchard data should be present when all pool types are specified in the request." 
+ ); + + test_manager.close().await; +} + #[allow(deprecated)] async fn fetch_service_get_block_range_no_pools_returs_sapling_orchard(validator: &ValidatorKind) { let mut test_manager = @@ -2043,6 +2183,11 @@ mod zcashd { fetch_service_get_block_range_no_pools_returs_sapling_orchard::(&ValidatorKind::Zcashd).await; } + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range_returns_all_blocks() { + fetch_service_get_block_range_returns_all_pools::(&ValidatorKind::Zcashd).await; + } + #[tokio::test(flavor = "multi_thread")] pub(crate) async fn block_range_nullifiers() { fetch_service_get_block_range_nullifiers::(&ValidatorKind::Zcashd).await; @@ -2265,10 +2410,15 @@ mod zebrad { } #[tokio::test(flavor = "multi_thread")] - pub(crate) async fn block_range() { + pub(crate) async fn block_range_returns_default_pools() { fetch_service_get_block_range::(&ValidatorKind::Zebrad).await; } + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range_returns_all_pools_when_requested() { + fetch_service_get_block_range_returns_all_pools::(&ValidatorKind::Zebrad).await; + } + #[tokio::test(flavor = "multi_thread")] pub(crate) async fn block_range_nullifiers() { fetch_service_get_block_range_nullifiers::(&ValidatorKind::Zebrad).await; From a83e213eac742c8815a609558ac348f91e810839 Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Tue, 30 Dec 2025 18:00:48 -0300 Subject: [PATCH 078/114] Fix test wallet client initialization failure --- integration-tests/tests/fetch_service.rs | 181 ++++++++++++++++------- 1 file changed, 131 insertions(+), 50 deletions(-) diff --git a/integration-tests/tests/fetch_service.rs b/integration-tests/tests/fetch_service.rs index bd49e7ddb..404bef7e7 100644 --- a/integration-tests/tests/fetch_service.rs +++ b/integration-tests/tests/fetch_service.rs @@ -1062,8 +1062,9 @@ async fn fetch_service_get_block_range(validator: &ValidatorKin #[allow(deprecated)] async fn 
fetch_service_get_block_range_returns_all_pools(validator: &ValidatorKind) { + let mut test_manager = - TestManager::::launch(validator, None, None, None, true, false, false) + TestManager::::launch(validator, None, None, None, true, false, true) .await .unwrap(); @@ -1173,6 +1174,8 @@ async fn fetch_service_get_block_range_returns_all_pools(valida .find(|tx| tx.txid == deshielding_txid.as_ref().to_vec()) .unwrap(); + dbg!(deshielding_tx); + assert!( !deshielding_tx.vout.is_empty(), "transparent data should be present when transaparent pool type is specified in the request." @@ -1205,59 +1208,142 @@ async fn fetch_service_get_block_range_returns_all_pools(valida } #[allow(deprecated)] -async fn fetch_service_get_block_range_no_pools_returs_sapling_orchard(validator: &ValidatorKind) { +async fn fetch_service_get_block_range_no_pools_returns_sapling_orchard(validator: &ValidatorKind) { + let mut test_manager = - TestManager::::launch(validator, None, None, None, true, false, false) + TestManager::::launch(validator, None, None, None, true, false, true) .await .unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + for _ in 1..4 { + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + } + } else { + // zcashd + test_manager + .generate_blocks_and_poll_indexer(11, &fetch_service_subscriber) + .await; + + clients.faucet.sync_and_await().await.unwrap(); + for _ in 1..4 { + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager 
+ .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + } + } + + let recipient_transparent = clients.get_recipient_address("transparent").await; + let deshielding_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_transparent, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_sapling = clients.get_recipient_address("sapling").await; + let sapling_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_sapling, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_ua = clients.get_recipient_address("unified").await; + let orchard_txid = + zaino_testutils::from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) + .await + .unwrap() + .head; + test_manager - .generate_blocks_and_poll_indexer(10, &fetch_service_subscriber) + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) .await; - let block_range = BlockRange { - start: Some(BlockId { - height: 1, - hash: Vec::new(), - }), - end: Some(BlockId { - height: 10, - hash: Vec::new(), - }), - pool_types: vec![], - }; + let start_height: u64 = 100; + let end_height: u64 = 106; - let fetch_service_stream = fetch_service_subscriber - .get_block_range(block_range.clone()) + let fetch_service_get_block_range = fetch_service_subscriber + .get_block_range(BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![], + }) .await + .unwrap() + .map(Result::unwrap) + .collect::>() + .await; + + let compact_block = fetch_service_get_block_range.last().unwrap(); + + assert_eq!(compact_block.height, end_height); + + // the compact block has 3 transactions + assert_eq!(compact_block.vtx.len(), 3); + + // transaction order is not guaranteed so it's necessary to look up for them by TXID + let deshielding_tx = 
compact_block + .vtx + .iter() + .find(|tx| tx.txid == deshielding_txid.as_ref().to_vec()) .unwrap(); - let fetch_service_compact_blocks: Vec<_> = fetch_service_stream.collect().await; - let fetch_blocks: Vec<_> = fetch_service_compact_blocks - .into_iter() - .filter_map(|result| result.ok()) - .collect(); + assert!( + deshielding_tx.vout.is_empty(), + "transparent data should not be present when transaparent pool type is specified in the request." + ); - // no transparent data on outputs - for compact_block in fetch_blocks { - let first_transaction = compact_block.vtx.first().unwrap(); + // transaction order is not guaranteed so it's necessary to look up for them by TXID + let sapling_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == sapling_txid.as_ref().to_vec()) + .unwrap(); - // no transparent data for coinbase transaction - assert!(first_transaction.vin.is_empty()); + assert!( + !sapling_tx.outputs.is_empty(), + "sapling data should be present when default pool types are specified in the request." + ); - for transaction in &compact_block.vtx[1..] { - assert!( - transaction.vin.is_empty(), - "vin should be empty if transparent pool type not requested" - ); - assert!( - transaction.vout.is_empty(), - "vout should be empty if transparent pool type not requested" - ); - } - } + let orchard_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == orchard_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !orchard_tx.actions.is_empty(), + "orchard data should be present when default pool types are specified in the request." 
+ ); test_manager.close().await; } @@ -2180,7 +2266,7 @@ mod zcashd { #[tokio::test(flavor = "multi_thread")] pub(crate) async fn block_range_no_pool_type_returns_sapling_orchard() { - fetch_service_get_block_range_no_pools_returs_sapling_orchard::(&ValidatorKind::Zcashd).await; + fetch_service_get_block_range_no_pools_returns_sapling_orchard::(&ValidatorKind::Zcashd).await; } #[tokio::test(flavor = "multi_thread")] @@ -2364,9 +2450,14 @@ mod zebrad { fetch_service_get_block::(&ValidatorKind::Zebrad).await; } + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range_returns_all_pools_when_requested() { + fetch_service_get_block_range_returns_all_pools::(&ValidatorKind::Zebrad).await; + } + #[tokio::test(flavor = "multi_thread")] pub(crate) async fn block_range_no_pool_type_returns_sapling_orchard() { - fetch_service_get_block_range_no_pools_returs_sapling_orchard::(&ValidatorKind::Zebrad).await; + fetch_service_get_block_range_no_pools_returns_sapling_orchard::(&ValidatorKind::Zebrad).await; } #[tokio::test(flavor = "multi_thread")] @@ -2409,16 +2500,6 @@ mod zebrad { fetch_service_get_block_nullifiers::(&ValidatorKind::Zebrad).await; } - #[tokio::test(flavor = "multi_thread")] - pub(crate) async fn block_range_returns_default_pools() { - fetch_service_get_block_range::(&ValidatorKind::Zebrad).await; - } - - #[tokio::test(flavor = "multi_thread")] - pub(crate) async fn block_range_returns_all_pools_when_requested() { - fetch_service_get_block_range_returns_all_pools::(&ValidatorKind::Zebrad).await; - } - #[tokio::test(flavor = "multi_thread")] pub(crate) async fn block_range_nullifiers() { fetch_service_get_block_range_nullifiers::(&ValidatorKind::Zebrad).await; From 89abb7822e50a4dd599cbdaafa0f5d173908e3a0 Mon Sep 17 00:00:00 2001 From: Francisco Gindre Date: Tue, 30 Dec 2025 18:46:31 -0300 Subject: [PATCH 079/114] Add documentation on how to update zaino-proto's git subtree --- zaino-proto/README.md | 65 
+++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 zaino-proto/README.md diff --git a/zaino-proto/README.md b/zaino-proto/README.md new file mode 100644 index 000000000..780175b6c --- /dev/null +++ b/zaino-proto/README.md @@ -0,0 +1,65 @@ +# Zaino Proto files module + +This module encapsulates the lightclient-protocol functionality and imports the canonicals files +using `git subtree`. + + +Below you can see the structure of the module + +```` +zaino-proto +├── build.rs +├── build.rs.bak +├── Cargo.toml +├── CHANGELOG.md +├── lightwallet-protocol <=== this is the git subtree +│   ├── CHANGELOG.md +│   ├── LICENSE +│   └── walletrpc +│   ├── compact_formats.proto +│   └── service.proto +├── proto +│   ├── compact_formats.proto -> ../lightwallet-protocol/walletrpc/compact_formats.proto +│   ├── proposal.proto +│   └── service.proto -> ../lightwallet-protocol/walletrpc/service.proto +└── src + ├── lib.rs + ├── proto + │   ├── compact_formats.rs + │   ├── proposal.rs + │   ├── service.rs + │   └── utils.rs + └── proto.rs +``` + +Handling maintaining the git subtree history has its own tricks. We recommend developers updating +zaino proto that they are wary of these shortcomings. + +If you need to update the canonical files to for your feature, maintain a linear and simple git +commit history in your PR. + +We recommend that PRs that change the reference to the git subtree do so in this fashion. + +for example: +============ + +when doing +``` +git subtree --prefix=zaino-proto/lightwallet-protocol pull git@github.com:zcash/lightwallet-protocol.git v0.4.0 --squash +``` + +your branch's commits must be sequenced like this. 
+ +``` + your-branch-name + - commit applying the git subtree command + - commit merging the canonical files + - commits fixing compiler errors + - commit indicating the version adopted in the CHANGELOG.md of zaino-proto +``` + +If you are developing the `lightclient-protocol` and adopting it on Zaino, it is recommended that +you don't do subsequent `git subtree` to revisions and always rebase against the latest latest version +that you will be using in your latest commit to avoid rebasing issues and also keeping a coherent +git commit history for when your branch merges to `dev`. + From b895ad13d211227cdaa53f9e87574c74ca0b0b00 Mon Sep 17 00:00:00 2001 From: pacu Date: Thu, 15 Jan 2026 18:20:50 -0800 Subject: [PATCH 080/114] Clean up tests a bit and add docs --- integration-tests/tests/state_service.rs | 67 ++++++++---------------- 1 file changed, 21 insertions(+), 46 deletions(-) diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index a412b493b..33f740710 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -636,6 +636,9 @@ async fn state_service_get_raw_mempool_testnet() { test_manager.close().await; } +/// Tests whether that calls to `get_block_range` with the same block range are the same when +/// specifying the default `PoolType`s and passing and empty Vec to verify that the method falls +/// back to the default pools when these are not explicitly specified. 
async fn state_service_get_block_range_returns_default_pools(validator: &ValidatorKind) { let ( mut test_manager, @@ -687,8 +690,7 @@ async fn state_service_get_block_range_returns_default_pools(val let start_height: u64 = 100; let end_height: u64 = 103; - let fetch_service_get_block_range = fetch_service_subscriber - .get_block_range(BlockRange { + let default_pools_request = BlockRange { start: Some(BlockId { height: start_height, hash: vec![], @@ -698,15 +700,17 @@ async fn state_service_get_block_range_returns_default_pools(val hash: vec![], }), pool_types: vec![], - }) + }; + + let fetch_service_get_block_range = fetch_service_subscriber + .get_block_range(default_pools_request.clone()) .await .unwrap() .map(Result::unwrap) .collect::>() .await; - let fetch_service_get_block_range_specifying_pools = fetch_service_subscriber - .get_block_range(BlockRange { + let explicit_default_pool_request = BlockRange { start: Some(BlockId { height: start_height, hash: vec![], @@ -716,7 +720,10 @@ async fn state_service_get_block_range_returns_default_pools(val hash: vec![], }), pool_types: vec![PoolType::Sapling as i32, PoolType::Orchard as i32], - }) + }; + + let fetch_service_get_block_range_specifying_pools = fetch_service_subscriber + .get_block_range(explicit_default_pool_request.clone()) .await .unwrap() .map(Result::unwrap) @@ -729,17 +736,7 @@ async fn state_service_get_block_range_returns_default_pools(val ); let state_service_get_block_range_specifying_pools = state_service_subscriber - .get_block_range(BlockRange { - start: Some(BlockId { - height: start_height, - hash: vec![], - }), - end: Some(BlockId { - height: end_height, - hash: vec![], - }), - pool_types: vec![PoolType::Sapling as i32, PoolType::Orchard as i32], - }) + .get_block_range(explicit_default_pool_request) .await .unwrap() .map(Result::unwrap) @@ -747,17 +744,7 @@ async fn state_service_get_block_range_returns_default_pools(val .await; let state_service_get_block_range = state_service_subscriber 
- .get_block_range(BlockRange { - start: Some(BlockId { - height: start_height, - hash: vec![], - }), - end: Some(BlockId { - height: end_height, - hash: vec![], - }), - pool_types: vec![], - }) + .get_block_range(default_pools_request) .await .unwrap() .map(Result::unwrap) @@ -870,8 +857,7 @@ async fn state_service_get_block_range_returns_all_pools(valida let start_height: u64 = 100; let end_height: u64 = 106; - let fetch_service_get_block_range = fetch_service_subscriber - .get_block_range(BlockRange { + let block_range = BlockRange { start: Some(BlockId { height: start_height, hash: vec![], @@ -885,7 +871,10 @@ async fn state_service_get_block_range_returns_all_pools(valida PoolType::Sapling as i32, PoolType::Orchard as i32, ], - }) + }; + + let fetch_service_get_block_range = fetch_service_subscriber + .get_block_range(block_range.clone()) .await .unwrap() .map(Result::unwrap) @@ -893,21 +882,7 @@ async fn state_service_get_block_range_returns_all_pools(valida .await; let state_service_get_block_range = state_service_subscriber - .get_block_range(BlockRange { - start: Some(BlockId { - height: start_height, - hash: vec![], - }), - end: Some(BlockId { - height: end_height, - hash: vec![], - }), - pool_types: vec![ - PoolType::Transparent as i32, - PoolType::Sapling as i32, - PoolType::Orchard as i32, - ], - }) + .get_block_range(block_range) .await .unwrap() .map(Result::unwrap) From 130ab236fff785bcb1876d341e6c6f33686ab4f9 Mon Sep 17 00:00:00 2001 From: pacu Date: Tue, 20 Jan 2026 20:14:03 -0300 Subject: [PATCH 081/114] Add convenience methods to `PoolTypeFilter` and tests --- zaino-proto/src/proto/utils.rs | 66 ++++++++++++++++++++++++---------- 1 file changed, 47 insertions(+), 19 deletions(-) diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs index e341254ac..3f96aef98 100644 --- a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -1,6 +1,6 @@ use crate::proto::service::{BlockRange, PoolType}; -#[derive(Debug)] 
+#[derive(Debug, PartialEq, Eq)] /// Errors that can arise when mapping `PoolType` from an `i32` value. pub enum PoolTypeError { /// Pool Type value was map to the enum `PoolType::Invalid`. @@ -9,8 +9,9 @@ pub enum PoolTypeError { UnknownPoolType(i32), } -// Converts a vector of pool_types (i32) into its rich-type representation -// Returns `None` when invalid `pool_types` are found +/// Converts a vector of pool_types (i32) into its rich-type representation +/// Returns `PoolTypeError::InvalidPoolType` when invalid `pool_types` are found +/// or `PoolTypeError::UnknownPoolType` if unknown ones are found. pub fn pool_types_from_vector(pool_types: &[i32]) -> Result, PoolTypeError> { let pools = if pool_types.is_empty() { vec![PoolType::Sapling, PoolType::Orchard] @@ -135,23 +136,42 @@ impl PoolTypeFilter { } } + /// A PoolType Filter that will include all existing pool types. + pub fn includes_all() -> Self { + PoolTypeFilter { + include_transparent: true, + include_sapling: true, + include_orchard: true + } + } + + /// create a `PoolTypeFilter` from a vector of raw i32 `PoolType`s + /// If the vector is empty it will return `Self::default()`. + /// If the vector contains `PoolType::Invalid` or the vector contains more than 3 elements + /// returns `PoolTypeError::InvalidPoolType` + pub fn new_from_slice(pool_types: &[i32]) -> Result { + let pool_types = pool_types_from_vector(pool_types)?; + + Self::new_from_pool_types(&pool_types) + } + /// create a `PoolTypeFilter` from a vector of `PoolType` /// If the vector is empty it will return `Self::default()`. 
- /// If the vector contains `PoolType::Invalid`, returns `None` - /// If the vector contains more than 3 elements, returns `None` - pub fn new_from_pool_types(pool_types: &Vec) -> Option { + /// If the vector contains `PoolType::Invalid` or the vector contains more than 3 elements + /// returns `PoolTypeError::InvalidPoolType` + pub fn new_from_pool_types(pool_types: &Vec) -> Result { if pool_types.len() > PoolType::Orchard as usize { - return None; + return Err(PoolTypeError::InvalidPoolType); } if pool_types.is_empty() { - Some(Self::default()) + Ok(Self::default()) } else { let mut filter = PoolTypeFilter::empty(); for pool_type in pool_types { match pool_type { - PoolType::Invalid => return None, + PoolType::Invalid => return Err(PoolTypeError::InvalidPoolType), PoolType::Transparent => filter.include_transparent = true, PoolType::Sapling => filter.include_sapling = true, PoolType::Orchard => filter.include_orchard = true, @@ -160,9 +180,9 @@ impl PoolTypeFilter { // guard against returning an invalid state this shouls never happen. 
if filter.is_empty() { - return Some(Self::default()); + return Ok(Self::default()); } else { - return Some(filter); + return Ok(filter); } } } @@ -213,10 +233,10 @@ impl PoolTypeFilter { #[cfg(test)] mod test { - use crate::proto::{service::PoolType, utils::PoolTypeFilter}; + use crate::proto::{service::PoolType, utils::{PoolTypeError, PoolTypeFilter}}; #[test] - fn test_pool_type_filter_none_when_invalid() { + fn test_pool_type_filter_fails_when_invalid() { let pools = [ PoolType::Transparent, PoolType::Sapling, @@ -225,11 +245,11 @@ mod test { ] .to_vec(); - assert_eq!(PoolTypeFilter::new_from_pool_types(&pools), None); + assert_eq!(PoolTypeFilter::new_from_pool_types(&pools), Err(PoolTypeError::InvalidPoolType)); } #[test] - fn test_pool_type_filter_none_when_too_many_items() { + fn test_pool_type_filter_fails_when_too_many_items() { let pools = [ PoolType::Transparent, PoolType::Sapling, @@ -238,7 +258,7 @@ mod test { ] .to_vec(); - assert_eq!(PoolTypeFilter::new_from_pool_types(&pools), None); + assert_eq!(PoolTypeFilter::new_from_pool_types(&pools), Err(PoolTypeError::InvalidPoolType)); } #[test] @@ -247,7 +267,7 @@ mod test { assert_eq!( PoolTypeFilter::new_from_pool_types(&pools), - Some(PoolTypeFilter::from_checked_parts(true, true, false)) + Ok(PoolTypeFilter::from_checked_parts(true, true, false)) ); } @@ -257,7 +277,7 @@ mod test { assert_eq!( PoolTypeFilter::new_from_pool_types(&pools), - Some(PoolTypeFilter::from_checked_parts(true, false, false)) + Ok(PoolTypeFilter::from_checked_parts(true, false, false)) ); } @@ -265,7 +285,15 @@ mod test { fn test_pool_type_filter_default() { assert_eq!( PoolTypeFilter::new_from_pool_types(&vec![]), - Some(PoolTypeFilter::default()) + Ok(PoolTypeFilter::default()) + ); + } + + #[test] + fn test_pool_type_filter_includes_all() { + assert_eq!( + PoolTypeFilter::from_checked_parts(true, true, true), + PoolTypeFilter::includes_all() ); } } From f8e4160826a25d35ab74ccb16c80c7f9476ddd63 Mon Sep 17 00:00:00 2001 From: 
pacu Date: Tue, 20 Jan 2026 20:16:26 -0300 Subject: [PATCH 082/114] Ensure that all Full to Compact conversions account for PoolTypes --- zaino-fetch/src/chain/block.rs | 4 ++-- zaino-fetch/src/chain/transaction.rs | 4 ++-- zaino-state/src/backends/state.rs | 20 ++++++++++++++++++-- zaino-state/src/local_cache.rs | 5 +++-- 4 files changed, 25 insertions(+), 8 deletions(-) diff --git a/zaino-fetch/src/chain/block.rs b/zaino-fetch/src/chain/block.rs index 78b729cd9..c98ac1443 100644 --- a/zaino-fetch/src/chain/block.rs +++ b/zaino-fetch/src/chain/block.rs @@ -365,7 +365,7 @@ impl FullBlock { return Err(ParseError::InvalidData(format!( "Error decoding full block - {} bytes of Remaining data. Compact Block Created: ({:?})", remaining_data.len(), - full_block.into_compact_block(0, 0, PoolTypeFilter::default()) + full_block.into_compact_block(0, 0, PoolTypeFilter::includes_all()) ))); } Ok(full_block) @@ -386,7 +386,7 @@ impl FullBlock { .enumerate() .filter_map(|(index, tx)| { if tx.has_shielded_elements() { - Some(tx.to_compact_tx(Some(index as u64), pool_types.clone())) + Some(tx.to_compact_tx(Some(index as u64), &pool_types)) } else { None } diff --git a/zaino-fetch/src/chain/transaction.rs b/zaino-fetch/src/chain/transaction.rs index 81d6e1ced..c0c0725a8 100644 --- a/zaino-fetch/src/chain/transaction.rs +++ b/zaino-fetch/src/chain/transaction.rs @@ -1134,7 +1134,7 @@ impl FullTransaction { /// Converts a zcash full transaction into a compact transaction. #[deprecated] pub fn to_compact(self, index: u64) -> Result { - self.to_compact_tx(Some(index), PoolTypeFilter::default()) + self.to_compact_tx(Some(index), &PoolTypeFilter::default()) } /// Converts a Zcash Transaction into a `CompactTx` of the Light wallet protocol. 
@@ -1144,7 +1144,7 @@ impl FullTransaction { pub fn to_compact_tx( self, index: Option, - pool_types: PoolTypeFilter, + pool_types: &PoolTypeFilter, ) -> Result { let hash = self.tx_id(); diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index d64622a63..41154bd0d 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -47,7 +47,7 @@ use zaino_proto::proto::{ GetAddressUtxosReplyList, GetMempoolTxRequest, LightdInfo, PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, - utils::{pool_types_from_vector, PoolTypeError, ValidatedBlockRangeRequest}, + utils::{PoolTypeError, PoolTypeFilter, ValidatedBlockRangeRequest, pool_types_from_vector}, }; use zcash_protocol::consensus::NetworkType; @@ -2240,6 +2240,22 @@ impl LightWalletIndexer for StateServiceSubscriber { exclude_txids.push(hex_string_txid); } + let pool_types = match PoolTypeFilter::new_from_slice(&request.pool_types) { + Ok(pool_type_filter) => pool_type_filter, + Err(PoolTypeError::InvalidPoolType) => return Err(StateServiceError::TonicStatusError( + tonic::Status::invalid_argument(format!( + "Error: An invalid `PoolType' was found" + )), + )), + Err(PoolTypeError::UnknownPoolType(unknown_pool_type)) => return Err(StateServiceError::TonicStatusError( + tonic::Status::invalid_argument(format!( + "Error: Unknown `PoolType' {} was found", + unknown_pool_type + )), + )) + }; + + let mempool = self.mempool.clone(); let service_timeout = self.config.service.timeout; let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); @@ -2277,7 +2293,7 @@ impl LightWalletIndexer for StateServiceSubscriber { .send( transaction .1 - .to_compact(0) + .to_compact_tx(None, &pool_types) .map_err(|e| tonic::Status::unknown(e.to_string())), ) .await diff --git a/zaino-state/src/local_cache.rs b/zaino-state/src/local_cache.rs index 76ec4b57c..d2a2d0fb5 100644 --- 
a/zaino-state/src/local_cache.rs +++ b/zaino-state/src/local_cache.rs @@ -281,9 +281,10 @@ async fn try_state_path( block_hex.as_ref(), Some(display_txids_to_server(txid_strings)?), )? - .into_compact( + .into_compact_block( u32::try_from(trees.sapling())?, u32::try_from(trees.orchard())?, + PoolTypeFilter::includes_all() )?, )) } @@ -347,7 +348,7 @@ async fn try_fetcher_path( type_name::(), )) })?, - PoolTypeFilter::default(), + PoolTypeFilter::includes_all(), ) .map_err(|e| { RpcRequestError::Transport(TransportError::BadNodeData( From 13bc1e6a95a4c70f4e0ea47e66a095e005bccc64 Mon Sep 17 00:00:00 2001 From: pacu Date: Tue, 20 Jan 2026 20:16:58 -0300 Subject: [PATCH 083/114] Fix transparent data tests --- integration-tests/tests/fetch_service.rs | 44 +++++++++++++----------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/integration-tests/tests/fetch_service.rs b/integration-tests/tests/fetch_service.rs index 404bef7e7..0333113c5 100644 --- a/integration-tests/tests/fetch_service.rs +++ b/integration-tests/tests/fetch_service.rs @@ -1093,17 +1093,10 @@ async fn fetch_service_get_block_range_returns_all_pools(valida } else { // zcashd test_manager - .generate_blocks_and_poll_indexer(11, &fetch_service_subscriber) + .generate_blocks_and_poll_indexer(14, &fetch_service_subscriber) .await; clients.faucet.sync_and_await().await.unwrap(); - for _ in 1..4 { - clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); - test_manager - .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) - .await; - clients.faucet.sync_and_await().await.unwrap(); - } } let recipient_transparent = clients.get_recipient_address("transparent").await; @@ -1135,8 +1128,16 @@ async fn fetch_service_get_block_range_returns_all_pools(valida .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) .await; - let start_height: u64 = 100; - let end_height: u64 = 106; + let start_height: u64 = if matches!(validator, ValidatorKind::Zebrad) { + 100 + } else { + 1 + }; + 
let end_height: u64 = if matches!(validator, ValidatorKind::Zebrad) { + 106 + } else { + 6 + }; let fetch_service_get_block_range = fetch_service_subscriber .get_block_range(BlockRange { @@ -1240,17 +1241,10 @@ async fn fetch_service_get_block_range_no_pools_returns_sapling_orchard(&ValidatorKind::Zcashd).await; } From 6aa8866d1067b75dc10fed18758de4dfa01f2c46 Mon Sep 17 00:00:00 2001 From: Larry Ruane Date: Mon, 19 Jan 2026 16:48:01 -0500 Subject: [PATCH 084/114] minor fixes (updates) to rpc_api.md --- docs/rpc_api.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/rpc_api.md b/docs/rpc_api.md index 7c73e8dab..1a731448f 100644 --- a/docs/rpc_api.md +++ b/docs/rpc_api.md @@ -1,6 +1,9 @@ # Zaino RPC APIs ## Lightwallet gRPC Services -Zaino Currently Serves the following gRPC services as defined in the [LightWallet Protocol](https://github.com/zcash/librustzcash/blob/main/zcash_client_backend/proto/service.proto): +Zaino Currently Serves the following gRPC services as defined in the [LightWallet Protocol](https://github.com/zcash/lightwallet-protocol/blob/main/walletrpc/service.proto): + +(gRPC service (function) arguments and return values are defined [here](https://github.com/zcash/lightwallet-protocol/blob/main/walletrpc/compact_formats.proto)) + - GetLatestBlock (ChainSpec) returns (BlockID) - GetBlock (BlockID) returns (CompactBlock) - GetBlockNullifiers (BlockID) returns (CompactBlock) @@ -8,7 +11,7 @@ Zaino Currently Serves the following gRPC services as defined in the [LightWalle - GetBlockRangeNullifiers (BlockRange) returns (stream CompactBlock) - GetTransaction (TxFilter) returns (RawTransaction) - SendTransaction (RawTransaction) returns (SendResponse) - - GetTaddressTxids (TransparentAddressBlockFilter) returns (stream RawTransaction) + - GetTaddressTransactions (TransparentAddressBlockFilter) returns (stream RawTransaction) - GetTaddressBalance (AddressList) returns (Balance) - GetTaddressBalanceStream (stream Address) 
returns (Balance) (**MARKED FOR DEPRECATION**) - GetMempoolTx (Exclude) returns (stream CompactTx) @@ -19,11 +22,8 @@ Zaino Currently Serves the following gRPC services as defined in the [LightWalle - GetAddressUtxos (GetAddressUtxosArg) returns (GetAddressUtxosReplyList) - GetAddressUtxosStream (GetAddressUtxosArg) returns (stream GetAddressUtxosReply) - GetLightdInfo (Empty) returns (LightdInfo) - - Ping (Duration) returns (PingResponse) (**CURRENTLY UNIMPLEMENTED**) - ## Zcash RPC Services Zaino has also committed to taking over responsibility for serving all [Zcash RPC Services](https://zcash.github.io/rpc/) required by non-validator (miner) clients from Zcashd. A full specification of the Zcash RPC services served by Zaino, and their current state of development, can be seen [here](./Zaino-zcash-rpcs.pdf). - From dffd889b797103bd946e023132cc49332c6d7918 Mon Sep 17 00:00:00 2001 From: pacu Date: Wed, 21 Jan 2026 19:17:13 -0300 Subject: [PATCH 085/114] fix test `block_range_no_pool_type_returns_sapling_orchard` for Zcashd --- integration-tests/tests/fetch_service.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/integration-tests/tests/fetch_service.rs b/integration-tests/tests/fetch_service.rs index 0333113c5..e010613c2 100644 --- a/integration-tests/tests/fetch_service.rs +++ b/integration-tests/tests/fetch_service.rs @@ -1241,7 +1241,7 @@ async fn fetch_service_get_block_range_no_pools_returns_sapling_orchard Date: Wed, 21 Jan 2026 22:08:38 -0300 Subject: [PATCH 086/114] fix test zcashd::get::block_range_returns_all_pools_when_requested --- integration-tests/tests/fetch_service.rs | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/integration-tests/tests/fetch_service.rs b/integration-tests/tests/fetch_service.rs index e010613c2..39da06268 100644 --- a/integration-tests/tests/fetch_service.rs +++ b/integration-tests/tests/fetch_service.rs @@ -1136,7 +1136,7 @@ async fn 
fetch_service_get_block_range_returns_all_pools(valida let end_height: u64 = if matches!(validator, ValidatorKind::Zebrad) { 106 } else { - 6 + 17 }; let fetch_service_get_block_range = fetch_service_subscriber @@ -1165,8 +1165,13 @@ async fn fetch_service_get_block_range_returns_all_pools(valida assert_eq!(compact_block.height, end_height); - // the compact block has 3 transactions - assert_eq!(compact_block.vtx.len(), 3); + let expected_transaction_count = if matches!(validator, ValidatorKind::Zebrad) { + 3 + } else { + 4 // zcashd uses shielded coinbase which will add an extra compact tx + }; + // the compact block has the right number of transactions + assert_eq!(compact_block.vtx.len(), expected_transaction_count); // transaction order is not guaranteed so it's necessary to look up for them by TXID let deshielding_tx = compact_block From cb025666424895b900a3d715b9d52efa6e4f378d Mon Sep 17 00:00:00 2001 From: pacu Date: Mon, 26 Jan 2026 13:35:46 -0300 Subject: [PATCH 087/114] Implement PoolTypeFilter default() as Default trait impl --- zaino-proto/src/proto/utils.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs index 3f96aef98..8213fa9af 100644 --- a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -126,16 +126,18 @@ pub struct PoolTypeFilter { include_orchard: bool, } -impl PoolTypeFilter { +impl std::default::Default for PoolTypeFilter { /// By default PoolType includes `Sapling` and `Orchard` pools. - pub fn default() -> Self { + fn default() -> Self { PoolTypeFilter { include_transparent: false, include_sapling: true, include_orchard: true, } } +} +impl PoolTypeFilter { /// A PoolType Filter that will include all existing pool types. 
pub fn includes_all() -> Self { PoolTypeFilter { From fe26a5fb25b358772241607f20fac78b33ab7de2 Mon Sep 17 00:00:00 2001 From: pacu Date: Mon, 26 Jan 2026 13:43:18 -0300 Subject: [PATCH 088/114] Fix error messages per PR Review comments --- zaino-state/src/error.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zaino-state/src/error.rs b/zaino-state/src/error.rs index 6ffbf8497..0b246e42f 100644 --- a/zaino-state/src/error.rs +++ b/zaino-state/src/error.rs @@ -210,7 +210,7 @@ impl FetchServiceError { } GetBlockRangeError::NoStartHeightProvided => { FetchServiceError::TonicStatusError(tonic::Status::invalid_argument( - "Error: Start height out of range. Failed to convert to u32.", + "Error: No start height given", )) } GetBlockRangeError::EndHeightOutOfRange => { @@ -219,7 +219,7 @@ impl FetchServiceError { )) } GetBlockRangeError::NoEndHeightProvided => FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument("Error: No start height given."), + tonic::Status::invalid_argument("Error: No end height given."), ), GetBlockRangeError::PoolTypeArgumentError(_) => FetchServiceError::TonicStatusError( tonic::Status::invalid_argument("Error: invalid pool type"), From 8828ba0cd36448bd061479000e04c7388e428bdf Mon Sep 17 00:00:00 2001 From: pacu Date: Mon, 26 Jan 2026 13:44:33 -0300 Subject: [PATCH 089/114] remove unnecessary .bak file --- zaino-proto/build.rs.bak | 87 ---------------------------------------- 1 file changed, 87 deletions(-) delete mode 100644 zaino-proto/build.rs.bak diff --git a/zaino-proto/build.rs.bak b/zaino-proto/build.rs.bak deleted file mode 100644 index bf72a1d83..000000000 --- a/zaino-proto/build.rs.bak +++ /dev/null @@ -1,87 +0,0 @@ -use std::env; -use std::fs; -use std::io; -use std::path::{Path, PathBuf}; -use std::process::Command; - -const COMPACT_FORMATS_PROTO: &str = "proto/compact_formats.proto"; -const PROPOSAL_PROTO: &str = "proto/proposal.proto"; -const SERVICE_PROTO: &str = "proto/service.proto"; - -fn 
main() -> io::Result<()> { - // Check and compile proto files if needed - if Path::new(COMPACT_FORMATS_PROTO).exists() - && env::var_os("PROTOC") - .map(PathBuf::from) - .or_else(|| which::which("protoc").ok()) - .is_some() - { - build()?; - } - - Ok(()) -} - -fn build() -> io::Result<()> { - let out: PathBuf = env::var_os("OUT_DIR") - .expect("Cannot find OUT_DIR environment variable") - .into(); - - // Build the compact format types. - tonic_build::compile_protos(COMPACT_FORMATS_PROTO)?; - - // Copy the generated types into the source tree so changes can be committed. - fs::copy( - out.join("cash.z.wallet.sdk.rpc.rs"), - "src/proto/compact_formats.rs", - )?; - - // Build the gRPC types and client. - tonic_build::configure() - .build_server(true) - // .client_mod_attribute( - // "cash.z.wallet.sdk.rpc", - // r#"#[cfg(feature = "lightwalletd-tonic")]"#, - // ) - .extern_path( - ".cash.z.wallet.sdk.rpc.ChainMetadata", - "crate::proto::compact_formats::ChainMetadata", - ) - .extern_path( - ".cash.z.wallet.sdk.rpc.CompactBlock", - "crate::proto::compact_formats::CompactBlock", - ) - .extern_path( - ".cash.z.wallet.sdk.rpc.CompactTx", - "crate::proto::compact_formats::CompactTx", - ) - .extern_path( - ".cash.z.wallet.sdk.rpc.CompactSaplingSpend", - "crate::proto::compact_formats::CompactSaplingSpend", - ) - .extern_path( - ".cash.z.wallet.sdk.rpc.CompactSaplingOutput", - "crate::proto::compact_formats::CompactSaplingOutput", - ) - .extern_path( - ".cash.z.wallet.sdk.rpc.CompactOrchardAction", - "crate::proto::compact_formats::CompactOrchardAction", - ) - .compile(&[SERVICE_PROTO], &["proto/"])?; - - // Build the proposal types. - tonic_build::compile_protos(PROPOSAL_PROTO)?; - - // Copy the generated types into the source tree so changes can be committed. - fs::copy( - out.join("cash.z.wallet.sdk.ffi.rs"), - "src/proto/proposal.rs", - )?; - - // Copy the generated types into the source tree so changes can be committed. 
The - // file has the same name as for the compact format types because they have the - // same package, but we've set things up so this only contains the service types. - fs::copy(out.join("cash.z.wallet.sdk.rpc.rs"), "src/proto/service.rs")?; - - Ok(()) -} From de596941270b7b8a3db6fec64d25dc4a865ff208 Mon Sep 17 00:00:00 2001 From: pacu Date: Mon, 26 Jan 2026 14:27:24 -0300 Subject: [PATCH 090/114] add `From` trait impl for `FetchServiceError` and `StateServiceError` --- zaino-state/src/backends/fetch.rs | 4 +-- zaino-state/src/backends/state.rs | 2 +- zaino-state/src/error.rs | 41 +++++++++++++++++++++++++------ 3 files changed, 37 insertions(+), 10 deletions(-) diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index 7b8b3c462..2813870ad 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -811,7 +811,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { request: BlockRange, ) -> Result { let mut validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) - .map_err(FetchServiceError::from_get_block_change_error)?; + .map_err(FetchServiceError::from)?; // FIXME: this should be changed but this logic is hard to understand and we lack tests. // we will maintain the behaviour with less smelly code @@ -899,7 +899,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { request: BlockRange, ) -> Result { let mut validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) - .map_err(FetchServiceError::from_get_block_change_error)?; + .map_err(FetchServiceError::from)?; // FIXME: this should be changed but this logic is hard to understand and we lack tests. 
// we will maintain the behaviour with less smelly code diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index 41154bd0d..b9b7244d9 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -554,7 +554,7 @@ impl StateServiceSubscriber { trim_non_nullifier: bool, ) -> Result { let mut validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) - .map_err(|_| StateServiceError::Custom("fixme".to_string()))?; + .map_err(StateServiceError::from)?; // FIXME: this should be changed but this logic is hard to understand and we lack tests. // we will maintain the behaviour with less smelly code diff --git a/zaino-state/src/error.rs b/zaino-state/src/error.rs index 0b246e42f..8ee8268aa 100644 --- a/zaino-state/src/error.rs +++ b/zaino-state/src/error.rs @@ -97,6 +97,33 @@ pub enum StateServiceError { }, } +impl From for StateServiceError { + fn from(value: GetBlockRangeError) -> Self { + match value { + GetBlockRangeError::StartHeightOutOfRange => { + Self::TonicStatusError(tonic::Status::out_of_range( + "Error: Start height out of range. Failed to convert to u32.", + )) + } + GetBlockRangeError::NoStartHeightProvided => { + Self::TonicStatusError(tonic::Status::out_of_range( + "Error: No start height given", + )) + } + GetBlockRangeError::EndHeightOutOfRange => { + Self::TonicStatusError(tonic::Status::out_of_range( + "Error: End height out of range. 
Failed to convert to u32.", + )) + } + GetBlockRangeError::NoEndHeightProvided => Self::TonicStatusError( + tonic::Status::out_of_range("Error: No end height given."), + ), + GetBlockRangeError::PoolTypeArgumentError(_) => Self::TonicStatusError( + tonic::Status::invalid_argument("Error: invalid pool type"), + ), + } + } +} #[allow(deprecated)] impl From for tonic::Status { fn from(error: StateServiceError) -> Self { @@ -200,26 +227,26 @@ pub enum FetchServiceError { SerializationError(#[from] zebra_chain::serialization::SerializationError), } -impl FetchServiceError { - pub(crate) fn from_get_block_change_error(error: GetBlockRangeError) -> Self { - match error { +impl From for FetchServiceError { + fn from(value: GetBlockRangeError) -> Self { + match value { GetBlockRangeError::StartHeightOutOfRange => { - FetchServiceError::TonicStatusError(tonic::Status::invalid_argument( + FetchServiceError::TonicStatusError(tonic::Status::out_of_range( "Error: Start height out of range. Failed to convert to u32.", )) } GetBlockRangeError::NoStartHeightProvided => { - FetchServiceError::TonicStatusError(tonic::Status::invalid_argument( + FetchServiceError::TonicStatusError(tonic::Status::out_of_range( "Error: No start height given", )) } GetBlockRangeError::EndHeightOutOfRange => { - FetchServiceError::TonicStatusError(tonic::Status::invalid_argument( + FetchServiceError::TonicStatusError(tonic::Status::out_of_range( "Error: End height out of range. 
Failed to convert to u32.", )) } GetBlockRangeError::NoEndHeightProvided => FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument("Error: No end height given."), + tonic::Status::out_of_range("Error: No end height given."), ), GetBlockRangeError::PoolTypeArgumentError(_) => FetchServiceError::TonicStatusError( tonic::Status::invalid_argument("Error: invalid pool type"), From 0d8f1d0f1a92edc5d8fe6e3d7688229b5a8ba822 Mon Sep 17 00:00:00 2001 From: pacu Date: Mon, 26 Jan 2026 15:00:10 -0300 Subject: [PATCH 091/114] cargo fmt --- integration-tests/tests/fetch_service.rs | 77 +- integration-tests/tests/state_service.rs | 136 +-- zaino-proto/src/proto/service.rs | 906 ++++++------------ zaino-proto/src/proto/utils.rs | 33 +- zaino-state/src/backends/fetch.rs | 2 +- zaino-state/src/backends/state.rs | 38 +- .../src/chain_index/finalised_state/db/v1.rs | 10 +- .../src/chain_index/types/db/legacy.rs | 2 +- zaino-state/src/error.rs | 26 +- zaino-state/src/local_cache.rs | 5 +- 10 files changed, 471 insertions(+), 764 deletions(-) diff --git a/integration-tests/tests/fetch_service.rs b/integration-tests/tests/fetch_service.rs index 39da06268..15bd0d314 100644 --- a/integration-tests/tests/fetch_service.rs +++ b/integration-tests/tests/fetch_service.rs @@ -4,8 +4,8 @@ use futures::StreamExt as _; use zaino_fetch::jsonrpsee::connector::{test_node_and_return_url, JsonRpSeeConnector}; use zaino_proto::proto::compact_formats::CompactBlock; use zaino_proto::proto::service::{ - AddressList, BlockId, BlockRange, GetMempoolTxRequest, GetAddressUtxosArg, GetSubtreeRootsArg, PoolType, - TransparentAddressBlockFilter, TxFilter, + AddressList, BlockId, BlockRange, GetAddressUtxosArg, GetMempoolTxRequest, GetSubtreeRootsArg, + PoolType, TransparentAddressBlockFilter, TxFilter, }; use zaino_state::FetchServiceSubscriber; #[allow(deprecated)] @@ -1061,8 +1061,9 @@ async fn fetch_service_get_block_range(validator: &ValidatorKin } #[allow(deprecated)] -async fn 
fetch_service_get_block_range_returns_all_pools(validator: &ValidatorKind) { - +async fn fetch_service_get_block_range_returns_all_pools( + validator: &ValidatorKind, +) { let mut test_manager = TestManager::::launch(validator, None, None, None, true, false, true) .await @@ -1118,22 +1119,24 @@ async fn fetch_service_get_block_range_returns_all_pools(valida .head; let recipient_ua = clients.get_recipient_address("unified").await; - let orchard_txid = - zaino_testutils::from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) - .await - .unwrap() - .head; + let orchard_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_ua, 250_000, None)], + ) + .await + .unwrap() + .head; test_manager .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) .await; - let start_height: u64 = if matches!(validator, ValidatorKind::Zebrad) { + let start_height: u64 = if matches!(validator, ValidatorKind::Zebrad) { 100 } else { 1 }; - let end_height: u64 = if matches!(validator, ValidatorKind::Zebrad) { + let end_height: u64 = if matches!(validator, ValidatorKind::Zebrad) { 106 } else { 17 @@ -1165,7 +1168,7 @@ async fn fetch_service_get_block_range_returns_all_pools(valida assert_eq!(compact_block.height, end_height); - let expected_transaction_count = if matches!(validator, ValidatorKind::Zebrad) { + let expected_transaction_count = if matches!(validator, ValidatorKind::Zebrad) { 3 } else { 4 // zcashd uses shielded coinbase which will add an extra compact tx @@ -1181,7 +1184,7 @@ async fn fetch_service_get_block_range_returns_all_pools(valida .unwrap(); dbg!(deshielding_tx); - + assert!( !deshielding_tx.vout.is_empty(), "transparent data should be present when transaparent pool type is specified in the request." 
@@ -1214,13 +1217,14 @@ async fn fetch_service_get_block_range_returns_all_pools(valida } #[allow(deprecated)] -async fn fetch_service_get_block_range_no_pools_returns_sapling_orchard(validator: &ValidatorKind) { - +async fn fetch_service_get_block_range_no_pools_returns_sapling_orchard( + validator: &ValidatorKind, +) { let mut test_manager = TestManager::::launch(validator, None, None, None, true, false, true) .await .unwrap(); - + let mut clients = test_manager .clients .take() @@ -1271,22 +1275,24 @@ async fn fetch_service_get_block_range_no_pools_returns_sapling_orchard(validator: &ValidatorKind tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - let exclude_list_empty = GetMempoolTxRequest { exclude_txid_suffixes: Vec::new(), pool_types: Vec::new() }; - + let exclude_list_empty = GetMempoolTxRequest { + exclude_txid_suffixes: Vec::new(), + pool_types: Vec::new(), + }; + let fetch_service_stream = fetch_service_subscriber .get_mempool_tx(exclude_list_empty.clone()) .await @@ -1736,7 +1745,7 @@ async fn fetch_service_get_mempool_tx(validator: &ValidatorKind assert_eq!(sorted_fetch_mempool_tx.len(), 2); let exclude_list = GetMempoolTxRequest { - exclude_txid_suffixes: vec![sorted_txids[0][8..].to_vec()], + exclude_txid_suffixes: vec![sorted_txids[0][8..].to_vec()], pool_types: vec![], }; @@ -2278,7 +2287,10 @@ mod zcashd { #[tokio::test(flavor = "multi_thread")] pub(crate) async fn block_range_no_pool_type_returns_sapling_orchard() { - fetch_service_get_block_range_no_pools_returns_sapling_orchard::(&ValidatorKind::Zcashd).await; + fetch_service_get_block_range_no_pools_returns_sapling_orchard::( + &ValidatorKind::Zcashd, + ) + .await; } #[tokio::test(flavor = "multi_thread")] @@ -2466,10 +2478,13 @@ mod zebrad { pub(crate) async fn block_range_returns_all_pools_when_requested() { fetch_service_get_block_range_returns_all_pools::(&ValidatorKind::Zebrad).await; } - + #[tokio::test(flavor = "multi_thread")] pub(crate) async fn 
block_range_no_pool_type_returns_sapling_orchard() { - fetch_service_get_block_range_no_pools_returns_sapling_orchard::(&ValidatorKind::Zebrad).await; + fetch_service_get_block_range_no_pools_returns_sapling_orchard::( + &ValidatorKind::Zebrad, + ) + .await; } #[tokio::test(flavor = "multi_thread")] diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index 33f740710..e80bd7543 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -639,7 +639,9 @@ async fn state_service_get_raw_mempool_testnet() { /// Tests whether that calls to `get_block_range` with the same block range are the same when /// specifying the default `PoolType`s and passing and empty Vec to verify that the method falls /// back to the default pools when these are not explicitly specified. -async fn state_service_get_block_range_returns_default_pools(validator: &ValidatorKind) { +async fn state_service_get_block_range_returns_default_pools( + validator: &ValidatorKind, +) { let ( mut test_manager, _fetch_service, @@ -691,17 +693,17 @@ async fn state_service_get_block_range_returns_default_pools(val let end_height: u64 = 103; let default_pools_request = BlockRange { - start: Some(BlockId { - height: start_height, - hash: vec![], - }), - end: Some(BlockId { - height: end_height, - hash: vec![], - }), - pool_types: vec![], - }; - + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![], + }; + let fetch_service_get_block_range = fetch_service_subscriber .get_block_range(default_pools_request.clone()) .await @@ -711,17 +713,17 @@ async fn state_service_get_block_range_returns_default_pools(val .await; let explicit_default_pool_request = BlockRange { - start: Some(BlockId { - height: start_height, - hash: vec![], - }), - end: Some(BlockId { - height: end_height, - hash: vec![], - }), - pool_types: 
vec![PoolType::Sapling as i32, PoolType::Orchard as i32], - }; - + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![PoolType::Sapling as i32, PoolType::Orchard as i32], + }; + let fetch_service_get_block_range_specifying_pools = fetch_service_subscriber .get_block_range(explicit_default_pool_request.clone()) .await @@ -783,7 +785,9 @@ async fn state_service_get_block_range_returns_default_pools(val } /// tests whether the `GetBlockRange` RPC returns all pools when requested -async fn state_service_get_block_range_returns_all_pools(validator: &ValidatorKind) { +async fn state_service_get_block_range_returns_all_pools( + validator: &ValidatorKind, +) { let ( mut test_manager, _fetch_service, @@ -858,21 +862,21 @@ async fn state_service_get_block_range_returns_all_pools(valida let end_height: u64 = 106; let block_range = BlockRange { - start: Some(BlockId { - height: start_height, - hash: vec![], - }), - end: Some(BlockId { - height: end_height, - hash: vec![], - }), - pool_types: vec![ - PoolType::Transparent as i32, - PoolType::Sapling as i32, - PoolType::Orchard as i32, - ], - }; - + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], + }; + let fetch_service_get_block_range = fetch_service_subscriber .get_block_range(block_range.clone()) .await @@ -1260,8 +1264,10 @@ async fn state_service_get_raw_transaction_testnet() { test_manager.close().await; } -async fn state_service_get_address_transactions_regtest(validator: &ValidatorKind) { - let ( +async fn state_service_get_address_transactions_regtest( + validator: &ValidatorKind, +) { + let ( mut test_manager, _fetch_service, fetch_service_subscriber, @@ -2022,12 +2028,16 @@ mod zebra { #[tokio::test(flavor = 
"multi_thread")] pub(crate) async fn get_block_range_default_request_returns_no_t_data_regtest() { - state_service_get_block_range_returns_default_pools::(&ValidatorKind::Zebrad).await; + state_service_get_block_range_returns_default_pools::( + &ValidatorKind::Zebrad, + ) + .await; } #[tokio::test(flavor = "multi_thread")] pub(crate) async fn get_block_range_default_request_returns_all_pools_regtest() { - state_service_get_block_range_returns_all_pools::(&ValidatorKind::Zebrad).await; + state_service_get_block_range_returns_all_pools::(&ValidatorKind::Zebrad) + .await; } #[tokio::test(flavor = "multi_thread")] @@ -2216,10 +2226,13 @@ mod zebra { pub(crate) mod lightwallet_indexer { use futures::StreamExt as _; - use zaino_proto::proto::{service::{ - AddressList, BlockId, BlockRange, GetAddressUtxosArg, GetSubtreeRootsArg, PoolType, - TxFilter, - }, utils::pool_types_into_i32_vec}; + use zaino_proto::proto::{ + service::{ + AddressList, BlockId, BlockRange, GetAddressUtxosArg, GetSubtreeRootsArg, PoolType, + TxFilter, + }, + utils::pool_types_into_i32_vec, + }; use zebra_rpc::methods::{GetAddressTxIdsRequest, GetBlock}; use super::*; @@ -2844,7 +2857,7 @@ mod zebra { state_service_taddress_balance ); } - + #[tokio::test(flavor = "multi_thread")] async fn gat_transparent_data_from_compact_block_when_requested() { let ( @@ -2889,24 +2902,19 @@ mod zebra { state_service_taddress_balance ); - let compact_block_range = state_service_subscriber.get_block_range( - BlockRange { - start: None, - end: None, - pool_types: pool_types_into_i32_vec( - [ - PoolType::Transparent, - PoolType::Sapling, - PoolType::Orchard - ].to_vec() - ) - } - ) + let compact_block_range = state_service_subscriber + .get_block_range(BlockRange { + start: None, + end: None, + pool_types: pool_types_into_i32_vec( + [PoolType::Transparent, PoolType::Sapling, PoolType::Orchard].to_vec(), + ), + }) .await .unwrap() .map(Result::unwrap) .collect::>() - .await; + .await; for cb in 
compact_block_range.into_iter() { for tx in cb.vtx { @@ -2915,7 +2923,7 @@ mod zebra { // script pub key of this transaction is not empty assert!(!tx.vout.first().unwrap().script_pub_key.is_empty()); } - } + } } - } + } } diff --git a/zaino-proto/src/proto/service.rs b/zaino-proto/src/proto/service.rs index 2441bc93f..cbe8a5d0f 100644 --- a/zaino-proto/src/proto/service.rs +++ b/zaino-proto/src/proto/service.rs @@ -380,10 +380,10 @@ pub mod compact_tx_streamer_client { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value, + clippy::let_unit_value )] - use tonic::codegen::*; use tonic::codegen::http::Uri; + use tonic::codegen::*; #[derive(Debug, Clone)] pub struct CompactTxStreamerClient { inner: tonic::client::Grpc, @@ -427,9 +427,8 @@ pub mod compact_tx_streamer_client { >::ResponseBody, >, >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, + >>::Error: + Into + std::marker::Send + std::marker::Sync, { CompactTxStreamerClient::new(InterceptedService::new(inner, interceptor)) } @@ -469,26 +468,18 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLatestBlock", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLatestBlock", + )); self.inner.unary(req, path, codec).await } /// Return the compact block corresponding to the given block identifier @@ 
-499,26 +490,18 @@ pub mod compact_tx_streamer_client { tonic::Response, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlock", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlock", + )); self.inner.unary(req, path, codec).await } /// Same as GetBlock except the returned CompactBlock value contains only @@ -533,26 +516,18 @@ pub mod compact_tx_streamer_client { tonic::Response, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockNullifiers", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockNullifiers", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockNullifiers", + )); self.inner.unary(req, path, codec).await } /// Return a list of consecutive compact blocks in the specified range, @@ -564,31 +539,21 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response< - tonic::codec::Streaming, - >, + 
tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRange", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockRange", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockRange", + )); self.inner.server_streaming(req, path, codec).await } /// Same as GetBlockRange except the returned CompactBlock values contain @@ -600,31 +565,21 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response< - tonic::codec::Streaming, - >, + tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRangeNullifiers", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockRangeNullifiers", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockRangeNullifiers", + )); self.inner.server_streaming(req, path, codec).await } /// Return the requested full (not compact) transaction (as from zcashd) @@ -632,26 +587,18 @@ pub mod compact_tx_streamer_client { 
&mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTransaction", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTransaction", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTransaction", + )); self.inner.unary(req, path, codec).await } /// Submit the given transaction to the Zcash network @@ -659,26 +606,18 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/SendTransaction", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "SendTransaction", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "SendTransaction", + )); self.inner.unary(req, path, codec).await } /// Return RawTransactions that match the given transparent address filter. 
@@ -692,26 +631,18 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTxids", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressTxids", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressTxids", + )); self.inner.server_streaming(req, path, codec).await } /// Return the transactions corresponding to the given t-address within the given block range. @@ -723,78 +654,54 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTransactions", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressTransactions", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressTransactions", + )); self.inner.server_streaming(req, path, codec).await } pub async fn get_taddress_balance( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - 
.map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalance", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressBalance", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressBalance", + )); self.inner.unary(req, path, codec).await } pub async fn get_taddress_balance_stream( &mut self, request: impl tonic::IntoStreamingRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalanceStream", ); let mut req = request.into_streaming_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressBalanceStream", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressBalanceStream", + )); self.inner.client_streaming(req, path, codec).await } /// Returns a stream of the compact transaction representation for transactions @@ -813,31 +720,21 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response< - tonic::codec::Streaming, - >, + tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - 
.map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolTx", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetMempoolTx", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetMempoolTx", + )); self.inner.server_streaming(req, path, codec).await } /// Return a stream of current Mempool transactions. This will keep the output stream open while @@ -849,26 +746,18 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolStream", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetMempoolStream", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetMempoolStream", + )); self.inner.server_streaming(req, path, codec).await } /// GetTreeState returns the note commitment tree state corresponding to the given block. 
@@ -879,52 +768,36 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTreeState", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTreeState", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTreeState", + )); self.inner.unary(req, path, codec).await } pub async fn get_latest_tree_state( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestTreeState", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLatestTreeState", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLatestTreeState", + )); self.inner.unary(req, path, codec).await } /// Returns a stream of information about roots of subtrees of the note commitment tree @@ -936,55 +809,37 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| 
{ - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetSubtreeRoots", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetSubtreeRoots", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetSubtreeRoots", + )); self.inner.server_streaming(req, path, codec).await } pub async fn get_address_utxos( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxos", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetAddressUtxos", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetAddressUtxos", + )); self.inner.unary(req, path, codec).await } pub async fn get_address_utxos_stream( @@ -994,26 +849,18 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + 
tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxosStream", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetAddressUtxosStream", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetAddressUtxosStream", + )); self.inner.server_streaming(req, path, codec).await } /// Return information about this lightwalletd instance and the blockchain @@ -1021,26 +868,18 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLightdInfo", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLightdInfo", - ), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLightdInfo", + )); self.inner.unary(req, path, codec).await } /// Testing-only, requires lightwalletd --ping-very-insecure (do not enable in production) @@ -1048,23 +887,18 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner.ready().await.map_err(|e| { + 
tonic::Status::unknown(format!("Service was not ready: {}", e.into())) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/Ping", ); let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("cash.z.wallet.sdk.rpc.CompactTxStreamer", "Ping"), - ); + req.extensions_mut().insert(GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "Ping", + )); self.inner.unary(req, path, codec).await } } @@ -1076,7 +910,7 @@ pub mod compact_tx_streamer_server { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value, + clippy::let_unit_value )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with CompactTxStreamerServer. @@ -1113,8 +947,7 @@ pub mod compact_tx_streamer_server { crate::proto::compact_formats::CompactBlock, tonic::Status, >, - > - + std::marker::Send + > + std::marker::Send + 'static; /// Return a list of consecutive compact blocks in the specified range, /// which is inclusive of `range.end`. @@ -1124,18 +957,14 @@ pub mod compact_tx_streamer_server { async fn get_block_range( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the GetBlockRangeNullifiers method. type GetBlockRangeNullifiersStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result< crate::proto::compact_formats::CompactBlock, tonic::Status, >, - > - + std::marker::Send + > + std::marker::Send + 'static; /// Same as GetBlockRange except the returned CompactBlock values contain /// only nullifiers. 
@@ -1145,10 +974,7 @@ pub mod compact_tx_streamer_server { async fn get_block_range_nullifiers( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; /// Return the requested full (not compact) transaction (as from zcashd) async fn get_transaction( &self, @@ -1162,8 +988,7 @@ pub mod compact_tx_streamer_server { /// Server streaming response type for the GetTaddressTxids method. type GetTaddressTxidsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > - + std::marker::Send + > + std::marker::Send + 'static; /// Return RawTransactions that match the given transparent address filter. /// @@ -1172,25 +997,18 @@ pub mod compact_tx_streamer_server { async fn get_taddress_txids( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the GetTaddressTransactions method. type GetTaddressTransactionsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > - + std::marker::Send + > + std::marker::Send + 'static; /// Return the transactions corresponding to the given t-address within the given block range. /// Mempool transactions are not included in the results. async fn get_taddress_transactions( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn get_taddress_balance( &self, request: tonic::Request, @@ -1201,12 +1019,8 @@ pub mod compact_tx_streamer_server { ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the GetMempoolTx method. 
type GetMempoolTxStream: tonic::codegen::tokio_stream::Stream< - Item = std::result::Result< - crate::proto::compact_formats::CompactTx, - tonic::Status, - >, - > - + std::marker::Send + Item = std::result::Result, + > + std::marker::Send + 'static; /// Returns a stream of the compact transaction representation for transactions /// currently in the mempool. The results of this operation may be a few @@ -1223,25 +1037,18 @@ pub mod compact_tx_streamer_server { async fn get_mempool_tx( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the GetMempoolStream method. type GetMempoolStreamStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > - + std::marker::Send + > + std::marker::Send + 'static; /// Return a stream of current Mempool transactions. This will keep the output stream open while /// there are mempool transactions. It will close the returned stream when a new block is mined. async fn get_mempool_stream( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; /// GetTreeState returns the note commitment tree state corresponding to the given block. /// See section 3.7 of the Zcash protocol specification. It returns several other useful /// values also (even though they can be obtained using GetBlock). @@ -1257,38 +1064,27 @@ pub mod compact_tx_streamer_server { /// Server streaming response type for the GetSubtreeRoots method. type GetSubtreeRootsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > - + std::marker::Send + > + std::marker::Send + 'static; /// Returns a stream of information about roots of subtrees of the note commitment tree /// for the specified shielded protocol (Sapling or Orchard). 
async fn get_subtree_roots( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn get_address_utxos( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the GetAddressUtxosStream method. type GetAddressUtxosStreamStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > - + std::marker::Send + > + std::marker::Send + 'static; async fn get_address_utxos_stream( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; /// Return information about this lightwalletd instance and the blockchain async fn get_lightd_info( &self, @@ -1321,10 +1117,7 @@ pub mod compact_tx_streamer_server { max_encoding_message_size: None, } } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService + pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService where F: tonic::service::Interceptor, { @@ -1379,23 +1172,16 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock" => { #[allow(non_camel_case_types)] struct GetLatestBlockSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService - for GetLatestBlockSvc { + impl tonic::server::UnaryService for GetLatestBlockSvc { type Response = super::BlockId; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_latest_block(&inner, request) - .await + ::get_latest_block(&inner, request).await }; Box::pin(fut) } @@ -1425,14 +1211,9 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock" => { 
#[allow(non_camel_case_types)] struct GetBlockSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService for GetBlockSvc { + impl tonic::server::UnaryService for GetBlockSvc { type Response = crate::proto::compact_formats::CompactBlock; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, @@ -1469,25 +1250,18 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockNullifiers" => { #[allow(non_camel_case_types)] struct GetBlockNullifiersSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService - for GetBlockNullifiersSvc { + impl tonic::server::UnaryService + for GetBlockNullifiersSvc + { type Response = crate::proto::compact_formats::CompactBlock; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_block_nullifiers( - &inner, - request, - ) + ::get_block_nullifiers(&inner, request) .await }; Box::pin(fut) @@ -1518,24 +1292,21 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRange" => { #[allow(non_camel_case_types)] struct GetBlockRangeSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService - for GetBlockRangeSvc { + impl + tonic::server::ServerStreamingService + for GetBlockRangeSvc + { type Response = crate::proto::compact_formats::CompactBlock; type ResponseStream = T::GetBlockRangeStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = + BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_block_range(&inner, request) - .await + ::get_block_range(&inner, request).await }; Box::pin(fut) } @@ 
-1565,16 +1336,14 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRangeNullifiers" => { #[allow(non_camel_case_types)] struct GetBlockRangeNullifiersSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService - for GetBlockRangeNullifiersSvc { + impl + tonic::server::ServerStreamingService + for GetBlockRangeNullifiersSvc + { type Response = crate::proto::compact_formats::CompactBlock; type ResponseStream = T::GetBlockRangeNullifiersStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = + BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, @@ -1582,10 +1351,9 @@ pub mod compact_tx_streamer_server { let inner = Arc::clone(&self.0); let fut = async move { ::get_block_range_nullifiers( - &inner, - request, - ) - .await + &inner, request, + ) + .await }; Box::pin(fut) } @@ -1615,23 +1383,16 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTransaction" => { #[allow(non_camel_case_types)] struct GetTransactionSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService - for GetTransactionSvc { + impl tonic::server::UnaryService for GetTransactionSvc { type Response = super::RawTransaction; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_transaction(&inner, request) - .await + ::get_transaction(&inner, request).await }; Box::pin(fut) } @@ -1661,23 +1422,18 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/SendTransaction" => { #[allow(non_camel_case_types)] struct SendTransactionSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService - for SendTransactionSvc { + impl tonic::server::UnaryService + for SendTransactionSvc + { type Response = 
super::SendResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::send_transaction(&inner, request) - .await + ::send_transaction(&inner, request).await }; Box::pin(fut) } @@ -1707,28 +1463,21 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTxids" => { #[allow(non_camel_case_types)] struct GetTaddressTxidsSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService< - super::TransparentAddressBlockFilter, - > for GetTaddressTxidsSvc { + impl + tonic::server::ServerStreamingService + for GetTaddressTxidsSvc + { type Response = super::RawTransaction; type ResponseStream = T::GetTaddressTxidsStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = + BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_taddress_txids( - &inner, - request, - ) - .await + ::get_taddress_txids(&inner, request).await }; Box::pin(fut) } @@ -1758,27 +1507,21 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTransactions" => { #[allow(non_camel_case_types)] struct GetTaddressTransactionsSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService< - super::TransparentAddressBlockFilter, - > for GetTaddressTransactionsSvc { + impl + tonic::server::ServerStreamingService + for GetTaddressTransactionsSvc + { type Response = super::RawTransaction; type ResponseStream = T::GetTaddressTransactionsStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = + BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - 
::get_taddress_transactions( - &inner, - request, - ) + ::get_taddress_transactions(&inner, request) .await }; Box::pin(fut) @@ -1809,25 +1552,18 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalance" => { #[allow(non_camel_case_types)] struct GetTaddressBalanceSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService - for GetTaddressBalanceSvc { + impl tonic::server::UnaryService + for GetTaddressBalanceSvc + { type Response = super::Balance; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_taddress_balance( - &inner, - request, - ) + ::get_taddress_balance(&inner, request) .await }; Box::pin(fut) @@ -1858,15 +1594,11 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalanceStream" => { #[allow(non_camel_case_types)] struct GetTaddressBalanceStreamSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ClientStreamingService - for GetTaddressBalanceStreamSvc { + impl tonic::server::ClientStreamingService + for GetTaddressBalanceStreamSvc + { type Response = super::Balance; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request>, @@ -1874,10 +1606,9 @@ pub mod compact_tx_streamer_server { let inner = Arc::clone(&self.0); let fut = async move { ::get_taddress_balance_stream( - &inner, - request, - ) - .await + &inner, request, + ) + .await }; Box::pin(fut) } @@ -1907,24 +1638,21 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolTx" => { #[allow(non_camel_case_types)] struct GetMempoolTxSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService - for GetMempoolTxSvc { + impl + 
tonic::server::ServerStreamingService + for GetMempoolTxSvc + { type Response = crate::proto::compact_formats::CompactTx; type ResponseStream = T::GetMempoolTxStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = + BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_mempool_tx(&inner, request) - .await + ::get_mempool_tx(&inner, request).await }; Box::pin(fut) } @@ -1954,27 +1682,17 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolStream" => { #[allow(non_camel_case_types)] struct GetMempoolStreamSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService - for GetMempoolStreamSvc { + impl tonic::server::ServerStreamingService + for GetMempoolStreamSvc + { type Response = super::RawTransaction; type ResponseStream = T::GetMempoolStreamStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = + BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_mempool_stream( - &inner, - request, - ) - .await + ::get_mempool_stream(&inner, request).await }; Box::pin(fut) } @@ -2004,23 +1722,16 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTreeState" => { #[allow(non_camel_case_types)] struct GetTreeStateSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService - for GetTreeStateSvc { + impl tonic::server::UnaryService for GetTreeStateSvc { type Response = super::TreeState; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - 
::get_tree_state(&inner, request) - .await + ::get_tree_state(&inner, request).await }; Box::pin(fut) } @@ -2050,23 +1761,13 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestTreeState" => { #[allow(non_camel_case_types)] struct GetLatestTreeStateSvc(pub Arc); - impl tonic::server::UnaryService - for GetLatestTreeStateSvc { + impl tonic::server::UnaryService for GetLatestTreeStateSvc { type Response = super::TreeState; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_latest_tree_state( - &inner, - request, - ) + ::get_latest_tree_state(&inner, request) .await }; Box::pin(fut) @@ -2097,24 +1798,21 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetSubtreeRoots" => { #[allow(non_camel_case_types)] struct GetSubtreeRootsSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService - for GetSubtreeRootsSvc { + impl + tonic::server::ServerStreamingService + for GetSubtreeRootsSvc + { type Response = super::SubtreeRoot; type ResponseStream = T::GetSubtreeRootsStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = + BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_subtree_roots(&inner, request) - .await + ::get_subtree_roots(&inner, request).await }; Box::pin(fut) } @@ -2144,23 +1842,19 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxos" => { #[allow(non_camel_case_types)] struct GetAddressUtxosSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService - for GetAddressUtxosSvc { + impl + 
tonic::server::UnaryService + for GetAddressUtxosSvc + { type Response = super::GetAddressUtxosReplyList; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_address_utxos(&inner, request) - .await + ::get_address_utxos(&inner, request).await }; Box::pin(fut) } @@ -2190,26 +1884,21 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxosStream" => { #[allow(non_camel_case_types)] struct GetAddressUtxosStreamSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::ServerStreamingService - for GetAddressUtxosStreamSvc { + impl + tonic::server::ServerStreamingService + for GetAddressUtxosStreamSvc + { type Response = super::GetAddressUtxosReply; type ResponseStream = T::GetAddressUtxosStreamStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = + BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_address_utxos_stream( - &inner, - request, - ) + ::get_address_utxos_stream(&inner, request) .await }; Box::pin(fut) @@ -2240,21 +1929,13 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLightdInfo" => { #[allow(non_camel_case_types)] struct GetLightdInfoSvc(pub Arc); - impl tonic::server::UnaryService - for GetLightdInfoSvc { + impl tonic::server::UnaryService for GetLightdInfoSvc { type Response = super::LightdInfo; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { + type Future = BoxFuture, tonic::Status>; + fn call(&mut self, request: tonic::Request) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_lightd_info(&inner, request) - .await + 
::get_lightd_info(&inner, request).await }; Box::pin(fut) } @@ -2284,14 +1965,9 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/Ping" => { #[allow(non_camel_case_types)] struct PingSvc(pub Arc); - impl< - T: CompactTxStreamer, - > tonic::server::UnaryService for PingSvc { + impl tonic::server::UnaryService for PingSvc { type Response = super::PingResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, @@ -2325,23 +2001,19 @@ pub mod compact_tx_streamer_server { }; Box::pin(fut) } - _ => { - Box::pin(async move { - let mut response = http::Response::new(empty_body()); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } + _ => Box::pin(async move { + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers.insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers.insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }), } } } diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs index 8213fa9af..4605a1c92 100644 --- a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -10,7 +10,7 @@ pub enum PoolTypeError { } /// Converts a vector of pool_types (i32) into its rich-type representation -/// Returns `PoolTypeError::InvalidPoolType` when invalid `pool_types` are found +/// Returns `PoolTypeError::InvalidPoolType` when invalid `pool_types` are found /// or `PoolTypeError::UnknownPoolType` if unknown ones are found. 
pub fn pool_types_from_vector(pool_types: &[i32]) -> Result, PoolTypeError> { let pools = if pool_types.is_empty() { @@ -140,28 +140,30 @@ impl std::default::Default for PoolTypeFilter { impl PoolTypeFilter { /// A PoolType Filter that will include all existing pool types. pub fn includes_all() -> Self { - PoolTypeFilter { + PoolTypeFilter { include_transparent: true, include_sapling: true, - include_orchard: true + include_orchard: true, } } /// create a `PoolTypeFilter` from a vector of raw i32 `PoolType`s /// If the vector is empty it will return `Self::default()`. - /// If the vector contains `PoolType::Invalid` or the vector contains more than 3 elements + /// If the vector contains `PoolType::Invalid` or the vector contains more than 3 elements /// returns `PoolTypeError::InvalidPoolType` pub fn new_from_slice(pool_types: &[i32]) -> Result { let pool_types = pool_types_from_vector(pool_types)?; - + Self::new_from_pool_types(&pool_types) } /// create a `PoolTypeFilter` from a vector of `PoolType` /// If the vector is empty it will return `Self::default()`. 
- /// If the vector contains `PoolType::Invalid` or the vector contains more than 3 elements + /// If the vector contains `PoolType::Invalid` or the vector contains more than 3 elements /// returns `PoolTypeError::InvalidPoolType` - pub fn new_from_pool_types(pool_types: &Vec) -> Result { + pub fn new_from_pool_types( + pool_types: &Vec, + ) -> Result { if pool_types.len() > PoolType::Orchard as usize { return Err(PoolTypeError::InvalidPoolType); } @@ -235,7 +237,10 @@ impl PoolTypeFilter { #[cfg(test)] mod test { - use crate::proto::{service::PoolType, utils::{PoolTypeError, PoolTypeFilter}}; + use crate::proto::{ + service::PoolType, + utils::{PoolTypeError, PoolTypeFilter}, + }; #[test] fn test_pool_type_filter_fails_when_invalid() { @@ -247,7 +252,10 @@ mod test { ] .to_vec(); - assert_eq!(PoolTypeFilter::new_from_pool_types(&pools), Err(PoolTypeError::InvalidPoolType)); + assert_eq!( + PoolTypeFilter::new_from_pool_types(&pools), + Err(PoolTypeError::InvalidPoolType) + ); } #[test] @@ -260,7 +268,10 @@ mod test { ] .to_vec(); - assert_eq!(PoolTypeFilter::new_from_pool_types(&pools), Err(PoolTypeError::InvalidPoolType)); + assert_eq!( + PoolTypeFilter::new_from_pool_types(&pools), + Err(PoolTypeError::InvalidPoolType) + ); } #[test] @@ -291,7 +302,7 @@ mod test { ); } - #[test] + #[test] fn test_pool_type_filter_includes_all() { assert_eq!( PoolTypeFilter::from_checked_parts(true, true, true), diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index 2813870ad..e22389033 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -1013,7 +1013,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { &self, request: TransparentAddressBlockFilter, ) -> Result { - let chain_height = self.chain_height().await?; + let chain_height = self.chain_height().await?; let txids = self.get_taddress_txids_helper(request).await?; let fetch_service_clone = self.clone(); let service_timeout = 
self.config.service.timeout; diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index b9b7244d9..3a225f18c 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -47,7 +47,7 @@ use zaino_proto::proto::{ GetAddressUtxosReplyList, GetMempoolTxRequest, LightdInfo, PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, - utils::{PoolTypeError, PoolTypeFilter, ValidatedBlockRangeRequest, pool_types_from_vector}, + utils::{pool_types_from_vector, PoolTypeError, PoolTypeFilter, ValidatedBlockRangeRequest}, }; use zcash_protocol::consensus::NetworkType; @@ -574,18 +574,15 @@ impl StateServiceSubscriber { let pool_types = match pool_types_from_vector(&request.pool_types) { Ok(p) => Ok(p), - Err(e) => { - Err( - match e { - PoolTypeError::InvalidPoolType => StateServiceError::UnhandledRpcError( - "PoolType::Invalid specified as argument in `BlockRange`.".to_string() - ), - PoolTypeError::UnknownPoolType(t) => StateServiceError::UnhandledRpcError( - format!("Unknown value specified in `BlockRange`. Value '{}' is not a known PoolType.", t) - ) - } - ) - } + Err(e) => Err(match e { + PoolTypeError::InvalidPoolType => StateServiceError::UnhandledRpcError( + "PoolType::Invalid specified as argument in `BlockRange`.".to_string(), + ), + PoolTypeError::UnknownPoolType(t) => StateServiceError::UnhandledRpcError(format!( + "Unknown value specified in `BlockRange`. 
Value '{}' is not a known PoolType.", + t + )), + }), }?; // FIX: find out why there's repeated code fetching the chain tip and then the rest tokio::spawn(async move { @@ -2241,21 +2238,24 @@ impl LightWalletIndexer for StateServiceSubscriber { } let pool_types = match PoolTypeFilter::new_from_slice(&request.pool_types) { - Ok(pool_type_filter) => pool_type_filter, - Err(PoolTypeError::InvalidPoolType) => return Err(StateServiceError::TonicStatusError( + Ok(pool_type_filter) => pool_type_filter, + Err(PoolTypeError::InvalidPoolType) => { + return Err(StateServiceError::TonicStatusError( tonic::Status::invalid_argument(format!( "Error: An invalid `PoolType' was found" )), - )), - Err(PoolTypeError::UnknownPoolType(unknown_pool_type)) => return Err(StateServiceError::TonicStatusError( + )) + } + Err(PoolTypeError::UnknownPoolType(unknown_pool_type)) => { + return Err(StateServiceError::TonicStatusError( tonic::Status::invalid_argument(format!( "Error: Unknown `PoolType' {} was found", unknown_pool_type )), )) + } }; - - + let mempool = self.mempool.clone(); let service_timeout = self.config.service.timeout; let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); diff --git a/zaino-state/src/chain_index/finalised_state/db/v1.rs b/zaino-state/src/chain_index/finalised_state/db/v1.rs index 24c1427d8..816d1ad50 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v1.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v1.rs @@ -33,7 +33,8 @@ use crate::{ TransparentHistExt, }, entry::{StoredEntryFixed, StoredEntryVar}, - }, types::{AddrEventBytes, TransactionHash, GENESIS_HEIGHT} + }, + types::{AddrEventBytes, TransactionHash, GENESIS_HEIGHT}, }, config::BlockCacheConfig, error::FinalisedStateError, @@ -3050,8 +3051,11 @@ impl DbV1 { let block = self.get_chain_block(height).await?; match block { - Some(b) => Ok(b.to_compact_block()), - None => Err(FinalisedStateError::DataUnavailable(format!("Block {} not present in validator's 
state.", height))) + Some(b) => Ok(b.to_compact_block()), + None => Err(FinalisedStateError::DataUnavailable(format!( + "Block {} not present in validator's state.", + height + ))), } } diff --git a/zaino-state/src/chain_index/types/db/legacy.rs b/zaino-state/src/chain_index/types/db/legacy.rs index de84f1e33..d7336f034 100644 --- a/zaino-state/src/chain_index/types/db/legacy.rs +++ b/zaino-state/src/chain_index/types/db/legacy.rs @@ -33,8 +33,8 @@ use core2::io::{self, Read, Write}; use hex::{FromHex, ToHex}; use primitive_types::U256; use std::{fmt, io::Cursor}; -use zebra_chain::serialization::BytesInDisplayOrder as _; use zaino_proto::proto::compact_formats::{CompactTxIn, TxOut}; +use zebra_chain::serialization::BytesInDisplayOrder as _; use crate::chain_index::encoding::{ read_fixed_le, read_i64_le, read_option, read_u16_be, read_u32_be, read_u32_le, read_u64_le, diff --git a/zaino-state/src/error.rs b/zaino-state/src/error.rs index 8ee8268aa..3e9fc893d 100644 --- a/zaino-state/src/error.rs +++ b/zaino-state/src/error.rs @@ -106,21 +106,19 @@ impl From for StateServiceError { )) } GetBlockRangeError::NoStartHeightProvided => { - Self::TonicStatusError(tonic::Status::out_of_range( - "Error: No start height given", - )) + Self::TonicStatusError(tonic::Status::out_of_range("Error: No start height given")) } GetBlockRangeError::EndHeightOutOfRange => { Self::TonicStatusError(tonic::Status::out_of_range( "Error: End height out of range. 
Failed to convert to u32.", )) } - GetBlockRangeError::NoEndHeightProvided => Self::TonicStatusError( - tonic::Status::out_of_range("Error: No end height given."), - ), - GetBlockRangeError::PoolTypeArgumentError(_) => Self::TonicStatusError( - tonic::Status::invalid_argument("Error: invalid pool type"), - ), + GetBlockRangeError::NoEndHeightProvided => { + Self::TonicStatusError(tonic::Status::out_of_range("Error: No end height given.")) + } + GetBlockRangeError::PoolTypeArgumentError(_) => { + Self::TonicStatusError(tonic::Status::invalid_argument("Error: invalid pool type")) + } } } } @@ -229,17 +227,15 @@ pub enum FetchServiceError { impl From for FetchServiceError { fn from(value: GetBlockRangeError) -> Self { - match value { + match value { GetBlockRangeError::StartHeightOutOfRange => { FetchServiceError::TonicStatusError(tonic::Status::out_of_range( "Error: Start height out of range. Failed to convert to u32.", )) } - GetBlockRangeError::NoStartHeightProvided => { - FetchServiceError::TonicStatusError(tonic::Status::out_of_range( - "Error: No start height given", - )) - } + GetBlockRangeError::NoStartHeightProvided => FetchServiceError::TonicStatusError( + tonic::Status::out_of_range("Error: No start height given"), + ), GetBlockRangeError::EndHeightOutOfRange => { FetchServiceError::TonicStatusError(tonic::Status::out_of_range( "Error: End height out of range. 
Failed to convert to u32.", diff --git a/zaino-state/src/local_cache.rs b/zaino-state/src/local_cache.rs index d2a2d0fb5..7dfb1d29e 100644 --- a/zaino-state/src/local_cache.rs +++ b/zaino-state/src/local_cache.rs @@ -23,7 +23,8 @@ use zaino_fetch::{ }; use zaino_proto::proto::{ compact_formats::{ChainMetadata, CompactBlock, CompactOrchardAction}, - service::PoolType, utils::PoolTypeFilter, + service::PoolType, + utils::PoolTypeFilter, }; use zebra_chain::{ block::{Hash, Height}, @@ -284,7 +285,7 @@ async fn try_state_path( .into_compact_block( u32::try_from(trees.sapling())?, u32::try_from(trees.orchard())?, - PoolTypeFilter::includes_all() + PoolTypeFilter::includes_all(), )?, )) } From 5c392470f8714e4dcfd67e7e369bb3789145465d Mon Sep 17 00:00:00 2001 From: pacu Date: Tue, 27 Jan 2026 16:42:23 -0300 Subject: [PATCH 092/114] make rustfmt ignore generated files and revert `service.rs` formatting --- .rustfmt.toml | 1 + zaino-proto/src/proto/service.rs | 906 +++++++++++++++++++++---------- 2 files changed, 618 insertions(+), 289 deletions(-) create mode 100644 .rustfmt.toml diff --git a/.rustfmt.toml b/.rustfmt.toml new file mode 100644 index 000000000..33b25ee01 --- /dev/null +++ b/.rustfmt.toml @@ -0,0 +1 @@ +format_generated_files = false \ No newline at end of file diff --git a/zaino-proto/src/proto/service.rs b/zaino-proto/src/proto/service.rs index cbe8a5d0f..2441bc93f 100644 --- a/zaino-proto/src/proto/service.rs +++ b/zaino-proto/src/proto/service.rs @@ -380,10 +380,10 @@ pub mod compact_tx_streamer_client { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value + clippy::let_unit_value, )] - use tonic::codegen::http::Uri; use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct CompactTxStreamerClient { inner: tonic::client::Grpc, @@ -427,8 +427,9 @@ pub mod compact_tx_streamer_client { >::ResponseBody, >, >, - >>::Error: - Into + std::marker::Send + std::marker::Sync, + , + >>::Error: Into + 
std::marker::Send + std::marker::Sync, { CompactTxStreamerClient::new(InterceptedService::new(inner, interceptor)) } @@ -468,18 +469,26 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLatestBlock", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLatestBlock", + ), + ); self.inner.unary(req, path, codec).await } /// Return the compact block corresponding to the given block identifier @@ -490,18 +499,26 @@ pub mod compact_tx_streamer_client { tonic::Response, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlock", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlock", + ), + ); self.inner.unary(req, path, codec).await } /// Same as GetBlock except the returned CompactBlock value contains only @@ -516,18 +533,26 @@ 
pub mod compact_tx_streamer_client { tonic::Response, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockNullifiers", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockNullifiers", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockNullifiers", + ), + ); self.inner.unary(req, path, codec).await } /// Return a list of consecutive compact blocks in the specified range, @@ -539,21 +564,31 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response>, + tonic::Response< + tonic::codec::Streaming, + >, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRange", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockRange", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockRange", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Same as GetBlockRange except the returned CompactBlock values contain @@ -565,21 +600,31 @@ pub mod 
compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response>, + tonic::Response< + tonic::codec::Streaming, + >, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRangeNullifiers", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockRangeNullifiers", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockRangeNullifiers", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Return the requested full (not compact) transaction (as from zcashd) @@ -587,18 +632,26 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTransaction", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTransaction", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTransaction", + ), + ); self.inner.unary(req, path, codec).await } /// Submit the given transaction to the Zcash network @@ 
-606,18 +659,26 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/SendTransaction", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "SendTransaction", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "SendTransaction", + ), + ); self.inner.unary(req, path, codec).await } /// Return RawTransactions that match the given transparent address filter. @@ -631,18 +692,26 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTxids", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressTxids", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressTxids", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Return the transactions corresponding to the given t-address within the given block range. 
@@ -654,54 +723,78 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTransactions", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressTransactions", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressTransactions", + ), + ); self.inner.server_streaming(req, path, codec).await } pub async fn get_taddress_balance( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalance", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressBalance", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressBalance", + ), + ); self.inner.unary(req, path, codec).await } pub async fn get_taddress_balance_stream( &mut self, request: impl tonic::IntoStreamingRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: 
{}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalanceStream", ); let mut req = request.into_streaming_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressBalanceStream", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressBalanceStream", + ), + ); self.inner.client_streaming(req, path, codec).await } /// Returns a stream of the compact transaction representation for transactions @@ -720,21 +813,31 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response>, + tonic::Response< + tonic::codec::Streaming, + >, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolTx", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetMempoolTx", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetMempoolTx", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Return a stream of current Mempool transactions. 
This will keep the output stream open while @@ -746,18 +849,26 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolStream", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetMempoolStream", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetMempoolStream", + ), + ); self.inner.server_streaming(req, path, codec).await } /// GetTreeState returns the note commitment tree state corresponding to the given block. @@ -768,36 +879,52 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTreeState", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTreeState", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTreeState", + ), + ); self.inner.unary(req, path, codec).await } pub async fn get_latest_tree_state( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> 
{ - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestTreeState", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLatestTreeState", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLatestTreeState", + ), + ); self.inner.unary(req, path, codec).await } /// Returns a stream of information about roots of subtrees of the note commitment tree @@ -809,37 +936,55 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetSubtreeRoots", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetSubtreeRoots", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetSubtreeRoots", + ), + ); self.inner.server_streaming(req, path, codec).await } pub async fn get_address_utxos( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + 
tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxos", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetAddressUtxos", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetAddressUtxos", + ), + ); self.inner.unary(req, path, codec).await } pub async fn get_address_utxos_stream( @@ -849,18 +994,26 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxosStream", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetAddressUtxosStream", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetAddressUtxosStream", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Return information about this lightwalletd instance and the blockchain @@ -868,18 +1021,26 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service 
was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLightdInfo", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLightdInfo", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLightdInfo", + ), + ); self.inner.unary(req, path, codec).await } /// Testing-only, requires lightwalletd --ping-very-insecure (do not enable in production) @@ -887,18 +1048,23 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/Ping", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "Ping", - )); + req.extensions_mut() + .insert( + GrpcMethod::new("cash.z.wallet.sdk.rpc.CompactTxStreamer", "Ping"), + ); self.inner.unary(req, path, codec).await } } @@ -910,7 +1076,7 @@ pub mod compact_tx_streamer_server { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value + clippy::let_unit_value, )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with CompactTxStreamerServer. 
@@ -947,7 +1113,8 @@ pub mod compact_tx_streamer_server { crate::proto::compact_formats::CompactBlock, tonic::Status, >, - > + std::marker::Send + > + + std::marker::Send + 'static; /// Return a list of consecutive compact blocks in the specified range, /// which is inclusive of `range.end`. @@ -957,14 +1124,18 @@ pub mod compact_tx_streamer_server { async fn get_block_range( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the GetBlockRangeNullifiers method. type GetBlockRangeNullifiersStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result< crate::proto::compact_formats::CompactBlock, tonic::Status, >, - > + std::marker::Send + > + + std::marker::Send + 'static; /// Same as GetBlockRange except the returned CompactBlock values contain /// only nullifiers. @@ -974,7 +1145,10 @@ pub mod compact_tx_streamer_server { async fn get_block_range_nullifiers( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Return the requested full (not compact) transaction (as from zcashd) async fn get_transaction( &self, @@ -988,7 +1162,8 @@ pub mod compact_tx_streamer_server { /// Server streaming response type for the GetTaddressTxids method. type GetTaddressTxidsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + std::marker::Send + > + + std::marker::Send + 'static; /// Return RawTransactions that match the given transparent address filter. /// @@ -997,18 +1172,25 @@ pub mod compact_tx_streamer_server { async fn get_taddress_txids( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the GetTaddressTransactions method. 
type GetTaddressTransactionsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + std::marker::Send + > + + std::marker::Send + 'static; /// Return the transactions corresponding to the given t-address within the given block range. /// Mempool transactions are not included in the results. async fn get_taddress_transactions( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_taddress_balance( &self, request: tonic::Request, @@ -1019,8 +1201,12 @@ pub mod compact_tx_streamer_server { ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the GetMempoolTx method. type GetMempoolTxStream: tonic::codegen::tokio_stream::Stream< - Item = std::result::Result, - > + std::marker::Send + Item = std::result::Result< + crate::proto::compact_formats::CompactTx, + tonic::Status, + >, + > + + std::marker::Send + 'static; /// Returns a stream of the compact transaction representation for transactions /// currently in the mempool. The results of this operation may be a few @@ -1037,18 +1223,25 @@ pub mod compact_tx_streamer_server { async fn get_mempool_tx( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the GetMempoolStream method. type GetMempoolStreamStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + std::marker::Send + > + + std::marker::Send + 'static; /// Return a stream of current Mempool transactions. This will keep the output stream open while /// there are mempool transactions. It will close the returned stream when a new block is mined. 
async fn get_mempool_stream( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// GetTreeState returns the note commitment tree state corresponding to the given block. /// See section 3.7 of the Zcash protocol specification. It returns several other useful /// values also (even though they can be obtained using GetBlock). @@ -1064,27 +1257,38 @@ pub mod compact_tx_streamer_server { /// Server streaming response type for the GetSubtreeRoots method. type GetSubtreeRootsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + std::marker::Send + > + + std::marker::Send + 'static; /// Returns a stream of information about roots of subtrees of the note commitment tree /// for the specified shielded protocol (Sapling or Orchard). async fn get_subtree_roots( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_address_utxos( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the GetAddressUtxosStream method. 
type GetAddressUtxosStreamStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + std::marker::Send + > + + std::marker::Send + 'static; async fn get_address_utxos_stream( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Return information about this lightwalletd instance and the blockchain async fn get_lightd_info( &self, @@ -1117,7 +1321,10 @@ pub mod compact_tx_streamer_server { max_encoding_message_size: None, } } - pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService where F: tonic::service::Interceptor, { @@ -1172,16 +1379,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock" => { #[allow(non_camel_case_types)] struct GetLatestBlockSvc(pub Arc); - impl tonic::server::UnaryService for GetLatestBlockSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetLatestBlockSvc { type Response = super::BlockId; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_latest_block(&inner, request).await + ::get_latest_block(&inner, request) + .await }; Box::pin(fut) } @@ -1211,9 +1425,14 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock" => { #[allow(non_camel_case_types)] struct GetBlockSvc(pub Arc); - impl tonic::server::UnaryService for GetBlockSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService for GetBlockSvc { type Response = crate::proto::compact_formats::CompactBlock; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -1250,18 
+1469,25 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockNullifiers" => { #[allow(non_camel_case_types)] struct GetBlockNullifiersSvc(pub Arc); - impl tonic::server::UnaryService - for GetBlockNullifiersSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetBlockNullifiersSvc { type Response = crate::proto::compact_formats::CompactBlock; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_block_nullifiers(&inner, request) + ::get_block_nullifiers( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1292,21 +1518,24 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRange" => { #[allow(non_camel_case_types)] struct GetBlockRangeSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetBlockRangeSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetBlockRangeSvc { type Response = crate::proto::compact_formats::CompactBlock; type ResponseStream = T::GetBlockRangeStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_block_range(&inner, request).await + ::get_block_range(&inner, request) + .await }; Box::pin(fut) } @@ -1336,14 +1565,16 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRangeNullifiers" => { #[allow(non_camel_case_types)] struct GetBlockRangeNullifiersSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetBlockRangeNullifiersSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetBlockRangeNullifiersSvc { type Response = 
crate::proto::compact_formats::CompactBlock; type ResponseStream = T::GetBlockRangeNullifiersStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -1351,9 +1582,10 @@ pub mod compact_tx_streamer_server { let inner = Arc::clone(&self.0); let fut = async move { ::get_block_range_nullifiers( - &inner, request, - ) - .await + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1383,16 +1615,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTransaction" => { #[allow(non_camel_case_types)] struct GetTransactionSvc(pub Arc); - impl tonic::server::UnaryService for GetTransactionSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetTransactionSvc { type Response = super::RawTransaction; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_transaction(&inner, request).await + ::get_transaction(&inner, request) + .await }; Box::pin(fut) } @@ -1422,18 +1661,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/SendTransaction" => { #[allow(non_camel_case_types)] struct SendTransactionSvc(pub Arc); - impl tonic::server::UnaryService - for SendTransactionSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for SendTransactionSvc { type Response = super::SendResponse; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::send_transaction(&inner, request).await + ::send_transaction(&inner, request) + .await }; Box::pin(fut) } @@ -1463,21 +1707,28 @@ pub mod compact_tx_streamer_server { 
"/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTxids" => { #[allow(non_camel_case_types)] struct GetTaddressTxidsSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetTaddressTxidsSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService< + super::TransparentAddressBlockFilter, + > for GetTaddressTxidsSvc { type Response = super::RawTransaction; type ResponseStream = T::GetTaddressTxidsStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_taddress_txids(&inner, request).await + ::get_taddress_txids( + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1507,21 +1758,27 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTransactions" => { #[allow(non_camel_case_types)] struct GetTaddressTransactionsSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetTaddressTransactionsSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService< + super::TransparentAddressBlockFilter, + > for GetTaddressTransactionsSvc { type Response = super::RawTransaction; type ResponseStream = T::GetTaddressTransactionsStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_taddress_transactions(&inner, request) + ::get_taddress_transactions( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1552,18 +1809,25 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalance" => { #[allow(non_camel_case_types)] struct GetTaddressBalanceSvc(pub Arc); - impl tonic::server::UnaryService - for GetTaddressBalanceSvc - { + impl< + T: CompactTxStreamer, + 
> tonic::server::UnaryService + for GetTaddressBalanceSvc { type Response = super::Balance; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_taddress_balance(&inner, request) + ::get_taddress_balance( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1594,11 +1858,15 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalanceStream" => { #[allow(non_camel_case_types)] struct GetTaddressBalanceStreamSvc(pub Arc); - impl tonic::server::ClientStreamingService - for GetTaddressBalanceStreamSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ClientStreamingService + for GetTaddressBalanceStreamSvc { type Response = super::Balance; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request>, @@ -1606,9 +1874,10 @@ pub mod compact_tx_streamer_server { let inner = Arc::clone(&self.0); let fut = async move { ::get_taddress_balance_stream( - &inner, request, - ) - .await + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1638,21 +1907,24 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolTx" => { #[allow(non_camel_case_types)] struct GetMempoolTxSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetMempoolTxSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetMempoolTxSvc { type Response = crate::proto::compact_formats::CompactTx; type ResponseStream = T::GetMempoolTxStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_mempool_tx(&inner, 
request).await + ::get_mempool_tx(&inner, request) + .await }; Box::pin(fut) } @@ -1682,17 +1954,27 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolStream" => { #[allow(non_camel_case_types)] struct GetMempoolStreamSvc(pub Arc); - impl tonic::server::ServerStreamingService - for GetMempoolStreamSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetMempoolStreamSvc { type Response = super::RawTransaction; type ResponseStream = T::GetMempoolStreamStream; - type Future = - BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_mempool_stream(&inner, request).await + ::get_mempool_stream( + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1722,16 +2004,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTreeState" => { #[allow(non_camel_case_types)] struct GetTreeStateSvc(pub Arc); - impl tonic::server::UnaryService for GetTreeStateSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetTreeStateSvc { type Response = super::TreeState; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_tree_state(&inner, request).await + ::get_tree_state(&inner, request) + .await }; Box::pin(fut) } @@ -1761,13 +2050,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestTreeState" => { #[allow(non_camel_case_types)] struct GetLatestTreeStateSvc(pub Arc); - impl tonic::server::UnaryService for GetLatestTreeStateSvc { + impl tonic::server::UnaryService + for GetLatestTreeStateSvc { 
type Response = super::TreeState; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_latest_tree_state(&inner, request) + ::get_latest_tree_state( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1798,21 +2097,24 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetSubtreeRoots" => { #[allow(non_camel_case_types)] struct GetSubtreeRootsSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetSubtreeRootsSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetSubtreeRootsSvc { type Response = super::SubtreeRoot; type ResponseStream = T::GetSubtreeRootsStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_subtree_roots(&inner, request).await + ::get_subtree_roots(&inner, request) + .await }; Box::pin(fut) } @@ -1842,19 +2144,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxos" => { #[allow(non_camel_case_types)] struct GetAddressUtxosSvc(pub Arc); - impl - tonic::server::UnaryService - for GetAddressUtxosSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetAddressUtxosSvc { type Response = super::GetAddressUtxosReplyList; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_address_utxos(&inner, request).await + ::get_address_utxos(&inner, request) + .await }; Box::pin(fut) } 
@@ -1884,21 +2190,26 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxosStream" => { #[allow(non_camel_case_types)] struct GetAddressUtxosStreamSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetAddressUtxosStreamSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetAddressUtxosStreamSvc { type Response = super::GetAddressUtxosReply; type ResponseStream = T::GetAddressUtxosStreamStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_address_utxos_stream(&inner, request) + ::get_address_utxos_stream( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1929,13 +2240,21 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLightdInfo" => { #[allow(non_camel_case_types)] struct GetLightdInfoSvc(pub Arc); - impl tonic::server::UnaryService for GetLightdInfoSvc { + impl tonic::server::UnaryService + for GetLightdInfoSvc { type Response = super::LightdInfo; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_lightd_info(&inner, request).await + ::get_lightd_info(&inner, request) + .await }; Box::pin(fut) } @@ -1965,9 +2284,14 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/Ping" => { #[allow(non_camel_case_types)] struct PingSvc(pub Arc); - impl tonic::server::UnaryService for PingSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService for PingSvc { type Response = super::PingResponse; - type Future = BoxFuture, tonic::Status>; + type Future = 
BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -2001,19 +2325,23 @@ pub mod compact_tx_streamer_server { }; Box::pin(fut) } - _ => Box::pin(async move { - let mut response = http::Response::new(empty_body()); - let headers = response.headers_mut(); - headers.insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers.insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }), + _ => { + Box::pin(async move { + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } } } } From a1fc0ba6dbce6a7ee1c79273df228ffc2fe12a97 Mon Sep 17 00:00:00 2001 From: idky137 Date: Fri, 23 Jan 2026 16:08:16 +0000 Subject: [PATCH 093/114] updated get_compact_block --- zaino-fetch/src/chain/transaction.rs | 4 +- zaino-proto/src/proto/utils.rs | 2 +- .../src/chain_index/finalised_state/db/v1.rs | 226 +++++++++++++++++- .../src/chain_index/types/db/legacy.rs | 41 ++++ 4 files changed, 261 insertions(+), 12 deletions(-) diff --git a/zaino-fetch/src/chain/transaction.rs b/zaino-fetch/src/chain/transaction.rs index c0c0725a8..c6f498992 100644 --- a/zaino-fetch/src/chain/transaction.rs +++ b/zaino-fetch/src/chain/transaction.rs @@ -1194,7 +1194,7 @@ impl FullTransaction { vec![] }; - let vout = if pool_types.includes_tranparent() { + let vout = if pool_types.includes_transparent() { self.raw_transaction .transparent_outputs .iter() @@ -1207,7 +1207,7 @@ impl FullTransaction { vec![] }; - let vin = if pool_types.includes_tranparent() { + let vin = if pool_types.includes_transparent() { self.raw_transaction .transparent_inputs .iter() diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs 
index 4605a1c92..709485b70 100644 --- a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -206,7 +206,7 @@ impl PoolTypeFilter { } /// retuns whether the filter includes transparent data - pub fn includes_tranparent(&self) -> bool { + pub fn includes_transparent(&self) -> bool { self.include_transparent } diff --git a/zaino-state/src/chain_index/finalised_state/db/v1.rs b/zaino-state/src/chain_index/finalised_state/db/v1.rs index 816d1ad50..37d422010 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v1.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v1.rs @@ -45,6 +45,7 @@ use crate::{ TxInCompact, TxLocation, TxOutCompact, TxidList, ZainoVersionedSerde as _, }; +use zaino_proto::proto::utils::PoolTypeFilter; use zebra_chain::parameters::NetworkKind; use zebra_state::HashOrHeight; @@ -361,7 +362,8 @@ impl CompactBlockExt for DbV1 { &self, height: Height, ) -> Result { - self.get_compact_block(height).await + self.get_compact_block(height, PoolTypeFilter::default()) + .await } } @@ -3047,16 +3049,222 @@ impl DbV1 { async fn get_compact_block( &self, height: Height, + pool_types: PoolTypeFilter, ) -> Result { - let block = self.get_chain_block(height).await?; + let validated_height = self + .resolve_validated_hash_or_height(HashOrHeight::Height(height.into())) + .await?; + let height_bytes = validated_height.to_bytes()?; - match block { - Some(b) => Ok(b.to_compact_block()), - None => Err(FinalisedStateError::DataUnavailable(format!( - "Block {} not present in validator's state.", - height - ))), - } + tokio::task::block_in_place(|| { + let txn = self.env.begin_ro_txn()?; + + // ----- Fetch Header ----- + let raw = match txn.get(self.headers, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let header: BlockHeaderData = 
*StoredEntryVar::from_bytes(raw) + .map_err(|e| FinalisedStateError::Custom(format!("header decode error: {e}")))? + .inner(); + + // ----- Fetch Txids ----- + let raw = match txn.get(self.txids, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let txids_stored_entry_var = StoredEntryVar::::from_bytes(raw) + .map_err(|e| FinalisedStateError::Custom(format!("txids decode error: {e}")))?; + let txids = txids_stored_entry_var.inner().txids(); + + // ----- Fetch Transparent Tx Data ----- + let transparent_stored_entry_var = if pool_types.includes_transparent() { + let raw = match txn.get(self.transparent, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + Some( + StoredEntryVar::::from_bytes(raw).map_err(|e| { + FinalisedStateError::Custom(format!("transparent decode error: {e}")) + })?, + ) + } else { + None + }; + let transparent = match transparent_stored_entry_var.as_ref() { + Some(stored_entry_var) => stored_entry_var.inner().tx(), + None => &[], + }; + + // ----- Fetch Sapling Tx Data ----- + let sapling_stored_entry_var = if pool_types.includes_sapling() { + let raw = match txn.get(self.sapling, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + Some( + StoredEntryVar::::from_bytes(raw).map_err(|e| { + FinalisedStateError::Custom(format!("sapling decode error: {e}")) + })?, + ) + } else { + None + }; + let sapling = match sapling_stored_entry_var.as_ref() { + Some(stored_entry_var) => stored_entry_var.inner().tx(), 
+ None => &[], + }; + + // ----- Fetch Orchard Tx Data ----- + let orchard_stored_entry_var = if pool_types.includes_orchard() { + let raw = match txn.get(self.orchard, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + Some( + StoredEntryVar::::from_bytes(raw).map_err(|e| { + FinalisedStateError::Custom(format!("orchard decode error: {e}")) + })?, + ) + } else { + None + }; + let orchard = match orchard_stored_entry_var.as_ref() { + Some(stored_entry_var) => stored_entry_var.inner().tx(), + None => &[], + }; + + // ----- Construct CompactTx ----- + let vtx: Vec = txids + .iter() + .enumerate() + .filter_map(|(i, txid)| { + let spends = sapling + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|s| { + s.spends() + .iter() + .map(|sp| sp.into_compact()) + .collect::>() + }) + .unwrap_or_default(); + + let outputs = sapling + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|s| { + s.outputs() + .iter() + .map(|o| o.into_compact()) + .collect::>() + }) + .unwrap_or_default(); + + let actions = orchard + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|o| { + o.actions() + .iter() + .map(|a| a.into_compact()) + .collect::>() + }) + .unwrap_or_default(); + + let (vin, vout) = transparent + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|t| (t.compact_vin(), t.compact_vout())) + .unwrap_or_default(); + + // Skip txs that have no elements in any requested pool type. + // + // TODO: Explore whether we can avoid this check. 
+ if spends.is_empty() + && outputs.is_empty() + && actions.is_empty() + && vin.is_empty() + && vout.is_empty() + { + return None; + } + + Some(zaino_proto::proto::compact_formats::CompactTx { + index: i as u64, + txid: txid.0.to_vec(), + fee: 0, + spends, + outputs, + actions, + vin, + vout, + }) + }) + .collect(); + + // ----- Fetch Commitment Tree Data ----- + let raw = match txn.get(self.commitment_tree_data, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + let commitment_tree_data: CommitmentTreeData = *StoredEntryFixed::from_bytes(raw) + .map_err(|e| { + FinalisedStateError::Custom(format!("commitment_tree decode error: {e}")) + })? + .inner(); + + let chain_metadata = zaino_proto::proto::compact_formats::ChainMetadata { + sapling_commitment_tree_size: commitment_tree_data.sizes().sapling(), + orchard_commitment_tree_size: commitment_tree_data.sizes().orchard(), + }; + + // ----- Construct CompactBlock ----- + Ok(zaino_proto::proto::compact_formats::CompactBlock { + proto_version: 4, + height: header.index().height().0 as u64, + hash: header.index().hash().0.to_vec(), + prev_hash: header.index().parent_hash().0.to_vec(), + // Is this safe? + time: header.data().time() as u32, + header: Vec::new(), + vtx, + chain_metadata: Some(chain_metadata), + }) + }) } /// Fetch database metadata. diff --git a/zaino-state/src/chain_index/types/db/legacy.rs b/zaino-state/src/chain_index/types/db/legacy.rs index d7336f034..ebde081fa 100644 --- a/zaino-state/src/chain_index/types/db/legacy.rs +++ b/zaino-state/src/chain_index/types/db/legacy.rs @@ -1708,6 +1708,23 @@ impl TransparentCompactTx { pub fn outputs(&self) -> &[TxOutCompact] { &self.vout } + + /// Returns Proto CompactTxIn values, omitting the null prevout used by coinbase. 
+ pub fn compact_vin(&self) -> Vec { + self.inputs() + .iter() + .filter(|txin| !txin.is_null_prevout()) + .map(|txin| txin.to_compact()) + .collect() + } + + /// Returns Proto TxOut values. + pub fn compact_vout(&self) -> Vec { + self.outputs() + .iter() + .map(|txout| txout.to_compact()) + .collect() + } } /// A compact reference to a previously created transparent UTXO being spent. @@ -1752,6 +1769,14 @@ impl TxInCompact { pub fn is_null_prevout(&self) -> bool { self.prevout_txid == [0u8; 32] && self.prevout_index == u32::MAX } + + /// Creates a Proto CompactTxIn from this record. + pub fn to_compact(&self) -> zaino_proto::proto::compact_formats::CompactTxIn { + zaino_proto::proto::compact_formats::CompactTxIn { + prevout_txid: self.prevout_txid.to_vec(), + prevout_index: self.prevout_index, + } + } } impl ZainoVersionedSerde for TxInCompact { @@ -1943,6 +1968,22 @@ impl TxOutCompact { pub fn script_type_enum(&self) -> Option { ScriptType::try_from(self.script_type).ok() } + + /// Creates a Proto TxOut from this record. + /// + /// Note: this reconstructs standard P2PKH / P2SH scripts. For NonStandard outputs, + /// this returns an empty script_pub_key. 
+ pub fn to_compact(&self) -> zaino_proto::proto::compact_formats::TxOut { + let script_pub_key = self + .script_type_enum() + .and_then(|script_type| build_standard_script(self.script_hash, script_type)) + .unwrap_or_default(); + + zaino_proto::proto::compact_formats::TxOut { + value: self.value, + script_pub_key, + } + } } impl> TryFrom<(u64, T)> for TxOutCompact { From d34e89d6c7000bd1e162df5c673bf18d87c741e4 Mon Sep 17 00:00:00 2001 From: idky137 Date: Mon, 26 Jan 2026 16:56:48 +0000 Subject: [PATCH 094/114] implemented get_compact_block_stream in v1 database --- .../src/chain_index/finalised_state/db/v1.rs | 1117 ++++++++++++++++- 1 file changed, 1085 insertions(+), 32 deletions(-) diff --git a/zaino-state/src/chain_index/finalised_state/db/v1.rs b/zaino-state/src/chain_index/finalised_state/db/v1.rs index 37d422010..3d4e10248 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v1.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v1.rs @@ -39,13 +39,13 @@ use crate::{ config::BlockCacheConfig, error::FinalisedStateError, AddrHistRecord, AddrScript, AtomicStatus, BlockHash, BlockHeaderData, CommitmentTreeData, - CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, CompactSize, CompactTxData, - FixedEncodedLen as _, Height, IndexedBlock, OrchardCompactTx, OrchardTxList, Outpoint, - SaplingCompactTx, SaplingTxList, StatusType, TransparentCompactTx, TransparentTxList, - TxInCompact, TxLocation, TxOutCompact, TxidList, ZainoVersionedSerde as _, + CompactBlockStream, CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, + CompactSize, CompactTxData, FixedEncodedLen as _, Height, IndexedBlock, OrchardCompactTx, + OrchardTxList, Outpoint, SaplingCompactTx, SaplingTxList, StatusType, TransparentCompactTx, + TransparentTxList, TxInCompact, TxLocation, TxOutCompact, TxidList, ZainoVersionedSerde as _, }; -use zaino_proto::proto::utils::PoolTypeFilter; +use zaino_proto::proto::{compact_formats::CompactBlock, utils::PoolTypeFilter}; use 
zebra_chain::parameters::NetworkKind; use zebra_state::HashOrHeight; @@ -1782,11 +1782,21 @@ impl DbV1 { /// Fetches block headers for the given height range. /// /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height, + /// This could be updated by following the cursor step example in + /// get_compact_block_streamer. async fn get_block_range_headers( &self, start: Height, end: Height, ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + self.validate_block_range(start, end).await?; let start_bytes = start.to_bytes()?; let end_bytes = end.to_bytes()?; @@ -1923,11 +1933,21 @@ impl DbV1 { /// Fetches block txids for the given height range. /// /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height, + /// This could be updated by following the cursor step example in + /// get_compact_block_streamer. async fn get_block_range_txids( &self, start: Height, end: Height, ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + self.validate_block_range(start, end).await?; let start_bytes = start.to_bytes()?; let end_bytes = end.to_bytes()?; @@ -2082,11 +2102,21 @@ impl DbV1 { /// Fetches block transparent tx data for the given height range. /// /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height, + /// This could be updated by following the cursor step example in + /// get_compact_block_streamer. 
async fn get_block_range_transparent( &self, start: Height, end: Height, ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + self.validate_block_range(start, end).await?; let start_bytes = start.to_bytes()?; let end_bytes = end.to_bytes()?; @@ -2243,11 +2273,21 @@ impl DbV1 { /// Fetches block sapling tx data for the given height range. /// /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height, + /// This could be updated by following the cursor step example in + /// get_compact_block_streamer. async fn get_block_range_sapling( &self, start: Height, end: Height, ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + self.validate_block_range(start, end).await?; let start_bytes = start.to_bytes()?; let end_bytes = end.to_bytes()?; @@ -2403,11 +2443,21 @@ impl DbV1 { /// Fetches block orchard tx data for the given height range. /// /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height, + /// This could be updated by following the cursor step example in + /// get_compact_block_streamer. async fn get_block_range_orchard( &self, start: Height, end: Height, ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + self.validate_block_range(start, end).await?; let start_bytes = start.to_bytes()?; let end_bytes = end.to_bytes()?; @@ -2476,11 +2526,21 @@ impl DbV1 { /// Fetches block commitment tree data for the given height range. /// /// Uses cursor based fetch. 
+ /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height, + /// This could be updated by following the cursor step example in + /// get_compact_block_streamer. async fn get_block_range_commitment_tree_data( &self, start: Height, end: Height, ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + self.validate_block_range(start, end).await?; let start_bytes = start.to_bytes()?; let end_bytes = end.to_bytes()?; @@ -3044,8 +3104,6 @@ impl DbV1 { } /// Returns the CompactBlock for the given Height. - /// - /// TODO: Add separate range fetch method! async fn get_compact_block( &self, height: Height, @@ -3206,9 +3264,18 @@ impl DbV1 { .map(|t| (t.compact_vin(), t.compact_vout())) .unwrap_or_default(); - // Skip txs that have no elements in any requested pool type. + // Omit transactions that have no elements in any requested pool type. + // + // This keeps `vtx` compact (it only contains transactions relevant to the caller’s pool filter), + // but it also means: + // - `vtx.len()` may be smaller than the block transaction count, and + // - transaction indices in `vtx` may be non-contiguous. + // Consumers must use `CompactTx.index` (the original transaction position in the block) rather + // than assuming `vtx` preserves block order densely. // - // TODO: Explore whether we can avoid this check. + // TODO: Re-evaluate whether omitting "empty-for-filter" transactions is the desired API behaviour. + // Some clients may expect a position-preserving representation (one entry per txid), even if + // the per-pool fields are empty for a given filter. if spends.is_empty() && outputs.is_empty() && actions.is_empty() @@ -3267,6 +3334,992 @@ impl DbV1 { }) } + /// Streams `CompactBlock` messages for an inclusive height range. 
+ /// + /// This implementation is designed for high-throughput lightclient serving: + /// - It performs a single cursor-walk over the headers database and keeps all other databases + /// (txids + optional pool-specific tx data + commitment tree data) strictly aligned to the + /// same LMDB key. + /// - It uses *short-lived* read transactions and periodically re-seeks by key, which: + /// - reduces the lifetime of LMDB reader slots, + /// - bounds the amount of data held in the same read snapshot, + /// - and prevents a single long stream from monopolising the environment’s read resources. + /// + /// Ordering / range semantics: + /// - The stream covers the inclusive range `[start_height, end_height]`. + /// - If `start_height <= end_height` the stream is ascending; otherwise it is descending. + /// - This function enforces *contiguous heights* in the headers database. Missing heights, key + /// ordering problems, or cursor desynchronisation are treated as internal errors because they + /// indicate database corruption or a violated storage invariant. + /// + /// Pool filtering: + /// - `pool_types` controls which per-transaction components are populated. + /// - Transactions that contain no elements in any requested pool are omitted from `vtx`. + /// The original transaction index is preserved in `CompactTx.index`. + /// + /// Concurrency model: + /// - Spawns a dedicated blocking task (`spawn_blocking`) which performs LMDB reads and decoding. + /// - Results are pushed into a bounded `mpsc` channel; backpressure is applied if the consumer + /// is slow. + /// + /// Errors: + /// - Database-missing conditions are sent downstream as `tonic::Status::not_found`. + /// - Decode failures, cursor desynchronisation, and invariant violations are sent as + /// `tonic::Status::internal`. 
+ async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + let (validated_start_height, validated_end_height) = + self.validate_block_range(start_height, end_height).await?; + + let start_key_bytes = validated_start_height.to_bytes()?; + + // Direction is derived from the validated heights. This relies on `validate_block_range` + // preserving input ordering (i.e. not normalising to (min, max)). + let is_ascending = validated_start_height <= validated_end_height; + + // Bounded channel provides backpressure so the blocking task cannot run unbounded ahead of + // the gRPC consumer. + let (sender, receiver) = + tokio::sync::mpsc::channel::>(128); + + // Clone the database environment. + let env = self.env.clone(); + + // Copy database handles into the blocking task. LMDB database handles are cheap, copyable IDs. + let headers_database = self.headers; + let txids_database = self.txids; + let transparent_database = self.transparent; + let sapling_database = self.sapling; + let orchard_database = self.orchard; + let commitment_tree_data_database = self.commitment_tree_data; + + tokio::task::spawn_blocking(move || { + /// Maximum number of blocks to stream per LMDB read transaction. + /// + /// The cursor-walk is resumed by re-seeking to the next expected height key. This keeps + /// read transactions short-lived and reduces pressure on LMDB reader slots. + const BLOCKS_PER_READ_TRANSACTION: usize = 1024; + + // ===================================================================================== + // Helper functions + // ===================================================================================== + // + // These helpers keep the main streaming loop readable and ensure that any failure: + // - emits exactly one `tonic::Status` into the stream (best-effort), and then + // - terminates the blocking task. 
+ // + // They intentionally return `Option`/`Result` to allow early-exit with minimal boilerplate. + + /// Send a `tonic::Status` downstream and ignore send errors. + /// + /// A send error means the receiver side has been dropped (e.g. client cancelled the RPC), + /// so the producer should terminate promptly. + fn send_status( + sender: &tokio::sync::mpsc::Sender>, + status: tonic::Status, + ) { + let _ = sender.blocking_send(Err(status)); + } + + /// Open a read-only cursor for `database` inside `txn`. + /// + /// On failure, emits an internal status and returns `None`. + fn open_ro_cursor_or_send<'txn>( + sender: &tokio::sync::mpsc::Sender>, + txn: &'txn lmdb::RoTransaction<'txn>, + database: lmdb::Database, + database_name: &'static str, + ) -> Option> { + match txn.open_ro_cursor(database) { + Ok(cursor) => Some(cursor), + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb open_ro_cursor({database_name}) failed: {error}" + )), + ); + None + } + } + } + + /// Position `cursor` exactly at `requested_key` using `MDB_SET_KEY`. + /// + /// Returns the `(key, value)` pair at that key. The returned `key` is expected to equal + /// `requested_key` (the function enforces this). + /// + /// Some LMDB bindings occasionally return `Ok((None, value))` for cursor operations. When + /// that happens: + /// - If `verify_on_none_key` is true, we call `MDB_GET_CURRENT` once to recover and verify + /// the current key. + /// - Otherwise we assume the cursor is correctly positioned and return `(requested_key, value)`. + /// + /// On `NotFound`, emits `not_found_status`. On other failures or verification failure, emits + /// `internal(...)`. In all error cases it returns `None`. 
+ fn cursor_set_key_or_send<'txn>( + sender: &tokio::sync::mpsc::Sender>, + cursor: &lmdb::RoCursor<'txn>, + requested_key: &'txn [u8], + cursor_name: &'static str, + not_found_status: tonic::Status, + verify_on_none_key: bool, + ) -> Option<(&'txn [u8], &'txn [u8])> { + match cursor.get(Some(requested_key), None, lmdb_sys::MDB_SET_KEY) { + Ok((Some(found_key), found_val)) => { + if found_key != requested_key { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb SET_KEY({cursor_name}) returned non-matching key" + )), + ); + None + } else { + Some((found_key, found_val)) + } + } + Ok((None, found_val)) => { + // Some builds / bindings can return None for the key for certain ops. If requested, + // verify the cursor actually landed on the requested key via GET_CURRENT. + if verify_on_none_key { + let (recovered_key_opt, recovered_val) = + match cursor.get(None, None, lmdb_sys::MDB_GET_CURRENT) { + Ok(pair) => pair, + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor GET_CURRENT({cursor_name}) failed: {error}" + )), + ); + return None; + } + }; + + let recovered_key = match recovered_key_opt { + Some(key) => key, + None => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb GET_CURRENT({cursor_name}) returned no key" + )), + ); + return None; + } + }; + + if recovered_key != requested_key { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb SET_KEY({cursor_name}) landed on unexpected key: expected {:?}, got {:?}", + requested_key, + recovered_key, + )), + ); + return None; + } + + Some((recovered_key, recovered_val)) + } else { + // Assume SET_KEY success implies match; return the requested key + value. 
+ Some((requested_key, found_val)) + } + } + Err(lmdb::Error::NotFound) => { + send_status(sender, not_found_status); + None + } + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor SET_KEY({cursor_name}) failed: {error}" + )), + ); + None + } + } + } + + /// Step the headers cursor using `step_op` and return the next `(key, value)` pair. + /// + /// This is special-cased because the headers cursor is the *driving cursor*; all other + /// cursors must remain aligned to whatever key the headers cursor moves to. + /// + /// Returns: + /// - `Ok(Some((k, v)))` when the cursor moved successfully. + /// - `Ok(None)` when the cursor reached the end (`NotFound`). + /// - `Err(())` when an error status has been emitted and streaming must stop. + #[allow(clippy::complexity)] + fn headers_step_or_send<'txn>( + sender: &tokio::sync::mpsc::Sender>, + headers_cursor: &lmdb::RoCursor<'txn>, + step_op: lmdb_sys::MDB_cursor_op, + ) -> Result, ()> { + match headers_cursor.get(None, None, step_op) { + Ok((Some(found_key), found_val)) => Ok(Some((found_key, found_val))), + Ok((None, _found_val)) => { + // Some bindings can return None for the key; recover via GET_CURRENT. 
+ let (recovered_key_opt, recovered_val) = + match headers_cursor.get(None, None, lmdb_sys::MDB_GET_CURRENT) { + Ok(pair) => pair, + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor GET_CURRENT(headers) failed: {error}" + )), + ); + return Err(()); + } + }; + let recovered_key = match recovered_key_opt { + Some(key) => key, + None => { + send_status( + sender, + tonic::Status::internal( + "lmdb GET_CURRENT(headers) returned no key".to_string(), + ), + ); + return Err(()); + } + }; + Ok(Some((recovered_key, recovered_val))) + } + Err(lmdb::Error::NotFound) => Ok(None), + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor step(headers) failed: {error}" + )), + ); + Err(()) + } + } + } + + /// Step a non-header cursor and enforce that it remains aligned to `expected_key`. + /// + /// The design invariant for this streamer is: + /// - the headers cursor chooses the next key + /// - every other cursor must produce a value at that *same* key (otherwise the per-height + /// databases are inconsistent or a cursor has desynchronised). + /// + /// Returns the value slice for `expected_key` on success. + /// On `NotFound`, emits `not_found_status`. + /// On key mismatch or other errors, emits an internal error. + fn cursor_step_expect_key_or_send<'txn>( + sender: &tokio::sync::mpsc::Sender>, + cursor: &lmdb::RoCursor<'txn>, + step_op: lmdb_sys::MDB_cursor_op, + expected_key: &[u8], + cursor_name: &'static str, + not_found_status: tonic::Status, + ) -> Option<&'txn [u8]> { + match cursor.get(None, None, step_op) { + Ok((Some(found_key), found_val)) => { + if found_key != expected_key { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor desync({cursor_name}): expected key {:?}, got {:?}", + expected_key, found_key + )), + ); + None + } else { + Some(found_val) + } + } + Ok((None, _found_val)) => { + // Some bindings can return None for the key; recover via GET_CURRENT. 
+ let (recovered_key_opt, recovered_val) = + match cursor.get(None, None, lmdb_sys::MDB_GET_CURRENT) { + Ok(pair) => pair, + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor GET_CURRENT({cursor_name}) failed: {error}" + )), + ); + return None; + } + }; + + let recovered_key = match recovered_key_opt { + Some(key) => key, + None => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb GET_CURRENT({cursor_name}) returned no key" + )), + ); + return None; + } + }; + + if recovered_key != expected_key { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor desync({cursor_name}): expected key {:?}, got {:?}", + expected_key, recovered_key + )), + ); + None + } else { + Some(recovered_val) + } + } + Err(lmdb::Error::NotFound) => { + send_status(sender, not_found_status); + None + } + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor step({cursor_name}) failed: {error}" + )), + ); + None + } + } + } + + // ===================================================================================== + // Blocking streaming loop + // ===================================================================================== + + let step_op = if is_ascending { + lmdb_sys::MDB_NEXT + } else { + lmdb_sys::MDB_PREV + }; + + // Contiguous-height enforcement: we expect every emitted block to have exactly this height. + // This catches missing heights and cursor ordering/key-encoding problems early. + let mut expected_height = validated_start_height; + + // Key used to re-seek at the start of each transaction chunk. + // This begins at the start height and advances by exactly one height per emitted block. + let mut next_start_key_bytes: Vec = start_key_bytes; + + loop { + // Stop once we have emitted the inclusive end height. 
+ if is_ascending { + if expected_height > validated_end_height { + return; + } + } else if expected_height < validated_end_height { + return; + } + + // Open a short-lived read transaction for this chunk. + // + // We intentionally drop the transaction regularly to keep reader slots available and + // to avoid holding a single snapshot for very large streams. + let txn = match env.begin_ro_txn() { + Ok(txn) => txn, + Err(error) => { + send_status( + &sender, + tonic::Status::internal(format!("lmdb begin_ro_txn failed: {error}")), + ); + return; + } + }; + + // Open cursors. Headers is the driving cursor; all others must remain key-aligned. + let headers_cursor = + match open_ro_cursor_or_send(&sender, &txn, headers_database, "headers") { + Some(cursor) => cursor, + None => return, + }; + + let txids_cursor = + match open_ro_cursor_or_send(&sender, &txn, txids_database, "txids") { + Some(cursor) => cursor, + None => return, + }; + + let transparent_cursor = if pool_types.includes_transparent() { + match open_ro_cursor_or_send(&sender, &txn, transparent_database, "transparent") + { + Some(cursor) => Some(cursor), + None => return, + } + } else { + None + }; + + let sapling_cursor = if pool_types.includes_sapling() { + match open_ro_cursor_or_send(&sender, &txn, sapling_database, "sapling") { + Some(cursor) => Some(cursor), + None => return, + } + } else { + None + }; + + let orchard_cursor = if pool_types.includes_orchard() { + match open_ro_cursor_or_send(&sender, &txn, orchard_database, "orchard") { + Some(cursor) => Some(cursor), + None => return, + } + } else { + None + }; + + let commitment_tree_cursor = match open_ro_cursor_or_send( + &sender, + &txn, + commitment_tree_data_database, + "commitment_tree_data", + ) { + Some(cursor) => cursor, + None => return, + }; + + // Position headers cursor at the start key for this chunk. This is the authoritative key + // that all other cursors must align to. 
+ let (current_key, mut raw_header_bytes) = match cursor_set_key_or_send( + &sender, + &headers_cursor, + next_start_key_bytes.as_slice(), + "headers", + tonic::Status::not_found(format!( + "missing header at requested start height key {:?}", + next_start_key_bytes + )), + true, // verify-on-none-key + ) { + Some(pair) => pair, + None => return, + }; + + // Align all other cursors to the exact same key. + let (_txids_key, mut raw_txids_bytes) = match cursor_set_key_or_send( + &sender, + &txids_cursor, + current_key, + "txids", + tonic::Status::not_found("block data missing from db (txids)"), + true, + ) { + Some(pair) => pair, + None => return, + }; + + let mut raw_transparent_bytes: Option<&[u8]> = + if let Some(cursor) = transparent_cursor.as_ref() { + let (_key, val) = match cursor_set_key_or_send( + &sender, + cursor, + current_key, + "transparent", + tonic::Status::not_found("block data missing from db (transparent)"), + true, + ) { + Some(pair) => pair, + None => return, + }; + Some(val) + } else { + None + }; + + let mut raw_sapling_bytes: Option<&[u8]> = + if let Some(cursor) = sapling_cursor.as_ref() { + let (_key, val) = match cursor_set_key_or_send( + &sender, + cursor, + current_key, + "sapling", + tonic::Status::not_found("block data missing from db (sapling)"), + true, + ) { + Some(pair) => pair, + None => return, + }; + Some(val) + } else { + None + }; + + let mut raw_orchard_bytes: Option<&[u8]> = + if let Some(cursor) = orchard_cursor.as_ref() { + let (_key, val) = match cursor_set_key_or_send( + &sender, + cursor, + current_key, + "orchard", + tonic::Status::not_found("block data missing from db (orchard)"), + true, + ) { + Some(pair) => pair, + None => return, + }; + Some(val) + } else { + None + }; + + let (_commitment_key, mut raw_commitment_tree_bytes) = match cursor_set_key_or_send( + &sender, + &commitment_tree_cursor, + current_key, + "commitment_tree_data", + tonic::Status::not_found("block data missing from db (commitment_tree_data)"), + 
true, + ) { + Some(pair) => pair, + None => return, + }; + + let mut blocks_streamed_in_transaction: usize = 0; + + loop { + // ----- Decode and validate block header ----- + let header: BlockHeaderData = match StoredEntryVar::from_bytes(raw_header_bytes) + .map_err(|error| format!("header decode error: {error}")) + { + Ok(entry) => *entry.inner(), + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + }; + + // Contiguous-height check: ensures cursor ordering and storage invariants are intact. + let current_height = header.index().height(); + if current_height != expected_height { + send_status( + &sender, + tonic::Status::internal(format!( + "missing height or out-of-order headers: expected {}, got {}", + expected_height.0, current_height.0 + )), + ); + return; + } + + // ----- Decode txids and optional pool data ----- + let txids_stored_entry_var = + match StoredEntryVar::::from_bytes(raw_txids_bytes) + .map_err(|error| format!("txids decode error: {error}")) + { + Ok(entry) => entry, + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + }; + let txids = txids_stored_entry_var.inner().txids(); + + // Each pool database stores a per-height vector aligned to the txids list: + // one entry per transaction index (typically `Option` per tx). 
+ let transparent_entries: Option> = + if let Some(raw) = raw_transparent_bytes { + match StoredEntryVar::::from_bytes(raw) + .map_err(|error| format!("transparent decode error: {error}")) + { + Ok(entry) => Some(entry), + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + } + } else { + None + }; + + let sapling_entries: Option> = + if let Some(raw) = raw_sapling_bytes { + match StoredEntryVar::::from_bytes(raw) + .map_err(|error| format!("sapling decode error: {error}")) + { + Ok(entry) => Some(entry), + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + } + } else { + None + }; + + let orchard_entries: Option> = + if let Some(raw) = raw_orchard_bytes { + match StoredEntryVar::::from_bytes(raw) + .map_err(|error| format!("orchard decode error: {error}")) + { + Ok(entry) => Some(entry), + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + } + } else { + None + }; + + let transparent = match transparent_entries.as_ref() { + Some(entry) => entry.inner().tx(), + None => &[], + }; + let sapling = match sapling_entries.as_ref() { + Some(entry) => entry.inner().tx(), + None => &[], + }; + let orchard = match orchard_entries.as_ref() { + Some(entry) => entry.inner().tx(), + None => &[], + }; + + // Invariant: if a pool is requested, its per-height vector length must match txids. 
+ if pool_types.includes_transparent() && transparent.len() != txids.len() { + send_status( + &sender, + tonic::Status::internal(format!( + "transparent list length mismatch at height {}: txids={}, transparent={}", + current_height.0, + txids.len(), + transparent.len(), + )), + ); + return; + } + if pool_types.includes_sapling() && sapling.len() != txids.len() { + send_status( + &sender, + tonic::Status::internal(format!( + "sapling list length mismatch at height {}: txids={}, sapling={}", + current_height.0, + txids.len(), + sapling.len(), + )), + ); + return; + } + if pool_types.includes_orchard() && orchard.len() != txids.len() { + send_status( + &sender, + tonic::Status::internal(format!( + "orchard list length mismatch at height {}: txids={}, orchard={}", + current_height.0, + txids.len(), + orchard.len(), + )), + ); + return; + } + + // ----- Build CompactTx list ----- + // + // `CompactTx.index` is the original transaction index within the block. + // This implementation omits transactions that contain no elements in any requested pool type, + // which means: + // - `vtx.len()` may be smaller than the number of txids in the block, and + // - indices in `vtx` may be non-contiguous. + // Consumers must interpret `CompactTx.index` as authoritative. + // + // TODO: Re-evaluate whether omitting "empty-for-filter" transactions is the desired API behaviour. + // Some clients may expect a position-preserving representation (one entry per txid), even if + // the per-pool fields are empty for a given filter. 
+ let mut vtx: Vec = + Vec::with_capacity(txids.len()); + + for (i, txid) in txids.iter().enumerate() { + let spends = sapling + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|s| { + s.spends() + .iter() + .map(|sp| sp.into_compact()) + .collect::>() + }) + .unwrap_or_default(); + + let outputs = sapling + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|s| { + s.outputs() + .iter() + .map(|o| o.into_compact()) + .collect::>() + }) + .unwrap_or_default(); + + let actions = orchard + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|o| { + o.actions() + .iter() + .map(|a| a.into_compact()) + .collect::>() + }) + .unwrap_or_default(); + + let (vin, vout) = transparent + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|t| (t.compact_vin(), t.compact_vout())) + .unwrap_or_default(); + + // Omit transactions that have no elements in any requested pool type. + // + // Note that omission produces a sparse `vtx` (by original transaction index). Clients must use + // `CompactTx.index` rather than assuming contiguous ordering. + // + // TODO: Re-evaluate whether omission is the desired API behaviour for all consumers. 
+ if spends.is_empty() + && outputs.is_empty() + && actions.is_empty() + && vin.is_empty() + && vout.is_empty() + { + continue; + } + + vtx.push(zaino_proto::proto::compact_formats::CompactTx { + index: i as u64, + txid: txid.0.to_vec(), + fee: 0, + spends, + outputs, + actions, + vin, + vout, + }); + } + + // ----- Decode commitment tree data and construct block ----- + let commitment_tree_data: CommitmentTreeData = + match StoredEntryFixed::from_bytes(raw_commitment_tree_bytes) + .map_err(|error| format!("commitment_tree decode error: {error}")) + { + Ok(entry) => *entry.inner(), + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + }; + + let chain_metadata = zaino_proto::proto::compact_formats::ChainMetadata { + sapling_commitment_tree_size: commitment_tree_data.sizes().sapling(), + orchard_commitment_tree_size: commitment_tree_data.sizes().orchard(), + }; + + let compact_block = zaino_proto::proto::compact_formats::CompactBlock { + proto_version: 4, + height: header.index().height().0 as u64, + hash: header.index().hash().0.to_vec(), + prev_hash: header.index().parent_hash().0.to_vec(), + // NOTE: `time()` is stored in the DB as a wider integer; this cast assumes it is + // always representable in `u32` for the protobuf. + time: header.data().time() as u32, + header: Vec::new(), + vtx, + chain_metadata: Some(chain_metadata), + }; + + // Send the block downstream; if the receiver is gone, stop immediately. + if sender.blocking_send(Ok(compact_block)).is_err() { + return; + } + + // If we just emitted the inclusive end height, stop without stepping cursors further. + if current_height == validated_end_height { + return; + } + + blocks_streamed_in_transaction += 1; + + // Compute the next expected height (used both for contiguity checking and chunk re-seek). 
+ let next_expected_height = if is_ascending { + match expected_height.0.checked_add(1) { + Some(value) => Height(value), + None => { + send_status( + &sender, + tonic::Status::internal( + "expected_height overflow while iterating ascending" + .to_string(), + ), + ); + return; + } + } + } else { + match expected_height.0.checked_sub(1) { + Some(value) => Height(value), + None => { + send_status( + &sender, + tonic::Status::internal( + "expected_height underflow while iterating descending" + .to_string(), + ), + ); + return; + } + } + }; + + // Chunk boundary: drop the current read transaction after N blocks and re-seek in a new + // transaction on the next loop iteration. This avoids a single long-lived snapshot. + if blocks_streamed_in_transaction >= BLOCKS_PER_READ_TRANSACTION { + match next_expected_height.to_bytes() { + Ok(bytes) => { + next_start_key_bytes = bytes; + expected_height = next_expected_height; + break; + } + Err(error) => { + send_status( + &sender, + tonic::Status::internal(format!( + "height to_bytes failed at chunk boundary: {error}" + )), + ); + return; + } + } + } + + // Advance all cursors in lockstep. Headers drives the next key; all others must match it. + let next_headers = match headers_step_or_send(&sender, &headers_cursor, step_op) + { + Ok(value) => value, + Err(()) => return, + }; + + let (next_key, next_header_val) = match next_headers { + Some(pair) => pair, + None => { + // Headers ended early; if we have not reached the requested end height, the + // database no longer satisfies the contiguous-height invariant for this range. 
+ if current_height != validated_end_height { + send_status( + &sender, + tonic::Status::internal(format!( + "headers cursor ended early at height {}; expected to reach {}", + current_height.0, validated_end_height.0 + )), + ); + } + return; + } + }; + + let next_txids_val = match cursor_step_expect_key_or_send( + &sender, + &txids_cursor, + step_op, + next_key, + "txids", + tonic::Status::not_found("block data missing from db (txids)"), + ) { + Some(val) => val, + None => return, + }; + + let next_transparent_val: Option<&[u8]> = if let Some(cursor) = + transparent_cursor.as_ref() + { + match cursor_step_expect_key_or_send( + &sender, + cursor, + step_op, + next_key, + "transparent", + tonic::Status::not_found("block data missing from db (transparent)"), + ) { + Some(val) => Some(val), + None => return, + } + } else { + None + }; + + let next_sapling_val: Option<&[u8]> = + if let Some(cursor) = sapling_cursor.as_ref() { + match cursor_step_expect_key_or_send( + &sender, + cursor, + step_op, + next_key, + "sapling", + tonic::Status::not_found("block data missing from db (sapling)"), + ) { + Some(val) => Some(val), + None => return, + } + } else { + None + }; + + let next_orchard_val: Option<&[u8]> = + if let Some(cursor) = orchard_cursor.as_ref() { + match cursor_step_expect_key_or_send( + &sender, + cursor, + step_op, + next_key, + "orchard", + tonic::Status::not_found("block data missing from db (orchard)"), + ) { + Some(val) => Some(val), + None => return, + } + } else { + None + }; + + let next_commitment_tree_val = match cursor_step_expect_key_or_send( + &sender, + &commitment_tree_cursor, + step_op, + next_key, + "commitment_tree_data", + tonic::Status::not_found( + "block data missing from db (commitment_tree_data)", + ), + ) { + Some(val) => val, + None => return, + }; + + raw_header_bytes = next_header_val; + raw_txids_bytes = next_txids_val; + raw_transparent_bytes = next_transparent_val; + raw_sapling_bytes = next_sapling_val; + raw_orchard_bytes = 
next_orchard_val; + raw_commitment_tree_bytes = next_commitment_tree_val; + + expected_height = next_expected_height; + } + } + }); + + Ok(CompactBlockStream::new(receiver)) + } + /// Fetch database metadata. async fn get_metadata(&self) -> Result { tokio::task::block_in_place(|| { @@ -3308,7 +4361,7 @@ impl DbV1 { // - Validation here is *structural / integrity* validation of stored records plus basic chain // continuity checks (parent hash, header merkle root vs txids). // - It is intentionally “lightweight” and does **not** attempt full consensus verification. - // - NOTE: It is planned to add basic shielded tx data validation using the "block_commitments" + // - NOTE / TODO: It is planned to add basic shielded tx data validation using the "block_commitments" // field in [`BlockData`] however this is currently unimplemented. /// Return `true` if `height` is already known-good. @@ -3731,8 +4784,10 @@ impl DbV1 { /// `validated_set`. /// /// Semantics: - /// - If `end < start`, returns an error. - /// - If the entire range is already validated, returns `(start, end)` without touching LMDB. + /// - Accepts either ordering of `start` and `end`. + /// - Validates the inclusive set `{min(start,end) ..= max(start,end)}` in ascending order. + /// - If the entire normalized range is already validated, returns `(start, end)` without + /// touching LMDB (preserves the caller's original ordering). /// - Otherwise, validates each missing height in ascending order using `validate_block_blocking`. /// /// WARNING: @@ -3744,21 +4799,22 @@ impl DbV1 { start: Height, end: Height, ) -> Result<(Height, Height), FinalisedStateError> { - if end.0 < start.0 { - return Err(FinalisedStateError::Custom( - "invalid block range: end < start".to_string(), - )); - } + // Normalize the range for validation, but preserve `(start, end)` ordering in the return. 
+ let (range_start, range_end) = if start.0 <= end.0 { + (start, end) + } else { + (end, start) + }; let tip = self.validated_tip.load(Ordering::Acquire); - let mut h = std::cmp::max(start.0, tip); + let mut h = std::cmp::max(range_start.0, tip); - if h > end.0 { + if h > range_end.0 { return Ok((start, end)); } tokio::task::block_in_place(|| { - while h <= end.0 { + while h <= range_end.0 { if self.is_validated(h) { h += 1; continue; @@ -3766,19 +4822,16 @@ impl DbV1 { let height = Height(h); let height_bytes = height.to_bytes()?; - let bytes = { - let ro = self.env.begin_ro_txn()?; - let bytes = ro.get(self.headers, &height_bytes).map_err(|e| { - if e == lmdb::Error::NotFound { - FinalisedStateError::Custom("height not found in best chain".into()) - } else { - FinalisedStateError::LmdbError(e) - } - })?; - bytes.to_vec() - }; + let ro = self.env.begin_ro_txn()?; + let bytes = ro.get(self.headers, &height_bytes).map_err(|e| { + if e == lmdb::Error::NotFound { + FinalisedStateError::Custom("height not found in best chain".into()) + } else { + FinalisedStateError::LmdbError(e) + } + })?; - let hash = *StoredEntryVar::::deserialize(&*bytes)? + let hash = *StoredEntryVar::::deserialize(bytes)? 
.inner() .index() .hash(); From 660cf999db65f35b997a13996cc58184b28e0da4 Mon Sep 17 00:00:00 2001 From: idky137 Date: Mon, 26 Jan 2026 23:42:38 +0000 Subject: [PATCH 095/114] updated database interface --- zaino-proto/src/proto/utils.rs | 21 +++ .../chain_index/finalised_state/capability.rs | 18 +- .../src/chain_index/finalised_state/db.rs | 32 +++- .../src/chain_index/finalised_state/db/v0.rs | 175 +++++++++++++++++- .../src/chain_index/finalised_state/db/v1.rs | 12 +- .../src/chain_index/finalised_state/reader.rs | 24 ++- .../chain_index/tests/finalised_state/v0.rs | 24 ++- .../chain_index/tests/finalised_state/v1.rs | 24 ++- .../src/chain_index/types/db/legacy.rs | 18 +- 9 files changed, 309 insertions(+), 39 deletions(-) diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs index 709485b70..188cb1661 100644 --- a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -220,6 +220,27 @@ impl PoolTypeFilter { self.include_orchard } + /// Convert this filter into the corresponding `Vec`. + /// + /// The resulting vector contains each included pool type at most once. 
+ pub fn to_pool_types_vector(&self) -> Vec { + let mut pool_types: Vec = Vec::new(); + + if self.include_transparent { + pool_types.push(PoolType::Transparent); + } + + if self.include_sapling { + pool_types.push(PoolType::Sapling); + } + + if self.include_orchard { + pool_types.push(PoolType::Orchard); + } + + pool_types + } + /// testing only #[allow(dead_code)] pub(crate) fn from_checked_parts( diff --git a/zaino-state/src/chain_index/finalised_state/capability.rs b/zaino-state/src/chain_index/finalised_state/capability.rs index f85e48bfd..58c41d0e4 100644 --- a/zaino-state/src/chain_index/finalised_state/capability.rs +++ b/zaino-state/src/chain_index/finalised_state/capability.rs @@ -81,14 +81,16 @@ use crate::{ chain_index::types::{AddrEventBytes, TransactionHash}, error::FinalisedStateError, read_fixed_le, read_u32_le, read_u8, version, write_fixed_le, write_u32_le, write_u8, - AddrScript, BlockHash, BlockHeaderData, CommitmentTreeData, FixedEncodedLen, Height, - IndexedBlock, OrchardCompactTx, OrchardTxList, Outpoint, SaplingCompactTx, SaplingTxList, - StatusType, TransparentCompactTx, TransparentTxList, TxLocation, TxidList, ZainoVersionedSerde, + AddrScript, BlockHash, BlockHeaderData, CommitmentTreeData, CompactBlockStream, + FixedEncodedLen, Height, IndexedBlock, OrchardCompactTx, OrchardTxList, Outpoint, + SaplingCompactTx, SaplingTxList, StatusType, TransparentCompactTx, TransparentTxList, + TxLocation, TxidList, ZainoVersionedSerde, }; use async_trait::async_trait; use bitflags::bitflags; use core2::io::{self, Read, Write}; +use zaino_proto::proto::utils::PoolTypeFilter; // ***** Capability definition structs ***** @@ -846,12 +848,18 @@ pub trait BlockShieldedExt: Send + Sync { #[async_trait] pub trait CompactBlockExt: Send + Sync { /// Returns the CompactBlock for the given Height. - /// - /// TODO: Add separate range fetch method as this method is slow for fetching large ranges! 
async fn get_compact_block( &self, height: Height, + pool_types: PoolTypeFilter, ) -> Result; + + async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result; } /// `IndexedBlock` materialization extension. diff --git a/zaino-state/src/chain_index/finalised_state/db.rs b/zaino-state/src/chain_index/finalised_state/db.rs index 57922590f..22a73de07 100644 --- a/zaino-state/src/chain_index/finalised_state/db.rs +++ b/zaino-state/src/chain_index/finalised_state/db.rs @@ -58,6 +58,7 @@ pub(crate) mod v1; use v0::DbV0; use v1::DbV1; +use zaino_proto::proto::utils::PoolTypeFilter; use crate::{ chain_index::{ @@ -69,9 +70,9 @@ use crate::{ }, config::BlockCacheConfig, error::FinalisedStateError, - AddrScript, BlockHash, BlockHeaderData, CommitmentTreeData, Height, IndexedBlock, - OrchardCompactTx, OrchardTxList, Outpoint, SaplingCompactTx, SaplingTxList, StatusType, - TransparentCompactTx, TransparentTxList, TxLocation, TxidList, + AddrScript, BlockHash, BlockHeaderData, CommitmentTreeData, CompactBlockStream, Height, + IndexedBlock, OrchardCompactTx, OrchardTxList, Outpoint, SaplingCompactTx, SaplingTxList, + StatusType, TransparentCompactTx, TransparentTxList, TxLocation, TxidList, }; use async_trait::async_trait; @@ -482,11 +483,32 @@ impl CompactBlockExt for DbBackend { async fn get_compact_block( &self, height: Height, + pool_types: PoolTypeFilter, ) -> Result { #[allow(unreachable_patterns)] match self { - Self::V0(db) => db.get_compact_block(height).await, - Self::V1(db) => db.get_compact_block(height).await, + Self::V0(db) => db.get_compact_block(height, pool_types).await, + Self::V1(db) => db.get_compact_block(height, pool_types).await, + _ => Err(FinalisedStateError::FeatureUnavailable("compact_block")), + } + } + + async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + #[allow(unreachable_patterns)] + match 
self { + Self::V0(db) => { + db.get_compact_block_stream(start_height, end_height, pool_types) + .await + } + Self::V1(db) => { + db.get_compact_block_stream(start_height, end_height, pool_types) + .await + } _ => Err(FinalisedStateError::FeatureUnavailable("compact_block")), } } diff --git a/zaino-state/src/chain_index/finalised_state/db/v0.rs b/zaino-state/src/chain_index/finalised_state/db/v0.rs index 4eadc36ae..f850d290d 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v0.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v0.rs @@ -49,11 +49,12 @@ use crate::{ }, config::BlockCacheConfig, error::FinalisedStateError, + local_cache::compact_block_with_pool_types, status::{AtomicStatus, StatusType}, - Height, IndexedBlock, + CompactBlockStream, Height, IndexedBlock, }; -use zaino_proto::proto::compact_formats::CompactBlock; +use zaino_proto::proto::{compact_formats::CompactBlock, service::PoolType, utils::PoolTypeFilter}; use zebra_chain::{ block::{Hash as ZebraHash, Height as ZebraHeight}, @@ -193,17 +194,28 @@ impl DbCore for DbV0 { } } -/// `CompactBlockExt` implementation for v0. +/// [`CompactBlockExt`] capability implementation for [`DbV0`]. /// -/// v0’s primary purpose is serving compact blocks (as used by lightwallet protocols). +/// Exposes `zcash_client_backend`-compatible compact blocks derived from stored header + +/// transaction data. #[async_trait] impl CompactBlockExt for DbV0 { - /// Fetches the compact block at the given height. 
async fn get_compact_block( &self, height: Height, + pool_types: PoolTypeFilter, ) -> Result { - self.get_compact_block(height).await + self.get_compact_block(height, pool_types).await + } + + async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + self.get_compact_block_stream(start_height, end_height, pool_types) + .await } } @@ -810,6 +822,7 @@ impl DbV0 { async fn get_compact_block( &self, height: crate::Height, + pool_types: PoolTypeFilter, ) -> Result { let zebra_hash = zebra_chain::block::Hash::from(self.get_block_hash_by_height(height).await?); @@ -820,9 +833,157 @@ impl DbV0 { let block_bytes: &[u8] = txn.get(self.hashes_to_blocks, &hash_key)?; let block: DbCompactBlock = serde_json::from_slice(block_bytes)?; - Ok(block.0) + // Ok(block.0) + Ok(compact_block_with_pool_types( + block.0, + pool_types.to_pool_types_vector(), + )) }) } + + /// Streams `CompactBlock` messages for an inclusive height range. + /// + /// Legacy implementation for backwards compatibility. + /// + /// Behaviour: + /// - The stream covers the inclusive range `[start_height, end_height]`. + /// - If `start_height <= end_height` the stream is ascending; otherwise it is descending. + /// - Blocks are fetched one-by-one by calling `get_compact_block(height, pool_types)` for + /// each height in the range. + /// + /// Pool filtering: + /// - `pool_types` controls which per-transaction components are populated. + /// - Transactions that have no elements in any requested pool type are omitted from `vtx`, + /// and `CompactTx.index` preserves the original transaction index within the block. + /// + /// Notes: + /// - This is intentionally not optimised (no LMDB cursor walk, no batch/range reads). + /// - Any fetch/deserialize error terminates the stream after emitting a single `tonic::Status`. 
+ async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + let is_ascending: bool = start_height <= end_height; + + let (sender, receiver) = + tokio::sync::mpsc::channel::>(128); + + let env = self.env.clone(); + let heights_to_hashes_database: lmdb::Database = self.heights_to_hashes; + let hashes_to_blocks_database: lmdb::Database = self.hashes_to_blocks; + + let pool_types_vector: Vec = pool_types.to_pool_types_vector(); + + tokio::task::spawn_blocking(move || { + fn lmdb_get_status( + database_name: &'static str, + height: Height, + error: lmdb::Error, + ) -> tonic::Status { + match error { + lmdb::Error::NotFound => tonic::Status::not_found(format!( + "missing db entry in {database_name} at height {}", + height.0 + )), + other_error => tonic::Status::internal(format!( + "lmdb get({database_name}) failed at height {}: {other_error}", + height.0 + )), + } + } + + let mut current_height: Height = start_height; + + loop { + let result: Result = (|| { + let txn = env.begin_ro_txn().map_err(|error| { + tonic::Status::internal(format!("lmdb begin_ro_txn failed: {error}")) + })?; + + // height -> hash (heights_to_hashes) + let zebra_height: ZebraHeight = current_height.into(); + let height_key: [u8; 4] = DbHeight(zebra_height).to_be_bytes(); + + let hash_bytes: &[u8] = txn + .get(heights_to_hashes_database, &height_key) + .map_err(|error| { + lmdb_get_status("heights_to_hashes", current_height, error) + })?; + + let db_hash: DbHash = serde_json::from_slice(hash_bytes).map_err(|error| { + tonic::Status::internal(format!( + "height->hash decode failed at height {}: {error}", + current_height.0 + )) + })?; + + // hash -> block (hashes_to_blocks) + let hash_key: Vec = + serde_json::to_vec(&DbHash(db_hash.0)).map_err(|error| { + tonic::Status::internal(format!( + "hash key encode failed at height {}: {error}", + current_height.0 + )) + })?; + + let block_bytes: &[u8] = txn + 
.get(hashes_to_blocks_database, &hash_key) + .map_err(|error| { + lmdb_get_status("hashes_to_blocks", current_height, error) + })?; + + let db_compact_block: DbCompactBlock = serde_json::from_slice(block_bytes) + .map_err(|error| { + tonic::Status::internal(format!( + "block decode failed at height {}: {error}", + current_height.0 + )) + })?; + + Ok(compact_block_with_pool_types( + db_compact_block.0, + pool_types_vector.clone(), + )) + })(); + + if sender.blocking_send(result).is_err() { + return; + } + + if current_height == end_height { + return; + } + + if is_ascending { + let next_value = match current_height.0.checked_add(1) { + Some(value) => value, + None => { + let _ = sender.blocking_send(Err(tonic::Status::internal( + "height overflow while iterating ascending".to_string(), + ))); + return; + } + }; + current_height = Height(next_value); + } else { + let next_value = match current_height.0.checked_sub(1) { + Some(value) => value, + None => { + let _ = sender.blocking_send(Err(tonic::Status::internal( + "height underflow while iterating descending".to_string(), + ))); + return; + } + }; + current_height = Height(next_value); + } + } + }); + + Ok(CompactBlockStream::new(receiver)) + } } /// Wrapper for `ZebraHeight` used for key encoding. 
diff --git a/zaino-state/src/chain_index/finalised_state/db/v1.rs b/zaino-state/src/chain_index/finalised_state/db/v1.rs index 3d4e10248..c951cdfb6 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v1.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v1.rs @@ -361,8 +361,18 @@ impl CompactBlockExt for DbV1 { async fn get_compact_block( &self, height: Height, + pool_types: PoolTypeFilter, ) -> Result { - self.get_compact_block(height, PoolTypeFilter::default()) + self.get_compact_block(height, pool_types).await + } + + async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + self.get_compact_block_stream(start_height, end_height, pool_types) .await } } diff --git a/zaino-state/src/chain_index/finalised_state/reader.rs b/zaino-state/src/chain_index/finalised_state/reader.rs index 575e70ef1..2bb289f80 100644 --- a/zaino-state/src/chain_index/finalised_state/reader.rs +++ b/zaino-state/src/chain_index/finalised_state/reader.rs @@ -44,15 +44,17 @@ //! `DbReader` is created from an `Arc` using [`ZainoDB::to_reader`](super::ZainoDB::to_reader). //! Prefer passing `DbReader` through query layers rather than passing `ZainoDB` directly. 
+use zaino_proto::proto::utils::PoolTypeFilter; + use crate::{ chain_index::{ finalised_state::capability::CapabilityRequest, types::{AddrEventBytes, TransactionHash}, }, error::FinalisedStateError, - AddrScript, BlockHash, BlockHeaderData, CommitmentTreeData, Height, IndexedBlock, - OrchardCompactTx, OrchardTxList, Outpoint, SaplingCompactTx, SaplingTxList, StatusType, - TransparentCompactTx, TransparentTxList, TxLocation, TxidList, + AddrScript, BlockHash, BlockHeaderData, CommitmentTreeData, CompactBlockStream, Height, + IndexedBlock, OrchardCompactTx, OrchardTxList, Outpoint, SaplingCompactTx, SaplingTxList, + StatusType, TransparentCompactTx, TransparentTxList, TxLocation, TxidList, }; use super::{ @@ -460,14 +462,24 @@ impl DbReader { // ***** CompactBlock Ext ***** /// Returns the CompactBlock for the given Height. - /// - /// TODO: Add separate range fetch method! pub(crate) async fn get_compact_block( &self, height: Height, + pool_types: PoolTypeFilter, ) -> Result { self.db(CapabilityRequest::CompactBlockExt)? - .get_compact_block(height) + .get_compact_block(height, pool_types) + .await + } + + async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + self.db(CapabilityRequest::CompactBlockExt)? 
+ .get_compact_block_stream(start_height, end_height, pool_types) .await } } diff --git a/zaino-state/src/chain_index/tests/finalised_state/v0.rs b/zaino-state/src/chain_index/tests/finalised_state/v0.rs index 66595bfa5..b19360d4f 100644 --- a/zaino-state/src/chain_index/tests/finalised_state/v0.rs +++ b/zaino-state/src/chain_index/tests/finalised_state/v0.rs @@ -5,6 +5,7 @@ use tempfile::TempDir; use zaino_common::network::ActivationHeights; use zaino_common::{DatabaseConfig, Network, StorageConfig}; +use zaino_proto::proto::utils::PoolTypeFilter; use crate::chain_index::finalised_state::reader::DbReader; use crate::chain_index::finalised_state::ZainoDB; @@ -14,6 +15,7 @@ use crate::chain_index::tests::vectors::{ build_mockchain_source, load_test_vectors, TestVectorBlockData, TestVectorData, }; use crate::error::FinalisedStateError; +use crate::local_cache::compact_block_with_pool_types; use crate::{BlockCacheConfig, BlockMetadata, BlockWithMetadata, ChainWork, Height, IndexedBlock}; pub(crate) async fn spawn_v0_zaino_db( @@ -262,8 +264,26 @@ async fn get_compact_blocks() { parent_chain_work = *chain_block.index().chainwork(); - let reader_compact_block = db_reader.get_compact_block(Height(*height)).await.unwrap(); - assert_eq!(compact_block, reader_compact_block); + let reader_compact_block_default = db_reader + .get_compact_block(Height(*height), PoolTypeFilter::default()) + .await + .unwrap(); + let default_compact_block = compact_block_with_pool_types( + compact_block.clone(), + PoolTypeFilter::default().to_pool_types_vector(), + ); + assert_eq!(default_compact_block, reader_compact_block_default); + + let reader_compact_block_all_data = db_reader + .get_compact_block(Height(*height), PoolTypeFilter::includes_all()) + .await + .unwrap(); + let all_data_compact_block = compact_block_with_pool_types( + compact_block, + PoolTypeFilter::includes_all().to_pool_types_vector(), + ); + assert_eq!(all_data_compact_block, reader_compact_block_all_data); + 
println!("CompactBlock at height {height} OK"); } } diff --git a/zaino-state/src/chain_index/tests/finalised_state/v1.rs b/zaino-state/src/chain_index/tests/finalised_state/v1.rs index 4b41d6315..f3ae6ff19 100644 --- a/zaino-state/src/chain_index/tests/finalised_state/v1.rs +++ b/zaino-state/src/chain_index/tests/finalised_state/v1.rs @@ -5,6 +5,7 @@ use tempfile::TempDir; use zaino_common::network::ActivationHeights; use zaino_common::{DatabaseConfig, Network, StorageConfig}; +use zaino_proto::proto::utils::PoolTypeFilter; use crate::chain_index::finalised_state::capability::IndexedBlockExt; use crate::chain_index::finalised_state::db::DbBackend; @@ -17,6 +18,7 @@ use crate::chain_index::tests::vectors::{ }; use crate::chain_index::types::TransactionHash; use crate::error::FinalisedStateError; +use crate::local_cache::compact_block_with_pool_types; use crate::{ AddrScript, BlockCacheConfig, BlockMetadata, BlockWithMetadata, ChainWork, Height, IndexedBlock, Outpoint, @@ -437,8 +439,26 @@ async fn get_compact_blocks() { parent_chain_work = *chain_block.index().chainwork(); - let reader_compact_block = db_reader.get_compact_block(Height(*height)).await.unwrap(); - assert_eq!(compact_block, reader_compact_block); + let reader_compact_block_default = db_reader + .get_compact_block(Height(*height), PoolTypeFilter::default()) + .await + .unwrap(); + let default_compact_block = compact_block_with_pool_types( + compact_block.clone(), + PoolTypeFilter::default().to_pool_types_vector(), + ); + assert_eq!(default_compact_block, reader_compact_block_default); + + let reader_compact_block_all_data = db_reader + .get_compact_block(Height(*height), PoolTypeFilter::includes_all()) + .await + .unwrap(); + let all_data_compact_block = compact_block_with_pool_types( + compact_block, + PoolTypeFilter::includes_all().to_pool_types_vector(), + ); + assert_eq!(all_data_compact_block, reader_compact_block_all_data); + println!("CompactBlock at height {height} OK"); } } diff --git 
a/zaino-state/src/chain_index/types/db/legacy.rs b/zaino-state/src/chain_index/types/db/legacy.rs
index ebde081fa..5bc14e992 100644
--- a/zaino-state/src/chain_index/types/db/legacy.rs
+++ b/zaino-state/src/chain_index/types/db/legacy.rs
@@ -1153,6 +1153,12 @@ impl IndexedBlock {
     }
 
     /// Converts this `IndexedBlock` into a CompactBlock protobuf message using proto v4 format.
+    ///
+    /// NOTE: This method currently includes transparent tx data in the compact block produced,
+    /// `zaino-state::local_cache::compact_block_with_pool_types` should be used to selectively
+    /// remove tx data by pool type. Alternatively a
+    /// `zaino-proto::proto::utils::PoolTypeFilter` could be added as an input to this method,
+    /// with tx data being added selectively here.
     pub fn to_compact_block(&self) -> zaino_proto::proto::compact_formats::CompactBlock {
         // NOTE: Returns u64::MAX if the block is not in the best chain.
         let height: u64 = self.height().0.into();
@@ -1163,17 +1169,7 @@
         let vtx: Vec = self
             .transactions()
             .iter()
-            .filter_map(|tx| {
-                let has_shielded = !tx.sapling().spends().is_empty()
-                    || !tx.sapling().outputs().is_empty()
-                    || !tx.orchard().actions().is_empty();
-
-                if !has_shielded {
-                    return None;
-                }
-
-                Some(tx.to_compact_tx(None))
-            })
+            .map(|tx| tx.to_compact_tx(None))
             .collect();
 
         let sapling_commitment_tree_size = self.commitment_tree_data().sizes().sapling();

From 75bc97197d79da682243e3aa548bae9ea3a15c52 Mon Sep 17 00:00:00 2001
From: idky137
Date: Tue, 27 Jan 2026 15:19:51 +0000
Subject: [PATCH 096/114] added db migration (fixed migrationmanager), added db
 CHANGELOG

---
 .../chain_index/finalised_state/CHANGELOG.md  | 148 ++++++++++++++++++
 .../chain_index/finalised_state/capability.rs |   2 +-
 .../chain_index/finalised_state/migrations.rs | 126 ++++++++++++++-
 3 files changed, 270 insertions(+), 6 deletions(-)
 create mode 100644 zaino-state/src/chain_index/finalised_state/CHANGELOG.md

diff --git 
a/zaino-state/src/chain_index/finalised_state/CHANGELOG.md b/zaino-state/src/chain_index/finalised_state/CHANGELOG.md new file mode 100644 index 000000000..e4c044f29 --- /dev/null +++ b/zaino-state/src/chain_index/finalised_state/CHANGELOG.md @@ -0,0 +1,148 @@ +Zaino Finalised-State Database Changelog +======================================= + +Format +------ +One entry per database version bump (major / minor / patch). Keep entries concise and factual. + +Entry template: + +-------------------------------------------------------------------------------- +DB VERSION vX.Y.Z (from vA.B.C) +Date: YYYY-MM-DD +-------------------------------------------------------------------------------- + +Summary +- <1–3 bullets describing intent of the change> + +On-disk schema +- Layout: + - +- Tables: + - Added: <...> + - Removed: <...> + - Renamed: new> +- Encoding: + - Keys: + - Values: + - Checksums / validation: +- Invariants: + - + +API / capabilities +- Capability changes: + - Added: <...> + - Removed: <...> + - Changed: <...> +- Public surface changes: + - Added: + - Removed: + - Changed: + +Migration +- Strategy: +- Backfill: +- Completion criteria: +- Failure handling: + +-------------------------------------------------------------------------------- +DB VERSION v1.0.0 (from v0.0.0) +Date: 2025-08-13 +-------------------------------------------------------------------------------- + +Summary +- Replace legacy v0 schema with versioned v1 schema and expanded indices / query surface. +- Introduce stronger integrity checks and on-demand validation for v1 read paths. +- Keep compact block retrieval available (compatibility surface). + +On-disk schema +- Layout: + - Move to per-network version directory layout: //v1/ + - VERSION_DIRS begins at ["v1"] (new versions append, no gaps). 
+- Tables: + - Added (v1): headers, txids, transparent, sapling, orchard, commitment_tree_data, heights (hash->height), + plus v1 indices for tx locations, spent outpoints, and transparent address history. + - Removed / superseded (v0): legacy compact-block-streamer oriented storage layout. +- Encoding: + - v1 values are stored as checksum-protected `StoredEntryVar` / `StoredEntryFixed` entries. + - Canonical key bytes are used for checksum verification via `verify(key)`. +- Invariants (v1 validation enforces): + - Per-table checksum verification for all per-block tables. + - Chain continuity: header parent hash at height h matches stored hash at h-1. + - Merkle consistency: header merkle root matches computed root from stored txid list. + - Index consistency: + - hash->height mapping must match the queried height. + - spent + addr history records must exist and match for transparent inputs/outputs. + +API / capabilities +- Capability changes: + - v0: READ_CORE | WRITE_CORE | COMPACT_BLOCK_EXT + - v1: Capability::LATEST (block core/transparent/shielded, indexed block, transparent history, etc.) +- Public surface changes: + - Added (v1-only; FeatureUnavailable on v0): + - BlockCoreExt: header/txids/range fetch, txid<->location lookup + - BlockTransparentExt: per-tx and per-block transparent access + ranges + - BlockShieldedExt: sapling/orchard per-tx and per-block access + ranges, commitment tree data (+ ranges) + - IndexedBlockExt: indexed block retrieval + - TransparentHistExt: addr records, range queries, balance/utxos, outpoint spender(s) + - Preserved: + - CompactBlockExt remains available for both v0 and v1. + +Migration +- Strategy: shadow build + promotion (no in-place transformation of v0). +- Backfill: rebuild all v1 tables/indices by ingesting chain data. +- Completion criteria: + - metadata indicates migrated/ready, and required tables exist through the tip. + - validation succeeds for the contiguous best chain range as built. 
+- Failure handling:
+  - do not promote partially built v1; continue using v0 if present; rebuild v1 on retry.
+
+--------------------------------------------------------------------------------
+DB VERSION v1.1.0 (from v1.0.0)
+Date: 2026-01-27
+--------------------------------------------------------------------------------
+
+Summary
+- Minor version bump to reflect updated compact block API contract (streaming + pool filtering semantics).
+- No schema or encoding changes; metadata-only migration updates persisted DB version marker.
+
+On-disk schema
+- Layout:
+  - No changes.
+- Tables:
+  - Added: None.
+  - Removed: None.
+  - Renamed: None.
+- Encoding:
+  - Keys: No changes.
+  - Values: No changes.
+  - Checksums / validation: No changes.
+- Invariants:
+  - No changes.
+
+API / capabilities
+- Capability changes:
+  - Added: None.
+  - Removed: None.
+  - Changed:
+    - COMPACT_BLOCK_EXT contract updated for v1 backends:
+      - get_compact_block(...) now takes a PoolTypeFilter, which selects which pool data is materialized into the returned compact block.
+      - get_compact_block_stream(...) added.
+
+- Public surface changes:
+  - Added:
+    - CompactBlockExt::get_compact_block_stream(start_height, end_height, pool_types: PoolTypeFilter).
+  - Removed: None.
+  - Changed:
+    - CompactBlockExt::get_compact_block(height, pool_types: PoolTypeFilter) signature updated.
+    - Compact block contents are now filtered by PoolTypeFilter, and may include transparent transaction data (vin/vout) when selected.
+ +Migration +- Strategy: +- Backfill: +- Completion criteria: +- Failure handling: + +-------------------------------------------------------------------------------- +(append new entries below) +-------------------------------------------------------------------------------- diff --git a/zaino-state/src/chain_index/finalised_state/capability.rs b/zaino-state/src/chain_index/finalised_state/capability.rs index 58c41d0e4..22e542d74 100644 --- a/zaino-state/src/chain_index/finalised_state/capability.rs +++ b/zaino-state/src/chain_index/finalised_state/capability.rs @@ -452,7 +452,7 @@ impl DbVersion { } // V1: Adds chainblockv1 and transparent transaction history data. - (1, 0) => { + (1, 0) | (1, 1) => { Capability::READ_CORE | Capability::WRITE_CORE | Capability::BLOCK_CORE_EXT diff --git a/zaino-state/src/chain_index/finalised_state/migrations.rs b/zaino-state/src/chain_index/finalised_state/migrations.rs index 24be5f635..d648b5674 100644 --- a/zaino-state/src/chain_index/finalised_state/migrations.rs +++ b/zaino-state/src/chain_index/finalised_state/migrations.rs @@ -19,6 +19,10 @@ //! - holds the router, config, current and target versions, and a `BlockchainSource`, //! - repeatedly selects and runs the next migration via `get_migration()`. //! +//! - [`MigrationStep`]: +//! - enum-based dispatch wrapper used by `MigrationManager` to select between multiple concrete +//! `Migration` implementations (Rust cannot return different `impl Trait` types from a `match`). +//! //! - [`capability::MigrationStatus`]: //! - stored in `DbMetadata` and used to resume work safely after shutdown. //! @@ -62,10 +66,24 @@ //! - Promote shadow to primary via `router.promote_shadow()`. //! - Delete the old v0 directory asynchronously once all strong references are dropped. //! +//! ## v1.0.0 → v1.1.0 +//! +//! `Migration1_0_0To1_1_0` is a **minor version bump** with **no schema changes**, but does include +//! changes to the external ZainoDB API. +//! +//! 
It updates the stored `DbMetadata` version to reflect the v1.1.0 API contract: +//! - `CompactBlockExt` now includes `get_compact_block_stream(...)`. +//! - compact block transaction materialization is now selected via `PoolTypeFilter` (including +//! optional transparent data). +//! +//! This release also introduces [`MigrationStep`], the enum-based migration dispatcher used by +//! [`MigrationManager`], to allow selecting between multiple concrete migration implementations. +//! //! # Development: adding a new migration step //! //! 1. Introduce a new `struct MigrationX_Y_ZToA_B_C;` and implement `Migration`. -//! 2. Register it in `MigrationManager::get_migration()` by matching on the *current* version. +//! 2. Add a new `MigrationStep` variant and register it in `MigrationManager::get_migration()` by +//! matching on the *current* version. //! 3. Ensure the migration is: //! - deterministic, //! - resumable (use `DbMetadata::migration_status` and/or shadow tip), @@ -115,7 +133,9 @@ use super::{ }; use crate::{ - chain_index::{source::BlockchainSource, types::GENESIS_HEIGHT}, + chain_index::{ + finalised_state::capability::DbMetadata, source::BlockchainSource, types::GENESIS_HEIGHT, + }, config::BlockCacheConfig, error::FinalisedStateError, BlockHash, BlockMetadata, BlockWithMetadata, ChainWork, Height, IndexedBlock, @@ -245,7 +265,7 @@ impl MigrationManager { self.source.clone(), ) .await?; - self.current_version = migration.to_version(); + self.current_version = migration.to_version::(); } Ok(()) @@ -255,13 +275,14 @@ impl MigrationManager { /// /// This must be updated whenever a new supported DB version is introduced. The match is strict: /// if a step is missing, migration is aborted rather than attempting an unsafe fallback. 
- fn get_migration(&self) -> Result, FinalisedStateError> { + fn get_migration(&self) -> Result { match ( self.current_version.major, self.current_version.minor, self.current_version.patch, ) { - (0, 0, 0) => Ok(Migration0_0_0To1_0_0), + (0, 0, 0) => Ok(MigrationStep::Migration0_0_0To1_0_0(Migration0_0_0To1_0_0)), + (1, 0, 0) => Ok(MigrationStep::Migration1_0_0To1_1_0(Migration1_0_0To1_1_0)), (_, _, _) => Err(FinalisedStateError::Custom(format!( "Missing migration from version {}", self.current_version @@ -270,6 +291,41 @@ impl MigrationManager { } } +/// Concrete migration step selector. +/// +/// Rust cannot return `impl Migration` from a `match` that selects between multiple concrete +/// migration types. `MigrationStep` is the enum-based dispatch wrapper used by [`MigrationManager`] +/// to select a step and call `migrate(...)`, and to read the step’s `TO_VERSION`. +enum MigrationStep { + Migration0_0_0To1_0_0(Migration0_0_0To1_0_0), + Migration1_0_0To1_1_0(Migration1_0_0To1_1_0), +} + +impl MigrationStep { + fn to_version(&self) -> DbVersion { + match self { + MigrationStep::Migration0_0_0To1_0_0(_step) => { + >::TO_VERSION + } + MigrationStep::Migration1_0_0To1_1_0(_step) => { + >::TO_VERSION + } + } + } + + async fn migrate( + &self, + router: Arc, + cfg: BlockCacheConfig, + source: T, + ) -> Result<(), FinalisedStateError> { + match self { + MigrationStep::Migration0_0_0To1_0_0(step) => step.migrate(router, cfg, source).await, + MigrationStep::Migration1_0_0To1_1_0(step) => step.migrate(router, cfg, source).await, + } + } +} + // ***** Migrations ***** /// Major migration: v0.0.0 → v1.0.0. @@ -468,3 +524,63 @@ impl Migration for Migration0_0_0To1_0_0 { Ok(()) } } + +/// Minor migration: v1.0.0 → v1.1.0. +/// +/// There are **no on-disk schema changes** in this step. +/// +/// This release updates the *API contract* for compact blocks: +/// - [`CompactBlockExt`] adds `get_compact_block_stream(...)`. 
+/// - Compact block transaction materialization is selected via [`PoolTypeFilter`], which may include +/// transparent data. +/// +/// This release also introduces [`MigrationStep`], the enum-based migration dispatcher used by +/// [`MigrationManager`], to allow selecting between multiple concrete migration implementations. +/// +/// Because the persisted schema contract is unchanged, this migration only updates the stored +/// [`DbMetadata::version`] from `1.0.0` to `1.1.0`. +/// +/// Safety and resumability: +/// - Idempotent: if run more than once, it will re-write the same metadata. +/// - No shadow database and no table rebuild. +/// - Clears any stale in-progress migration status. +struct Migration1_0_0To1_1_0; + +#[async_trait] +impl Migration for Migration1_0_0To1_1_0 { + const CURRENT_VERSION: DbVersion = DbVersion { + major: 1, + minor: 0, + patch: 0, + }; + + const TO_VERSION: DbVersion = DbVersion { + major: 1, + minor: 1, + patch: 0, + }; + + async fn migrate( + &self, + router: Arc, + _cfg: BlockCacheConfig, + _source: T, + ) -> Result<(), FinalisedStateError> { + info!("Starting v1.0.0 → v1.1.0 migration (metadata-only)."); + + let mut metadata: DbMetadata = router.get_metadata().await?; + + // Preserve the schema hash because there are no schema changes in v1.1.0. + // Only advance the version marker to reflect the new API contract. + metadata.version = >::TO_VERSION; + + // Outside of migrations this should be `Empty`. This step performs no build phases, so we + // ensure we do not leave a stale in-progress status behind. 
+ metadata.migration_status = MigrationStatus::Empty; + + router.update_metadata(metadata).await?; + + info!("v1.0.0 to v1.1.0 migration complete."); + Ok(()) + } +} From 1aecb9c2bb5157631d8f381acd20e825bf61290c Mon Sep 17 00:00:00 2001 From: idky137 Date: Tue, 27 Jan 2026 15:30:54 +0000 Subject: [PATCH 097/114] added db compact block streamer tests --- .../src/chain_index/finalised_state/reader.rs | 2 +- .../chain_index/tests/finalised_state/v0.rs | 51 +++++++++++++++++++ .../chain_index/tests/finalised_state/v1.rs | 51 +++++++++++++++++++ 3 files changed, 103 insertions(+), 1 deletion(-) diff --git a/zaino-state/src/chain_index/finalised_state/reader.rs b/zaino-state/src/chain_index/finalised_state/reader.rs index 2bb289f80..24df489af 100644 --- a/zaino-state/src/chain_index/finalised_state/reader.rs +++ b/zaino-state/src/chain_index/finalised_state/reader.rs @@ -472,7 +472,7 @@ impl DbReader { .await } - async fn get_compact_block_stream( + pub(crate) async fn get_compact_block_stream( &self, start_height: Height, end_height: Height, diff --git a/zaino-state/src/chain_index/tests/finalised_state/v0.rs b/zaino-state/src/chain_index/tests/finalised_state/v0.rs index b19360d4f..ec7ba5c9d 100644 --- a/zaino-state/src/chain_index/tests/finalised_state/v0.rs +++ b/zaino-state/src/chain_index/tests/finalised_state/v0.rs @@ -287,3 +287,54 @@ async fn get_compact_blocks() { println!("CompactBlock at height {height} OK"); } } + +#[tokio::test(flavor = "multi_thread")] +async fn get_compact_block_stream() { + use futures::StreamExt; + + init_tracing(); + + let (TestVectorData { blocks, .. 
}, _db_dir, _zaino_db, db_reader) = + load_vectors_v0db_and_reader().await; + + let start_height = Height(blocks.first().unwrap().height); + let end_height = Height(blocks.last().unwrap().height); + + for pool_type_filter in [PoolTypeFilter::default(), PoolTypeFilter::includes_all()] { + let compact_block_stream = db_reader + .get_compact_block_stream(start_height, end_height, pool_type_filter.clone()) + .await + .unwrap(); + + futures::pin_mut!(compact_block_stream); + + let mut expected_next_height_u32: u32 = start_height.0; + let mut streamed_block_count: usize = 0; + + while let Some(block_result) = compact_block_stream.next().await { + let streamed_compact_block = block_result.unwrap(); + + let streamed_height_u32: u32 = u32::try_from(streamed_compact_block.height).unwrap(); + + assert_eq!(streamed_height_u32, expected_next_height_u32); + + let singular_compact_block = db_reader + .get_compact_block(Height(streamed_height_u32), pool_type_filter.clone()) + .await + .unwrap(); + + assert_eq!(singular_compact_block, streamed_compact_block); + + expected_next_height_u32 = expected_next_height_u32.saturating_add(1); + streamed_block_count = streamed_block_count.saturating_add(1); + } + + let expected_block_count: usize = (end_height + .0 + .saturating_sub(start_height.0) + .saturating_add(1)) as usize; + + assert_eq!(streamed_block_count, expected_block_count); + assert_eq!(expected_next_height_u32, end_height.0.saturating_add(1)); + } +} diff --git a/zaino-state/src/chain_index/tests/finalised_state/v1.rs b/zaino-state/src/chain_index/tests/finalised_state/v1.rs index f3ae6ff19..126701bf0 100644 --- a/zaino-state/src/chain_index/tests/finalised_state/v1.rs +++ b/zaino-state/src/chain_index/tests/finalised_state/v1.rs @@ -463,6 +463,57 @@ async fn get_compact_blocks() { } } +#[tokio::test(flavor = "multi_thread")] +async fn get_compact_block_stream() { + use futures::StreamExt; + + init_tracing(); + + let (TestVectorData { blocks, .. 
}, _db_dir, _zaino_db, db_reader) = + load_vectors_v1db_and_reader().await; + + let start_height = Height(blocks.first().unwrap().height); + let end_height = Height(blocks.last().unwrap().height); + + for pool_type_filter in [PoolTypeFilter::default(), PoolTypeFilter::includes_all()] { + let compact_block_stream = db_reader + .get_compact_block_stream(start_height, end_height, pool_type_filter.clone()) + .await + .unwrap(); + + futures::pin_mut!(compact_block_stream); + + let mut expected_next_height_u32: u32 = start_height.0; + let mut streamed_block_count: usize = 0; + + while let Some(block_result) = compact_block_stream.next().await { + let streamed_compact_block = block_result.unwrap(); + + let streamed_height_u32: u32 = u32::try_from(streamed_compact_block.height).unwrap(); + + assert_eq!(streamed_height_u32, expected_next_height_u32); + + let singular_compact_block = db_reader + .get_compact_block(Height(streamed_height_u32), pool_type_filter.clone()) + .await + .unwrap(); + + assert_eq!(singular_compact_block, streamed_compact_block); + + expected_next_height_u32 = expected_next_height_u32.saturating_add(1); + streamed_block_count = streamed_block_count.saturating_add(1); + } + + let expected_block_count: usize = (end_height + .0 + .saturating_sub(start_height.0) + .saturating_add(1)) as usize; + + assert_eq!(streamed_block_count, expected_block_count); + assert_eq!(expected_next_height_u32, end_height.0.saturating_add(1)); + } +} + #[tokio::test(flavor = "multi_thread")] async fn get_faucet_txids() { init_tracing(); From ad23141810364b0ddcd72b63297445eaa4f61e08 Mon Sep 17 00:00:00 2001 From: idky137 Date: Tue, 27 Jan 2026 16:31:20 +0000 Subject: [PATCH 098/114] updated db changelog --- .../src/chain_index/finalised_state/CHANGELOG.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/zaino-state/src/chain_index/finalised_state/CHANGELOG.md b/zaino-state/src/chain_index/finalised_state/CHANGELOG.md index e4c044f29..0b5659203 
100644 --- a/zaino-state/src/chain_index/finalised_state/CHANGELOG.md +++ b/zaino-state/src/chain_index/finalised_state/CHANGELOG.md @@ -138,10 +138,13 @@ API / capabilities - Compact block contents are now filtered by PoolTypeFilter, and may include transparent transaction data (vin/vout) when selected. Migration -- Strategy: -- Backfill: -- Completion criteria: -- Failure handling: +- Strategy: In-place (metadata-only). +- Backfill: None. +- Completion criteria: + - DbMetadata.version updated from 1.0.0 to 1.1.0. + - DbMetadata.migration_status reset to Empty. +- Failure handling: + - Idempotent: re-running re-writes the same metadata; no partial state beyond metadata. -------------------------------------------------------------------------------- (append new entries below) From c3682e31ce19e7e2b3ca7e861859e8f5b92ca0d8 Mon Sep 17 00:00:00 2001 From: idky137 Date: Wed, 28 Jan 2026 13:41:59 +0000 Subject: [PATCH 099/114] review comment changes --- zaino-state/src/backends/fetch.rs | 2 +- zaino-state/src/backends/state.rs | 4 ++-- zaino-state/src/chain_index/finalised_state/db/v0.rs | 5 ++--- zaino-state/src/chain_index/tests/finalised_state/v0.rs | 4 ++-- zaino-state/src/chain_index/tests/finalised_state/v1.rs | 4 ++-- zaino-state/src/local_cache.rs | 2 +- 6 files changed, 10 insertions(+), 11 deletions(-) diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index e22389033..e802243d8 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -840,7 +840,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { height.to_string(), ).await { Ok(mut block) => { - block = compact_block_with_pool_types(block, validated_request.pool_types()); + block = compact_block_with_pool_types(block, &validated_request.pool_types()); if channel_tx.send(Ok(block)).await.is_err() { break; } diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index 3a225f18c..d5ccec0ae 100644 --- 
a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -599,7 +599,7 @@ impl StateServiceSubscriber { if trim_non_nullifier { block = compact_block_to_nullifiers(block); } else { - block = compact_block_with_pool_types(block, pool_types.clone()); + block = compact_block_with_pool_types(block, &pool_types); } Ok(block) } @@ -637,7 +637,7 @@ impl StateServiceSubscriber { if trim_non_nullifier { block = compact_block_to_nullifiers(block); } else { - block = compact_block_with_pool_types(block, pool_types.clone()); + block = compact_block_with_pool_types(block, &pool_types); } Ok(block) } diff --git a/zaino-state/src/chain_index/finalised_state/db/v0.rs b/zaino-state/src/chain_index/finalised_state/db/v0.rs index f850d290d..0c7bc1436 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v0.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v0.rs @@ -833,10 +833,9 @@ impl DbV0 { let block_bytes: &[u8] = txn.get(self.hashes_to_blocks, &hash_key)?; let block: DbCompactBlock = serde_json::from_slice(block_bytes)?; - // Ok(block.0) Ok(compact_block_with_pool_types( block.0, - pool_types.to_pool_types_vector(), + &pool_types.to_pool_types_vector(), )) }) } @@ -944,7 +943,7 @@ impl DbV0 { Ok(compact_block_with_pool_types( db_compact_block.0, - pool_types_vector.clone(), + &pool_types_vector, )) })(); diff --git a/zaino-state/src/chain_index/tests/finalised_state/v0.rs b/zaino-state/src/chain_index/tests/finalised_state/v0.rs index ec7ba5c9d..d4094895a 100644 --- a/zaino-state/src/chain_index/tests/finalised_state/v0.rs +++ b/zaino-state/src/chain_index/tests/finalised_state/v0.rs @@ -270,7 +270,7 @@ async fn get_compact_blocks() { .unwrap(); let default_compact_block = compact_block_with_pool_types( compact_block.clone(), - PoolTypeFilter::default().to_pool_types_vector(), + &PoolTypeFilter::default().to_pool_types_vector(), ); assert_eq!(default_compact_block, reader_compact_block_default); @@ -280,7 +280,7 @@ async fn get_compact_blocks() 
{ .unwrap(); let all_data_compact_block = compact_block_with_pool_types( compact_block, - PoolTypeFilter::includes_all().to_pool_types_vector(), + &PoolTypeFilter::includes_all().to_pool_types_vector(), ); assert_eq!(all_data_compact_block, reader_compact_block_all_data); diff --git a/zaino-state/src/chain_index/tests/finalised_state/v1.rs b/zaino-state/src/chain_index/tests/finalised_state/v1.rs index 126701bf0..bb25a45ca 100644 --- a/zaino-state/src/chain_index/tests/finalised_state/v1.rs +++ b/zaino-state/src/chain_index/tests/finalised_state/v1.rs @@ -445,7 +445,7 @@ async fn get_compact_blocks() { .unwrap(); let default_compact_block = compact_block_with_pool_types( compact_block.clone(), - PoolTypeFilter::default().to_pool_types_vector(), + &PoolTypeFilter::default().to_pool_types_vector(), ); assert_eq!(default_compact_block, reader_compact_block_default); @@ -455,7 +455,7 @@ async fn get_compact_blocks() { .unwrap(); let all_data_compact_block = compact_block_with_pool_types( compact_block, - PoolTypeFilter::includes_all().to_pool_types_vector(), + &PoolTypeFilter::includes_all().to_pool_types_vector(), ); assert_eq!(all_data_compact_block, reader_compact_block_all_data); diff --git a/zaino-state/src/local_cache.rs b/zaino-state/src/local_cache.rs index 7dfb1d29e..324a2b446 100644 --- a/zaino-state/src/local_cache.rs +++ b/zaino-state/src/local_cache.rs @@ -383,7 +383,7 @@ pub(crate) fn display_txids_to_server(txids: Vec) -> Result> /// Note: for backwards compatibility an empty vector will return Sapling and Orchard Tx info. 
pub(crate) fn compact_block_with_pool_types( mut block: CompactBlock, - pool_types: Vec, + pool_types: &[PoolType], ) -> CompactBlock { if pool_types.is_empty() { for compact_tx in &mut block.vtx { From d3c727af1eda8ffc4d8df6e7f3d82fff3197d74e Mon Sep 17 00:00:00 2001 From: idky137 Date: Thu, 29 Jan 2026 17:09:03 +0000 Subject: [PATCH 100/114] fix v1::get_compact_blocks test --- .../src/chain_index/types/db/legacy.rs | 30 +++---------------- zaino-state/src/local_cache.rs | 16 ++++++++++ 2 files changed, 20 insertions(+), 26 deletions(-) diff --git a/zaino-state/src/chain_index/types/db/legacy.rs b/zaino-state/src/chain_index/types/db/legacy.rs index 5bc14e992..622228a9f 100644 --- a/zaino-state/src/chain_index/types/db/legacy.rs +++ b/zaino-state/src/chain_index/types/db/legacy.rs @@ -1467,35 +1467,13 @@ impl CompactTxData { ) .collect(); - let vout = self - .transparent - .vout - .iter() - .map(|tx_out| TxOut { - value: tx_out.value, - script_pub_key: tx_out.script_hash.to_vec(), - }) - .collect(); + let vout = self.transparent().compact_vout(); - let vin = self - .transparent - .vin - .iter() - .filter_map(|t_in| { - if t_in.is_null_prevout() { - None - } else { - Some(CompactTxIn { - prevout_txid: t_in.prevout_txid.to_vec(), - prevout_index: t_in.prevout_index, - }) - } - }) - .collect(); + let vin = self.transparent().compact_vin(); zaino_proto::proto::compact_formats::CompactTx { index: self.index(), - txid: self.txid().bytes_in_display_order().to_vec(), + txid: self.txid().0.to_vec(), fee, spends, outputs, @@ -1760,7 +1738,7 @@ impl TxInCompact { self.prevout_index } - /// `true` iff this input is the special “null” out-point used by a + /// `true` if this input is the special “null” out-point used by a /// coinbase transaction (all-zero txid, index 0xffff_ffff). 
pub fn is_null_prevout(&self) -> bool { self.prevout_txid == [0u8; 32] && self.prevout_index == u32::MAX diff --git a/zaino-state/src/local_cache.rs b/zaino-state/src/local_cache.rs index 324a2b446..967b9bbf0 100644 --- a/zaino-state/src/local_cache.rs +++ b/zaino-state/src/local_cache.rs @@ -391,6 +391,13 @@ pub(crate) fn compact_block_with_pool_types( compact_tx.vin.clear(); compact_tx.vout.clear(); } + + // Omit transactions that have no Sapling/Orchard elements. + block.vtx.retain(|compact_tx| { + !compact_tx.spends.is_empty() + || !compact_tx.outputs.is_empty() + || !compact_tx.actions.is_empty() + }); } else { for compact_tx in &mut block.vtx { // strip out transparent inputs if not Requested @@ -408,6 +415,15 @@ pub(crate) fn compact_block_with_pool_types( compact_tx.actions.clear(); } } + + // Omit transactions that have no elements in any requested pool type. + block.vtx.retain(|compact_tx| { + !compact_tx.vin.is_empty() + || !compact_tx.vout.is_empty() + || !compact_tx.spends.is_empty() + || !compact_tx.outputs.is_empty() + || !compact_tx.actions.is_empty() + }); } block From 30484010f27735b3959843f2d706f276f3b96486 Mon Sep 17 00:00:00 2001 From: idky137 Date: Thu, 29 Jan 2026 17:15:11 +0000 Subject: [PATCH 101/114] fixed test_pool_type_filter_t_z_o --- zaino-proto/src/proto/utils.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs index 188cb1661..ef5804712 100644 --- a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -184,9 +184,9 @@ impl PoolTypeFilter { // guard against returning an invalid state this shouls never happen. 
if filter.is_empty() { - return Ok(Self::default()); + Ok(Self::default()) } else { - return Ok(filter); + Ok(filter) } } } @@ -301,7 +301,7 @@ mod test { assert_eq!( PoolTypeFilter::new_from_pool_types(&pools), - Ok(PoolTypeFilter::from_checked_parts(true, true, false)) + Ok(PoolTypeFilter::from_checked_parts(true, true, true)) ); } From 863a9cd29642ba55e542ab83c4c7ed605d0d516a Mon Sep 17 00:00:00 2001 From: idky137 Date: Thu, 29 Jan 2026 17:23:39 +0000 Subject: [PATCH 102/114] fixed get_transparent_data_from_compact_block_when_requested --- integration-tests/tests/state_service.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index e80bd7543..38b5048f7 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -2440,7 +2440,7 @@ mod zebra { max_entries: 0, }; let fetch_service_sapling_subtree_roots = fetch_service_subscriber - .get_subtree_roots(sapling_subtree_roots_request.clone()) + .get_subtree_roots(sapling_subtree_roots_request) .await .unwrap() .map(Result::unwrap) @@ -2859,7 +2859,7 @@ mod zebra { } #[tokio::test(flavor = "multi_thread")] - async fn gat_transparent_data_from_compact_block_when_requested() { + async fn get_transparent_data_from_compact_block_when_requested() { let ( mut test_manager, _fetch_service, @@ -2902,10 +2902,22 @@ mod zebra { state_service_taddress_balance ); + let chain_height = state_service_subscriber + .get_latest_block() + .await + .unwrap() + .height; + let compact_block_range = state_service_subscriber .get_block_range(BlockRange { - start: None, - end: None, + start: Some(BlockId { + height: 0, + hash: Vec::new(), + }), + end: Some(BlockId { + height: chain_height, + hash: Vec::new(), + }), pool_types: pool_types_into_i32_vec( [PoolType::Transparent, PoolType::Sapling, PoolType::Orchard].to_vec(), ), From 32b71044947b0901f8722c64ebf4e18ff2ad35b7 Mon Sep 
17 00:00:00 2001 From: idky137 Date: Thu, 29 Jan 2026 17:25:04 +0000 Subject: [PATCH 103/114] clippy fix --- zaino-state/src/chain_index/types/db/legacy.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/zaino-state/src/chain_index/types/db/legacy.rs b/zaino-state/src/chain_index/types/db/legacy.rs index 622228a9f..64dedbc15 100644 --- a/zaino-state/src/chain_index/types/db/legacy.rs +++ b/zaino-state/src/chain_index/types/db/legacy.rs @@ -33,7 +33,6 @@ use core2::io::{self, Read, Write}; use hex::{FromHex, ToHex}; use primitive_types::U256; use std::{fmt, io::Cursor}; -use zaino_proto::proto::compact_formats::{CompactTxIn, TxOut}; use zebra_chain::serialization::BytesInDisplayOrder as _; use crate::chain_index::encoding::{ From 29474bd162fbe40b9d1a1127ed58acff8b7274c0 Mon Sep 17 00:00:00 2001 From: pacu Date: Thu, 29 Jan 2026 15:51:05 -0300 Subject: [PATCH 104/114] move `zaino_state::utils::blockid_to_hashorheight` to zaino-proto --- Cargo.lock | 2 ++ zaino-proto/Cargo.toml | 3 +++ zaino-proto/src/proto/utils.rs | 19 ++++++++++++++++++- zaino-state/src/backends/fetch.rs | 4 ++-- zaino-state/src/backends/state.rs | 4 ++-- zaino-state/src/utils.rs | 19 +------------------ 6 files changed, 28 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b9e23b9fc..6942f39fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8426,6 +8426,8 @@ dependencies = [ "tonic 0.12.3", "tonic-build 0.12.3", "which 4.4.2", + "zebra-chain 3.1.0", + "zebra-state", ] [[package]] diff --git a/zaino-proto/Cargo.toml b/zaino-proto/Cargo.toml index 1ddbcc194..00ef41261 100644 --- a/zaino-proto/Cargo.toml +++ b/zaino-proto/Cargo.toml @@ -9,6 +9,9 @@ license = { workspace = true } version = { workspace = true } [dependencies] +zebra-state = { workspace = true } +zebra-chain = { workspace = true } + # Miscellaneous Workspace tonic = { workspace = true } diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs index ef5804712..47e750516 100644 --- 
a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -1,4 +1,6 @@ -use crate::proto::service::{BlockRange, PoolType}; +use zebra_state::HashOrHeight; +use zebra_chain::block::Height; +use crate::proto::service::{BlockId, BlockRange, PoolType}; #[derive(Debug, PartialEq, Eq)] /// Errors that can arise when mapping `PoolType` from an `i32` value. @@ -119,6 +121,7 @@ impl ValidatedBlockRangeRequest { (self.start, self.end) = (self.end, self.start); } } + #[derive(Clone, Debug, Eq, PartialEq)] pub struct PoolTypeFilter { include_transparent: bool, @@ -331,3 +334,17 @@ mod test { ); } } + +/// Converts [`BlockId`] into [`HashOrHeight`] Zebra type +pub fn blockid_to_hashorheight(block_id: BlockId) -> Option { + <[u8; 32]>::try_from(block_id.hash) + .map(zebra_chain::block::Hash) + .map(HashOrHeight::from) + .or_else(|_| { + block_id + .height + .try_into() + .map(|height| HashOrHeight::Height(Height(height))) + }) + .ok() +} diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index e802243d8..1359bbcce 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -45,7 +45,7 @@ use zaino_proto::proto::{ PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, - utils::ValidatedBlockRangeRequest, + utils::{ValidatedBlockRangeRequest, blockid_to_hashorheight}, }; use crate::TransactionHash; @@ -66,7 +66,7 @@ use crate::{ AddressStream, CompactBlockStream, CompactTransactionStream, RawTransactionStream, UtxoReplyStream, }, - utils::{blockid_to_hashorheight, get_build_info, ServiceMetadata}, + utils::{get_build_info, ServiceMetadata}, BackendType, }; diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index d5ccec0ae..b725870a4 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -20,7 +20,7 @@ use crate::{ AddressStream, CompactBlockStream, CompactTransactionStream, RawTransactionStream, 
UtxoReplyStream, }, - utils::{blockid_to_hashorheight, get_build_info, ServiceMetadata}, + utils::{get_build_info, ServiceMetadata}, BackendType, MempoolKey, }; use nonempty::NonEmpty; @@ -47,7 +47,7 @@ use zaino_proto::proto::{ GetAddressUtxosReplyList, GetMempoolTxRequest, LightdInfo, PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, - utils::{pool_types_from_vector, PoolTypeError, PoolTypeFilter, ValidatedBlockRangeRequest}, + utils::{blockid_to_hashorheight, pool_types_from_vector, PoolTypeError, PoolTypeFilter, ValidatedBlockRangeRequest}, }; use zcash_protocol::consensus::NetworkType; diff --git a/zaino-state/src/utils.rs b/zaino-state/src/utils.rs index 09e31a8b0..fc62427e8 100644 --- a/zaino-state/src/utils.rs +++ b/zaino-state/src/utils.rs @@ -1,10 +1,6 @@ //! Contains utility funcitonality for Zaino-State. - use std::fmt; - -use zaino_proto::proto::service::BlockId; -use zebra_chain::{block::Height, parameters::Network}; -use zebra_state::HashOrHeight; +use zebra_chain::parameters::Network; /// Zaino build info. 
#[derive(Debug, Clone)] @@ -115,16 +111,3 @@ impl fmt::Display for ServiceMetadata { writeln!(f, "Zebra Subversion: {}", self.zebra_subversion) } } - -pub(crate) fn blockid_to_hashorheight(block_id: BlockId) -> Option { - <[u8; 32]>::try_from(block_id.hash) - .map(zebra_chain::block::Hash) - .map(HashOrHeight::from) - .or_else(|_| { - block_id - .height - .try_into() - .map(|height| HashOrHeight::Height(Height(height))) - }) - .ok() -} From e82fbe1172b7d4d2ddad5881a5bd45748699c840 Mon Sep 17 00:00:00 2001 From: pacu Date: Thu, 29 Jan 2026 15:55:07 -0300 Subject: [PATCH 105/114] remove rustfmt TOML to avoid fmt warnings --- .rustfmt.toml | 1 - 1 file changed, 1 deletion(-) delete mode 100644 .rustfmt.toml diff --git a/.rustfmt.toml b/.rustfmt.toml deleted file mode 100644 index 33b25ee01..000000000 --- a/.rustfmt.toml +++ /dev/null @@ -1 +0,0 @@ -format_generated_files = false \ No newline at end of file From 86244b19f71d18081c211f37982b2c6b3bba1ca0 Mon Sep 17 00:00:00 2001 From: pacu Date: Thu, 29 Jan 2026 15:55:33 -0300 Subject: [PATCH 106/114] cargo fmt --- zaino-proto/src/proto/utils.rs | 4 ++-- zaino-state/src/backends/fetch.rs | 2 +- zaino-state/src/backends/state.rs | 5 ++++- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs index 47e750516..7c819a385 100644 --- a/zaino-proto/src/proto/utils.rs +++ b/zaino-proto/src/proto/utils.rs @@ -1,6 +1,6 @@ -use zebra_state::HashOrHeight; -use zebra_chain::block::Height; use crate::proto::service::{BlockId, BlockRange, PoolType}; +use zebra_chain::block::Height; +use zebra_state::HashOrHeight; #[derive(Debug, PartialEq, Eq)] /// Errors that can arise when mapping `PoolType` from an `i32` value. 
diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index 1359bbcce..2643c6d02 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -45,7 +45,7 @@ use zaino_proto::proto::{ PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, - utils::{ValidatedBlockRangeRequest, blockid_to_hashorheight}, + utils::{blockid_to_hashorheight, ValidatedBlockRangeRequest}, }; use crate::TransactionHash; diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index b725870a4..3dc958cfa 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -47,7 +47,10 @@ use zaino_proto::proto::{ GetAddressUtxosReplyList, GetMempoolTxRequest, LightdInfo, PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, - utils::{blockid_to_hashorheight, pool_types_from_vector, PoolTypeError, PoolTypeFilter, ValidatedBlockRangeRequest}, + utils::{ + blockid_to_hashorheight, pool_types_from_vector, PoolTypeError, PoolTypeFilter, + ValidatedBlockRangeRequest, + }, }; use zcash_protocol::consensus::NetworkType; From c21edf77a35b4ed17d8d042f1ebf80411879e159 Mon Sep 17 00:00:00 2001 From: pacu Date: Thu, 29 Jan 2026 16:51:38 -0300 Subject: [PATCH 107/114] cargo clippy --- zaino-state/src/backends/state.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index 3dc958cfa..c35eb6ecf 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -2244,9 +2244,7 @@ impl LightWalletIndexer for StateServiceSubscriber { Ok(pool_type_filter) => pool_type_filter, Err(PoolTypeError::InvalidPoolType) => { return Err(StateServiceError::TonicStatusError( - tonic::Status::invalid_argument(format!( - "Error: An invalid `PoolType' was found" - )), + tonic::Status::invalid_argument("Error: An invalid 
`PoolType' was found".to_string()), )) } Err(PoolTypeError::UnknownPoolType(unknown_pool_type)) => { From 6a41ffaf0ca57a3332a2844c0ad0591d8e904b7c Mon Sep 17 00:00:00 2001 From: pacu Date: Thu, 29 Jan 2026 17:34:05 -0300 Subject: [PATCH 108/114] Create new CHANGELOG files and update existing ones --- CHANGELOG.md | 11 +++++++++++ zaino-proto/CHANGELOG.md | 1 + zaino-state/CHANGELOG.md | 40 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 52 insertions(+) create mode 100644 zaino-state/CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 13a3c1917..0231acc4e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,18 @@ and this library adheres to Rust's notion of [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## Unreleased +- [808] Adopt lightclient-protocol v0.4.0 ### Added ### Changed +- zaino-proto now references v0.4.0 files ### Removed + + +### Deprecated +- `zaino-fetch::chain::to_compact` in favor of `to_compact_tx` which takes an + optional height and a `PoolTypeFilter` (see zaino-proto changes) + ## [v0.4.0] - 2025-12-03 ### Added diff --git a/zaino-proto/CHANGELOG.md b/zaino-proto/CHANGELOG.md index 9292cdb02..60dfd28f9 100644 --- a/zaino-proto/CHANGELOG.md +++ b/zaino-proto/CHANGELOG.md @@ -13,3 +13,4 @@ and this library adheres to Rust's notion of `GetBlockRange` RPC request - utils submodule to handle `PoolType` conversions - `PoolTypeError` defines conversion errors between i32 and known `PoolType` variants +- `PoolTypeFilter` indicates which pools need to be returned in a compact block. diff --git a/zaino-state/CHANGELOG.md b/zaino-state/CHANGELOG.md new file mode 100644 index 000000000..009f5ec4c --- /dev/null +++ b/zaino-state/CHANGELOG.md @@ -0,0 +1,40 @@ +# Changelog +All notable changes to this library will be documented in this file. 
+ +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this library adheres to Rust's notion of +[Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +### Added +- `rpc::grpc::service.rs`, `backends::fetch::get_taddress_transactions`: + - these functions implement the GetTaddressTransactions gRPC method of + lightclient-protocol v0.4.0 which replaces `GetTaddressTxids` +- `chain_index` + - `::finalised_state::db::v0::get_compact_block_stream` + - `::finalised_state::db::v1::get_compact_block_stream` + - `::types::db::legacy`: + - `compact_vin` + - `compact_vout` + - `to_compact`: returns a `CompactTx` from a `TxInCompact` +- `local_cache::compact_block_with_pool_types` +### Changed +- `get_mempool_tx` now takes `GetMempoolTxRequest` as a parameter +- `chain_index::finalised_state` + - `::db` + - `::v0` + - `get_compact_block` now takes a `PoolTypeFilter` parameter + - `::v1` + - `get_compact_block` now takes a `PoolTypeFilter` parameter + - `::reader`: + - `get_compact_block` now takes a `PoolTypeFilter` parameter + - `chain_index::types::db::legacy`: + - `to_compact_block()`: now returns transparent data + +### Deprecated +- `GetTaddressTxids` is replaced by `GetTaddressTransactions` + +### Removed +- `Ping` for gRPC service +- `utils::blockid_to_hashorheight` moved to `zaino_proto::utils` From e38f1228f177fa9bf64e3c8316c708812497117d Mon Sep 17 00:00:00 2001 From: idky137 Date: Mon, 2 Feb 2026 14:00:48 +0000 Subject: [PATCH 109/114] fixed zaino-state/src/chain_index/tests/proptest_blockgen.rs --- zaino-state/src/chain_index/tests/proptest_blockgen.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs index 795cbc925..0d81fe38d 100644 --- a/zaino-state/src/chain_index/tests/proptest_blockgen.rs +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -66,7 +66,7 @@ fn make_chain() 
{ .await .unwrap(); tokio::time::sleep(Duration::from_secs(5)).await; - let index_reader = indexer.subscriber().await; + let index_reader = indexer.subscriber(); let snapshot = index_reader.snapshot_nonfinalized_state(); let best_tip_hash = snapshot.best_chaintip().blockhash; let best_tip_block = snapshot From d7661b0f1a6ac72fa843c83d97184ab2005df031 Mon Sep 17 00:00:00 2001 From: idky137 Date: Mon, 2 Feb 2026 15:21:25 +0000 Subject: [PATCH 110/114] updated ChainIndex::get_compact_block --- zaino-state/src/backends/fetch.rs | 19 ++++++++++++++++--- zaino-state/src/chain_index.rs | 31 ++++++++++++++++++++++++++----- 2 files changed, 42 insertions(+), 8 deletions(-) diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index f7eaf5e73..63ae58d89 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -47,7 +47,10 @@ use zaino_proto::proto::{ PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, - utils::{blockid_to_hashorheight, compact_block_to_nullifiers, ValidatedBlockRangeRequest}, + utils::{ + blockid_to_hashorheight, compact_block_to_nullifiers, GetBlockRangeError, PoolTypeFilter, + ValidatedBlockRangeRequest, + }, }; #[allow(deprecated)] @@ -726,7 +729,11 @@ impl LightWalletIndexer for FetchServiceSubscriber { match self .indexer - .get_compact_block(&snapshot, types::Height(height)) + .get_compact_block( + &snapshot, + types::Height(height), + PoolTypeFilter::includes_all(), + ) .await { Ok(Some(block)) => Ok(block), @@ -815,7 +822,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { }; match self .indexer - .get_compact_block(&snapshot, types::Height(height)) + .get_compact_block(&snapshot, types::Height(height), PoolTypeFilter::default()) .await { Ok(Some(block)) => Ok(compact_block_to_nullifiers(block)), @@ -883,6 +890,10 @@ impl LightWalletIndexer for FetchServiceSubscriber { let mut validated_request = 
ValidatedBlockRangeRequest::new_from_block_range(&request) .map_err(FetchServiceError::from)?; + let pool_type_filter = PoolTypeFilter::new_from_pool_types(&validated_request.pool_types()) + .map_err(GetBlockRangeError::PoolTypeArgumentError) + .map_err(FetchServiceError::from)?; + // FIXME: this should be changed but this logic is hard to understand and we lack tests. // we will maintain the behaviour with less smelly code let rev_order = if validated_request.is_reverse_ordered() { @@ -911,6 +922,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { match fetch_service_clone.indexer.get_compact_block( &snapshot, types::Height(height), + pool_type_filter.clone(), ).await { Ok(Some(mut block)) => { block = compact_block_with_pool_types(block, &validated_request.pool_types()); @@ -1023,6 +1035,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { match fetch_service_clone.indexer.get_compact_block( &snapshot, types::Height(height), + PoolTypeFilter::default(), ).await { Ok(Some(block)) => { if channel_tx.send(Ok(compact_block_to_nullifiers(block))).await.is_err() { diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index 6ed192944..267f1c378 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -15,6 +15,7 @@ use crate::chain_index::non_finalised_state::BestTip; use crate::chain_index::types::db::metadata::MempoolInfo; use crate::chain_index::types::{BestChainLocation, NonBestChainLocation}; use crate::error::{ChainIndexError, ChainIndexErrorKind, FinalisedStateError}; +use crate::local_cache::compact_block_with_pool_types; use crate::{AtomicStatus, StatusType, SyncError}; use crate::{IndexedBlock, TransactionHash}; use std::collections::HashSet; @@ -201,15 +202,21 @@ pub trait ChainIndex { /// Returns the *compact* block for the given height. 
/// - /// Returns None if the specified height - /// is greater than the snapshot's tip + /// Returns `None` if the specified `height` is greater than the snapshot's tip. + /// + /// ## Pool filtering /// - /// TODO: Add range fetch method or update this? + /// - `pool_types` controls which per-transaction components are populated. + /// - Transactions that contain no elements in any requested pool are omitted from `vtx`. + /// The original transaction index is preserved in `CompactTx.index`. + /// - `PoolTypeFilter::default()` preserves the legacy behaviour (only Sapling and Orchard + /// components are populated). #[allow(clippy::type_complexity)] fn get_compact_block( &self, nonfinalized_snapshot: &Self::Snapshot, height: types::Height, + pool_types: PoolTypeFilter, ) -> impl std::future::Future< Output = Result, Self::Error>, >; @@ -740,20 +747,34 @@ impl ChainIndex for NodeBackedChainIndexSubscriber Result, Self::Error> { if height <= nonfinalized_snapshot.best_tip.height { Ok(Some( match nonfinalized_snapshot.get_chainblock_by_height(&height) { - Some(block) => block.to_compact_block(), + Some(block) => compact_block_with_pool_types( + block.to_compact_block(), + &pool_types.to_pool_types_vector(), + ), None => match self .finalized_state - .get_compact_block(height, PoolTypeFilter::default()) + .get_compact_block(height, pool_types) .await { Ok(block) => block, From e68508595ae58d3c5f7572eb906dce6e5e6d62fc Mon Sep 17 00:00:00 2001 From: idky137 Date: Mon, 2 Feb 2026 16:59:48 +0000 Subject: [PATCH 111/114] added compact block streamer to chain index, switched fetchservice to use new method --- zaino-state/src/backends/fetch.rs | 323 +++++++++--------- zaino-state/src/chain_index.rs | 188 +++++++++- .../src/chain_index/finalised_state/db/v1.rs | 2 + 3 files changed, 353 insertions(+), 160 deletions(-) diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index 63ae58d89..6fbbc608b 100644 --- a/zaino-state/src/backends/fetch.rs +++ 
b/zaino-state/src/backends/fetch.rs @@ -53,6 +53,10 @@ use zaino_proto::proto::{ }, }; +use crate::{ + chain_index::NonFinalizedSnapshot as _, ChainIndex, NodeBackedChainIndex, + NodeBackedChainIndexSubscriber, +}; #[allow(deprecated)] use crate::{ chain_index::{source::ValidatorConnector, types}, @@ -61,7 +65,6 @@ use crate::{ indexer::{ handle_raw_transaction, IndexerSubscriber, LightWalletIndexer, ZcashIndexer, ZcashService, }, - local_cache::compact_block_with_pool_types, status::StatusType, stream::{ AddressStream, CompactBlockStream, CompactTransactionStream, RawTransactionStream, @@ -70,7 +73,6 @@ use crate::{ utils::{get_build_info, ServiceMetadata}, BackendType, }; -use crate::{ChainIndex, NodeBackedChainIndex, NodeBackedChainIndexSubscriber}; /// Chain fetch service backed by Zcashd's JsonRPC engine. /// @@ -887,21 +889,13 @@ impl LightWalletIndexer for FetchServiceSubscriber { &self, request: BlockRange, ) -> Result { - let mut validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) + let validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) .map_err(FetchServiceError::from)?; let pool_type_filter = PoolTypeFilter::new_from_pool_types(&validated_request.pool_types()) .map_err(GetBlockRangeError::PoolTypeArgumentError) .map_err(FetchServiceError::from)?; - // FIXME: this should be changed but this logic is hard to understand and we lack tests. 
- // we will maintain the behaviour with less smelly code - let rev_order = if validated_request.is_reverse_ordered() { - validated_request.reverse(); - true - } else { - false - }; // Note conversion here is safe due to the use of [`ValidatedBlockRangeRequest::new_from_block_range`] let start = validated_request.start() as u32; let end = validated_request.end() as u32; @@ -909,90 +903,90 @@ impl LightWalletIndexer for FetchServiceSubscriber { let fetch_service_clone = self.clone(); let service_timeout = self.config.service.timeout; let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); + tokio::spawn(async move { - let timeout = timeout(time::Duration::from_secs((service_timeout*4) as u64), async { + let timeout_result = timeout( + time::Duration::from_secs((service_timeout * 4) as u64), + async { let snapshot = fetch_service_clone.indexer.snapshot_nonfinalized_state(); - let chain_height = snapshot.best_tip.height.0; - for height in start..=end { - let height = if rev_order { - end - (height - start) - } else { - height - }; - match fetch_service_clone.indexer.get_compact_block( - &snapshot, - types::Height(height), - pool_type_filter.clone(), - ).await { - Ok(Some(mut block)) => { - block = compact_block_with_pool_types(block, &validated_request.pool_types()); - if channel_tx.send(Ok(block)).await.is_err() { - break; - } + let chain_height = snapshot.best_chaintip().height.0; + + match fetch_service_clone + .indexer + .get_compact_block_stream( + &snapshot, + types::Height(start), + types::Height(end), + pool_type_filter.clone(), + ) + .await + { + Ok(Some(mut compact_block_stream)) => { + while let Some(stream_item) = compact_block_stream.next().await { + if channel_tx.send(stream_item).await.is_err() { + break; } - Ok(None) => if height >= chain_height { - match channel_tx - .send(Err(tonic::Status::out_of_range(format!( - "Error: Height out of range [{height}]. 
Height requested is greater than the best chain tip [{chain_height}].", - )))) - .await - - { - Ok(_) => break, - Err(e) => { - warn!("GetBlockRange channel closed unexpectedly: {}", e); - break; - } - } - } else if channel_tx - .send(Err(tonic::Status::unknown("Internal error, Failed to fetch block."))) - .await - .is_err() - { - break; - } + } + } + Ok(None) => { + // Per `get_compact_block_stream` semantics: `None` means at least one bound is above the tip. + let offending_height = if start > chain_height { start } else { end }; + + match channel_tx + .send(Err(tonic::Status::out_of_range(format!( + "Error: Height out of range [{offending_height}]. Height requested is greater than the best chain tip [{chain_height}].", + )))) + .await + { + Ok(_) => {} Err(e) => { - if height >= chain_height { - match channel_tx - .send(Err(tonic::Status::out_of_range(format!( - "Error: Height out of range [{height}]. Height requested is greater than the best chain tip [{chain_height}].", - )))) - .await - - { - Ok(_) => break, - Err(e) => { - warn!("GetBlockRange channel closed unexpectedly: {}", e); - break; - } - } - } else { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - if channel_tx - .send(Err(tonic::Status::unknown(e.to_string()))) - .await - .is_err() - { - break; - } + warn!("GetBlockRange channel closed unexpectedly: {}", e); + } + } + } + Err(e) => { + // Preserve previous behaviour: if the request is above tip, surface OutOfRange; + // otherwise return the error (currently exposed for dev). + if start > chain_height || end > chain_height { + let offending_height = if start > chain_height { start } else { end }; + + match channel_tx + .send(Err(tonic::Status::out_of_range(format!( + "Error: Height out of range [{offending_height}]. 
Height requested is greater than the best chain tip [{chain_height}].", + )))) + .await + { + Ok(_) => {} + Err(e) => { + warn!("GetBlockRange channel closed unexpectedly: {}", e); } } + } else { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + if channel_tx + .send(Err(tonic::Status::unknown(e.to_string()))) + .await + .is_err() + { + warn!("GetBlockRangeStream closed unexpectedly: {}", e); + } } } - }) - .await; - match timeout { - Ok(_) => {} - Err(_) => { - channel_tx - .send(Err(tonic::Status::deadline_exceeded( - "Error: get_block_range gRPC request timed out.", - ))) - .await - .ok(); } + }, + ) + .await; + + if timeout_result.is_err() { + channel_tx + .send(Err(tonic::Status::deadline_exceeded( + "Error: get_block_range gRPC request timed out.", + ))) + .await + .ok(); } }); + Ok(CompactBlockStream::new(channel_rx)) } @@ -1004,106 +998,117 @@ impl LightWalletIndexer for FetchServiceSubscriber { &self, request: BlockRange, ) -> Result { - let mut validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) + let validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) .map_err(FetchServiceError::from)?; - // FIXME: this should be changed but this logic is hard to understand and we lack tests. 
- // we will maintain the behaviour with less smelly code - let rev_order = if validated_request.is_reverse_ordered() { - validated_request.reverse(); - true - } else { - false - }; + let pool_type_filter = PoolTypeFilter::new_from_pool_types(&validated_request.pool_types()) + .map_err(GetBlockRangeError::PoolTypeArgumentError) + .map_err(FetchServiceError::from)?; // Note conversion here is safe due to the use of [`ValidatedBlockRangeRequest::new_from_block_range`] let start = validated_request.start() as u32; let end = validated_request.end() as u32; + let fetch_service_clone = self.clone(); let service_timeout = self.config.service.timeout; let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); + tokio::spawn(async move { - let timeout = timeout(time::Duration::from_secs((service_timeout*4) as u64), async { + let timeout_result = timeout( + time::Duration::from_secs((service_timeout * 4) as u64), + async { let snapshot = fetch_service_clone.indexer.snapshot_nonfinalized_state(); - let chain_height = snapshot.best_tip.height.0; - for height in start..=end { - let height = if rev_order { - end - (height - start) - } else { - height - }; - match fetch_service_clone.indexer.get_compact_block( - &snapshot, - types::Height(height), - PoolTypeFilter::default(), - ).await { - Ok(Some(block)) => { - if channel_tx.send(Ok(compact_block_to_nullifiers(block))).await.is_err() { - break; - } - } - Ok(None) => if height >= chain_height { - match channel_tx - .send(Err(tonic::Status::out_of_range(format!( - "Error: Height out of range [{height}]. 
Height requested is greater than the best chain tip [{chain_height}].", - )))) - .await - { - Ok(_) => break, - Err(e) => { - warn!("GetBlockRange channel closed unexpectedly: {}", e); - break; - } - } - } else if channel_tx - .send(Err(tonic::Status::unknown("Internal error, Failed to fetch block."))) + let chain_height = snapshot.best_chaintip().height.0; + + match fetch_service_clone + .indexer + .get_compact_block_stream( + &snapshot, + types::Height(start), + types::Height(end), + pool_type_filter.clone(), + ) + .await + { + Ok(Some(mut compact_block_stream)) => { + while let Some(stream_item) = compact_block_stream.next().await { + match stream_item { + Ok(block) => { + if channel_tx + .send(Ok(compact_block_to_nullifiers(block))) .await .is_err() { break; } - Err(e) => { - if height >= chain_height { - match channel_tx - .send(Err(tonic::Status::out_of_range(format!( - "Error: Height out of range [{height}]. Height requested is greater than the best chain tip [{chain_height}].", - )))) - .await - - { - Ok(_) => break, - Err(e) => { - warn!("GetBlockRange channel closed unexpectedly: {}", e); - break; - } - } - } else { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - if channel_tx - .send(Err(tonic::Status::unknown(e.to_string()))) - .await - .is_err() - { + } + Err(status) => { + if channel_tx.send(Err(status)).await.is_err() { break; } } } } } - }) - .await; - match timeout { - Ok(_) => {} - Err(_) => { - channel_tx - .send(Err(tonic::Status::deadline_exceeded( - "Error: get_block_range gRPC request timed out.", - ))) - .await - .ok(); + Ok(None) => { + // Per `get_compact_block_stream` semantics: `None` means at least one bound is above the tip. + let offending_height = if start > chain_height { start } else { end }; + + match channel_tx + .send(Err(tonic::Status::out_of_range(format!( + "Error: Height out of range [{offending_height}]. 
Height requested is greater than the best chain tip [{chain_height}].", + )))) + .await + { + Ok(_) => {} + Err(e) => { + warn!("GetBlockRange channel closed unexpectedly: {}", e); + } + } + } + Err(e) => { + // Preserve previous behaviour: if the request is above tip, surface OutOfRange; + // otherwise return the error (currently exposed for dev). + if start > chain_height || end > chain_height { + let offending_height = if start > chain_height { start } else { end }; + + match channel_tx + .send(Err(tonic::Status::out_of_range(format!( + "Error: Height out of range [{offending_height}]. Height requested is greater than the best chain tip [{chain_height}].", + )))) + .await + { + Ok(_) => {} + Err(e) => { + warn!("GetBlockRange channel closed unexpectedly: {}", e); + } + } + } else { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + if channel_tx + .send(Err(tonic::Status::unknown(e.to_string()))) + .await + .is_err() + { + warn!("GetBlockRangeStream closed unexpectedly: {}", e); + } + } + } } + }, + ) + .await; + + if timeout_result.is_err() { + channel_tx + .send(Err(tonic::Status::deadline_exceeded( + "Error: get_block_range gRPC request timed out.", + ))) + .await + .ok(); } }); + Ok(CompactBlockStream::new(channel_rx)) } diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs index 267f1c378..422aadad7 100644 --- a/zaino-state/src/chain_index.rs +++ b/zaino-state/src/chain_index.rs @@ -16,7 +16,7 @@ use crate::chain_index::types::db::metadata::MempoolInfo; use crate::chain_index::types::{BestChainLocation, NonBestChainLocation}; use crate::error::{ChainIndexError, ChainIndexErrorKind, FinalisedStateError}; use crate::local_cache::compact_block_with_pool_types; -use crate::{AtomicStatus, StatusType, SyncError}; +use crate::{AtomicStatus, CompactBlockStream, StatusType, SyncError}; use crate::{IndexedBlock, TransactionHash}; use std::collections::HashSet; use std::{sync::Arc, time::Duration}; @@ 
+    /// Returns `None` if either bound of the requested range is above the snapshot's tip.
+        // TODO: Investigate whether channel size should be changed, added to config, or set dynamically based on resources.
+ let nonfinalized_start_height = + types::Height(std::cmp::max(start_height.0, lowest_nonfinalized_height.0)); + + for height_value in nonfinalized_start_height.0..=end_height.0 { + let Some(indexed_block) = nonfinalized_snapshot + .get_chainblock_by_height(&types::Height(height_value)) + else { + let _ = channel_sender + .send(Err(tonic::Status::internal(format!( + "Internal error, missing nonfinalized block at height [{height_value}].", + )))) + .await; + return; + }; + let compact_block = compact_block_with_pool_types( + indexed_block.to_compact_block(), + &pool_types_vector, + ); + if channel_sender.send(Ok(compact_block)).await.is_err() { + return; + } + } + } else { + // 1) Nonfinalized segment, descending. + if start_height >= lowest_nonfinalized_height { + let nonfinalized_end_height = + types::Height(std::cmp::max(end_height.0, lowest_nonfinalized_height.0)); + + for height_value in (nonfinalized_end_height.0..=start_height.0).rev() { + let Some(indexed_block) = nonfinalized_snapshot + .get_chainblock_by_height(&types::Height(height_value)) + else { + let _ = channel_sender + .send(Err(tonic::Status::internal(format!( + "Internal error, missing nonfinalized block at height [{height_value}].", + )))) + .await; + return; + }; + let compact_block = compact_block_with_pool_types( + indexed_block.to_compact_block(), + &pool_types_vector, + ); + if channel_sender.send(Ok(compact_block)).await.is_err() { + return; + } + } + } + + // 2) Finalized segment (if any), descending. + if let Some(mut finalized_stream) = finalized_stream { + while let Some(stream_item) = finalized_stream.next().await { + if channel_sender.send(stream_item).await.is_err() { + return; + } + } + } + } + }); + + Ok(Some(CompactBlockStream::new(channel_receiver))) + } + /// Finds the newest ancestor of the given block on the main /// chain, or the block itself if it is on the main chain. 
+    // expected transaction count should be 4 (3 sent tx + coinbase)
a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index 71c86af03..6bc74a50d 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -901,8 +901,8 @@ async fn state_service_get_block_range_returns_all_pools( assert_eq!(compact_block.height, end_height); - // the compact block has 3 transactions - assert_eq!(compact_block.vtx.len(), 3); + // the compact block has 4 transactions (3 sent + coinbase) + assert_eq!(compact_block.vtx.len(), 4); // transaction order is not guaranteed so it's necessary to look up for them by TXID let deshielding_tx = compact_block diff --git a/zaino-fetch/src/chain/block.rs b/zaino-fetch/src/chain/block.rs index c98ac1443..2c8bd1e19 100644 --- a/zaino-fetch/src/chain/block.rs +++ b/zaino-fetch/src/chain/block.rs @@ -385,10 +385,21 @@ impl FullBlock { .into_iter() .enumerate() .filter_map(|(index, tx)| { - if tx.has_shielded_elements() { - Some(tx.to_compact_tx(Some(index as u64), &pool_types)) - } else { - None + match tx.to_compact_tx(Some(index as u64), &pool_types) { + Ok(compact_tx) => { + // Omit transactions that have no elements in any requested pool type. + if !compact_tx.vin.is_empty() + || !compact_tx.vout.is_empty() + || !compact_tx.spends.is_empty() + || !compact_tx.outputs.is_empty() + || !compact_tx.actions.is_empty() + { + Some(Ok(compact_tx)) + } else { + None + } + } + Err(parse_error) => Some(Err(parse_error)), } }) .collect::, _>>()?; @@ -415,7 +426,7 @@ impl FullBlock { } #[deprecated] - /// Converts a zcash full block into a compact block. + /// Converts a zcash full block into a **legacy** compact block. 
pub fn into_compact( self, sapling_commitment_tree_size: u32, diff --git a/zaino-fetch/src/chain/transaction.rs b/zaino-fetch/src/chain/transaction.rs index c6f498992..db9a9f2b7 100644 --- a/zaino-fetch/src/chain/transaction.rs +++ b/zaino-fetch/src/chain/transaction.rs @@ -1238,7 +1238,8 @@ impl FullTransaction { }) } - /// Returns true if the transaction contains either sapling spends or outputs. + /// Returns true if the transaction contains either sapling spends or outputs, or orchard actions. + #[allow(dead_code)] pub(crate) fn has_shielded_elements(&self) -> bool { !self.raw_transaction.shielded_spends.is_empty() || !self.raw_transaction.shielded_outputs.is_empty() diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs index 6fbbc608b..d0afd5367 100644 --- a/zaino-state/src/backends/fetch.rs +++ b/zaino-state/src/backends/fetch.rs @@ -731,11 +731,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { match self .indexer - .get_compact_block( - &snapshot, - types::Height(height), - PoolTypeFilter::includes_all(), - ) + .get_compact_block(&snapshot, types::Height(height), PoolTypeFilter::default()) .await { Ok(Some(block)) => Ok(block), diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index be9a43909..407126716 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -1952,7 +1952,10 @@ impl LightWalletIndexer for StateServiceSubscriber { .get_compact_block(hash_or_height.to_string()) .await { - Ok(block) => Ok(block), + Ok(block) => Ok(compact_block_with_pool_types( + block, + &PoolTypeFilter::default().to_pool_types_vector(), + )), Err(e) => { self.error_get_block(BlockCacheError::Custom(e.to_string()), height as u32) .await @@ -2244,7 +2247,9 @@ impl LightWalletIndexer for StateServiceSubscriber { Ok(pool_type_filter) => pool_type_filter, Err(PoolTypeError::InvalidPoolType) => { return Err(StateServiceError::TonicStatusError( - 
tonic::Status::invalid_argument("Error: An invalid `PoolType' was found".to_string()), + tonic::Status::invalid_argument( + "Error: An invalid `PoolType' was found".to_string(), + ), )) } Err(PoolTypeError::UnknownPoolType(unknown_pool_type)) => { From 08ffe1e5dd0e9b2a8a1a155d46e0d463c9e65198 Mon Sep 17 00:00:00 2001 From: idky137 Date: Tue, 3 Feb 2026 15:05:44 +0000 Subject: [PATCH 113/114] fixed get_transparent_data_from_compact_block_when_requested --- integration-tests/tests/state_service.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index 6bc74a50d..126c1e884 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -2947,8 +2947,6 @@ mod zebra { for cb in compact_block_range.into_iter() { for tx in cb.vtx { - // first transaction of a block is coinbase - assert!(tx.vin.first().unwrap().prevout_txid.is_empty()); // script pub key of this transaction is not empty assert!(!tx.vout.first().unwrap().script_pub_key.is_empty()); } From 35b44750550d8af152dbf21078ada6e50e07455c Mon Sep 17 00:00:00 2001 From: idky137 Date: Tue, 3 Feb 2026 16:15:53 +0000 Subject: [PATCH 114/114] remove unused import --- zaino-state/src/backends/state.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index 1652e6ab9..777fa05ff 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -1,8 +1,8 @@ //! Zcash chain fetch and tx submission service backed by Zebras [`ReadStateService`]. use crate::{ - chain_index::NonFinalizedSnapshot, error::ChainIndexError, ChainIndex as _, - NodeBackedChainIndex, NodeBackedChainIndexSubscriber, State, + chain_index::NonFinalizedSnapshot, error::ChainIndexError, NodeBackedChainIndex, + NodeBackedChainIndexSubscriber, State, }; #[allow(deprecated)] use crate::{