diff --git a/.gitignore b/.gitignore index 7c79ed51a..592f8877e 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ docker_cargo/**/* container-target/ .local/ .failed-tests +**/proptest-regressions/** diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..0231acc4e --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,192 @@ +# Changelog +All notable changes to this library will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this library adheres to Rust's notion of +[Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## Unreleased +- [808] Adopt lightclient-protocol v0.4.0 + +### Added +### Changed +- zaino-proto now references v0.4.0 files +### Removed + +### Deprecated +- `zaino_fetch::chain::to_compact` in favor of `to_compact_tx`, which takes an + optional height and a `PoolTypeFilter` (see zaino-proto changes) + +## [v0.4.0] - 2025-12-03 + +### Added +- `compact_formats.CompactTxIn` +- `compact_formats.TxOut` +- `service.PoolType` +- `service.LightdInfo` has added fields `upgradeName`, `upgradeHeight`, and + `lightwalletProtocolVersion` +- `compact_formats.CompactTx` has added fields `vin` and `vout`, + which may be used to represent transparent transaction input and output data. +- `service.BlockRange` has added field `poolTypes`, which allows + the caller of service methods that take this type as input to cause returned + data to be filtered to include information only for the specified protocols. + For backwards compatibility, when this field is set to the default (empty) value, + servers should return Sapling and Orchard data, as sketched below. This field is to be ignored + when the type is used as part of a `service.TransparentAddressBlockFilter`. + +### Changed +- The `hash` field of `compact_formats.CompactTx` has been renamed to `txid`. + This is a serialization-compatible clarification, as the index of this field + in the .proto type does not change. +- `service.Exclude` has been renamed to `service.GetMempoolTxRequest` and has + an added `poolTypes` field, which allows the caller of `GetMempoolTx` to specify + which pools the resulting `CompactTx` values should contain data for. + +### Deprecated +- `service.CompactTxStreamer`: + - The `GetBlockNullifiers` and `GetBlockRangeNullifiers` methods are + deprecated. +- `zaino_fetch::FullTransaction::to_compact` deprecated in favor of `to_compact_tx`, which takes + an optional index (omitting it explicitly indicates that the transaction is in the mempool and + has no index) and a `Vec` of pool types to filter according to the transparent data changes of + lightclient-protocol v0.4.0 +- `zaino_fetch::chain::Block::to_compact` deprecated in favor of `to_compact_block`, allowing callers + to specify a `PoolTypeFilter` that restricts which pools are included in the compact block according to + lightclient-protocol v0.4.0 +- `zaino_fetch::chain::Transaction::to_compact` deprecated in favor of `to_compact_tx`, allowing callers + to specify a `PoolTypeFilter` that restricts which pools are included in the compact transaction according + to lightclient-protocol v0.4.0. + +## [v0.3.6] - 2025-05-20 + +### Added +- `service.LightdInfo` has added field `donationAddress` +- `service.CompactTxStreamer.GetTaddressTransactions`. This duplicates + the `GetTaddressTxids` method, but is more accurately named. + +### Deprecated +- `service.CompactTxStreamer.GetTaddressTxids`. Use `GetTaddressTransactions` + instead.
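The `poolTypes` defaulting rule in the v0.4.0 notes above is the one behavior a server can silently get wrong: an empty filter must mean "Sapling and Orchard", not "nothing". A minimal, self-contained Rust sketch of that rule; the `PoolType` enum here is an illustrative stand-in for `service.PoolType`, and `effective_pools` is a hypothetical helper, not part of zaino-proto:

```rust
/// Illustrative stand-in for `service.PoolType`; not the generated prost type.
#[derive(Clone, Copy, PartialEq, Debug)]
enum PoolType {
    Transparent,
    Sapling,
    Orchard,
}

/// Resolve the effective pool filter for a standalone `BlockRange`-style
/// request. Per the v0.4.0 notes, `poolTypes` is ignored entirely when the
/// `BlockRange` arrives inside a `TransparentAddressBlockFilter`, so this
/// helper covers only the standalone case.
fn effective_pools(requested: &[PoolType]) -> Vec<PoolType> {
    if requested.is_empty() {
        // Backwards compatibility: the default (empty) value means the
        // server returns Sapling and Orchard data.
        vec![PoolType::Sapling, PoolType::Orchard]
    } else {
        requested.to_vec()
    }
}

fn main() {
    assert_eq!(
        effective_pools(&[]),
        vec![PoolType::Sapling, PoolType::Orchard]
    );
    assert_eq!(
        effective_pools(&[PoolType::Transparent]),
        vec![PoolType::Transparent]
    );
}
```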
+ +## [v0.3.5] - 2023-07-03 + +### Added +- `compact_formats.ChainMetadata` +- `service.ShieldedProtocol` +- `service.GetSubtreeRootsArg` +- `service.SubtreeRoot` +- `service.CompactTxStreamer.GetBlockNullifiers` +- `service.CompactTxStreamer.GetBlockRangeNullifiers` +- `service.CompactTxStreamer.GetSubtreeRoots` + +### Changed +- `compact_formats.CompactBlock` has added field `chainMetadata` +- `compact_formats.CompactSaplingOutput.epk` has been renamed to `ephemeralKey` + +## [v0.3.4] - UNKNOWN + +### Added +- `service.CompactTxStreamer.GetLatestTreeState` + +## [v0.3.3] - 2022-04-02 + +### Added +- `service.TreeState` has added field `orchardTree` + +### Changed +- `service.TreeState.tree` has been renamed to `saplingTree` + +## [v0.3.2] - 2021-12-09 + +### Changed +- `compact_formats.CompactOrchardAction.encCiphertext` has been renamed to + `CompactOrchardAction.ciphertext` + +## [v0.3.1] - 2021-12-09 + +### Added +- `compact_formats.CompactOrchardAction` +- `service.CompactTxStreamer.GetMempoolTx` (removed in 0.3.0) has been reintroduced. +- `service.Exclude` (removed in 0.3.0) has been reintroduced. + +### Changed +- `compact_formats.CompactSpend` has been renamed `CompactSaplingSpend` +- `compact_formats.CompactOutput` has been renamed `CompactSaplingOutput` + +## [v0.3.0] - 2021-07-23 + +### Added +- `service.CompactTxStreamer.GetMempoolStream` + +### Removed +- `service.CompactTxStreamer.GetMempoolTx` has been replaced by `GetMempoolStream` +- `service.Exclude` has been removed as it is now unused. + +## [v0.2.4] - 2021-01-14 + +### Changed +- `service.GetAddressUtxosArg.address` has been replaced by the + repeated field `addresses`. This is a [conditionally-safe](https://protobuf.dev/programming-guides/proto3/#conditionally-safe-changes) + format change, as sketched below. +- `service.GetAddressUtxosReply` has added field `address` + +## [v0.2.3] - 2021-01-14 + +### Added +- `service.LightdInfo` has added fields: + - `estimatedHeight` + - `zcashdBuild` + - `zcashdSubversion` + +## [v0.2.2] - 2020-10-22 + +### Added +- `service.TreeState` +- `service.GetAddressUtxosArg` +- `service.GetAddressUtxosReply` +- `service.GetAddressUtxosReplyList` +- `service.CompactTxStreamer.GetTreeState` +- `service.CompactTxStreamer.GetAddressUtxos` +- `service.CompactTxStreamer.GetAddressUtxosStream` + +## [v0.2.1] - 2020-10-06 + +### Added +- `service.Address` +- `service.AddressList` +- `service.Balance` +- `service.Exclude` +- `service.CompactTxStreamer.GetTaddressBalance` +- `service.CompactTxStreamer.GetTaddressBalanceStream` +- `service.CompactTxStreamer.GetMempoolTx` +- `service.LightdInfo` has added fields: + - `gitCommit` + - `branch` + - `buildDate` + - `buildUser` + +## [v0.2.0] - 2020-04-24 + +### Added +- `service.Duration` +- `service.PingResponse` +- `service.CompactTxStreamer.Ping` + +### Removed +- `service.TransparentAddress` was removed (it was not used by any service API).
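The "conditionally-safe" label on the v0.2.4 change above has a concrete wire-level meaning: a singular `string` field and a `repeated string` field with the same field number encode identically whenever the list holds exactly one element, so requests from old single-`address` clients still decode as expected. A self-contained sketch that hand-encodes the relevant proto3 bytes; the field number 1 and the helper function are illustrative assumptions, not read from service.proto:

```rust
// Hand-rolled proto3 encoding of one length-delimited (wire type 2) string
// field occurrence. Assumes field numbers below 16 and strings shorter than
// 128 bytes, so the tag and length each fit in a single varint byte.
fn encode_string_field(field_number: u32, value: &str) -> Vec<u8> {
    let mut out = Vec::new();
    out.push(((field_number << 3) | 2) as u8); // tag = (field << 3) | wire_type
    out.push(value.len() as u8);               // length prefix
    out.extend_from_slice(value.as_bytes());
    out
}

fn main() {
    // Old writer: singular `address = 1`.
    let old = encode_string_field(1, "t1Vexample");
    // New writer: `repeated string addresses = 1` with one element emits the
    // same tag/length/bytes record exactly once (strings are never packed).
    let new: Vec<u8> = ["t1Vexample"]
        .iter()
        .flat_map(|addr| encode_string_field(1, addr))
        .collect();
    assert_eq!(old, new);
    // The change is only *conditionally* safe: if a new writer sends several
    // addresses, an old reader decoding field 1 as singular keeps the last
    // occurrence and silently drops the rest.
}
```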
+ +## [v0.1.1] - 2019-11-27 + +### Added +- `service.Empty` +- `service.LightdInfo` +- `service.TransparentAddress` +- `service.TransparentAddressBlockFilter` +- `service.CompactTxStreamer.GetTaddressTxids` +- `service.CompactTxStreamer.GetLightdInfo` +- `service.RawTransaction` has added field `height` + +## [v0.1.0] - 2019-09-19 + +Initial release diff --git a/Cargo.lock b/Cargo.lock index 06de6be64..2088811cb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,7 +23,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ - "crypto-common 0.1.7", + "crypto-common 0.1.6", "generic-array", ] @@ -64,9 +64,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.4" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -164,22 +164,22 @@ dependencies = [ [[package]] name = "anstyle-query" -version = "1.1.5" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" +checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] name = "anstyle-wincon" -version = "3.0.11" +version = "3.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" +checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a" dependencies = [ "anstyle", "once_cell_polyfill", - "windows-sys 0.61.2", + "windows-sys 0.60.2", ] [[package]] @@ -190,9 +190,9 @@ checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "append-only-vec" -version = "0.1.8" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2114736faba96bcd79595c700d03183f61357b9fbce14852515e59f3bee4ed4a" +checksum = "7992085ec035cfe96992dd31bfd495a2ebd31969bb95f624471cb6c0b349e571" [[package]] name = "arc-swap" @@ -220,7 +220,7 @@ checksum = "4734bde002bb3d52e27ab808faa971a143d48d11dbd836d5c02edd1756cdab06" dependencies = [ "async-trait", "cfg-if", - "derive-deftly", + "derive-deftly 1.0.1", "derive_builder_fork_arti", "derive_more", "educe", @@ -287,7 +287,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", "synstructure", ] @@ -299,14 +299,14 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] name = "async-compression" -version = "0.4.35" +version = "0.4.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07a926debf178f2d355197f9caddb08e54a9329d44748034bba349c5848cb519" +checksum = "5a89bce6054c720275ac2432fbba080a66a2106a44a1b804553930ca6909f4e0" dependencies = [ "compression-codecs", "compression-core", @@ -334,7 +334,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -345,7 +345,7 @@ checksum = 
"9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -405,9 +405,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.15.1" +version = "1.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b5ce75405893cd713f9ab8e297d8e438f624dde7d706108285f7e17a25a180f" +checksum = "879b6c89592deb404ba4dc0ae6b58ffd1795c78991cbb5b8bc441c48a070440d" dependencies = [ "aws-lc-sys", "zeroize", @@ -415,10 +415,11 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.34.0" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "179c3777a8b5e70e90ea426114ffc565b2c1a9f82f6c4a0c5a34aa6ef5e781b6" +checksum = "107a4e9d9cab9963e04e84bb8dee0e25f2a987f9a8bad5ed054abd439caa8f8c" dependencies = [ + "bindgen 0.72.1", "cc", "cmake", "dunce", @@ -454,9 +455,9 @@ dependencies = [ [[package]] name = "axum" -version = "0.8.7" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425" +checksum = "8a18ed336352031311f4e0b4dd2ff392d4fbb370777c9d18d7fc9d7359f73871" dependencies = [ "axum-core 0.5.5", "bytes", @@ -544,15 +545,15 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.8.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e050f626429857a27ddccb31e0aca21356bfa709c04041aefddac081a8f068a" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" [[package]] name = "bech32" -version = "0.11.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32637268377fc7b10a8c6d51de3e7fba1ce5dd371a96e342b34e6078db558e7f" +checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" [[package]] name = "bellman" @@ -590,7 +591,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "cexpr", "clang-sys", "itertools 0.12.1", @@ -601,7 +602,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -610,16 +611,18 @@ version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "cexpr", "clang-sys", "itertools 0.13.0", + "log", + "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash 2.1.1", "shlex", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -654,18 +657,18 @@ dependencies = [ [[package]] name = "bit-set" -version = "0.5.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" dependencies = [ "bit-vec", ] [[package]] name = "bit-vec" -version = "0.6.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" [[package]] 
name = "bitflags" @@ -675,9 +678,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.10.0" +version = "2.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" +checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" [[package]] name = "bitflags-serde-legacy" @@ -685,7 +688,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b64e60c28b6d25ad92e8b367801ff9aa12b41d05fc8798055d296bace4a60cc" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "serde", ] @@ -740,7 +743,7 @@ checksum = "e0b121a9fe0df916e362fb3271088d071159cdf11db0e4182d02152850756eff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -776,9 +779,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.6.0" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1da5ab77c1437701eeff7c88d968729e7766172279eab0676857b3d63af7a6f" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" dependencies = [ "borsh-derive", "cfg_aliases", @@ -786,15 +789,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.6.0" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0686c856aa6aac0c4498f936d7d6a02df690f614c03e4d906d1018062b5c5e2c" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" dependencies = [ "once_cell", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -824,9 +827,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.12.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63044e1ae8e69f3b5a92c736ca6269b8d12fa7efe39bf34ddb06d102cf0e2cab" +checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" dependencies = [ "memchr", "regex-automata", @@ -887,9 +890,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.11.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "bzip2-sys" @@ -930,9 +933,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.49" +version = "1.2.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215" +checksum = "ac9fe6cdbb24b6ade63616c0a0688e45bb56732262c158df3c0c4bea4ca47cb7" dependencies = [ "find-msvc-tools", "jobserver", @@ -1005,7 +1008,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ - "crypto-common 0.1.7", + "crypto-common 0.1.6", "inout", "zeroize", ] @@ -1023,9 +1026,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.53" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" +checksum = "f4512b90fa68d3a9932cea5184017c5d200f5921df706d45e853537dea51508f" dependencies = [ "clap_builder", "clap_derive", @@ -1033,9 +1036,9 @@ 
dependencies = [ [[package]] name = "clap_builder" -version = "4.5.53" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" +checksum = "0025e98baa12e766c67ba13ff4695a887a1eba19569aad00a472546795bd6730" dependencies = [ "anstream", "anstyle", @@ -1052,7 +1055,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -1102,9 +1105,9 @@ checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "compression-codecs" -version = "0.4.34" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34a3cbbb8b6eca96f3a5c4bf6938d5b27ced3675d69f95bb51948722870bc323" +checksum = "ef8a506ec4b81c460798f572caead636d57d3d7e940f998160f52bd254bf2d23" dependencies = [ "compression-core", "flate2", @@ -1115,9 +1118,9 @@ dependencies = [ [[package]] name = "compression-core" -version = "0.4.31" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75984efb6ed102a0d42db99afb6c1948f0380d1d91808d5529916e6c08b49d8d" +checksum = "e47641d3deaf41fb1538ac1f54735925e275eaf3bf4d55c81b137fba797e5cbb" [[package]] name = "concurrent-queue" @@ -1128,6 +1131,30 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "config" +version = "0.15.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b30fa8254caad766fc03cb0ccae691e14bf3bd72bfff27f72802ce729551b3d6" +dependencies = [ + "pathdiff", + "serde_core", + "toml 0.9.8", + "winnow", +] + +[[package]] +name = "console" +version = "0.15.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" +dependencies = [ + "encode_unicode", + "libc", + "once_cell", + "windows-sys 0.59.0", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -1162,9 +1189,9 @@ checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" [[package]] name = "convert_case" -version = "0.10.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" +checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7" dependencies = [ "unicode-segmentation", ] @@ -1322,9 +1349,9 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.7" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", @@ -1373,7 +1400,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -1421,7 +1448,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -1443,7 +1470,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -1501,14 +1528,42 @@ dependencies = [ "serde", ] +[[package]] +name = "derive-deftly" +version = "0.14.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e8ea84d0109517cc2253d4a679bdda1e8989e9bd86987e9e4f75ffdda0095fd1" +dependencies = [ + "derive-deftly-macros 0.14.6", + "heck", +] + [[package]] name = "derive-deftly" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0015cb20a284ec944852820598af3aef6309ea8dc317a0304441272ed620f196" dependencies = [ - "derive-deftly-macros", + "derive-deftly-macros 1.0.1", + "heck", +] + +[[package]] +name = "derive-deftly-macros" +version = "0.14.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357422a457ccb850dc8f1c1680e0670079560feaad6c2e247e3f345c4fab8a3f" +dependencies = [ "heck", + "indexmap 2.11.4", + "itertools 0.14.0", + "proc-macro-crate", + "proc-macro2", + "quote", + "sha3", + "strum 0.27.2", + "syn 2.0.106", + "void", ] [[package]] @@ -1518,14 +1573,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b48e8e38a4aa565da767322b5ca55fb0f8347983c5bc7f7647db069405420479" dependencies = [ "heck", - "indexmap 2.12.1", + "indexmap 2.11.4", "itertools 0.14.0", "proc-macro-crate", "proc-macro2", "quote", "sha3", "strum 0.27.2", - "syn 2.0.111", + "syn 2.0.106", "void", ] @@ -1537,7 +1592,7 @@ checksum = "74ef43543e701c01ad77d3a5922755c6a1d71b22d942cb8042be4994b380caff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -1584,24 +1639,23 @@ dependencies = [ [[package]] name = "derive_more" -version = "2.1.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10b768e943bed7bf2cab53df09f4bc34bfd217cdb57d971e769874c9a6710618" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "2.1.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d286bfdaf75e988b4a78e013ecd79c581e06399ab53fbacd2d916c2f904f30b" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version", - "syn 2.0.111", + "syn 2.0.106", "unicode-xid", ] @@ -1619,7 +1673,7 @@ checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", "const-oid", - "crypto-common 0.1.7", + "crypto-common 0.1.6", "subtle", ] @@ -1672,14 +1726,14 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] name = "document-features" -version = "0.2.12" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4b8a88685455ed29a21542a33abd9cb6510b6b129abadabdcef0f4c55bc8f61" +checksum = "95249b50c6c185bee49034bcb378a49dc2b5dff0be90ff6616d31d64febab05d" dependencies = [ "litrs", ] @@ -1720,7 +1774,7 @@ checksum = "7a4102713839a8c01c77c165bc38ef2e83948f6397fa1e1dcfacec0f07b149d3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -1818,6 +1872,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "encode_unicode" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" + [[package]] name = "enum-ordinalize" version = "3.1.15" @@ -1828,7 +1888,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -1963,10 +2023,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" dependencies = [ "atomic 0.6.1", - "parking_lot", - "pear", "serde", - "tempfile", "toml 0.8.23", "uncased", "version_check", @@ -1986,9 +2043,9 @@ dependencies = [ [[package]] name = "find-msvc-tools" -version = "0.1.5" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" +checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127" [[package]] name = "fixed-hash" @@ -2010,9 +2067,9 @@ checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" [[package]] name = "flate2" -version = "1.1.5" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +checksum = "dc5a4e564e38c699f2880d3fda590bedc2e69f3f84cd48b457bd892ce61d0aa9" dependencies = [ "crc32fast", "miniz_oxide", @@ -2153,7 +2210,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -2163,7 +2220,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.35", + "rustls 0.23.32", "rustls-pki-types", ] @@ -2199,9 +2256,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.7" +version = "0.14.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2" dependencies = [ "typenum", "version_check", @@ -2255,7 +2312,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -2300,7 +2357,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.12.1", + "indexmap 2.11.4", "slab", "tokio", "tokio-util", @@ -2347,9 +2404,9 @@ dependencies = [ [[package]] name = "halo2_proofs" -version = "0.3.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05713f117155643ce10975e0bee44a274bcda2f4bb5ef29a999ad67c1fa8d4d3" +checksum = "019561b5f3be60731e7b72f3f7878c5badb4174362d860b03d3cf64cb47f90db" dependencies = [ "blake2b_simd", "ff", @@ -2393,9 +2450,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.16.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" [[package]] name = "hashlink" @@ -2462,11 +2519,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.12" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -2477,11 +2534,12 @@ checksum = "f558a64ac9af88b5ba400d99b579451af0d39c6d360980045b91aac966d705e2" [[package]] name = "http" -version = "1.4.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", + "fnv", "itoa", ] @@ -2553,9 +2611,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.8.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" +checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" dependencies = [ "atomic-waker", "bytes", @@ -2584,13 +2642,13 @@ dependencies = [ "hyper", "hyper-util", "log", - "rustls 0.23.35", + "rustls 0.23.32", "rustls-native-certs", "rustls-pki-types", "tokio", "tokio-rustls", "tower-service", - "webpki-roots 1.0.4", + "webpki-roots 1.0.3", ] [[package]] @@ -2608,9 +2666,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.19" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" +checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" dependencies = [ "base64", "bytes", @@ -2656,9 +2714,9 @@ dependencies = [ [[package]] name = "icu_collections" -version = "2.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", "potential_utf", @@ -2669,9 +2727,9 @@ dependencies = [ [[package]] name = "icu_locale_core" -version = "2.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", "litemap", @@ -2682,10 +2740,11 @@ dependencies = [ [[package]] name = "icu_normalizer" -version = "2.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ + "displaydoc", "icu_collections", "icu_normalizer_data", "icu_properties", @@ -2696,38 +2755,42 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "2.1.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "2.1.2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ + "displaydoc", "icu_collections", "icu_locale_core", "icu_properties_data", "icu_provider", + "potential_utf", "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "2.1.2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" -version = "2.1.1" +version = 
"2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ "displaydoc", "icu_locale_core", + "stable_deref_trait", + "tinystr", "writeable", "yoke", "zerofrom", @@ -2788,7 +2851,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -2819,29 +2882,23 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.12.1" +version = "2.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", - "hashbrown 0.16.1", + "hashbrown 0.16.0", "serde", "serde_core", ] -[[package]] -name = "inlinable_string" -version = "0.1.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" - [[package]] name = "inotify" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f37dccff2791ab604f9babef0ba14fbe0be30bd368dc541e2b08d07c8aa908f3" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "inotify-sys", "libc", ] @@ -2864,6 +2921,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "insta" +version = "1.43.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46fdb647ebde000f43b5b53f773c30cf9b0cb4300453208713fa38b2c70935a0" +dependencies = [ + "console", + "once_cell", + "similar", +] + [[package]] name = "integration-tests" version = "0.1.2" @@ -2908,9 +2976,9 @@ checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "iri-string" -version = "0.7.9" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" dependencies = [ "memchr", "serde", @@ -2918,9 +2986,9 @@ dependencies = [ [[package]] name = "is_terminal_polyfill" -version = "1.70.2" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" @@ -2967,9 +3035,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.83" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" +checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305" dependencies = [ "once_cell", "wasm-bindgen", @@ -2983,9 +3051,9 @@ checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" [[package]] name = "jsonrpsee" -version = "0.24.10" +version = "0.24.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e281ae70cc3b98dac15fced3366a880949e65fc66e345ce857a5682d152f3e62" +checksum = "37b26c20e2178756451cfeb0661fb74c47dd5988cb7e3939de7e9241fd604d42" dependencies = [ "jsonrpsee-core", "jsonrpsee-proc-macros", @@ -2997,9 +3065,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = 
"0.24.10" +version = "0.24.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "348ee569eaed52926b5e740aae20863762b16596476e943c9e415a6479021622" +checksum = "456196007ca3a14db478346f58c7238028d55ee15c1df15115596e411ff27925" dependencies = [ "async-trait", "bytes", @@ -3020,22 +3088,22 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.24.10" +version = "0.24.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7398cddf5013cca4702862a2692b66c48a3bd6cf6ec681a47453c93d63cf8de5" +checksum = "5e65763c942dfc9358146571911b0cd1c361c2d63e2d2305622d40d36376ca80" dependencies = [ "heck", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] name = "jsonrpsee-server" -version = "0.24.10" +version = "0.24.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21429bcdda37dcf2d43b68621b994adede0e28061f816b038b0f18c70c143d51" +checksum = "55e363146da18e50ad2b51a0a7925fc423137a0b1371af8235b1c231a0647328" dependencies = [ "futures-util", "http", @@ -3060,9 +3128,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.24.10" +version = "0.24.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0f05e0028e55b15dbd2107163b3c744cd3bb4474f193f95d9708acbf5677e44" +checksum = "08a8e70baf945b6b5752fc8eb38c918a48f1234daf11355e07106d963f860089" dependencies = [ "http", "serde", @@ -3139,9 +3207,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.178" +version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" [[package]] name = "libloading" @@ -3185,7 +3253,7 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "libc", "redox_syscall", ] @@ -3218,9 +3286,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.23" +version = "1.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" +checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" dependencies = [ "cc", "pkg-config", @@ -3254,15 +3322,15 @@ checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" -version = "0.8.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "litrs" -version = "1.0.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d3d7f243d5c5a8b9bb5d6dd2b1602c0cb0b9db1621bafc7ed66e35ff9fe092" +checksum = "f5e54036fe321fd421e10d732f155734c4e4afd610dd556d9a82833ab3ee0bed" [[package]] name = "lmdb" @@ -3297,11 +3365,11 @@ dependencies = [ [[package]] name = "log" -version = "0.4.29" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" +checksum = 
"34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" dependencies = [ - "serde_core", + "serde", ] [[package]] @@ -3394,9 +3462,9 @@ checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "memmap2" -version = "0.9.9" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744133e4a0e0a658e1374cf3bf8e415c4052a15a111acd372764c55b4177d490" +checksum = "843a98750cd611cc2965a8213b53b43e715f13c37a9e096c6408e69990961db7" dependencies = [ "libc", ] @@ -3421,9 +3489,9 @@ dependencies = [ [[package]] name = "metrics" -version = "0.24.3" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d5312e9ba3771cfa961b585728215e3d972c950a3eed9252aa093d6301277e8" +checksum = "25dea7ac8057892855ec285c440160265225438c3c45072613c25a4b26e98ef5" dependencies = [ "ahash 0.8.12", "portable-atomic", @@ -3464,14 +3532,14 @@ dependencies = [ [[package]] name = "mio" -version = "1.1.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", "log", "wasi 0.11.1+wasi-snapshot-preview1", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -3498,7 +3566,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "cfg-if", "cfg_aliases", "libc", @@ -3526,7 +3594,7 @@ version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d3d07927151ff8575b7087f245456e549fea62edf0ec4e565a5ee50c8402bc3" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "inotify", "kqueue", "libc", @@ -3564,10 +3632,11 @@ dependencies = [ [[package]] name = "num-bigint-dig" -version = "0.8.6" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e661dda6640fad38e827a6d4a310ff4763082116fe217f279885c97f511bb0b7" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" dependencies = [ + "byteorder", "lazy_static", "libm", "num-integer", @@ -3626,9 +3695,9 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.5" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" +checksum = "a973b4e44ce6cad84ce69d797acf9a044532e4184c4f267913d1b546a0727b7a" dependencies = [ "num_enum_derive", "rustversion", @@ -3636,14 +3705,14 @@ dependencies = [ [[package]] name = "num_enum_derive" -version = "0.7.5" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" +checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -3663,9 +3732,9 @@ checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "once_cell_polyfill" -version = "1.70.2" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" +checksum = 
"a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" [[package]] name = "oneshot-fused-workaround" @@ -3835,7 +3904,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -3899,6 +3968,12 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +[[package]] +name = "pathdiff" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" + [[package]] name = "pbkdf2" version = "0.12.2" @@ -3909,29 +3984,6 @@ dependencies = [ "password-hash", ] -[[package]] -name = "pear" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdeeaa00ce488657faba8ebf44ab9361f9365a97bd39ffb8a60663f57ff4b467" -dependencies = [ - "inlinable_string", - "pear_codegen", - "yansi", -] - -[[package]] -name = "pear_codegen" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bab5b985dc082b345f812b7df84e1bef27e7207b39e448439ba8bd69c93f147" -dependencies = [ - "proc-macro2", - "proc-macro2-diagnostics", - "quote", - "syn 2.0.111", -] - [[package]] name = "pem-rfc7468" version = "0.7.0" @@ -3991,7 +4043,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap 2.12.1", + "indexmap 2.11.4", ] [[package]] @@ -4024,7 +4076,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -4053,7 +4105,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -4138,9 +4190,9 @@ dependencies = [ [[package]] name = "potential_utf" -version = "0.1.4" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" dependencies = [ "zerovec", ] @@ -4167,7 +4219,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -4208,7 +4260,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93980406f12d9f8140ed5abe7155acb10bb1e69ea55c88960b9c2f117445ef96" dependencies = [ "equivalent", - "indexmap 2.12.1", + "indexmap 2.11.4", "serde", ] @@ -4218,7 +4270,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.23.9", + "toml_edit 0.23.7", ] [[package]] @@ -4240,51 +4292,49 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] name = "proc-macro2" -version = "1.0.103" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" dependencies = [ "unicode-ident", ] -[[package]] -name = "proc-macro2-diagnostics" -version = 
"0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.111", - "version_check", - "yansi", -] - [[package]] name = "proptest" -version = "1.2.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65" +checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" dependencies = [ "bit-set", - "bitflags 1.3.2", - "byteorder", + "bit-vec", + "bitflags 2.9.4", "lazy_static", "num-traits", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - "regex-syntax 0.6.29", + "regex-syntax", "rusty-fork", "tempfile", "unarray", ] +[[package]] +name = "proptest-derive" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "prost" version = "0.13.5" @@ -4321,7 +4371,7 @@ dependencies = [ "prost 0.13.5", "prost-types 0.13.5", "regex", - "syn 2.0.111", + "syn 2.0.106", "tempfile", ] @@ -4343,7 +4393,7 @@ dependencies = [ "pulldown-cmark", "pulldown-cmark-to-cmark", "regex", - "syn 2.0.111", + "syn 2.0.106", "tempfile", ] @@ -4357,7 +4407,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -4370,7 +4420,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -4433,7 +4483,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "memchr", "unicase", ] @@ -4449,14 +4499,14 @@ dependencies = [ [[package]] name = "pwd-grp" -version = "1.0.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e2023f41b5fcb7c30eb5300a5733edfaa9e0e0d502d51b586f65633fd39e40c" +checksum = "b94fdf3867b7f2889a736f0022ea9386766280d2cca4bdbe41629ada9e4f3b8f" dependencies = [ - "derive-deftly", + "derive-deftly 0.14.6", "libc", "paste", - "thiserror 2.0.17", + "thiserror 1.0.69", ] [[package]] @@ -4500,7 +4550,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.1.1", - "rustls 0.23.35", + "rustls 0.23.32", "socket2 0.6.1", "thiserror 2.0.17", "tokio", @@ -4520,7 +4570,7 @@ dependencies = [ "rand 0.9.2", "ring 0.17.14", "rustc-hash 2.1.1", - "rustls 0.23.35", + "rustls 0.23.32", "rustls-pki-types", "slab", "thiserror 2.0.17", @@ -4545,9 +4595,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.42" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" dependencies = [ "proc-macro2", ] @@ -4730,7 +4780,7 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", ] [[package]] @@ -4761,7 +4811,7 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ 
-4773,7 +4823,7 @@ dependencies = [ "aho-corasick", "memchr", "regex-automata", - "regex-syntax 0.8.8", + "regex-syntax", ] [[package]] @@ -4784,15 +4834,9 @@ checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.8", + "regex-syntax", ] -[[package]] -name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - [[package]] name = "regex-syntax" version = "0.8.8" @@ -4810,9 +4854,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.25" +version = "0.12.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6eff9328d40131d43bd911d42d79eb6a47312002a4daefc9e37f17e74a7701a" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" dependencies = [ "base64", "bytes", @@ -4830,7 +4874,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.35", + "rustls 0.23.32", "rustls-pki-types", "serde", "serde_json", @@ -4845,7 +4889,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 1.0.4", + "webpki-roots 1.0.3", ] [[package]] @@ -4967,9 +5011,9 @@ checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" [[package]] name = "rsa" -version = "0.9.9" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40a0376c50d0358279d9d643e4bf7b7be212f1f4ff1da9070a7b54d22ef75c88" +checksum = "78928ac1ed176a5ca1d17e578a1825f3d81ca54cf41053a592584b020cfd691b" dependencies = [ "const-oid", "digest 0.10.7", @@ -4992,7 +5036,7 @@ version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7753b721174eb8ff87a9a0e799e2d7bc3749323e773db92e0984debb00019d6e" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -5021,7 +5065,7 @@ dependencies = [ "proc-macro2", "quote", "rust-embed-utils", - "syn 2.0.111", + "syn 2.0.106", "walkdir", ] @@ -5099,7 +5143,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "errno", "libc", "linux-raw-sys 0.4.15", @@ -5112,7 +5156,7 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "errno", "libc", "linux-raw-sys 0.11.0", @@ -5133,16 +5177,16 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.35" +version = "0.23.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" +checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" dependencies = [ "aws-lc-rs", "log", "once_cell", "ring 0.17.14", "rustls-pki-types", - "rustls-webpki 0.103.8", + "rustls-webpki 0.103.7", "subtle", "zeroize", ] @@ -5170,9 +5214,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" 
dependencies = [ "web-time", "zeroize", @@ -5190,9 +5234,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.8" +version = "0.103.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +checksum = "e10b3f4191e8a80e6b43eebabfac91e5dcecebb27a71f04e820c47ec41d314bf" dependencies = [ "aws-lc-rs", "ring 0.17.14", @@ -5311,9 +5355,9 @@ dependencies = [ [[package]] name = "schemars" -version = "1.1.0" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289" +checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0" dependencies = [ "dyn-clone", "ref-cast", @@ -5391,7 +5435,7 @@ version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "core-foundation", "core-foundation-sys", "libc", @@ -5464,7 +5508,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -5483,7 +5527,7 @@ version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.11.4", "itoa", "memchr", "ryu", @@ -5500,6 +5544,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" +dependencies = [ + "serde_core", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -5514,17 +5567,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.16.1" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" +checksum = "6093cd8c01b25262b84927e0f7151692158fab02d961e04c979d3903eba7ecc5" dependencies = [ "base64", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.12.1", + "indexmap 2.11.4", "schemars 0.9.0", - "schemars 1.1.0", + "schemars 1.0.4", "serde_core", "serde_json", "serde_with_macros", @@ -5533,14 +5586,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.16.1" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" +checksum = "a7e6c180db0816026a61afa1cff5344fb7ebded7e4d3062772179f2501481c27" dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -5549,7 +5602,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.11.4", "itoa", "ryu", "serde", @@ -5614,7 +5667,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "637e95dcd06bc1bb3f86ed9db1e1832a70125f32daae071ef37dcb7701b7d4fe" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "either", "incrementalmerkletree", "tracing", @@ -5639,9 +5692,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" 
[[package]] name = "signal-hook-registry" -version = "1.4.7" +version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" +checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" dependencies = [ "libc", ] @@ -5658,9 +5711,9 @@ dependencies = [ [[package]] name = "simd-adler32" -version = "0.3.8" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" [[package]] name = "simdutf8" @@ -5668,6 +5721,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" +[[package]] +name = "similar" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" + [[package]] name = "simple-mermaid" version = "0.2.0" @@ -5699,9 +5758,9 @@ checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "slotmap" -version = "1.1.1" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdd58c3c93c3d278ca835519292445cb4b0d4dc59ccfdf7ceadaab3f8aeb4038" +checksum = "dbff4acf519f630b3a3ddcfaea6c06b42174d9a44bc70c620e9ed1649d58b82a" dependencies = [ "serde", "version_check", @@ -5762,6 +5821,28 @@ dependencies = [ "sha1", ] +[[package]] +name = "spandoc" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25ed5a886d0234ac48bea41d450e4253cdd0642656249d6454e74c023d0f8821" +dependencies = [ + "spandoc-attribute", + "tracing", + "tracing-futures", +] + +[[package]] +name = "spandoc-attribute" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bdfb59103e43a0f99a346b57860d50f2138a7008d08acd964e9ac0fef3ae9a5" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "spin" version = "0.5.2" @@ -5877,7 +5958,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -5889,7 +5970,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -5911,9 +5992,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.111" +version = "2.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" dependencies = [ "proc-macro2", "quote", @@ -5937,7 +6018,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -5985,7 +6066,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -5996,7 +6077,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -6051,12 +6132,11 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.8.2" +version = "0.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", - "serde_core", "zerovec", ] @@ -6101,7 +6181,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -6110,7 +6190,7 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls 0.23.35", + "rustls 0.23.32", "tokio", ] @@ -6128,9 +6208,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.17" +version = "0.7.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" dependencies = [ "bytes", "futures-core", @@ -6156,11 +6236,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", - "serde_spanned", + "serde_spanned 0.6.9", "toml_datetime 0.6.11", "toml_edit 0.22.27", ] +[[package]] +name = "toml" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" +dependencies = [ + "serde_core", + "serde_spanned 1.0.4", + "toml_datetime 0.7.3", + "toml_parser", + "winnow", +] + [[package]] name = "toml_datetime" version = "0.6.11" @@ -6185,9 +6278,9 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.11.4", "serde", - "serde_spanned", + "serde_spanned 0.6.9", "toml_datetime 0.6.11", "toml_write", "winnow", @@ -6195,11 +6288,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.23.9" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d7cbc3b4b49633d57a0509303158ca50de80ae32c265093b24c414705807832" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.11.4", "toml_datetime 0.7.3", "toml_parser", "winnow", @@ -6260,7 +6353,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9" dependencies = [ "async-trait", - "axum 0.8.7", + "axum 0.8.6", "base64", "bytes", "h2", @@ -6291,7 +6384,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" dependencies = [ "async-trait", - "axum 0.8.7", + "axum 0.8.6", "base64", "bytes", "h2", @@ -6324,7 +6417,7 @@ dependencies = [ "prost-build 0.13.5", "prost-types 0.13.5", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -6338,7 +6431,7 @@ dependencies = [ "prost-build 0.13.5", "prost-types 0.13.5", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -6350,7 +6443,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -6375,7 +6468,7 @@ dependencies = [ "prost-build 0.14.1", "prost-types 0.14.1", "quote", - "syn 2.0.111", + 
"syn 2.0.106", "tempfile", "tonic-build 0.14.2", ] @@ -6400,7 +6493,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5294c85610f52bcbe36fddde04a3a994c4ec382ceed455cfdc8252be7046008" dependencies = [ - "derive-deftly", + "derive-deftly 1.0.1", "educe", "futures", "oneshot-fused-workaround", @@ -6436,7 +6529,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "357650fb5bff5e94e5ecc7ee26c6af3f584c2be178b45da8f5ab81cf9f9d4795" dependencies = [ "bytes", - "derive-deftly", + "derive-deftly 1.0.1", "digest 0.10.7", "educe", "getrandom 0.2.16", @@ -6454,10 +6547,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5341a132563ebeffa45ff60e6519394ee7ba58cb5cf65ba99e7ef879789d87b7" dependencies = [ "amplify", - "bitflags 2.10.0", + "bitflags 2.9.4", "bytes", "caret", - "derive-deftly", + "derive-deftly 1.0.1", "derive_more", "educe", "paste", @@ -6594,7 +6687,7 @@ checksum = "bca6cc0af790f5f02d8a06c8f692fa471207de2739d8b2921c04f9570af34d75" dependencies = [ "amplify", "cfg-if", - "derive-deftly", + "derive-deftly 1.0.1", "derive_builder_fork_arti", "educe", "either", @@ -6763,7 +6856,7 @@ checksum = "29a8f3ddf135d23e2c5443e97fb30c635767daa44923b142915d22bdaf47e2ea" dependencies = [ "amplify", "base64ct", - "derive-deftly", + "derive-deftly 1.0.1", "derive_builder_fork_arti", "derive_more", "dyn-clone", @@ -6828,7 +6921,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b22ecf1c5b6bfa7849bf92cad3daab16bbc741ac62a61c9fea47c8be2f982e01" dependencies = [ - "derive-deftly", + "derive-deftly 1.0.1", "derive_more", "downcast-rs", "paste", @@ -6852,7 +6945,7 @@ dependencies = [ "amplify", "arrayvec", "cfg-if", - "derive-deftly", + "derive-deftly 1.0.1", "derive_builder_fork_arti", "derive_more", "downcast-rs", @@ -6890,7 +6983,7 @@ dependencies = [ "base64ct", "by_address", "caret", - "derive-deftly", + "derive-deftly 1.0.1", "derive_builder_fork_arti", "derive_more", "hex", @@ -6919,7 +7012,7 @@ dependencies = [ "ctr", "curve25519-dalek", "der-parser", - "derive-deftly", + "derive-deftly 1.0.1", "derive_more", "digest 0.10.7", "ed25519-dalek", @@ -6964,7 +7057,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d63eef6dd4d38b16199cf201de07b6de4a6af310f67bd71067d22ef746eb1a1d" dependencies = [ - "derive-deftly", + "derive-deftly 1.0.1", "derive_more", "dyn-clone", "educe", @@ -6993,7 +7086,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e17883b3b2ef17a5f9ad4ae8a78de2c4b3d629ccfeb66c15c4cb33494384f08" dependencies = [ "async-trait", - "bitflags 2.10.0", + "bitflags 2.9.4", "derive_more", "futures", "humantime", @@ -7023,7 +7116,7 @@ checksum = "aec11efe729e4ca9c5b03a8702f94b82dfd0ab450c0d58c4ca5ee9e4c49e6f89" dependencies = [ "amplify", "base64ct", - "bitflags 2.10.0", + "bitflags 2.9.4", "cipher", "derive_builder_fork_arti", "derive_more", @@ -7061,7 +7154,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be9958219e20477aef5645f99d0d3695e01bb230bbd36a0fd4c207f5428abe6b" dependencies = [ - "derive-deftly", + "derive-deftly 1.0.1", "derive_more", "filetime", "fs-mistrust", @@ -7095,7 +7188,7 @@ dependencies = [ "caret", "cipher", "coarsetime", - "derive-deftly", + "derive-deftly 1.0.1", "derive_builder_fork_arti", "derive_more", "digest 0.10.7", @@ -7197,7 +7290,7 @@ checksum = 
"9077af79aac5ad0c5336af1cc41a31c617bbc09261210a2427deb84f14356857" dependencies = [ "amplify", "async-trait", - "derive-deftly", + "derive-deftly 1.0.1", "derive_more", "educe", "futures", @@ -7225,7 +7318,7 @@ checksum = "d3892f6d0c323b87a2390f41e91c0294c6d5852f00e955e41e85a0116636e82d" dependencies = [ "amplify", "caret", - "derive-deftly", + "derive-deftly 1.0.1", "educe", "safelog", "subtle", @@ -7240,7 +7333,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7388f506c9278d07421e6799aa8a912adee4ea6921b3dd08a1247a619de82124" dependencies = [ - "derive-deftly", + "derive-deftly 1.0.1", "derive_more", "thiserror 2.0.17", "tor-memquota", @@ -7274,7 +7367,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.12.1", + "indexmap 2.11.4", "pin-project-lite", "slab", "sync_wrapper", @@ -7316,11 +7409,11 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.8" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "bytes", "futures-util", "http", @@ -7346,9 +7439,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.43" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -7358,20 +7451,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.31" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] name = "tracing-core" -version = "0.1.35" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", @@ -7410,9 +7503,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.22" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" dependencies = [ "matchers", "nu-ansi-term", @@ -7445,7 +7538,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -7456,7 +7549,7 @@ checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -7533,15 +7626,15 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.22" +version = "1.0.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" [[package]] name = "unicode-normalization" -version = "0.1.25" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd4f6878c9cb28d874b009da9e8d183b5abc80117c40bbd187a1fde336be6e8" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] @@ -7564,7 +7657,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ - "crypto-common 0.1.7", + "crypto-common 0.1.6", "subtle", ] @@ -7621,9 +7714,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.19.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ "js-sys", "wasm-bindgen", @@ -7655,7 +7748,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -7780,9 +7873,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.106" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d" dependencies = [ "cfg-if", "once_cell", @@ -7791,11 +7884,25 @@ dependencies = [ "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn 2.0.106", + "wasm-bindgen-shared", +] + [[package]] name = "wasm-bindgen-futures" -version = "0.4.56" +version = "0.4.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" +checksum = "7e038d41e478cc73bae0ff9b36c60cff1c98b8f38f8d7e8061e79ee63608ac5c" dependencies = [ "cfg-if", "js-sys", @@ -7806,9 +7913,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.106" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7816,22 +7923,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.106" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" dependencies = [ - "bumpalo", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", + "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.106" +version = "0.2.104" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1" dependencies = [ "unicode-ident", ] @@ -7844,9 +7951,9 @@ checksum = "323f4da9523e9a669e1eaf9c6e763892769b1d38c623913647bfdc1532fe4549" [[package]] name = "web-sys" -version = "0.3.83" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +checksum = "9367c417a924a74cae129e6a2ae3b47fabb1f8995595ab474029da749a8be120" dependencies = [ "js-sys", "wasm-bindgen", @@ -7874,14 +7981,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "webpki-roots 1.0.4", + "webpki-roots 1.0.3", ] [[package]] name = "webpki-roots" -version = "1.0.4" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" +checksum = "32b130c0d2d49f8b6889abc456e795e82525204f27c42cf767cf0d7734e089b8" dependencies = [ "rustls-pki-types", ] @@ -7984,7 +8091,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -7995,7 +8102,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -8189,9 +8296,9 @@ checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" -version = "0.7.14" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" dependencies = [ "memchr", ] @@ -8210,9 +8317,9 @@ checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "writeable" -version = "0.6.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "wyz" @@ -8251,18 +8358,13 @@ version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213b7324336b53d2414b2db8537e56544d981803139155afa84f76eeebb7a546" -[[package]] -name = "yansi" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" - [[package]] name = "yoke" -version = "0.8.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ + "serde", "stable_deref_trait", "yoke-derive", "zerofrom", @@ -8270,13 +8372,13 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.8.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +checksum = 
"38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", "synstructure", ] @@ -8298,7 +8400,7 @@ dependencies = [ "derive_more", "hex", "http", - "indexmap 2.12.1", + "indexmap 2.11.4", "jsonrpsee-types", "prost 0.13.5", "reqwest", @@ -8310,6 +8412,7 @@ dependencies = [ "tonic 0.12.3", "tracing", "url", + "zaino-common", "zaino-proto", "zaino-testvectors", "zebra-chain 3.1.0", @@ -8324,6 +8427,8 @@ dependencies = [ "tonic 0.12.3", "tonic-build 0.12.3", "which 4.4.2", + "zebra-chain 3.1.0", + "zebra-state", ] [[package]] @@ -8352,7 +8457,7 @@ version = "0.1.2" dependencies = [ "arc-swap", "async-trait", - "bitflags 2.10.0", + "bitflags 2.9.4", "blake2", "bs58", "cargo-lock", @@ -8362,13 +8467,16 @@ dependencies = [ "derive_more", "futures", "hex", - "indexmap 2.12.1", + "incrementalmerkletree", + "indexmap 2.11.4", "lmdb", "lmdb-sys", "nonempty", "once_cell", "primitive-types 0.13.1", + "proptest", "prost 0.13.5", + "rand 0.8.5", "reqwest", "sapling-crypto", "serde", @@ -8441,9 +8549,10 @@ name = "zainod" version = "0.1.2" dependencies = [ "clap", - "figment", + "config", "http", "serde", + "tempfile", "thiserror 1.0.69", "tokio", "toml 0.5.11", @@ -8832,7 +8941,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bed6cf5b2b4361105d4ea06b2752f0c8af4641756c7fbc9858a80af186c234f" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "bounded-vec", "ripemd 0.1.3", "secp256k1", @@ -8905,7 +9014,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17a86ec712da2f25d3edc7e5cf0b1d15ef41ab35305e253f0f7cd9cecc0f1939" dependencies = [ "bech32", - "bitflags 2.10.0", + "bitflags 2.9.4", "bitflags-serde-legacy", "bitvec", "blake2b_simd", @@ -8962,7 +9071,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b4aa7e85afd7bdf159e8c9a973d32bfc410be42ce82c2396690ae1208933bb8" dependencies = [ "bech32", - "bitflags 2.10.0", + "bitflags 2.9.4", "bitflags-serde-legacy", "bitvec", "blake2b_simd", @@ -8986,6 +9095,10 @@ dependencies = [ "num-integer", "orchard", "primitive-types 0.12.2", + "proptest", + "proptest-derive", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_core 0.6.4", "rayon", "reddsa", @@ -9014,6 +9127,7 @@ dependencies = [ "zcash_protocol 0.7.0", "zcash_script", "zcash_transparent 0.6.0", + "zebra-test", ] [[package]] @@ -9061,7 +9175,7 @@ version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3edeb353c33962fb5f9012745ddb44d33ee90acb8c9410669bf54d72488b8cf" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.9.4", "byteorder", "bytes", "chrono", @@ -9069,7 +9183,7 @@ dependencies = [ "futures", "hex", "humantime-serde", - "indexmap 2.12.1", + "indexmap 2.11.4", "itertools 0.14.0", "lazy_static", "metrics", @@ -9123,7 +9237,7 @@ dependencies = [ "hex", "http-body-util", "hyper", - "indexmap 2.12.1", + "indexmap 2.11.4", "jsonrpsee", "jsonrpsee-proc-macros", "jsonrpsee-types", @@ -9188,7 +9302,7 @@ dependencies = [ "hex-literal", "human_bytes", "humantime-serde", - "indexmap 2.12.1", + "indexmap 2.11.4", "itertools 0.14.0", "lazy_static", "metrics", @@ -9209,24 +9323,52 @@ dependencies = [ "zebra-node-services", ] +[[package]] +name = "zebra-test" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a7e1a3cc12e6a0cea765b16012f3c7dfc5394ba3e0a8fcaf10cd0fa3b57d8c0" +dependencies = [ + "color-eyre", + "futures", + "hex", + 
"humantime", + "indexmap 2.11.4", + "insta", + "itertools 0.14.0", + "lazy_static", + "once_cell", + "owo-colors", + "proptest", + "rand 0.8.5", + "regex", + "spandoc", + "thiserror 2.0.17", + "tokio", + "tower 0.4.13", + "tracing", + "tracing-error", + "tracing-subscriber", +] + [[package]] name = "zerocopy" -version = "0.8.31" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.31" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -9246,7 +9388,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", "synstructure", ] @@ -9267,14 +9409,14 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] name = "zerotrie" -version = "0.2.3" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" dependencies = [ "displaydoc", "yoke", @@ -9283,11 +9425,10 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.11.5" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" dependencies = [ - "serde", "yoke", "zerofrom", "zerovec-derive", @@ -9295,13 +9436,13 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.11.2" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.106", ] [[package]] @@ -9441,7 +9582,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.14", "rust-embed", - "rustls 0.23.35", + "rustls 0.23.32", "sapling-crypto", "secp256k1", "secrecy", diff --git a/Cargo.toml b/Cargo.toml index db39ae9b0..2de396592 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -115,9 +115,9 @@ zaino-common.path = "zaino-common" zaino-testutils = { path = "zaino-testutils" } zaino-testvectors = { path = "zaino-testvectors" } zainod = { path = "zainod" } -figment = "0.10" +config = { version = "0.15", default-features = false, features = ["toml"] } nonempty = "0.11.0" -proptest = "~1.2" +proptest = "~1.6" zip32 = "0.2.1" # Patch for vulnerable dependency diff --git a/docs/rpc_api.md b/docs/rpc_api.md index 7c73e8dab..1a731448f 100644 --- a/docs/rpc_api.md +++ b/docs/rpc_api.md @@ -1,6 +1,9 @@ # Zaino RPC APIs ## Lightwallet gRPC Services -Zaino Currently Serves the following gRPC services as defined in the [LightWallet 
Protocol](https://github.com/zcash/librustzcash/blob/main/zcash_client_backend/proto/service.proto): +Zaino currently serves the following gRPC services as defined in the [LightWallet Protocol](https://github.com/zcash/lightwallet-protocol/blob/main/walletrpc/service.proto): + +(gRPC service method arguments and return values are defined [here](https://github.com/zcash/lightwallet-protocol/blob/main/walletrpc/compact_formats.proto).) + - GetLatestBlock (ChainSpec) returns (BlockID) - GetBlock (BlockID) returns (CompactBlock) - GetBlockNullifiers (BlockID) returns (CompactBlock) @@ -8,7 +11,7 @@ Zaino Currently Serves the following gRPC services as defined in the [LightWalle - GetBlockRangeNullifiers (BlockRange) returns (stream CompactBlock) - GetTransaction (TxFilter) returns (RawTransaction) - SendTransaction (RawTransaction) returns (SendResponse) - - GetTaddressTxids (TransparentAddressBlockFilter) returns (stream RawTransaction) + - GetTaddressTransactions (TransparentAddressBlockFilter) returns (stream RawTransaction) - GetTaddressBalance (AddressList) returns (Balance) - GetTaddressBalanceStream (stream Address) returns (Balance) (**MARKED FOR DEPRECATION**) - GetMempoolTx (Exclude) returns (stream CompactTx) @@ -19,11 +22,8 @@ Zaino Currently Serves the following gRPC services as defined in the [LightWalle - GetAddressUtxos (GetAddressUtxosArg) returns (GetAddressUtxosReplyList) - GetAddressUtxosStream (GetAddressUtxosArg) returns (stream GetAddressUtxosReply) - GetLightdInfo (Empty) returns (LightdInfo) - - Ping (Duration) returns (PingResponse) (**CURRENTLY UNIMPLEMENTED**) - ## Zcash RPC Services Zaino has also committed to taking over responsibility for serving all [Zcash RPC Services](https://zcash.github.io/rpc/) required by non-validator (miner) clients from Zcashd. A full specification of the Zcash RPC services served by Zaino, and their current state of development, can be seen [here](./Zaino-zcash-rpcs.pdf).
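For illustration, a client built against `zaino-proto` might construct a `GetBlockRange` request that opts into transparent data as follows. This is a minimal sketch based on the integration tests in this change; the helper function name is illustrative only, and the surrounding client wiring is omitted:

```rust
use zaino_proto::proto::service::{BlockId, BlockRange, PoolType};

/// Builds a `GetBlockRange` request for heights 100..=106 that asks for
/// transparent data in addition to the default Sapling and Orchard pools.
/// Leaving `pool_types` empty falls back to Sapling and Orchard only.
fn all_pools_block_range() -> BlockRange {
    BlockRange {
        start: Some(BlockId { height: 100, hash: vec![] }),
        end: Some(BlockId { height: 106, hash: vec![] }),
        pool_types: vec![
            PoolType::Transparent as i32,
            PoolType::Sapling as i32,
            PoolType::Orchard as i32,
        ],
    }
}
```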
- diff --git a/integration-tests/tests/chain_cache.rs b/integration-tests/tests/chain_cache.rs index d119276f8..2dd8f0663 100644 --- a/integration-tests/tests/chain_cache.rs +++ b/integration-tests/tests/chain_cache.rs @@ -15,7 +15,10 @@ async fn create_test_manager_and_connector( ) -> (TestManager, JsonRpSeeConnector) where T: ValidatorExt, - Service: zaino_state::ZcashService> + Send + Sync + 'static, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let test_manager = TestManager::::launch( @@ -32,7 +35,7 @@ where let json_service = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -68,10 +71,7 @@ mod chain_query_interface { }; use zcash_local_net::validator::{zcashd::Zcashd, zebrad::Zebrad}; use zebra_chain::{ - parameters::{ - testnet::{ConfiguredActivationHeights, RegtestParameters}, - NetworkKind, - }, + parameters::{testnet::RegtestParameters, NetworkKind}, serialization::{ZcashDeserialize, ZcashDeserializeInto}, }; @@ -92,7 +92,10 @@ mod chain_query_interface { ) where C: ValidatorExt, - Service: zaino_state::ZcashService> + Send + Sync + 'static, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let (test_manager, json_service) = create_test_manager_and_connector::( @@ -111,11 +114,11 @@ mod chain_query_interface { None => test_manager.data_dir.clone(), }; let network = match test_manager.network { - NetworkKind::Regtest => zebra_chain::parameters::Network::new_regtest( - RegtestParameters::from(ConfiguredActivationHeights::from( + NetworkKind::Regtest => { + zebra_chain::parameters::Network::new_regtest(RegtestParameters::from( test_manager.local_net.get_activation_heights().await, - )), - ), + )) + } NetworkKind::Testnet => zebra_chain::parameters::Network::new_default_testnet(), NetworkKind::Mainnet => zebra_chain::parameters::Network::Mainnet, @@ -131,7 +134,7 @@ mod chain_query_interface { // todo: does this matter? 
should_backup_non_finalized_state: true, }, - test_manager.full_node_rpc_listen_address, + test_manager.full_node_rpc_listen_address.to_string(), test_manager.full_node_grpc_listen_address, false, None, @@ -230,7 +233,10 @@ mod chain_query_interface { async fn get_block_range(validator: &ValidatorKind) where C: ValidatorExt, - Service: zaino_state::ZcashService> + Send + Sync + 'static, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let (test_manager, _json_service, _option_state_service, _chain_index, indexer) = @@ -280,7 +286,10 @@ mod chain_query_interface { async fn find_fork_point(validator: &ValidatorKind) where C: ValidatorExt, - Service: zaino_state::ZcashService> + Send + Sync + 'static, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let (test_manager, _json_service, _option_state_service, _chain_index, indexer) = @@ -320,7 +329,10 @@ mod chain_query_interface { async fn get_raw_transaction(validator: &ValidatorKind) where C: ValidatorExt, - Service: zaino_state::ZcashService> + Send + Sync + 'static, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let (test_manager, _json_service, _option_state_service, _chain_index, indexer) = @@ -348,9 +360,9 @@ mod chain_query_interface { assert_eq!( branch_id, - if height == Some(chain_index::types::GENESIS_HEIGHT) { + if height == chain_index::types::GENESIS_HEIGHT { None - } else if height == Some(Height::try_from(1).unwrap()) { + } else if height == Height::try_from(1).unwrap() { zebra_chain::parameters::NetworkUpgrade::Canopy .branch_id() .map(u32::from) @@ -382,7 +394,10 @@ mod chain_query_interface { async fn get_transaction_status(validator: &ValidatorKind) where C: ValidatorExt, - Service: zaino_state::ZcashService> + Send + Sync + 'static, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let (test_manager, _json_service, _option_state_service, _chain_index, indexer) = @@ -407,7 +422,7 @@ mod chain_query_interface { .unwrap(); assert_eq!( transaction_status_best_chain.unwrap(), - BestChainLocation::Block(*block_hash, height.unwrap()) + BestChainLocation::Block(*block_hash, height) ); assert!(transaction_status_nonbest_chain.is_empty()); } @@ -428,7 +443,10 @@ mod chain_query_interface { async fn sync_large_chain(validator: &ValidatorKind) where C: ValidatorExt, - Service: zaino_state::ZcashService> + Send + Sync + 'static, + Service: zaino_state::ZcashService> + + Send + + Sync + + 'static, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let (test_manager, json_service, option_state_service, _chain_index, indexer) = diff --git a/integration-tests/tests/fetch_service.rs b/integration-tests/tests/fetch_service.rs index e615c6719..48ea4651c 100644 --- a/integration-tests/tests/fetch_service.rs +++ b/integration-tests/tests/fetch_service.rs @@ -3,9 +3,10 @@ use futures::StreamExt as _; use hex::ToHex as _; use zaino_fetch::jsonrpsee::connector::{test_node_and_return_url, JsonRpSeeConnector}; +use zaino_proto::proto::compact_formats::CompactBlock; use zaino_proto::proto::service::{ - AddressList, BlockId, BlockRange, Exclude, GetAddressUtxosArg, GetSubtreeRootsArg, - TransparentAddressBlockFilter, TxFilter, + AddressList, BlockId, BlockRange, GetAddressUtxosArg, GetMempoolTxRequest, GetSubtreeRootsArg, + 
PoolType, TransparentAddressBlockFilter, TxFilter, }; use zaino_state::ChainIndex; use zaino_state::FetchServiceSubscriber; @@ -189,7 +190,7 @@ async fn fetch_service_get_raw_mempool(validator: &ValidatorKin let json_service = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -628,7 +629,7 @@ async fn fetch_service_get_latest_block(validator: &ValidatorKi let json_service = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -672,7 +673,7 @@ async fn assert_fetch_service_difficulty_matches_rpc(validator: let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -701,7 +702,7 @@ async fn assert_fetch_service_mininginfo_matches_rpc(validator: let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -730,7 +731,7 @@ async fn assert_fetch_service_peerinfo_matches_rpc(validator: & let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -784,7 +785,7 @@ async fn fetch_service_get_block_subsidy(validator: &ValidatorK let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -867,7 +868,7 @@ async fn fetch_service_get_block_header(validator: &ValidatorKi let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -1050,6 +1051,7 @@ async fn fetch_service_get_block_range(validator: &ValidatorKin height: 10, hash: Vec::new(), }), + pool_types: vec![], }; let fetch_service_stream = fetch_service_subscriber @@ -1068,6 +1070,311 @@ async fn fetch_service_get_block_range(validator: &ValidatorKin test_manager.close().await; } +#[allow(deprecated)] +async fn fetch_service_get_block_range_returns_all_pools( + validator: &ValidatorKind, +) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + for _ in 1..4 { + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + + test_manager 
+ .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + } + } else { + // zcashd + test_manager + .generate_blocks_and_poll_indexer(14, &fetch_service_subscriber) + .await; + + clients.faucet.sync_and_await().await.unwrap(); + } + + let recipient_transparent = clients.get_recipient_address("transparent").await; + let deshielding_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_transparent, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_sapling = clients.get_recipient_address("sapling").await; + let sapling_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_sapling, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_ua = clients.get_recipient_address("unified").await; + let orchard_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_ua, 250_000, None)], + ) + .await + .unwrap() + .head; + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + let start_height: u64 = if matches!(validator, ValidatorKind::Zebrad) { + 100 + } else { + 1 + }; + let end_height: u64 = if matches!(validator, ValidatorKind::Zebrad) { + 106 + } else { + 17 + }; + + let fetch_service_get_block_range = fetch_service_subscriber + .get_block_range(BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], + }) + .await + .unwrap() + .map(Result::unwrap) + .collect::<Vec<_>>() + .await; + + let compact_block = fetch_service_get_block_range.last().unwrap(); + + assert_eq!(compact_block.height, end_height); + + // Transparent tx data is included in compact blocks when the transparent pool type is + // requested, so the expected transaction count is 4 (3 sent txs + coinbase) + let expected_transaction_count = 4; + + // the compact block has the right number of transactions + assert_eq!(compact_block.vtx.len(), expected_transaction_count); + + // transaction order is not guaranteed, so look the transactions up by TXID + let deshielding_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == deshielding_txid.as_ref().to_vec()) + .unwrap(); + + dbg!(deshielding_tx); + + assert!( + !deshielding_tx.vout.is_empty(), + "transparent data should be present when the transparent pool type is specified in the request." + ); + + // transaction order is not guaranteed, so look the transactions up by TXID + let sapling_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == sapling_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !sapling_tx.outputs.is_empty(), + "sapling data should be present when all pool types are specified in the request." + ); + + let orchard_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == orchard_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !orchard_tx.actions.is_empty(), + "orchard data should be present when all pool types are specified in the request."
+ ); + + test_manager.close().await; +} + +#[allow(deprecated)] +async fn fetch_service_get_block_range_no_pools_returns_sapling_orchard( + validator: &ValidatorKind, +) { + let mut test_manager = + TestManager::::launch(validator, None, None, None, true, false, true) + .await + .unwrap(); + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + + clients.faucet.sync_and_await().await.unwrap(); + + let fetch_service_subscriber = test_manager.service_subscriber.take().unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager + .generate_blocks_and_poll_indexer(100, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + for _ in 1..4 { + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + clients.faucet.sync_and_await().await.unwrap(); + } + } else { + // zcashd + test_manager + .generate_blocks_and_poll_indexer(14, &fetch_service_subscriber) + .await; + + clients.faucet.sync_and_await().await.unwrap(); + } + + let recipient_transparent = clients.get_recipient_address("transparent").await; + let deshielding_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_transparent, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_sapling = clients.get_recipient_address("sapling").await; + let sapling_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_sapling, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_ua = clients.get_recipient_address("unified").await; + let orchard_txid = zaino_testutils::from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_ua, 250_000, None)], + ) + .await + .unwrap() + .head; + + test_manager + .generate_blocks_and_poll_indexer(1, &fetch_service_subscriber) + .await; + + let start_height: u64 = if matches!(validator, ValidatorKind::Zebrad) { + 100 + } else { + 10 + }; + let end_height: u64 = if matches!(validator, ValidatorKind::Zebrad) { + 106 + } else { + 17 + }; + + let fetch_service_get_block_range = fetch_service_subscriber + .get_block_range(BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![], + }) + .await + .unwrap() + .map(Result::unwrap) + .collect::<Vec<_>>() + .await; + + let compact_block = fetch_service_get_block_range.last().unwrap(); + + assert_eq!(compact_block.height, end_height); + + let expected_tx_count = if matches!(validator, ValidatorKind::Zebrad) { + 3 + } else { + 4 // zcashd shields coinbase, so its tx count is one more than zebrad's + }; + // the compact block has the expected number of transactions + assert_eq!(compact_block.vtx.len(), expected_tx_count); + + // transaction order is not guaranteed, so look the transactions up by TXID + let deshielding_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == deshielding_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + deshielding_tx.vout.is_empty(), + "transparent data should not be present when the transparent pool type is not specified in the request."
+ ); + + // transaction order is not guaranteed, so look the transactions up by TXID + let sapling_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == sapling_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !sapling_tx.outputs.is_empty(), + "sapling data should be present when the default pool types are used." + ); + + let orchard_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == orchard_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !orchard_tx.actions.is_empty(), + "orchard data should be present when the default pool types are used." + ); + + test_manager.close().await; +} + #[allow(deprecated)] async fn fetch_service_get_block_range_nullifiers(validator: &ValidatorKind) { let mut test_manager = @@ -1090,6 +1397,11 @@ async fn fetch_service_get_block_range_nullifiers(validator: &V height: 10, hash: Vec::new(), }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], }; let fetch_service_stream = fetch_service_subscriber @@ -1098,7 +1410,7 @@ .unwrap(); let fetch_service_compact_blocks: Vec<_> = fetch_service_stream.collect().await; - let fetch_nullifiers: Vec<_> = fetch_service_compact_blocks + let fetch_nullifiers: Vec<CompactBlock> = fetch_service_compact_blocks .into_iter() .filter_map(|result| result.ok()) .collect(); @@ -1273,6 +1585,11 @@ async fn fetch_service_get_taddress_txids(validator: &Validator height: chain_height as u64, hash: Vec::new(), }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], }), }; @@ -1408,7 +1725,10 @@ async fn fetch_service_get_mempool_tx(validator: &ValidatorKind tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - let exclude_list_empty = Exclude { txid: Vec::new() }; + let exclude_list_empty = GetMempoolTxRequest { + exclude_txid_suffixes: Vec::new(), + pool_types: Vec::new(), + }; let fetch_service_stream = fetch_service_subscriber .get_mempool_tx(exclude_list_empty.clone()) @@ -1422,7 +1742,7 @@ .collect(); let mut sorted_fetch_mempool_tx = fetch_mempool_tx.clone(); - sorted_fetch_mempool_tx.sort_by_key(|tx| tx.hash.clone()); + sorted_fetch_mempool_tx.sort_by_key(|tx| tx.txid.clone()); // Transaction IDs from quick_send are already in internal byte order, // which matches what the mempool returns, so no reversal needed @@ -1432,23 +1752,13 @@ let mut sorted_txids = [tx1_bytes, tx2_bytes]; sorted_txids.sort_by_key(|hash| *hash); - // Verify we have exactly 2 transactions in the mempool - assert_eq!( - sorted_fetch_mempool_tx.len(), - 2, - "Expected exactly 2 transactions in mempool, but found {}", - sorted_fetch_mempool_tx.len() - ); - - assert_eq!(sorted_fetch_mempool_tx[0].hash, sorted_txids[0]); - assert_eq!(sorted_fetch_mempool_tx[1].hash, sorted_txids[1]); + assert_eq!(sorted_fetch_mempool_tx[0].txid, sorted_txids[0]); + assert_eq!(sorted_fetch_mempool_tx[1].txid, sorted_txids[1]); assert_eq!(sorted_fetch_mempool_tx.len(), 2); - // For the exclude list, we send bytes in internal order. The backend (fetch.rs) - // will reverse them to RPC format before matching against mempool keys (which are stored in RPC format). - // We take the last 8 bytes of internal order, which after reversal becomes the first 8 bytes of RPC format.
- let exclude_list = Exclude { - txid: vec![sorted_txids[0][24..].to_vec()], + // Exclude the first sorted tx by sending the trailing 24 bytes (bytes 8..32) of its txid + // as a suffix filter. + let exclude_list = GetMempoolTxRequest { + exclude_txid_suffixes: vec![sorted_txids[0][8..].to_vec()], + pool_types: vec![], }; let exclude_fetch_service_stream = fetch_service_subscriber @@ -1463,9 +1773,9 @@ async fn fetch_service_get_mempool_tx(validator: &ValidatorKind .collect(); let mut sorted_exclude_fetch_mempool_tx = exclude_fetch_mempool_tx.clone(); - sorted_exclude_fetch_mempool_tx.sort_by_key(|tx| tx.hash.clone()); + sorted_exclude_fetch_mempool_tx.sort_by_key(|tx| tx.txid.clone()); - assert_eq!(sorted_exclude_fetch_mempool_tx[0].hash, sorted_txids[1]); + assert_eq!(sorted_exclude_fetch_mempool_tx[0].txid, sorted_txids[1]); assert_eq!(sorted_exclude_fetch_mempool_tx.len(), 1); test_manager.close().await; @@ -1608,7 +1918,7 @@ async fn fetch_service_get_subtree_roots(validator: &ValidatorK }; let fetch_service_stream = fetch_service_subscriber - .get_subtree_roots(subtree_roots_arg.clone()) + .get_subtree_roots(subtree_roots_arg) .await .unwrap(); let fetch_service_roots: Vec<_> = fetch_service_stream.collect().await; @@ -1770,7 +2080,7 @@ async fn assert_fetch_service_getnetworksols_matches_rpc( let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -1932,7 +2242,7 @@ mod zcashd { let jsonrpc_client = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -1987,6 +2297,19 @@ mod zcashd { fetch_service_get_block_range::(&ValidatorKind::Zcashd).await; } + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range_no_pool_type_returns_sapling_orchard() { + fetch_service_get_block_range_no_pools_returns_sapling_orchard::( + &ValidatorKind::Zcashd, + ) + .await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range_returns_all_pools_when_requested() { + fetch_service_get_block_range_returns_all_pools::(&ValidatorKind::Zcashd).await; + } + #[tokio::test(flavor = "multi_thread")] pub(crate) async fn block_range_nullifiers() { fetch_service_get_block_range_nullifiers::(&ValidatorKind::Zcashd).await; @@ -2163,6 +2486,19 @@ mod zebrad { fetch_service_get_block::(&ValidatorKind::Zebrad).await; } + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range_returns_all_pools_when_requested() { + fetch_service_get_block_range_returns_all_pools::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn block_range_no_pool_type_returns_sapling_orchard() { + fetch_service_get_block_range_no_pools_returns_sapling_orchard::( + &ValidatorKind::Zebrad, + ) + .await; + } + #[tokio::test(flavor = "multi_thread")] pub(crate) async fn block_header() { fetch_service_get_block_header::(&ValidatorKind::Zebrad).await; @@ -2203,11 +2539,6 @@ mod zebrad { fetch_service_get_block_nullifiers::(&ValidatorKind::Zebrad).await; } - #[tokio::test(flavor = "multi_thread")] - pub(crate) async fn block_range() { - fetch_service_get_block_range::(&ValidatorKind::Zebrad).await; - } - #[tokio::test(flavor = "multi_thread")] pub(crate) async fn block_range_nullifiers() { fetch_service_get_block_range_nullifiers::(&ValidatorKind::Zebrad).await; diff --git 
a/integration-tests/tests/json_server.rs b/integration-tests/tests/json_server.rs index 27099b099..f577e5d23 100644 --- a/integration-tests/tests/json_server.rs +++ b/integration-tests/tests/json_server.rs @@ -44,7 +44,7 @@ async fn create_zcashd_test_manager_and_fetch_services( println!("Launching zcashd fetch service.."); let zcashd_fetch_service = FetchService::spawn(FetchServiceConfig::new( - test_manager.full_node_rpc_listen_address, + test_manager.full_node_rpc_listen_address.to_string(), None, None, None, @@ -71,7 +71,7 @@ async fn create_zcashd_test_manager_and_fetch_services( println!("Launching zaino fetch service.."); let zaino_fetch_service = FetchService::spawn(FetchServiceConfig::new( - test_manager.full_node_rpc_listen_address, + test_manager.full_node_rpc_listen_address.to_string(), test_manager.json_server_cookie_dir.clone(), None, None, diff --git a/integration-tests/tests/local_cache.rs b/integration-tests/tests/local_cache.rs index 6ac265222..fe23fa68c 100644 --- a/integration-tests/tests/local_cache.rs +++ b/integration-tests/tests/local_cache.rs @@ -40,7 +40,7 @@ async fn create_test_manager_and_block_cache( let json_service = JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), diff --git a/integration-tests/tests/state_service.rs b/integration-tests/tests/state_service.rs index 729082617..126c1e884 100644 --- a/integration-tests/tests/state_service.rs +++ b/integration-tests/tests/state_service.rs @@ -1,14 +1,16 @@ +use futures::StreamExt; use zaino_common::network::ActivationHeights; use zaino_common::{DatabaseConfig, ServiceConfig, StorageConfig}; use zaino_fetch::jsonrpsee::response::address_deltas::GetAddressDeltasParams; +use zaino_proto::proto::service::{BlockId, BlockRange, PoolType, TransparentAddressBlockFilter}; use zaino_state::ChainIndex as _; +use zaino_state::{LightWalletService, ZcashService}; + #[allow(deprecated)] use zaino_state::{ FetchService, FetchServiceConfig, FetchServiceSubscriber, LightWalletIndexer, StateService, StateServiceConfig, StateServiceSubscriber, ZcashIndexer, }; -use zaino_testutils::Validator as _; -use zaino_state::{LightWalletService, ZcashService}; use zaino_testutils::{from_inputs, ValidatorExt}; use zaino_testutils::{TestManager, ValidatorKind, ZEBRAD_TESTNET_CACHE_DIR}; use zainodlib::config::ZainodConfig; @@ -79,7 +81,7 @@ async fn create_test_manager_and_services( test_manager.local_net.print_stdout(); let fetch_service = FetchService::spawn(FetchServiceConfig::new( - test_manager.full_node_rpc_listen_address, + test_manager.full_node_rpc_listen_address.to_string(), None, None, None, @@ -117,7 +119,7 @@ async fn create_test_manager_and_services( debug_validity_check_interval: None, should_backup_non_finalized_state: false, }, - test_manager.full_node_rpc_listen_address, + test_manager.full_node_rpc_listen_address.to_string(), test_manager.full_node_grpc_listen_address, false, None, @@ -163,7 +165,7 @@ async fn generate_blocks_and_poll_all_chain_indexes( ) where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { test_manager.generate_blocks_and_poll(n).await; @@ -635,6 +637,311 @@ async fn state_service_get_raw_mempool_testnet() { test_manager.close().await; } +/// Tests that calls to `get_block_range` over
the same block range return the same data when +/// the default `PoolType`s are specified explicitly and when an empty Vec is passed, verifying +/// that the method falls back to the default pools when none are specified. +async fn state_service_get_block_range_returns_default_pools( + validator: &ValidatorKind, +) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let recipient_ua = clients.get_recipient_address("unified").await; + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) + .await + .unwrap(); + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let start_height: u64 = 100; + let end_height: u64 = 103; + + let default_pools_request = BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![], + }; + + let fetch_service_get_block_range = fetch_service_subscriber + .get_block_range(default_pools_request.clone()) + .await + .unwrap() + .map(Result::unwrap) + .collect::<Vec<_>>() + .await; + + let explicit_default_pool_request = BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![PoolType::Sapling as i32, PoolType::Orchard as i32], + }; + + let fetch_service_get_block_range_specifying_pools = fetch_service_subscriber + .get_block_range(explicit_default_pool_request.clone()) + .await + .unwrap() + .map(Result::unwrap) + .collect::<Vec<_>>() + .await; + + assert_eq!( + fetch_service_get_block_range, + fetch_service_get_block_range_specifying_pools + ); + + let state_service_get_block_range_specifying_pools = state_service_subscriber + .get_block_range(explicit_default_pool_request) + .await + .unwrap() + .map(Result::unwrap) + .collect::<Vec<_>>() + .await; + + let state_service_get_block_range = state_service_subscriber + .get_block_range(default_pools_request) + .await + .unwrap() + .map(Result::unwrap) + .collect::<Vec<_>>() + .await; + + assert_eq!( + state_service_get_block_range, + state_service_get_block_range_specifying_pools + ); + + // check that the block range is the same between fetch service and state service + assert_eq!(fetch_service_get_block_range, state_service_get_block_range); + + let compact_block = state_service_get_block_range.last().unwrap(); + + assert_eq!(compact_block.height, end_height); + + // the compact block has 1 transaction + assert_eq!(compact_block.vtx.len(), 1); + + let shielded_tx = compact_block.vtx.first().unwrap(); + assert_eq!(shielded_tx.index, 1); + // transparent data should not be present when no
pool types are requested + assert_eq!( + shielded_tx.vin, + vec![], + "transparent data should not be present when no pool types are specified in the request." + ); + assert_eq!( + shielded_tx.vout, + vec![], + "transparent data should not be present when no pool types are specified in the request." + ); + test_manager.close().await; +} + +/// Tests that the `GetBlockRange` RPC returns data for all pools when requested. +async fn state_service_get_block_range_returns_all_pools( + validator: &ValidatorKind, +) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + generate_blocks_and_poll_all_chain_indexes( + 100, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + clients.faucet.sync_and_await().await.unwrap(); + for _ in 1..4 { + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + clients.faucet.sync_and_await().await.unwrap(); + } + }; + + let recipient_transparent = clients.get_recipient_address("transparent").await; + let deshielding_txid = from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_transparent, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_sapling = clients.get_recipient_address("sapling").await; + let sapling_txid = from_inputs::quick_send( + &mut clients.faucet, + vec![(&recipient_sapling, 250_000, None)], + ) + .await + .unwrap() + .head; + + let recipient_ua = clients.get_recipient_address("unified").await; + let orchard_txid = + from_inputs::quick_send(&mut clients.faucet, vec![(&recipient_ua, 250_000, None)]) + .await + .unwrap() + .head; + + generate_blocks_and_poll_all_chain_indexes( + 1, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let start_height: u64 = 100; + let end_height: u64 = 106; + + let block_range = BlockRange { + start: Some(BlockId { + height: start_height, + hash: vec![], + }), + end: Some(BlockId { + height: end_height, + hash: vec![], + }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], + }; + + let fetch_service_get_block_range = fetch_service_subscriber + .get_block_range(block_range.clone()) + .await + .unwrap() + .map(Result::unwrap) + .collect::<Vec<_>>() + .await; + + let state_service_get_block_range = state_service_subscriber + .get_block_range(block_range) + .await + .unwrap() + .map(Result::unwrap) + .collect::<Vec<_>>() + .await; + + // check that the block range is the same between fetch service and state service + assert_eq!(fetch_service_get_block_range, state_service_get_block_range); + + let compact_block = state_service_get_block_range.last().unwrap(); + + assert_eq!(compact_block.height, end_height); + + // the compact block has 4 transactions (3 sent + coinbase) + assert_eq!(compact_block.vtx.len(), 4); + + // transaction order is not guaranteed, so look the transactions up by TXID + let deshielding_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == deshielding_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !deshielding_tx.vout.is_empty(), + 
"transparent data should be present when transaparent pool type is specified in the request." + ); + + // transaction order is not guaranteed so it's necessary to look up for them by TXID + let sapling_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == sapling_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !sapling_tx.outputs.is_empty(), + "sapling data should be present when all pool types are specified in the request." + ); + + let orchard_tx = compact_block + .vtx + .iter() + .find(|tx| tx.txid == orchard_txid.as_ref().to_vec()) + .unwrap(); + + assert!( + !orchard_tx.actions.is_empty(), + "orchard data should be present when all pool types are specified in the request." + ); + + test_manager.close().await; +} + async fn state_service_z_get_treestate(validator: &ValidatorKind) { let ( mut test_manager, @@ -958,6 +1265,80 @@ async fn state_service_get_raw_transaction_testnet() { test_manager.close().await; } +async fn state_service_get_address_transactions_regtest( + validator: &ValidatorKind, +) { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::(validator, None, true, true, None).await; + + let mut clients = test_manager + .clients + .take() + .expect("Clients are not initialized"); + let recipient_taddr = clients.get_recipient_address("transparent").await; + clients.faucet.sync_and_await().await.unwrap(); + + if matches!(validator, ValidatorKind::Zebrad) { + test_manager.local_net.generate_blocks(100).await.unwrap(); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + clients.faucet.sync_and_await().await.unwrap(); + clients.faucet.quick_shield(AccountId::ZERO).await.unwrap(); + test_manager.local_net.generate_blocks(1).await.unwrap(); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + clients.faucet.sync_and_await().await.unwrap(); + }; + + let tx = from_inputs::quick_send( + &mut clients.faucet, + vec![(recipient_taddr.as_str(), 250_000, None)], + ) + .await + .unwrap(); + test_manager.local_net.generate_blocks(1).await.unwrap(); + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + + let chain_height: u32 = fetch_service_subscriber + .indexer + .snapshot_nonfinalized_state() + .best_tip + .height + .into(); + dbg!(&chain_height); + + let state_service_txids = state_service_subscriber + .get_taddress_transactions(TransparentAddressBlockFilter { + address: recipient_taddr, + range: Some(BlockRange { + start: Some(BlockId { + height: (chain_height - 2) as u64, + hash: vec![], + }), + end: Some(BlockId { + height: chain_height as u64, + hash: vec![], + }), + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], + }), + }) + .await + .unwrap(); + + dbg!(&tx); + + dbg!(&state_service_txids); + assert!(state_service_txids.count().await > 0); + + test_manager.close().await; +} async fn state_service_get_address_tx_ids(validator: &ValidatorKind) { let ( mut test_manager, @@ -1361,7 +1742,12 @@ mod zebra { state_service_get_address_utxos_testnet().await; } - #[tokio::test(flavor = "multi_thread")] + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] + async fn taddress_transactions_regtest() { + state_service_get_address_transactions_regtest::(&ValidatorKind::Zebrad).await; + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn address_tx_ids_regtest() { state_service_get_address_tx_ids::(&ValidatorKind::Zebrad).await; } @@ -1652,6 
+2038,20 @@ mod zebra { use super::*; + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn get_block_range_default_request_returns_no_t_data_regtest() { + state_service_get_block_range_returns_default_pools::( + &ValidatorKind::Zebrad, + ) + .await; + } + + #[tokio::test(flavor = "multi_thread")] + pub(crate) async fn get_block_range_returns_all_pools_regtest() { + state_service_get_block_range_returns_all_pools::(&ValidatorKind::Zebrad) + .await; + } + #[tokio::test(flavor = "multi_thread")] pub(crate) async fn subtrees_by_index_regtest() { state_service_z_get_subtrees_by_index::(&ValidatorKind::Zebrad).await; } @@ -1838,8 +2238,12 @@ mod zebra { pub(crate) mod lightwallet_indexer { use futures::StreamExt as _; - use zaino_proto::proto::service::{ - AddressList, BlockId, BlockRange, GetAddressUtxosArg, GetSubtreeRootsArg, TxFilter, + use zaino_proto::proto::{ + service::{ + AddressList, BlockId, BlockRange, GetAddressUtxosArg, GetSubtreeRootsArg, PoolType, + TxFilter, + }, + utils::pool_types_into_i32_vec, }; use zebra_rpc::methods::{GetAddressTxIdsRequest, GetBlock}; @@ -2053,7 +2457,7 @@ mod zebra { max_entries: 0, }; let fetch_service_sapling_subtree_roots = fetch_service_subscriber - .get_subtree_roots(sapling_subtree_roots_request.clone()) + .get_subtree_roots(sapling_subtree_roots_request) .await .unwrap() .map(Result::unwrap) @@ -2138,7 +2542,15 @@ mod zebra { height: 5, hash: vec![], }); - let request = BlockRange { start, end }; + let request = BlockRange { + start, + end, + pool_types: vec![ + PoolType::Transparent as i32, + PoolType::Sapling as i32, + PoolType::Orchard as i32, + ], + }; if nullifiers_only { let fetch_service_get_block_range = fetch_service_subscriber + .get_block_range_nullifiers(request.clone()) @@ -2232,7 +2644,7 @@ mod zebra { .await .unwrap(); let coinbase_tx = state_service_block_by_height.vtx.first().unwrap(); - let hash = coinbase_tx.hash.clone(); + let hash = coinbase_tx.txid.clone(); let request = TxFilter { block: None, index: 0, @@ -2462,5 +2874,83 @@ mod zebra { state_service_taddress_balance ); } + + #[tokio::test(flavor = "multi_thread")] + async fn get_transparent_data_from_compact_block_when_requested() { + let ( + mut test_manager, + _fetch_service, + fetch_service_subscriber, + _state_service, + state_service_subscriber, + ) = create_test_manager_and_services::( + &ValidatorKind::Zebrad, + None, + true, + true, + Some(NetworkKind::Regtest), + ) + .await; + + let clients = test_manager.clients.take().unwrap(); + let taddr = clients.get_faucet_address("transparent").await; + generate_blocks_and_poll_all_chain_indexes( + 5, + &test_manager, + fetch_service_subscriber.clone(), + state_service_subscriber.clone(), + ) + .await; + + let state_service_taddress_balance = state_service_subscriber + .get_taddress_balance(AddressList { + addresses: vec![taddr.clone()], + }) + .await + .unwrap(); + let fetch_service_taddress_balance = fetch_service_subscriber + .get_taddress_balance(AddressList { + addresses: vec![taddr], + }) + .await + .unwrap(); + assert_eq!( + fetch_service_taddress_balance, + state_service_taddress_balance + ); + + let chain_height = state_service_subscriber + .get_latest_block() + .await + .unwrap() + .height; + + let compact_block_range = state_service_subscriber + .get_block_range(BlockRange { + start: Some(BlockId { + height: 0, + hash: Vec::new(), + }), + end: Some(BlockId { + height: chain_height, + hash: Vec::new(), + }), + pool_types: pool_types_into_i32_vec( + [PoolType::Transparent, PoolType::Sapling, 
PoolType::Orchard].to_vec(), + ), + }) + .await + .unwrap() + .map(Result::unwrap) + .collect::<Vec<_>>() + .await; + + for cb in compact_block_range.into_iter() { + for tx in cb.vtx { + // the script pubkey of the first transparent output is not empty + assert!(!tx.vout.first().unwrap().script_pub_key.is_empty()); + } + } + } } } diff --git a/integration-tests/tests/wallet_to_validator.rs index f4f10faec..615203162 100644 --- a/integration-tests/tests/wallet_to_validator.rs +++ b/integration-tests/tests/wallet_to_validator.rs @@ -18,7 +18,7 @@ async fn connect_to_node_get_info_for_validator(validator: &Validato where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let mut test_manager = @@ -40,7 +40,7 @@ async fn send_to_orchard(validator: &ValidatorKind) where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let mut test_manager = @@ -88,7 +88,7 @@ async fn send_to_sapling(validator: &ValidatorKind) where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let mut test_manager = @@ -136,7 +136,7 @@ async fn send_to_transparent(validator: &ValidatorKind) where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let mut test_manager = @@ -167,7 +167,7 @@ where let fetch_service = zaino_fetch::jsonrpsee::connector::JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), @@ -234,7 +234,7 @@ async fn send_to_all(validator: &ValidatorKind) where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let mut test_manager = @@ -320,7 +320,7 @@ async fn shield_for_validator(validator: &ValidatorKind) where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let mut test_manager = @@ -388,7 +388,7 @@ async fn monitor_unverified_mempool_for_validator(validator: &Valida where V: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { let mut test_manager = @@ -443,7 +443,7 @@ where let fetch_service = zaino_fetch::jsonrpsee::connector::JsonRpSeeConnector::new_with_basic_auth( test_node_and_return_url( - test_manager.full_node_rpc_listen_address, + &test_manager.full_node_rpc_listen_address.to_string(), None, Some("xxxxxx".to_string()), Some("xxxxxx".to_string()), diff --git a/zaino-common/src/config/storage.rs index ccfde0aa3..11a539298 100644 --- a/zaino-common/src/config/storage.rs +++ b/zaino-common/src/config/storage.rs @@ -4,6 +4,7 @@ use std::path::PathBuf; /// Cache configuration for DashMaps. 
#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] +#[serde(default)] pub struct CacheConfig { /// Capacity of the DashMaps used for caching pub capacity: usize, @@ -69,6 +70,7 @@ impl DatabaseSize { /// Configures the file path and size limits for persistent storage /// used by Zaino services. #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] +#[serde(default)] pub struct DatabaseConfig { /// Database file path. pub path: PathBuf, diff --git a/zaino-common/src/config/validator.rs index a5acc0b10..c6bbe72d8 100644 --- a/zaino-common/src/config/validator.rs +++ b/zaino-common/src/config/validator.rs @@ -2,16 +2,16 @@ // use serde::{Deserialize, Serialize}; // use zebra_chain::parameters::testnet::ConfiguredActivationHeights; -use std::net::SocketAddr; use std::path::PathBuf; /// Validator (full-node) type for Zaino configuration. #[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize, serde::Serialize)] +#[serde(default)] pub struct ValidatorConfig { /// Full node / validator grpc listen address. Only exists for zebra - pub validator_grpc_listen_address: Option<SocketAddr>, - /// Full node / validator listen port. - pub validator_jsonrpc_listen_address: SocketAddr, + pub validator_grpc_listen_address: Option<String>, + /// Full node / validator listen address (supports hostname:port or ip:port format). + pub validator_jsonrpc_listen_address: String, /// Path to the validator cookie file. Enable validator rpc cookie authentication with Some. pub validator_cookie_path: Option<PathBuf>, /// Full node / validator Username. @@ -19,3 +19,16 @@ pub struct ValidatorConfig { /// Full node / validator password. pub validator_password: Option<String>, } + +/// Required by `#[serde(default)]` to fill missing fields when deserializing partial TOML configs. +impl Default for ValidatorConfig { + fn default() -> Self { + Self { + validator_grpc_listen_address: Some("127.0.0.1:18230".to_string()), + validator_jsonrpc_listen_address: "127.0.0.1:18232".to_string(), + validator_cookie_path: None, + validator_user: Some("xxxxxx".to_string()), + validator_password: Some("xxxxxx".to_string()), + } + } +} diff --git a/zaino-common/src/lib.rs index b7eac3266..d02052292 100644 --- a/zaino-common/src/lib.rs +++ b/zaino-common/src/lib.rs @@ -4,9 +4,13 @@ //! and common utilities used across the Zaino blockchain indexer ecosystem. pub mod config; +pub mod net; pub mod probing; pub mod status; +// Re-export network utilities +pub use net::{resolve_socket_addr, try_resolve_address, AddressResolution}; + // Re-export commonly used config types at crate root for backward compatibility. // This allows existing code using `use zaino_common::Network` to continue working. pub use config::network::{ActivationHeights, Network, ZEBRAD_DEFAULT_ACTIVATION_HEIGHTS}; diff --git a/zaino-common/src/net.rs new file mode 100644 index 000000000..d53912b14 --- /dev/null +++ b/zaino-common/src/net.rs @@ -0,0 +1,329 @@ +//! Network utilities for Zaino. + +use std::net::{SocketAddr, ToSocketAddrs}; + +/// Result of attempting to resolve an address string. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum AddressResolution { + /// Successfully resolved to a socket address. + Resolved(SocketAddr), + /// Address appears to be a valid hostname:port format but DNS lookup failed. + /// This is acceptable for deferred resolution (e.g., Docker DNS). + UnresolvedHostname { + /// The original address string. + address: String, + /// The DNS error message. 
+ error: String, + }, + /// Address format is invalid (missing port, garbage input, etc.). + /// This should always be treated as an error. + InvalidFormat { + /// The original address string. + address: String, + /// Description of what's wrong with the format. + reason: String, + }, +} + +impl AddressResolution { + /// Returns the resolved address if available. + pub fn resolved(&self) -> Option<SocketAddr> { + match self { + AddressResolution::Resolved(addr) => Some(*addr), + _ => None, + } + } + + /// Returns true if the address was successfully resolved. + pub fn is_resolved(&self) -> bool { + matches!(self, AddressResolution::Resolved(_)) + } + + /// Returns true if the address has a valid format but couldn't be resolved. + /// This is acceptable for deferred resolution scenarios like Docker DNS. + pub fn is_unresolved_hostname(&self) -> bool { + matches!(self, AddressResolution::UnresolvedHostname { .. }) + } + + /// Returns true if the address format is invalid. + pub fn is_invalid_format(&self) -> bool { + matches!(self, AddressResolution::InvalidFormat { .. }) + } +} + +/// Validates that an address string has a valid format (host:port). +/// +/// This performs basic format validation without DNS lookup: +/// - Must contain exactly one `:` separator (or be IPv6 format `[...]:port`) +/// - Port must be a valid number +/// - Host part must not be empty +fn validate_address_format(address: &str) -> Result<(), String> { + let address = address.trim(); + + if address.is_empty() { + return Err("Address cannot be empty".to_string()); + } + + // Handle IPv6 format: [::1]:port + if address.starts_with('[') { + let Some(bracket_end) = address.find(']') else { + return Err("IPv6 address missing closing bracket".to_string()); + }; + + if bracket_end + 1 >= address.len() { + return Err("Missing port after IPv6 address".to_string()); + } + + let after_bracket = &address[bracket_end + 1..]; + if !after_bracket.starts_with(':') { + return Err("Expected ':' after IPv6 address bracket".to_string()); + } + + let port_str = &after_bracket[1..]; + port_str + .parse::<u16>() + .map_err(|_| format!("Invalid port number: '{port_str}'"))?; + + return Ok(()); + } + + // Handle IPv4/hostname format: host:port + let parts: Vec<&str> = address.rsplitn(2, ':').collect(); + if parts.len() != 2 { + return Err("Missing port (expected format: 'host:port')".to_string()); + } + + let port_str = parts[0]; + let host = parts[1]; + + if host.is_empty() { + return Err("Host cannot be empty".to_string()); + } + + port_str + .parse::<u16>() + .map_err(|_| format!("Invalid port number: '{port_str}'"))?; + + Ok(()) +} + +/// Attempts to resolve an address string, returning detailed information about the result. 
+/// +/// This function distinguishes between: +/// - Successfully resolved addresses +/// - Valid hostname:port format that failed DNS lookup (acceptable for Docker DNS) +/// - Invalid address format (always an error) +/// +/// # Examples +/// +/// ``` +/// use zaino_common::net::{try_resolve_address, AddressResolution}; +/// +/// // IP:port format resolves immediately +/// let result = try_resolve_address("127.0.0.1:8080"); +/// assert!(result.is_resolved()); +/// +/// // Invalid format is detected +/// let result = try_resolve_address("no-port-here"); +/// assert!(result.is_invalid_format()); +/// ``` +pub fn try_resolve_address(address: &str) -> AddressResolution { + // First validate the format + if let Err(reason) = validate_address_format(address) { + return AddressResolution::InvalidFormat { + address: address.to_string(), + reason, + }; + } + + // Try parsing as SocketAddr first (handles ip:port format directly) + if let Ok(addr) = address.parse::<SocketAddr>() { + return AddressResolution::Resolved(addr); + } + + // Fall back to DNS resolution for hostname:port format + match address.to_socket_addrs() { + Ok(mut addrs) => { + let addrs_vec: Vec<SocketAddr> = addrs.by_ref().collect(); + + // Prefer IPv4 if available (more compatible, especially in Docker) + if let Some(ipv4_addr) = addrs_vec.iter().find(|addr| addr.is_ipv4()) { + AddressResolution::Resolved(*ipv4_addr) + } else if let Some(addr) = addrs_vec.into_iter().next() { + AddressResolution::Resolved(addr) + } else { + AddressResolution::UnresolvedHostname { + address: address.to_string(), + error: "DNS returned no addresses".to_string(), + } + } + } + Err(e) => AddressResolution::UnresolvedHostname { + address: address.to_string(), + error: e.to_string(), + }, + } +} + +/// Resolves an address string to a [`SocketAddr`]. +/// +/// Accepts both IP:port format (e.g., "127.0.0.1:8080") and hostname:port format +/// (e.g., "zebra:18232" for Docker DNS resolution). +/// +/// When both IPv4 and IPv6 addresses are available, IPv4 is preferred. +/// +/// # Examples +/// +/// ``` +/// use zaino_common::net::resolve_socket_addr; +/// +/// // IP:port format +/// let addr = resolve_socket_addr("127.0.0.1:8080").unwrap(); +/// assert_eq!(addr.port(), 8080); +/// +/// // Hostname resolution (localhost) +/// let addr = resolve_socket_addr("localhost:8080").unwrap(); +/// assert!(addr.ip().is_loopback()); +/// ``` +/// +/// # Errors +/// +/// Returns an error if: +/// - The address format is invalid (missing port, invalid IP, etc.) 
+/// - The hostname cannot be resolved (DNS lookup failure) +/// - No addresses are returned from resolution +pub fn resolve_socket_addr(address: &str) -> Result<SocketAddr, std::io::Error> { + match try_resolve_address(address) { + AddressResolution::Resolved(addr) => Ok(addr), + AddressResolution::UnresolvedHostname { address, error } => Err(std::io::Error::new( + std::io::ErrorKind::NotFound, + format!("Cannot resolve hostname '{address}': {error}"), + )), + AddressResolution::InvalidFormat { address, reason } => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + format!("Invalid address format '{address}': {reason}"), + )), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::net::Ipv4Addr; + + // === Format validation tests (no DNS, always reliable) === + + #[test] + fn test_resolve_ipv4_address() { + let result = resolve_socket_addr("127.0.0.1:8080"); + assert!(result.is_ok()); + let addr = result.unwrap(); + assert_eq!(addr.ip().to_string(), "127.0.0.1"); + assert_eq!(addr.port(), 8080); + } + + #[test] + fn test_resolve_ipv4_any_address() { + let result = resolve_socket_addr("0.0.0.0:18232"); + assert!(result.is_ok()); + let addr = result.unwrap(); + assert_eq!(addr.ip(), Ipv4Addr::UNSPECIFIED); + assert_eq!(addr.port(), 18232); + } + + #[test] + fn test_resolve_ipv6_localhost() { + let result = resolve_socket_addr("[::1]:8080"); + assert!(result.is_ok()); + let addr = result.unwrap(); + assert!(addr.is_ipv6()); + assert_eq!(addr.port(), 8080); + } + + #[test] + fn test_resolve_missing_port() { + let result = try_resolve_address("127.0.0.1"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_empty_string() { + let result = try_resolve_address(""); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_invalid_port() { + let result = try_resolve_address("127.0.0.1:invalid"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_port_too_large() { + let result = try_resolve_address("127.0.0.1:99999"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_empty_host() { + let result = try_resolve_address(":8080"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_ipv6_missing_port() { + let result = try_resolve_address("[::1]"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_resolve_ipv6_missing_bracket() { + let result = try_resolve_address("[::1:8080"); + assert!(result.is_invalid_format()); + } + + #[test] + fn test_valid_hostname_format() { + // This hostname has valid format but won't resolve + let result = try_resolve_address("nonexistent-host.invalid:8080"); + // Should be unresolved hostname, not invalid format + assert!( + result.is_unresolved_hostname(), + "Expected UnresolvedHostname, got {:?}", + result + ); + } + + #[test] + fn test_docker_style_hostname_format() { + // Docker-style hostnames have valid format + let result = try_resolve_address("zebra:18232"); + // Can't resolve in unit tests, but format is valid + assert!( + result.is_unresolved_hostname(), + "Expected UnresolvedHostname for Docker-style hostname, got {:?}", + result + ); + } + + // === DNS-dependent tests (may be flaky in CI) === + + #[test] + #[ignore = "DNS-dependent: may be flaky in CI environments without reliable DNS"] + fn test_resolve_hostname_localhost() { + // "localhost" should resolve to 127.0.0.1 or ::1 + let result = resolve_socket_addr("localhost:8080"); + assert!(result.is_ok()); + let addr = result.unwrap(); + assert_eq!(addr.port(), 8080); + assert!(addr.ip().is_loopback()); 
+ } + + #[test] + #[ignore = "DNS-dependent: behavior varies by system DNS configuration"] + fn test_resolve_invalid_hostname_dns() { + // This test verifies DNS lookup failure for truly invalid hostnames + let result = resolve_socket_addr("this-hostname-does-not-exist.invalid:8080"); + assert!(result.is_err()); + } +} diff --git a/zaino-fetch/Cargo.toml index 3185e0eb3..3b2fcac76 100644 --- a/zaino-fetch/Cargo.toml +++ b/zaino-fetch/Cargo.toml @@ -9,6 +9,7 @@ license = { workspace = true } version = { workspace = true } [dependencies] +zaino-common = { workspace = true } zaino-proto = { workspace = true } # Zebra diff --git a/zaino-fetch/src/chain/block.rs index a0a81d7f3..2c8bd1e19 100644 --- a/zaino-fetch/src/chain/block.rs +++ b/zaino-fetch/src/chain/block.rs @@ -7,7 +7,10 @@ use crate::chain::{ }; use sha2::{Digest, Sha256}; use std::io::Cursor; -use zaino_proto::proto::compact_formats::{ChainMetadata, CompactBlock}; +use zaino_proto::proto::{ + compact_formats::{ChainMetadata, CompactBlock}, + utils::PoolTypeFilter, +}; /// A block header, containing metadata about a block. /// @@ -362,27 +365,41 @@ impl FullBlock { return Err(ParseError::InvalidData(format!( "Error decoding full block - {} bytes of Remaining data. Compact Block Created: ({:?})", remaining_data.len(), - full_block.into_compact(0, 0) + full_block.into_compact_block(0, 0, PoolTypeFilter::includes_all()) ))); } Ok(full_block) } - /// Converts a zcash full block into a compact block. - pub fn into_compact( + /// Turns this block into a compact block according to the lightclient protocol [ZIP-307](https://zips.z.cash/zip-0307). + /// Callers can choose which pools to include in this compact block by specifying a + /// `PoolTypeFilter` accordingly. + pub fn into_compact_block( self, sapling_commitment_tree_size: u32, orchard_commitment_tree_size: u32, + pool_types: PoolTypeFilter, ) -> Result<CompactBlock, ParseError> { let vtx = self .vtx .into_iter() .enumerate() .filter_map(|(index, tx)| { - if tx.has_shielded_elements() { - Some(tx.to_compact(index as u64)) - } else { - None + match tx.to_compact_tx(Some(index as u64), &pool_types) { + Ok(compact_tx) => { + // Omit transactions that have no elements in any requested pool type. + if !compact_tx.vin.is_empty() + || !compact_tx.vout.is_empty() + || !compact_tx.spends.is_empty() + || !compact_tx.outputs.is_empty() + || !compact_tx.actions.is_empty() + { + Some(Ok(compact_tx)) + } else { + None + } + } + Err(parse_error) => Some(Err(parse_error)), } }) .collect::<Result<Vec<_>, _>>()?; @@ -408,6 +425,20 @@ impl FullBlock { Ok(compact_block) } + #[deprecated] + /// Converts a zcash full block into a **legacy** compact block. + pub fn into_compact( + self, + sapling_commitment_tree_size: u32, + orchard_commitment_tree_size: u32, + ) -> Result<CompactBlock, ParseError> { + self.into_compact_block( + sapling_commitment_tree_size, + orchard_commitment_tree_size, + PoolTypeFilter::default(), + ) + } + /// Extracts the block height from the coinbase transaction. 
fn get_block_height(transactions: &[FullTransaction]) -> Result { let transparent_inputs = transactions[0].transparent_inputs(); diff --git a/zaino-fetch/src/chain/transaction.rs index fe2bd370b..db9a9f2b7 100644 --- a/zaino-fetch/src/chain/transaction.rs +++ b/zaino-fetch/src/chain/transaction.rs @@ -5,8 +5,12 @@ use crate::chain::{ utils::{read_bytes, read_i64, read_u32, read_u64, skip_bytes, CompactSize, ParseFromSlice}, }; use std::io::Cursor; -use zaino_proto::proto::compact_formats::{ - CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, CompactTx, +use zaino_proto::proto::{ + compact_formats::{ + CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, CompactTx, CompactTxIn, + TxOut as CompactTxOut, + }, + utils::PoolTypeFilter, }; /// Txin format as described in @@ -27,6 +31,12 @@ impl TxIn { fn into_inner(self) -> (Vec<u8>, u32, Vec<u8>) { (self.prev_txid, self.prev_index, self.script_sig) } + + /// Returns `true` if this `OutPoint` is "null" in the Bitcoin sense: it has txid set to + /// all-zeroes and output index set to `u32::MAX`. + fn is_null(&self) -> bool { + self.prev_txid.as_slice() == [0u8; 32] && self.prev_index == u32::MAX + } } impl ParseFromSlice for TxIn { @@ -1122,55 +1132,114 @@ impl FullTransaction { } /// Converts a zcash full transaction into a compact transaction. + #[deprecated] pub fn to_compact(self, index: u64) -> Result<CompactTx, ParseError> { - let hash = self.tx_id; + self.to_compact_tx(Some(index), &PoolTypeFilter::default()) + } - // NOTE: LightWalletD currently does not return a fee and is not currently priority here. Please open an Issue or PR at the Zingo-Indexer github (https://github.com/zingolabs/zingo-indexer) if you require this functionality. + /// Converts a Zcash transaction into a `CompactTx` of the lightwallet protocol. + /// If the transaction you want to convert is a mempool transaction, specify `None` for `index`. + /// Specify which pools the resulting compact transaction should include data for via the + /// `pool_types` argument, a `PoolTypeFilter`. + pub fn to_compact_tx( + self, + index: Option<u64>, + pool_types: &PoolTypeFilter, + ) -> Result<CompactTx, ParseError> { + let hash = self.tx_id(); + + // NOTE: LightWalletD currently does not return a fee and is not currently a priority here. + // Please open an Issue or PR at the Zingo-Indexer github (https://github.com/zingolabs/zingo-indexer) + // if you require this functionality. 
let fee = 0; - let spends = self - .raw_transaction - .shielded_spends - .iter() - .map(|spend| CompactSaplingSpend { - nf: spend.nullifier.clone(), - }) - .collect(); + let spends = if pool_types.includes_sapling() { + self.raw_transaction + .shielded_spends + .iter() + .map(|spend| CompactSaplingSpend { + nf: spend.nullifier.clone(), + }) + .collect() + } else { + vec![] + }; - let outputs = self - .raw_transaction - .shielded_outputs - .iter() - .map(|output| CompactSaplingOutput { - cmu: output.cmu.clone(), - ephemeral_key: output.ephemeral_key.clone(), - ciphertext: output.enc_ciphertext[..52].to_vec(), - }) - .collect(); - - let actions = self - .raw_transaction - .orchard_actions - .iter() - .map(|action| CompactOrchardAction { - nullifier: action.nullifier.clone(), - cmx: action.cmx.clone(), - ephemeral_key: action.ephemeral_key.clone(), - ciphertext: action.enc_ciphertext[..52].to_vec(), - }) - .collect(); + let outputs = if pool_types.includes_sapling() { + self.raw_transaction + .shielded_outputs + .iter() + .map(|output| CompactSaplingOutput { + cmu: output.cmu.clone(), + ephemeral_key: output.ephemeral_key.clone(), + ciphertext: output.enc_ciphertext[..52].to_vec(), + }) + .collect() + } else { + vec![] + }; + + let actions = if pool_types.includes_orchard() { + self.raw_transaction + .orchard_actions + .iter() + .map(|action| CompactOrchardAction { + nullifier: action.nullifier.clone(), + cmx: action.cmx.clone(), + ephemeral_key: action.ephemeral_key.clone(), + ciphertext: action.enc_ciphertext[..52].to_vec(), + }) + .collect() + } else { + vec![] + }; + + let vout = if pool_types.includes_transparent() { + self.raw_transaction + .transparent_outputs + .iter() + .map(|t_out| CompactTxOut { + value: t_out.value, + script_pub_key: t_out.script_hash.clone(), + }) + .collect() + } else { + vec![] + }; + + let vin = if pool_types.includes_transparent() { + self.raw_transaction + .transparent_inputs + .iter() + .filter_map(|t_in| { + if t_in.is_null() { + None + } else { + Some(CompactTxIn { + prevout_txid: t_in.prev_txid.clone(), + prevout_index: t_in.prev_index, + }) + } + }) + .collect() + } else { + vec![] + }; Ok(CompactTx { - index, - hash, + index: index.unwrap_or(0), // this assumes that mempool txs have a zeroed index + txid: hash, fee, spends, outputs, actions, + vin, + vout, }) } - /// Returns true if the transaction contains either sapling spends or outputs. + /// Returns true if the transaction contains either sapling spends or outputs, or orchard actions. + #[allow(dead_code)] pub(crate) fn has_shielded_elements(&self) -> bool { !self.raw_transaction.shielded_spends.is_empty() || !self.raw_transaction.shielded_outputs.is_empty() diff --git a/zaino-fetch/src/jsonrpsee/connector.rs b/zaino-fetch/src/jsonrpsee/connector.rs index 8c7148ccc..b59519d5f 100644 --- a/zaino-fetch/src/jsonrpsee/connector.rs +++ b/zaino-fetch/src/jsonrpsee/connector.rs @@ -229,9 +229,10 @@ impl JsonRpSeeConnector { }) } - /// Helper function to create from parts of a StateServiceConfig or FetchServiceConfig + /// Helper function to create from parts of a StateServiceConfig or FetchServiceConfig. + /// Accepts both hostname:port (e.g., "zebra:18232") and ip:port (e.g., "127.0.0.1:18232") formats. 
pub async fn new_from_config_parts( - validator_rpc_address: SocketAddr, + validator_rpc_address: &str, validator_rpc_user: String, validator_rpc_password: String, validator_cookie_path: Option, @@ -871,13 +872,22 @@ async fn test_node_connection(url: Url, auth_method: AuthMethod) -> Result<(), T Ok(()) } -/// Tries to connect to zebrad/zcashd using the provided SocketAddr and returns the correct URL. +/// Resolves an address string (hostname:port or ip:port) to a SocketAddr. +fn resolve_address(address: &str) -> Result<SocketAddr, TransportError> { + zaino_common::net::resolve_socket_addr(address) + .map_err(|e| TransportError::BadNodeData(Box::new(e), "address resolution")) +} + +/// Tries to connect to zebrad/zcashd using the provided address and returns the correct URL. +/// Accepts both hostname:port (e.g., "zebra:18232") and ip:port (e.g., "127.0.0.1:18232") formats. pub async fn test_node_and_return_url( - addr: SocketAddr, + address: &str, cookie_path: Option, user: Option, password: Option, ) -> Result<Url, TransportError> { + let addr = resolve_address(address)?; + let auth_method = match cookie_path.is_some() { true => { let cookie_file_path_str = cookie_path.expect("validator rpc cookie path missing"); @@ -911,6 +921,26 @@ pub async fn test_node_and_return_url( } interval.tick().await; } - error!("Error: Could not establish connection with node. Please check config and confirm node is listening at the correct address and the correct authorisation details have been entered. Exiting.."); + error!("Error: Could not establish connection with node. Please check config and confirm node is listening at {url} and the correct authorisation details have been entered. Exiting.."); std::process::exit(1); } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_resolve_address_wraps_common_function() { + // Verify the wrapper correctly converts io::Error to TransportError + let result = resolve_address("127.0.0.1:8080"); + assert!(result.is_ok()); + assert_eq!(result.unwrap().port(), 8080); + + let result = resolve_address("invalid"); + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + TransportError::BadNodeData(_, "address resolution") + )); + } +} diff --git a/zaino-proto/CHANGELOG.md new file mode 100644 index 000000000..60dfd28f9 --- /dev/null +++ b/zaino-proto/CHANGELOG.md @@ -0,0 +1,16 @@ +# Changelog +All notable changes to this library will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this library adheres to Rust's notion of +[Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + + +### Added +- `ValidatedBlockRangeRequest` type that encapsulates validation of the + `GetBlockRange` RPC request +- `utils` submodule to handle `PoolType` conversions +- `PoolTypeError` defines conversion errors between i32 and known `PoolType` variants +- `PoolTypeFilter` indicates which pools need to be returned in a compact block. 
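For orientation, the sketch below (not part of the patch) shows how these new pieces compose: a client builds a `BlockRange` with explicit pool types via the `utils` helpers, while the server expresses the same choice as a `PoolTypeFilter`. This is a minimal sketch assuming only items visible in this diff (`pool_types_into_i32_vec`, `PoolType`, `BlockId`, `BlockRange`, `PoolTypeFilter::includes_all`); the heights are arbitrary.

```rust
use zaino_proto::proto::{
    service::{BlockId, BlockRange, PoolType},
    utils::pool_types_into_i32_vec,
};

/// Client side: request a block range that includes transparent data.
/// Leaving `pool_types` empty falls back to the legacy Sapling + Orchard behavior.
fn all_pools_block_range(start: u64, end: u64) -> BlockRange {
    BlockRange {
        start: Some(BlockId { height: start, hash: vec![] }),
        end: Some(BlockId { height: end, hash: vec![] }),
        pool_types: pool_types_into_i32_vec(vec![
            PoolType::Transparent,
            PoolType::Sapling,
            PoolType::Orchard,
        ]),
    }
}
```

On the server side the equivalent filter is passed to the conversion methods, e.g. `full_block.into_compact_block(sapling_tree_size, orchard_tree_size, PoolTypeFilter::includes_all())`, which prunes `vin`/`vout`, Sapling, and Orchard data pool by pool before the `CompactBlock` is streamed back (`full_block` and the tree sizes are placeholders here).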
diff --git a/zaino-proto/Cargo.toml index 1ddbcc194..00ef41261 100644 --- a/zaino-proto/Cargo.toml +++ b/zaino-proto/Cargo.toml @@ -9,6 +9,9 @@ license = { workspace = true } version = { workspace = true } [dependencies] +zebra-state = { workspace = true } +zebra-chain = { workspace = true } + # Miscellaneous Workspace tonic = { workspace = true } diff --git a/zaino-proto/README.md new file mode 100644 index 000000000..780175b6c --- /dev/null +++ b/zaino-proto/README.md @@ -0,0 +1,65 @@ +# Zaino Proto files module + +This module encapsulates the lightclient-protocol functionality and imports the canonical files +using `git subtree`. + + +Below you can see the structure of the module + +``` +zaino-proto +├── build.rs +├── Cargo.toml +├── CHANGELOG.md +├── lightwallet-protocol <=== this is the git subtree +│   ├── CHANGELOG.md +│   ├── LICENSE +│   └── walletrpc +│   ├── compact_formats.proto +│   └── service.proto +├── proto +│   ├── compact_formats.proto -> ../lightwallet-protocol/walletrpc/compact_formats.proto +│   ├── proposal.proto +│   └── service.proto -> ../lightwallet-protocol/walletrpc/service.proto +└── src + ├── lib.rs + ├── proto + │   ├── compact_formats.rs + │   ├── proposal.rs + │   ├── service.rs + │   └── utils.rs + └── proto.rs +``` + +Maintaining the git subtree history has its own tricks. We recommend that developers updating +zaino-proto be wary of these shortcomings. + +If you need to update the canonical files for your feature, maintain a linear and simple git +commit history in your PR. + +We recommend that PRs that change the reference to the git subtree do so in this fashion. + +For example: + +when doing +``` +git subtree --prefix=zaino-proto/lightwallet-protocol pull git@github.com:zcash/lightwallet-protocol.git v0.4.0 --squash +``` + +your branch's commits must be sequenced like this: + +``` + your-branch-name + - commit applying the git subtree command + - commit merging the canonical files + - commits fixing compiler errors + - commit indicating the version adopted in the CHANGELOG.md of zaino-proto +``` + +If you are developing the `lightclient-protocol` and adopting it in Zaino, it is recommended that +you avoid running subsequent `git subtree` pulls against intermediate revisions; rebase against the +latest version that you will adopt in your final commit. This avoids rebasing issues and keeps a +coherent git commit history for when your branch merges to `dev`. + diff --git a/zaino-proto/build.rs.bak b/zaino-proto/build.rs similarity index 97% rename from zaino-proto/build.rs.bak rename to zaino-proto/build.rs index bf72a1d83..f60dade06 100644 --- a/zaino-proto/build.rs.bak +++ b/zaino-proto/build.rs @@ -2,7 +2,6 @@ use std::env; use std::fs; use std::io; use std::path::{Path, PathBuf}; -use std::process::Command; const COMPACT_FORMATS_PROTO: &str = "proto/compact_formats.proto"; const PROPOSAL_PROTO: &str = "proto/proposal.proto"; @@ -67,7 +66,7 @@ fn build() -> io::Result<()> { ".cash.z.wallet.sdk.rpc.CompactOrchardAction", "crate::proto::compact_formats::CompactOrchardAction", ) - .compile(&[SERVICE_PROTO], &["proto/"])?; + .compile_protos(&[SERVICE_PROTO], &["proto/"])?; // Build the proposal types. 
tonic_build::compile_protos(PROPOSAL_PROTO)?; diff --git a/zaino-proto/lightwallet-protocol/walletrpc/compact_formats.proto b/zaino-proto/lightwallet-protocol/walletrpc/compact_formats.proto index c799448ce..c62c7acbb 100644 --- a/zaino-proto/lightwallet-protocol/walletrpc/compact_formats.proto +++ b/zaino-proto/lightwallet-protocol/walletrpc/compact_formats.proto @@ -7,8 +7,8 @@ package cash.z.wallet.sdk.rpc; option go_package = "lightwalletd/walletrpc"; option swift_prefix = ""; -// Remember that proto3 fields are all optional. A field that is not present will be set to its zero value. -// bytes fields of hashes are in canonical little-endian format. +// REMINDER: proto3 fields are all optional. A field that is not present will be set to its zero/false/empty +// value. // Information about the state of the chain as of a given block. message ChainMetadata { @@ -16,34 +16,40 @@ message ChainMetadata { uint32 orchardCommitmentTreeSize = 2; // the size of the Orchard note commitment tree as of the end of this block } -// A compact representation of the shielded data in a Zcash block. +// A compact representation of a Zcash block. // // CompactBlock is a packaging of ONLY the data from a block that's needed to: // 1. Detect a payment to your Shielded address // 2. Detect a spend of your Shielded notes // 3. Update your witnesses to generate new spend proofs. +// 4. Spend UTXOs associated to t-addresses of your wallet. message CompactBlock { uint32 protoVersion = 1; // the version of this wire format, for storage uint64 height = 2; // the height of this block bytes hash = 3; // the ID (hash) of this block, same as in block explorers bytes prevHash = 4; // the ID (hash) of this block's predecessor uint32 time = 5; // Unix epoch time when the block was mined - bytes header = 6; // (hash, prevHash, and time) OR (full header) + bytes header = 6; // full header (as returned by the getblock RPC) repeated CompactTx vtx = 7; // zero or more compact transactions from this block ChainMetadata chainMetadata = 8; // information about the state of the chain as of this block } -// A compact representation of the shielded data in a Zcash transaction. +// A compact representation of a Zcash transaction. // // CompactTx contains the minimum information for a wallet to know if this transaction -// is relevant to it (either pays to it or spends from it) via shielded elements -// only. This message will not encode a transparent-to-transparent transaction. +// is relevant to it (either pays to it or spends from it) via shielded elements. Additionally, +// it can optionally include the minimum necessary data to detect payments to transparent addresses +// related to your wallet. message CompactTx { - // Index and hash will allow the receiver to call out to chain - // explorers or other data structures to retrieve more information - // about this transaction. - uint64 index = 1; // the index within the full block - bytes hash = 2; // the ID (hash) of this transaction, same as in block explorers + // The index of the transaction within the block. + uint64 index = 1; + + // The id of the transaction as defined in + // [§ 7.1.1 ‘Transaction Identifiers’](https://zips.z.cash/protocol/protocol.pdf#txnidentifiers) + // This byte array MUST be in protocol order and MUST NOT be reversed + // or hex-encoded; the byte-reversed and hex-encoded representation is + // exclusively a textual representation of a txid. + bytes txid = 2; // The transaction fee: present if server can provide. 
In the case of a // stateless server and a transaction with transparent inputs, this will be @@ -55,6 +61,41 @@ message CompactTx { repeated CompactSaplingSpend spends = 4; repeated CompactSaplingOutput outputs = 5; repeated CompactOrchardAction actions = 6; + + // `CompactTxIn` values corresponding to the `vin` entries of the full transaction. + // + // Note: the single null-outpoint input for coinbase transactions is omitted. Light + // clients can test `CompactTx.index == 0` to determine whether a `CompactTx` + // represents a coinbase transaction, as the coinbase transaction is always the + // first transaction in any block. + repeated CompactTxIn vin = 7; + + // A sequence of transparent outputs being created by the transaction. + repeated TxOut vout = 8; +} + +// A compact representation of a transparent transaction input. +message CompactTxIn { + // The id of the transaction that generated the output being spent. This + // byte array must be in protocol order and MUST NOT be reversed or + // hex-encoded. + bytes prevoutTxid = 1; + + // The index of the output being spent in the `vout` array of the + // transaction referred to by `prevoutTxid`. + uint32 prevoutIndex = 2; +} + +// A transparent output being created by the transaction. +// +// This contains identical data to the `TxOut` type in the transaction itself, and +// thus it is not "compact". +message TxOut { + // The value of the output, in Zatoshis. + uint64 value = 1; + + // The script pubkey that must be satisfied in order to spend this output. + bytes scriptPubKey = 2; } // A compact representation of a [Sapling Spend](https://zips.z.cash/protocol/protocol.pdf#spendencodingandconsensus). @@ -62,7 +103,7 @@ message CompactTx { // CompactSaplingSpend is a Sapling Spend Description as described in 7.3 of the Zcash // protocol specification. message CompactSaplingSpend { - bytes nf = 1; // nullifier (see the Zcash protocol specification) + bytes nf = 1; // Nullifier (see the Zcash protocol specification) } // A compact representation of a [Sapling Output](https://zips.z.cash/protocol/protocol.pdf#outputencodingandconsensus). @@ -70,9 +111,9 @@ message CompactSaplingSpend { // It encodes the `cmu` field, `ephemeralKey` field, and a 52-byte prefix of the // `encCiphertext` field of a Sapling Output Description. Total size is 116 bytes. message CompactSaplingOutput { - bytes cmu = 1; // note commitment u-coordinate - bytes ephemeralKey = 2; // ephemeral public key - bytes ciphertext = 3; // first 52 bytes of ciphertext + bytes cmu = 1; // Note commitment u-coordinate. + bytes ephemeralKey = 2; // Ephemeral public key. + bytes ciphertext = 3; // First 52 bytes of ciphertext. } // A compact representation of an [Orchard Action](https://zips.z.cash/protocol/protocol.pdf#actionencodingandconsensus). diff --git a/zaino-proto/lightwallet-protocol/walletrpc/service.proto b/zaino-proto/lightwallet-protocol/walletrpc/service.proto index 0a0989c7d..d3dc8ba04 100644 --- a/zaino-proto/lightwallet-protocol/walletrpc/service.proto +++ b/zaino-proto/lightwallet-protocol/walletrpc/service.proto @@ -8,18 +8,35 @@ option go_package = "lightwalletd/walletrpc"; option swift_prefix = ""; import "compact_formats.proto"; +// An identifier for a Zcash value pool. +enum PoolType { + POOL_TYPE_INVALID = 0; + TRANSPARENT = 1; + SAPLING = 2; + ORCHARD = 3; +} + // A BlockID message contains identifiers to select a block: a height or a // hash. Specification by hash is not implemented, but may be in the future. 
message BlockID { - uint64 height = 1; - bytes hash = 2; + uint64 height = 1; + bytes hash = 2; } // BlockRange specifies a series of blocks from start to end inclusive. // Both BlockIDs must be heights; specification by hash is not yet supported. +// +// If no pool types are specified, the server should default to the legacy +// behavior of returning only data relevant to the shielded (Sapling and +// Orchard) pools; otherwise, the server should prune `CompactBlocks` returned +// to include only data relevant to the requested pool types. Clients MUST +// verify that the version of the server they are connected to is capable +// of returning pruned and/or transparent data before setting `poolTypes` +// to a non-empty value. message BlockRange { BlockID start = 1; BlockID end = 2; + repeated PoolType poolTypes = 3; } // A TxFilter contains the information needed to identify a particular @@ -93,13 +110,21 @@ message LightdInfo { string zcashdBuild = 13; // example: "v4.1.1-877212414" string zcashdSubversion = 14; // example: "/MagicBean:4.1.1/" string donationAddress = 15; // Zcash donation UA address + string upgradeName = 16; // name of next pending network upgrade, empty if none scheduled + uint64 upgradeHeight = 17; // height of next pending upgrade, zero if none is scheduled + string lightwalletProtocolVersion = 18; // version of https://github.com/zcash/lightwallet-protocol served by this server } -// TransparentAddressBlockFilter restricts the results to the given address -// or block range. +// TransparentAddressBlockFilter restricts the results of the GRPC methods that +// use it to the transactions that involve the given address and were mined in +// the specified block range. Non-default values for both the address and the +// block range must be specified. Mempool transactions are not included. +// +// The `poolTypes` field of the `range` argument should be ignored. +// Implementations MAY consider it an error if any pool types are specified. message TransparentAddressBlockFilter { string address = 1; // t-address - BlockRange range = 2; // start, end heights + BlockRange range = 2; // start, end heights only } // Duration is currently used only for testing, so that the Ping rpc @@ -127,10 +152,23 @@ message Balance { int64 valueZat = 1; } -// The a shortened transaction ID is the prefix in big-endian (hex) format -// (then converted to binary). -message Exclude { - repeated bytes txid = 1; +// Request parameters for the `GetMempoolTx` RPC. +message GetMempoolTxRequest { + // A list of transaction ID byte string suffixes that should be excluded + // from the response. These suffixes may be produced either directly from + // the underlying txid bytes, or, if the source values are encoded txid + // strings, by truncating the hexadecimal representation of each + // transaction ID to an even number of characters, and then hex-decoding + // and then byte-reversing this value to obtain the byte representation. + repeated bytes exclude_txid_suffixes = 1; + // We reserve field number 2 for a potential future `exclude_txid_prefixes` + // field. + reserved 2; + // The server must prune `CompactTx`s returned to include only data + // relevant to the requested pool types. If no pool types are specified, + // the server should default to the legacy behavior of returning only data + // relevant to the shielded (Sapling and Orchard) pools. + repeated PoolType poolTypes = 3; } // The TreeState is derived from the Zcash z_gettreestate rpc. 
@@ -181,44 +219,63 @@ message GetAddressUtxosReplyList { service CompactTxStreamer { // Return the BlockID of the block at the tip of the best chain rpc GetLatestBlock(ChainSpec) returns (BlockID) {} + // Return the compact block corresponding to the given block identifier rpc GetBlock(BlockID) returns (CompactBlock) {} - // Same as GetBlock except actions contain only nullifiers + + // Same as GetBlock except the returned CompactBlock value contains only + // nullifiers. + // + // Note: this method is deprecated. Implementations should ignore any + // `PoolType::TRANSPARENT` member of the `poolTypes` argument. rpc GetBlockNullifiers(BlockID) returns (CompactBlock) {} - // Return a list of consecutive compact blocks + + // Return a list of consecutive compact blocks in the specified range, + // which is inclusive of `range.end`. + // + // If range.start <= range.end, blocks are returned in increasing height order; + // otherwise blocks are returned in decreasing height order. rpc GetBlockRange(BlockRange) returns (stream CompactBlock) {} - // Same as GetBlockRange except actions contain only nullifiers + + // Same as GetBlockRange except the returned CompactBlock values contain + // only nullifiers. + // + // Note: this method is deprecated. Implementations should ignore any + // `PoolType::TRANSPARENT` member of the `poolTypes` argument. rpc GetBlockRangeNullifiers(BlockRange) returns (stream CompactBlock) {} // Return the requested full (not compact) transaction (as from zcashd) rpc GetTransaction(TxFilter) returns (RawTransaction) {} + // Submit the given transaction to the Zcash network rpc SendTransaction(RawTransaction) returns (SendResponse) {} - // Return the transactions corresponding to the given t-address within the given block range - // NB - this method is misnamed, it returns transactions, not transaction IDs. + // Return RawTransactions that match the given transparent address filter. + + // + // Note: This function is misnamed: it returns complete `RawTransaction` values, not TxIds. + // NOTE: this method is deprecated, please use GetTaddressTransactions instead. rpc GetTaddressTxids(TransparentAddressBlockFilter) returns (stream RawTransaction) {} - // Return the transactions corresponding to the given t-address within the given block range + // Return the transactions corresponding to the given t-address within the given block range. + // Mempool transactions are not included in the results. rpc GetTaddressTransactions(TransparentAddressBlockFilter) returns (stream RawTransaction) {} rpc GetTaddressBalance(AddressList) returns (Balance) {} rpc GetTaddressBalanceStream(stream Address) returns (Balance) {} - // Return the compact transactions currently in the mempool; the results - // can be a few seconds out of date. If the Exclude list is empty, return - // all transactions; otherwise return all *except* those in the Exclude list - // (if any); this allows the client to avoid receiving transactions that it - // already has (from an earlier call to this rpc). The transaction IDs in the - // Exclude list can be shortened to any number of bytes to make the request - // more bandwidth-efficient; if two or more transactions in the mempool - // match a shortened txid, they are all sent (none is excluded). Transactions - // in the exclude list that don't exist in the mempool are ignored. - // - // The a shortened transaction ID is the prefix in big-endian (hex) format - // (then converted to binary). See smoke-test.bash for examples. 
- rpc GetMempoolTx(Exclude) returns (stream CompactTx) {} + // Returns a stream of the compact transaction representation for transactions + // currently in the mempool. The results of this operation may be a few + // seconds out of date. If the `exclude_txid_suffixes` list is empty, + // return all transactions; otherwise return all *except* those in the + // `exclude_txid_suffixes` list (if any); this allows the client to avoid + // receiving transactions that it already has (from an earlier call to this + // RPC). The transaction IDs in the `exclude_txid_suffixes` list can be + // shortened to any number of bytes to make the request more + // bandwidth-efficient; if two or more transactions in the mempool match a + // txid suffix, none of the matching transactions are excluded. Txid + // suffixes in the exclude list that don't match any transactions in the + // mempool are ignored. + rpc GetMempoolTx(GetMempoolTxRequest) returns (stream CompactTx) {} // Return a stream of current Mempool transactions. This will keep the output stream open while // there are mempool transactions. It will close the returned stream when a new block is mined. @@ -240,6 +297,7 @@ service CompactTxStreamer { // Return information about this lightwalletd instance and the blockchain rpc GetLightdInfo(Empty) returns (LightdInfo) {} + // Testing-only, requires lightwalletd --ping-very-insecure (do not enable in production) rpc Ping(Duration) returns (PingResponse) {} } diff --git a/zaino-proto/src/proto.rs b/zaino-proto/src/proto.rs index 7e04b9499..2ce891332 100644 --- a/zaino-proto/src/proto.rs +++ b/zaino-proto/src/proto.rs @@ -3,3 +3,4 @@ pub mod compact_formats; pub mod proposal; pub mod service; +pub mod utils; diff --git a/zaino-proto/src/proto/compact_formats.rs b/zaino-proto/src/proto/compact_formats.rs index 44455378f..82c2eea51 100644 --- a/zaino-proto/src/proto/compact_formats.rs +++ b/zaino-proto/src/proto/compact_formats.rs @@ -1,6 +1,6 @@ +// This file is @generated by prost-build. /// Information about the state of the chain as of a given block. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ChainMetadata { /// the size of the Sapling note commitment tree as of the end of this block #[prost(uint32, tag = "1")] @@ -9,13 +9,13 @@ pub struct ChainMetadata { #[prost(uint32, tag = "2")] pub orchard_commitment_tree_size: u32, } -/// A compact representation of the shielded data in a Zcash block. +/// A compact representation of a Zcash block. /// /// CompactBlock is a packaging of ONLY the data from a block that's needed to: -/// 1. Detect a payment to your shielded Sapling address -/// 2. Detect a spend of your shielded Sapling notes -/// 3. Update your witnesses to generate new Sapling spend proofs. -#[allow(clippy::derive_partial_eq_without_eq)] +/// 1. Detect a payment to your Shielded address +/// 2. Detect a spend of your Shielded notes +/// 3. Update your witnesses to generate new spend proofs. +/// 4. Spend UTXOs associated to t-addresses of your wallet. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct CompactBlock { /// the version of this wire format, for storage @@ -33,7 +33,7 @@ pub struct CompactBlock { /// Unix epoch time when the block was mined #[prost(uint32, tag = "5")] pub time: u32, - /// (hash, prevHash, and time) OR (full header) + /// full header (as returned by the getblock RPC) #[prost(bytes = "vec", tag = "6")] pub header: ::prost::alloc::vec::Vec, /// zero or more compact transactions from this block @@ -43,24 +43,24 @@ pub struct CompactBlock { #[prost(message, optional, tag = "8")] pub chain_metadata: ::core::option::Option, } -/// A compact representation of the shielded data in a Zcash transaction. +/// A compact representation of a Zcash transaction. /// /// CompactTx contains the minimum information for a wallet to know if this transaction -/// is relevant to it (either pays to it or spends from it) via shielded elements -/// only. This message will not encode a transparent-to-transparent transaction. -#[allow(clippy::derive_partial_eq_without_eq)] +/// is relevant to it (either pays to it or spends from it) via shielded elements. Additionally, +/// it can optionally include the minimum necessary data to detect payments to transparent addresses +/// related to your wallet. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CompactTx { - /// Index and hash will allow the receiver to call out to chain - /// explorers or other data structures to retrieve more information - /// about this transaction. - /// - /// the index within the full block + /// The index of the transaction within the block. #[prost(uint64, tag = "1")] pub index: u64, - /// the ID (hash) of this transaction, same as in block explorers + /// The id of the transaction as defined in + /// [§ 7.1.1 ‘Transaction Identifiers’]() + /// This byte array MUST be in protocol order and MUST NOT be reversed + /// or hex-encoded; the byte-reversed and hex-encoded representation is + /// exclusively a textual representation of a txid. #[prost(bytes = "vec", tag = "2")] - pub hash: ::prost::alloc::vec::Vec, + pub txid: ::prost::alloc::vec::Vec, /// The transaction fee: present if server can provide. In the case of a /// stateless server and a transaction with transparent inputs, this will be /// unset because the calculation requires reference to prior transactions. @@ -74,12 +74,48 @@ pub struct CompactTx { pub outputs: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "6")] pub actions: ::prost::alloc::vec::Vec, + /// `CompactTxIn` values corresponding to the `vin` entries of the full transaction. + /// + /// Note: the single null-outpoint input for coinbase transactions is omitted. Light + /// clients can test `CompactTx.index == 0` to determine whether a `CompactTx` + /// represents a coinbase transaction, as the coinbase transaction is always the + /// first transaction in any block. + #[prost(message, repeated, tag = "7")] + pub vin: ::prost::alloc::vec::Vec, + /// A sequence of transparent outputs being created by the transaction. + #[prost(message, repeated, tag = "8")] + pub vout: ::prost::alloc::vec::Vec, +} +/// A compact representation of a transparent transaction input. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CompactTxIn { + /// The id of the transaction that generated the output being spent. This + /// byte array must be in protocol order and MUST NOT be reversed or + /// hex-encoded. 
+ #[prost(bytes = "vec", tag = "1")] + pub prevout_txid: ::prost::alloc::vec::Vec, + /// The index of the output being spent in the `vout` array of the + /// transaction referred to by `prevoutTxid`. + #[prost(uint32, tag = "2")] + pub prevout_index: u32, +} +/// A transparent output being created by the transaction. +/// +/// This contains identical data to the `TxOut` type in the transaction itself, and +/// thus it is not "compact". +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxOut { + /// The value of the output, in Zatoshis. + #[prost(uint64, tag = "1")] + pub value: u64, + /// The script pubkey that must be satisfied in order to spend this output. + #[prost(bytes = "vec", tag = "2")] + pub script_pub_key: ::prost::alloc::vec::Vec, } /// A compact representation of a [Sapling Spend](). /// /// CompactSaplingSpend is a Sapling Spend Description as described in 7.3 of the Zcash /// protocol specification. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CompactSaplingSpend { /// Nullifier (see the Zcash protocol specification) @@ -90,7 +126,6 @@ pub struct CompactSaplingSpend { /// /// It encodes the `cmu` field, `ephemeralKey` field, and a 52-byte prefix of the /// `encCiphertext` field of a Sapling Output Description. Total size is 116 bytes. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CompactSaplingOutput { /// Note commitment u-coordinate. @@ -104,7 +139,6 @@ pub struct CompactSaplingOutput { pub ciphertext: ::prost::alloc::vec::Vec, } /// A compact representation of an [Orchard Action](). -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CompactOrchardAction { /// \[32\] The nullifier of the input note diff --git a/zaino-proto/src/proto/proposal.rs b/zaino-proto/src/proto/proposal.rs index 1ea321afc..eed2b14a7 100644 --- a/zaino-proto/src/proto/proposal.rs +++ b/zaino-proto/src/proto/proposal.rs @@ -1,5 +1,5 @@ +// This file is @generated by prost-build. /// A data structure that describes a series of transactions to be created. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Proposal { /// The version of this serialization format. @@ -20,7 +20,6 @@ pub struct Proposal { } /// A data structure that describes the inputs to be consumed and outputs to /// be produced in a proposed transaction. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ProposalStep { /// ZIP 321 serialized transaction request @@ -50,8 +49,7 @@ pub struct ProposalStep { /// A mapping from ZIP 321 payment index to the output pool that has been chosen /// for that payment, based upon the payment address and the selected inputs to /// the transaction. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct PaymentOutputPool { #[prost(uint32, tag = "1")] pub payment_index: u32, @@ -60,7 +58,6 @@ pub struct PaymentOutputPool { } /// The unique identifier and value for each proposed input that does not /// require a back-reference to a prior step of the proposal. 
-#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReceivedOutput { #[prost(bytes = "vec", tag = "1")] @@ -74,8 +71,7 @@ pub struct ReceivedOutput { } /// A reference to a payment in a prior step of the proposal. This payment must /// belong to the wallet. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct PriorStepOutput { #[prost(uint32, tag = "1")] pub step_index: u32, @@ -83,8 +79,7 @@ pub struct PriorStepOutput { pub payment_index: u32, } /// A reference to a change or ephemeral output from a prior step of the proposal. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct PriorStepChange { #[prost(uint32, tag = "1")] pub step_index: u32, @@ -92,7 +87,6 @@ pub struct PriorStepChange { pub change_index: u32, } /// The unique identifier and value for an input to be used in the transaction. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ProposedInput { #[prost(oneof = "proposed_input::Value", tags = "1, 2, 3")] @@ -100,7 +94,6 @@ pub struct ProposedInput { } /// Nested message and enum types in `ProposedInput`. pub mod proposed_input { - #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Value { #[prost(message, tag = "1")] @@ -112,7 +105,6 @@ pub mod proposed_input { } } /// The proposed change outputs and fee value. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionBalance { /// A list of change or ephemeral output values. @@ -129,7 +121,6 @@ pub struct TransactionBalance { /// an ephemeral output, which must be spent by a subsequent step. This is /// only supported for transparent outputs. Each ephemeral output will be /// given a unique t-address. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ChangeValue { /// The value of a change or ephemeral output to be created, in zatoshis. @@ -148,7 +139,6 @@ pub struct ChangeValue { } /// An object wrapper for memo bytes, to facilitate representing the /// `change_memo == None` case. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MemoBytes { #[prost(bytes = "vec", tag = "1")] @@ -176,10 +166,10 @@ impl ValuePool { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - ValuePool::PoolNotSpecified => "PoolNotSpecified", - ValuePool::Transparent => "Transparent", - ValuePool::Sapling => "Sapling", - ValuePool::Orchard => "Orchard", + Self::PoolNotSpecified => "PoolNotSpecified", + Self::Transparent => "Transparent", + Self::Sapling => "Sapling", + Self::Orchard => "Orchard", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -216,10 +206,10 @@ impl FeeRule { /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
pub fn as_str_name(&self) -> &'static str { match self { - FeeRule::NotSpecified => "FeeRuleNotSpecified", - FeeRule::PreZip313 => "PreZip313", - FeeRule::Zip313 => "Zip313", - FeeRule::Zip317 => "Zip317", + Self::NotSpecified => "FeeRuleNotSpecified", + Self::PreZip313 => "PreZip313", + Self::Zip313 => "Zip313", + Self::Zip317 => "Zip317", } } /// Creates an enum from field names used in the ProtoBuf definition. diff --git a/zaino-proto/src/proto/service.rs b/zaino-proto/src/proto/service.rs index 36834c1e2..2441bc93f 100644 --- a/zaino-proto/src/proto/service.rs +++ b/zaino-proto/src/proto/service.rs @@ -1,6 +1,32 @@ +// This file is @generated by prost-build. +/// A compact representation of a transparent transaction input. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CompactTxIn { + /// The id of the transaction that generated the output being spent. This + /// byte array must be in protocol order and MUST NOT be reversed or + /// hex-encoded. + #[prost(bytes = "vec", tag = "1")] + pub prevout_txid: ::prost::alloc::vec::Vec, + /// The index of the output being spent in the `vout` array of the + /// transaction referred to by `prevoutTxid`. + #[prost(uint32, tag = "2")] + pub prevout_index: u32, +} +/// A transparent output being created by the transaction. +/// +/// This contains identical data to the `TxOut` type in the transaction itself, and +/// thus it is not "compact". +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxOut { + /// The value of the output, in Zatoshis. + #[prost(uint64, tag = "1")] + pub value: u64, + /// The script pubkey that must be satisfied in order to spend this output. + #[prost(bytes = "vec", tag = "2")] + pub script_pub_key: ::prost::alloc::vec::Vec, +} /// A BlockID message contains identifiers to select a block: a height or a /// hash. Specification by hash is not implemented, but may be in the future. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockId { #[prost(uint64, tag = "1")] @@ -10,18 +36,26 @@ pub struct BlockId { } /// BlockRange specifies a series of blocks from start to end inclusive. /// Both BlockIDs must be heights; specification by hash is not yet supported. -#[allow(clippy::derive_partial_eq_without_eq)] +/// +/// If no pool types are specified, the server should default to the legacy +/// behavior of returning only data relevant to the shielded (Sapling and +/// Orchard) pools; otherwise, the server should prune `CompactBlocks` returned +/// to include only data relevant to the requested pool types. Clients MUST +/// verify that the version of the server they are connected to are capable +/// of returning pruned and/or transparent data before setting `poolTypes` +/// to a non-empty value. #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockRange { #[prost(message, optional, tag = "1")] pub start: ::core::option::Option, #[prost(message, optional, tag = "2")] pub end: ::core::option::Option, + #[prost(enumeration = "PoolType", repeated, tag = "3")] + pub pool_types: ::prost::alloc::vec::Vec, } /// A TxFilter contains the information needed to identify a particular /// transaction: either a block and an index, or a direct transaction hash. /// Currently, only specification by hash is supported. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TxFilter { /// block identifier, height or hash @@ -37,20 +71,38 @@ pub struct TxFilter { /// RawTransaction contains the complete transaction data. 
It also optionally includes /// the block height in which the transaction was included, or, when returned /// by GetMempoolStream(), the latest block height. -#[allow(clippy::derive_partial_eq_without_eq)] +/// +/// FIXME: the documentation here about mempool status contradicts the documentation +/// for the `height` field. See #[derive(Clone, PartialEq, ::prost::Message)] pub struct RawTransaction { - /// exact data returned by Zcash 'getrawtransaction' + /// The serialized representation of the Zcash transaction. #[prost(bytes = "vec", tag = "1")] pub data: ::prost::alloc::vec::Vec, - /// height that the transaction was mined (or -1) + /// The height at which the transaction is mined, or a sentinel value. + /// + /// Due to an error in the original protobuf definition, it is necessary to + /// reinterpret the result of the `getrawtransaction` RPC call. Zcashd will + /// return the int64 value `-1` for the height of transactions that appear + /// in the block index, but which are not mined in the main chain. Here, the + /// height field of `RawTransaction` was erroneously created as a `uint64`, + /// and as such we must map the response from the zcashd RPC API to be + /// representable within this space. Additionally, the `height` field will + /// be absent for transactions in the mempool, resulting in the default + /// value of `0` being set. Therefore, the meanings of the `height` field of + /// the `RawTransaction` type are as follows: + /// + /// * height 0: the transaction is in the mempool + /// * height 0xffffffffffffffff: the transaction has been mined on a fork that + /// is not currently the main chain + /// * any other height: the transaction has been mined in the main chain at the + /// given height #[prost(uint64, tag = "2")] pub height: u64, } /// A SendResponse encodes an error code and a string. It is currently used /// only by SendTransaction(). If error code is zero, the operation was /// successful; if non-zero, it and the message specify the failure. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SendResponse { #[prost(int32, tag = "1")] @@ -59,16 +111,13 @@ pub struct SendResponse { pub error_message: ::prost::alloc::string::String, } /// Chainspec is a placeholder to allow specification of a particular chain fork. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ChainSpec {} /// Empty is for gRPCs that take no arguments, currently only GetLightdInfo. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Empty {} /// LightdInfo returns various information about this lightwalletd instance /// and the state of the blockchain. 
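Returning to the sentinel scheme documented on `RawTransaction.height` above, a client-side decoder might look like this (the enum and function are illustrative, not part of the diff):

```rust
/// The three cases encoded in `RawTransaction.height`.
enum MinedStatus {
    /// height == 0: the transaction is in the mempool.
    Mempool,
    /// height == u64::MAX (zcashd's int64 -1 mapped into u64): mined on a
    /// fork that is not currently the main chain.
    NotMainChain,
    /// Any other value: mined in the main chain at this height.
    Mined(u64),
}

fn mined_status(height: u64) -> MinedStatus {
    match height {
        0 => MinedStatus::Mempool,
        u64::MAX => MinedStatus::NotMainChain,
        h => MinedStatus::Mined(h),
    }
}
```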
-#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct LightdInfo { #[prost(string, tag = "1")] @@ -107,24 +156,39 @@ pub struct LightdInfo { /// example: "/MagicBean:4.1.1/" #[prost(string, tag = "14")] pub zcashd_subversion: ::prost::alloc::string::String, + /// Zcash donation UA address + #[prost(string, tag = "15")] + pub donation_address: ::prost::alloc::string::String, + /// name of next pending network upgrade, empty if none scheduled + #[prost(string, tag = "16")] + pub upgrade_name: ::prost::alloc::string::String, + /// height of next pending upgrade, zero if none is scheduled + #[prost(uint64, tag = "17")] + pub upgrade_height: u64, + /// version of the lightwallet protocol served by this server + #[prost(string, tag = "18")] + pub lightwallet_protocol_version: ::prost::alloc::string::String, } -/// TransparentAddressBlockFilter restricts the results to the given address -/// or block range. -#[allow(clippy::derive_partial_eq_without_eq)] +/// TransparentAddressBlockFilter restricts the results of the gRPC methods that +/// use it to the transactions that involve the given address and were mined in +/// the specified block range. Non-default values for both the address and the +/// block range must be specified. Mempool transactions are not included. +/// +/// The `poolTypes` field of the `range` argument should be ignored. +/// Implementations MAY consider it an error if any pool types are specified. #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransparentAddressBlockFilter { /// t-address #[prost(string, tag = "1")] pub address: ::prost::alloc::string::String, - /// start, end heights + /// start, end heights only #[prost(message, optional, tag = "2")] pub range: ::core::option::Option, } /// Duration is currently used only for testing, so that the Ping rpc /// can simulate a delay, to create many simultaneous connections. Units /// are microseconds. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Duration { #[prost(int64, tag = "1")] pub interval_us: i64, @@ -132,40 +196,47 @@ /// PingResponse is used to indicate concurrency, how many Ping rpcs /// are executing upon entry and upon exit (after the delay). /// This rpc is used for testing only. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct PingResponse { #[prost(int64, tag = "1")] pub entry: i64, #[prost(int64, tag = "2")] pub exit: i64, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Address { #[prost(string, tag = "1")] pub address: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AddressList { #[prost(string, repeated, tag = "1")] pub addresses: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Balance { #[prost(int64, tag = "1")] pub value_zat: i64, } -#[allow(clippy::derive_partial_eq_without_eq)] +/// Request parameters for the `GetMempoolTx` RPC.
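An aside connecting the new `LightdInfo` fields above to the `BlockRange.poolTypes` requirement stated earlier: a client could gate pool filtering on the advertised protocol version. This is only a sketch; the "0.4.0" threshold is inferred from this changelog, and the string comparison is a stand-in for proper semver parsing.

```rust
use zaino_proto::proto::service::LightdInfo;

/// Whether the connected server advertises a lightwallet protocol version
/// new enough (per this changelog, v0.4.0) for pool-type filtering.
fn supports_pool_filtering(info: &LightdInfo) -> bool {
    // Empty means a pre-0.4.0 server that does not report a version at
    // all. A real client should parse this as a semantic version instead
    // of comparing strings lexicographically.
    !info.lightwallet_protocol_version.is_empty()
        && info.lightwallet_protocol_version.as_str() >= "0.4.0"
}
```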
#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Exclude { +pub struct GetMempoolTxRequest { + /// A list of transaction ID byte string suffixes that should be excluded + /// from the response. These suffixes may be produced either directly from + /// the underlying txid bytes, or, if the source values are encoded txid + /// strings, by truncating the hexadecimal representation of each + /// transaction ID to an even number of characters, then hex-decoding + /// and byte-reversing this value to obtain the byte representation. #[prost(bytes = "vec", repeated, tag = "1")] - pub txid: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + pub exclude_txid_suffixes: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + /// The server must prune `CompactTx`s returned to include only data + /// relevant to the requested pool types. If no pool types are specified, + /// the server should default to the legacy behavior of returning only data + /// relevant to the shielded (Sapling and Orchard) pools. + #[prost(enumeration = "PoolType", repeated, tag = "3")] + pub pool_types: ::prost::alloc::vec::Vec, } /// The TreeState is derived from the Zcash z_gettreestate rpc. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TreeState { /// "main" or "test" #[prost(string, tag = "1")] @@ -187,8 +258,7 @@ pub struct TreeState { #[prost(string, tag = "6")] pub orchard_tree: ::prost::alloc::string::String, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct GetSubtreeRootsArg { /// Index identifying where to start returning subtree roots #[prost(uint32, tag = "1")] @@ -200,7 +270,6 @@ pub struct GetSubtreeRootsArg { #[prost(uint32, tag = "3")] pub max_entries: u32, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SubtreeRoot { /// The 32-byte Merkle root of the subtree. #[prost(bytes = "vec", tag = "2")] @@ -215,7 +284,6 @@ } /// Results are sorted by height, which makes it easy to issue another /// request that picks up from where the previous left off. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetAddressUtxosArg { #[prost(string, repeated, tag = "1")] @@ -226,7 +294,6 @@ #[prost(uint32, tag = "3")] pub max_entries: u32, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetAddressUtxosReply { #[prost(string, tag = "6")] @@ -242,12 +309,44 @@ pub struct GetAddressUtxosReply { #[prost(uint64, tag = "5")] pub height: u64, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetAddressUtxosReplyList { #[prost(message, repeated, tag = "1")] pub address_utxos: ::prost::alloc::vec::Vec, } +/// An identifier for a Zcash value pool. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum PoolType { + Invalid = 0, + Transparent = 1, + Sapling = 2, + Orchard = 3, +} +impl PoolType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use.
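To make the suffix-derivation rule on `exclude_txid_suffixes` concrete, here is a hypothetical client-side helper: keep an even number of leading hex characters of the display txid, hex-decode, then byte-reverse.

```rust
/// Build an exclusion suffix from a display-order (hex) txid, keeping at
/// most `hex_chars` leading characters. Returns protocol-order bytes for
/// `GetMempoolTxRequest.exclude_txid_suffixes`.
fn exclude_suffix(display_txid: &str, hex_chars: usize) -> Option<Vec<u8>> {
    let take = hex_chars.min(display_txid.len()) & !1; // force even length
    let truncated = display_txid.get(..take)?;
    let mut bytes = Vec::with_capacity(take / 2);
    for i in (0..take).step_by(2) {
        bytes.push(u8::from_str_radix(&truncated[i..i + 2], 16).ok()?);
    }
    // The hex display is byte-reversed, so reversing the decoded bytes
    // yields the trailing (suffix) bytes of the protocol-order txid.
    bytes.reverse();
    Some(bytes)
}
```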
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::Invalid => "POOL_TYPE_INVALID", + Self::Transparent => "TRANSPARENT", + Self::Sapling => "SAPLING", + Self::Orchard => "ORCHARD", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "POOL_TYPE_INVALID" => Some(Self::Invalid), + "TRANSPARENT" => Some(Self::Transparent), + "SAPLING" => Some(Self::Sapling), + "ORCHARD" => Some(Self::Orchard), + _ => None, + } + } +} #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum ShieldedProtocol { @@ -261,8 +360,8 @@ impl ShieldedProtocol { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - ShieldedProtocol::Sapling => "sapling", - ShieldedProtocol::Orchard => "orchard", + Self::Sapling => "sapling", + Self::Orchard => "orchard", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -276,9 +375,15 @@ impl ShieldedProtocol { } /// Generated client implementations. pub mod compact_tx_streamer_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::http::Uri; + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct CompactTxStreamerClient { inner: tonic::client::Grpc, @@ -298,8 +403,8 @@ pub mod compact_tx_streamer_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -322,8 +427,9 @@ pub mod compact_tx_streamer_client { >::ResponseBody, >, >, - >>::Error: - Into + Send + Sync, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, { CompactTxStreamerClient::new(InterceptedService::new(inner, interceptor)) } @@ -358,26 +464,31 @@ pub mod compact_tx_streamer_client { self.inner = self.inner.max_encoding_message_size(limit); self } - /// Return the height of the tip of the best chain + /// Return the BlockID of the block at the tip of the best chain pub async fn get_latest_block( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLatestBlock", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLatestBlock", + ), + ); self.inner.unary(req, path, codec).await } /// Return the compact block corresponding to the given block identifier @@ -388,24 +499,33 @@ pub mod compact_tx_streamer_client { tonic::Response, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - 
tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlock", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlock", + ), + ); self.inner.unary(req, path, codec).await } - /// Same as GetBlock except actions contain only nullifiers + /// Same as GetBlock except the returned CompactBlock value contains only + /// nullifiers. + /// + /// Note: this method is deprecated. Implementations should ignore any + /// `PoolType::TRANSPARENT` member of the `poolTypes` argument. pub async fn get_block_nullifiers( &mut self, request: impl tonic::IntoRequest, @@ -413,71 +533,98 @@ tonic::Response, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockNullifiers", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockNullifiers", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockNullifiers", + ), + ); self.inner.unary(req, path, codec).await } - /// Return a list of consecutive compact blocks + /// Return a list of consecutive compact blocks in the specified range, + /// which is inclusive of `range.end`. + /// + /// If range.start <= range.end, blocks are returned in increasing height order; + /// otherwise blocks are returned in decreasing height order. pub async fn get_block_range( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response>, + tonic::Response< + tonic::codec::Streaming, + >, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRange", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockRange", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockRange", + ), + ); self.inner.server_streaming(req, path, codec).await } - /// Same as GetBlockRange except actions contain only nullifiers + /// Same as GetBlockRange except the returned CompactBlock values contain + /// only nullifiers. + /// + /// Note: this method is deprecated. Implementations should ignore any + /// `PoolType::TRANSPARENT` member of the `poolTypes` argument.
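For reference, the `BlockRange` argument taken by `get_block_range` above might be built as follows. Note that prost stores the repeated `PoolType` enumeration as `Vec<i32>`; the helper, and the assumption that `BlockId` carries its usual `height`/`hash` fields, are illustrative.

```rust
use zaino_proto::proto::service::{BlockId, BlockRange, PoolType};

/// Inclusive range [start, end]; swap the endpoints to request blocks in
/// decreasing height order. Only set `pool_types` against servers known
/// to speak lightwallet protocol v0.4.0; an empty list keeps the legacy
/// Sapling + Orchard behavior.
fn transparent_and_sapling_range(start: u64, end: u64) -> BlockRange {
    BlockRange {
        start: Some(BlockId { height: start, hash: vec![] }),
        end: Some(BlockId { height: end, hash: vec![] }),
        // prost represents repeated enumerations as i32 values.
        pool_types: vec![PoolType::Transparent as i32, PoolType::Sapling as i32],
    }
}
```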
pub async fn get_block_range_nullifiers( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response>, + tonic::Response< + tonic::codec::Streaming, + >, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRangeNullifiers", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetBlockRangeNullifiers", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetBlockRangeNullifiers", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Return the requested full (not compact) transaction (as from zcashd) @@ -485,21 +632,26 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTransaction", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTransaction", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTransaction", + ), + ); self.inner.unary(req, path, codec).await } /// Submit the given transaction to the Zcash network @@ -507,24 +659,32 @@ &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/SendTransaction", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "SendTransaction", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "SendTransaction", + ), + ); self.inner.unary(req, path, codec).await } - /// Return the txids corresponding to the given t-address within the given block range + /// Return RawTransactions that match the given transparent address filter. + /// + /// Note: This function is misnamed; it returns complete `RawTransaction` values, not TxIds. + /// NOTE: this method is deprecated; please use GetTaddressTransactions instead.
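A client-side sketch of the replacement method, `GetTaddressTransactions` (endpoint and heights are placeholders; assumes tonic's transport feature so that `connect` is generated; `pool_types` stays empty because this filter ignores it and implementations may reject non-empty values):

```rust
use zaino_proto::proto::service::{
    compact_tx_streamer_client::CompactTxStreamerClient, BlockId, BlockRange,
    TransparentAddressBlockFilter,
};

async fn list_taddress_txns(addr: String) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = CompactTxStreamerClient::connect("http://example.invalid:9067").await?;
    let filter = TransparentAddressBlockFilter {
        address: addr,
        range: Some(BlockRange {
            start: Some(BlockId { height: 2_000_000, hash: vec![] }),
            end: Some(BlockId { height: 2_000_100, hash: vec![] }),
            pool_types: vec![], // must stay empty for this filter
        }),
    };
    let mut stream = client.get_taddress_transactions(filter).await?.into_inner();
    while let Some(tx) = stream.message().await? {
        println!("height {}: {} bytes", tx.height, tx.data.len());
    }
    Ok(())
}
```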
pub async fn get_taddress_txids( &mut self, request: impl tonic::IntoRequest, @@ -532,96 +692,152 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTxids", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressTxids", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressTxids", + ), + ); + self.inner.server_streaming(req, path, codec).await + } + /// Return the transactions corresponding to the given t-address within the given block range. + /// Mempool transactions are not included in the results. + pub async fn get_taddress_transactions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTransactions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressTransactions", + ), + ); self.inner.server_streaming(req, path, codec).await } pub async fn get_taddress_balance( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalance", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressBalance", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressBalance", + ), + ); self.inner.unary(req, path, codec).await } pub async fn get_taddress_balance_stream( &mut self, request: impl tonic::IntoStreamingRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalanceStream", ); let mut req = request.into_streaming_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTaddressBalanceStream", - )); + req.extensions_mut() + .insert( + 
GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTaddressBalanceStream", + ), + ); self.inner.client_streaming(req, path, codec).await } - /// Return the compact transactions currently in the mempool; the results - /// can be a few seconds out of date. If the Exclude list is empty, return - /// all transactions; otherwise return all *except* those in the Exclude list - /// (if any); this allows the client to avoid receiving transactions that it - /// already has (from an earlier call to this rpc). The transaction IDs in the - /// Exclude list can be shortened to any number of bytes to make the request - /// more bandwidth-efficient; if two or more transactions in the mempool - /// match a shortened txid, they are all sent (none is excluded). Transactions - /// in the exclude list that don't exist in the mempool are ignored. + /// Returns a stream of the compact transaction representation for transactions + /// currently in the mempool. The results of this operation may be a few + /// seconds out of date. If the `exclude_txid_suffixes` list is empty, + /// return all transactions; otherwise return all *except* those in the + /// `exclude_txid_suffixes` list (if any); this allows the client to avoid + /// receiving transactions that it already has (from an earlier call to this + /// RPC). The transaction IDs in the `exclude_txid_suffixes` list can be + /// shortened to any number of bytes to make the request more + /// bandwidth-efficient; if two or more transactions in the mempool match a + /// txid suffix, none of the matching transactions are excluded. Txid + /// suffixes in the exclude list that don't match any transactions in the + /// mempool are ignored. pub async fn get_mempool_tx( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response>, + tonic::Response< + tonic::codec::Streaming, + >, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolTx", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetMempoolTx", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetMempoolTx", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Return a stream of current Mempool transactions. 
This will keep the output stream open while @@ -633,21 +849,26 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolStream", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetMempoolStream", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetMempoolStream", + ), + ); self.inner.server_streaming(req, path, codec).await } /// GetTreeState returns the note commitment tree state corresponding to the given block. @@ -658,46 +879,56 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTreeState", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetTreeState", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetTreeState", + ), + ); self.inner.unary(req, path, codec).await } pub async fn get_latest_tree_state( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestTreeState", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLatestTreeState", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLatestTreeState", + ), + ); self.inner.unary(req, path, codec).await } - /// Returns a stream of information about roots of subtrees of the Sapling and Orchard - /// note commitment trees. + /// Returns a stream of information about roots of subtrees of the note commitment tree + /// for the specified shielded protocol (Sapling or Orchard). 
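Since `get_subtree_roots` targets one shielded protocol per call, the request might be assembled like this (illustrative; the field names follow the `GetSubtreeRootsArg` message earlier in this file, and prost stores enumerations as `i32`):

```rust
use zaino_proto::proto::service::{GetSubtreeRootsArg, ShieldedProtocol};

/// Request up to `max_entries` Orchard subtree roots starting at
/// `start_index`. By lightwalletd convention, a `max_entries` of 0 is
/// understood as "no limit" (treat this as an assumption).
fn orchard_subtree_roots(start_index: u32, max_entries: u32) -> GetSubtreeRootsArg {
    GetSubtreeRootsArg {
        start_index,
        shielded_protocol: ShieldedProtocol::Orchard as i32,
        max_entries,
    }
}
```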
pub async fn get_subtree_roots( &mut self, request: impl tonic::IntoRequest, @@ -705,43 +936,55 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetSubtreeRoots", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetSubtreeRoots", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetSubtreeRoots", + ), + ); self.inner.server_streaming(req, path, codec).await } pub async fn get_address_utxos( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxos", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetAddressUtxos", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetAddressUtxos", + ), + ); self.inner.unary(req, path, codec).await } pub async fn get_address_utxos_stream( @@ -751,21 +994,26 @@ pub mod compact_tx_streamer_client { tonic::Response>, tonic::Status, > { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxosStream", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetAddressUtxosStream", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetAddressUtxosStream", + ), + ); self.inner.server_streaming(req, path, codec).await } /// Return information about this lightwalletd instance and the blockchain @@ -773,21 +1021,26 @@ pub mod compact_tx_streamer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLightdInfo", ); let mut req = 
request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "GetLightdInfo", - )); + req.extensions_mut() + .insert( + GrpcMethod::new( + "cash.z.wallet.sdk.rpc.CompactTxStreamer", + "GetLightdInfo", + ), + ); self.inner.unary(req, path, codec).await } /// Testing-only, requires lightwalletd --ping-very-insecure (do not enable in production) @@ -795,33 +1048,41 @@ &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/cash.z.wallet.sdk.rpc.CompactTxStreamer/Ping", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new( - "cash.z.wallet.sdk.rpc.CompactTxStreamer", - "Ping", - )); + req.extensions_mut() + .insert( + GrpcMethod::new("cash.z.wallet.sdk.rpc.CompactTxStreamer", "Ping"), + ); self.inner.unary(req, path, codec).await } } } /// Generated server implementations. pub mod compact_tx_streamer_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with CompactTxStreamerServer. #[async_trait] - pub trait CompactTxStreamer: Send + Sync + 'static { - /// Return the height of the tip of the best chain + pub trait CompactTxStreamer: std::marker::Send + std::marker::Sync + 'static { + /// Return the BlockID of the block at the tip of the best chain async fn get_latest_block( &self, request: tonic::Request, @@ -834,7 +1095,11 @@ tonic::Response, tonic::Status, >; - /// Same as GetBlock except actions contain only nullifiers + /// Same as GetBlock except the returned CompactBlock value contains only + /// nullifiers. + /// + /// Note: this method is deprecated. Implementations should ignore any + /// `PoolType::TRANSPARENT` member of the `poolTypes` argument. async fn get_block_nullifiers( &self, request: tonic::Request, @@ -848,26 +1113,42 @@ crate::proto::compact_formats::CompactBlock, tonic::Status, >, - > + Send + > + + std::marker::Send + 'static; - /// Return a list of consecutive compact blocks + /// Return a list of consecutive compact blocks in the specified range, + /// which is inclusive of `range.end`. + /// + /// If range.start <= range.end, blocks are returned in increasing height order; + /// otherwise blocks are returned in decreasing height order. async fn get_block_range( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the GetBlockRangeNullifiers method.
type GetBlockRangeNullifiersStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result< crate::proto::compact_formats::CompactBlock, tonic::Status, >, - > + Send + > + + std::marker::Send + 'static; - /// Same as GetBlockRange except actions contain only nullifiers + /// Same as GetBlockRange except the returned CompactBlock values contain + /// only nullifiers. + /// + /// Note: this method is deprecated. Implementations should ignore any + /// `PoolType::TRANSPARENT` member of the `poolTypes` argument. async fn get_block_range_nullifiers( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Return the requested full (not compact) transaction (as from zcashd) async fn get_transaction( &self, @@ -881,13 +1162,35 @@ /// Server streaming response type for the GetTaddressTxids method. type GetTaddressTxidsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + Send + > + + std::marker::Send + 'static; - /// Return the txids corresponding to the given t-address within the given block range + /// Return RawTransactions that match the given transparent address filter. + /// + /// Note: This function is misnamed; it returns complete `RawTransaction` values, not TxIds. + /// NOTE: this method is deprecated; please use GetTaddressTransactions instead. async fn get_taddress_txids( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Server streaming response type for the GetTaddressTransactions method. + type GetTaddressTransactionsStream: tonic::codegen::tokio_stream::Stream< + Item = std::result::Result, + > + + std::marker::Send + + 'static; + /// Return the transactions corresponding to the given t-address within the given block range. + /// Mempool transactions are not included in the results. + async fn get_taddress_transactions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_taddress_balance( &self, request: tonic::Request, @@ -898,33 +1201,47 @@ ) -> std::result::Result, tonic::Status>; /// Server streaming response type for the GetMempoolTx method. type GetMempoolTxStream: tonic::codegen::tokio_stream::Stream< - Item = std::result::Result, - > + Send + Item = std::result::Result< + crate::proto::compact_formats::CompactTx, + tonic::Status, + >, + > + + std::marker::Send + 'static; - /// Return the compact transactions currently in the mempool; the results - /// can be a few seconds out of date. If the Exclude list is empty, return - /// all transactions; otherwise return all *except* those in the Exclude list - /// (if any); this allows the client to avoid receiving transactions that it - /// already has (from an earlier call to this rpc). The transaction IDs in the - /// Exclude list can be shortened to any number of bytes to make the request - /// more bandwidth-efficient; if two or more transactions in the mempool - /// match a shortened txid, they are all sent (none is excluded). Transactions - /// in the exclude list that don't exist in the mempool are ignored. + /// Returns a stream of the compact transaction representation for transactions + /// currently in the mempool. The results of this operation may be a few + /// seconds out of date.
If the `exclude_txid_suffixes` list is empty, + /// return all transactions; otherwise return all *except* those in the + /// `exclude_txid_suffixes` list (if any); this allows the client to avoid + /// receiving transactions that it already has (from an earlier call to this + /// RPC). The transaction IDs in the `exclude_txid_suffixes` list can be + /// shortened to any number of bytes to make the request more + /// bandwidth-efficient; if two or more transactions in the mempool match a + /// txid suffix, none of the matching transactions are excluded. Txid + /// suffixes in the exclude list that don't match any transactions in the + /// mempool are ignored. async fn get_mempool_tx( &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the GetMempoolStream method. type GetMempoolStreamStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + Send + > + + std::marker::Send + 'static; /// Return a stream of current Mempool transactions. This will keep the output stream open while /// there are mempool transactions. It will close the returned stream when a new block is mined. async fn get_mempool_stream( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// GetTreeState returns the note commitment tree state corresponding to the given block. /// See section 3.7 of the Zcash protocol specification. It returns several other useful /// values also (even though they can be obtained using GetBlock). @@ -940,27 +1257,38 @@ pub mod compact_tx_streamer_server { /// Server streaming response type for the GetSubtreeRoots method. type GetSubtreeRootsStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + Send + > + + std::marker::Send + 'static; - /// Returns a stream of information about roots of subtrees of the Sapling and Orchard - /// note commitment trees. + /// Returns a stream of information about roots of subtrees of the note commitment tree + /// for the specified shielded protocol (Sapling or Orchard). async fn get_subtree_roots( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn get_address_utxos( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the GetAddressUtxosStream method. 
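On the server side, the "ambiguous suffixes exclude nothing" rule from the GetMempoolTx documentation in this trait could be implemented as a pure function along these lines (illustrative names; the quadratic scan is fine for a sketch):

```rust
/// Apply the GetMempoolTx exclusion rule: a mempool txid is excluded only
/// when some requested suffix matches it unambiguously, i.e. exactly one
/// mempool transaction ends with that suffix. Ambiguous and unmatched
/// suffixes are ignored.
fn apply_exclusions(mempool: &[Vec<u8>], suffixes: &[Vec<u8>]) -> Vec<Vec<u8>> {
    mempool
        .iter()
        .filter(|txid| {
            !suffixes.iter().any(|s| {
                txid.ends_with(s)
                    && mempool.iter().filter(|t| t.ends_with(s)).count() == 1
            })
        })
        .cloned()
        .collect()
}
```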
type GetAddressUtxosStreamStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, - > + Send + > + + std::marker::Send + 'static; async fn get_address_utxos_stream( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Return information about this lightwalletd instance and the blockchain async fn get_lightd_info( &self, @@ -973,20 +1301,18 @@ pub mod compact_tx_streamer_server { ) -> std::result::Result, tonic::Status>; } #[derive(Debug)] - pub struct CompactTxStreamerServer { - inner: _Inner, + pub struct CompactTxStreamerServer { + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); - impl CompactTxStreamerServer { + impl CompactTxStreamerServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -995,7 +1321,10 @@ pub mod compact_tx_streamer_server { max_encoding_message_size: None, } } - pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService where F: tonic::service::Interceptor, { @@ -1033,8 +1362,8 @@ pub mod compact_tx_streamer_server { impl tonic::codegen::Service> for CompactTxStreamerServer where T: CompactTxStreamer, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { type Response = http::Response; type Error = std::convert::Infallible; @@ -1046,21 +1375,27 @@ pub mod compact_tx_streamer_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock" => { #[allow(non_camel_case_types)] struct GetLatestBlockSvc(pub Arc); - impl tonic::server::UnaryService for GetLatestBlockSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetLatestBlockSvc { type Response = super::BlockId; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_latest_block(&inner, request).await + ::get_latest_block(&inner, request) + .await }; Box::pin(fut) } @@ -1071,7 +1406,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetLatestBlockSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1091,9 +1425,14 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock" => { #[allow(non_camel_case_types)] struct GetBlockSvc(pub Arc); - impl tonic::server::UnaryService for GetBlockSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService for GetBlockSvc { type Response = crate::proto::compact_formats::CompactBlock; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -1111,7 +1450,6 @@ pub mod 
compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetBlockSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1131,18 +1469,25 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockNullifiers" => { #[allow(non_camel_case_types)] struct GetBlockNullifiersSvc(pub Arc); - impl tonic::server::UnaryService - for GetBlockNullifiersSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetBlockNullifiersSvc { type Response = crate::proto::compact_formats::CompactBlock; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_block_nullifiers(&inner, request) + ::get_block_nullifiers( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1154,7 +1499,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetBlockNullifiersSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1174,21 +1518,24 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRange" => { #[allow(non_camel_case_types)] struct GetBlockRangeSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetBlockRangeSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetBlockRangeSvc { type Response = crate::proto::compact_formats::CompactBlock; type ResponseStream = T::GetBlockRangeStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_block_range(&inner, request).await + ::get_block_range(&inner, request) + .await }; Box::pin(fut) } @@ -1199,7 +1546,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetBlockRangeSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1219,14 +1565,16 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlockRangeNullifiers" => { #[allow(non_camel_case_types)] struct GetBlockRangeNullifiersSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetBlockRangeNullifiersSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetBlockRangeNullifiersSvc { type Response = crate::proto::compact_formats::CompactBlock; type ResponseStream = T::GetBlockRangeNullifiersStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -1234,9 +1582,10 @@ pub mod compact_tx_streamer_server { let inner = Arc::clone(&self.0); let fut = async move { ::get_block_range_nullifiers( - &inner, request, - ) - .await + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1247,7 +1596,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = 
self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetBlockRangeNullifiersSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1267,16 +1615,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTransaction" => { #[allow(non_camel_case_types)] struct GetTransactionSvc(pub Arc); - impl tonic::server::UnaryService for GetTransactionSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetTransactionSvc { type Response = super::RawTransaction; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_transaction(&inner, request).await + ::get_transaction(&inner, request) + .await }; Box::pin(fut) } @@ -1287,7 +1642,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTransactionSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1307,18 +1661,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/SendTransaction" => { #[allow(non_camel_case_types)] struct SendTransactionSvc(pub Arc); - impl tonic::server::UnaryService - for SendTransactionSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for SendTransactionSvc { type Response = super::SendResponse; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::send_transaction(&inner, request).await + ::send_transaction(&inner, request) + .await }; Box::pin(fut) } @@ -1329,7 +1688,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = SendTransactionSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1349,21 +1707,28 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTxids" => { #[allow(non_camel_case_types)] struct GetTaddressTxidsSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetTaddressTxidsSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService< + super::TransparentAddressBlockFilter, + > for GetTaddressTxidsSvc { type Response = super::RawTransaction; type ResponseStream = T::GetTaddressTxidsStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_taddress_txids(&inner, request).await + ::get_taddress_txids( + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1374,7 +1739,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTaddressTxidsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = 
tonic::server::Grpc::new(codec) @@ -1391,21 +1755,79 @@ pub mod compact_tx_streamer_server { }; Box::pin(fut) } + "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressTransactions" => { + #[allow(non_camel_case_types)] + struct GetTaddressTransactionsSvc(pub Arc); + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService< + super::TransparentAddressBlockFilter, + > for GetTaddressTransactionsSvc { + type Response = super::RawTransaction; + type ResponseStream = T::GetTaddressTransactionsStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_taddress_transactions( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetTaddressTransactionsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalance" => { #[allow(non_camel_case_types)] struct GetTaddressBalanceSvc(pub Arc); - impl tonic::server::UnaryService - for GetTaddressBalanceSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetTaddressBalanceSvc { type Response = super::Balance; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_taddress_balance(&inner, request) + ::get_taddress_balance( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1417,7 +1839,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTaddressBalanceSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1437,11 +1858,15 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTaddressBalanceStream" => { #[allow(non_camel_case_types)] struct GetTaddressBalanceStreamSvc(pub Arc); - impl tonic::server::ClientStreamingService - for GetTaddressBalanceStreamSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ClientStreamingService + for GetTaddressBalanceStreamSvc { type Response = super::Balance; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request>, @@ -1449,9 +1874,10 @@ pub mod compact_tx_streamer_server { let inner = Arc::clone(&self.0); let fut = async move { ::get_taddress_balance_stream( - &inner, request, - ) - .await + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1462,7 +1888,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = 
self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTaddressBalanceStreamSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1482,20 +1907,24 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolTx" => { #[allow(non_camel_case_types)] struct GetMempoolTxSvc(pub Arc); - impl tonic::server::ServerStreamingService - for GetMempoolTxSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetMempoolTxSvc { type Response = crate::proto::compact_formats::CompactTx; type ResponseStream = T::GetMempoolTxStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_mempool_tx(&inner, request).await + ::get_mempool_tx(&inner, request) + .await }; Box::pin(fut) } @@ -1506,7 +1935,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetMempoolTxSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1526,17 +1954,27 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolStream" => { #[allow(non_camel_case_types)] struct GetMempoolStreamSvc(pub Arc); - impl tonic::server::ServerStreamingService - for GetMempoolStreamSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetMempoolStreamSvc { type Response = super::RawTransaction; type ResponseStream = T::GetMempoolStreamStream; - type Future = - BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_mempool_stream(&inner, request).await + ::get_mempool_stream( + &inner, + request, + ) + .await }; Box::pin(fut) } @@ -1547,7 +1985,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetMempoolStreamSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1567,16 +2004,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetTreeState" => { #[allow(non_camel_case_types)] struct GetTreeStateSvc(pub Arc); - impl tonic::server::UnaryService for GetTreeStateSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetTreeStateSvc { type Response = super::TreeState; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_tree_state(&inner, request).await + ::get_tree_state(&inner, request) + .await }; Box::pin(fut) } @@ -1587,7 +2031,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTreeStateSvc(inner); let codec = 
tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1607,13 +2050,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestTreeState" => { #[allow(non_camel_case_types)] struct GetLatestTreeStateSvc(pub Arc); - impl tonic::server::UnaryService for GetLatestTreeStateSvc { + impl tonic::server::UnaryService + for GetLatestTreeStateSvc { type Response = super::TreeState; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_latest_tree_state(&inner, request) + ::get_latest_tree_state( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1625,7 +2078,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetLatestTreeStateSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1645,21 +2097,24 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetSubtreeRoots" => { #[allow(non_camel_case_types)] struct GetSubtreeRootsSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetSubtreeRootsSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetSubtreeRootsSvc { type Response = super::SubtreeRoot; type ResponseStream = T::GetSubtreeRootsStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_subtree_roots(&inner, request).await + ::get_subtree_roots(&inner, request) + .await }; Box::pin(fut) } @@ -1670,7 +2125,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetSubtreeRootsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1690,19 +2144,23 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxos" => { #[allow(non_camel_case_types)] struct GetAddressUtxosSvc(pub Arc); - impl - tonic::server::UnaryService - for GetAddressUtxosSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService + for GetAddressUtxosSvc { type Response = super::GetAddressUtxosReplyList; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_address_utxos(&inner, request).await + ::get_address_utxos(&inner, request) + .await }; Box::pin(fut) } @@ -1713,7 +2171,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetAddressUtxosSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1733,21 +2190,26 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetAddressUtxosStream" => { 
#[allow(non_camel_case_types)] struct GetAddressUtxosStreamSvc(pub Arc); - impl - tonic::server::ServerStreamingService - for GetAddressUtxosStreamSvc - { + impl< + T: CompactTxStreamer, + > tonic::server::ServerStreamingService + for GetAddressUtxosStreamSvc { type Response = super::GetAddressUtxosReply; type ResponseStream = T::GetAddressUtxosStreamStream; - type Future = - BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_address_utxos_stream(&inner, request) + ::get_address_utxos_stream( + &inner, + request, + ) .await }; Box::pin(fut) @@ -1759,7 +2221,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetAddressUtxosStreamSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1779,13 +2240,21 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLightdInfo" => { #[allow(non_camel_case_types)] struct GetLightdInfoSvc(pub Arc); - impl tonic::server::UnaryService for GetLightdInfoSvc { + impl tonic::server::UnaryService + for GetLightdInfoSvc { type Response = super::LightdInfo; - type Future = BoxFuture, tonic::Status>; - fn call(&mut self, request: tonic::Request) -> Self::Future { + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_lightd_info(&inner, request).await + ::get_lightd_info(&inner, request) + .await }; Box::pin(fut) } @@ -1796,7 +2265,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetLightdInfoSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1816,9 +2284,14 @@ pub mod compact_tx_streamer_server { "/cash.z.wallet.sdk.rpc.CompactTxStreamer/Ping" => { #[allow(non_camel_case_types)] struct PingSvc(pub Arc); - impl tonic::server::UnaryService for PingSvc { + impl< + T: CompactTxStreamer, + > tonic::server::UnaryService for PingSvc { type Response = super::PingResponse; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -1836,7 +2309,6 @@ pub mod compact_tx_streamer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = PingSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1853,18 +2325,27 @@ pub mod compact_tx_streamer_server { }; Box::pin(fut) } - _ => Box::pin(async move { - Ok(http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap()) - }), + _ => { + Box::pin(async move { + let mut response = http::Response::new(empty_body()); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + 
tonic::metadata::GRPC_CONTENT_TYPE,
+                        );
+                    Ok(response)
+                })
+            }
            }
        }
    }
-    impl<T: CompactTxStreamer> Clone for CompactTxStreamerServer<T> {
+    impl<T> Clone for CompactTxStreamerServer<T> {
        fn clone(&self) -> Self {
            let inner = self.inner.clone();
            Self {
@@ -1876,17 +2357,9 @@ pub mod compact_tx_streamer_server {
            }
        }
    }
-    impl<T: CompactTxStreamer> Clone for _Inner<T> {
-        fn clone(&self) -> Self {
-            Self(Arc::clone(&self.0))
-        }
-    }
-    impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {
-        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-            write!(f, "{:?}", self.0)
-        }
-    }
-    impl<T: CompactTxStreamer> tonic::server::NamedService for CompactTxStreamerServer<T> {
-        const NAME: &'static str = "cash.z.wallet.sdk.rpc.CompactTxStreamer";
+    /// Generated gRPC service name
+    pub const SERVICE_NAME: &str = "cash.z.wallet.sdk.rpc.CompactTxStreamer";
+    impl<T> tonic::server::NamedService for CompactTxStreamerServer<T> {
+        const NAME: &'static str = SERVICE_NAME;
    }
}
diff --git a/zaino-proto/src/proto/utils.rs b/zaino-proto/src/proto/utils.rs
new file mode 100644
index 000000000..4eaf60f87
--- /dev/null
+++ b/zaino-proto/src/proto/utils.rs
@@ -0,0 +1,398 @@
+use crate::proto::{
+    compact_formats::{ChainMetadata, CompactBlock, CompactOrchardAction},
+    service::{BlockId, BlockRange, PoolType},
+};
+use zebra_chain::block::Height;
+use zebra_state::HashOrHeight;
+
+#[derive(Debug, PartialEq, Eq)]
+/// Errors that can arise when mapping `PoolType` from an `i32` value.
+pub enum PoolTypeError {
+    /// The pool type value was mapped to the enum `PoolType::Invalid`.
+    InvalidPoolType,
+    /// The pool type value could not be mapped to any known pool type.
+    UnknownPoolType(i32),
+}
+
+/// Converts a vector of pool_types (`i32`) into its rich-type representation.
+/// Returns `PoolTypeError::InvalidPoolType` when invalid `pool_types` are found,
+/// or `PoolTypeError::UnknownPoolType` if unknown ones are found.
+pub fn pool_types_from_vector(pool_types: &[i32]) -> Result<Vec<PoolType>, PoolTypeError> {
+    let pools = if pool_types.is_empty() {
+        vec![PoolType::Sapling, PoolType::Orchard]
+    } else {
+        let mut pools: Vec<PoolType> = vec![];
+
+        for pool in pool_types.iter() {
+            match PoolType::try_from(*pool) {
+                Ok(pool_type) => {
+                    if pool_type == PoolType::Invalid {
+                        return Err(PoolTypeError::InvalidPoolType);
+                    } else {
+                        pools.push(pool_type);
+                    }
+                }
+                Err(_) => {
+                    return Err(PoolTypeError::UnknownPoolType(*pool));
+                }
+            };
+        }
+
+        pools.clone()
+    };
+    Ok(pools)
+}
+
+/// Converts a `Vec<PoolType>` into a `Vec<i32>`.
+pub fn pool_types_into_i32_vec(pool_types: Vec<PoolType>) -> Vec<i32> {
+    pool_types.iter().map(|p| *p as i32).collect()
+}
+
+/// Errors that can be present in the request of the GetBlockRange RPC.
+pub enum GetBlockRangeError {
+    /// Error: No start height given.
+    NoStartHeightProvided,
+    /// Error: No end height given.
+    NoEndHeightProvided,
+    /// Start height out of range. Failed to convert to u32.
+    StartHeightOutOfRange,
+    /// End height out of range. Failed to convert to u32.
+    EndHeightOutOfRange,
+    /// An invalid pool type request was provided.
+    PoolTypeArgumentError(PoolTypeError),
+}
+
+/// `BlockRange` request that has been validated in terms of the semantics
+/// of the `GetBlockRange` RPC.
+///
+/// # Guarantees
+///
+/// - `start` and `end` were provided in the request.
+/// - `start` and `end` are in the inclusive range `0..=u32::MAX`, so they can be
+///   safely converted to `u32` (for example via `u32::try_from(...)`) without
+///   failing.
+/// - `pool_types` has been validated via `pool_types_from_vector`.
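Before the validated-request type continues below, a minimal usage sketch of `pool_types_from_vector` (the raw values are illustrative; the empty-input default mirrors the backwards-compatibility rule in the v0.4.0 protocol changelog):

```rust
use zaino_proto::proto::{
    service::PoolType,
    utils::{pool_types_from_vector, PoolTypeError},
};

fn main() {
    // An empty `pool_types` field falls back to the legacy default: Sapling + Orchard.
    assert_eq!(
        pool_types_from_vector(&[]),
        Ok(vec![PoolType::Sapling, PoolType::Orchard])
    );

    // Transparent data can now be requested explicitly alongside shielded pools.
    let raw = [PoolType::Transparent as i32, PoolType::Orchard as i32];
    assert_eq!(
        pool_types_from_vector(&raw),
        Ok(vec![PoolType::Transparent, PoolType::Orchard])
    );

    // Values outside the generated enum are rejected, not silently dropped.
    assert_eq!(
        pool_types_from_vector(&[42]),
        Err(PoolTypeError::UnknownPoolType(42))
    );
}
```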
+pub struct ValidatedBlockRangeRequest {
+    start: u64,
+    end: u64,
+    pool_types: Vec<PoolType>,
+}
+
+impl ValidatedBlockRangeRequest {
+    /// Validates a `BlockRange` in terms of the `GetBlockRange` RPC.
+    ///
+    /// # Errors
+    ///
+    /// Returns:
+    /// - [`GetBlockRangeError::NoStartHeightProvided`] if `request.start` is `None`.
+    /// - [`GetBlockRangeError::NoEndHeightProvided`] if `request.end` is `None`.
+    /// - [`GetBlockRangeError::StartHeightOutOfRange`] if `start` does not fit in a `u32`.
+    /// - [`GetBlockRangeError::EndHeightOutOfRange`] if `end` does not fit in a `u32`.
+    /// - [`GetBlockRangeError::PoolTypeArgumentError`] if pool types are invalid.
+    pub fn new_from_block_range(
+        request: &BlockRange,
+    ) -> Result<ValidatedBlockRangeRequest, GetBlockRangeError> {
+        let start = match &request.start {
+            Some(block_id) => block_id.height,
+            None => {
+                return Err(GetBlockRangeError::NoStartHeightProvided);
+            }
+        };
+        let end = match &request.end {
+            Some(block_id) => block_id.height,
+            None => {
+                return Err(GetBlockRangeError::NoEndHeightProvided);
+            }
+        };
+
+        if u32::try_from(start).is_err() {
+            return Err(GetBlockRangeError::StartHeightOutOfRange);
+        }
+        if u32::try_from(end).is_err() {
+            return Err(GetBlockRangeError::EndHeightOutOfRange);
+        }
+
+        let pool_types = pool_types_from_vector(&request.pool_types)
+            .map_err(GetBlockRangeError::PoolTypeArgumentError)?;
+
+        Ok(ValidatedBlockRangeRequest {
+            start,
+            end,
+            pool_types,
+        })
+    }
+
+    /// Start height of the `BlockRange` request.
+    pub fn start(&self) -> u64 {
+        self.start
+    }
+
+    /// End height of the `BlockRange` request.
+    pub fn end(&self) -> u64 {
+        self.end
+    }
+
+    /// Pool types of the `BlockRange` request.
+    pub fn pool_types(&self) -> Vec<PoolType> {
+        self.pool_types.clone()
+    }
+
+    /// Checks whether this request is specified in reversed order.
+    pub fn is_reverse_ordered(&self) -> bool {
+        self.start > self.end
+    }
+
+    /// Reverses the order of this request.
+    pub fn reverse(&mut self) {
+        (self.start, self.end) = (self.end, self.start);
+    }
+}
+
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct PoolTypeFilter {
+    include_transparent: bool,
+    include_sapling: bool,
+    include_orchard: bool,
+}
+
+impl std::default::Default for PoolTypeFilter {
+    /// By default the filter includes the `Sapling` and `Orchard` pools.
+    fn default() -> Self {
+        PoolTypeFilter {
+            include_transparent: false,
+            include_sapling: true,
+            include_orchard: true,
+        }
+    }
+}
+
+impl PoolTypeFilter {
+    /// A `PoolTypeFilter` that will include all existing pool types.
+    pub fn includes_all() -> Self {
+        PoolTypeFilter {
+            include_transparent: true,
+            include_sapling: true,
+            include_orchard: true,
+        }
+    }
+
+    /// Creates a `PoolTypeFilter` from a slice of raw `i32` `PoolType`s.
+    /// If the slice is empty it will return `Self::default()`.
+    /// If the slice contains `PoolType::Invalid` or more than 3 elements,
+    /// returns `PoolTypeError::InvalidPoolType`.
+    pub fn new_from_slice(pool_types: &[i32]) -> Result<Self, PoolTypeError> {
+        let pool_types = pool_types_from_vector(pool_types)?;
+
+        Self::new_from_pool_types(&pool_types)
+    }
+
+    /// Creates a `PoolTypeFilter` from a vector of `PoolType`.
+    /// If the vector is empty it will return `Self::default()`.
+    /// If the vector contains `PoolType::Invalid`, or contains more than 3 elements,
+    /// returns `PoolTypeError::InvalidPoolType`.
+    pub fn new_from_pool_types(
+        pool_types: &Vec<PoolType>,
+    ) -> Result<Self, PoolTypeError> {
+        if pool_types.len() > PoolType::Orchard as usize {
+            return Err(PoolTypeError::InvalidPoolType);
+        }
+
+        if pool_types.is_empty() {
+            Ok(Self::default())
+        } else {
+            let mut filter = PoolTypeFilter::empty();
+
+            for pool_type in pool_types {
+                match pool_type {
+                    PoolType::Invalid => return Err(PoolTypeError::InvalidPoolType),
+                    PoolType::Transparent => filter.include_transparent = true,
+                    PoolType::Sapling => filter.include_sapling = true,
+                    PoolType::Orchard => filter.include_orchard = true,
+                }
+            }
+
+            // Guard against returning an invalid state; this should never happen.
+            if filter.is_empty() {
+                Ok(Self::default())
+            } else {
+                Ok(filter)
+            }
+        }
+    }
+
+    /// For internal use only: this is an invalid state.
+    fn empty() -> Self {
+        Self {
+            include_transparent: false,
+            include_sapling: false,
+            include_orchard: false,
+        }
+    }
+
+    /// For internal use only.
+    fn is_empty(&self) -> bool {
+        !self.include_transparent && !self.include_sapling && !self.include_orchard
+    }
+
+    /// Returns whether the filter includes transparent data.
+    pub fn includes_transparent(&self) -> bool {
+        self.include_transparent
+    }
+
+    /// Returns whether the filter includes sapling data.
+    pub fn includes_sapling(&self) -> bool {
+        self.include_sapling
+    }
+
+    /// Returns whether the filter includes orchard data.
+    pub fn includes_orchard(&self) -> bool {
+        self.include_orchard
+    }
+
+    /// Convert this filter into the corresponding `Vec<PoolType>`.
+    ///
+    /// The resulting vector contains each included pool type at most once.
+    pub fn to_pool_types_vector(&self) -> Vec<PoolType> {
+        let mut pool_types: Vec<PoolType> = Vec::new();
+
+        if self.include_transparent {
+            pool_types.push(PoolType::Transparent);
+        }
+
+        if self.include_sapling {
+            pool_types.push(PoolType::Sapling);
+        }
+
+        if self.include_orchard {
+            pool_types.push(PoolType::Orchard);
+        }
+
+        pool_types
+    }
+
+    /// Testing only.
+    #[allow(dead_code)]
+    pub(crate) fn from_checked_parts(
+        include_transparent: bool,
+        include_sapling: bool,
+        include_orchard: bool,
+    ) -> Self {
+        PoolTypeFilter {
+            include_transparent,
+            include_sapling,
+            include_orchard,
+        }
+    }
+}
+
+/// Converts a [`BlockId`] into the Zebra [`HashOrHeight`] type.
+pub fn blockid_to_hashorheight(block_id: BlockId) -> Option<HashOrHeight> {
+    <[u8; 32]>::try_from(block_id.hash)
+        .map(zebra_chain::block::Hash)
+        .map(HashOrHeight::from)
+        .or_else(|_| {
+            block_id
+                .height
+                .try_into()
+                .map(|height| HashOrHeight::Height(Height(height)))
+        })
+        .ok()
+}
+
+/// Strips the outputs from all transactions, retains only
+/// the nullifier from all orchard actions, and clears the chain
+/// metadata from the block.
+pub fn compact_block_to_nullifiers(mut block: CompactBlock) -> CompactBlock {
+    for ctransaction in &mut block.vtx {
+        ctransaction.outputs = Vec::new();
+        for caction in &mut ctransaction.actions {
+            *caction = CompactOrchardAction {
+                nullifier: caction.nullifier.clone(),
+                ..Default::default()
+            }
+        }
+    }
+
+    block.chain_metadata = Some(ChainMetadata {
+        sapling_commitment_tree_size: 0,
+        orchard_commitment_tree_size: 0,
+    });
+    block
+}
+
+#[cfg(test)]
+mod test {
+    use crate::proto::{
+        service::PoolType,
+        utils::{PoolTypeError, PoolTypeFilter},
+    };
+
+    #[test]
+    fn test_pool_type_filter_fails_when_invalid() {
+        let pools = [
+            PoolType::Transparent,
+            PoolType::Sapling,
+            PoolType::Orchard,
+            PoolType::Invalid,
+        ]
.to_vec(); + + assert_eq!( + PoolTypeFilter::new_from_pool_types(&pools), + Err(PoolTypeError::InvalidPoolType) + ); + } + + #[test] + fn test_pool_type_filter_fails_when_too_many_items() { + let pools = [ + PoolType::Transparent, + PoolType::Sapling, + PoolType::Orchard, + PoolType::Orchard, + ] + .to_vec(); + + assert_eq!( + PoolTypeFilter::new_from_pool_types(&pools), + Err(PoolTypeError::InvalidPoolType) + ); + } + + #[test] + fn test_pool_type_filter_t_z_o() { + let pools = [PoolType::Transparent, PoolType::Sapling, PoolType::Orchard].to_vec(); + + assert_eq!( + PoolTypeFilter::new_from_pool_types(&pools), + Ok(PoolTypeFilter::from_checked_parts(true, true, true)) + ); + } + + #[test] + fn test_pool_type_filter_t() { + let pools = [PoolType::Transparent].to_vec(); + + assert_eq!( + PoolTypeFilter::new_from_pool_types(&pools), + Ok(PoolTypeFilter::from_checked_parts(true, false, false)) + ); + } + + #[test] + fn test_pool_type_filter_default() { + assert_eq!( + PoolTypeFilter::new_from_pool_types(&vec![]), + Ok(PoolTypeFilter::default()) + ); + } + + #[test] + fn test_pool_type_filter_includes_all() { + assert_eq!( + PoolTypeFilter::from_checked_parts(true, true, true), + PoolTypeFilter::includes_all() + ); + } +} diff --git a/zaino-serve/src/rpc/grpc/service.rs b/zaino-serve/src/rpc/grpc/service.rs index 6e34e78dd..cf723095b 100644 --- a/zaino-serve/src/rpc/grpc/service.rs +++ b/zaino-serve/src/rpc/grpc/service.rs @@ -8,8 +8,8 @@ use zaino_proto::proto::{ compact_formats::CompactBlock, service::{ compact_tx_streamer_server::CompactTxStreamer, Address, AddressList, Balance, BlockId, - BlockRange, ChainSpec, Duration, Empty, Exclude, GetAddressUtxosArg, - GetAddressUtxosReplyList, GetSubtreeRootsArg, LightdInfo, PingResponse, RawTransaction, + BlockRange, ChainSpec, Duration, Empty, GetAddressUtxosArg, GetAddressUtxosReplyList, + GetMempoolTxRequest, GetSubtreeRootsArg, LightdInfo, PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, }; @@ -142,20 +142,26 @@ where get_transaction(TxFilter) -> RawTransaction, "submit the given transaction to the zcash network." send_transaction(RawTransaction) -> SendResponse, + "Return the transactions corresponding to the given t-address within the given block range" + get_taddress_transactions(TransparentAddressBlockFilter) -> Self::GetTaddressTransactionsStream as streaming, "This name is misleading, returns the full transactions that have either inputs or outputs connected to the given transparent address." get_taddress_txids(TransparentAddressBlockFilter) -> Self::GetTaddressTxidsStream as streaming, "Returns the total balance for a list of taddrs" get_taddress_balance(AddressList) -> Balance, - "Return the compact transactions currently in the mempool; the results \ - can be a few seconds out of date. If the Exclude list is empty, return \ - all transactions; otherwise return all *except* those in the Exclude list \ - (if any); this allows the client to avoid receiving transactions that it \ - already has (from an earlier call to this rpc). The transaction IDs in the \ - Exclude list can be shortened to any number of bytes to make the request \ - more bandwidth-efficient; if two or more transactions in the mempool \ - match a shortened txid, they are all sent (none is excluded). Transactions \ - in the exclude list that don't exist in the mempool are ignored." 
- get_mempool_tx(Exclude) -> Self::GetMempoolTxStream as streaming, + + "Returns a stream of the compact transaction representation for transactions \ + currently in the mempool. The results of this operation may be a few \ + seconds out of date. If the `exclude_txid_suffixes` list is empty, \ + return all transactions; otherwise return all *except* those in the \ + `exclude_txid_suffixes` list (if any); this allows the client to avoid \ + receiving transactions that it already has (from an earlier call to this \ + RPC). The transaction IDs in the `exclude_txid_suffixes` list can be \ + shortened to any number of bytes to make the request more \ + bandwidth-efficient; if two or more transactions in the mempool match a \ + txid suffix, none of the matching transactions are excluded. Txid \ + suffixes in the exclude list that don't match any transactions in the \ + mempool are ignored." + get_mempool_tx(GetMempoolTxRequest) -> Self::GetMempoolTxStream as streaming, "GetTreeState returns the note commitment tree state corresponding to the given block. \ See section 3.7 of the Zcash protocol specification. It returns several other useful \ values also (even though they can be obtained using GetBlock). @@ -199,6 +205,10 @@ where #[doc = " Server streaming response type for the GetBlockRangeNullifiers method."] type GetBlockRangeNullifiersStream = std::pin::Pin>; + /// Server streaming response type for the GetTaddressTransactions method. + #[doc = "Server streaming response type for the GetTaddressTransactions method."] + type GetTaddressTransactionsStream = std::pin::Pin>; + /// Server streaming response type for the GetTaddressTxids method. #[doc = "Server streaming response type for the GetTaddressTxids method."] type GetTaddressTxidsStream = std::pin::Pin>; diff --git a/zaino-serve/src/server/config.rs b/zaino-serve/src/server/config.rs index 03a1e244a..31661dd99 100644 --- a/zaino-serve/src/server/config.rs +++ b/zaino-serve/src/server/config.rs @@ -19,6 +19,7 @@ pub struct GrpcTls { #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] pub struct GrpcServerConfig { /// gRPC server bind addr. + #[serde(alias = "grpc_listen_address")] pub listen_address: SocketAddr, /// Enables TLS. pub tls: Option, diff --git a/zaino-state/CHANGELOG.md b/zaino-state/CHANGELOG.md new file mode 100644 index 000000000..009f5ec4c --- /dev/null +++ b/zaino-state/CHANGELOG.md @@ -0,0 +1,40 @@ +# Changelog +All notable changes to this library will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this library adheres to Rust's notion of +[Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
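As an aside on the `GrpcServerConfig` change above: `#[serde(alias = "grpc_listen_address")]` lets configuration files written against the old key name keep deserializing. A stand-in sketch of the mechanism (the struct below, the port, and the use of `serde_json` are illustrative only, not Zaino's actual config loader):

```rust
use std::net::SocketAddr;

#[derive(Debug, serde::Deserialize)]
struct GrpcServerConfig {
    /// Accepts both the new key and the pre-rename `grpc_listen_address`.
    #[serde(alias = "grpc_listen_address")]
    listen_address: SocketAddr,
}

fn main() {
    // Old-style and new-style keys deserialize to the same field.
    let old: GrpcServerConfig =
        serde_json::from_str(r#"{ "grpc_listen_address": "127.0.0.1:8137" }"#).unwrap();
    let new: GrpcServerConfig =
        serde_json::from_str(r#"{ "listen_address": "127.0.0.1:8137" }"#).unwrap();
    assert_eq!(old.listen_address, new.listen_address);
}
```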
+
+## [Unreleased]
+
+### Added
+- `rpc::grpc::service.rs`, `backends::fetch::get_taddress_transactions`:
+  - These functions implement the GetTaddressTransactions gRPC method of
+    lightclient-protocol v0.4.0, which replaces `GetTaddressTxids`.
+- `chain_index`
+  - `::finalised_state::db::v0::get_compact_block_stream`
+  - `::finalised_state::db::v1::get_compact_block_stream`
+  - `::types::db::legacy`:
+    - `compact_vin`
+    - `compact_vout`
+    - `to_compact`: returns a `CompactTx` from a `TxInCompact`
+- `local_cache::compact_block_with_pool_types`
+
+### Changed
+- `get_mempool_tx` now takes `GetMempoolTxRequest` as a parameter
+- `chain_index::finalised_state`
+  - `::db`
+    - `::v0`
+      - `get_compact_block` now takes a `PoolTypeFilter` parameter
+    - `::v1`
+      - `get_compact_block` now takes a `PoolTypeFilter` parameter
+  - `::reader`:
+    - `get_compact_block` now takes a `PoolTypeFilter` parameter
+- `chain_index::types::db::legacy`:
+  - `to_compact_block()`: now returns transparent data
+
+### Deprecated
+- `GetTaddressTxids` is replaced by `GetTaddressTransactions`
+
+### Removed
+- `Ping` for gRPC service
+- `utils::blockid_to_hashorheight` moved to `zaino_proto::utils`
diff --git a/zaino-state/Cargo.toml b/zaino-state/Cargo.toml
index b3f3d332c..a6a360b91 100644
--- a/zaino-state/Cargo.toml
+++ b/zaino-state/Cargo.toml
@@ -66,6 +66,10 @@ sapling-crypto = "0.5.0"
 tempfile = { workspace = true }
 tracing-subscriber = { workspace = true }
 once_cell = { workspace = true }
+zebra-chain = { workspace = true, features = ["proptest-impl"] }
+proptest.workspace = true
+incrementalmerkletree = "*"
+rand = "0.8.5"

 [build-dependencies]
 whoami = { workspace = true }
diff --git a/zaino-state/src/backends/fetch.rs b/zaino-state/src/backends/fetch.rs
index ce0f1febe..969cb2fa0 100644
--- a/zaino-state/src/backends/fetch.rs
+++ b/zaino-state/src/backends/fetch.rs
@@ -42,12 +42,21 @@ use zaino_fetch::{
 use zaino_proto::proto::{
     compact_formats::CompactBlock,
     service::{
-        AddressList, Balance, BlockId, BlockRange, Duration, Exclude, GetAddressUtxosArg,
-        GetAddressUtxosReply, GetAddressUtxosReplyList, LightdInfo, PingResponse, RawTransaction,
-        SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter,
+        AddressList, Balance, BlockId, BlockRange, Duration, GetAddressUtxosArg,
+        GetAddressUtxosReply, GetAddressUtxosReplyList, GetMempoolTxRequest, LightdInfo,
+        PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState,
+        TxFilter,
+    },
+    utils::{
+        blockid_to_hashorheight, compact_block_to_nullifiers, GetBlockRangeError, PoolTypeFilter,
+        ValidatedBlockRangeRequest,
     },
 };
+use crate::{
+    chain_index::NonFinalizedSnapshot as _, ChainIndex, NodeBackedChainIndex,
+    NodeBackedChainIndexSubscriber,
+};
 #[allow(deprecated)]
 use crate::{
     chain_index::{source::ValidatorConnector, types},
@@ -61,13 +70,9 @@ use crate::{
         AddressStream, CompactBlockStream, CompactTransactionStream, RawTransactionStream,
         UtxoReplyStream,
     },
-    utils::{blockid_to_hashorheight, get_build_info, ServiceMetadata},
+    utils::{get_build_info, ServiceMetadata},
     BackendType,
 };
-use crate::{
-    utils::compact_block_to_nullifiers, ChainIndex, NodeBackedChainIndex,
-    NodeBackedChainIndexSubscriber,
-};

 /// Chain fetch service backed by Zcashd's JsonRPC engine.
/// @@ -112,7 +117,7 @@ impl ZcashService for FetchService { info!("Launching Chain Fetch Service.."); let fetcher = JsonRpSeeConnector::new_from_config_parts( - config.validator_rpc_address, + &config.validator_rpc_address, config.validator_rpc_user.clone(), config.validator_rpc_password.clone(), config.validator_cookie_path.clone(), @@ -735,7 +740,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { match self .indexer - .get_compact_block(&snapshot, types::Height(height)) + .get_compact_block(&snapshot, types::Height(height), PoolTypeFilter::default()) .await { Ok(Some(block)) => Ok(block), @@ -824,7 +829,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { }; match self .indexer - .get_compact_block(&snapshot, types::Height(height)) + .get_compact_block(&snapshot, types::Height(height), PoolTypeFilter::default()) .await { Ok(Some(block)) => Ok(compact_block_to_nullifiers(block)), @@ -889,131 +894,104 @@ impl LightWalletIndexer for FetchServiceSubscriber { &self, request: BlockRange, ) -> Result { - let mut start: u32 = match request.start { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument( - "Error: Start height out of range. Failed to convert to u32.", - ), - )); - } - }, - None => { - return Err(FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument("Error: No start height given."), - )); - } - }; - let mut end: u32 = match request.end { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument( - "Error: End height out of range. Failed to convert to u32.", - ), - )); - } - }, - None => { - return Err(FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument("Error: No start height given."), - )); - } - }; - let rev_order = if start > end { - (start, end) = (end, start); - true - } else { - false - }; + let validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) + .map_err(FetchServiceError::from)?; + + let pool_type_filter = PoolTypeFilter::new_from_pool_types(&validated_request.pool_types()) + .map_err(GetBlockRangeError::PoolTypeArgumentError) + .map_err(FetchServiceError::from)?; + + // Note conversion here is safe due to the use of [`ValidatedBlockRangeRequest::new_from_block_range`] + let start = validated_request.start() as u32; + let end = validated_request.end() as u32; + let fetch_service_clone = self.clone(); let service_timeout = self.config.service.timeout; let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); + tokio::spawn(async move { - let timeout = timeout(time::Duration::from_secs((service_timeout*4) as u64), async { + let timeout_result = timeout( + time::Duration::from_secs((service_timeout * 4) as u64), + async { let snapshot = fetch_service_clone.indexer.snapshot_nonfinalized_state(); - let chain_height = snapshot.best_tip.height.0; - for height in start..=end { - let height = if rev_order { - end - (height - start) - } else { - height - }; - match fetch_service_clone.indexer.get_compact_block( - &snapshot, - types::Height(height), - ).await { - Ok(Some(block)) => { - if channel_tx.send(Ok(block)).await.is_err() { - break; - } + let chain_height = snapshot.best_chaintip().height.0; + + match fetch_service_clone + .indexer + .get_compact_block_stream( + &snapshot, + types::Height(start), + types::Height(end), + 
pool_type_filter.clone(), + ) + .await + { + Ok(Some(mut compact_block_stream)) => { + while let Some(stream_item) = compact_block_stream.next().await { + if channel_tx.send(stream_item).await.is_err() { + break; } - Ok(None) => if height >= chain_height { - match channel_tx - .send(Err(tonic::Status::out_of_range(format!( - "Error: Height out of range [{height}]. Height requested is greater than the best chain tip [{chain_height}].", - )))) - .await - - { - Ok(_) => break, - Err(e) => { - warn!("GetBlockRange channel closed unexpectedly: {}", e); - break; - } - } - } else if channel_tx - .send(Err(tonic::Status::unknown("Internal error, Failed to fetch block."))) - .await - .is_err() - { - break; - } + } + } + Ok(None) => { + // Per `get_compact_block_stream` semantics: `None` means at least one bound is above the tip. + let offending_height = if start > chain_height { start } else { end }; + + match channel_tx + .send(Err(tonic::Status::out_of_range(format!( + "Error: Height out of range [{offending_height}]. Height requested is greater than the best chain tip [{chain_height}].", + )))) + .await + { + Ok(_) => {} Err(e) => { - if height >= chain_height { - match channel_tx - .send(Err(tonic::Status::out_of_range(format!( - "Error: Height out of range [{height}]. Height requested is greater than the best chain tip [{chain_height}].", - )))) - .await - - { - Ok(_) => break, - Err(e) => { - warn!("GetBlockRange channel closed unexpectedly: {}", e); - break; - } - } - } else { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. - if channel_tx - .send(Err(tonic::Status::unknown(e.to_string()))) - .await - .is_err() - { - break; - } + warn!("GetBlockRange channel closed unexpectedly: {}", e); + } + } + } + Err(e) => { + // Preserve previous behaviour: if the request is above tip, surface OutOfRange; + // otherwise return the error (currently exposed for dev). + if start > chain_height || end > chain_height { + let offending_height = if start > chain_height { start } else { end }; + + match channel_tx + .send(Err(tonic::Status::out_of_range(format!( + "Error: Height out of range [{offending_height}]. Height requested is greater than the best chain tip [{chain_height}].", + )))) + .await + { + Ok(_) => {} + Err(e) => { + warn!("GetBlockRange channel closed unexpectedly: {}", e); } } + } else { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + if channel_tx + .send(Err(tonic::Status::unknown(e.to_string()))) + .await + .is_err() + { + warn!("GetBlockRangeStream closed unexpectedly: {}", e); + } } } - }) - .await; - match timeout { - Ok(_) => {} - Err(_) => { - channel_tx - .send(Err(tonic::Status::deadline_exceeded( - "Error: get_block_range gRPC request timed out.", - ))) - .await - .ok(); } + }, + ) + .await; + + if timeout_result.is_err() { + channel_tx + .send(Err(tonic::Status::deadline_exceeded( + "Error: get_block_range gRPC request timed out.", + ))) + .await + .ok(); } }); + Ok(CompactBlockStream::new(channel_rx)) } @@ -1025,130 +1003,117 @@ impl LightWalletIndexer for FetchServiceSubscriber { &self, request: BlockRange, ) -> Result { - let mut start: u32 = match request.start { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument( - "Error: Start height out of range. 
Failed to convert to u32.", - ), - )); - } - }, - None => { - return Err(FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument("Error: No start height given."), - )); - } - }; - let mut end: u32 = match request.end { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument( - "Error: End height out of range. Failed to convert to u32.", - ), - )); - } - }, - None => { - return Err(FetchServiceError::TonicStatusError( - tonic::Status::invalid_argument("Error: No start height given."), - )); - } - }; - let rev_order = if start > end { - (start, end) = (end, start); - true - } else { - false - }; + let validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) + .map_err(FetchServiceError::from)?; + + let pool_type_filter = PoolTypeFilter::new_from_pool_types(&validated_request.pool_types()) + .map_err(GetBlockRangeError::PoolTypeArgumentError) + .map_err(FetchServiceError::from)?; + + // Note conversion here is safe due to the use of [`ValidatedBlockRangeRequest::new_from_block_range`] + let start = validated_request.start() as u32; + let end = validated_request.end() as u32; + let fetch_service_clone = self.clone(); let service_timeout = self.config.service.timeout; let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); + tokio::spawn(async move { - let timeout = timeout(time::Duration::from_secs((service_timeout*4) as u64), async { + let timeout_result = timeout( + time::Duration::from_secs((service_timeout * 4) as u64), + async { let snapshot = fetch_service_clone.indexer.snapshot_nonfinalized_state(); - let chain_height = snapshot.best_tip.height.0; - for height in start..=end { - let height = if rev_order { - end - (height - start) - } else { - height - }; - match fetch_service_clone.indexer.get_compact_block( - &snapshot, - types::Height(height), - ).await { - Ok(Some(block)) => { - if channel_tx.send(Ok(compact_block_to_nullifiers(block))).await.is_err() { - break; - } - } - Ok(None) => if height >= chain_height { - match channel_tx - .send(Err(tonic::Status::out_of_range(format!( - "Error: Height out of range [{height}]. Height requested is greater than the best chain tip [{chain_height}].", - )))) - .await - { - Ok(_) => break, - Err(e) => { - warn!("GetBlockRange channel closed unexpectedly: {}", e); - break; - } - } - } else if channel_tx - .send(Err(tonic::Status::unknown("Internal error, Failed to fetch block."))) + let chain_height = snapshot.best_chaintip().height.0; + + match fetch_service_clone + .indexer + .get_compact_block_stream( + &snapshot, + types::Height(start), + types::Height(end), + pool_type_filter.clone(), + ) + .await + { + Ok(Some(mut compact_block_stream)) => { + while let Some(stream_item) = compact_block_stream.next().await { + match stream_item { + Ok(block) => { + if channel_tx + .send(Ok(compact_block_to_nullifiers(block))) .await .is_err() { break; } - Err(e) => { - if height >= chain_height { - match channel_tx - .send(Err(tonic::Status::out_of_range(format!( - "Error: Height out of range [{height}]. Height requested is greater than the best chain tip [{chain_height}].", - )))) - .await - - { - Ok(_) => break, - Err(e) => { - warn!("GetBlockRange channel closed unexpectedly: {}", e); - break; - } - } - } else { - // TODO: Hide server error from clients before release. Currently useful for dev purposes. 
- if channel_tx - .send(Err(tonic::Status::unknown(e.to_string()))) - .await - .is_err() - { break; } } } } } - }) - .await; - match timeout { - Ok(_) => {} - Err(_) => { - channel_tx - .send(Err(tonic::Status::deadline_exceeded( - "Error: get_block_range gRPC request timed out.", - ))) - .await - .ok(); + } + Err(status) => { + if channel_tx.send(Err(status)).await.is_err() { + break; + } + } + } + } + } + Ok(None) => { + // Per `get_compact_block_stream` semantics: `None` means at least one bound is above the tip. + let offending_height = if start > chain_height { start } else { end }; + + match channel_tx + .send(Err(tonic::Status::out_of_range(format!( + "Error: Height out of range [{offending_height}]. Height requested is greater than the best chain tip [{chain_height}].", + )))) + .await + { + Ok(_) => {} + Err(e) => { + warn!("GetBlockRange channel closed unexpectedly: {}", e); + } + } + } + Err(e) => { + // Preserve previous behaviour: if the request is above tip, surface OutOfRange; + // otherwise return the error (currently exposed for dev). + if start > chain_height || end > chain_height { + let offending_height = if start > chain_height { start } else { end }; + + match channel_tx + .send(Err(tonic::Status::out_of_range(format!( + "Error: Height out of range [{offending_height}]. Height requested is greater than the best chain tip [{chain_height}].", + )))) + .await + { + Ok(_) => {} + Err(e) => { + warn!("GetBlockRange channel closed unexpectedly: {}", e); + } + } + } else { + // TODO: Hide server error from clients before release. Currently useful for dev purposes. + if channel_tx + .send(Err(tonic::Status::unknown(e.to_string()))) + .await + .is_err() + { + warn!("GetBlockRangeStream closed unexpectedly: {}", e); + } + } + } + } + }, + ) + .await; + + if timeout_result.is_err() { + channel_tx + .send(Err(tonic::Status::deadline_exceeded( + "Error: get_block_range gRPC request timed out.", + ))) + .await + .ok(); } }); + Ok(CompactBlockStream::new(channel_rx)) } @@ -1195,9 +1160,8 @@ impl LightWalletIndexer for FetchServiceSubscriber { }) } - /// Return the txids corresponding to the given t-address within the given block range - #[allow(deprecated)] - async fn get_taddress_txids( + /// Return the transactions corresponding to the given t-address within the given block range + async fn get_taddress_transactions( &self, request: TransparentAddressBlockFilter, ) -> Result { @@ -1242,6 +1206,16 @@ impl LightWalletIndexer for FetchServiceSubscriber { Ok(RawTransactionStream::new(receiver)) } + /// Return the txids corresponding to the given t-address within the given block range. + /// This function is deprecated; use `get_taddress_transactions` instead. + #[allow(deprecated)] + async fn get_taddress_txids( + &self, + request: TransparentAddressBlockFilter, + ) -> Result { + self.get_taddress_transactions(request).await + } + /// Returns the total balance for a list of taddrs async fn get_taddress_balance(&self, request: AddressList) -> Result { let taddrs = GetAddressBalanceRequest::new(request.addresses); @@ -1357,29 +1331,40 @@ impl LightWalletIndexer for FetchServiceSubscriber { } } - /// Return the compact transactions currently in the mempool; the results - /// can be a few seconds out of date. If the Exclude list is empty, return - /// all transactions; otherwise return all *except* those in the Exclude list - /// (if any); this allows the client to avoid receiving transactions that it - /// already has (from an earlier call to this rpc).
The transaction IDs in the - /// Exclude list can be shortened to any number of bytes to make the request - /// more bandwidth-efficient; if two or more transactions in the mempool - /// match a shortened txid, they are all sent (none is excluded). Transactions - /// in the exclude list that don't exist in the mempool are ignored. + /// Returns a stream of the compact transaction representation for transactions + /// currently in the mempool. The results of this operation may be a few + /// seconds out of date. If the `exclude_txid_suffixes` list is empty, + /// return all transactions; otherwise return all *except* those in the + /// `exclude_txid_suffixes` list (if any); this allows the client to avoid + /// receiving transactions that it already has (from an earlier call to this + /// RPC). The transaction IDs in the `exclude_txid_suffixes` list can be + /// shortened to any number of bytes to make the request more + /// bandwidth-efficient; if two or more transactions in the mempool match a + /// txid suffix, none of the matching transactions are excluded. Txid + /// suffixes in the exclude list that don't match any transactions in the + /// mempool are ignored. #[allow(deprecated)] async fn get_mempool_tx( &self, - request: Exclude, + request: GetMempoolTxRequest, ) -> Result { - let exclude_txids: Vec = request - .txid - .iter() - .map(|txid_bytes| { - // NOTE: the TransactionHash methods cannot be used for this hex encoding as exclusions could be truncated to less than 32 bytes - let reversed_txid_bytes: Vec = txid_bytes.iter().cloned().rev().collect(); - hex::encode(&reversed_txid_bytes) - }) - .collect(); + let mut exclude_txids: Vec = vec![]; + + for (i, excluded_id) in request.exclude_txid_suffixes.iter().enumerate() { + if excluded_id.len() > 32 { + return Err(FetchServiceError::TonicStatusError( + tonic::Status::invalid_argument(format!( + "Error: excluded txid {} is larger than 32 bytes", + i + )), + )); + } + + // NOTE: the TransactionHash methods cannot be used for this hex encoding as exclusions could be truncated to less than 32 bytes + let reversed_txid_bytes: Vec = excluded_id.iter().cloned().rev().collect(); + let hex_string_txid: String = hex::encode(&reversed_txid_bytes); + exclude_txids.push(hex_string_txid); + } let mempool = self.indexer.clone(); let service_timeout = self.config.service.timeout; @@ -1767,6 +1752,7 @@ impl LightWalletIndexer for FetchServiceSubscriber { })? .into(), ); + let sapling_activation_height = blockchain_info .upgrades() .get(&sapling_id) @@ -1777,6 +1763,16 @@ impl LightWalletIndexer for FetchServiceSubscriber { ) .to_string(); + let nu_info = blockchain_info + .upgrades() + .last() + .expect("Expected validator to have a consenus activated.") + .1 + .into_parts(); + + let nu_name = nu_info.0; + let nu_height = nu_info.1; + Ok(LightdInfo { version: self.data.build_info().version(), vendor: "ZingoLabs ZainoD".to_string(), @@ -1792,6 +1788,10 @@ impl LightWalletIndexer for FetchServiceSubscriber { estimated_height: blockchain_info.estimated_height().0 as u64, zcashd_build: self.data.zebra_build(), zcashd_subversion: self.data.zebra_subversion(), + donation_address: "".to_string(), + upgrade_name: nu_name.to_string(), + upgrade_height: nu_height.0 as u64, + lightwallet_protocol_version: "v0.4.0".to_string(), }) } diff --git a/zaino-state/src/backends/state.rs b/zaino-state/src/backends/state.rs index 80e759f39..777fa05ff 100644 --- a/zaino-state/src/backends/state.rs +++ b/zaino-state/src/backends/state.rs @@ -1,8 +1,8 @@ //! 
Zcash chain fetch and tx submission service backed by Zebras [`ReadStateService`]. use crate::{ - chain_index::NonFinalizedSnapshot, error::ChainIndexError, ChainIndex as _, - NodeBackedChainIndex, NodeBackedChainIndexSubscriber, State, + chain_index::NonFinalizedSnapshot, error::ChainIndexError, NodeBackedChainIndex, + NodeBackedChainIndexSubscriber, State, }; #[allow(deprecated)] use crate::{ @@ -16,16 +16,18 @@ use crate::{ indexer::{ handle_raw_transaction, IndexerSubscriber, LightWalletIndexer, ZcashIndexer, ZcashService, }, - local_cache::{compact_block_to_nullifiers, BlockCache, BlockCacheSubscriber}, + local_cache::{ + compact_block_to_nullifiers, compact_block_with_pool_types, BlockCache, + BlockCacheSubscriber, + }, status::{AtomicStatus, Status, StatusType}, stream::{ AddressStream, CompactBlockStream, CompactTransactionStream, RawTransactionStream, UtxoReplyStream, }, - utils::{blockid_to_hashorheight, get_build_info, ServiceMetadata}, + utils::{get_build_info, ServiceMetadata}, BackendType, MempoolKey, }; - use nonempty::NonEmpty; use tokio_stream::StreamExt as _; use zaino_fetch::{ @@ -46,10 +48,14 @@ use zaino_fetch::{ use zaino_proto::proto::{ compact_formats::CompactBlock, service::{ - AddressList, Balance, BlockId, BlockRange, Exclude, GetAddressUtxosArg, - GetAddressUtxosReply, GetAddressUtxosReplyList, LightdInfo, PingResponse, RawTransaction, + AddressList, Balance, BlockId, BlockRange, GetAddressUtxosArg, GetAddressUtxosReply, + GetAddressUtxosReplyList, GetMempoolTxRequest, LightdInfo, PingResponse, RawTransaction, SendResponse, TransparentAddressBlockFilter, TreeState, TxFilter, }, + utils::{ + blockid_to_hashorheight, pool_types_from_vector, PoolTypeError, PoolTypeFilter, + ValidatedBlockRangeRequest, + }, }; use zcash_protocol::consensus::NetworkType; @@ -181,15 +187,15 @@ impl ZcashService for StateService { async fn spawn(config: StateServiceConfig) -> Result { info!("Spawning State Service.."); - let json_rpc_connector = JsonRpSeeConnector::new_from_config_parts( - config.validator_rpc_address, + let rpc_client = JsonRpSeeConnector::new_from_config_parts( + &config.validator_rpc_address, config.validator_rpc_user.clone(), config.validator_rpc_password.clone(), config.validator_cookie_path.clone(), ) .await?; - let zebra_build_data = json_rpc_connector.get_info().await?; + let zebra_build_data = rpc_client.get_info().await?; // This const is optional, as the build script can only // generate it from hash-based dependencies. 
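For orientation while reading the state-backend changes: both backends now pass pool selections around as a `PoolTypeFilter`. A small round-trip sketch using the `zaino_proto::proto::utils` helpers introduced earlier (only relative behaviour is asserted, not concrete enum discriminants):

```rust
use zaino_proto::proto::{
    service::PoolType,
    utils::{pool_types_into_i32_vec, PoolTypeFilter},
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Wire values as they would arrive in `BlockRange.pool_types`.
    let raw: Vec<i32> = vec![PoolType::Transparent as i32, PoolType::Sapling as i32];

    // Build a filter; `PoolType::Invalid` or out-of-range values would be rejected here.
    let filter = PoolTypeFilter::new_from_slice(&raw)
        .map_err(|e| format!("bad pool_types: {e:?}"))?;
    assert!(filter.includes_transparent());
    assert!(filter.includes_sapling());
    assert!(!filter.includes_orchard());

    // And back to the wire representation, each pool at most once.
    let round_trip = pool_types_into_i32_vec(filter.to_pool_types_vector());
    assert_eq!(round_trip, raw);
    Ok(())
}
```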
@@ -240,7 +246,7 @@ impl ZcashService for StateService { // Wait for ReadStateService to catch up to primary database: loop { - let server_height = json_rpc_connector.get_blockchain_info().await?.blocks; + let server_height = rpc_client.get_blockchain_info().await?.blocks; info!("got blockchain info!"); let syncer_response = read_state_service @@ -265,7 +271,7 @@ impl ZcashService for StateService { } let block_cache = BlockCache::spawn( - &json_rpc_connector, + &rpc_client, Some(&read_state_service), config.clone().into(), ) @@ -273,7 +279,7 @@ impl ZcashService for StateService { let mempool_source = ValidatorConnector::State(crate::chain_index::source::State { read_state_service: read_state_service.clone(), - mempool_fetcher: json_rpc_connector.clone(), + mempool_fetcher: rpc_client.clone(), network: config.network, }); @@ -282,7 +288,7 @@ impl ZcashService for StateService { let chain_index = NodeBackedChainIndex::new( ValidatorConnector::State(State { read_state_service: read_state_service.clone(), - mempool_fetcher: json_rpc_connector.clone(), + mempool_fetcher: rpc_client.clone(), network: config.network, }), config.clone().into(), @@ -294,7 +300,7 @@ impl ZcashService for StateService { chain_tip_change, read_state_service, sync_task_handle: Some(Arc::new(sync_task_handle)), - rpc_client: json_rpc_connector.clone(), + rpc_client: rpc_client.clone(), block_cache, mempool, indexer: chain_index, @@ -558,50 +564,38 @@ impl StateServiceSubscriber { request: BlockRange, trim_non_nullifier: bool, ) -> Result { - let mut start: u32 = match request.start { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(StateServiceError::TonicStatusError( - tonic::Status::invalid_argument( - "Error: Start height out of range. Failed to convert to u32.", - ), - )); - } - }, - None => { - return Err(StateServiceError::TonicStatusError( - tonic::Status::invalid_argument("Error: No start height given."), - )); - } - }; - let mut end: u32 = match request.end { - Some(block_id) => match block_id.height.try_into() { - Ok(height) => height, - Err(_) => { - return Err(StateServiceError::TonicStatusError( - tonic::Status::invalid_argument( - "Error: End height out of range. Failed to convert to u32.", - ), - )); - } - }, - None => { - return Err(StateServiceError::TonicStatusError( - tonic::Status::invalid_argument("Error: No start height given."), - )); - } - }; - let lowest_to_highest = if start > end { - (start, end) = (end, start); + let mut validated_request = ValidatedBlockRangeRequest::new_from_block_range(&request) + .map_err(StateServiceError::from)?; + + // FIXME: this should be changed but this logic is hard to understand and we lack tests. 
+ // we will maintain the behaviour with less smelly code + let lowest_to_highest = if validated_request.is_reverse_ordered() { + validated_request.reverse(); false } else { true }; - let chain_height = self.block_cache.get_chain_height().await?.0; + + let start = validated_request.start(); + let end = validated_request.end(); + let chain_height: u64 = self.block_cache.get_chain_height().await?.0 as u64; let fetch_service_clone = self.clone(); let service_timeout = self.config.service.timeout; let (channel_tx, channel_rx) = mpsc::channel(self.config.service.channel_size as usize); + + let pool_types = match pool_types_from_vector(&request.pool_types) { + Ok(p) => Ok(p), + Err(e) => Err(match e { + PoolTypeError::InvalidPoolType => StateServiceError::UnhandledRpcError( + "PoolType::Invalid specified as argument in `BlockRange`.".to_string(), + ), + PoolTypeError::UnknownPoolType(t) => StateServiceError::UnhandledRpcError(format!( + "Unknown value specified in `BlockRange`. Value '{}' is not a known PoolType.", + t + )), + }), + }?; + // FIX: find out why there's repeated code fetching the chain tip and then the rest tokio::spawn(async move { let timeout = timeout( time::Duration::from_secs((service_timeout * 4) as u64), @@ -613,10 +607,12 @@ impl StateServiceSubscriber { .await { Ok(mut block) => { - if trim_non_nullifier { - block = compact_block_to_nullifiers(block); - } - Ok(block) + if trim_non_nullifier { + block = compact_block_to_nullifiers(block); + } else { + block = compact_block_with_pool_types(block, &pool_types); + } + Ok(block) } Err(e) => { if end >= chain_height { @@ -651,6 +647,8 @@ impl StateServiceSubscriber { Ok(mut block) => { if trim_non_nullifier { block = compact_block_to_nullifiers(block); + } else { + block = compact_block_with_pool_types(block, &pool_types); } Ok(block) } @@ -1764,7 +1762,11 @@ impl ZcashIndexer for StateServiceSubscriber { let snapshot = self.indexer.snapshot_nonfinalized_state(); let compact_block = self .indexer - .get_compact_block(&snapshot, chain_types::Height(tx.height.0)) + .get_compact_block( + &snapshot, + chain_types::Height(tx.height.0), + PoolTypeFilter::includes_all(), + ) .await? 
.ok_or_else(|| ChainIndexError::database_hole(tx.height.0))?; let tx_object = TransactionObject::from_transaction( @@ -1955,7 +1957,10 @@ impl LightWalletIndexer for StateServiceSubscriber { .get_compact_block(hash_or_height.to_string()) .await { - Ok(block) => Ok(block), + Ok(block) => Ok(compact_block_with_pool_types( + block, + &PoolTypeFilter::default().to_pool_types_vector(), + )), Err(e) => { self.error_get_block(BlockCacheError::Custom(e.to_string()), height as u32) .await @@ -1977,7 +1982,7 @@ impl LightWalletIndexer for StateServiceSubscriber { match self .indexer - .get_compact_block(&snapshot, block_height) + .get_compact_block(&snapshot, block_height, PoolTypeFilter::default()) .await { Ok(Some(block)) => Ok(compact_block_to_nullifiers(block)), @@ -2043,8 +2048,8 @@ impl LightWalletIndexer for StateServiceSubscriber { }) } - /// Return the txids corresponding to the given t-address within the given block range - async fn get_taddress_txids( + /// Return the transactions corresponding to the given t-address within the given block range + async fn get_taddress_transactions( &self, request: TransparentAddressBlockFilter, ) -> Result { @@ -2088,6 +2093,15 @@ impl LightWalletIndexer for StateServiceSubscriber { Ok(RawTransactionStream::new(receiver)) } + /// Return the txids corresponding to the given t-address within the given block range + /// This function is deprecated. Use `get_taddress_transactions`. + async fn get_taddress_txids( + &self, + request: TransparentAddressBlockFilter, + ) -> Result { + self.get_taddress_transactions(request).await + } + /// Returns the total balance for a list of taddrs async fn get_taddress_balance( &self, @@ -2217,16 +2231,44 @@ impl LightWalletIndexer for StateServiceSubscriber { /// in the exclude list that don't exist in the mempool are ignored. 
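+    ///
+    /// A minimal sketch of the suffix encoding performed below (illustrative
+    /// values, not part of the API):
+    /// ```ignore
+    /// let suffix: Vec<u8> = vec![0xaa, 0xbb, 0xcc, 0xdd];
+    /// // Suffix bytes are reversed into display order before hex encoding.
+    /// let reversed: Vec<u8> = suffix.iter().cloned().rev().collect();
+    /// assert_eq!(hex::encode(&reversed), "ddccbbaa");
+    /// ```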
    async fn get_mempool_tx(
        &self,
-        request: Exclude,
+        request: GetMempoolTxRequest,
    ) -> Result {
-        let exclude_txids: Vec<String> = request
-            .txid
-            .iter()
-            .map(|txid_bytes| {
-                let reversed_txid_bytes: Vec<u8> = txid_bytes.iter().cloned().rev().collect();
-                hex::encode(&reversed_txid_bytes)
-            })
-            .collect();
+        let mut exclude_txids: Vec<String> = vec![];
+
+        for (i, excluded_id) in request.exclude_txid_suffixes.iter().enumerate() {
+            if excluded_id.len() > 32 {
+                return Err(StateServiceError::TonicStatusError(
+                    tonic::Status::invalid_argument(format!(
+                        "Error: excluded txid {} is larger than 32 bytes",
+                        i
+                    )),
+                ));
+            }
+
+            // NOTE: the TransactionHash methods cannot be used for this hex encoding as exclusions could be truncated to less than 32 bytes
+            let reversed_txid_bytes: Vec<u8> = excluded_id.iter().cloned().rev().collect();
+            let hex_string_txid: String = hex::encode(&reversed_txid_bytes);
+            exclude_txids.push(hex_string_txid);
+        }
+
+        let pool_types = match PoolTypeFilter::new_from_slice(&request.pool_types) {
+            Ok(pool_type_filter) => pool_type_filter,
+            Err(PoolTypeError::InvalidPoolType) => {
+                return Err(StateServiceError::TonicStatusError(
+                    tonic::Status::invalid_argument(
+                        "Error: An invalid `PoolType` was found".to_string(),
+                    ),
+                ))
+            }
+            Err(PoolTypeError::UnknownPoolType(unknown_pool_type)) => {
+                return Err(StateServiceError::TonicStatusError(
+                    tonic::Status::invalid_argument(format!(
+                        "Error: Unknown `PoolType` {} was found",
+                        unknown_pool_type
+                    )),
+                ))
+            }
+        };

        let mempool = self.mempool.clone();
        let service_timeout = self.config.service.timeout;
@@ -2265,7 +2307,7 @@ impl LightWalletIndexer for StateServiceSubscriber {
                            .send(
                                transaction
                                    .1
-                                    .to_compact(0)
+                                    .to_compact_tx(None, &pool_types)
                                    .map_err(|e| tonic::Status::unknown(e.to_string())),
                            )
                            .await
@@ -2529,6 +2571,16 @@ impl LightWalletIndexer for StateServiceSubscriber {
            )
            .to_string();

+        let nu_info = blockchain_info
+            .upgrades()
+            .last()
+            .expect("Expected validator to have a consensus upgrade activated.")
+            .1
+            .into_parts();
+
+        let nu_name = nu_info.0;
+        let nu_height = nu_info.1;
+
        Ok(LightdInfo {
            version: self.data.build_info().version(),
            vendor: "ZingoLabs ZainoD".to_string(),
@@ -2544,6 +2596,10 @@ impl LightWalletIndexer for StateServiceSubscriber {
            estimated_height: blockchain_info.estimated_height().0 as u64,
            zcashd_build: self.data.zebra_build(),
            zcashd_subversion: self.data.zebra_subversion(),
+            donation_address: "".to_string(),
+            upgrade_name: nu_name.to_string(),
+            upgrade_height: nu_height.0 as u64,
+            lightwallet_protocol_version: "v0.4.0".to_string(),
        })
    }
diff --git a/zaino-state/src/chain_index.rs b/zaino-state/src/chain_index.rs
index 0498d3578..ecb3bf513 100644
--- a/zaino-state/src/chain_index.rs
+++ b/zaino-state/src/chain_index.rs
@@ -15,8 +15,9 @@ use crate::chain_index::non_finalised_state::BestTip;
 use crate::chain_index::types::db::metadata::MempoolInfo;
 use crate::chain_index::types::{BestChainLocation, NonBestChainLocation};
 use crate::error::{ChainIndexError, ChainIndexErrorKind, FinalisedStateError};
+use crate::local_cache::compact_block_with_pool_types;
 use crate::status::Status;
-use crate::{AtomicStatus, StatusType, SyncError};
+use crate::{AtomicStatus, CompactBlockStream, StatusType, SyncError};
 use crate::{IndexedBlock, TransactionHash};
 use std::collections::HashSet;
 use std::{sync::Arc, time::Duration};
@@ -27,6 +28,7 @@ use non_finalised_state::NonfinalizedBlockCacheSnapshot;
 use source::{BlockchainSource, ValidatorConnector};
 use tokio_stream::StreamExt;
 use tracing::info;
+use
zaino_proto::proto::utils::PoolTypeFilter; use zebra_chain::parameters::ConsensusBranchId; pub use zebra_chain::parameters::Network as ZebraNetwork; use zebra_chain::serialization::ZcashSerialize; @@ -201,19 +203,48 @@ pub trait ChainIndex { /// Returns the *compact* block for the given height. /// - /// Returns None if the specified height - /// is greater than the snapshot's tip + /// Returns `None` if the specified `height` is greater than the snapshot's tip. + /// + /// ## Pool filtering /// - /// TODO: Add range fetch method or update this? + /// - `pool_types` controls which per-transaction components are populated. + /// - Transactions that contain no elements in any requested pool are omitted from `vtx`. + /// The original transaction index is preserved in `CompactTx.index`. + /// - `PoolTypeFilter::default()` preserves the legacy behaviour (only Sapling and Orchard + /// components are populated). #[allow(clippy::type_complexity)] fn get_compact_block( &self, nonfinalized_snapshot: &Self::Snapshot, height: types::Height, + pool_types: PoolTypeFilter, ) -> impl std::future::Future< Output = Result, Self::Error>, >; + /// Streams *compact* blocks for an inclusive height range. + /// + /// Returns `None` if the requested range is entirely above the snapshot's tip. + /// + /// - The stream covers `[start_height, end_height]` (inclusive). + /// - If `start_height <= end_height` the stream is ascending; otherwise it is descending. + /// + /// ## Pool filtering + /// + /// - `pool_types` controls which per-transaction components are populated. + /// - Transactions that contain no elements in any requested pool are omitted from `vtx`. + /// The original transaction index is preserved in `CompactTx.index`. + /// - `PoolTypeFilter::default()` preserves the legacy behaviour (only Sapling and Orchard + /// components are populated). + #[allow(clippy::type_complexity)] + fn get_compact_block_stream( + &self, + nonfinalized_snapshot: &Self::Snapshot, + start_height: types::Height, + end_height: types::Height, + pool_types: PoolTypeFilter, + ) -> impl std::future::Future, Self::Error>>; + /// Finds the newest ancestor of the given block on the main /// chain, or the block itself if it is on the main chain. fn find_fork_point( @@ -507,10 +538,15 @@ impl NodeBackedChainIndex { if status.load() == StatusType::Closing { break; } + let handle_error = |e| { + tracing::error!("Sync failure: {e:?}. Shutting down."); + status.store(StatusType::CriticalError); + e + }; status.store(StatusType::Syncing); // Sync nfs to chain tip, trimming blocks to finalized tip. - nfs.sync(fs.clone()).await?; + nfs.sync(fs.clone()).await.map_err(handle_error)?; // Sync fs to chain tip - 100. { @@ -520,7 +556,7 @@ impl NodeBackedChainIndex { .to_reader() .db_height() .await - .map_err(|_e| SyncError::CannotReadFinalizedState)? + .map_err(|_e| handle_error(SyncError::CannotReadFinalizedState))? .unwrap_or(types::Height(0)) .0 + 100) @@ -529,7 +565,7 @@ impl NodeBackedChainIndex { .to_reader() .db_height() .await - .map_err(|_e| SyncError::CannotReadFinalizedState)? + .map_err(|_e| handle_error(SyncError::CannotReadFinalizedState))? 
.map(|height| height + 1) .unwrap_or(types::Height(0)); let next_finalized_block = snapshot @@ -540,11 +576,11 @@ impl NodeBackedChainIndex { .get(&(next_finalized_height)) .ok_or(SyncError::CompetingSyncProcess)?, ) - .ok_or(SyncError::CompetingSyncProcess)?; + .ok_or_else(|| handle_error(SyncError::CompetingSyncProcess))?; // TODO: Handle write errors better (fix db and continue) fs.write_block(next_finalized_block.clone()) .await - .map_err(|_e| SyncError::CompetingSyncProcess)?; + .map_err(|_e| handle_error(SyncError::CompetingSyncProcess))?; } } status.store(StatusType::Ready); @@ -601,6 +637,12 @@ impl NodeBackedChainIndexSubscriber { .transpose() } + /** + Searches finalized and non-finalized chains for any blocks containing the transaction. + Ordered with finalized blocks first. + + WARNING: there might be multiple chains, each containing a block with the transaction. + */ async fn blocks_containing_transaction<'snapshot, 'self_lt, 'iter>( &'self_lt self, snapshot: &'snapshot NonfinalizedBlockCacheSnapshot, @@ -610,35 +652,32 @@ impl NodeBackedChainIndexSubscriber { 'snapshot: 'iter, 'self_lt: 'iter, { - Ok(snapshot - .blocks - .values() - .filter_map(move |block| { + let finalized_blocks_containing_transaction = match self + .finalized_state + .get_tx_location(&types::TransactionHash(txid)) + .await? + { + Some(tx_location) => { + self.finalized_state + .get_chain_block(crate::Height(tx_location.block_height())) + .await? + } + + None => None, + } + .into_iter(); + let non_finalized_blocks_containing_transaction = + snapshot.blocks.values().filter_map(move |block| { block.transactions().iter().find_map(|transaction| { if transaction.txid().0 == txid { - Some(block) + Some(block.clone()) } else { None } }) - }) - .cloned() - .chain( - match self - .finalized_state - .get_tx_location(&types::TransactionHash(txid)) - .await? - { - Some(tx_location) => { - self.finalized_state - .get_chain_block(crate::Height(tx_location.block_height())) - .await? 
-                }
-
-                None => None,
-            }
-            .into_iter(),
-        ))
+            });
+        Ok(finalized_blocks_containing_transaction
+            .chain(non_finalized_blocks_containing_transaction))
     }
 }
@@ -670,7 +709,12 @@ impl ChainIndex for NodeBackedChainIndexSubscriber
    ) -> Result<Option<types::Height>, Self::Error> {
        match nonfinalized_snapshot.blocks.get(&hash).cloned() {
-            Some(block) => Ok(block.index().height()),
+            Some(block) => Ok(nonfinalized_snapshot
+                .heights_to_hashes
+                .values()
+                .find(|h| **h == hash)
+                // Canonical height is None for blocks not on the best chain
+                .map(|_| block.index().height())),
            None => match self.finalized_state.get_block_height(hash).await {
                Ok(height) => Ok(height),
                Err(_e) => Err(ChainIndexError::database_hole(hash)),
@@ -733,18 +777,36 @@ impl ChainIndex for NodeBackedChainIndexSubscriber
    ) -> Result<Option<CompactBlock>, Self::Error> {
        if height <= nonfinalized_snapshot.best_tip.height {
            Ok(Some(
                match nonfinalized_snapshot.get_chainblock_by_height(&height) {
-                    Some(block) => block.to_compact_block(),
-                    None => match self.finalized_state.get_compact_block(height).await {
+                    Some(block) => compact_block_with_pool_types(
+                        block.to_compact_block(),
+                        &pool_types.to_pool_types_vector(),
+                    ),
+                    None => match self
+                        .finalized_state
+                        .get_compact_block(height, pool_types)
+                        .await
+                    {
                        Ok(block) => block,
                        Err(_e) => return Err(ChainIndexError::database_hole(height)),
                    },
@@ -755,6 +817,169 @@ impl ChainIndex for NodeBackedChainIndexSubscriber
+    ) -> Result<Option<CompactBlockStream>, Self::Error> {
+        let chain_tip_height = nonfinalized_snapshot.best_chaintip().height;
+
+        if start_height > chain_tip_height || end_height > chain_tip_height {
+            return Ok(None);
+        }
+
+        // The nonfinalized cache holds the tip block plus the previous 99 blocks (100 total),
+        // so the lowest possible cached height is `tip - 99` (saturating at 0).
+        let lowest_nonfinalized_height = types::Height(chain_tip_height.0.saturating_sub(99));
+
+        let is_ascending = start_height <= end_height;
+
+        let pool_types_vector = pool_types.to_pool_types_vector();
+
+        // Pre-create any finalized-state stream(s) we will need so that errors are returned
+        // from this method (not deferred into the spawned task).
+        let finalized_stream: Option<CompactBlockStream> = if is_ascending {
+            if start_height < lowest_nonfinalized_height {
+                let finalized_end_height = types::Height(std::cmp::min(
+                    end_height.0,
+                    lowest_nonfinalized_height.0.saturating_sub(1),
+                ));
+
+                if start_height <= finalized_end_height {
+                    Some(
+                        self.finalized_state
+                            .get_compact_block_stream(
+                                start_height,
+                                finalized_end_height,
+                                pool_types.clone(),
+                            )
+                            .await
+                            .map_err(ChainIndexError::from)?,
+                    )
+                } else {
+                    None
+                }
+            } else {
+                None
+            }
+        // Serve in reverse order.
+        } else if end_height < lowest_nonfinalized_height {
+            let finalized_start_height = if start_height < lowest_nonfinalized_height {
+                start_height
+            } else {
+                types::Height(lowest_nonfinalized_height.0.saturating_sub(1))
+            };
+
+            Some(
+                self.finalized_state
+                    .get_compact_block_stream(
+                        finalized_start_height,
+                        end_height,
+                        pool_types.clone(),
+                    )
+                    .await
+                    .map_err(ChainIndexError::from)?,
+            )
+        } else {
+            None
+        };
+
+        let nonfinalized_snapshot = nonfinalized_snapshot.clone();
+        // TODO: Investigate whether channel size should be changed, added to config, or set dynamically based on resources.
+        let (channel_sender, channel_receiver) = tokio::sync::mpsc::channel(128);
+
+        tokio::spawn(async move {
+            if is_ascending {
+                // 1) Finalized segment (if any), ascending.
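+                // (The finalized-state stream, if any, was created before this task
+                // was spawned so that stream-creation errors are returned from the
+                // method itself rather than surfacing asynchronously here.)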
+ if let Some(mut finalized_stream) = finalized_stream { + while let Some(stream_item) = finalized_stream.next().await { + if channel_sender.send(stream_item).await.is_err() { + return; + } + } + } + + // 2) Nonfinalized segment, ascending. + let nonfinalized_start_height = + types::Height(std::cmp::max(start_height.0, lowest_nonfinalized_height.0)); + + for height_value in nonfinalized_start_height.0..=end_height.0 { + let Some(indexed_block) = nonfinalized_snapshot + .get_chainblock_by_height(&types::Height(height_value)) + else { + let _ = channel_sender + .send(Err(tonic::Status::internal(format!( + "Internal error, missing nonfinalized block at height [{height_value}].", + )))) + .await; + return; + }; + let compact_block = compact_block_with_pool_types( + indexed_block.to_compact_block(), + &pool_types_vector, + ); + if channel_sender.send(Ok(compact_block)).await.is_err() { + return; + } + } + } else { + // 1) Nonfinalized segment, descending. + if start_height >= lowest_nonfinalized_height { + let nonfinalized_end_height = + types::Height(std::cmp::max(end_height.0, lowest_nonfinalized_height.0)); + + for height_value in (nonfinalized_end_height.0..=start_height.0).rev() { + let Some(indexed_block) = nonfinalized_snapshot + .get_chainblock_by_height(&types::Height(height_value)) + else { + let _ = channel_sender + .send(Err(tonic::Status::internal(format!( + "Internal error, missing nonfinalized block at height [{height_value}].", + )))) + .await; + return; + }; + let compact_block = compact_block_with_pool_types( + indexed_block.to_compact_block(), + &pool_types_vector, + ); + if channel_sender.send(Ok(compact_block)).await.is_err() { + return; + } + } + } + + // 2) Finalized segment (if any), descending. + if let Some(mut finalized_stream) = finalized_stream { + while let Some(stream_item) = finalized_stream.next().await { + if channel_sender.send(stream_item).await.is_err() { + return; + } + } + } + } + }); + + Ok(Some(CompactBlockStream::new(channel_receiver))) + } + /// Finds the newest ancestor of the given block on the main /// chain, or the block itself if it is on the main chain. fn find_fork_point( @@ -767,8 +992,8 @@ impl ChainIndex for NodeBackedChainIndexSubscriber ChainIndex for NodeBackedChainIndexSubscriber ChainIndex for NodeBackedChainIndexSubscriber ChainIndex for NodeBackedChainIndexSubscriber ChainIndex for NodeBackedChainIndexSubscriber>(); + let start_of_nonfinalized = snapshot.heights_to_hashes.keys().min().unwrap(); let mut best_chain_block = blocks_containing_transaction .iter() - .find_map(|block| BestChainLocation::try_from(block).ok()); + .find(|block| { + snapshot.heights_to_hashes.get(&block.height()) == Some(block.hash()) + || block.height() < *start_of_nonfinalized + // this block is either in the best chain ``heights_to_hashes`` or finalized. + }) + .map(|block| BestChainLocation::Block(*block.hash(), block.height())); let mut non_best_chain_blocks: HashSet = blocks_containing_transaction .iter() - .filter_map(|block| NonBestChainLocation::try_from(block).ok()) + .filter(|block| { + snapshot.heights_to_hashes.get(&block.height()) != Some(block.hash()) + && block.height() >= *start_of_nonfinalized + }) + .map(|block| NonBestChainLocation::Block(*block.hash(), block.height())) .collect(); let in_mempool = self .mempool @@ -908,6 +1141,8 @@ impl ChainIndex for NodeBackedChainIndexSubscriber ChainIndex for NodeBackedChainIndexSubscriber, + + /// Immutable configuration snapshot used for sync and metadata construction. 
cfg: BlockCacheConfig,
 }

+/// Lifecycle, migration control, and core read/write API for the finalised database.
+///
+/// This `impl` intentionally stays small and policy-heavy:
+/// - version selection and migration orchestration live in [`ZainoDB::spawn`],
+/// - the storage engine details are encapsulated behind [`DbBackend`] and the capability traits,
+/// - higher-level query routing is provided by [`DbReader`].
 impl ZainoDB {
     // ***** DB control *****

-    /// Spawns a ZainoDB, opens an existing database if a path is given in the config else creates a new db.
+    /// Spawns a `ZainoDB` instance.
+    ///
+    /// This method:
+    /// 1. Detects the on-disk database version (if any) using [`ZainoDB::try_find_current_db_version`].
+    /// 2. Selects a target schema version from `cfg.db_version`.
+    /// 3. Opens the existing database at the detected version, or creates a new database at the
+    ///    target version.
+    /// 4. If an existing database is older than the target (`current_version < target_version`),
+    ///    runs migrations using [`migrations::MigrationManager`].
+    ///
+    /// ## Version selection rules
+    /// - `cfg.db_version == 0` targets `DbVersion { 0, 0, 0 }` (legacy layout).
+    /// - `cfg.db_version == 1` targets `DbVersion { 1, 0, 0 }` (current layout).
+    /// - Any other value returns an error.
     ///
-    /// Peeks at the db metadata store to load correct database version.
+    /// ## Migrations
+    /// Migrations are invoked only when a database already exists on disk and the opened database
+    /// reports a lower version than the configured target.
+    ///
+    /// Migrations may require access to chain data to rebuild indices. For that reason, a
+    /// [`BlockchainSource`] is provided here and passed into the migration manager.
+    ///
+    /// ## Errors
+    /// Returns [`FinalisedStateError`] if:
+    /// - the configured target version is unsupported,
+    /// - the on-disk database version is unsupported,
+    /// - opening or creating the database fails,
+    /// - or any migration step fails.
     pub(crate) async fn spawn(
         cfg: BlockCacheConfig,
         source: T,
@@ -117,17 +342,35 @@ impl ZainoDB {
         Ok(Self { db: router, cfg })
     }

-    /// Gracefully shuts down the running ZainoDB, closing all child processes.
+    /// Gracefully shuts down the running database backend(s).
+    ///
+    /// This delegates to the router, which shuts down:
+    /// - the primary backend, and
+    /// - any shadow backend currently present (during migrations).
+    ///
+    /// After this call returns `Ok(())`, database files may still remain on disk; shutdown does not
+    /// delete data. (Deletion of old versions is handled by migrations when applicable.)
     pub(crate) async fn shutdown(&self) -> Result<(), FinalisedStateError> {
         self.db.shutdown().await
     }

-    /// Returns the status of the running ZainoDB.
+    /// Returns the runtime status of the serving database.
+    ///
+    /// This status is provided by the backend implementing [`capability::DbCore::status`]. During
+    /// migrations, the router determines which backend serves `READ_CORE`, and the status reflects
+    /// that routing decision.
     pub(crate) fn status(&self) -> StatusType {
         self.db.status()
     }

-    /// Waits until the ZainoDB returns a Ready status.
+    /// Waits until the database reports [`StatusType::Ready`].
+    ///
+    /// This polls the router at a fixed interval (100ms) using a Tokio timer. The polling loop uses
+    /// `MissedTickBehavior::Delay` to avoid catch-up bursts under load or when the runtime is
+    /// stalled.
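+    ///
+    /// A usage sketch (hypothetical caller, assuming a configured `cfg` and
+    /// `source`; for illustration only):
+    /// ```ignore
+    /// let db = ZainoDB::spawn(cfg, source).await?;
+    /// db.wait_until_ready().await;
+    /// // `db.status()` now reports `StatusType::Ready`.
+    /// ```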
+ /// + /// Call this after [`ZainoDB::spawn`] if downstream services require the database to be fully + /// initialised before handling requests. pub(crate) async fn wait_until_ready(&self) { let mut ticker = interval(Duration::from_millis(100)); ticker.set_missed_tick_behavior(MissedTickBehavior::Delay); @@ -139,21 +382,38 @@ impl ZainoDB { } } - /// Creates a read-only viewer onto the running ZainoDB. + /// Creates a read-only view onto the running database. /// - /// NOTE: **ALL** chain fetch should use DbReader instead of directly using ZainoDB. + /// All chain fetches should be performed through [`DbReader`] rather than calling read methods + /// directly on `ZainoDB`. pub(crate) fn to_reader(self: &Arc) -> DbReader { DbReader { inner: Arc::clone(self), } } - /// Look for known dirs to find current db version. + /// Attempts to detect the current on-disk database version from the filesystem layout. + /// + /// The detection is intentionally conservative: it returns the **oldest** detected version, + /// because the process may have been terminated mid-migration, leaving both an older primary + /// and a newer shadow directory on disk. + /// + /// ## Recognised layouts + /// + /// - **Legacy v0 layout** + /// - Network directories: `live/`, `test/`, `local/` + /// - Presence check: both `data.mdb` and `lock.mdb` exist + /// - Reported version: `Some(0)` /// - /// The oldest version is returned as the database may have been closed mid migration. + /// - **Versioned v1+ layout** + /// - Network directories: `mainnet/`, `testnet/`, `regtest/` + /// - Version subdirectories: enumerated by [`db::VERSION_DIRS`] (e.g. `"v1"`) + /// - Presence check: both `data.mdb` and `lock.mdb` exist within a version directory + /// - Reported version: `Some(i + 1)` where `i` is the index in `VERSION_DIRS` /// - /// * `Some(version)` – DB exists, version returned. - /// * `None` – directory or key is missing -> fresh DB. + /// Returns: + /// - `Some(version)` if a compatible database directory is found, + /// - `None` if no database is detected (fresh DB creation case). async fn try_find_current_db_version(cfg: &BlockCacheConfig) -> Option { let legacy_dir = match cfg.network.to_zebra_network().kind() { NetworkKind::Mainnet => "live", @@ -186,9 +446,15 @@ impl ZainoDB { None } - /// Returns the internal db backend for the given db capability. + /// Returns the database backend that should serve the requested capability. /// - /// Used by DbReader to route calls to the correct database during major migrations. + /// This is used by [`DbReader`] to route calls to the correct database during major migrations. + /// The router may return either the primary or shadow backend depending on the current routing + /// masks. + /// + /// ## Errors + /// Returns [`FinalisedStateError::FeatureUnavailable`] if neither backend currently serves the + /// requested capability. #[inline] pub(crate) fn backend_for_cap( &self, @@ -199,7 +465,32 @@ impl ZainoDB { // ***** Db Core Write ***** - /// Sync the database to the given height using the given BlockchainSource. + /// Sync the database up to and including `height` using a [`BlockchainSource`]. + /// + /// This method is a convenience ingestion loop that: + /// - determines the current database tip height, + /// - fetches each missing block from the source, + /// - fetches Sapling and Orchard commitment tree roots for each block, + /// - constructs [`BlockMetadata`] and an [`IndexedBlock`], + /// - and appends the block via [`ZainoDB::write_block`]. 
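+    ///
+    /// A rough sketch of that loop's shape (names illustrative, not the exact
+    /// internals; `fetch_block` stands in for the source accessor used here):
+    /// ```ignore
+    /// let mut next = db.db_height().await?.map(|h| h + 1).unwrap_or(Height(0));
+    /// while next <= height {
+    ///     let block = source.fetch_block(next).await?; // hypothetical accessor
+    ///     db.write_block(block).await?;                // append at the tip
+    ///     next = next + 1;
+    /// }
+    /// ```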
+ /// + /// ## Chainwork handling + /// For database versions that expose [`capability::BlockCoreExt`], chainwork is retrieved from + /// stored header data and threaded through `BlockMetadata`. + /// + /// Legacy v0 databases do not expose header/chainwork APIs; in that case, chainwork is set to + /// zero. This is safe only insofar as v0 consumers do not rely on chainwork-dependent features. + /// + /// ## Invariants + /// - Blocks are written strictly in height order. + /// - This method assumes the source provides consistent block and commitment tree data. + /// + /// ## Errors + /// Returns [`FinalisedStateError`] if: + /// - a block is missing from the source at a required height, + /// - commitment tree roots are missing for Sapling or Orchard, + /// - constructing an [`IndexedBlock`] fails, + /// - or any underlying database write fails. pub(crate) async fn sync_to_height( &self, height: Height, @@ -301,20 +592,27 @@ impl ZainoDB { Ok(()) } - /// Writes a block to the database. + /// Appends a single fully constructed [`IndexedBlock`] to the database. + /// + /// This **must** be the next block after the current database tip (`db_tip_height + 1`). + /// Database implementations may assume append-only semantics to maintain secondary index + /// consistency. /// - /// This **MUST** be the *next* block in the chain (db_tip_height + 1). + /// For reorg handling, callers should delete tip blocks using [`ZainoDB::delete_block_at_height`] + /// or [`ZainoDB::delete_block`] before re-appending. pub(crate) async fn write_block(&self, b: IndexedBlock) -> Result<(), FinalisedStateError> { self.db.write_block(b).await } - /// Deletes a block from the database by height. + /// Deletes the block at height `h` from the database. /// - /// This **MUST** be the *top* block in the db. + /// This **must** be the current database tip. Deleting non-tip blocks is not supported because + /// it would require re-writing dependent indices for all higher blocks. /// - /// Uses `delete_block` internally, fails if the block to be deleted cannot be correctly built. - /// If this happens, the block to be deleted must be fetched from the validator and given to `delete_block` - /// to ensure the block has been completely wiped from the database. + /// This method delegates to the backend’s `delete_block_at_height` implementation. If that + /// deletion cannot be completed correctly (for example, if the backend cannot reconstruct all + /// derived index entries needed for deletion), callers must fall back to [`ZainoDB::delete_block`] + /// using an [`IndexedBlock`] fetched from the validator/source to ensure a complete wipe. pub(crate) async fn delete_block_at_height( &self, h: Height, @@ -322,21 +620,33 @@ impl ZainoDB { self.db.delete_block_at_height(h).await } - /// Deletes a given block from the database. + /// Deletes the provided block from the database. /// - /// This **MUST** be the *top* block in the db. + /// This **must** be the current database tip. The provided [`IndexedBlock`] is used to ensure + /// all derived indices created by that block can be removed deterministically. + /// + /// Prefer [`ZainoDB::delete_block_at_height`] when possible; use this method when the backend + /// requires full block contents to correctly reverse all indices. pub(crate) async fn delete_block(&self, b: &IndexedBlock) -> Result<(), FinalisedStateError> { self.db.delete_block(b).await } // ***** DB Core Read ***** - /// Returns the highest block height held in the database. 
+    /// Returns the highest block height stored in the finalised database.
+    ///
+    /// Returns:
+    /// - `Ok(Some(height))` if at least one block is present,
+    /// - `Ok(None)` if the database is empty.
     pub(crate) async fn db_height(&self) -> Result<Option<Height>, FinalisedStateError> {
         self.db.db_height().await
     }

-    /// Returns the block height for the given block hash *if* present in the finalised state.
+    /// Returns the main-chain height for `hash` if the block is present in the finalised database.
+    ///
+    /// Returns:
+    /// - `Ok(Some(height))` if the hash is indexed,
+    /// - `Ok(None)` if the hash is not present (not an error).
     pub(crate) async fn get_block_height(
         &self,
         hash: BlockHash,
@@ -344,7 +654,11 @@ impl ZainoDB {
         self.db.get_block_height(hash).await
     }

-    /// Returns the block block hash for the given block height *if* present in the finlaised state.
+    /// Returns the main-chain block hash for `height` if the block is present in the finalised database.
+    ///
+    /// Returns:
+    /// - `Ok(Some(hash))` if the height is indexed,
+    /// - `Ok(None)` if the height is not present (not an error).
     pub(crate) async fn get_block_hash(
         &self,
         height: Height,
@@ -352,11 +666,17 @@ impl ZainoDB {
         self.db.get_block_hash(height).await
     }

-    /// Returns metadata for the running ZainoDB.
+    /// Returns the persisted database metadata.
+    ///
+    /// See [`capability::DbMetadata`] for the precise fields and on-disk encoding.
     pub(crate) async fn get_metadata(&self) -> Result<DbMetadata, FinalisedStateError> {
         self.db.get_metadata().await
     }

+    /// Returns the internal router (test-only).
+    ///
+    /// This is intended for unit/integration tests that need to observe or manipulate routing state
+    /// during migrations. Production code should not depend on the router directly.
     #[cfg(test)]
     pub(crate) fn router(&self) -> &Router {
         &self.db
diff --git a/zaino-state/src/chain_index/finalised_state/CHANGELOG.md b/zaino-state/src/chain_index/finalised_state/CHANGELOG.md
new file mode 100644
index 000000000..0b5659203
--- /dev/null
+++ b/zaino-state/src/chain_index/finalised_state/CHANGELOG.md
@@ -0,0 +1,151 @@
+Zaino Finalised-State Database Changelog
+=======================================
+
+Format
+------
+One entry per database version bump (major / minor / patch). Keep entries concise and factual.
+
+Entry template:
+
+--------------------------------------------------------------------------------
+DB VERSION vX.Y.Z (from vA.B.C)
+Date: YYYY-MM-DD
+--------------------------------------------------------------------------------
+
+Summary
+- <1–3 bullets describing intent of the change>
+
+On-disk schema
+- Layout:
+  -
+- Tables:
+  - Added: <...>
+  - Removed: <...>
+  - Renamed: <old -> new>
+- Encoding:
+  - Keys:
+  - Values:
+  - Checksums / validation:
+- Invariants:
+  -
+
+API / capabilities
+- Capability changes:
+  - Added: <...>
+  - Removed: <...>
+  - Changed: <...>
+- Public surface changes:
+  - Added:
+  - Removed:
+  - Changed:
+
+Migration
+- Strategy:
+- Backfill:
+- Completion criteria:
+- Failure handling:
+
+--------------------------------------------------------------------------------
+DB VERSION v1.0.0 (from v0.0.0)
+Date: 2025-08-13
+--------------------------------------------------------------------------------
+
+Summary
+- Replace legacy v0 schema with versioned v1 schema and expanded indices / query surface.
+- Introduce stronger integrity checks and on-demand validation for v1 read paths.
+- Keep compact block retrieval available (compatibility surface).
+
+On-disk schema
+- Layout:
+  - Move to per-network version directory layout: //v1/
+  - VERSION_DIRS begins at ["v1"] (new versions append, no gaps).
+- Tables:
+  - Added (v1): headers, txids, transparent, sapling, orchard, commitment_tree_data, heights (hash->height),
+    plus v1 indices for tx locations, spent outpoints, and transparent address history.
+  - Removed / superseded (v0): legacy compact-block-streamer oriented storage layout.
+- Encoding:
+  - v1 values are stored as checksum-protected `StoredEntryVar` / `StoredEntryFixed` entries.
+  - Canonical key bytes are used for checksum verification via `verify(key)`.
+- Invariants (v1 validation enforces):
+  - Per-table checksum verification for all per-block tables.
+  - Chain continuity: header parent hash at height h matches stored hash at h-1.
+  - Merkle consistency: header merkle root matches computed root from stored txid list.
+  - Index consistency:
+    - hash->height mapping must match the queried height.
+    - spent + addr history records must exist and match for transparent inputs/outputs.
+
+API / capabilities
+- Capability changes:
+  - v0: READ_CORE | WRITE_CORE | COMPACT_BLOCK_EXT
+  - v1: Capability::LATEST (block core/transparent/shielded, indexed block, transparent history, etc.)
+- Public surface changes:
+  - Added (v1-only; FeatureUnavailable on v0):
+    - BlockCoreExt: header/txids/range fetch, txid<->location lookup
+    - BlockTransparentExt: per-tx and per-block transparent access + ranges
+    - BlockShieldedExt: sapling/orchard per-tx and per-block access + ranges, commitment tree data (+ ranges)
+    - IndexedBlockExt: indexed block retrieval
+    - TransparentHistExt: addr records, range queries, balance/utxos, outpoint spender(s)
+  - Preserved:
+    - CompactBlockExt remains available for both v0 and v1.
+
+Migration
+- Strategy: shadow build + promotion (no in-place transformation of v0).
+- Backfill: rebuild all v1 tables/indices by ingesting chain data.
+- Completion criteria:
+  - metadata indicates migrated/ready, and required tables exist through the tip.
+  - validation succeeds for the contiguous best chain range as built.
+- Failure handling:
+  - do not promote partially built v1; continue using v0 if present; rebuild v1 on retry.
+
+--------------------------------------------------------------------------------
+DB VERSION v1.1.0 (from v1.0.0)
+Date: 2026-01-27
+--------------------------------------------------------------------------------
+
+Summary
+- Minor version bump to reflect updated compact block API contract (streaming + pool filtering semantics).
+- No schema or encoding changes; metadata-only migration updates persisted DB version marker.
+
+On-disk schema
+- Layout:
+  - No changes.
+- Tables:
+  - Added: None.
+  - Removed: None.
+  - Renamed: None.
+- Encoding:
+  - Keys: No changes.
+  - Values: No changes.
+  - Checksums / validation: No changes.
+- Invariants:
+  - No changes.
+
+API / capabilities
+- Capability changes:
+  - Added: None.
+  - Removed: None.
+  - Changed:
+    - COMPACT_BLOCK_EXT contract updated for v1 backends:
+      - get_compact_block(...) now takes a PoolTypeFilter, which selects which pool data is materialized into the returned compact block.
+      - get_compact_block_stream(...) added.
+
+- Public surface changes:
+  - Added:
+    - CompactBlockExt::get_compact_block_stream(start_height, end_height, pool_types: PoolTypeFilter).
+  - Removed: None.
+  - Changed:
+    - CompactBlockExt::get_compact_block(height, pool_types: PoolTypeFilter) signature updated.
+ - Compact block contents are now filtered by PoolTypeFilter, and may include transparent transaction data (vin/vout) when selected. + +Migration +- Strategy: In-place (metadata-only). +- Backfill: None. +- Completion criteria: + - DbMetadata.version updated from 1.0.0 to 1.1.0. + - DbMetadata.migration_status reset to Empty. +- Failure handling: + - Idempotent: re-running re-writes the same metadata; no partial state beyond metadata. + +-------------------------------------------------------------------------------- +(append new entries below) +-------------------------------------------------------------------------------- diff --git a/zaino-state/src/chain_index/finalised_state/capability.rs b/zaino-state/src/chain_index/finalised_state/capability.rs index 8318b3a73..22e542d74 100644 --- a/zaino-state/src/chain_index/finalised_state/capability.rs +++ b/zaino-state/src/chain_index/finalised_state/capability.rs @@ -1,4 +1,79 @@ -//! Holds ZainoDB capability traits and bitmaps. +//! Capability model, versioned metadata, and DB trait surface +//! +//! This file defines the **capability- and version-aware interface** that all `ZainoDB` database +//! implementations must conform to. +//! +//! The core idea is: +//! - Each concrete DB major version (e.g. `DbV0`, `DbV1`) implements a common set of traits. +//! - A `Capability` bitmap declares which parts of that trait surface are actually supported. +//! - The router (`Router`) and reader (`DbReader`) use *single-feature* requests +//! (`CapabilityRequest`) to route a call to a backend that is guaranteed to support it. +//! +//! This design enables: +//! - running mixed-version configurations during major migrations (primary + shadow), +//! - serving old data while building new indices, +//! - and gating API features cleanly when a backend does not support an extension. +//! +//! # What’s in this file +//! +//! ## Capability / routing types +//! - [`Capability`]: bitflags describing what an *open* database instance can serve. +//! - [`CapabilityRequest`]: a single-feature request (non-composite) used for routing. +//! +//! ## Versioned metadata +//! - [`DbVersion`]: schema version triple (major/minor/patch) plus a mapping to supported capabilities. +//! - [`DbMetadata`]: persisted singleton stored under the fixed key `"metadata"` in the LMDB +//! metadata database; includes: +//! - `version: DbVersion` +//! - `schema_hash: [u8; 32]` (BLAKE2b-256 of schema definition/contract) +//! - `migration_status: MigrationStatus` +//! - [`MigrationStatus`]: persisted migration progress marker to support resuming after shutdown. +//! +//! All metadata types in this file implement `ZainoVersionedSerde` and therefore have explicit +//! on-disk encoding versions. +//! +//! ## Trait surface +//! This file defines: +//! +//! - **Core traits** implemented by every DB version: +//! - [`DbRead`], [`DbWrite`], and [`DbCore`] +//! +//! - **Extension traits** implemented by *some* versions: +//! - [`BlockCoreExt`], [`BlockTransparentExt`], [`BlockShieldedExt`] +//! - [`CompactBlockExt`] +//! - [`IndexedBlockExt`] +//! - [`TransparentHistExt`] +//! +//! Extension traits must be capability-gated: if a DB does not advertise the corresponding capability +//! bit, routing must not hand that backend out for that request. +//! +//! # Versioning strategy (practical guidance) +//! +//! - `DbVersion::major` is the primary compatibility boundary: +//! - v0 is a legacy compact-block streamer. +//! - v1 adds richer indices (chain block data + transparent history). +//! +//! 
- `minor`/`patch` can be used for additive or compatible changes, but only if on-disk encodings +//! remain readable and all invariants remain satisfied. +//! +//! - `DbVersion::capability()` must remain conservative: +//! - only advertise capabilities that are fully correct for that on-disk schema. +//! +//! # Development: adding or changing features safely +//! +//! When adding a new feature/query that requires new persistent data: +//! +//! 1. Add a new capability bit to [`Capability`]. +//! 2. Add a corresponding variant to [`CapabilityRequest`] and map it in: +//! - `as_capability()` +//! - `name()` +//! 3. Add a new extension trait (or extend an existing one) that expresses the required operations. +//! 4. Implement the extension trait for the latest DB version(s). +//! 5. Update `DbVersion::capability()` for the version(s) that support it. +//! 6. Route it through `DbReader` by requesting the new `CapabilityRequest`. +//! +//! When changing persisted metadata formats, bump the `ZainoVersionedSerde::VERSION` for that type +//! and provide a decoding path in `decode_latest()`. use core::fmt; @@ -6,51 +81,87 @@ use crate::{ chain_index::types::{AddrEventBytes, TransactionHash}, error::FinalisedStateError, read_fixed_le, read_u32_le, read_u8, version, write_fixed_le, write_u32_le, write_u8, - AddrScript, BlockHash, BlockHeaderData, CommitmentTreeData, FixedEncodedLen, Height, - IndexedBlock, OrchardCompactTx, OrchardTxList, Outpoint, SaplingCompactTx, SaplingTxList, - StatusType, TransparentCompactTx, TransparentTxList, TxLocation, TxidList, ZainoVersionedSerde, + AddrScript, BlockHash, BlockHeaderData, CommitmentTreeData, CompactBlockStream, + FixedEncodedLen, Height, IndexedBlock, OrchardCompactTx, OrchardTxList, Outpoint, + SaplingCompactTx, SaplingTxList, StatusType, TransparentCompactTx, TransparentTxList, + TxLocation, TxidList, ZainoVersionedSerde, }; use async_trait::async_trait; use bitflags::bitflags; use core2::io::{self, Read, Write}; +use zaino_proto::proto::utils::PoolTypeFilter; // ***** Capability definition structs ***** bitflags! { - /// Represents what an **open** ZainoDB can provide. + /// Capability bitmap describing what an **open** database instance can serve. /// - /// The façade (`ZainoDB`) sets these flags **once** at open-time from the - /// on-disk `SchemaVersion`, then consults them to decide which helper - /// (`writer()`, `block_core()`, …) it may expose. + /// A capability is an *implementation promise*: if a backend advertises a capability bit, then + /// the corresponding trait surface must be fully and correctly implemented for that backend’s + /// on-disk schema. /// - /// Each flag corresponds 1-for-1 with an extension trait. + /// ## How capabilities are used + /// - [`DbVersion::capability`] maps a persisted schema version to a conservative capability set. + /// - [`crate::chain_index::finalised_state::router::Router`] holds a primary and optional shadow + /// backend and uses masks to decide which backend may serve a given feature. + /// - [`crate::chain_index::finalised_state::reader::DbReader`] requests capabilities via + /// [`CapabilityRequest`] (single-feature requests) and therefore obtains a backend that is + /// guaranteed to support the requested operation. + /// + /// ## Extension trait mapping + /// Each bit corresponds 1-for-1 with a trait surface: + /// - `READ_CORE` / `WRITE_CORE` correspond to [`DbRead`] / [`DbWrite`] + /// - all other bits correspond to extension traits (e.g. 
[`BlockCoreExt`], [`TransparentHistExt`]) #[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Hash, Default)] pub(crate) struct Capability: u32 { /* ------ core database functionality ------ */ - /// Implements `DbRead`. + + /// Backend implements [`DbRead`]. + /// + /// This includes: + /// - tip height (`db_height`) + /// - hash↔height lookups + /// - reading the persisted metadata singleton. const READ_CORE = 0b0000_0001; - /// Implements `DbWrite`. + + /// Backend implements [`DbWrite`]. + /// + /// This includes: + /// - appending tip blocks, + /// - deleting tip blocks, + /// - and updating the metadata singleton. const WRITE_CORE = 0b0000_0010; /* ---------- database extensions ---------- */ - /// Implements `BlockCoreExt`. + + /// Backend implements [`BlockCoreExt`] (header/txid and tx-index lookups). const BLOCK_CORE_EXT = 0b0000_0100; - /// Implements `BlockTransparentExt`. + + /// Backend implements [`BlockTransparentExt`] (transparent per-block/per-tx data). const BLOCK_TRANSPARENT_EXT = 0b0000_1000; - /// Implements `BlockShieldedExt`. + + /// Backend implements [`BlockShieldedExt`] (sapling/orchard per-block/per-tx data). const BLOCK_SHIELDED_EXT = 0b0001_0000; - /// Implements `CompactBlockExt`. + + /// Backend implements [`CompactBlockExt`] (CompactBlock materialization). const COMPACT_BLOCK_EXT = 0b0010_0000; - /// Implements `IndexedBlockExt`. + + /// Backend implements [`IndexedBlockExt`] (full `IndexedBlock` materialization). const CHAIN_BLOCK_EXT = 0b0100_0000; - /// Implements `TransparentHistExt`. + + /// Backend implements [`TransparentHistExt`] (transparent address history indices). const TRANSPARENT_HIST_EXT = 0b1000_0000; } } impl Capability { - /// All features supported by a **fresh v1** database. + /// Capability set supported by a **fresh** database at the latest major schema supported by this build. + /// + /// This value is used as the “expected modern baseline” for new DB instances. It must remain in + /// sync with: + /// - the latest on-disk schema (`DbV1` today, `DbV2` in the future), + /// - and [`DbVersion::capability`] for that schema. pub(crate) const LATEST: Capability = Capability::READ_CORE .union(Capability::WRITE_CORE) .union(Capability::BLOCK_CORE_EXT) @@ -60,28 +171,56 @@ impl Capability { .union(Capability::CHAIN_BLOCK_EXT) .union(Capability::TRANSPARENT_HIST_EXT); - /// Checks for the given capability. + /// Returns `true` if `self` includes **all** bits from `other`. + /// + /// This is primarily used for feature gating and routing assertions. #[inline] pub(crate) const fn has(self, other: Capability) -> bool { self.contains(other) } } -// A single-feature request type (cannot be composite). +/// A *single-feature* capability request used for routing. +/// +/// `CapabilityRequest` values are intentionally non-composite: each variant maps to exactly one +/// [`Capability`] bit. This keeps routing and error reporting unambiguous. +/// +/// The router uses the request to select a backend that advertises the requested capability. +/// If no backend advertises the capability, the call must fail with +/// [`FinalisedStateError::FeatureUnavailable`]. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub(crate) enum CapabilityRequest { + /// Request the [`DbRead`] core surface. ReadCore, + + /// Request the [`DbWrite`] core surface. WriteCore, + + /// Request the [`BlockCoreExt`] extension surface. BlockCoreExt, + + /// Request the [`BlockTransparentExt`] extension surface. 
BlockTransparentExt, + + /// Request the [`BlockShieldedExt`] extension surface. BlockShieldedExt, + + /// Request the [`CompactBlockExt`] extension surface. CompactBlockExt, + + /// Request the [`IndexedBlockExt`] extension surface. IndexedBlockExt, + + /// Request the [`TransparentHistExt`] extension surface. TransparentHistExt, } impl CapabilityRequest { - /// Map to the corresponding single-bit `Capability`. + /// Maps this request to the corresponding single-bit [`Capability`]. + /// + /// This mapping must remain 1-for-1 with: + /// - the definitions in [`Capability`], and + /// - the human-readable names returned by [`CapabilityRequest::name`]. #[inline] pub(crate) const fn as_capability(self) -> Capability { match self { @@ -96,7 +235,10 @@ impl CapabilityRequest { } } - /// Human-friendly feature name for errors and logs. + /// Returns a stable human-friendly feature name for errors and logs. + /// + /// This value is used in [`FinalisedStateError::FeatureUnavailable`] and must remain stable + /// across refactors to avoid confusing diagnostics. #[inline] pub(crate) const fn name(self) -> &'static str { match self { @@ -112,7 +254,7 @@ impl CapabilityRequest { } } -// Optional convenience conversions. +/// Convenience conversion from a routing request to its single-bit capability. impl From for Capability { #[inline] fn from(req: CapabilityRequest) -> Self { @@ -120,22 +262,43 @@ impl From for Capability { } } -/// Top-level database metadata entry, storing the current schema version. +// ***** Database metadata structs ***** + +/// Persisted database metadata singleton. +/// +/// This record is stored under the fixed key `"metadata"` in the LMDB metadata database and is used to: +/// - identify the schema version currently on disk, +/// - bind the database to an explicit schema contract (`schema_hash`), +/// - and persist migration progress (`migration_status`) for crash-safe resumption. /// -/// Stored under the fixed key `"metadata"` in the LMDB metadata database. +/// ## Encoding +/// `DbMetadata` implements [`ZainoVersionedSerde`]. The encoded body is: +/// - one versioned [`DbVersion`], +/// - a fixed 32-byte schema hash, +/// - one versioned [`MigrationStatus`]. #[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Hash, Default)] #[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] pub(crate) struct DbMetadata { - /// Encodes the version and schema hash. + /// Schema version triple for the on-disk database. pub(crate) version: DbVersion, - /// BLAKE2b-256 hash of the schema definition (includes struct layout, types, etc.) + + /// BLAKE2b-256 hash of the schema definition/contract. + /// + /// This hash is intended to detect accidental schema drift (layout/type changes) across builds. + /// It is not a security boundary; it is a correctness and operator-safety signal. pub(crate) schema_hash: [u8; 32], - /// Migration status of the database, `Empty` outside of migrations. + + /// Persisted migration state, used to resume safely after shutdown/crash. + /// + /// Outside of migrations this should be [`MigrationStatus::Empty`]. pub(crate) migration_status: MigrationStatus, } impl DbMetadata { - /// Creates a new DbMetadata. + /// Constructs a new metadata record. + /// + /// Callers should ensure `schema_hash` matches the schema contract for `version`, and that + /// `migration_status` is set conservatively (typically `Empty` unless actively migrating). 
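+    ///
+    /// Construction sketch (illustrative values; `schema_hash` assumed to be a
+    /// precomputed BLAKE2b-256 digest of the schema contract):
+    /// ```ignore
+    /// let meta = DbMetadata::new(
+    ///     DbVersion::new(1, 1, 0),
+    ///     schema_hash,
+    ///     MigrationStatus::Empty,
+    /// );
+    /// ```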
pub(crate) fn new( version: DbVersion, schema_hash: [u8; 32], @@ -148,22 +311,28 @@ impl DbMetadata { } } - /// Returns the version data. + /// Returns the persisted schema version. pub(crate) fn version(&self) -> DbVersion { self.version } - /// Returns the version schema hash. + /// Returns the schema contract hash. pub(crate) fn schema(&self) -> [u8; 32] { self.schema_hash } - /// Returns the migration status of the database. + /// Returns the persisted migration status. pub(crate) fn migration_status(&self) -> MigrationStatus { self.migration_status } } +/// Versioned on-disk encoding for the metadata singleton. +/// +/// Body layout (after the `ZainoVersionedSerde` tag byte): +/// 1. `DbVersion` (versioned, includes its own tag) +/// 2. `[u8; 32]` schema hash +/// 3. `MigrationStatus` (versioned, includes its own tag) impl ZainoVersionedSerde for DbMetadata { const VERSION: u8 = version::V1; @@ -189,12 +358,17 @@ impl ZainoVersionedSerde for DbMetadata { } } -// DbMetadata: its body is one *versioned* DbVersion (12 + 1 tag) + 32-byte schema hash -// + one *versioned* MigrationStatus (1 + 1 tag) = 47 bytes +/// `DbMetadata` has a fixed encoded body length. +/// +/// Body length = `DbVersion::VERSIONED_LEN` (12 + 1) + 32-byte schema hash +/// + `MigrationStatus::VERSIONED_LEN` (1 + 1) = 47 bytes. impl FixedEncodedLen for DbMetadata { const ENCODED_LEN: usize = DbVersion::VERSIONED_LEN + 32 + MigrationStatus::VERSIONED_LEN; } +/// Human-readable summary for logs. +/// +/// The schema hash is abbreviated to the first 4 bytes for readability. impl core::fmt::Display for DbMetadata { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!( @@ -213,9 +387,20 @@ impl core::fmt::Display for DbMetadata { } } -/// Database schema version information. +/// Database schema version triple. +/// +/// The version is interpreted as `{major}.{minor}.{patch}` and is used to: +/// - select a database backend implementation, +/// - determine supported capabilities for routing, +/// - and enforce safe upgrades via migrations. +/// +/// ## Compatibility model +/// - `major` is the primary compatibility boundary (schema family). +/// - `minor` and `patch` may be used for compatible changes, but only if all persisted record +/// encodings remain readable and correctness invariants are preserved. /// -/// This is used for schema migration safety and compatibility checks. +/// The authoritative capability mapping is provided by [`DbVersion::capability`], and must remain +/// conservative: only advertise features that are correct for the given on-disk schema. #[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Hash, Default)] #[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] pub(crate) struct DbVersion { @@ -228,7 +413,7 @@ pub(crate) struct DbVersion { } impl DbVersion { - /// creates a new DbVersion. + /// Construct a new DbVersion. pub(crate) fn new(major: u32, minor: u32, patch: u32) -> Self { Self { major, @@ -252,6 +437,13 @@ impl DbVersion { self.patch } + /// Returns the conservative capability set for this schema version. + /// + /// Routing relies on this mapping for safety: if a capability is not included here, callers + /// must not assume the corresponding trait surface is available. + /// + /// If a schema version is unknown to this build, this returns [`Capability::empty`], ensuring + /// the router will reject feature requests rather than serving incorrect data. 
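+    ///
+    /// For example (illustrative):
+    /// ```ignore
+    /// let caps = DbVersion::new(1, 1, 0).capability();
+    /// assert!(caps.has(Capability::TRANSPARENT_HIST_EXT));
+    /// // Unknown schema versions advertise nothing:
+    /// assert_eq!(DbVersion::new(9, 0, 0).capability(), Capability::empty());
+    /// ```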
pub(crate) fn capability(&self) -> Capability { match (self.major, self.minor) { // V0: legacy compact block streamer. @@ -260,7 +452,7 @@ impl DbVersion { } // V1: Adds chainblockv1 and transparent transaction history data. - (1, 0) => { + (1, 0) | (1, 1) => { Capability::READ_CORE | Capability::WRITE_CORE | Capability::BLOCK_CORE_EXT @@ -277,6 +469,10 @@ impl DbVersion { } } +/// Versioned on-disk encoding for database versions. +/// +/// Body layout (after the tag byte): three little-endian `u32` values: +/// `major`, `minor`, `patch`. impl ZainoVersionedSerde for DbVersion { const VERSION: u8 = version::V1; @@ -302,36 +498,53 @@ impl ZainoVersionedSerde for DbVersion { } } -/* DbVersion: body = 3*(4-byte u32) - 12 bytes */ +// DbVersion: body = 3*(4-byte u32) - 12 bytes impl FixedEncodedLen for DbVersion { const ENCODED_LEN: usize = 4 + 4 + 4; } +/// Formats as `{major}.{minor}.{patch}` for logs and diagnostics. impl core::fmt::Display for DbVersion { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "{}.{}.{}", self.major, self.minor, self.patch) } } -/// Holds migration data. +/// Persisted migration progress marker. /// -/// This is used when the database is shutdown mid-migration to ensure migration correctness. +/// This value exists to make migrations crash-resumable. A migration may: +/// - build a shadow database incrementally, +/// - optionally perform partial rebuild phases to limit disk amplification, +/// - and finally promote the shadow to primary. /// -/// NOTE: Some migrations run a partial database rebuild before the final build process. -/// This is done to minimise disk requirements during migrations, -/// enabling the deletion of the old database before the the database is rebuilt in full. +/// Database implementations and the migration manager must treat this value conservatively: +/// if the process is interrupted, the next startup should be able to determine the correct +/// resumption behavior from this status and the on-disk state. #[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Hash)] #[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] #[derive(Default)] pub(crate) enum MigrationStatus { + /// No migration is in progress. #[default] Empty, + + /// A partial build phase is currently in progress. + /// + /// Some migrations split work into phases to limit disk usage (for example, deleting the old + /// database before rebuilding the new one in full). PartialBuidInProgress, + + /// The partial build phase completed successfully. PartialBuildComplete, + + /// The final build phase is currently in progress. FinalBuildInProgress, + + /// Migration work is complete and the database is ready for promotion/steady-state operation. Complete, } +/// Human-readable migration status for logs and diagnostics. impl fmt::Display for MigrationStatus { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let status_str = match self { @@ -345,6 +558,10 @@ impl fmt::Display for MigrationStatus { } } +/// Versioned on-disk encoding for migration status. +/// +/// Body layout (after the tag byte): one `u8` discriminator. +/// Unknown tags must fail decoding. impl ZainoVersionedSerde for MigrationStatus { const VERSION: u8 = version::V1; @@ -378,67 +595,115 @@ impl ZainoVersionedSerde for MigrationStatus { } } +/// `MigrationStatus` has a fixed 1-byte encoded body (discriminator). 
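+///
+/// Length-arithmetic sketch (illustrative only; it assumes `VERSIONED_LEN` is defined as
+/// `ENCODED_LEN + 1` for the leading version tag):
+///
+/// ```rust,ignore
+/// assert_eq!(MigrationStatus::ENCODED_LEN, 1);   // discriminator byte
+/// assert_eq!(MigrationStatus::VERSIONED_LEN, 2); // body + tag
+/// // The 47-byte DbMetadata body stated earlier follows directly:
+/// assert_eq!(DbMetadata::ENCODED_LEN, 13 + 32 + 2);
+/// ```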
impl FixedEncodedLen for MigrationStatus { const ENCODED_LEN: usize = 1; } // ***** Core Database functionality ***** -/// Read-only operations that *every* ZainoDB version must support. +/// Core read-only operations that *every* database schema version must support. +/// +/// These operations form the minimum required surface for: +/// - determining the chain tip stored on disk, +/// - mapping hashes to heights and vice versa, +/// - and reading the persisted schema metadata. +/// +/// All methods must be consistent with the database’s *finalised* chain view. #[async_trait] pub trait DbRead: Send + Sync { - /// Highest block height stored (or `None` if DB empty). + /// Returns the highest block height stored, or `None` if the database is empty. + /// + /// Implementations must treat the stored height as the authoritative tip for all other core + /// lookups. async fn db_height(&self) -> Result, FinalisedStateError>; - /// Lookup height of a block by its hash. + /// Returns the height for `hash` if present. + /// + /// Returns: + /// - `Ok(Some(height))` if indexed, + /// - `Ok(None)` if not present (not an error). async fn get_block_height( &self, hash: BlockHash, ) -> Result, FinalisedStateError>; - /// Lookup hash of a block by its height. + /// Returns the hash for `height` if present. + /// + /// Returns: + /// - `Ok(Some(hash))` if indexed, + /// - `Ok(None)` if not present (not an error). async fn get_block_hash( &self, height: Height, ) -> Result, FinalisedStateError>; - /// Return the persisted `DbMetadata` singleton. + /// Returns the persisted metadata singleton. + /// + /// This must reflect the schema actually used by the backend instance. async fn get_metadata(&self) -> Result; } -/// Write operations that *every* ZainoDB version must support. +/// Core write operations that *every* database schema version must support. +/// +/// The finalised database is updated using *stack semantics*: +/// - blocks are appended at the tip (`write_block`), +/// - and removed only from the tip (`delete_block_at_height` / `delete_block`). +/// +/// Implementations must keep all secondary indices internally consistent with these operations. #[async_trait] pub trait DbWrite: Send + Sync { - /// Persist a fully-validated block to the database. + /// Appends a fully-validated block to the database. + /// + /// Invariant: `block` must be the next height after the current tip (no gaps, no rewrites). async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError>; - /// Deletes a block identified height from every finalised table. + /// Deletes the tip block identified by `height` from every finalised table. + /// + /// Invariant: `height` must be the current database tip height. async fn delete_block_at_height(&self, height: Height) -> Result<(), FinalisedStateError>; - /// Wipe the given block data from every finalised table. + /// Deletes the provided tip block from every finalised table. /// - /// Takes a IndexedBlock as input and ensures all data from this block is wiped from the database. + /// This is the “full-information” deletion path: it takes an [`IndexedBlock`] so the backend + /// can deterministically remove all derived index entries even if reconstructing them from + /// height alone is not possible. /// - /// Used as a backup when delete_block_at_height fails. + /// Invariant: `block` must be the current database tip block. 
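+    ///
+    /// Stack-semantics sketch for the write surface as a whole (illustrative only; it
+    /// assumes `IndexedBlock: Clone`):
+    ///
+    /// ```rust,ignore
+    /// // Append at the tip, then roll the same block back. Gaps and non-tip
+    /// // deletes must be rejected by the backend.
+    /// db.write_block(block.clone()).await?;
+    /// if db.delete_block_at_height(block.index().height()).await.is_err() {
+    ///     // Fall back to the full-information path.
+    ///     db.delete_block(&block).await?;
+    /// }
+    /// ```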
async fn delete_block(&self, block: &IndexedBlock) -> Result<(), FinalisedStateError>; - /// Update the metadata store with the given DbMetadata + /// Replaces the persisted metadata singleton with `metadata`. + /// + /// Implementations must ensure this update is atomic with respect to readers (within the + /// backend’s concurrency model). async fn update_metadata(&self, metadata: DbMetadata) -> Result<(), FinalisedStateError>; } -/// Core database functionality that *every* ZainoDB version must support. +/// Core runtime surface implemented by every backend instance. +/// +/// This trait binds together: +/// - the core read/write operations, and +/// - lifecycle and status reporting for background tasks. +/// +/// In practice, [`crate::chain_index::finalised_state::router::Router`] implements this by +/// delegating to the currently routed core backend(s). #[async_trait] pub trait DbCore: DbRead + DbWrite + Send + Sync { /// Returns the current runtime status (`Starting`, `Syncing`, `Ready`, …). fn status(&self) -> StatusType; - /// Stops background tasks, syncs, etc. + /// Initiates a graceful shutdown of background tasks and closes database resources. async fn shutdown(&self) -> Result<(), FinalisedStateError>; } // ***** Database Extension traits ***** -/// Core block data extension. +/// Core block indexing extension. +/// +/// This extension covers header and txid range fetches plus transaction indexing by [`TxLocation`]. +/// +/// Capability gating: +/// - Backends must only be routed for this surface if they advertise [`Capability::BLOCK_CORE_EXT`]. #[async_trait] pub trait BlockCoreExt: Send + Sync { /// Return block header data by height. @@ -447,7 +712,9 @@ pub trait BlockCoreExt: Send + Sync { height: Height, ) -> Result; - /// Return block headers for the given height range. + /// Returns block headers for the inclusive range `[start, end]`. + /// + /// Callers should ensure `start <= end`. async fn get_block_range_headers( &self, start: Height, @@ -458,41 +725,59 @@ pub trait BlockCoreExt: Send + Sync { async fn get_block_txids(&self, height: Height) -> Result; /// Return block txids for the given height range. + /// + /// Callers should ensure `start <= end`. async fn get_block_range_txids( &self, start: Height, end: Height, ) -> Result, FinalisedStateError>; - /// Fetch the txid bytes for a given TxLocation. + /// Returns the transaction hash for the given [`TxLocation`]. + /// + /// `TxLocation` is the internal transaction index key used by the database. async fn get_txid( &self, tx_location: TxLocation, ) -> Result; - /// Fetch the TxLocation for the given txid, transaction data is indexed by TxLocation internally. + /// Returns the [`TxLocation`] for `txid` if the transaction is indexed. + /// + /// Returns: + /// - `Ok(Some(location))` if indexed, + /// - `Ok(None)` if not present (not an error). + /// + /// NOTE: transaction data is indexed by TxLocation internally. async fn get_tx_location( &self, txid: &TransactionHash, ) -> Result, FinalisedStateError>; } -/// Transparent block data extension. +/// Transparent transaction indexing extension. +/// +/// Capability gating: +/// - Backends must only be routed for this surface if they advertise +/// [`Capability::BLOCK_TRANSPARENT_EXT`]. #[async_trait] pub trait BlockTransparentExt: Send + Sync { - /// Fetch the serialized TransparentCompactTx for the given TxLocation, if present. + /// Returns the serialized [`TransparentCompactTx`] for `tx_location`, if present. 
+    ///
+    /// Returns:
+    /// - `Ok(Some(tx))` if present,
+    /// - `Ok(None)` if not present (not an error).
    async fn get_transparent(
        &self,
        tx_location: TxLocation,
    ) -> Result<Option<TransparentCompactTx>, FinalisedStateError>;

-    /// Fetch block transparent transaction data by height.
+    /// Fetch block transparent transaction data for a given block height.
    async fn get_block_transparent(
        &self,
        height: Height,
    ) -> Result<TransparentTxList, FinalisedStateError>;

-    /// Fetches block transparent tx data for the given height range.
+    /// Returns transparent transaction data for the inclusive block height range `[start, end]`.
    async fn get_block_range_transparent(
        &self,
        start: Height,
@@ -500,7 +785,11 @@
    ) -> Result<Vec<TransparentTxList>, FinalisedStateError>;
}

-/// Transparent block data extension.
+/// Shielded transaction indexing extension (Sapling + Orchard + commitment tree data).
+///
+/// Capability gating:
+/// - Backends must only be routed for this surface if they advertise
+///   [`Capability::BLOCK_SHIELDED_EXT`].
#[async_trait]
pub trait BlockShieldedExt: Send + Sync {
    /// Fetch the serialized SaplingCompactTx for the given TxLocation, if present.
@@ -513,7 +802,7 @@
    async fn get_block_sapling(&self, height: Height) -> Result<SaplingTxList, FinalisedStateError>;

-    /// Fetches block sapling tx data for the given height range.
+    /// Fetches block sapling tx data for the given (inclusive) height range.
    async fn get_block_range_sapling(
        &self,
        start: Height,
@@ -530,7 +819,7 @@
    async fn get_block_orchard(&self, height: Height) -> Result<OrchardTxList, FinalisedStateError>;

-    /// Fetches block orchard tx data for the given height range.
+    /// Fetches block orchard tx data for the given (inclusive) height range.
    async fn get_block_range_orchard(
        &self,
        start: Height,
@@ -543,7 +832,7 @@
        height: Height,
    ) -> Result<CommitmentTreeData, FinalisedStateError>;

-    /// Fetches block commitment tree data for the given height range.
+    /// Fetches block commitment tree data for the given (inclusive) height range.
    async fn get_block_range_commitment_tree_data(
        &self,
        start: Height,
@@ -551,31 +840,60 @@
    ) -> Result<Vec<CommitmentTreeData>, FinalisedStateError>;
}

-/// CompactBlock extension.
+/// CompactBlock materialization extension.
+///
+/// Capability gating:
+/// - Backends must only be routed for this surface if they advertise
+///   [`Capability::COMPACT_BLOCK_EXT`].
#[async_trait]
pub trait CompactBlockExt: Send + Sync {
    /// Returns the CompactBlock for the given Height.
-    ///
-    /// TODO: Add separate range fetch method!
    async fn get_compact_block(
        &self,
        height: Height,
+        pool_types: PoolTypeFilter,
    ) -> Result<CompactBlock, FinalisedStateError>;
+
+    /// Streams `CompactBlock` messages for the inclusive height range
+    /// `[start_height, end_height]`, filtered to the requested pool types.
+    async fn get_compact_block_stream(
+        &self,
+        start_height: Height,
+        end_height: Height,
+        pool_types: PoolTypeFilter,
+    ) -> Result<CompactBlockStream, FinalisedStateError>;
}

-/// IndexedBlock v1 extension.
+/// `IndexedBlock` materialization extension.
+///
+/// Capability gating:
+/// - Backends must only be routed for this surface if they advertise
+///   [`Capability::CHAIN_BLOCK_EXT`].
#[async_trait]
pub trait IndexedBlockExt: Send + Sync {
-    /// Returns the IndexedBlock for the given Height.
+    /// Returns the [`IndexedBlock`] for `height`, if present.
    ///
-    /// TODO: Add separate range fetch method!
+    /// Returns:
+    /// - `Ok(Some(block))` if present,
+    /// - `Ok(None)` if not present (not an error).
+    ///
+    /// TODO: Add a separate range fetch method, as this method is slow for fetching large ranges!
    async fn get_chain_block(
        &self,
        height: Height,
    ) -> Result<Option<IndexedBlock>, FinalisedStateError>;
}

-/// IndexedBlock v1 extension.
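+// Capability-gated dispatch sketch (illustrative only; `backend` stands in for any
+// value exposing `capability()` plus these extension surfaces, and `Capability` is
+// assumed to be a bitflags-style set):
+//
+//     if backend.capability().contains(Capability::COMPACT_BLOCK_EXT) {
+//         let block = backend
+//             .get_compact_block(height, PoolTypeFilter::default())
+//             .await?;
+//     } else {
+//         // Unsupported surface: surface FeatureUnavailable rather than
+//         // silently degrading semantics.
+//     }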
+/// Transparent address history indexing extension. +/// +/// This extension provides address-scoped queries backed by persisted indices built from the +/// transparent transaction graph (outputs, spends, and derived address events). +/// +/// Capability gating: +/// - Backends must only be routed for this surface if they advertise +/// [`Capability::TRANSPARENT_HIST_EXT`]. +/// +/// Range semantics: +/// - Methods that accept `start_height` and `end_height` interpret the range as inclusive: +/// `[start_height, end_height]`. #[async_trait] pub trait TransparentHistExt: Send + Sync { /// Fetch all address history records for a given transparent address. diff --git a/zaino-state/src/chain_index/finalised_state/db.rs b/zaino-state/src/chain_index/finalised_state/db.rs index 4f81b7419..d07422e84 100644 --- a/zaino-state/src/chain_index/finalised_state/db.rs +++ b/zaino-state/src/chain_index/finalised_state/db.rs @@ -1,10 +1,64 @@ -//! Holds Database implementations by *major* version. +//! Versioned database backends (DbBackend) and major-version dispatch +//! +//! This file defines the major-version split for the on-disk finalised database and provides +//! [`DbBackend`], a version-erased enum used throughout the finalised-state subsystem. +//! +//! Concrete database implementations live in: +//! - [`v0`]: legacy schema (compact-block streamer) +//! - [`v1`]: current schema (expanded indices and query surface) +//! +//! `DbBackend` delegates the core DB traits (`DbCore`, `DbRead`, `DbWrite`) and all extension traits +//! to the appropriate concrete implementation. +//! +//! # Capability model integration +//! +//! Each `DbBackend` instance declares its supported [`Capability`] set via `DbBackend::capability()`. +//! This must remain consistent with: +//! - [`capability::DbVersion::capability()`] (schema version → capability mapping), and +//! - the extension trait impls in this file (unsupported methods must return `FeatureUnavailable`). +//! +//! In particular: +//! - v0 supports READ/WRITE core + `CompactBlockExt`. +//! - v1 supports the full current capability set (`Capability::LATEST`), including: +//! - block header/txid/location indexing, +//! - transparent + shielded compact tx access, +//! - indexed block retrieval, +//! - transparent address history indices. +//! +//! # On-disk directory layout (v1+) +//! +//! [`VERSION_DIRS`] enumerates the version subdirectory names used for versioned layouts under the +//! per-network directory (`mainnet/`, `testnet/`, `regtest/`). +//! +//! **Important:** new versions must be appended to `VERSION_DIRS` in order, with no gaps, because +//! discovery code assumes index+1 corresponds to the version number. +//! +//! # Adding a new major version (v2) — checklist +//! +//! 1. Create `db::v2` and implement `DbV2::spawn(cfg)`. +//! 2. Add `V2(DbV2)` variant to [`DbBackend`]. +//! 3. Add `spawn_v2` constructor. +//! 4. Append `"v2"` to [`VERSION_DIRS`]. +//! 5. Extend all trait delegation `match` arms in this file. +//! 6. Update `DbBackend::capability()` and `DbVersion::capability()` for the new version. +//! 7. Add a migration step in `migrations.rs` and register it with `MigrationManager`. +//! +//! # Development: adding new indices/queries +//! +//! Prefer implementing new indices in the latest DB version first (e.g. `v1`) and exposing them via: +//! - a capability bit + extension trait in `capability.rs`, +//! - routing via `DbReader` and `Router`, +//! - and a migration/rebuild plan if the index requires historical backfill. +//! +//! 
Keep unsupported methods explicit: if a DB version does not provide a feature, return +//! `FinalisedStateError::FeatureUnavailable(...)` rather than silently degrading semantics. pub(crate) mod v0; pub(crate) mod v1; use v0::DbV0; use v1::DbV1; +use zaino_proto::proto::utils::PoolTypeFilter; use crate::{ chain_index::{ @@ -16,9 +70,9 @@ use crate::{ }, config::BlockCacheConfig, error::FinalisedStateError, - AddrScript, BlockHash, BlockHeaderData, CommitmentTreeData, Height, IndexedBlock, - OrchardCompactTx, OrchardTxList, Outpoint, SaplingCompactTx, SaplingTxList, StatusType, - TransparentCompactTx, TransparentTxList, TxLocation, TxidList, + AddrScript, BlockHash, BlockHeaderData, CommitmentTreeData, CompactBlockStream, Height, + IndexedBlock, OrchardCompactTx, OrchardTxList, Outpoint, SaplingCompactTx, SaplingTxList, + StatusType, TransparentCompactTx, TransparentTxList, TxLocation, TxidList, }; use async_trait::async_trait; @@ -27,30 +81,65 @@ use tokio::time::{interval, MissedTickBehavior}; use super::capability::Capability; -/// New versions must be also be appended to this list and there must be no missing versions for correct functionality. +/// Version subdirectory names for versioned on-disk layouts. +/// +/// This list defines the supported major-version directory names under a per-network directory. +/// For example, a v1 database is stored under `/v1/`. +/// +/// Invariants: +/// - New versions must be appended to this list in order. +/// - There must be no missing versions between entries. +/// - Discovery code assumes `VERSION_DIRS[index]` corresponds to major version `index + 1`. pub(super) const VERSION_DIRS: [&str; 1] = ["v1"]; -/// All concrete database implementations. #[derive(Debug)] +/// All concrete database implementations. +/// Version-erased database backend. +/// +/// This enum is the central dispatch point for the finalised-state database: +/// - It is constructed by spawning a concrete backend (for example, v0 or v1). +/// - It implements the core database traits (`DbCore`, `DbRead`, `DbWrite`). +/// - It implements capability extension traits by delegating to the concrete implementation, or by +/// returning [`FinalisedStateError::FeatureUnavailable`] when unsupported. +/// +/// Capability reporting is provided by [`DbBackend::capability`] and must match the methods that +/// successfully dispatch in the extension trait implementations below. pub(crate) enum DbBackend { + /// Legacy schema backend. V0(DbV0), + + /// Current schema backend. V1(DbV1), } // ***** Core database functionality ***** impl DbBackend { - /// Spawn a v0 database. + /// Spawn a v0 database backend. + /// + /// This constructs and initializes the legacy schema implementation and returns it wrapped in + /// [`DbBackend::V0`]. pub(crate) async fn spawn_v0(cfg: &BlockCacheConfig) -> Result { Ok(Self::V0(DbV0::spawn(cfg).await?)) } - /// Spawn a v1 database. + /// Spawn a v1 database backend. + /// + /// This constructs and initializes the current schema implementation and returns it wrapped in + /// [`DbBackend::V1`]. pub(crate) async fn spawn_v1(cfg: &BlockCacheConfig) -> Result { Ok(Self::V1(DbV1::spawn(cfg).await?)) } - /// Waits until the ZainoDB returns a Ready status. + /// Wait until the database backend reports [`StatusType::Ready`]. + /// + /// This polls `DbCore::status()` on a fixed interval. It is intended for startup sequencing in + /// components that require the database to be fully initialized before accepting requests. 
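+    ///
+    /// Startup-sequencing sketch (illustrative only; `cfg` is any [`BlockCacheConfig`]):
+    ///
+    /// ```rust,ignore
+    /// let backend = DbBackend::spawn_v1(&cfg).await?;
+    /// backend.wait_until_ready().await;
+    /// // Safe to serve reads from here on.
+    /// let tip = backend.db_height().await?;
+    /// ```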
+ /// + /// Notes: + /// - This method does not return an error. If the database never becomes ready, it will loop. + /// - The polling interval is intentionally small and uses `MissedTickBehavior::Delay` to avoid + /// burst catch-up behavior under load. pub(crate) async fn wait_until_ready(&self) { let mut ticker = interval(Duration::from_millis(100)); ticker.set_missed_tick_behavior(MissedTickBehavior::Delay); @@ -63,7 +152,10 @@ impl DbBackend { } } - /// Returns the capabilities supported by this database instance. + /// Return the capabilities supported by this database instance. + /// + /// This is the authoritative runtime capability set for this backend and must remain consistent + /// with the dispatch behavior in the extension trait implementations below. pub(crate) fn capability(&self) -> Capability { match self { Self::V0(_) => { @@ -75,12 +167,14 @@ impl DbBackend { } impl From for DbBackend { + /// Wrap an already-constructed v0 database backend. fn from(value: DbV0) -> Self { Self::V0(value) } } impl From for DbBackend { + /// Wrap an already-constructed v1 database backend. fn from(value: DbV1) -> Self { Self::V1(value) } @@ -88,14 +182,19 @@ impl From for DbBackend { #[async_trait] impl DbCore for DbBackend { + /// Return the current status of the backend. + /// + /// This is a thin delegation wrapper over the concrete implementation. fn status(&self) -> StatusType { match self { - // TODO private Self::V0(db) => db.status(), Self::V1(db) => db.status(), } } + /// Shut down the backend and release associated resources. + /// + /// This is a thin delegation wrapper over the concrete implementation. async fn shutdown(&self) -> Result<(), FinalisedStateError> { match self { Self::V0(db) => db.shutdown().await, @@ -106,6 +205,9 @@ impl DbCore for DbBackend { #[async_trait] impl DbRead for DbBackend { + /// Return the highest stored height in the database, if present. + /// + /// This is a thin delegation wrapper over the concrete implementation. async fn db_height(&self) -> Result, FinalisedStateError> { match self { Self::V0(db) => db.db_height().await, @@ -113,6 +215,9 @@ impl DbRead for DbBackend { } } + /// Resolve a block hash to its stored height, if present. + /// + /// This is a thin delegation wrapper over the concrete implementation. async fn get_block_height( &self, hash: BlockHash, @@ -123,6 +228,9 @@ impl DbRead for DbBackend { } } + /// Resolve a block height to its stored block hash, if present. + /// + /// This is a thin delegation wrapper over the concrete implementation. async fn get_block_hash( &self, height: Height, @@ -133,6 +241,10 @@ impl DbRead for DbBackend { } } + /// Read the database metadata record. + /// + /// This includes versioning and migration status and is used by the migration manager and + /// compatibility checks. async fn get_metadata(&self) -> Result { match self { Self::V0(db) => db.get_metadata().await, @@ -143,6 +255,9 @@ impl DbRead for DbBackend { #[async_trait] impl DbWrite for DbBackend { + /// Write a fully-indexed block into the database. + /// + /// This is a thin delegation wrapper over the concrete implementation. async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError> { match self { Self::V0(db) => db.write_block(block).await, @@ -150,6 +265,9 @@ impl DbWrite for DbBackend { } } + /// Delete the block at a given height, if present. + /// + /// This is a thin delegation wrapper over the concrete implementation. 
async fn delete_block_at_height(&self, height: Height) -> Result<(), FinalisedStateError> { match self { Self::V0(db) => db.delete_block_at_height(height).await, @@ -157,6 +275,9 @@ impl DbWrite for DbBackend { } } + /// Delete a specific indexed block from the database. + /// + /// This is a thin delegation wrapper over the concrete implementation. async fn delete_block(&self, block: &IndexedBlock) -> Result<(), FinalisedStateError> { match self { Self::V0(db) => db.delete_block(block).await, @@ -164,6 +285,9 @@ impl DbWrite for DbBackend { } } + /// Update the database metadata record. + /// + /// This is used by migrations and schema management logic. async fn update_metadata(&self, metadata: DbMetadata) -> Result<(), FinalisedStateError> { match self { Self::V0(db) => db.update_metadata(metadata).await, @@ -173,6 +297,12 @@ impl DbWrite for DbBackend { } // ***** Database capability extension traits ***** +// +// Each extension trait corresponds to a distinct capability group. The dispatch rules are: +// - If the backend supports the capability, delegate to the concrete implementation. +// - If unsupported, return `FinalisedStateError::FeatureUnavailable("")`. +// +// These names must remain consistent with the capability wiring in `capability.rs`. #[async_trait] impl BlockCoreExt for DbBackend { @@ -355,11 +485,32 @@ impl CompactBlockExt for DbBackend { async fn get_compact_block( &self, height: Height, + pool_types: PoolTypeFilter, ) -> Result { #[allow(unreachable_patterns)] match self { - Self::V0(db) => db.get_compact_block(height).await, - Self::V1(db) => db.get_compact_block(height).await, + Self::V0(db) => db.get_compact_block(height, pool_types).await, + Self::V1(db) => db.get_compact_block(height, pool_types).await, + _ => Err(FinalisedStateError::FeatureUnavailable("compact_block")), + } + } + + async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + #[allow(unreachable_patterns)] + match self { + Self::V0(db) => { + db.get_compact_block_stream(start_height, end_height, pool_types) + .await + } + Self::V1(db) => { + db.get_compact_block_stream(start_height, end_height, pool_types) + .await + } _ => Err(FinalisedStateError::FeatureUnavailable("compact_block")), } } diff --git a/zaino-state/src/chain_index/finalised_state/db/v0.rs b/zaino-state/src/chain_index/finalised_state/db/v0.rs index 4552e32c2..0c7bc1436 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v0.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v0.rs @@ -2,8 +2,43 @@ //! //! WARNING: This is a legacy development database and should not be used in production environments. //! -//! NOTE: This database version was implemented before zaino's `ZainoVersionedSerde` was defined, -//! for this reason ZainoDB-V0 does not use the standard serialisation schema used elswhere in Zaino. +//! This module implements the original “v0” finalised-state database backend. It exists primarily +//! for backward compatibility and for development/testing scenarios where the historical v0 +//! on-disk layout must be opened. +//! +//! ## Important constraints +//! +//! - **Not schema-versioned in the modern sense:** this database version predates Zaino’s +//! `ZainoVersionedSerde` wire format, therefore it does not store version-tagged records and does +//! not participate in fine-grained schema evolution. +//! - **Legacy encoding strategy:** +//! - keys and values are stored as JSON via `serde_json` for most types, +//! 
- `CompactBlock` values are encoded as raw Prost bytes via a custom `Serialize`/`Deserialize` +//! wrapper (`DbCompactBlock`) so they can still flow through `serde_json`. +//! - **Limited feature surface:** v0 only supports the core height/hash mapping and compact block +//! retrieval. It does not provide the richer indices introduced in v1 (header data, transaction +//! locations, transparent history indexing, etc.). +//! +//! ## On-disk layout +//! +//! The v0 database uses the legacy network directory names: +//! - mainnet: `live/` +//! - testnet: `test/` +//! - regtest: `local/` +//! +//! Each network directory contains an LMDB environment with (at minimum) these tables: +//! - `heights_to_hashes`: `` +//! - `hashes_to_blocks`: `` (where the compact block is stored +//! as raw Prost bytes wrapped by JSON) +//! +//! ## Runtime model +//! +//! `DbV0` spawns a lightweight background maintenance task that: +//! - publishes `StatusType::Ready` once spawned, +//! - periodically calls `clean_trailing()` to reclaim stale LMDB reader slots. +//! +//! This backend uses `tokio::task::block_in_place` / `tokio::task::spawn_blocking` around LMDB +//! operations to avoid blocking the async runtime. use crate::{ chain_index::{ @@ -14,11 +49,12 @@ use crate::{ }, config::BlockCacheConfig, error::FinalisedStateError, + local_cache::compact_block_with_pool_types, status::{AtomicStatus, StatusType}, - Height, IndexedBlock, + CompactBlockStream, Height, IndexedBlock, }; -use zaino_proto::proto::compact_formats::CompactBlock; +use zaino_proto::proto::{compact_formats::CompactBlock, service::PoolType, utils::PoolTypeFilter}; use zebra_chain::{ block::{Hash as ZebraHash, Height as ZebraHeight}, @@ -35,12 +71,21 @@ use tracing::{info, warn}; // ───────────────────────── ZainoDb v0 Capabilities ───────────────────────── +/// `DbRead` implementation for the legacy v0 backend. +/// +/// Note: v0 exposes only a minimal read surface. Missing data is mapped to `Ok(None)` where the +/// core trait expects optional results. #[async_trait] impl DbRead for DbV0 { + /// Returns the database tip height (`None` if empty). async fn db_height(&self) -> Result, FinalisedStateError> { self.tip_height().await } + /// Returns the block height for a given block hash, if known. + /// + /// For v0, absence is represented as either `DataUnavailable` or `FeatureUnavailable` from the + /// legacy helper; both are mapped to `Ok(None)` here. async fn get_block_height( &self, hash: crate::BlockHash, @@ -55,6 +100,10 @@ impl DbRead for DbV0 { } } + /// Returns the block hash for a given block height, if known. + /// + /// For v0, absence is represented as either `DataUnavailable` or `FeatureUnavailable` from the + /// legacy helper; both are mapped to `Ok(None)` here. async fn get_block_hash( &self, height: crate::Height, @@ -69,17 +118,27 @@ impl DbRead for DbV0 { } } + /// Returns synthetic metadata for v0. + /// + /// v0 does not persist `DbMetadata` on disk; this returns a constructed value describing + /// version `0.0.0` and a default schema hash. async fn get_metadata(&self) -> Result { self.get_metadata().await } } +/// `DbWrite` implementation for the legacy v0 backend. +/// +/// v0 supports append-only writes and pop-only deletes at the tip, enforced by explicit checks in +/// the legacy methods. #[async_trait] impl DbWrite for DbV0 { + /// Writes a fully-validated finalised block, enforcing strict height monotonicity. 
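+    ///
+    /// Monotonicity sketch (illustrative only; the block values are hypothetical):
+    ///
+    /// ```rust,ignore
+    /// // An empty v0 database accepts only genesis first; after that, each
+    /// // write must land at exactly `tip + 1`.
+    /// assert_eq!(db.db_height().await?, None);
+    /// db.write_block(genesis).await?;                             // accepted
+    /// assert!(db.write_block(block_at_height_2).await.is_err()); // gap: rejected
+    /// ```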
async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError> { self.write_block(block).await } + /// Deletes a block at the given height, enforcing that it is the current tip. async fn delete_block_at_height( &self, height: crate::Height, @@ -87,22 +146,37 @@ impl DbWrite for DbV0 { self.delete_block_at_height(height).await } + /// Deletes a block by explicit content. + /// + /// This is a fallback path used when tip-based deletion cannot safely determine the full set of + /// keys to delete (for example, when corruption is suspected). async fn delete_block(&self, block: &IndexedBlock) -> Result<(), FinalisedStateError> { self.delete_block(block).await } - /// NOTE: V0 does not hold metadata! + /// Updates the metadata singleton. + /// + /// NOTE: v0 does not persist metadata on disk; this is a no-op to satisfy the trait. async fn update_metadata(&self, _metadata: DbMetadata) -> Result<(), FinalisedStateError> { Ok(()) } } +/// `DbCore` implementation for the legacy v0 backend. +/// +/// The core lifecycle API is implemented in terms of a status flag and a lightweight background +/// maintenance task. #[async_trait] impl DbCore for DbV0 { + /// Returns the current runtime status published by this backend. fn status(&self) -> StatusType { self.status.load() } + /// Requests shutdown of background tasks and syncs the LMDB environment before returning. + /// + /// This method is best-effort: background tasks are aborted after a timeout and the LMDB + /// environment is fsync’d before exit. async fn shutdown(&self) -> Result<(), FinalisedStateError> { self.status.store(StatusType::Closing); @@ -120,44 +194,82 @@ impl DbCore for DbV0 { } } +/// [`CompactBlockExt`] capability implementation for [`DbV0`]. +/// +/// Exposes `zcash_client_backend`-compatible compact blocks derived from stored header + +/// transaction data. #[async_trait] impl CompactBlockExt for DbV0 { async fn get_compact_block( &self, height: Height, + pool_types: PoolTypeFilter, ) -> Result { - self.get_compact_block(height).await + self.get_compact_block(height, pool_types).await + } + + async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + self.get_compact_block_stream(start_height, end_height, pool_types) + .await } } -/// Finalised part of the chain, held in an LMDB database. +/// Finalised part of the chain, held in an LMDB database (legacy v0). +/// +/// `DbV0` maintains two simple indices: +/// - height → hash +/// - hash → compact block +/// +/// It does **not** implement the richer v1 indices (header data, tx location maps, address history, +/// commitment tree tables, etc.). #[derive(Debug)] pub struct DbV0 { - /// LMDB Database Environmant. + /// LMDB database environment handle. + /// + /// The environment is shared between tasks using `Arc` and is configured for high read + /// concurrency (`max_readers`) and reduced I/O overhead (`NO_READAHEAD`). env: Arc, - /// LMDB Databas containing ``. + /// LMDB database containing ``. + /// + /// Heights are stored as 4-byte big-endian keys for correct lexicographic ordering. heights_to_hashes: Database, - /// LMDB Databas containing ``. + /// LMDB database containing ``. + /// + /// The compact block is stored via the `DbCompactBlock` wrapper: raw Prost bytes embedded in a + /// JSON payload. hashes_to_blocks: Database, - /// Database handler task handle. + /// Background maintenance task handle. 
+ /// + /// This task periodically performs housekeeping (currently reader-slot cleanup). db_handler: Option>, - /// Non-finalised state status. + /// Backend lifecycle status. status: AtomicStatus, - /// BlockCache config data. + + /// Configuration snapshot used for path/network selection and sizing parameters. config: BlockCacheConfig, } impl DbV0 { - /// Spawns a new [`DbV0`] and syncs the FinalisedState to the servers finalised state. + /// Spawns a new [`DbV0`] backend. /// - /// Uses ReadStateService to fetch chain data if given else uses JsonRPC client. + /// This: + /// - derives the v0 network directory name (`live` / `test` / `local`), + /// - opens or creates the LMDB environment and required databases, + /// - configures LMDB reader concurrency based on CPU count, + /// - spawns a background maintenance task, + /// - and returns the opened backend. /// - /// Inputs: - /// - config: ChainIndexConfig. + /// # Errors + /// Returns `FinalisedStateError` on any filesystem, LMDB, or task-spawn failure. pub(crate) async fn spawn(config: &BlockCacheConfig) -> Result { info!("Launching ZainoDB"); @@ -214,7 +326,13 @@ impl DbV0 { Ok(zaino_db) } - /// Try graceful shutdown, fall back to abort after a timeout. + /// Attempts a graceful shutdown and falls back to aborting the maintenance task after a timeout. + /// + /// This is a legacy lifecycle method retained for v0 compatibility. Newer backends should + /// implement shutdown via the `DbCore` trait. + /// + /// # Errors + /// Returns `FinalisedStateError` if LMDB cleanup or sync fails. pub(crate) async fn close(&mut self) -> Result<(), FinalisedStateError> { self.status.store(StatusType::Closing); @@ -244,12 +362,15 @@ impl DbV0 { Ok(()) } - /// Returns the status of ZainoDB. + /// Returns the current backend status. pub(crate) fn status(&self) -> StatusType { self.status.load() } - /// Awaits until the DB returns a Ready status. + /// Blocks until the backend reports `StatusType::Ready`. + /// + /// This is primarily used during startup sequencing so callers do not issue reads before the + /// backend is ready to serve queries. pub(crate) async fn wait_until_ready(&self) { let mut ticker = interval(Duration::from_millis(100)); ticker.set_missed_tick_behavior(MissedTickBehavior::Delay); @@ -264,13 +385,15 @@ impl DbV0 { // *** Internal Control Methods *** - /// Spawns the background validator / maintenance task. + /// Spawns the background maintenance task. /// - /// * **Startup** – runs a full‐DB validation pass (`initial_root_scan` → - /// `initial_block_scan`). - /// * **Steady-state** – every 5 s tries to validate the next block that - /// appeared after the current `validated_tip`. - /// Every 60 s it also calls `clean_trailing()` to purge stale reader slots. + /// The v0 maintenance task is intentionally minimal: + /// - publishes `StatusType::Ready` after spawning, + /// - periodically calls `clean_trailing()` to purge stale LMDB reader slots, + /// - exits when status transitions to `StatusType::Closing`. + /// + /// Note: historical comments refer to validation passes; the current implementation only + /// performs maintenance and does not validate chain contents. async fn spawn_handler(&mut self) -> Result<(), FinalisedStateError> { // Clone everything the task needs so we can move it into the async block. let zaino_db = Self { @@ -306,6 +429,10 @@ impl DbV0 { } /// Helper method to wait for the next loop iteration or perform maintenance. 
+ /// + /// This selects between: + /// - a short sleep (steady-state pacing), and + /// - the maintenance tick (currently reader-slot cleanup). async fn zaino_db_handler_sleep(&self, maintenance: &mut tokio::time::Interval) { tokio::select! { _ = tokio::time::sleep(Duration::from_secs(5)) => {}, @@ -317,14 +444,19 @@ impl DbV0 { } } - /// Clears stale reader slots by opening and closing a read transaction. + /// Clears stale LMDB reader slots by opening and closing a read transaction. + /// + /// LMDB only reclaims reader slots when transactions are closed; this method is a cheap and safe + /// way to encourage reclamation in long-running services. async fn clean_trailing(&self) -> Result<(), FinalisedStateError> { let txn = self.env.begin_ro_txn()?; drop(txn); Ok(()) } - /// Opens an lmdb database if present else creates a new one. + /// Opens an LMDB database if present, otherwise creates it. + /// + /// v0 uses this helper for all tables to make environment creation idempotent across restarts. async fn open_or_create_db( env: &Environment, name: &str, @@ -342,16 +474,23 @@ impl DbV0 { // *** DB write / delete methods *** // These should only ever be used in a single DB control task. - /// Writes a given (finalised) [`IndexedBlock`] to ZainoDB. + /// Writes a given (finalised) [`IndexedBlock`] to the v0 database. + /// + /// This method enforces the v0 write invariant: + /// - if the database is non-empty, the new block height must equal `current_tip + 1`, + /// - if the database is empty, the first write must be genesis (`GENESIS_HEIGHT`). + /// + /// The following records are written atomically in a single LMDB write transaction: + /// - `heights_to_hashes[height_be] = hash_json` + /// - `hashes_to_blocks[hash_json] = compact_block_json` + /// + /// On failure, the method attempts to delete the partially-written block (best effort) and + /// returns an `InvalidBlock` error that includes the height/hash context. pub(crate) async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError> { self.status.store(StatusType::Syncing); let compact_block: CompactBlock = block.to_compact_block(); - let zebra_height: ZebraHeight = block - .index() - .height() - .expect("height always some in the finalised state") - .into(); + let zebra_height: ZebraHeight = block.index().height().into(); let zebra_hash: ZebraHash = zebra_chain::block::Hash::from(*block.index().hash()); let height_key = DbHeight(zebra_height).to_be_bytes(); @@ -359,11 +498,7 @@ impl DbV0 { let block_value = serde_json::to_vec(&DbCompactBlock(compact_block))?; // check this is the *next* block in the chain. - let block_height = block - .index() - .height() - .expect("height always some in finalised state") - .0; + let block_height = block.index().height().0; tokio::task::block_in_place(|| { let ro = self.env.begin_ro_txn()?; @@ -373,11 +508,7 @@ impl DbV0 { match cur.get(None, None, lmdb_sys::MDB_LAST) { // Database already has blocks Ok((last_height_bytes, _last_hash_bytes)) => { - let block_height = block - .index() - .height() - .expect("height always some in finalised state") - .0; + let block_height = block.index().height().0; let last_height = DbHeight::from_be_bytes( last_height_bytes.expect("Height is always some in the finalised state"), @@ -462,7 +593,14 @@ impl DbV0 { } } - /// Deletes a block identified height from every finalised table. + /// Deletes the block at `height` from every v0 table. 
+ /// + /// This method enforces the v0 delete invariant: + /// - the requested height must equal the current database tip. + /// + /// The method determines the tip hash from `heights_to_hashes`, then deletes: + /// - `heights_to_hashes[height_be]` + /// - `hashes_to_blocks[hash_json]` pub(crate) async fn delete_block_at_height( &self, height: crate::Height, @@ -534,7 +672,9 @@ impl DbV0 { Ok(()) } - /// This is used as a backup when delete_block_at_height fails. + /// Deletes the provided block’s entries from every v0 table. + /// + /// This is used as a backup when `delete_block_at_height` fails. /// /// Takes a IndexedBlock as input and ensures all data from this block is wiped from the database. /// @@ -549,11 +689,7 @@ impl DbV0 { &self, block: &IndexedBlock, ) -> Result<(), FinalisedStateError> { - let zebra_height: ZebraHeight = block - .index() - .height() - .expect("height always some in the finalised state") - .into(); + let zebra_height: ZebraHeight = block.index().height().into(); let zebra_hash: ZebraHash = zebra_chain::block::Hash::from(*block.index().hash()); let height_key = DbHeight(zebra_height).to_be_bytes(); @@ -592,8 +728,10 @@ impl DbV0 { // ***** DB fetch methods ***** - // Returns the greatest `Height` stored in `headers` - /// (`None` if the DB is still empty). + /// Returns the greatest `Height` stored in `heights_to_hashes` (`None` if empty). + /// + /// Heights are stored as big-endian keys, so the LMDB `MDB_LAST` cursor position corresponds to + /// the maximum height. pub(crate) async fn tip_height(&self) -> Result, FinalisedStateError> { tokio::task::block_in_place(|| { let ro = self.env.begin_ro_txn()?; @@ -616,7 +754,10 @@ impl DbV0 { }) } - /// Fetch the block height in the main chain for a given block hash. + /// Fetches the block height for a given block hash. + /// + /// v0 resolves hash → compact block via `hashes_to_blocks` and then reads the embedded height + /// from the compact block message. async fn get_block_height_by_hash( &self, hash: crate::BlockHash, @@ -635,6 +776,9 @@ impl DbV0 { }) } + /// Fetches the block hash for a given block height. + /// + /// v0 resolves height → hash via `heights_to_hashes`. async fn get_block_hash_by_height( &self, height: crate::Height, @@ -652,6 +796,12 @@ impl DbV0 { }) } + /// Returns constructed metadata for v0. + /// + /// v0 does not persist real metadata. This method returns: + /// - version `0.0.0`, + /// - a zero schema hash, + /// - `MigrationStatus::Complete` (v0 does not participate in resumable migrations). async fn get_metadata(&self) -> Result { Ok(DbMetadata { version: DbVersion { @@ -665,9 +815,14 @@ impl DbV0 { }) } + /// Fetches the compact block for a given height. + /// + /// This resolves height → hash via `heights_to_hashes`, then hash → compact block via + /// `hashes_to_blocks`. async fn get_compact_block( &self, height: crate::Height, + pool_types: PoolTypeFilter, ) -> Result { let zebra_hash = zebra_chain::block::Hash::from(self.get_block_hash_by_height(height).await?); @@ -678,23 +833,177 @@ impl DbV0 { let block_bytes: &[u8] = txn.get(self.hashes_to_blocks, &hash_key)?; let block: DbCompactBlock = serde_json::from_slice(block_bytes)?; - Ok(block.0) + Ok(compact_block_with_pool_types( + block.0, + &pool_types.to_pool_types_vector(), + )) }) } + + /// Streams `CompactBlock` messages for an inclusive height range. + /// + /// Legacy implementation for backwards compatibility. + /// + /// Behaviour: + /// - The stream covers the inclusive range `[start_height, end_height]`. 
+ /// - If `start_height <= end_height` the stream is ascending; otherwise it is descending. + /// - Blocks are fetched one-by-one by calling `get_compact_block(height, pool_types)` for + /// each height in the range. + /// + /// Pool filtering: + /// - `pool_types` controls which per-transaction components are populated. + /// - Transactions that have no elements in any requested pool type are omitted from `vtx`, + /// and `CompactTx.index` preserves the original transaction index within the block. + /// + /// Notes: + /// - This is intentionally not optimised (no LMDB cursor walk, no batch/range reads). + /// - Any fetch/deserialize error terminates the stream after emitting a single `tonic::Status`. + async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + let is_ascending: bool = start_height <= end_height; + + let (sender, receiver) = + tokio::sync::mpsc::channel::>(128); + + let env = self.env.clone(); + let heights_to_hashes_database: lmdb::Database = self.heights_to_hashes; + let hashes_to_blocks_database: lmdb::Database = self.hashes_to_blocks; + + let pool_types_vector: Vec = pool_types.to_pool_types_vector(); + + tokio::task::spawn_blocking(move || { + fn lmdb_get_status( + database_name: &'static str, + height: Height, + error: lmdb::Error, + ) -> tonic::Status { + match error { + lmdb::Error::NotFound => tonic::Status::not_found(format!( + "missing db entry in {database_name} at height {}", + height.0 + )), + other_error => tonic::Status::internal(format!( + "lmdb get({database_name}) failed at height {}: {other_error}", + height.0 + )), + } + } + + let mut current_height: Height = start_height; + + loop { + let result: Result = (|| { + let txn = env.begin_ro_txn().map_err(|error| { + tonic::Status::internal(format!("lmdb begin_ro_txn failed: {error}")) + })?; + + // height -> hash (heights_to_hashes) + let zebra_height: ZebraHeight = current_height.into(); + let height_key: [u8; 4] = DbHeight(zebra_height).to_be_bytes(); + + let hash_bytes: &[u8] = txn + .get(heights_to_hashes_database, &height_key) + .map_err(|error| { + lmdb_get_status("heights_to_hashes", current_height, error) + })?; + + let db_hash: DbHash = serde_json::from_slice(hash_bytes).map_err(|error| { + tonic::Status::internal(format!( + "height->hash decode failed at height {}: {error}", + current_height.0 + )) + })?; + + // hash -> block (hashes_to_blocks) + let hash_key: Vec = + serde_json::to_vec(&DbHash(db_hash.0)).map_err(|error| { + tonic::Status::internal(format!( + "hash key encode failed at height {}: {error}", + current_height.0 + )) + })?; + + let block_bytes: &[u8] = txn + .get(hashes_to_blocks_database, &hash_key) + .map_err(|error| { + lmdb_get_status("hashes_to_blocks", current_height, error) + })?; + + let db_compact_block: DbCompactBlock = serde_json::from_slice(block_bytes) + .map_err(|error| { + tonic::Status::internal(format!( + "block decode failed at height {}: {error}", + current_height.0 + )) + })?; + + Ok(compact_block_with_pool_types( + db_compact_block.0, + &pool_types_vector, + )) + })(); + + if sender.blocking_send(result).is_err() { + return; + } + + if current_height == end_height { + return; + } + + if is_ascending { + let next_value = match current_height.0.checked_add(1) { + Some(value) => value, + None => { + let _ = sender.blocking_send(Err(tonic::Status::internal( + "height overflow while iterating ascending".to_string(), + ))); + return; + } + }; + current_height = Height(next_value); + } else 
{ + let next_value = match current_height.0.checked_sub(1) { + Some(value) => value, + None => { + let _ = sender.blocking_send(Err(tonic::Status::internal( + "height underflow while iterating descending".to_string(), + ))); + return; + } + }; + current_height = Height(next_value); + } + } + }); + + Ok(CompactBlockStream::new(receiver)) + } } -/// Wrapper for `Height`. +/// Wrapper for `ZebraHeight` used for key encoding. +/// +/// v0 stores heights as 4-byte **big-endian** keys to preserve numeric ordering under LMDB’s +/// lexicographic key ordering. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] struct DbHeight(pub ZebraHeight); impl DbHeight { - /// Converts `[DbHeight]` to 4-byte **big-endian** bytes. - /// Used when storing as an LMDB key. + /// Converts this height to 4-byte **big-endian** bytes. + /// + /// This is used when storing heights as LMDB keys so that increasing heights sort correctly. fn to_be_bytes(self) -> [u8; 4] { self.0 .0.to_be_bytes() } - /// Parse a 4-byte **big-endian** array into a `[DbHeight]`. + /// Parses a 4-byte **big-endian** key into a `DbHeight`. + /// + /// # Errors + /// Returns an error if the key is not exactly 4 bytes long. fn from_be_bytes(bytes: &[u8]) -> Result { let arr: [u8; 4] = bytes .try_into() @@ -703,15 +1012,23 @@ impl DbHeight { } } -/// Wrapper for `Hash`. +/// Wrapper for `ZebraHash` so it can be JSON-serialized as an LMDB value/key payload. +/// +/// v0 stores hashes using `serde_json` rather than Zaino’s versioned binary encoding. #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] struct DbHash(pub ZebraHash); -/// Wrapper for `CompactBlock`. +/// Wrapper for `CompactBlock` for JSON storage. +/// +/// `CompactBlock` is a Prost message; v0 stores it by encoding to raw bytes and embedding those +/// bytes inside a serde payload. #[derive(Debug, Clone, PartialEq)] struct DbCompactBlock(pub CompactBlock); /// Custom `Serialize` implementation using Prost's `encode_to_vec()`. +/// +/// This serializes the compact block as raw bytes so it can be stored via `serde_json` as a byte +/// array payload. impl Serialize for DbCompactBlock { fn serialize(&self, serializer: S) -> Result where @@ -723,6 +1040,8 @@ impl Serialize for DbCompactBlock { } /// Custom `Deserialize` implementation using Prost's `decode()`. +/// +/// This reverses the `Serialize` strategy by decoding the stored raw bytes into a `CompactBlock`. impl<'de> Deserialize<'de> for DbCompactBlock { fn deserialize(deserializer: D) -> Result where diff --git a/zaino-state/src/chain_index/finalised_state/db/v1.rs b/zaino-state/src/chain_index/finalised_state/db/v1.rs index d8b4bb8a8..b0e407839 100644 --- a/zaino-state/src/chain_index/finalised_state/db/v1.rs +++ b/zaino-state/src/chain_index/finalised_state/db/v1.rs @@ -1,4 +1,28 @@ -//! ZainoDB V1 Implementation +//! ZainoDB Finalised State (Schema V1) +//! +//! This module provides the **V1** implementation of Zaino’s LMDB-backed finalised-state database. +//! It stores a validated, append-only view of the best chain and exposes a set of capability traits +//! (read, write, metadata, block-range fetchers, compact-block generation, and transparent history). +//! +//! ## On-disk layout +//! The V1 on-disk layout is described by an ASCII schema file that is embedded into the binary at +//! compile time (`db_schema_v1_0.txt`). A fixed 32-byte BLAKE2b checksum of that schema description +//! is stored in / compared against the database metadata to detect accidental schema drift. +//! +//! 
## Validation model +//! The database maintains a monotonically increasing **validated tip** (`validated_tip`) and a set +//! of validated heights above that tip (`validated_set`) to support out-of-order validation. Reads +//! that require correctness use `resolve_validated_hash_or_height()` to ensure the requested height +//! is validated (performing on-demand validation if required). +//! +//! A background task performs: +//! - an initial full scan of the stored data for checksum / structural correctness, then +//! - steady-state incremental validation of newly appended blocks. +//! +//! ## Concurrency model +//! LMDB supports many concurrent readers and a single writer per environment. This implementation +//! uses `tokio::task::block_in_place` / `spawn_blocking` for LMDB operations to avoid blocking the +//! async runtime, and configures `max_readers` to support high read concurrency. use crate::{ chain_index::{ @@ -15,12 +39,13 @@ use crate::{ config::BlockCacheConfig, error::FinalisedStateError, AddrHistRecord, AddrScript, AtomicStatus, BlockHash, BlockHeaderData, CommitmentTreeData, - CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, CompactSize, CompactTxData, - FixedEncodedLen as _, Height, IndexedBlock, OrchardCompactTx, OrchardTxList, Outpoint, - SaplingCompactTx, SaplingTxList, StatusType, TransparentCompactTx, TransparentTxList, - TxInCompact, TxLocation, TxOutCompact, TxidList, ZainoVersionedSerde as _, + CompactBlockStream, CompactOrchardAction, CompactSaplingOutput, CompactSaplingSpend, + CompactSize, CompactTxData, FixedEncodedLen as _, Height, IndexedBlock, OrchardCompactTx, + OrchardTxList, Outpoint, SaplingCompactTx, SaplingTxList, StatusType, TransparentCompactTx, + TransparentTxList, TxInCompact, TxLocation, TxOutCompact, TxidList, ZainoVersionedSerde as _, }; +use zaino_proto::proto::{compact_formats::CompactBlock, utils::PoolTypeFilter}; use zebra_chain::parameters::NetworkKind; use zebra_state::HashOrHeight; @@ -46,8 +71,11 @@ use tracing::{error, info, warn}; // ───────────────────────── Schema v1 constants ───────────────────────── /// Full V1 schema text file. -// 1. Bring the *exact* ASCII description of the on-disk layout into the binary -// at compile-time. The path is relative to this source file. +/// +/// This is the exact ASCII description of the V1 on-disk layout embedded into the binary at +/// compile-time. The path is relative to this source file. +/// +/// 1. Bring the *exact* ASCII description of the on-disk layout into the binary at compile-time. pub(crate) const DB_SCHEMA_V1_TEXT: &str = include_str!("db_schema_v1_0.txt"); /* @@ -73,6 +101,9 @@ pub(crate) const DB_SCHEMA_V1_TEXT: &str = include_str!("db_schema_v1_0.txt"); */ /// *Current* database V1 schema hash, used for version validation. +/// +/// This value is compared against the schema hash stored in the metadata record to detect schema +/// drift without a corresponding version bump. pub(crate) const DB_SCHEMA_V1_HASH: [u8; 32] = [ 0xbc, 0x13, 0x52, 0x47, 0xb4, 0x6b, 0xb4, 0x6a, 0x4a, 0x97, 0x1e, 0x4c, 0x27, 0x07, 0x82, 0x6f, 0x80, 0x95, 0xe6, 0x62, 0xb6, 0x91, 0x9d, 0x28, 0x87, 0x2c, 0x71, 0xb6, 0xbd, 0x67, 0x65, 0x93, @@ -87,6 +118,10 @@ pub(crate) const DB_VERSION_V1: DbVersion = DbVersion { // ───────────────────────── ZainoDb v1 Capabilities ───────────────────────── +/// [`DbRead`] capability implementation for [`DbV1`]. +/// +/// This trait is the read-only surface used by higher layers. 
Methods typically delegate to +/// inherent async helpers that enforce validated reads where required. #[async_trait] impl DbRead for DbV1 { async fn db_height(&self) -> Result, FinalisedStateError> { @@ -126,6 +161,10 @@ impl DbRead for DbV1 { } } +/// [`DbWrite`] capability implementation for [`DbV1`]. +/// +/// This trait represents the mutating surface (append / delete tip / update metadata). Writes are +/// performed via LMDB write transactions and validated before becoming visible as “known-good”. #[async_trait] impl DbWrite for DbV1 { async fn write_block(&self, block: IndexedBlock) -> Result<(), FinalisedStateError> { @@ -145,6 +184,9 @@ impl DbWrite for DbV1 { } } +/// [`DbCore`] capability implementation for [`DbV1`]. +/// +/// This trait exposes lifecycle operations and a high-level status indicator. #[async_trait] impl DbCore for DbV1 { fn status(&self) -> StatusType { @@ -168,6 +210,9 @@ impl DbCore for DbV1 { } } +/// [`BlockCoreExt`] capability implementation for [`DbV1`]. +/// +/// Provides access to block headers, txid lists, and transaction location mapping. #[async_trait] impl BlockCoreExt for DbV1 { async fn get_block_header( @@ -212,6 +257,10 @@ impl BlockCoreExt for DbV1 { } } +/// [`BlockTransparentExt`] capability implementation for [`DbV1`]. +/// +/// Provides access to transparent compact transaction data at both per-transaction and per-block +/// granularity. #[async_trait] impl BlockTransparentExt for DbV1 { async fn get_transparent( @@ -237,6 +286,10 @@ impl BlockTransparentExt for DbV1 { } } +/// [`BlockShieldedExt`] capability implementation for [`DbV1`]. +/// +/// Provides access to Sapling / Orchard compact transaction data and per-block commitment tree +/// metadata. #[async_trait] impl BlockShieldedExt for DbV1 { async fn get_sapling( @@ -299,16 +352,34 @@ impl BlockShieldedExt for DbV1 { } } +/// [`CompactBlockExt`] capability implementation for [`DbV1`]. +/// +/// Exposes `zcash_client_backend`-compatible compact blocks derived from stored header + shielded +/// transaction data. #[async_trait] impl CompactBlockExt for DbV1 { async fn get_compact_block( &self, height: Height, + pool_types: PoolTypeFilter, ) -> Result { - self.get_compact_block(height).await + self.get_compact_block(height, pool_types).await + } + + async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result { + self.get_compact_block_stream(start_height, end_height, pool_types) + .await } } +/// [`IndexedBlockExt`] capability implementation for [`DbV1`]. +/// +/// Exposes reconstructed [`IndexedBlock`] values from stored per-height entries. #[async_trait] impl IndexedBlockExt for DbV1 { async fn get_chain_block( @@ -319,6 +390,10 @@ impl IndexedBlockExt for DbV1 { } } +/// [`TransparentHistExt`] capability implementation for [`DbV1`]. +/// +/// Provides address history queries built over the LMDB `DUP_SORT`/`DUP_FIXED` address-history +/// database. #[async_trait] impl TransparentHistExt for DbV1 { async fn addr_records( @@ -383,50 +458,68 @@ impl TransparentHistExt for DbV1 { // ───────────────────────── ZainoDb v1 Implementation ───────────────────────── -/// Zaino’s Finalised state database V1. -/// Implements a persistent LMDB-backed chain index for fast read access and verified data. #[derive(Debug)] +/// Zaino’s Finalised State database V1. +/// +/// This type owns an LMDB [`Environment`] and a fixed set of named databases representing the V1 +/// schema. 
It implements the capability traits used by the rest of the chain indexer. +/// +/// Data is stored per-height in “best chain” order and is validated (checksums and continuity) +/// before being treated as reliable for downstream reads. pub(crate) struct DbV1 { /// Shared LMDB environment. env: Arc, - /// Block headers: `Height` -> `StoredEntry` + /// Block headers: `Height` -> `StoredEntryVar` /// /// Stored per-block, in order. headers: Database, - /// Txids: `Height` -> `StoredEntry` + + /// Txids: `Height` -> `StoredEntryVar` /// /// Stored per-block, in order. txids: Database, - /// Transparent: `Height` -> `StoredEntry>` + + /// Transparent: `Height` -> `StoredEntryVar>` /// /// Stored per-block, in order. transparent: Database, - /// Sapling: `Height` -> `StoredEntry>` + + /// Sapling: `Height` -> `StoredEntryVar>` /// /// Stored per-block, in order. sapling: Database, - /// Orchard: `Height` -> `StoredEntry>` + + /// Orchard: `Height` -> `StoredEntryVar>` /// /// Stored per-block, in order. orchard: Database, - /// Block commitment tree data: `Height` -> `StoredEntry>` + + /// Block commitment tree data: `Height` -> `StoredEntryFixed>` /// /// Stored per-block, in order. commitment_tree_data: Database, - /// Heights: `Hash` -> `StoredEntry` + + /// Heights: `Hash` -> `StoredEntryFixed` /// /// Used for hash based fetch of the best chain (and random access). heights: Database, - /// Spent outpoints: `Outpoint` -> `StoredEntry>` + + /// Spent outpoints: `Outpoint` -> `StoredEntryFixed>` /// /// Used to check spent status of given outpoints, retuning spending tx. spent: Database, - /// Transparent address history: `AddrScript` -> `StoredEntry` + + /// Transparent address history: `AddrScript` -> duplicate values of `StoredEntryFixed`. + /// + /// Stored as an LMDB `DUP_SORT | DUP_FIXED` database keyed by address script bytes. Each duplicate + /// value is a fixed-size entry encoding one address event (mined output or spending input), + /// including flags and checksum. /// /// Used to search all transparent address indexes (txids, utxos, balances, deltas) address_history: Database, - /// Metadata: singleton entry "metadata" -> `StoredEntry` + + /// Metadata: singleton entry "metadata" -> `StoredEntryFixed` metadata: Database, /// Contiguous **water-mark**: every height ≤ `validated_tip` is known-good. @@ -434,6 +527,7 @@ pub(crate) struct DbV1 { /// Wrapped in an `Arc` so the background validator and any foreground tasks /// all see (and update) the **same** atomic. validated_tip: Arc, + /// Heights **above** the tip that have also been validated. /// /// Whenever the next consecutive height is inserted we pop it @@ -441,7 +535,7 @@ pub(crate) struct DbV1 { /// grows beyond the number of “holes” in the sequence. validated_set: DashSet, - /// Database handler task handle. + /// Background validator / maintenance task handle. db_handler: Option>, /// ZainoDB status. @@ -451,13 +545,23 @@ pub(crate) struct DbV1 { config: BlockCacheConfig, } +/// Inherent implementation for [`DbV1`]. +/// +/// This block contains: +/// - environment / database setup (`spawn`, `open_or_create_db`, schema checks), +/// - background validation task management, +/// - write/delete operations for finalised blocks, +/// - validated read fetchers used by the capability trait implementations, and +/// - internal validation / indexing helpers. impl DbV1 { - /// Spawns a new [`DbV1`] and syncs the FinalisedState to the servers finalised state. 
- /// - /// Uses ReadStateService to fetch chain data if given else uses JsonRPC client. + /// Spawns a new [`DbV1`] and opens (or creates) the LMDB environment for the configured network. /// - /// Inputs: - /// - config: ChainIndexConfig. + /// This method: + /// - chooses a versioned path suffix (`...//v1`), + /// - configures LMDB map size and reader slots, + /// - opens or creates all V1 named databases, + /// - validates or initializes the `"metadata"` record (schema hash + version), and + /// - spawns the background validator / maintenance task. pub(crate) async fn spawn(config: &BlockCacheConfig) -> Result { info!("Launching ZainoDB"); @@ -579,7 +683,9 @@ impl DbV1 { self.status.load() } - /// Awaits until the DB returns a Ready status. + /// Waits until the DB reaches [`StatusType::Ready`]. + /// + /// NOTE: This does not currently backpressure on LMDB reader availability. /// /// TODO: check db for free readers and wait if busy. pub(crate) async fn wait_until_ready(&self) { @@ -598,11 +704,11 @@ impl DbV1 { /// Spawns the background validator / maintenance task. /// - /// * **Startup** – runs a full‐DB validation pass (`initial_root_scan` → - /// `initial_block_scan`). - /// * **Steady-state** – every 5 s tries to validate the next block that - /// appeared after the current `validated_tip`. - /// Every 60 s it also calls `clean_trailing()` to purge stale reader slots. + /// The task runs: + /// - **Startup:** full validation passes (`initial_spent_scan`, `initial_address_history_scan`, + /// `initial_block_scan`). + /// - **Steady state:** periodically attempts to validate the next height after `validated_tip`. + /// Separately, it performs periodic trailing-reader cleanup via `clean_trailing()`. async fn spawn_handler(&mut self) -> Result<(), FinalisedStateError> { // Clone everything the task needs so we can move it into the async block. let zaino_db = Self { @@ -719,7 +825,7 @@ impl DbV1 { } } - /// Validate every stored `TxLocation`. + /// Validates every stored spent-outpoint entry (`Outpoint` -> `TxLocation`) by checksum. async fn initial_spent_scan(&self) -> Result<(), FinalisedStateError> { let env = self.env.clone(); let spent = self.spent; @@ -746,7 +852,7 @@ impl DbV1 { .map_err(|e| FinalisedStateError::Custom(format!("Tokio task error: {e}")))? } - /// Validate every stored `AddrEventBytes`. + /// Validates every stored address-history record (`AddrScript` duplicates of `AddrEventBytes`) by checksum. async fn initial_address_history_scan(&self) -> Result<(), FinalisedStateError> { let env = self.env.clone(); let address_history = self.address_history; @@ -774,7 +880,7 @@ impl DbV1 { .map_err(|e| FinalisedStateError::Custom(format!("spawn_blocking failed: {e}")))? } - /// Scan the whole finalised chain once at start-up and validate every block. + /// Scans the whole finalised chain once at start-up and validates every block by checksum and continuity. async fn initial_block_scan(&self) -> Result<(), FinalisedStateError> { let zaino_db = Self { env: Arc::clone(&self.env), @@ -844,9 +950,7 @@ impl DbV1 { self.status.store(StatusType::Syncing); let block_hash = *block.index().hash(); let block_hash_bytes = block_hash.to_bytes()?; - let block_height = block.index().height().ok_or(FinalisedStateError::Custom( - "finalised state received non finalised block".to_string(), - ))?; + let block_height = block.index().height(); let block_height_bytes = block_height.to_bytes()?; // Check if this specific block already exists (idempotent write support for shared DB). 
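The background-task lifecycle described above (startup scans, then periodic steady-state validation plus trailing-reader cleanup) can be sketched as follows. The type and method names are stand-ins for the real task body; the 5 s / 60 s cadence mirrors the intervals mentioned in the previous revision of the `spawn_handler` doc comment:

```rust
use std::{sync::Arc, time::Duration};

// Stand-in type; the real task closes over a DbV1 clone.
struct Validator;

impl Validator {
    async fn initial_scans(&self) { /* spent, address-history, and block scans */ }
    async fn validate_next_after_tip(&self) { /* try height == validated_tip + 1 */ }
    async fn clean_trailing(&self) { /* purge stale LMDB reader slots */ }
}

async fn run(validator: Arc<Validator>) {
    // Startup: full validation passes over the stored data.
    validator.initial_scans().await;

    // Steady state: two independent periodic duties.
    let mut validate_tick = tokio::time::interval(Duration::from_secs(5));
    let mut cleanup_tick = tokio::time::interval(Duration::from_secs(60));
    loop {
        tokio::select! {
            _ = validate_tick.tick() => validator.validate_next_after_tip().await,
            _ = cleanup_tick.tick() => validator.clean_trailing().await,
        }
    }
}
```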
@@ -923,12 +1027,7 @@ } // Build DBHeight - let height_entry = StoredEntryFixed::new( - &block_hash_bytes, - block.index().height().ok_or(FinalisedStateError::Custom( - "finalised state received non finalised block".to_string(), - ))?, - ); + let height_entry = StoredEntryFixed::new(&block_hash_bytes, block.index().height()); // Build header let header_entry = StoredEntryVar::new( @@ -1054,7 +1153,7 @@ ); } else { return Err(FinalisedStateError::InvalidBlock { - height: block.height().expect("already checked height is some").0, + height: block.height().0, hash: *block.hash(), reason: "Invalid block data: invalid transparent input.".to_string(), }); @@ -1246,10 +1345,7 @@ info!( "Successfully committed block {} at height {} to ZainoDB.", &block.index().hash(), - &block - .index() - .height() - .expect("height always some in the finalised state") + &block.index().height() ); Ok(()) @@ -1409,19 +1505,12 @@ block: &IndexedBlock, ) -> Result<(), FinalisedStateError> { // Check block height and hash - let block_height = block - .index() - .height() - .ok_or(FinalisedStateError::InvalidBlock { - height: 0, - hash: *block.hash(), - reason: "Invalid block data: Block does not contain finalised height".to_string(), - })?; + let block_height = block.index().height(); let block_height_bytes = block_height .to_bytes() .map_err(|_| FinalisedStateError::InvalidBlock { - height: block.height().expect("already checked height is some").0, + height: block.height().0, hash: *block.hash(), reason: "Corrupt block data: failed to serialise hash".to_string(), })?; @@ -1431,7 +1520,7 @@ block_hash .to_bytes() .map_err(|_| FinalisedStateError::InvalidBlock { - height: block.height().expect("already checked height is some").0, + height: block.height().0, hash: *block.hash(), reason: "Corrupt block data: failed to serialise hash".to_string(), })?; @@ -1517,12 +1606,12 @@ *prev_outpoint.prev_txid(), )) .map_err(|e| FinalisedStateError::InvalidBlock { - height: block.height().expect("already checked height is some").0, + height: block.height().0, hash: *block.hash(), reason: e.to_string(), })? .ok_or_else(|| FinalisedStateError::InvalidBlock { - height: block.height().expect("already checked height is some").0, + height: block.height().0, hash: *block.hash(), reason: "Invalid block data: invalid txid data.".to_string(), })?; @@ -1540,7 +1629,7 @@ ); } else { return Err(FinalisedStateError::InvalidBlock { - height: block.height().expect("already checked height is some").0, + height: block.height().0, hash: *block.hash(), reason: "Invalid block data: invalid transparent input.".to_string(), }); @@ -1804,11 +1893,21 @@ /// Fetches block headers for the given height range. /// /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height. + /// This could be updated by following the cursor-stepping example in + /// get_compact_block_stream. async fn get_block_range_headers( &self, start: Height, end: Height, ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + self.validate_block_range(start, end).await?; let start_bytes = start.to_bytes()?; let end_bytes = end.to_bytes()?; @@ -1945,11 +2044,21 @@ /// Fetches block txids for the given height range. /// /// Uses cursor based fetch.
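All of these range fetchers share the same cursor-walk shape. A stand-alone sketch of that pattern, assuming fixed-width big-endian keys (so byte order equals height order) and the `lmdb` crate's 0.14-style cursor iterator, which yields `Result` items:

```rust
use lmdb::{Cursor as _, Transaction as _};

// Sketch: collect values for the inclusive key range [start_key, end_key].
fn range_values(
    txn: &lmdb::RoTransaction<'_>,
    db: lmdb::Database,
    start_key: &[u8],
    end_key: &[u8],
) -> Result<Vec<Vec<u8>>, lmdb::Error> {
    let mut out = Vec::new();
    let mut cursor = txn.open_ro_cursor(db)?;
    // iter_from positions at the first key >= start_key (MDB_SET_RANGE).
    for item in cursor.iter_from(start_key) {
        let (key, value) = item?;
        if key > end_key {
            break; // walked past the inclusive end of the range
        }
        out.push(value.to_vec());
    }
    Ok(out)
}
```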
+ /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height. + /// This could be updated by following the cursor-stepping example in + /// get_compact_block_stream. async fn get_block_range_txids( &self, start: Height, end: Height, ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + self.validate_block_range(start, end).await?; let start_bytes = start.to_bytes()?; let end_bytes = end.to_bytes()?; @@ -2104,11 +2213,21 @@ /// Fetches block transparent tx data for the given height range. /// /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height. + /// This could be updated by following the cursor-stepping example in + /// get_compact_block_stream. async fn get_block_range_transparent( &self, start: Height, end: Height, ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + self.validate_block_range(start, end).await?; let start_bytes = start.to_bytes()?; let end_bytes = end.to_bytes()?; @@ -2265,11 +2384,21 @@ /// Fetches block sapling tx data for the given height range. /// /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height. + /// This could be updated by following the cursor-stepping example in + /// get_compact_block_stream. async fn get_block_range_sapling( &self, start: Height, end: Height, ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + self.validate_block_range(start, end).await?; let start_bytes = start.to_bytes()?; let end_bytes = end.to_bytes()?; @@ -2425,11 +2554,21 @@ /// Fetches block orchard tx data for the given height range. /// /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height. + /// This could be updated by following the cursor-stepping example in + /// get_compact_block_stream. async fn get_block_range_orchard( &self, start: Height, end: Height, ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + self.validate_block_range(start, end).await?; let start_bytes = start.to_bytes()?; let end_bytes = end.to_bytes()?; @@ -2498,11 +2637,21 @@ /// Fetches block commitment tree data for the given height range. /// /// Uses cursor based fetch. + /// + /// NOTE: Currently this method only fetches ranges where start_height <= end_height. + /// This could be updated by following the cursor-stepping example in + /// get_compact_block_stream. async fn get_block_range_commitment_tree_data( &self, start: Height, end: Height, ) -> Result, FinalisedStateError> { + if end.0 < start.0 { + return Err(FinalisedStateError::Custom( + "invalid block range: end < start".to_string(), + )); + } + self.validate_block_range(start, end).await?; let start_bytes = start.to_bytes()?; let end_bytes = end.to_bytes()?; @@ -3066,11 +3215,10 @@ } /// Returns the CompactBlock for the given Height. - /// - /// TODO: Add separate range fetch method!
async fn get_compact_block( &self, height: Height, + pool_types: PoolTypeFilter, ) -> Result { let validated_height = self .resolve_validated_hash_or_height(HashOrHeight::Height(height.into())) @@ -3080,7 +3228,7 @@ impl DbV1 { tokio::task::block_in_place(|| { let txn = self.env.begin_ro_txn()?; - // Fetch header data + // ----- Fetch Header ----- let raw = match txn.get(self.headers, &height_bytes) { Ok(val) => val, Err(lmdb::Error::NotFound) => { @@ -3094,7 +3242,7 @@ impl DbV1 { .map_err(|e| FinalisedStateError::Custom(format!("header decode error: {e}")))? .inner(); - // fetch transaction data + // ----- Fetch Txids ----- let raw = match txn.get(self.txids, &height_bytes) { Ok(val) => val, Err(lmdb::Error::NotFound) => { @@ -3104,42 +3252,86 @@ impl DbV1 { } Err(e) => return Err(FinalisedStateError::LmdbError(e)), }; - let txids_list = StoredEntryVar::::from_bytes(raw) - .map_err(|e| FinalisedStateError::Custom(format!("txids decode error: {e}")))? - .inner() - .clone(); - let txids = txids_list.txids(); + let txids_stored_entry_var = StoredEntryVar::::from_bytes(raw) + .map_err(|e| FinalisedStateError::Custom(format!("txids decode error: {e}")))?; + let txids = txids_stored_entry_var.inner().txids(); - let raw = match txn.get(self.sapling, &height_bytes) { - Ok(val) => val, - Err(lmdb::Error::NotFound) => { - return Err(FinalisedStateError::DataUnavailable( - "block data missing from db".into(), - )); - } - Err(e) => return Err(FinalisedStateError::LmdbError(e)), + // ----- Fetch Transparent Tx Data ----- + let transparent_stored_entry_var = if pool_types.includes_transparent() { + let raw = match txn.get(self.transparent, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + Some( + StoredEntryVar::::from_bytes(raw).map_err(|e| { + FinalisedStateError::Custom(format!("transparent decode error: {e}")) + })?, + ) + } else { + None + }; + let transparent = match transparent_stored_entry_var.as_ref() { + Some(stored_entry_var) => stored_entry_var.inner().tx(), + None => &[], }; - let sapling_list = StoredEntryVar::::from_bytes(raw) - .map_err(|e| FinalisedStateError::Custom(format!("sapling decode error: {e}")))? - .inner() - .clone(); - let sapling = sapling_list.tx(); - let raw = match txn.get(self.orchard, &height_bytes) { - Ok(val) => val, - Err(lmdb::Error::NotFound) => { - return Err(FinalisedStateError::DataUnavailable( - "block data missing from db".into(), - )); - } - Err(e) => return Err(FinalisedStateError::LmdbError(e)), + // ----- Fetch Sapling Tx Data ----- + let sapling_stored_entry_var = if pool_types.includes_sapling() { + let raw = match txn.get(self.sapling, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + Some( + StoredEntryVar::::from_bytes(raw).map_err(|e| { + FinalisedStateError::Custom(format!("sapling decode error: {e}")) + })?, + ) + } else { + None + }; + let sapling = match sapling_stored_entry_var.as_ref() { + Some(stored_entry_var) => stored_entry_var.inner().tx(), + None => &[], }; - let orchard_list = StoredEntryVar::::from_bytes(raw) - .map_err(|e| FinalisedStateError::Custom(format!("orchard decode error: {e}")))? 
- .inner() - .clone(); - let orchard = orchard_list.tx(); + // ----- Fetch Orchard Tx Data ----- + let orchard_stored_entry_var = if pool_types.includes_orchard() { + let raw = match txn.get(self.orchard, &height_bytes) { + Ok(val) => val, + Err(lmdb::Error::NotFound) => { + return Err(FinalisedStateError::DataUnavailable( + "block data missing from db".into(), + )); + } + Err(e) => return Err(FinalisedStateError::LmdbError(e)), + }; + + Some( + StoredEntryVar::::from_bytes(raw).map_err(|e| { + FinalisedStateError::Custom(format!("orchard decode error: {e}")) + })?, + ) + } else { + None + }; + let orchard = match orchard_stored_entry_var.as_ref() { + Some(stored_entry_var) => stored_entry_var.inner().tx(), + None => &[], + }; + + // ----- Construct CompactTx ----- let vtx: Vec = txids .iter() .enumerate() @@ -3177,23 +3369,47 @@ impl DbV1 { }) .unwrap_or_default(); - // SKIP transparent-only txs: - if spends.is_empty() && outputs.is_empty() && actions.is_empty() { + let (vin, vout) = transparent + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|t| (t.compact_vin(), t.compact_vout())) + .unwrap_or_default(); + + // Omit transactions that have no elements in any requested pool type. + // + // This keeps `vtx` compact (it only contains transactions relevant to the caller’s pool filter), + // but it also means: + // - `vtx.len()` may be smaller than the block transaction count, and + // - transaction indices in `vtx` may be non-contiguous. + // Consumers must use `CompactTx.index` (the original transaction position in the block) rather + // than assuming `vtx` preserves block order densely. + // + // TODO: Re-evaluate whether omitting "empty-for-filter" transactions is the desired API behaviour. + // Some clients may expect a position-preserving representation (one entry per txid), even if + // the per-pool fields are empty for a given filter. + if spends.is_empty() + && outputs.is_empty() + && actions.is_empty() + && vin.is_empty() + && vout.is_empty() + { return None; } Some(zaino_proto::proto::compact_formats::CompactTx { index: i as u64, - hash: txid.0.to_vec(), + txid: txid.0.to_vec(), fee: 0, spends, outputs, actions, + vin, + vout, }) }) .collect(); - // fetch commitment tree data + // ----- Fetch Commitment Tree Data ----- let raw = match txn.get(self.commitment_tree_data, &height_bytes) { Ok(val) => val, Err(lmdb::Error::NotFound) => { @@ -3203,7 +3419,6 @@ impl DbV1 { } Err(e) => return Err(FinalisedStateError::LmdbError(e)), }; - let commitment_tree_data: CommitmentTreeData = *StoredEntryFixed::from_bytes(raw) .map_err(|e| { FinalisedStateError::Custom(format!("commitment_tree decode error: {e}")) @@ -3215,14 +3430,10 @@ impl DbV1 { orchard_commitment_tree_size: commitment_tree_data.sizes().orchard(), }; - // Construct CompactBlock + // ----- Construct CompactBlock ----- Ok(zaino_proto::proto::compact_formats::CompactBlock { proto_version: 4, - height: header - .index() - .height() - .expect("height always present in finalised state.") - .0 as u64, + height: header.index().height().0 as u64, hash: header.index().hash().0.to_vec(), prev_hash: header.index().parent_hash().0.to_vec(), // Is this safe? @@ -3234,6 +3445,994 @@ impl DbV1 { }) } + /// Streams `CompactBlock` messages for an inclusive height range. 
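Because empty-for-filter transactions are omitted from `vtx`, consumers must treat `CompactTx.index` as the authoritative in-block position rather than assuming dense ordering. A consumer-side sketch using the fields added for lightclient-protocol v0.4.0:

```rust
use zaino_proto::proto::compact_formats::CompactBlock;

// Sketch: recover each returned transaction's original block position.
// `vtx` may be sparse, so the positions can skip values (e.g. [0, 2, 5]).
fn original_positions(block: &CompactBlock) -> Vec<u64> {
    block.vtx.iter().map(|tx| tx.index).collect()
}
```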
+ /// + /// This implementation is designed for high-throughput lightclient serving: + /// - It performs a single cursor-walk over the headers database and keeps all other databases + /// (txids + optional pool-specific tx data + commitment tree data) strictly aligned to the + /// same LMDB key. + /// - It uses *short-lived* read transactions and periodically re-seeks by key, which: + /// - reduces the lifetime of LMDB reader slots, + /// - bounds the amount of data held in the same read snapshot, + /// - and prevents a single long stream from monopolising the environment’s read resources. + /// + /// Ordering / range semantics: + /// - The stream covers the inclusive range `[start_height, end_height]`. + /// - If `start_height <= end_height` the stream is ascending; otherwise it is descending. + /// - This function enforces *contiguous heights* in the headers database. Missing heights, key + /// ordering problems, or cursor desynchronisation are treated as internal errors because they + /// indicate database corruption or a violated storage invariant. + /// + /// Pool filtering: + /// - `pool_types` controls which per-transaction components are populated. + /// - Transactions that contain no elements in any requested pool are omitted from `vtx`. + /// The original transaction index is preserved in `CompactTx.index`. + /// + /// Concurrency model: + /// - Spawns a dedicated blocking task (`spawn_blocking`) which performs LMDB reads and decoding. + /// - Results are pushed into a bounded `mpsc` channel; backpressure is applied if the consumer + /// is slow. + /// + /// Errors: + /// - Database-missing conditions are sent downstream as `tonic::Status::not_found`. + /// - Decode failures, cursor desynchronisation, and invariant violations are sent as + /// `tonic::Status::internal`. + async fn get_compact_block_stream( + &self, + start_height: Height, + end_height: Height, + pool_types: PoolTypeFilter, + ) -> Result<CompactBlockStream, FinalisedStateError> { + let (validated_start_height, validated_end_height) = + self.validate_block_range(start_height, end_height).await?; + + let start_key_bytes = validated_start_height.to_bytes()?; + + // Direction is derived from the validated heights. This relies on `validate_block_range` + // preserving input ordering (i.e. not normalising to (min, max)). + let is_ascending = validated_start_height <= validated_end_height; + + // Bounded channel provides backpressure so the blocking task cannot run unbounded ahead of + // the gRPC consumer. + // + // TODO: Investigate whether channel size should be changed, added to config, or set dynamically based on resources. + let (sender, receiver) = + tokio::sync::mpsc::channel::<Result<CompactBlock, tonic::Status>>(128); + + // Clone the database environment. + let env = self.env.clone(); + + // Copy database handles into the blocking task. LMDB database handles are cheap, copyable IDs. + let headers_database = self.headers; + let txids_database = self.txids; + let transparent_database = self.transparent; + let sapling_database = self.sapling; + let orchard_database = self.orchard; + let commitment_tree_data_database = self.commitment_tree_data; + + tokio::task::spawn_blocking(move || { + /// Maximum number of blocks to stream per LMDB read transaction. + /// + /// The cursor-walk is resumed by re-seeking to the next expected height key. This keeps + /// read transactions short-lived and reduces pressure on LMDB reader slots.
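The backpressure behaviour of the bounded channel used above can be demonstrated in isolation. In this sketch (illustrative values only), `blocking_send` parks the producer thread whenever the consumer falls a full channel behind, and errors only once the receiver is dropped:

```rust
use tokio::sync::mpsc;

// Sketch: blocking producer + async consumer with a bounded channel.
fn spawn_bounded_producer() -> mpsc::Receiver<u64> {
    let (tx, rx) = mpsc::channel::<u64>(128);
    tokio::task::spawn_blocking(move || {
        for n in 0u64..10_000 {
            // Blocks (off the async runtime) while the channel is full.
            if tx.blocking_send(n).is_err() {
                return; // consumer gone, e.g. the RPC was cancelled
            }
        }
    });
    rx
}
```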
+ const BLOCKS_PER_READ_TRANSACTION: usize = 1024; + + // ===================================================================================== + // Helper functions + // ===================================================================================== + // + // These helpers keep the main streaming loop readable and ensure that any failure: + // - emits exactly one `tonic::Status` into the stream (best-effort), and then + // - terminates the blocking task. + // + // They intentionally return `Option`/`Result` to allow early-exit with minimal boilerplate. + + /// Send a `tonic::Status` downstream and ignore send errors. + /// + /// A send error means the receiver side has been dropped (e.g. client cancelled the RPC), + /// so the producer should terminate promptly. + fn send_status( + sender: &tokio::sync::mpsc::Sender>, + status: tonic::Status, + ) { + let _ = sender.blocking_send(Err(status)); + } + + /// Open a read-only cursor for `database` inside `txn`. + /// + /// On failure, emits an internal status and returns `None`. + fn open_ro_cursor_or_send<'txn>( + sender: &tokio::sync::mpsc::Sender>, + txn: &'txn lmdb::RoTransaction<'txn>, + database: lmdb::Database, + database_name: &'static str, + ) -> Option> { + match txn.open_ro_cursor(database) { + Ok(cursor) => Some(cursor), + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb open_ro_cursor({database_name}) failed: {error}" + )), + ); + None + } + } + } + + /// Position `cursor` exactly at `requested_key` using `MDB_SET_KEY`. + /// + /// Returns the `(key, value)` pair at that key. The returned `key` is expected to equal + /// `requested_key` (the function enforces this). + /// + /// Some LMDB bindings occasionally return `Ok((None, value))` for cursor operations. When + /// that happens: + /// - If `verify_on_none_key` is true, we call `MDB_GET_CURRENT` once to recover and verify + /// the current key. + /// - Otherwise we assume the cursor is correctly positioned and return `(requested_key, value)`. + /// + /// On `NotFound`, emits `not_found_status`. On other failures or verification failure, emits + /// `internal(...)`. In all error cases it returns `None`. + fn cursor_set_key_or_send<'txn>( + sender: &tokio::sync::mpsc::Sender>, + cursor: &lmdb::RoCursor<'txn>, + requested_key: &'txn [u8], + cursor_name: &'static str, + not_found_status: tonic::Status, + verify_on_none_key: bool, + ) -> Option<(&'txn [u8], &'txn [u8])> { + match cursor.get(Some(requested_key), None, lmdb_sys::MDB_SET_KEY) { + Ok((Some(found_key), found_val)) => { + if found_key != requested_key { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb SET_KEY({cursor_name}) returned non-matching key" + )), + ); + None + } else { + Some((found_key, found_val)) + } + } + Ok((None, found_val)) => { + // Some builds / bindings can return None for the key for certain ops. If requested, + // verify the cursor actually landed on the requested key via GET_CURRENT. 
+ if verify_on_none_key { + let (recovered_key_opt, recovered_val) = + match cursor.get(None, None, lmdb_sys::MDB_GET_CURRENT) { + Ok(pair) => pair, + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor GET_CURRENT({cursor_name}) failed: {error}" + )), + ); + return None; + } + }; + + let recovered_key = match recovered_key_opt { + Some(key) => key, + None => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb GET_CURRENT({cursor_name}) returned no key" + )), + ); + return None; + } + }; + + if recovered_key != requested_key { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb SET_KEY({cursor_name}) landed on unexpected key: expected {:?}, got {:?}", + requested_key, + recovered_key, + )), + ); + return None; + } + + Some((recovered_key, recovered_val)) + } else { + // Assume SET_KEY success implies match; return the requested key + value. + Some((requested_key, found_val)) + } + } + Err(lmdb::Error::NotFound) => { + send_status(sender, not_found_status); + None + } + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor SET_KEY({cursor_name}) failed: {error}" + )), + ); + None + } + } + } + + /// Step the headers cursor using `step_op` and return the next `(key, value)` pair. + /// + /// This is special-cased because the headers cursor is the *driving cursor*; all other + /// cursors must remain aligned to whatever key the headers cursor moves to. + /// + /// Returns: + /// - `Ok(Some((k, v)))` when the cursor moved successfully. + /// - `Ok(None)` when the cursor reached the end (`NotFound`). + /// - `Err(())` when an error status has been emitted and streaming must stop. + #[allow(clippy::complexity)] + fn headers_step_or_send<'txn>( + sender: &tokio::sync::mpsc::Sender>, + headers_cursor: &lmdb::RoCursor<'txn>, + step_op: lmdb_sys::MDB_cursor_op, + ) -> Result, ()> { + match headers_cursor.get(None, None, step_op) { + Ok((Some(found_key), found_val)) => Ok(Some((found_key, found_val))), + Ok((None, _found_val)) => { + // Some bindings can return None for the key; recover via GET_CURRENT. + let (recovered_key_opt, recovered_val) = + match headers_cursor.get(None, None, lmdb_sys::MDB_GET_CURRENT) { + Ok(pair) => pair, + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor GET_CURRENT(headers) failed: {error}" + )), + ); + return Err(()); + } + }; + let recovered_key = match recovered_key_opt { + Some(key) => key, + None => { + send_status( + sender, + tonic::Status::internal( + "lmdb GET_CURRENT(headers) returned no key".to_string(), + ), + ); + return Err(()); + } + }; + Ok(Some((recovered_key, recovered_val))) + } + Err(lmdb::Error::NotFound) => Ok(None), + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor step(headers) failed: {error}" + )), + ); + Err(()) + } + } + } + + /// Step a non-header cursor and enforce that it remains aligned to `expected_key`. + /// + /// The design invariant for this streamer is: + /// - the headers cursor chooses the next key + /// - every other cursor must produce a value at that *same* key (otherwise the per-height + /// databases are inconsistent or a cursor has desynchronised). + /// + /// Returns the value slice for `expected_key` on success. + /// On `NotFound`, emits `not_found_status`. + /// On key mismatch or other errors, emits an internal error. 
+ fn cursor_step_expect_key_or_send<'txn>( + sender: &tokio::sync::mpsc::Sender>, + cursor: &lmdb::RoCursor<'txn>, + step_op: lmdb_sys::MDB_cursor_op, + expected_key: &[u8], + cursor_name: &'static str, + not_found_status: tonic::Status, + ) -> Option<&'txn [u8]> { + match cursor.get(None, None, step_op) { + Ok((Some(found_key), found_val)) => { + if found_key != expected_key { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor desync({cursor_name}): expected key {:?}, got {:?}", + expected_key, found_key + )), + ); + None + } else { + Some(found_val) + } + } + Ok((None, _found_val)) => { + // Some bindings can return None for the key; recover via GET_CURRENT. + let (recovered_key_opt, recovered_val) = + match cursor.get(None, None, lmdb_sys::MDB_GET_CURRENT) { + Ok(pair) => pair, + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor GET_CURRENT({cursor_name}) failed: {error}" + )), + ); + return None; + } + }; + + let recovered_key = match recovered_key_opt { + Some(key) => key, + None => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb GET_CURRENT({cursor_name}) returned no key" + )), + ); + return None; + } + }; + + if recovered_key != expected_key { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor desync({cursor_name}): expected key {:?}, got {:?}", + expected_key, recovered_key + )), + ); + None + } else { + Some(recovered_val) + } + } + Err(lmdb::Error::NotFound) => { + send_status(sender, not_found_status); + None + } + Err(error) => { + send_status( + sender, + tonic::Status::internal(format!( + "lmdb cursor step({cursor_name}) failed: {error}" + )), + ); + None + } + } + } + + // ===================================================================================== + // Blocking streaming loop + // ===================================================================================== + + let step_op = if is_ascending { + lmdb_sys::MDB_NEXT + } else { + lmdb_sys::MDB_PREV + }; + + // Contiguous-height enforcement: we expect every emitted block to have exactly this height. + // This catches missing heights and cursor ordering/key-encoding problems early. + let mut expected_height = validated_start_height; + + // Key used to re-seek at the start of each transaction chunk. + // This begins at the start height and advances by exactly one height per emitted block. + let mut next_start_key_bytes: Vec = start_key_bytes; + + loop { + // Stop once we have emitted the inclusive end height. + if is_ascending { + if expected_height > validated_end_height { + return; + } + } else if expected_height < validated_end_height { + return; + } + + // Open a short-lived read transaction for this chunk. + // + // We intentionally drop the transaction regularly to keep reader slots available and + // to avoid holding a single snapshot for very large streams. + let txn = match env.begin_ro_txn() { + Ok(txn) => txn, + Err(error) => { + send_status( + &sender, + tonic::Status::internal(format!("lmdb begin_ro_txn failed: {error}")), + ); + return; + } + }; + + // Open cursors. Headers is the driving cursor; all others must remain key-aligned. 
+ let headers_cursor = + match open_ro_cursor_or_send(&sender, &txn, headers_database, "headers") { + Some(cursor) => cursor, + None => return, + }; + + let txids_cursor = + match open_ro_cursor_or_send(&sender, &txn, txids_database, "txids") { + Some(cursor) => cursor, + None => return, + }; + + let transparent_cursor = if pool_types.includes_transparent() { + match open_ro_cursor_or_send(&sender, &txn, transparent_database, "transparent") + { + Some(cursor) => Some(cursor), + None => return, + } + } else { + None + }; + + let sapling_cursor = if pool_types.includes_sapling() { + match open_ro_cursor_or_send(&sender, &txn, sapling_database, "sapling") { + Some(cursor) => Some(cursor), + None => return, + } + } else { + None + }; + + let orchard_cursor = if pool_types.includes_orchard() { + match open_ro_cursor_or_send(&sender, &txn, orchard_database, "orchard") { + Some(cursor) => Some(cursor), + None => return, + } + } else { + None + }; + + let commitment_tree_cursor = match open_ro_cursor_or_send( + &sender, + &txn, + commitment_tree_data_database, + "commitment_tree_data", + ) { + Some(cursor) => cursor, + None => return, + }; + + // Position headers cursor at the start key for this chunk. This is the authoritative key + // that all other cursors must align to. + let (current_key, mut raw_header_bytes) = match cursor_set_key_or_send( + &sender, + &headers_cursor, + next_start_key_bytes.as_slice(), + "headers", + tonic::Status::not_found(format!( + "missing header at requested start height key {:?}", + next_start_key_bytes + )), + true, // verify-on-none-key + ) { + Some(pair) => pair, + None => return, + }; + + // Align all other cursors to the exact same key. + let (_txids_key, mut raw_txids_bytes) = match cursor_set_key_or_send( + &sender, + &txids_cursor, + current_key, + "txids", + tonic::Status::not_found("block data missing from db (txids)"), + true, + ) { + Some(pair) => pair, + None => return, + }; + + let mut raw_transparent_bytes: Option<&[u8]> = + if let Some(cursor) = transparent_cursor.as_ref() { + let (_key, val) = match cursor_set_key_or_send( + &sender, + cursor, + current_key, + "transparent", + tonic::Status::not_found("block data missing from db (transparent)"), + true, + ) { + Some(pair) => pair, + None => return, + }; + Some(val) + } else { + None + }; + + let mut raw_sapling_bytes: Option<&[u8]> = + if let Some(cursor) = sapling_cursor.as_ref() { + let (_key, val) = match cursor_set_key_or_send( + &sender, + cursor, + current_key, + "sapling", + tonic::Status::not_found("block data missing from db (sapling)"), + true, + ) { + Some(pair) => pair, + None => return, + }; + Some(val) + } else { + None + }; + + let mut raw_orchard_bytes: Option<&[u8]> = + if let Some(cursor) = orchard_cursor.as_ref() { + let (_key, val) = match cursor_set_key_or_send( + &sender, + cursor, + current_key, + "orchard", + tonic::Status::not_found("block data missing from db (orchard)"), + true, + ) { + Some(pair) => pair, + None => return, + }; + Some(val) + } else { + None + }; + + let (_commitment_key, mut raw_commitment_tree_bytes) = match cursor_set_key_or_send( + &sender, + &commitment_tree_cursor, + current_key, + "commitment_tree_data", + tonic::Status::not_found("block data missing from db (commitment_tree_data)"), + true, + ) { + Some(pair) => pair, + None => return, + }; + + let mut blocks_streamed_in_transaction: usize = 0; + + loop { + // ----- Decode and validate block header ----- + let header: BlockHeaderData = match StoredEntryVar::from_bytes(raw_header_bytes) + 
.map_err(|error| format!("header decode error: {error}")) + { + Ok(entry) => *entry.inner(), + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + }; + + // Contiguous-height check: ensures cursor ordering and storage invariants are intact. + let current_height = header.index().height(); + if current_height != expected_height { + send_status( + &sender, + tonic::Status::internal(format!( + "missing height or out-of-order headers: expected {}, got {}", + expected_height.0, current_height.0 + )), + ); + return; + } + + // ----- Decode txids and optional pool data ----- + let txids_stored_entry_var = + match StoredEntryVar::::from_bytes(raw_txids_bytes) + .map_err(|error| format!("txids decode error: {error}")) + { + Ok(entry) => entry, + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + }; + let txids = txids_stored_entry_var.inner().txids(); + + // Each pool database stores a per-height vector aligned to the txids list: + // one entry per transaction index (typically `Option` per tx). + let transparent_entries: Option> = + if let Some(raw) = raw_transparent_bytes { + match StoredEntryVar::::from_bytes(raw) + .map_err(|error| format!("transparent decode error: {error}")) + { + Ok(entry) => Some(entry), + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + } + } else { + None + }; + + let sapling_entries: Option> = + if let Some(raw) = raw_sapling_bytes { + match StoredEntryVar::::from_bytes(raw) + .map_err(|error| format!("sapling decode error: {error}")) + { + Ok(entry) => Some(entry), + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + } + } else { + None + }; + + let orchard_entries: Option> = + if let Some(raw) = raw_orchard_bytes { + match StoredEntryVar::::from_bytes(raw) + .map_err(|error| format!("orchard decode error: {error}")) + { + Ok(entry) => Some(entry), + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + } + } else { + None + }; + + let transparent = match transparent_entries.as_ref() { + Some(entry) => entry.inner().tx(), + None => &[], + }; + let sapling = match sapling_entries.as_ref() { + Some(entry) => entry.inner().tx(), + None => &[], + }; + let orchard = match orchard_entries.as_ref() { + Some(entry) => entry.inner().tx(), + None => &[], + }; + + // Invariant: if a pool is requested, its per-height vector length must match txids. + if pool_types.includes_transparent() && transparent.len() != txids.len() { + send_status( + &sender, + tonic::Status::internal(format!( + "transparent list length mismatch at height {}: txids={}, transparent={}", + current_height.0, + txids.len(), + transparent.len(), + )), + ); + return; + } + if pool_types.includes_sapling() && sapling.len() != txids.len() { + send_status( + &sender, + tonic::Status::internal(format!( + "sapling list length mismatch at height {}: txids={}, sapling={}", + current_height.0, + txids.len(), + sapling.len(), + )), + ); + return; + } + if pool_types.includes_orchard() && orchard.len() != txids.len() { + send_status( + &sender, + tonic::Status::internal(format!( + "orchard list length mismatch at height {}: txids={}, orchard={}", + current_height.0, + txids.len(), + orchard.len(), + )), + ); + return; + } + + // ----- Build CompactTx list ----- + // + // `CompactTx.index` is the original transaction index within the block. 
+ // This implementation omits transactions that contain no elements in any requested pool type, + // which means: + // - `vtx.len()` may be smaller than the number of txids in the block, and + // - indices in `vtx` may be non-contiguous. + // Consumers must interpret `CompactTx.index` as authoritative. + // + // TODO: Re-evaluate whether omitting "empty-for-filter" transactions is the desired API behaviour. + // Some clients may expect a position-preserving representation (one entry per txid), even if + // the per-pool fields are empty for a given filter. + let mut vtx: Vec = + Vec::with_capacity(txids.len()); + + for (i, txid) in txids.iter().enumerate() { + let spends = sapling + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|s| { + s.spends() + .iter() + .map(|sp| sp.into_compact()) + .collect::>() + }) + .unwrap_or_default(); + + let outputs = sapling + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|s| { + s.outputs() + .iter() + .map(|o| o.into_compact()) + .collect::>() + }) + .unwrap_or_default(); + + let actions = orchard + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|o| { + o.actions() + .iter() + .map(|a| a.into_compact()) + .collect::>() + }) + .unwrap_or_default(); + + let (vin, vout) = transparent + .get(i) + .and_then(|opt| opt.as_ref()) + .map(|t| (t.compact_vin(), t.compact_vout())) + .unwrap_or_default(); + + // Omit transactions that have no elements in any requested pool type. + // + // Note that omission produces a sparse `vtx` (by original transaction index). Clients must use + // `CompactTx.index` rather than assuming contiguous ordering. + // + // TODO: Re-evaluate whether omission is the desired API behaviour for all consumers. + if spends.is_empty() + && outputs.is_empty() + && actions.is_empty() + && vin.is_empty() + && vout.is_empty() + { + continue; + } + + vtx.push(zaino_proto::proto::compact_formats::CompactTx { + index: i as u64, + txid: txid.0.to_vec(), + fee: 0, + spends, + outputs, + actions, + vin, + vout, + }); + } + + // ----- Decode commitment tree data and construct block ----- + let commitment_tree_data: CommitmentTreeData = + match StoredEntryFixed::from_bytes(raw_commitment_tree_bytes) + .map_err(|error| format!("commitment_tree decode error: {error}")) + { + Ok(entry) => *entry.inner(), + Err(message) => { + send_status(&sender, tonic::Status::internal(message)); + return; + } + }; + + let chain_metadata = zaino_proto::proto::compact_formats::ChainMetadata { + sapling_commitment_tree_size: commitment_tree_data.sizes().sapling(), + orchard_commitment_tree_size: commitment_tree_data.sizes().orchard(), + }; + + let compact_block = zaino_proto::proto::compact_formats::CompactBlock { + proto_version: 4, + height: header.index().height().0 as u64, + hash: header.index().hash().0.to_vec(), + prev_hash: header.index().parent_hash().0.to_vec(), + // NOTE: `time()` is stored in the DB as a wider integer; this cast assumes it is + // always representable in `u32` for the protobuf. + time: header.data().time() as u32, + header: Vec::new(), + vtx, + chain_metadata: Some(chain_metadata), + }; + + // Send the block downstream; if the receiver is gone, stop immediately. + if sender.blocking_send(Ok(compact_block)).is_err() { + return; + } + + // If we just emitted the inclusive end height, stop without stepping cursors further. + if current_height == validated_end_height { + return; + } + + blocks_streamed_in_transaction += 1; + + // Compute the next expected height (used both for contiguity checking and chunk re-seek). 
+ let next_expected_height = if is_ascending { + match expected_height.0.checked_add(1) { + Some(value) => Height(value), + None => { + send_status( + &sender, + tonic::Status::internal( + "expected_height overflow while iterating ascending" + .to_string(), + ), + ); + return; + } + } + } else { + match expected_height.0.checked_sub(1) { + Some(value) => Height(value), + None => { + send_status( + &sender, + tonic::Status::internal( + "expected_height underflow while iterating descending" + .to_string(), + ), + ); + return; + } + } + }; + + // Chunk boundary: drop the current read transaction after N blocks and re-seek in a new + // transaction on the next loop iteration. This avoids a single long-lived snapshot. + if blocks_streamed_in_transaction >= BLOCKS_PER_READ_TRANSACTION { + match next_expected_height.to_bytes() { + Ok(bytes) => { + next_start_key_bytes = bytes; + expected_height = next_expected_height; + break; + } + Err(error) => { + send_status( + &sender, + tonic::Status::internal(format!( + "height to_bytes failed at chunk boundary: {error}" + )), + ); + return; + } + } + } + + // Advance all cursors in lockstep. Headers drives the next key; all others must match it. + let next_headers = match headers_step_or_send(&sender, &headers_cursor, step_op) + { + Ok(value) => value, + Err(()) => return, + }; + + let (next_key, next_header_val) = match next_headers { + Some(pair) => pair, + None => { + // Headers ended early; if we have not reached the requested end height, the + // database no longer satisfies the contiguous-height invariant for this range. + if current_height != validated_end_height { + send_status( + &sender, + tonic::Status::internal(format!( + "headers cursor ended early at height {}; expected to reach {}", + current_height.0, validated_end_height.0 + )), + ); + } + return; + } + }; + + let next_txids_val = match cursor_step_expect_key_or_send( + &sender, + &txids_cursor, + step_op, + next_key, + "txids", + tonic::Status::not_found("block data missing from db (txids)"), + ) { + Some(val) => val, + None => return, + }; + + let next_transparent_val: Option<&[u8]> = if let Some(cursor) = + transparent_cursor.as_ref() + { + match cursor_step_expect_key_or_send( + &sender, + cursor, + step_op, + next_key, + "transparent", + tonic::Status::not_found("block data missing from db (transparent)"), + ) { + Some(val) => Some(val), + None => return, + } + } else { + None + }; + + let next_sapling_val: Option<&[u8]> = + if let Some(cursor) = sapling_cursor.as_ref() { + match cursor_step_expect_key_or_send( + &sender, + cursor, + step_op, + next_key, + "sapling", + tonic::Status::not_found("block data missing from db (sapling)"), + ) { + Some(val) => Some(val), + None => return, + } + } else { + None + }; + + let next_orchard_val: Option<&[u8]> = + if let Some(cursor) = orchard_cursor.as_ref() { + match cursor_step_expect_key_or_send( + &sender, + cursor, + step_op, + next_key, + "orchard", + tonic::Status::not_found("block data missing from db (orchard)"), + ) { + Some(val) => Some(val), + None => return, + } + } else { + None + }; + + let next_commitment_tree_val = match cursor_step_expect_key_or_send( + &sender, + &commitment_tree_cursor, + step_op, + next_key, + "commitment_tree_data", + tonic::Status::not_found( + "block data missing from db (commitment_tree_data)", + ), + ) { + Some(val) => val, + None => return, + }; + + raw_header_bytes = next_header_val; + raw_txids_bytes = next_txids_val; + raw_transparent_bytes = next_transparent_val; + raw_sapling_bytes = 
next_sapling_val; + raw_orchard_bytes = next_orchard_val; + raw_commitment_tree_bytes = next_commitment_tree_val; + + expected_height = next_expected_height; + } + } + }); + + Ok(CompactBlockStream::new(receiver)) + } + /// Fetch database metadata. async fn get_metadata(&self) -> Result { tokio::task::block_in_place(|| { @@ -3256,20 +4455,67 @@ impl DbV1 { } // *** Internal DB validation / varification *** - - /// Return `true` if *height* is already known-good. + // + // The finalised-state database supports **incremental, concurrency-safe validation** of blocks that + // have already been written to LMDB. + // + // Validation is tracked using two structures: + // + // - `validated_tip` (atomic u32): every height `<= validated_tip` is known-good (contiguous prefix). + // - `validated_set` (DashSet): a sparse set of individually validated heights `> validated_tip` + // (i.e., “holes” validated out-of-order). + // + // This scheme provides: + // - O(1) fast-path for the common case (`height <= validated_tip`), + // - O(1) expected membership tests above the tip, + // - and an efficient “coalescing” step that advances `validated_tip` when gaps are filled. + // + // IMPORTANT: + // - Validation here is *structural / integrity* validation of stored records plus basic chain + // continuity checks (parent hash, header merkle root vs txids). + // - It is intentionally “lightweight” and does **not** attempt full consensus verification. + // - NOTE / TODO: It is planned to add basic shielded tx data validation using the "block_commitments" + // field in [`BlockData`] however this is currently unimplemented. + + /// Return `true` if `height` is already known-good. + /// + /// Semantics: + /// - `height <= validated_tip` is always validated (contiguous prefix). + /// - For `height > validated_tip`, membership is tracked in `validated_set`. /// - /// O(1) look-ups: we check the tip first (fast) and only hit the DashSet - /// when `h > tip`. + /// Performance: + /// - O(1) in the fast-path (`height <= validated_tip`). + /// - O(1) expected for DashSet membership checks when `height > validated_tip`. + /// + /// Concurrency: + /// - `validated_tip` is read with `Acquire` so subsequent reads of dependent state in the same + /// thread are not reordered before the tip read. fn is_validated(&self, h: u32) -> bool { let tip = self.validated_tip.load(Ordering::Acquire); h <= tip || self.validated_set.contains(&h) } - /// Mark *height* as validated and coalesce contiguous ranges. + /// Mark `height` as validated and coalesce contiguous ranges into `validated_tip`. + /// + /// This method maintains the invariant: + /// - After completion, all heights `<= validated_tip` are validated. + /// - All validated heights `> validated_tip` remain represented in `validated_set`. + /// + /// Algorithm: + /// 1. If `height == validated_tip + 1`, attempt to atomically advance `validated_tip`. + /// 2. If that succeeds, repeatedly consume `validated_tip + 1` from `validated_set` and advance + /// `validated_tip` until the next height is not present. + /// 3. If `height > validated_tip + 1`, record it as an out-of-order validated “hole” in + /// `validated_set`. + /// 4. If `height <= validated_tip`, it is already covered by the contiguous prefix; no action. /// - /// 1. Insert it into the DashSet (if it was a “hole”). - /// 2. While `validated_tip + 1` is now present, pop it and advance the tip. + /// Concurrency: + /// - Uses CAS to ensure only one thread advances `validated_tip` at a time. 
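The tip-plus-holes bookkeeping described above can be captured in a small stand-alone sketch. The real logic lives on `DbV1`; this simplified version assumes the `dashmap` crate and elides the LMDB-backed validation itself:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

use dashmap::DashSet;

struct Validated {
    tip: AtomicU32,      // every height <= tip is known-good
    holes: DashSet<u32>, // validated heights above the tip
}

impl Validated {
    fn is_validated(&self, h: u32) -> bool {
        h <= self.tip.load(Ordering::Acquire) || self.holes.contains(&h)
    }

    fn mark_validated(&self, h: u32) {
        let mut next = h;
        loop {
            let tip = self.tip.load(Ordering::Acquire);
            if next <= tip {
                return; // already covered by the contiguous prefix
            }
            if next == tip + 1 {
                // Try to advance the contiguous prefix; on a lost race,
                // re-read the tip and retry.
                if self
                    .tip
                    .compare_exchange(tip, next, Ordering::Release, Ordering::Acquire)
                    .is_err()
                {
                    continue;
                }
                // Coalesce: consume any hole now adjacent to the new tip.
                next += 1;
                if self.holes.remove(&next).is_none() {
                    return;
                }
                // `next` was a validated hole; loop to advance the tip again.
            } else {
                self.holes.insert(next); // out-of-order validation above the tip
                return;
            }
        }
    }
}
```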
+ /// - Stores after successful coalescing use `Release` so other threads observing the new tip do not + /// see older state re-ordered after the tip update. + /// + /// NOTE: + /// - This function is intentionally tolerant of races: redundant inserts / removals are benign. fn mark_validated(&self, h: u32) { let mut next = h; loop { @@ -3306,9 +4552,40 @@ impl DbV1 { /// Lightweight per-block validation. /// - /// *Confirms the checksum* in each of the three per-block tables. + /// This validates the internal consistency of the LMDB-backed records for the specified + /// `(height, hash)` pair and marks the height as validated on success. /// - /// WARNING: This is a blocking function and **MUST** be called within a blocking thread / task. + /// Validations performed: + /// - Per-height tables: checksum + deserialization integrity for: + /// - `headers` (BlockHeaderData) + /// - `txids` (TxidList) + /// - `transparent` (TransparentTxList) + /// - `sapling` (SaplingTxList) + /// - `orchard` (OrchardTxList) + /// - `commitment_tree_data` (CommitmentTreeData; fixed entry) + /// - Hash→height mapping: + /// - checksum integrity under `hash_key` + /// - mapped height equals the requested `height` + /// - Chain continuity: + /// - for `height > 1`, the block header `parent_hash` equals the stored hash at `height - 1` + /// - Header merkle root: + /// - merkle root computed from `txids` matches the header’s merkle root + /// - Transparent indices / histories: + /// - each non-coinbase transparent input must have a `spent` record pointing at this tx + /// - each transparent output must have an addrhist mined record + /// - each non-coinbase transparent input must have an addrhist input record + /// + /// Fast-path: + /// - If `height` is already known validated (`is_validated`), this is a no-op. + /// + /// Error semantics: + /// - Returns `FinalisedStateError::InvalidBlock { .. }` when any integrity/continuity check fails. + /// - Returns LMDB errors for underlying storage failures (e.g., missing keys), which are then + /// typically mapped by callers into `DataUnavailable` where appropriate. + /// + /// WARNING: + /// - This is a blocking function and **MUST** be called from a blocking context + /// (`tokio::task::block_in_place` or `spawn_blocking`). fn validate_block_blocking( &self, height: Height, @@ -3556,7 +4833,9 @@ impl DbV1 { Ok(()) } - /// Double‑SHA‑256 (SHA256d) as used by Bitcoin/Zcash headers and Merkle nodes. + /// Double-SHA-256 (SHA256d), as used by Bitcoin/Zcash headers and merkle nodes. + /// + /// Input and output are raw bytes (no endianness conversions are performed here). fn sha256d(data: &[u8]) -> [u8; 32] { let mut hasher = Sha256::new(); Digest::update(&mut hasher, data); // first pass @@ -3569,8 +4848,16 @@ impl DbV1 { out } - /// Compute the Merkle root of a non‑empty slice of 32‑byte transaction IDs. - /// `txids` must be in block order and already in internal (little‑endian) byte order. + /// Compute the merkle root of a non-empty slice of 32-byte transaction IDs. + /// + /// Requirements: + /// - `txids` must be in block order. + /// - `txids` must already be in the internal byte order (little endian) expected by the header merkle root + /// comparison performed by this module (no byte order transforms are applied here). + /// + /// Behavior: + /// - Duplicates the final element when the layer width is odd, matching Bitcoin/Zcash merkle rules. + /// - Uses SHA256d over 64-byte concatenated pairs at each layer. 
fn calculate_block_merkle_root(txids: &[[u8; 32]]) -> [u8; 32] { assert!( !txids.is_empty(), @@ -3604,30 +4891,43 @@ impl DbV1 { layer[0] } - /// Validate a contiguous range of block heights `[start, end]` inclusive. + /// Validate a contiguous inclusive range of block heights `[start, end]`. + /// + /// This method is optimized to skip heights already known validated via `validated_tip` / + /// `validated_set`. /// - /// Optimized to skip blocks already known to be validated. - /// Returns the full requested `(start, end)` range on success. + /// Semantics: + /// - Accepts either ordering of `start` and `end`. + /// - Validates the inclusive set `{min(start,end) ..= max(start,end)}` in ascending order. + /// - If the entire normalized range is already validated, returns `(start, end)` without + /// touching LMDB (preserves the caller's original ordering). + /// - Otherwise, validates each missing height in ascending order using `validate_block_blocking`. + /// + /// WARNING: + /// - This uses `tokio::task::block_in_place` internally and performs LMDB reads; callers should + /// avoid invoking it from latency-sensitive async paths unless they explicitly intend to + /// validate on-demand. async fn validate_block_range( &self, start: Height, end: Height, ) -> Result<(Height, Height), FinalisedStateError> { - if end.0 < start.0 { - return Err(FinalisedStateError::Custom( - "invalid block range: end < start".to_string(), - )); - } + // Normalize the range for validation, but preserve `(start, end)` ordering in the return. + let (range_start, range_end) = if start.0 <= end.0 { + (start, end) + } else { + (end, start) + }; let tip = self.validated_tip.load(Ordering::Acquire); - let mut h = std::cmp::max(start.0, tip); + let mut h = std::cmp::max(range_start.0, tip); - if h > end.0 { + if h > range_end.0 { return Ok((start, end)); } tokio::task::block_in_place(|| { - while h <= end.0 { + while h <= range_end.0 { if self.is_validated(h) { h += 1; continue; @@ -3635,19 +4935,16 @@ impl DbV1 { let height = Height(h); let height_bytes = height.to_bytes()?; - let bytes = { - let ro = self.env.begin_ro_txn()?; - let bytes = ro.get(self.headers, &height_bytes).map_err(|e| { - if e == lmdb::Error::NotFound { - FinalisedStateError::Custom("height not found in best chain".into()) - } else { - FinalisedStateError::LmdbError(e) - } - })?; - bytes.to_vec() - }; + let ro = self.env.begin_ro_txn()?; + let bytes = ro.get(self.headers, &height_bytes).map_err(|e| { + if e == lmdb::Error::NotFound { + FinalisedStateError::Custom("height not found in best chain".into()) + } else { + FinalisedStateError::LmdbError(e) + } + })?; - let hash = *StoredEntryVar::::deserialize(&*bytes)? + let hash = *StoredEntryVar::::deserialize(bytes)? .inner() .index() .hash(); diff --git a/zaino-state/src/chain_index/finalised_state/entry.rs b/zaino-state/src/chain_index/finalised_state/entry.rs index eea461c49..73183f376 100644 --- a/zaino-state/src/chain_index/finalised_state/entry.rs +++ b/zaino-state/src/chain_index/finalised_state/entry.rs @@ -1,4 +1,69 @@ -//! DB stored data wrappers structs. +//! Checksummed database entry wrappers (fixed and variable length) +//! +//! This file defines small wrapper types used by concrete DB versions for storing values in +//! LMDB with an **integrity checksum**. +//! +//! Each wrapper stores: +//! - the inner *versioned* record `T: ZainoVersionedSerde`, and +//! - a BLAKE2b-256 checksum computed over `key || encoded_item`. +//! +//! The checksum is intended to: +//! 
+//! - detect corruption or partial writes,
+//! - detect accidental key/value mismatches (e.g., writing under the wrong key encoding),
+//! - and provide a cheap integrity check during migrations or debugging.
+//!
+//! ## Integrity model (scope)
+//!
+//! The checksum is a **corruption and correctness** signal, not a cryptographic authentication
+//! mechanism. It helps detect accidental corruption, partial writes, or key/value mismatches, but
+//! it does not provide authenticity against a malicious database writer; that must be ensured by
+//! the actual database implementations, by validating block data on startup and on block writes.
+//!
+//! # Two wrapper forms
+//!
+//! - [`StoredEntryFixed`] for fixed-length values:
+//!   - requires `T: FixedEncodedLen` so that the total encoded value length is constant.
+//!   - important when LMDB uses `DUP_SORT` and/or `DUP_FIXED` flags where record sizing matters.
+//!
+//! - [`StoredEntryVar`] for variable-length values:
+//!   - prefixes the serialized record with a CompactSize length so decoding is bounded and safe.
+//!
+//! Both wrappers are themselves versioned (`ZainoVersionedSerde`), which means their outer layout can
+//! evolve in a controlled way if required.
+//!
+//! # Encoding contract (conceptual)
+//!
+//! `StoredEntryFixed` encodes as:
+//! - StoredEntry version tag
+//! - `T::serialize()` bytes (which include `T`'s own record version tag)
+//! - 32-byte checksum
+//!
+//! `StoredEntryVar` encodes as:
+//! - StoredEntry version tag
+//! - CompactSize(length of `T::serialize()` bytes)
+//! - `T::serialize()` bytes
+//! - 32-byte checksum
+//!
+//! # Usage guidelines
+//!
+//! - Always compute the checksum using the **exact bytes** used as the DB key (i.e. the encoded key).
+//! - On read, verify the checksum before trusting decoded contents.
+//! - Treat checksum mismatch as a corruption/incompatibility signal:
+//!   - return a hard error,
+//!   - or trigger a rebuild path, depending on the calling context.
+//!
+//! # Development: when to pick fixed vs var
+//!
+//! - Use `StoredEntryFixed` when:
+//!   - `T` has a stable, fixed-size encoding and you want predictable sizing, or
+//!   - the LMDB table relies on fixed-size duplicates.
+//!
+//! - Use `StoredEntryVar` when:
+//!   - `T` naturally contains variable-length payloads (vectors, scripts, etc.), or
+//!   - the value size may grow over time and you want to avoid schema churn.
+//!
+//! If you change the wrapper layout itself, bump the wrapper’s `ZainoVersionedSerde::VERSION` and
+//! maintain a decode path (or bump the DB major version and migrate).

 use crate::{
     read_fixed_le, version, write_fixed_le, CompactSize, FixedEncodedLen, ZainoVersionedSerde,
@@ -10,25 +75,47 @@ use blake2::{
 };
 use core2::io::{self, Read, Write};

-/// A fixed length database entry.
-/// This is an important distinction for correct usage of DUP_SORT and DUP_FIXED
-/// LMDB database flags.
+/// Fixed-length checksummed database value wrapper.
 ///
-/// Encoded Format:
+/// This wrapper is designed for LMDB tables that rely on fixed-size value records, including those
+/// configured with `DUP_SORT` and/or `DUP_FIXED`.
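+///
+/// A minimal usage sketch (illustrative; `MyRecord` and `key_bytes` are placeholder names, and
+/// `MyRecord` is assumed to implement the required `ZainoVersionedSerde + FixedEncodedLen` traits):
+///
+/// ```ignore
+/// let entry = StoredEntryFixed::new(&key_bytes, my_record);
+/// // On read, verify against the exact key bytes before trusting the contents:
+/// assert!(entry.verify(&key_bytes));
+/// let record: &MyRecord = entry.inner();
+/// ```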
 ///
-/// ┌─────── byte 0 ───────┬───── byte 1 ─────┬───── T::raw_len() bytes ──────┬─── 32 bytes ────┐
-/// │ StoredEntry version  │ Record version   │ Body                          │ B2B256 hash     │
-/// └──────────────────────┴──────────────────┴───────────────────────────────┴─────────────────┘
+/// The wrapper stores:
+/// - a versioned record `T` (encoded via [`ZainoVersionedSerde`]), and
+/// - a 32-byte BLAKE2b-256 checksum computed over `encoded_key || encoded_item`.
+///
+/// ## Invariants
+/// - `T` must have a fixed encoded length (including its own version tag), enforced by
+///   [`FixedEncodedLen`].
+/// - The checksum must be computed using the **exact key bytes** used in LMDB for this entry.
+/// - On read, callers should verify the checksum before trusting decoded contents.
+///
+/// ## Encoded format (conceptual)
+///
+/// ┌─────── byte 0 ───────┬────────────── T::serialize() bytes ──────────────┬─── 32 bytes ────┐
+/// │ StoredEntry version  │ (includes T's own record version tag + body)     │ B2B256 checksum │
+/// └──────────────────────┴──────────────────────────────────────────────────┴─────────────────┘
+///
+/// Where the checksum is:
+/// `blake2b256(encoded_key || encoded_item_bytes)`.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub(crate) struct StoredEntryFixed<T> {
-    /// Inner record
+    /// The inner record stored in this entry.
     pub(crate) item: T,
-    /// Entry checksum
+
+    /// BLAKE2b-256 checksum of `encoded_key || encoded_item_bytes`.
     pub(crate) checksum: [u8; 32],
 }

 impl<T: ZainoVersionedSerde + FixedEncodedLen> StoredEntryFixed<T> {
-    /// Create a new entry, hashing `key || encoded_item`.
+    /// Constructs a new checksummed entry for `item` under `key`.
+    ///
+    /// The checksum is computed as:
+    /// `blake2b256(encoded_key || item.serialize())`.
+    ///
+    /// # Key requirements
+    /// `key` must be the exact byte encoding used as the LMDB key for this record. If the caller
+    /// hashes a different key encoding than what is used for storage, verification will fail.
     pub(crate) fn new<K: AsRef<[u8]>>(key: K, item: T) -> Self {
         let body = {
             let mut v = Vec::with_capacity(T::VERSIONED_LEN);
@@ -39,8 +126,17 @@ impl<T: ZainoVersionedSerde + FixedEncodedLen> StoredEntryFixed<T> {
         Self { item, checksum }
     }

-    /// Verify checksum given the DB key.
-    /// Returns `true` if `self.checksum == blake2b256(key || item.serialize())`.
+    /// Verifies the checksum for this entry under `key`.
+    ///
+    /// Returns `true` if and only if:
+    /// `self.checksum == blake2b256(encoded_key || item.serialize())`.
+    ///
+    /// # Key requirements
+    /// `key` must be the exact byte encoding used as the LMDB key for this record.
+    ///
+    /// # Usage
+    /// Callers should treat a checksum mismatch as a corruption or incompatibility signal and
+    /// return a hard error (or trigger a rebuild path), depending on context.
     pub(crate) fn verify<K: AsRef<[u8]>>(&self, key: K) -> bool {
         let body = {
             let mut v = Vec::with_capacity(T::VERSIONED_LEN);
@@ -51,12 +147,14 @@ impl<T: ZainoVersionedSerde + FixedEncodedLen> StoredEntryFixed<T> {
         candidate == self.checksum
     }

-    /// Returns a reference to the inner item.
+    /// Returns a reference to the inner record.
     pub(crate) fn inner(&self) -> &T {
         &self.item
     }

-    /// Computes a BLAKE2b-256 checksum.
+    /// Computes a BLAKE2b-256 checksum over `data`.
+    ///
+    /// This is the hashing primitive used by both wrappers. The checksum is not keyed.
     pub(crate) fn blake2b256(data: &[u8]) -> [u8; 32] {
         let mut hasher = Blake2bVar::new(32).expect("Failed to create hasher");
         hasher.update(data);
@@ -68,6 +166,13 @@ impl<T: ZainoVersionedSerde + FixedEncodedLen> StoredEntryFixed<T> {
     }
 }

+/// Versioned on-disk encoding for fixed-length checksummed entries.
+///
+/// Body layout (after the `StoredEntryFixed` version tag):
+/// 1. `T::serialize()` bytes (fixed length: `T::VERSIONED_LEN`)
+/// 2. 32-byte checksum
+///
+/// Note: `T::serialize()` includes `T`’s own version tag and body.
 impl<T: ZainoVersionedSerde + FixedEncodedLen> ZainoVersionedSerde for StoredEntryFixed<T> {
     const VERSION: u8 = version::V1;
@@ -90,26 +195,46 @@ impl<T: ZainoVersionedSerde + FixedEncodedLen> ZainoVersionedSerde for StoredEnt
     }
 }

+/// `StoredEntryFixed` has a fixed encoded body length.
+///
+/// Body length = `T::VERSIONED_LEN` + 32 bytes checksum.
 impl<T: ZainoVersionedSerde + FixedEncodedLen> FixedEncodedLen for StoredEntryFixed<T> {
     const ENCODED_LEN: usize = T::VERSIONED_LEN + 32;
 }

-/// Variable-length database value.
-/// Layout (little-endian unless noted):
+/// Variable-length checksummed database value wrapper.
+///
+/// This wrapper is used for values whose serialized representation is not fixed-size. It stores:
+/// - a versioned record `T` (encoded via [`ZainoVersionedSerde`]),
+/// - a CompactSize length prefix for the serialized record,
+/// - and a 32-byte BLAKE2b-256 checksum computed over `encoded_key || encoded_item`.
+///
+/// The length prefix allows decoding to be bounded and avoids reading untrusted trailing bytes.
+///
+/// ## Encoded format (conceptual)
 ///
-/// ┌────── byte 0 ───────┬─────── CompactSize(len) ─────┬──── 1 byte ────┬── len - 1 bytes ───┬─ 32 bytes ─┐
-/// │ StoredEntry version │ (length of item.serialize()) │ Record version │ Body               │ Hash       │
-/// └─────────────────────┴──────────────────────────────┴────────────────┴────────────────────┴────────────┘
+/// ┌────── byte 0 ───────┬────── CompactSize(len) ──────┬────── len bytes ──────┬─ 32 bytes ─┐
+/// │ StoredEntry version │ len = item.serialize().len() │ T::serialize() bytes  │ checksum   │
+/// └─────────────────────┴──────────────────────────────┴───────────────────────┴────────────┘
+///
+/// Where the checksum is:
+/// `blake2b256(encoded_key || encoded_item_bytes)`.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub(crate) struct StoredEntryVar<T> {
-    /// Inner record
+    /// The inner record stored in this entry.
     pub(crate) item: T,
-    /// Entry checksum
+    /// BLAKE2b-256 checksum of `encoded_key || encoded_item_bytes`.
     pub(crate) checksum: [u8; 32],
 }

 impl<T: ZainoVersionedSerde> StoredEntryVar<T> {
-    /// Create a new entry, hashing `encoded_key || encoded_item`.
+    /// Constructs a new checksummed entry for `item` under `key`.
+    ///
+    /// The checksum is computed as:
+    /// `blake2b256(encoded_key || item.serialize())`.
+    ///
+    /// # Key requirements
+    /// `key` must be the exact byte encoding used as the LMDB key for this record.
     pub(crate) fn new<K: AsRef<[u8]>>(key: K, item: T) -> Self {
         let body = {
             let mut v = Vec::new();
@@ -120,8 +245,13 @@ impl<T: ZainoVersionedSerde> StoredEntryVar<T> {
         Self { item, checksum }
     }

-    /// Verify checksum given the DB key.
-    /// Returns `true` if `self.checksum == blake2b256(key || item.serialize())`.
+    /// Verifies the checksum for this entry under `key`.
+    ///
+    /// Returns `true` if and only if:
+    /// `self.checksum == blake2b256(encoded_key || item.serialize())`.
+    ///
+    /// # Key requirements
+    /// `key` must be the exact byte encoding used as the LMDB key for this record.
     pub(crate) fn verify<K: AsRef<[u8]>>(&self, key: K) -> bool {
         let mut body = Vec::new();
         self.item.serialize(&mut body).unwrap();
@@ -129,12 +259,12 @@ impl<T: ZainoVersionedSerde> StoredEntryVar<T> {
         candidate == self.checksum
     }

-    /// Returns a reference to the inner item.
+    /// Returns a reference to the inner record.
     pub(crate) fn inner(&self) -> &T {
         &self.item
     }

-    /// Computes a BLAKE2b-256 checksum.
+    /// Computes a BLAKE2b-256 checksum over `data`.
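+    ///
+    /// Illustrative preimage construction (`key_bytes` / `item_bytes` are placeholder names for
+    /// the exact LMDB key encoding and the `T::serialize()` output):
+    ///
+    /// ```ignore
+    /// let mut preimage = Vec::new();
+    /// preimage.extend_from_slice(&key_bytes);  // exact key bytes used in LMDB
+    /// preimage.extend_from_slice(&item_bytes); // exact serialized record bytes
+    /// let checksum = Self::blake2b256(&preimage);
+    /// ```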
     pub(crate) fn blake2b256(data: &[u8]) -> [u8; 32] {
         let mut hasher = Blake2bVar::new(32).expect("Failed to create hasher");
         hasher.update(data);
@@ -146,6 +276,15 @@ impl<T: ZainoVersionedSerde> StoredEntryVar<T> {
     }
 }

+/// Versioned on-disk encoding for variable-length checksummed entries.
+///
+/// Body layout (after the `StoredEntryVar` version tag):
+/// 1. CompactSize `len` (the length of `T::serialize()` bytes)
+/// 2. `len` bytes of `T::serialize()` (includes `T`’s own version tag and body)
+/// 3. 32-byte checksum
+///
+/// Implementations must ensure the length prefix matches the exact serialized record bytes written;
+/// otherwise decoding will fail or misalign.
 impl<T: ZainoVersionedSerde> ZainoVersionedSerde for StoredEntryVar<T> {
     const VERSION: u8 = version::V1;
diff --git a/zaino-state/src/chain_index/finalised_state/migrations.rs b/zaino-state/src/chain_index/finalised_state/migrations.rs
index e2ba64078..d648b5674 100644
--- a/zaino-state/src/chain_index/finalised_state/migrations.rs
+++ b/zaino-state/src/chain_index/finalised_state/migrations.rs
@@ -1,4 +1,128 @@
-//! Migration management and implementations.
+//! Database version migration framework and implementations
+//!
+//! This file defines how `ZainoDB` migrates on-disk databases between database versions.
+//!
+//! Migrations are orchestrated by [`MigrationManager`], which is invoked from `ZainoDB::spawn` when
+//! `current_version < target_version`.
+//!
+//! The migration model is **stepwise**:
+//! - each migration maps one concrete `DbVersion` to the next supported `DbVersion`,
+//! - the manager iteratively applies steps until the target is reached.
+//!
+//! # Key concepts
+//!
+//! - [`Migration`] trait:
+//!   - declares `CURRENT_VERSION` and `TO_VERSION` constants,
+//!   - provides an async `migrate(...)` entry point.
+//!
+//! - [`MigrationManager`]:
+//!   - holds the router, config, current and target versions, and a `BlockchainSource`,
+//!   - repeatedly selects and runs the next migration via `get_migration()`.
+//!
+//! - [`MigrationStep`]:
+//!   - enum-based dispatch wrapper used by `MigrationManager` to select between multiple concrete
+//!     `Migration` implementations (Rust cannot return different `impl Trait` types from a `match`).
+//!
+//! - [`capability::MigrationStatus`]:
+//!   - stored in `DbMetadata` and used to resume work safely after shutdown.
+//!
+//! # How major migrations work in this codebase
+//!
+//! This module is designed around the router’s **primary + shadow** model:
+//!
+//! - The *primary* DB continues serving read/write traffic.
+//! - A *shadow* DB (new schema version) is created and built in parallel.
+//! - Once the shadow DB is fully built and marked complete, it is promoted to primary.
+//! - The old primary DB is shut down and deleted from disk once all handles are dropped.
+//!
+//! This minimises downtime and allows migrations that require a full rebuild (rather than an
+//! in-place rewrite) without duplicating the entire DB indefinitely.
+//!
+//! It is also possible (if the migration allows) to partially build the new database version, switch
+//! specific functionality to the shadow, and partially delete the old database version, rather than
+//! building the new database in full. This enables developers to minimise transient disk usage
+//! during migrations.
+//!
+//! # Implemented migrations
+//!
+//! ## v0.0.0 → v1.0.0
+//!
+//! `Migration0_0_0To1_0_0` performs a **full shadow rebuild from genesis**.
+//!
+//! Rationale (as enforced by code/comments):
+//! - The legacy v0 DB is a lightwallet-specific store that only builds compact blocks from Sapling
+//!   activation onwards.
+//! - v1 requires data from genesis (notably for transparent address history indices), therefore a
+//!   partial “continue from Sapling” build is insufficient.
+//!
+//! Mechanics:
+//! - Spawn v1 as a shadow backend.
+//! - Determine the current shadow tip (to resume if interrupted).
+//! - Fetch blocks and commitment tree roots from the `BlockchainSource` starting at either genesis
+//!   or `shadow_tip + 1`, building `BlockMetadata` and `IndexedBlock`.
+//! - Keep building until the shadow catches up to the primary tip (looping because the primary can
+//!   advance during the build).
+//! - Mark `migration_status = Complete` in shadow metadata.
+//! - Promote shadow to primary via `router.promote_shadow()`.
+//! - Delete the old v0 directory asynchronously once all strong references are dropped.
+//!
+//! ## v1.0.0 → v1.1.0
+//!
+//! `Migration1_0_0To1_1_0` is a **minor version bump** with **no schema changes**, but does include
+//! changes to the external ZainoDB API.
+//!
+//! It updates the stored `DbMetadata` version to reflect the v1.1.0 API contract:
+//! - `CompactBlockExt` now includes `get_compact_block_stream(...)`.
+//! - compact block transaction materialization is now selected via `PoolTypeFilter` (including
+//!   optional transparent data).
+//!
+//! This release also introduces [`MigrationStep`], the enum-based migration dispatcher used by
+//! [`MigrationManager`], to allow selecting between multiple concrete migration implementations.
+//!
+//! # Development: adding a new migration step
+//!
+//! 1. Introduce a new `struct MigrationX_Y_ZToA_B_C;` and implement `Migration`.
+//! 2. Add a new `MigrationStep` variant and register it in `MigrationManager::get_migration()` by
+//!    matching on the *current* version.
+//! 3. Ensure the migration is:
+//!    - deterministic,
+//!    - resumable (use `DbMetadata::migration_status` and/or shadow tip),
+//!    - crash-safe (never leaves a partially promoted DB).
+//! 4. Add tests/fixtures for:
+//!    - starting from the old version,
+//!    - resuming mid-build if applicable,
+//!    - validating the promoted DB serves required capabilities.
+//!
+//! # Notes on MigrationType
+//! Database versioning (and migration) is split into three distinct types, dependent on the severity
+//! of changes being made to the database:
+//! - Major versions / migrations:
+//!   - Major schema / capability changes, notably changes that require refetching the complete
+//!     blockchain from the backing validator / finaliser to build / update database indices.
+//!   - Migrations should follow the "primary" database / "shadow" database model. The legacy database
+//!     should be spawned as the "primary" and set to carry on serving data during migration. The new
+//!     database version is then spawned as the "shadow" and built in a background process. Once the
+//!     "shadow" is built to "primary" db tip height it is promoted to primary, taking over serving
+//!     data from the legacy database; the demoted database can then be safely removed from disk. It is
+//!     also possible to partially build the new database version, promote specific database capabilities,
+//!     and delete specific tables from the legacy database, reducing transient disk usage.
+//! - Minor versions / migrations:
+//!   - Updates involving minor schema / capability changes, notably changes that can be rebuilt in place
+//!     (changes that do not require fetching new data from the backing validator / finaliser) or that can
+//!     rely on updates to the versioned serialisation / deserialisation of database structures.
+//!   - Migrations for minor patch bumps can follow several paths. If the database table being updated
+//!     holds variable length items, and the actual data being held is not changed (only format changes
+//!     being applied) then it may be possible to rely on serialisation / deserialisation updates to the
+//!     items being changed, with the database table holding a mix of serialisation versions. However,
+//!     if the table being updated is of fixed length items, or the actual data held is being updated,
+//!     then it will be necessary to rebuild that table in full, possibly requiring database downtime for
+//!     the migration. Since this only involves moving data already held in the database (rather than
+//!     fetching new data from the backing validator), migration should be quick, and short downtimes
+//!     are acceptable.
+//! - Patch versions / migrations:
+//!   - Changes to database code that do not touch the database schema; these include bug fixes,
+//!     performance improvements, etc.
+//!   - Migrations for patch updates only need to handle updating the stored DbMetadata singleton.

 use super::{
     capability::{
@@ -9,7 +133,9 @@
 };

 use crate::{
-    chain_index::{source::BlockchainSource, types::GENESIS_HEIGHT},
+    chain_index::{
+        finalised_state::capability::DbMetadata, source::BlockchainSource, types::GENESIS_HEIGHT,
+    },
     config::BlockCacheConfig,
     error::FinalisedStateError,
     BlockHash, BlockMetadata, BlockWithMetadata, ChainWork, Height, IndexedBlock,
@@ -20,26 +146,70 @@
 use std::sync::Arc;
 use tracing::info;
 use zebra_chain::parameters::NetworkKind;

+/// Broad categorisation of migration severity.
+///
+/// This enum exists as a design aid to communicate intent and constraints:
+/// - **Patch**: code-only changes; schema is unchanged; typically only `DbMetadata` needs updating.
+/// - **Minor**: compatible schema / encoding evolution; may require in-place rebuilds of selected tables.
+/// - **Major**: capability or schema changes that require rebuilding indices from the backing validator,
+///   typically using the router’s primary/shadow model.
+///
+/// Note: this enum is not currently used to dispatch behaviour in this file; concrete steps are
+/// selected by [`MigrationManager::get_migration`].
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub enum MigrationType {
+    /// Patch-level changes: no schema change; metadata updates only.
     Patch,
+
+    /// Minor-level changes: compatible schema/encoding changes; may require in-place table rebuild.
     Minor,
+
+    /// Major-level changes: new schema/capabilities; usually requires shadow rebuild and promotion.
     Major,
 }

+/// A single migration step from one concrete on-disk version to the next.
+///
+/// Migrations are designed to be **composable** and **stepwise**: each implementation should map a
+/// specific `CURRENT_VERSION` to a specific `TO_VERSION`. The manager then iterates until the target
+/// version is reached.
+///
+/// ## Resumability and crash-safety
+/// Migration implementations are expected to be resumable where practical. In this codebase, major
+/// migrations typically use:
+/// - a shadow database that can be incrementally built,
+/// - the shadow tip height as an implicit progress marker,
+/// - and [`MigrationStatus`] in `DbMetadata` as an explicit progress marker.
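+///
+/// A minimal skeleton of a step (illustrative only: the struct name and version numbers are
+/// placeholders, and the trait is assumed to be generic over the `BlockchainSource` as in the
+/// concrete implementations below):
+///
+/// ```ignore
+/// struct MigrationX_To_Y;
+///
+/// #[async_trait]
+/// impl<T: BlockchainSource> Migration<T> for MigrationX_To_Y {
+///     const CURRENT_VERSION: DbVersion = DbVersion { major: 1, minor: 1, patch: 0 };
+///     const TO_VERSION: DbVersion = DbVersion { major: 1, minor: 2, patch: 0 };
+///
+///     async fn migrate(
+///         &self,
+///         router: Arc<Router>,
+///         cfg: BlockCacheConfig,
+///         source: T,
+///     ) -> Result<(), FinalisedStateError> {
+///         // Build/verify the new state here, update `DbMetadata`, and promote the shadow
+///         // backend via the router if this is a major (rebuild-style) step.
+///         Ok(())
+///     }
+/// }
+/// ```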
+///
+/// Implementations must never promote a partially-correct database to primary.
 #[async_trait]
 pub trait Migration<T: BlockchainSource> {
+    /// The exact on-disk version this step migrates *from*.
     const CURRENT_VERSION: DbVersion;
+
+    /// The exact on-disk version this step migrates *to*.
     const TO_VERSION: DbVersion;

+    /// Returns the version this step migrates *from*.
     fn current_version(&self) -> DbVersion {
         Self::CURRENT_VERSION
     }

+    /// Returns the version this step migrates *to*.
     fn to_version(&self) -> DbVersion {
         Self::TO_VERSION
     }

+    /// Performs the migration step.
+    ///
+    /// Implementations may:
+    /// - spawn a shadow backend,
+    /// - build or rebuild indices,
+    /// - update metadata and migration status,
+    /// - and promote the shadow backend to primary via the router.
+    ///
+    /// # Errors
+    /// Returns `FinalisedStateError` if the migration cannot proceed safely or deterministically.
     async fn migrate(
         &self,
         router: Arc<Router>,
@@ -48,16 +218,43 @@ pub trait Migration<T: BlockchainSource> {
     ) -> Result<(), FinalisedStateError>;
 }

+/// Orchestrates a sequence of migration steps until `target_version` is reached.
+///
+/// `MigrationManager` is constructed by `ZainoDB::spawn` when it detects that the on-disk database
+/// is older than the configured target version.
+///
+/// The manager:
+/// - selects the next step based on the current version,
+/// - runs it,
+/// - then advances `current_version` to the step’s `TO_VERSION` and repeats.
+///
+/// The router is shared so that migration steps can use the primary/shadow routing model.
 pub(super) struct MigrationManager<T: BlockchainSource> {
+    /// Router controlling primary/shadow backends and capability routing.
     pub(super) router: Arc<Router>,
+
+    /// Block-cache configuration (paths, network, configured target DB version, etc.).
     pub(super) cfg: BlockCacheConfig,
+
+    /// The on-disk version currently detected/opened.
     pub(super) current_version: DbVersion,
+
+    /// The configured target version to migrate to.
     pub(super) target_version: DbVersion,
+
+    /// Backing data source used to fetch blocks / tree roots for rebuild-style migrations.
     pub(super) source: T,
 }

 impl<T: BlockchainSource> MigrationManager<T> {
     /// Iteratively performs each migration step from current version to target version.
+    ///
+    /// The manager applies steps in order, where each step maps one specific `DbVersion` to the next.
+    /// The loop terminates once `current_version >= target_version`.
+    ///
+    /// # Errors
+    /// Returns an error if a migration step is missing for the current version, or if any migration
+    /// step fails.
     pub(super) async fn migrate(&mut self) -> Result<(), FinalisedStateError> {
         while self.current_version < self.target_version {
             let migration = self.get_migration()?;
@@ -68,20 +265,24 @@ impl<T: BlockchainSource> MigrationManager<T> {
                 self.source.clone(),
             )
             .await?;
-            self.current_version = migration.to_version();
+            self.current_version = migration.to_version::<T>();
         }
         Ok(())
     }

-    /// Return the next migration for the current version.
-    fn get_migration(&self) -> Result<impl Migration<T>, FinalisedStateError> {
+    /// Returns the next migration step for the current on-disk version.
+    ///
+    /// This must be updated whenever a new supported DB version is introduced. The match is strict:
+    /// if a step is missing, migration is aborted rather than attempting an unsafe fallback.
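+    ///
+    /// Registering a new step is a single match arm (illustrative version numbers and type names):
+    ///
+    /// ```ignore
+    /// (1, 1, 0) => Ok(MigrationStep::Migration1_1_0To1_2_0(Migration1_1_0To1_2_0)),
+    /// ```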
+    fn get_migration(&self) -> Result<MigrationStep, FinalisedStateError> {
         match (
             self.current_version.major,
             self.current_version.minor,
             self.current_version.patch,
         ) {
-            (0, 0, 0) => Ok(Migration0_0_0To1_0_0),
+            (0, 0, 0) => Ok(MigrationStep::Migration0_0_0To1_0_0(Migration0_0_0To1_0_0)),
+            (1, 0, 0) => Ok(MigrationStep::Migration1_0_0To1_1_0(Migration1_0_0To1_1_0)),
             (_, _, _) => Err(FinalisedStateError::Custom(format!(
                 "Missing migration from version {}",
                 self.current_version
@@ -90,8 +291,50 @@ impl<T: BlockchainSource> MigrationManager<T> {
     }
 }

+/// Concrete migration step selector.
+///
+/// Rust cannot return `impl Migration` from a `match` that selects between multiple concrete
+/// migration types. `MigrationStep` is the enum-based dispatch wrapper used by [`MigrationManager`]
+/// to select a step and call `migrate(...)`, and to read the step’s `TO_VERSION`.
+enum MigrationStep {
+    Migration0_0_0To1_0_0(Migration0_0_0To1_0_0),
+    Migration1_0_0To1_1_0(Migration1_0_0To1_1_0),
+}
+
+impl MigrationStep {
+    fn to_version<T: BlockchainSource>(&self) -> DbVersion {
+        match self {
+            MigrationStep::Migration0_0_0To1_0_0(_step) => {
+                <Migration0_0_0To1_0_0 as Migration<T>>::TO_VERSION
+            }
+            MigrationStep::Migration1_0_0To1_1_0(_step) => {
+                <Migration1_0_0To1_1_0 as Migration<T>>::TO_VERSION
+            }
+        }
+    }
+
+    async fn migrate<T: BlockchainSource>(
+        &self,
+        router: Arc<Router>,
+        cfg: BlockCacheConfig,
+        source: T,
+    ) -> Result<(), FinalisedStateError> {
+        match self {
+            MigrationStep::Migration0_0_0To1_0_0(step) => step.migrate(router, cfg, source).await,
+            MigrationStep::Migration1_0_0To1_1_0(step) => step.migrate(router, cfg, source).await,
+        }
+    }
+}
+
 // ***** Migrations *****

+/// Major migration: v0.0.0 → v1.0.0.
+///
+/// This migration performs a shadow rebuild of the v1 database from genesis, then promotes the
+/// completed shadow to primary and schedules deletion of the old v0 database directory once all
+/// handles are dropped.
+///
+/// See the module-level documentation for the detailed rationale and mechanics.
 struct Migration0_0_0To1_0_0;

 #[async_trait]
@@ -107,11 +350,21 @@ impl<T: BlockchainSource> Migration<T> for Migration0_0_0To1_0_0 {
         patch: 0,
     };

-    /// The V0 database that we are migrating from was a lightwallet specific database
-    /// that only built compact block data from sapling activation onwards.
-    /// DbV1 is required to be built from genasis to correctly build the transparent address indexes.
-    /// For this reason we do not do any partial builds in the V0 to V1 migration.
-    /// We just run V0 as primary until V1 is fully built in shadow, then switch primary, deleting V0.
+    /// Performs the v0 → v1 major migration using the router’s primary/shadow model.
+    ///
+    /// The legacy v0 database only supports compact block data from Sapling activation onwards.
+    /// DbV1 requires a complete rebuild from genesis to correctly build indices (notably transparent
+    /// address history). For this reason, this migration does not attempt partial incremental builds
+    /// from Sapling; it rebuilds v1 in full in a shadow backend, then promotes it.
+    ///
+    /// ## Resumption behaviour
+    /// If the process is shut down mid-migration:
+    /// - the v1 shadow DB directory may already exist,
+    /// - shadow tip height is used to resume from `shadow_tip + 1`,
+    /// - and `MigrationStatus` is used as a coarse progress marker.
+    ///
+    /// Promotion occurs only after the v1 build loop has caught up to the primary tip and the shadow
+    /// metadata is marked `Complete`.
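+    ///
+    /// Sketch of the resume decision (illustrative; assumes the shadow backend exposes
+    /// `db_height()` and that `GENESIS_HEIGHT` is in scope, as elsewhere in this module):
+    ///
+    /// ```ignore
+    /// let start = match shadow.db_height().await? {
+    ///     Some(shadow_tip) => shadow_tip + 1, // resume above the last built block
+    ///     None => GENESIS_HEIGHT,             // fresh shadow: rebuild from genesis
+    /// };
+    /// ```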
+/// +/// This release updates the *API contract* for compact blocks: +/// - [`CompactBlockExt`] adds `get_compact_block_stream(...)`. +/// - Compact block transaction materialization is selected via [`PoolTypeFilter`], which may include +/// transparent data. +/// +/// This release also introduces [`MigrationStep`], the enum-based migration dispatcher used by +/// [`MigrationManager`], to allow selecting between multiple concrete migration implementations. +/// +/// Because the persisted schema contract is unchanged, this migration only updates the stored +/// [`DbMetadata::version`] from `1.0.0` to `1.1.0`. +/// +/// Safety and resumability: +/// - Idempotent: if run more than once, it will re-write the same metadata. +/// - No shadow database and no table rebuild. +/// - Clears any stale in-progress migration status. +struct Migration1_0_0To1_1_0; + +#[async_trait] +impl Migration for Migration1_0_0To1_1_0 { + const CURRENT_VERSION: DbVersion = DbVersion { + major: 1, + minor: 0, + patch: 0, + }; + + const TO_VERSION: DbVersion = DbVersion { + major: 1, + minor: 1, + patch: 0, + }; + + async fn migrate( + &self, + router: Arc, + _cfg: BlockCacheConfig, + _source: T, + ) -> Result<(), FinalisedStateError> { + info!("Starting v1.0.0 → v1.1.0 migration (metadata-only)."); + + let mut metadata: DbMetadata = router.get_metadata().await?; + + // Preserve the schema hash because there are no schema changes in v1.1.0. + // Only advance the version marker to reflect the new API contract. + metadata.version = >::TO_VERSION; + + // Outside of migrations this should be `Empty`. This step performs no build phases, so we + // ensure we do not leave a stale in-progress status behind. + metadata.migration_status = MigrationStatus::Empty; + + router.update_metadata(metadata).await?; + + info!("v1.0.0 to v1.1.0 migration complete."); + Ok(()) + } +} diff --git a/zaino-state/src/chain_index/finalised_state/reader.rs b/zaino-state/src/chain_index/finalised_state/reader.rs index 259d14883..e75052524 100644 --- a/zaino-state/src/chain_index/finalised_state/reader.rs +++ b/zaino-state/src/chain_index/finalised_state/reader.rs @@ -1,6 +1,50 @@ -//! ZainoDbReader: Read only view onto a running ZainoDB +//! Read-only view onto a running `ZainoDB` (DbReader) //! -//! This should be used to fetch chain data in *all* cases. +//! This file defines [`DbReader`], the **read-only** interface that should be used for *all* chain +//! data fetches from the finalised database. +//! +//! `DbReader` exists for two reasons: +//! +//! 1. **API hygiene:** it narrows the surface to reads and discourages accidental use of write APIs +//! from query paths. +//! 2. **Migration safety:** it routes each call through [`Router`](super::router::Router) using a +//! [`CapabilityRequest`](crate::chain_index::finalised_state::capability::CapabilityRequest), +//! ensuring the underlying backend supports the requested feature (especially important during +//! major migrations where different DB versions may coexist). +//! +//! # How routing works +//! +//! Each method in `DbReader` requests a specific capability (e.g. `BlockCoreExt`, `TransparentHistExt`). +//! Internally, `DbReader::db(cap)` calls `ZainoDB::backend_for_cap(cap)`, which consults the router. +//! +//! - If the capability is currently served by the shadow DB (shadow mask contains the bit), the +//! query runs against shadow. +//! - Otherwise, it runs against primary if primary supports it. +//! 
+//! - If neither backend supports it, the call returns `FinalisedStateError::FeatureUnavailable(...)`.
+//!
+//! # Version constraints and error handling
+//!
+//! Some queries are only available in newer DB versions (notably most v1 extension traits).
+//! Callers should either:
+//! - require a minimum DB version (via configuration and/or metadata checks), or
+//! - handle `FeatureUnavailable` errors gracefully when operating against legacy databases.
+//!
+//! # Development: adding a new read method
+//!
+//! 1. Decide whether the new query belongs under an existing extension trait or needs a new one.
+//! 2. If a new capability is required:
+//!    - add a new `Capability` bit and `CapabilityRequest` variant in `capability.rs`,
+//!    - implement the corresponding extension trait for supported DB versions,
+//!    - delegate through `DbBackend` and route via the router.
+//! 3. Add the new method on `DbReader` that requests the corresponding `CapabilityRequest` and calls
+//!    into the backend.
+//!
+//! # Usage pattern
+//!
+//! `DbReader` is created from an `Arc<ZainoDB>` using [`ZainoDB::to_reader`](super::ZainoDB::to_reader).
+//! Prefer passing `DbReader` through query layers rather than passing `ZainoDB` directly.

+use zaino_proto::proto::utils::PoolTypeFilter;

 use crate::{
     chain_index::{
@@ -8,9 +52,9 @@ use crate::{
         types::{AddrEventBytes, TransactionHash},
     },
     error::FinalisedStateError,
-    AddrScript, BlockHash, BlockHeaderData, CommitmentTreeData, Height, IndexedBlock,
-    OrchardCompactTx, OrchardTxList, Outpoint, SaplingCompactTx, SaplingTxList, StatusType,
-    TransparentCompactTx, TransparentTxList, TxLocation, TxidList,
+    AddrScript, BlockHash, BlockHeaderData, CommitmentTreeData, CompactBlockStream, Height,
+    IndexedBlock, OrchardCompactTx, OrchardTxList, Outpoint, SaplingCompactTx, SaplingTxList,
+    StatusType, TransparentCompactTx, TransparentTxList, TxLocation, TxidList,
 };

 use super::{
@@ -24,45 +68,64 @@
 use std::sync::Arc;

-/// Immutable view onto an already-running [`ZainoDB`].
-///
-/// Carries a plain reference with the same lifetime as the parent DB
 #[derive(Clone, Debug)]
+/// `DbReader` is the preferred entry point for serving chain queries:
+/// - it exposes only read APIs,
+/// - it routes each operation via [`CapabilityRequest`] to ensure the selected backend supports the
+///   requested feature,
+/// - and it remains stable across major migrations because routing is handled internally by the
+///   [`Router`](super::router::Router).
+///
+/// ## Cloning and sharing
+/// `DbReader` is cheap to clone; clones share the underlying `Arc<ZainoDB>`.
 pub(crate) struct DbReader {
-    /// Immutable read-only view onto the running ZainoDB
+    /// Shared handle to the running `ZainoDB` instance.
     pub(crate) inner: Arc<ZainoDB>,
 }

 impl DbReader {
-    /// Returns the internal db backend for the given db capability.
+    /// Resolves the backend that should serve `cap` right now.
+    ///
+    /// This is the single routing choke-point for all `DbReader` methods. It delegates to
+    /// `ZainoDB::backend_for_cap`, which consults the router’s primary/shadow masks.
+    ///
+    /// # Errors
+    /// Returns `FinalisedStateError::FeatureUnavailable(...)` if no currently-open backend
+    /// advertises the requested capability.
     #[inline(always)]
     fn db(&self, cap: CapabilityRequest) -> Result<Arc<DbBackend>, FinalisedStateError> {
         self.inner.backend_for_cap(cap)
     }

+    // ***** DB Core Read *****

-    /// Returns the status of the serving ZainoDB.
+    /// Returns the current runtime status of the serving database.
+    ///
+    /// This reflects the status of the backend currently serving `READ_CORE`, which is the minimum
+    /// capability required for basic chain queries.
     pub(crate) fn status(&self) -> StatusType {
         self.inner.status()
     }

-    /// Returns the greatest block `Height` stored in the db
-    /// (`None` if the DB is still empty).
+    /// Returns the greatest block `Height` stored in the database, or `None` if the DB is empty.
     pub(crate) async fn db_height(&self) -> Result<Option<Height>, FinalisedStateError> {
         self.inner.db_height().await
     }

-    /// Fetch database metadata.
+    /// Fetches the persisted database metadata singleton (`DbMetadata`).
     pub(crate) async fn get_metadata(&self) -> Result<DbMetadata, FinalisedStateError> {
         self.inner.get_metadata().await
     }

-    /// Awaits untile the DB returns a Ready status.
+    /// Waits until the database reports [`StatusType::Ready`].
+    ///
+    /// This is a convenience wrapper around `ZainoDB::wait_until_ready` and should typically be
+    /// awaited once during startup before serving queries.
     pub(crate) async fn wait_until_ready(&self) {
         self.inner.wait_until_ready().await
     }

-    /// Fetch the block height in the main chain for a given block hash.
+    /// Fetches the main-chain height for a given block hash, if present in finalised state.
     pub(crate) async fn get_block_height(
         &self,
         hash: BlockHash,
@@ -70,7 +133,7 @@ impl DbReader {
         self.inner.get_block_height(hash).await
     }

-    /// Fetch the block hash in the main chain for a given block height.
+    /// Fetches the main-chain block hash for a given block height, if present in finalised state.
     pub(crate) async fn get_block_hash(
         &self,
         height: Height,
@@ -397,14 +460,24 @@ impl DbReader {
     // ***** CompactBlock Ext *****

     /// Returns the CompactBlock for the given Height.
-    ///
-    /// TODO: Add separate range fetch method!
     pub(crate) async fn get_compact_block(
         &self,
         height: Height,
+        pool_types: PoolTypeFilter,
     ) -> Result<CompactBlock, FinalisedStateError> {
         self.db(CapabilityRequest::CompactBlockExt)?
-            .get_compact_block(height)
+            .get_compact_block(height, pool_types)
             .await
     }
+
+    /// Returns a stream of CompactBlocks for the given inclusive height range, filtered by
+    /// `pool_types`.
+    pub(crate) async fn get_compact_block_stream(
+        &self,
+        start_height: Height,
+        end_height: Height,
+        pool_types: PoolTypeFilter,
+    ) -> Result<CompactBlockStream, FinalisedStateError> {
+        self.db(CapabilityRequest::CompactBlockExt)?
+            .get_compact_block_stream(start_height, end_height, pool_types)
+            .await
+    }
 }
diff --git a/zaino-state/src/chain_index/finalised_state/router.rs b/zaino-state/src/chain_index/finalised_state/router.rs
index 86c9b783d..a9f9c8472 100644
--- a/zaino-state/src/chain_index/finalised_state/router.rs
+++ b/zaino-state/src/chain_index/finalised_state/router.rs
@@ -1,8 +1,69 @@
-//! Implements the ZainoDB Router, used to selectively route database capabilities during major migrations.
+//! Capability-based database router (primary + shadow)
 //!
-//! The Router allows incremental database migrations by splitting read and write capability groups between primary and shadow databases.
-//! This design enables partial migrations without duplicating the entire chain database,
-//! greatly reducing disk usage and ensuring minimal downtime.
+//! This file implements [`Router`], which allows `ZainoDB` to selectively route operations to one of
+//! two database backends:
+//! - a **primary** (active) DB, and
+//! - an optional **shadow** DB used during major migrations.
+//!
+//! The router is designed to support incremental and low-downtime migrations by splitting the DB
+//! feature set into capability groups. Each capability group can be served by either backend,
+//! controlled by atomic bitmasks.
+//!
+//! # Why a router exists
+//!
+//! Major schema upgrades are often most safely implemented as a rebuild into a new DB rather than an
+//! in-place rewrite. The router enables that by allowing the system to:
+//! - keep serving requests from the old DB while building the new one,
+//! - optionally move specific read capabilities to the shadow DB once they are correct there,
+//! - then atomically promote the shadow DB to primary at the end.
+//!
+//! # Concurrency and atomicity model
+//!
+//! The router uses `ArcSwap` / `ArcSwapOption` for lock-free backend swapping and `AtomicU32` masks
+//! for capability routing.
+//!
+//! - Backend selection (`backend(...)`) is wait-free and based on the current masks.
+//! - Promotion (`promote_shadow`) swaps the primary Arc atomically; existing in-flight operations
+//!   remain valid because they hold an `Arc`.
+//!
+//! Memory ordering is explicit (`Acquire`/`Release`/`AcqRel`) to ensure mask updates are observed
+//! consistently relative to backend pointer updates.
+//!
+//! # Capability routing semantics
+//!
+//! `Router::backend(req)` resolves as:
+//! 1. If `shadow_mask` contains the requested bit and shadow exists → return shadow.
+//! 2. Else if `primary_mask` contains the requested bit → return primary.
+//! 3. Else → return `FinalisedStateError::FeatureUnavailable`.
+//!
+//! # Shadow lifecycle (migration-only API)
+//!
+//! The following methods are intended to be called **only** by the migration manager:
+//! - `set_shadow(...)`
+//! - `extend_shadow_caps(...)`
+//! - `promote_shadow()`
+//!
+//! Promotion performs:
+//! - shadow → primary swap,
+//! - resets shadow and shadow mask,
+//! - updates the primary mask from the promoted backend’s declared capabilities,
+//! - returns the old primary backend so the migration can shut it down and delete its files safely.
+//!
+//! # Trait impls
+//!
+//! `Router` implements the core DB traits (`DbCore`, `DbRead`, `DbWrite`) by routing READ_CORE/WRITE_CORE
+//! to whichever backend currently serves those capabilities.
+//!
+//! # Development notes
+//!
+//! - If you introduce a new capability bit, ensure it is:
+//!   - added to `CapabilityRequest`,
+//!   - implemented by the relevant DB version(s),
+//!   - and considered in migration routing policy (whether it can move to shadow incrementally).
+//!
+//! - When implementing incremental migrations (moving caps before final promotion), ensure the shadow
+//!   backend is kept consistent with the primary for those capabilities (or restrict such caps to
+//!   read-only queries that can tolerate lag with explicit semantics).

 use super::{
     capability::{Capability, DbCore, DbMetadata, DbRead, DbWrite},
@@ -22,26 +83,85 @@ use std::sync::{
 };

 #[derive(Debug)]
+/// Capability-based database router.
+///
+/// `Router` is the internal dispatch layer used by `ZainoDB` to route operations to either:
+/// - a **primary** database backend (the active DB), or
+/// - an optional **shadow** backend used during major version migrations.
+///
+/// Routing is driven by per-backend **capability bitmasks**:
+/// - If a requested capability bit is set in the shadow mask and a shadow backend exists, the call
+///   is routed to shadow.
+/// - Otherwise, if the bit is set in the primary mask, the call is routed to primary.
+/// - Otherwise, the feature is reported as unavailable.
+///
+/// ## Concurrency model
+/// - Backend pointers are stored using `ArcSwap` / `ArcSwapOption` to allow atomic, lock-free swaps.
+/// - Capability masks are stored in `AtomicU32` and read using `Acquire` ordering in the hot path.
+/// - Promoting shadow to primary is atomic and safe for in-flight calls because callers hold
+///   `Arc` clones.
+///
+/// ## Intended usage
+/// The shadow-related APIs (`set_shadow`, `extend_shadow_caps`, `promote_shadow`) are intended to be
+/// used only by the migration manager to support low-downtime rebuild-style migrations.
 pub(crate) struct Router {
-    /// Primary active database.
+    /// Primary active database backend.
+    ///
+    /// This is the default backend used for any capability bit that is not explicitly routed to the
+    /// shadow backend via [`Router::shadow_mask`].
+    ///
+    /// Stored behind [`ArcSwap`] so it can be replaced atomically during promotion without locking.
     primary: ArcSwap<DbBackend>,
-    /// Shadow database, new version to be built during major migration.
+
+    /// Shadow database backend (optional).
+    ///
+    /// During a major migration, a new-version backend is built and installed here. Individual
+    /// capability groups can be routed to the shadow by setting bits in [`Router::shadow_mask`].
+    ///
+    /// Outside of migrations this should remain `None`.
    shadow: ArcSwapOption<DbBackend>,
-    /// Capability mask for primary database.
+
+    /// Capability mask for the primary backend.
+    ///
+    /// A bit being set means “this capability may be served by the primary backend”.
+    ///
+    /// The mask is initialized from `primary.capability()` and can be restricted/extended during
+    /// migrations to ensure that requests are only routed to backends that can satisfy them.
    primary_mask: AtomicU32,
-    /// Capability mask dictating what database capalility (if any) should be served by the shadow.
+
+    /// Capability mask for the shadow backend.
+    ///
+    /// A bit being set means “this capability should be served by the shadow backend (if present)”.
+    ///
+    /// Routing precedence is:
+    /// 1. shadow if the bit is set and shadow exists,
+    /// 2. else primary if the bit is set,
+    /// 3. else feature unavailable.
    shadow_mask: AtomicU32,
 }

 /// Database version router.
 ///
-/// Routes database capability to the correct database during major migrations.
+/// Routes database capabilities to either a primary backend or (during major migrations) an optional
+/// shadow backend.
+///
+/// ## Routing guarantees
+/// - The router only returns a backend if the corresponding capability bit is enabled in the
+///   backend’s active mask.
+/// - Backend selection is lock-free and safe for concurrent use.
+/// - Promotion swaps the primary backend atomically; in-flight operations remain valid because they
+///   hold their own `Arc` clones.
 impl Router {
     // ***** Router creation *****

-    /// Creatues a new database router, setting primary the given database.
+    /// Creates a new [`Router`] with `primary` installed as the active backend.
+    ///
+    /// The primary capability mask is initialized from `primary.capability()`. The shadow backend is
+    /// initially unset and must only be configured during major migrations.
     ///
-    /// Shadow is spawned as none and should only be set to some during major database migrations.
+    /// ## Notes
+    /// - The router does not validate that `primary.capability()` matches the masks that may later be
+    ///   set by migration code; migration orchestration must keep the masks conservative.
     pub(crate) fn new(primary: Arc<DbBackend>) -> Self {
         let cap = primary.capability();
         Self {
@@ -54,7 +174,18 @@ impl Router {

     // ***** Capability router *****

-    /// Return the database backend for a given capability, or an error if none is available.
+    /// Returns the database backend that should serve `cap`.
+    ///
+    /// Routing order:
+    /// 1. If the shadow mask contains the requested bit *and* a shadow backend exists, return shadow.
+    /// 2. Else if the primary mask contains the requested bit, return primary.
+    /// 3. Otherwise return [`FinalisedStateError::FeatureUnavailable`].
+    ///
+    /// ## Correctness contract
+    /// The masks are the source of truth for routing. If migration code enables a bit on the shadow
+    /// backend before the corresponding data/index is correct there, callers may observe incorrect
+    /// results. Therefore, migrations must only route a capability to shadow once it is complete and
+    /// consistent for that capability’s semantics.
     #[inline]
     pub(crate) fn backend(
         &self,
@@ -78,27 +209,48 @@
     //
     // These methods should only ever be used by the migration manager.

-    /// Sets the shadow to the given database.
+    /// Installs `shadow` as the current shadow backend and sets its routed capability mask to `caps`.
+    ///
+    /// This is the entry point for starting a major migration:
+    /// - spawn/open the new-version backend,
+    /// - call `set_shadow(new_backend, initial_caps)`,
+    /// - optionally expand shadow routing incrementally with [`Router::extend_shadow_caps`].
+    ///
+    /// ## Ordering
+    /// The shadow backend pointer is stored first, then the shadow mask is published with `Release`
+    /// ordering. Readers use `Acquire` to observe both consistently.
     pub(crate) fn set_shadow(&self, shadow: Arc<DbBackend>, caps: Capability) {
         self.shadow.store(Some(shadow));
         self.shadow_mask.store(caps.bits(), Ordering::Release);
     }

-    /// Move additional capability bits to the *current* shadow.
+    /// Adds additional capabilities to the shadow routing mask.
+    ///
+    /// This enables incremental migrations where certain read capabilities can move to the shadow
+    /// backend once the corresponding indices are complete there.
+    ///
+    /// ## Notes
+    /// - This only changes routing; it does not validate the shadow backend’s correctness.
+    /// - Use conservative routing policies: prefer moving read-only capabilities first.
     pub(crate) fn extend_shadow_caps(&self, caps: Capability) {
         self.shadow_mask.fetch_or(caps.bits(), Ordering::AcqRel);
     }

-    /// Promotes the shadow database to primary, resets shadow,
-    /// and updates the primary capability mask from the new backend.
+    /// Promotes the current shadow backend to become the new primary backend.
     ///
-    /// Used at the end of major migrations to move the active database to the new version.
+    /// Promotion performs the following steps:
+    /// - Removes the shadow backend (`shadow = None`).
+    /// - Sets `primary_mask` to the promoted backend’s declared capabilities.
+    /// - Clears `shadow_mask`.
+    /// - Atomically swaps the `primary` backend pointer to the promoted backend.
     ///
-    /// Returns the initial primary value.
+    /// Returns the old primary backend so the caller (migration manager) can:
+    /// - wait for all outstanding `Arc` clones to drop,
+    /// - shut it down,
+    /// - and finally remove the old on-disk directory safely.
     ///
-    /// # Error
-    ///
-    /// Returns a critical error if the shadow is not found.
+    /// # Errors
+    /// Returns [`FinalisedStateError::Critical`] if no shadow backend is currently installed.
     pub(crate) fn promote_shadow(&self) -> Result<Arc<DbBackend>, FinalisedStateError> {
         let Some(new_primary) = self.shadow.swap(None) else {
             return Err(FinalisedStateError::Critical(
@@ -115,17 +267,29 @@

     // ***** Primary database capability control *****

-    /// Disables specific capabilities on the primary backend.
+    /// Disables specific capabilities on the primary backend by clearing bits in `primary_mask`.
+    ///
+    /// This is primarily used during migrations to prevent routing particular operations to the old
+    /// backend once the migration wants them served elsewhere.
+    ///
+    /// ## Safety
+    /// This only affects routing. It does not stop in-flight operations already holding an
+    /// `Arc` clone.
     pub(crate) fn limit_primary_caps(&self, caps: Capability) {
         self.primary_mask.fetch_and(!caps.bits(), Ordering::AcqRel);
     }

-    /// Enables specific capabilities on the primary backend.
+    /// Enables specific capabilities on the primary backend by setting bits in `primary_mask`.
+    ///
+    /// This can be used to restore routing to the primary backend after temporarily restricting it.
     pub(crate) fn extend_primary_caps(&self, caps: Capability) {
         self.primary_mask.fetch_or(caps.bits(), Ordering::AcqRel);
     }

     /// Overwrites the entire primary capability mask.
+    ///
+    /// This is a sharp tool intended for migration orchestration. Prefer incremental helpers
+    /// (`limit_primary_caps`, `extend_primary_caps`) unless a full reset is required.
     pub(crate) fn set_primary_mask(&self, new_mask: Capability) {
         self.primary_mask.store(new_mask.bits(), Ordering::Release);
     }
@@ -133,8 +297,18 @@ impl Router {
 }

 // ***** Core DB functionality *****

+/// Core database façade implementation for the router.
+///
+/// `DbCore` methods are routed via capability selection:
+/// - `status()` consults the backend that currently serves `READ_CORE`.
+/// - `shutdown()` attempts to shut down both primary and shadow backends (if present).
 #[async_trait]
 impl DbCore for Router {
+    /// Returns the runtime status of the database system.
+    ///
+    /// This is derived from whichever backend currently serves `READ_CORE`. If `READ_CORE` is not
+    /// available (misconfiguration or partial migration state), this returns [`StatusType::Busy`]
+    /// as a conservative fallback.
     fn status(&self) -> StatusType {
         match self.backend(CapabilityRequest::ReadCore) {
             Ok(backend) => backend.status(),
@@ -142,6 +316,15 @@ impl DbCore for Router {
         }
     }

+    /// Shuts down both the primary and shadow backends (if any).
+    ///
+    /// Shutdown is attempted for the primary first, then the shadow. If primary shutdown fails, the
+    /// error is returned immediately (the shadow shutdown result is not returned in that case).
+    ///
+    /// ## Migration note
+    /// During major migrations, the old primary backend may need to stay alive until all outstanding
+    /// handles are dropped. That waiting logic lives outside the router (typically in the migration
+    /// manager).
     async fn shutdown(&self) -> Result<(), FinalisedStateError> {
         let primary_shutdown_result = self.primary.load_full().shutdown().await;
@@ -156,26 +339,37 @@ impl DbCore for Router {
     }
 }

+/// Core write surface routed through `WRITE_CORE`.
+///
+/// All writes are delegated to the backend currently selected for [`CapabilityRequest::WriteCore`].
+/// During migrations this allows writers to remain on the old backend until the new backend is ready
+/// (or to be switched deliberately by migration orchestration).
 #[async_trait]
 impl DbWrite for Router {
+    /// Writes a block via the backend currently serving `WRITE_CORE`.
     async fn write_block(&self, blk: IndexedBlock) -> Result<(), FinalisedStateError> {
         self.backend(CapabilityRequest::WriteCore)?
             .write_block(blk)
             .await
     }

+    /// Deletes the block at height `h` via the backend currently serving `WRITE_CORE`.
     async fn delete_block_at_height(&self, h: Height) -> Result<(), FinalisedStateError> {
         self.backend(CapabilityRequest::WriteCore)?
             .delete_block_at_height(h)
             .await
     }

+    /// Deletes the provided block via the backend currently serving `WRITE_CORE`.
     async fn delete_block(&self, blk: &IndexedBlock) -> Result<(), FinalisedStateError> {
         self.backend(CapabilityRequest::WriteCore)?
             .delete_block(blk)
             .await
     }

+    /// Updates the persisted metadata singleton via the backend currently serving `WRITE_CORE`.
+    ///
+    /// This is used by migrations to record progress and completion status.
     async fn update_metadata(&self, metadata: DbMetadata) -> Result<(), FinalisedStateError> {
         self.backend(CapabilityRequest::WriteCore)?
             .update_metadata(metadata)
@@ -183,12 +377,19 @@
     }
 }

+/// Core read surface routed through `READ_CORE`.
+///
+/// All reads are delegated to the backend currently selected for [`CapabilityRequest::ReadCore`].
+/// During migrations this allows reads to continue from the old backend unless/until explicitly
+/// moved.
 #[async_trait]
 impl DbRead for Router {
+    /// Returns the database tip height via the backend currently serving `READ_CORE`.
     async fn db_height(&self) -> Result<Option<Height>, FinalisedStateError> {
         self.backend(CapabilityRequest::ReadCore)?.db_height().await
     }

+    /// Returns the height for `hash` via the backend currently serving `READ_CORE`.
     async fn get_block_height(
         &self,
         hash: BlockHash,
@@ -198,12 +399,17 @@
             .await
     }

+    /// Returns the hash for `h` via the backend currently serving `READ_CORE`.
     async fn get_block_hash(&self, h: Height) -> Result<Option<BlockHash>, FinalisedStateError> {
         self.backend(CapabilityRequest::ReadCore)?
             .get_block_hash(h)
             .await
     }

+    /// Returns database metadata via the backend currently serving `READ_CORE`.
+    ///
+    /// During migrations, callers should expect `DbMetadata::migration_status` to reflect the state
+    /// of the active backend selected by routing.
     async fn get_metadata(&self) -> Result<DbMetadata, FinalisedStateError> {
         self.backend(CapabilityRequest::ReadCore)?
             .get_metadata()
diff --git a/zaino-state/src/chain_index/non_finalised_state.rs b/zaino-state/src/chain_index/non_finalised_state.rs
index 3ac584c6c..f289f52ab 100644
--- a/zaino-state/src/chain_index/non_finalised_state.rs
+++ b/zaino-state/src/chain_index/non_finalised_state.rs
@@ -7,10 +7,10 @@ use crate::{
 };
 use arc_swap::ArcSwap;
 use futures::lock::Mutex;
 use primitive_types::U256;
-use std::{collections::HashMap, mem, sync::Arc};
+use std::{collections::HashMap, sync::Arc};
 use tokio::sync::mpsc;
 use tracing::{info, warn};
-use zebra_chain::parameters::Network;
+use zebra_chain::{parameters::Network, serialization::BytesInDisplayOrder};
 use zebra_state::HashOrHeight;

 /// Holds the block cache
@@ -19,8 +19,6 @@ pub struct NonFinalizedState {
     /// We need access to the validator's best block hash, as well
     /// as a source of blocks
     pub(super) source: Source,
-    staged: Mutex<mpsc::Receiver<IndexedBlock>>,
-    staging_sender: mpsc::Sender<IndexedBlock>,
     /// This lock should not be exposed to consumers. Rather,
     /// clone the Arc and offer that. This means we can overwrite the arc
     /// without interfering with readers, who will hold a stale copy
@@ -45,7 +43,7 @@ pub struct BestTip {
     pub blockhash: BlockHash,
 }

-#[derive(Debug)]
+#[derive(Debug, Clone)]
 /// A snapshot of the nonfinalized state as it existed when this was created.
 pub struct NonfinalizedBlockCacheSnapshot {
     /// the set of all known blocks < 100 blocks old
     /// removed by a reorg. Blocks reorged away have no height.
     pub blocks: HashMap<BlockHash, IndexedBlock>,
     /// hashes indexed by height
+    /// Hashes in this map are part of the best chain.
     pub heights_to_hashes: HashMap<Height, BlockHash>,
     // Do we need height here?
     /// The highest known block
@@ -75,6 +74,17 @@ pub enum NodeConnectionError {
     UnrecoverableError(Box<dyn std::error::Error + Send + Sync>),
 }

+#[derive(Debug)]
+struct MissingBlockError(String);
+
+impl std::fmt::Display for MissingBlockError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "missing block: {}", self.0)
+    }
+}
+
+impl std::error::Error for MissingBlockError {}
+
 #[derive(Debug)]
 /// An error occurred during sync of the NonFinalized State.
 pub enum SyncError {
@@ -88,9 +98,7 @@ pub enum SyncError {
     /// Sync has been called multiple times in parallel, or another process has
     /// written to the block snapshot.
     CompetingSyncProcess,
-    /// Sync attempted a reorg, and something went wrong. Currently, this
-    /// only happens when we attempt to reorg below the start of the chain,
-    /// indicating an entirely separate regtest/testnet chain to what we expected
+    /// Sync attempted a reorg, and something went wrong.
     ReorgFailure(String),
     /// UnrecoverableFinalizedStateError
     CannotReadFinalizedState,
@@ -102,6 +110,9 @@ impl From<UpdateError> for SyncError {
             UpdateError::ReceiverDisconnected => SyncError::StagingChannelClosed,
             UpdateError::StaleSnapshot => SyncError::CompetingSyncProcess,
             UpdateError::FinalizedStateCorruption => SyncError::CannotReadFinalizedState,
+            UpdateError::DatabaseHole => {
+                SyncError::ReorgFailure(String::from("could not determine best chain"))
+            }
         }
     }
 }
@@ -131,28 +142,11 @@ pub enum InitError {
     InitalBlockMissingHeight,
 }

-/// Staging infrastructure for block processing
-struct StagingChannel {
-    receiver: Mutex<mpsc::Receiver<IndexedBlock>>,
-    sender: mpsc::Sender<IndexedBlock>,
-}
-
-impl StagingChannel {
-    /// Create new staging channel with the given buffer size
-    fn new(buffer_size: usize) -> Self {
-        let (sender, receiver) = mpsc::channel(buffer_size);
-        Self {
-            receiver: Mutex::new(receiver),
-            sender,
-        }
-    }
-}
-
 /// This is the core of the concurrent block cache.
 impl BestTip {
     /// Create a BestTip from an IndexedBlock
     fn from_block(block: &IndexedBlock) -> Result<Self, InitError> {
-        let height = block.height().ok_or(InitError::InitalBlockMissingHeight)?;
+        let height = block.height();
         let blockhash = *block.hash();
         Ok(Self { height, blockhash })
     }
@@ -177,6 +171,34 @@ impl NonfinalizedBlockCacheSnapshot {
             best_tip,
         })
     }
+
+    fn add_block_new_chaintip(&mut self, block: IndexedBlock) {
+        self.best_tip = BestTip {
+            height: block.height(),
+            blockhash: *block.hash(),
+        };
+        self.add_block(block)
+    }
+
+    fn get_block_by_hash_bytes_in_serialized_order(&self, hash: [u8; 32]) -> Option<&IndexedBlock> {
+        self.blocks
+            .values()
+            .find(|block| block.hash_bytes_serialized_order() == hash)
+    }
+
+    fn remove_finalized_blocks(&mut self, finalized_height: Height) {
+        // Keep the last finalized block. This means we don't have to check
+        // the finalized state when the entire non-finalized state is reorged away.
+ self.blocks + .retain(|_hash, block| block.height() >= finalized_height); + self.heights_to_hashes + .retain(|height, _hash| height >= &finalized_height); + } + + fn add_block(&mut self, block: IndexedBlock) { + self.heights_to_hashes.insert(block.height(), *block.hash()); + self.blocks.insert(*block.hash(), block); + } } impl NonFinalizedState { @@ -192,9 +214,6 @@ impl NonFinalizedState { ) -> Result { info!("Initialising non-finalised state."); - // Set up staging channel for block processing - let staging_channel = StagingChannel::new(100); - // Resolve the initial block (provided or genesis) let initial_block = Self::resolve_initial_block(&source, &network, start_block).await?; @@ -206,8 +225,6 @@ impl NonFinalizedState { Ok(Self { source, - staged: staging_channel.receiver, - staging_sender: staging_channel.sender, current: ArcSwap::new(Arc::new(snapshot)), network, nfs_change_listener, @@ -288,38 +305,8 @@ impl NonFinalizedState { /// sync to the top of the chain, trimming to the finalised tip. pub(super) async fn sync(&self, finalized_db: Arc) -> Result<(), SyncError> { - let initial_state = self.get_snapshot(); - let mut nonbest_blocks = HashMap::new(); - - // Fetch main chain blocks and handle reorgs - let new_blocks = self - .fetch_main_chain_blocks(&initial_state, &mut nonbest_blocks) - .await?; - - // Stage and update new blocks - self.stage_new_blocks(new_blocks, &finalized_db).await?; - - // Handle non-finalized change listener - self.handle_nfs_change_listener(&mut nonbest_blocks).await?; - - // Update finalized state - self.update(finalized_db.clone()).await?; - - // Process non-best chain blocks - self.process_nonbest_blocks(nonbest_blocks, &finalized_db) - .await?; - - Ok(()) - } - - /// Fetch main chain blocks and handle reorgs - async fn fetch_main_chain_blocks( - &self, - initial_state: &NonfinalizedBlockCacheSnapshot, - nonbest_blocks: &mut HashMap>, - ) -> Result, SyncError> { - let mut new_blocks = Vec::new(); - let mut best_tip = initial_state.best_tip; + let mut initial_state = self.get_snapshot(); + let mut working_snapshot = initial_state.as_ref().clone(); // currently this only gets main-chain blocks // once readstateservice supports serving sidechain data, this @@ -330,7 +317,7 @@ impl NonFinalizedState { while let Some(block) = self .source .get_block(HashOrHeight::Height(zebra_chain::block::Height( - u32::from(best_tip.height) + 1, + u32::from(working_snapshot.best_tip.height) + 1, ))) .await .map_err(|e| { @@ -341,101 +328,105 @@ impl NonFinalizedState { })? 
{ let parent_hash = BlockHash::from(block.header.previous_block_hash); - if parent_hash == best_tip.blockhash { + if parent_hash == working_snapshot.best_tip.blockhash { // Normal chain progression - let prev_block = match new_blocks.last() { - Some(block) => block, - None => initial_state - .blocks - .get(&best_tip.blockhash) - .ok_or_else(|| { - SyncError::ReorgFailure(format!( - "found blocks {:?}, expected block {:?}", - initial_state - .blocks - .values() - .map(|block| (block.index().hash(), block.index().height())) - .collect::>(), - best_tip - )) - })?, - }; + let prev_block = working_snapshot + .blocks + .get(&working_snapshot.best_tip.blockhash) + .ok_or_else(|| { + SyncError::ReorgFailure(format!( + "found blocks {:?}, expected block {:?}", + working_snapshot + .blocks + .values() + .map(|block| (block.index().hash(), block.index().height())) + .collect::>(), + working_snapshot.best_tip + )) + })?; let chainblock = self.block_to_chainblock(prev_block, &block).await?; info!( "syncing block {} at height {}", &chainblock.index().hash(), - best_tip.height + 1 + working_snapshot.best_tip.height + 1 ); - best_tip = BestTip { - height: best_tip.height + 1, - blockhash: *chainblock.hash(), - }; - new_blocks.push(chainblock.clone()); + working_snapshot.add_block_new_chaintip(chainblock); } else { - // Handle reorg - info!("Reorg detected at height {}", best_tip.height + 1); - best_tip = self.handle_reorg(initial_state, best_tip)?; - nonbest_blocks.insert(block.hash(), block); + self.handle_reorg(&mut working_snapshot, block.as_ref()) + .await?; + // There's been a reorg. The fresh block is the new chaintip + // we need to work backwards from it and update heights_to_hashes + // with it and all its parents. + } + if initial_state.best_tip.height + 100 < working_snapshot.best_tip.height { + self.update(finalized_db.clone(), initial_state, working_snapshot) + .await?; + initial_state = self.current.load_full(); + working_snapshot = initial_state.as_ref().clone(); } } + // Handle non-finalized change listener + self.handle_nfs_change_listener(&mut working_snapshot) + .await?; + + self.update(finalized_db.clone(), initial_state, working_snapshot) + .await?; - Ok(new_blocks) + Ok(()) } /// Handle a blockchain reorg by finding the common ancestor - fn handle_reorg( + async fn handle_reorg( &self, - initial_state: &NonfinalizedBlockCacheSnapshot, - current_tip: BestTip, - ) -> Result { - let mut next_height_down = current_tip.height - 1; - - let prev_hash = loop { - if next_height_down == Height(0) { - return Err(SyncError::ReorgFailure( - "attempted to reorg below chain genesis".to_string(), - )); + working_snapshot: &mut NonfinalizedBlockCacheSnapshot, + block: &impl Block, + ) -> Result { + let prev_block = match working_snapshot + .get_block_by_hash_bytes_in_serialized_order(block.prev_hash_bytes_serialized_order()) + .cloned() + { + Some(prev_block) => { + if !working_snapshot + .heights_to_hashes + .values() + .any(|hash| hash == prev_block.hash()) + { + Box::pin(self.handle_reorg(working_snapshot, &prev_block)).await? + } else { + prev_block + } } - match initial_state - .blocks - .values() - .find(|block| block.height() == Some(next_height_down)) - .map(IndexedBlock::hash) - { - Some(hash) => break hash, - // There is a hole in our database. 
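
The new `handle_reorg` (continuing below) replaces this hole-prone height-by-height walk-down with a recursive walk *back* through parent hashes, fetching missing ancestors from the source until it reconnects to a known block; the recursive call is wrapped in `Box::pin` because a self-referential `async fn` future would otherwise be unsized. A self-contained sketch of that walk-back pattern, using a toy in-memory `Source`:

```rust
use std::collections::HashMap;

#[derive(Clone)]
struct Blk {
    hash: [u8; 32],
    parent: [u8; 32],
}

struct Source {
    by_hash: HashMap<[u8; 32], Blk>,
}

impl Source {
    async fn get_block(&self, hash: [u8; 32]) -> Option<Blk> {
        self.by_hash.get(&hash).cloned()
    }

    /// Walk back from `block` until an ancestor already present in `known`,
    /// returning the fetched chain segment oldest-first.
    async fn ancestors_until_known(
        &self,
        block: Blk,
        known: &HashMap<[u8; 32], Blk>,
    ) -> Result<Vec<Blk>, String> {
        if known.contains_key(&block.parent) {
            return Ok(vec![block]);
        }
        let parent = self
            .get_block(block.parent)
            .await
            .ok_or_else(|| "missing block in best chain".to_string())?;
        // Box::pin breaks the infinitely-sized recursive future, exactly as
        // the `handle_reorg` call does above.
        let mut chain = Box::pin(self.ancestors_until_known(parent, known)).await?;
        chain.push(block);
        Ok(chain)
    }
}
```
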
- // TODO: An error return may be more appropriate here - None => next_height_down = next_height_down - 1, + None => { + let prev_block = self + .source + .get_block(HashOrHeight::Hash( + zebra_chain::block::Hash::from_bytes_in_serialized_order( + block.prev_hash_bytes_serialized_order(), + ), + )) + .await + .map_err(|e| { + SyncError::ZebradConnectionError(NodeConnectionError::UnrecoverableError( + Box::new(e), + )) + })? + .ok_or(SyncError::ZebradConnectionError( + NodeConnectionError::UnrecoverableError(Box::new(MissingBlockError( + "zebrad missing block in best chain".to_string(), + ))), + ))?; + Box::pin(self.handle_reorg(working_snapshot, &*prev_block)).await? } }; - - Ok(BestTip { - height: next_height_down, - blockhash: *prev_hash, - }) - } - - /// Stage new blocks and update the cache - async fn stage_new_blocks( - &self, - new_blocks: Vec, - finalized_db: &Arc, - ) -> Result<(), SyncError> { - for block in new_blocks { - if let Err(e) = self - .sync_stage_update_loop(block, finalized_db.clone()) - .await - { - return Err(e.into()); - } - } - Ok(()) + let indexed_block = block.to_indexed_block(&prev_block, self).await?; + working_snapshot.add_block_new_chaintip(indexed_block.clone()); + Ok(indexed_block) } /// Handle non-finalized change listener events async fn handle_nfs_change_listener( &self, - nonbest_blocks: &mut HashMap>, + working_snapshot: &mut NonfinalizedBlockCacheSnapshot, ) -> Result<(), SyncError> { let Some(ref listener) = self.nfs_change_listener else { return Ok(()); @@ -455,7 +446,7 @@ impl NonFinalizedState { .blocks .contains_key(&types::BlockHash(hash.0)) { - nonbest_blocks.insert(block.hash(), block); + self.add_nonbest_block(working_snapshot, &*block).await?; } } Err(mpsc::error::TryRecvError::Empty) => break, @@ -469,119 +460,13 @@ impl NonFinalizedState { Ok(()) } - /// Process non-best chain blocks iteratively - async fn process_nonbest_blocks( - &self, - mut nonbest_blocks: HashMap>, - finalized_db: &Arc, - ) -> Result<(), SyncError> { - let mut nonbest_chainblocks = HashMap::new(); - - loop { - let (next_up, later): (Vec<_>, Vec<_>) = nonbest_blocks - .into_iter() - .map(|(hash, block)| { - let prev_hash = - crate::chain_index::types::BlockHash(block.header.previous_block_hash.0); - ( - hash, - block, - self.current - .load() - .blocks - .get(&prev_hash) - .or_else(|| nonbest_chainblocks.get(&prev_hash)) - .cloned(), - ) - }) - .partition(|(_hash, _block, prev_block)| prev_block.is_some()); - - if next_up.is_empty() { - // Only store non-best chain blocks - // if we have a path from them - // to the chain - break; - } - - for (_hash, block, parent_block) in next_up { - let chainblock = self - .block_to_chainblock( - &parent_block.expect("partitioned, known to be some"), - &block, - ) - .await?; - nonbest_chainblocks.insert(*chainblock.hash(), chainblock); - } - nonbest_blocks = later - .into_iter() - .map(|(hash, block, _parent_block)| (hash, block)) - .collect(); - } - - for block in nonbest_chainblocks.into_values() { - if let Err(e) = self - .sync_stage_update_loop(block, finalized_db.clone()) - .await - { - return Err(e.into()); - } - } - Ok(()) - } - - async fn sync_stage_update_loop( + /// Add all blocks from the staging area, and save a new cache snapshot, trimming block below the finalised tip. 
+ async fn update( &self, - block: IndexedBlock, finalized_db: Arc, + initial_state: Arc, + mut new_snapshot: NonfinalizedBlockCacheSnapshot, ) -> Result<(), UpdateError> { - if let Err(e) = self.stage(block.clone()) { - match *e { - mpsc::error::TrySendError::Full(_) => { - self.update(finalized_db.clone()).await?; - Box::pin(self.sync_stage_update_loop(block, finalized_db)).await?; - } - mpsc::error::TrySendError::Closed(_block) => { - return Err(UpdateError::ReceiverDisconnected) - } - } - } - Ok(()) - } - - /// Stage a block - fn stage( - &self, - block: IndexedBlock, - ) -> Result<(), Box>> { - self.staging_sender.try_send(block).map_err(Box::new) - } - - /// Add all blocks from the staging area, and save a new cache snapshot, trimming block below the finalised tip. - async fn update(&self, finalized_db: Arc) -> Result<(), UpdateError> { - let mut new = HashMap::::new(); - let mut staged = self.staged.lock().await; - loop { - match staged.try_recv() { - Ok(chain_block) => { - new.insert(*chain_block.index().hash(), chain_block); - } - Err(mpsc::error::TryRecvError::Empty) => break, - Err(mpsc::error::TryRecvError::Disconnected) => { - return Err(UpdateError::ReceiverDisconnected) - } - } - } - // at this point, we've collected everything in the staging area - // we can drop the stage lock, and more blocks can be staged while we finish setting current - mem::drop(staged); - let snapshot = self.get_snapshot(); - new.extend( - snapshot - .blocks - .iter() - .map(|(hash, block)| (*hash, block.clone())), - ); - let finalized_height = finalized_db .to_reader() .db_height() @@ -589,43 +474,25 @@ impl NonFinalizedState { .map_err(|_e| UpdateError::FinalizedStateCorruption)? .unwrap_or(Height(0)); - let (_finalized_blocks, blocks): (HashMap<_, _>, HashMap) = new - .into_iter() - .partition(|(_hash, block)| match block.index().height() { - Some(height) => height < finalized_height, - None => false, - }); - - let best_tip = blocks.iter().fold(snapshot.best_tip, |acc, (hash, block)| { - match block.index().height() { - Some(working_height) if working_height > acc.height => BestTip { - height: working_height, - blockhash: *hash, - }, - _ => acc, - } - }); - - let heights_to_hashes = blocks - .iter() - .filter_map(|(hash, chainblock)| { - chainblock.index().height().map(|height| (height, *hash)) - }) - .collect(); + new_snapshot.remove_finalized_blocks(finalized_height); + let best_block = &new_snapshot + .blocks + .values() + .max_by_key(|block| block.chainwork()) + .cloned() + .expect("empty snapshot impossible"); + self.handle_reorg(&mut new_snapshot, best_block) + .await + .map_err(|_e| UpdateError::DatabaseHole)?; // Need to get best hash at some point in this process - let stored = self.current.compare_and_swap( - &snapshot, - Arc::new(NonfinalizedBlockCacheSnapshot { - blocks, - heights_to_hashes, - best_tip, - }), - ); + let stored = self + .current + .compare_and_swap(&initial_state, Arc::new(new_snapshot)); - if Arc::ptr_eq(&stored, &snapshot) { - let stale_best_tip = snapshot.best_tip; - let new_best_tip = best_tip; + if Arc::ptr_eq(&stored, &initial_state) { + let stale_best_tip = initial_state.best_tip; + let new_best_tip = stored.best_tip; // Log chain tip change if new_best_tip != stale_best_tip { @@ -732,6 +599,43 @@ impl NonFinalizedState { let block_with_metadata = BlockWithMetadata::new(block, metadata); IndexedBlock::try_from(block_with_metadata) } + + async fn add_nonbest_block( + &self, + working_snapshot: &mut NonfinalizedBlockCacheSnapshot, + block: &impl Block, + ) -> 
Result { + let prev_block = match working_snapshot + .get_block_by_hash_bytes_in_serialized_order(block.prev_hash_bytes_serialized_order()) + .cloned() + { + Some(block) => block, + None => { + let prev_block = self + .source + .get_block(HashOrHeight::Hash( + zebra_chain::block::Hash::from_bytes_in_serialized_order( + block.prev_hash_bytes_serialized_order(), + ), + )) + .await + .map_err(|e| { + SyncError::ZebradConnectionError(NodeConnectionError::UnrecoverableError( + Box::new(e), + )) + })? + .ok_or(SyncError::ZebradConnectionError( + NodeConnectionError::UnrecoverableError(Box::new(MissingBlockError( + "zebrad missing block".to_string(), + ))), + ))?; + Box::pin(self.add_nonbest_block(working_snapshot, &*prev_block)).await? + } + }; + let indexed_block = block.to_indexed_block(&prev_block, self).await?; + working_snapshot.add_block(indexed_block.clone()); + Ok(indexed_block) + } } /// Errors that occur during a snapshot update @@ -745,4 +649,52 @@ pub enum UpdateError { /// Something has gone unrecoverably wrong in the finalized /// state. A full rebuild is likely needed FinalizedStateCorruption, + + /// A block in the snapshot is missing + DatabaseHole, +} + +trait Block { + fn hash_bytes_serialized_order(&self) -> [u8; 32]; + fn prev_hash_bytes_serialized_order(&self) -> [u8; 32]; + async fn to_indexed_block( + &self, + prev_block: &IndexedBlock, + nfs: &NonFinalizedState, + ) -> Result; +} + +impl Block for IndexedBlock { + fn hash_bytes_serialized_order(&self) -> [u8; 32] { + self.hash().0 + } + + fn prev_hash_bytes_serialized_order(&self) -> [u8; 32] { + self.index.parent_hash.0 + } + + async fn to_indexed_block( + &self, + _prev_block: &IndexedBlock, + _nfs: &NonFinalizedState, + ) -> Result { + Ok(self.clone()) + } +} +impl Block for zebra_chain::block::Block { + fn hash_bytes_serialized_order(&self) -> [u8; 32] { + self.hash().bytes_in_serialized_order() + } + + fn prev_hash_bytes_serialized_order(&self) -> [u8; 32] { + self.header.previous_block_hash.bytes_in_serialized_order() + } + + async fn to_indexed_block( + &self, + prev_block: &IndexedBlock, + nfs: &NonFinalizedState, + ) -> Result { + nfs.block_to_chainblock(prev_block, self).await + } } diff --git a/zaino-state/src/chain_index/source.rs b/zaino-state/src/chain_index/source.rs index 5eac010e7..9bf1483d5 100644 --- a/zaino-state/src/chain_index/source.rs +++ b/zaino-state/src/chain_index/source.rs @@ -92,7 +92,7 @@ pub enum BlockchainSourceError { #[error("data from validator invalid: {0}")] pub struct InvalidData(String); -type BlockchainSourceResult = Result; +pub(crate) type BlockchainSourceResult = Result; /// ReadStateService based validator connector. 
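
The private `Block` trait above lets reorg handling treat the indexer's own `IndexedBlock` and the validator's `zebra_chain::block::Block` uniformly by normalising every hash to serialized byte order before comparison. A minimal sketch of the same shape with toy types; the display-order byte reversal is exactly the detail the trait hides:

```rust
trait BlockLike {
    fn hash_bytes(&self) -> [u8; 32];
    fn prev_hash_bytes(&self) -> [u8; 32];
}

/// Stores hashes in serialized order already.
struct StoredBlock {
    hash: [u8; 32],
    parent: [u8; 32],
}

/// Carries hashes in display order (byte-reversed relative to serialized order).
struct WireBlock {
    hash_display_order: [u8; 32],
    parent_display_order: [u8; 32],
}

impl BlockLike for StoredBlock {
    fn hash_bytes(&self) -> [u8; 32] {
        self.hash
    }
    fn prev_hash_bytes(&self) -> [u8; 32] {
        self.parent
    }
}

impl BlockLike for WireBlock {
    // Normalise to serialized order so both types compare byte-for-byte.
    fn hash_bytes(&self) -> [u8; 32] {
        let mut h = self.hash_display_order;
        h.reverse();
        h
    }
    fn prev_hash_bytes(&self) -> [u8; 32] {
        let mut h = self.parent_display_order;
        h.reverse();
        h
    }
}

fn extends(child: &impl BlockLike, parent: &impl BlockLike) -> bool {
    child.prev_hash_bytes() == parent.hash_bytes()
}
```
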
/// diff --git a/zaino-state/src/chain_index/tests.rs b/zaino-state/src/chain_index/tests.rs index a9bdf1c22..49879273a 100644 --- a/zaino-state/src/chain_index/tests.rs +++ b/zaino-state/src/chain_index/tests.rs @@ -2,6 +2,7 @@ pub(crate) mod finalised_state; pub(crate) mod mempool; +mod proptest_blockgen; pub(crate) mod vectors; pub(crate) fn init_tracing() { @@ -195,6 +196,7 @@ mod mockchain_tests { ) .await .unwrap(); + assert!(transaction_status_nonbest_chain.is_empty()); assert_eq!( transaction_status_best_chain.unwrap(), BestChainLocation::Block( @@ -202,7 +204,6 @@ mod mockchain_tests { crate::Height(block_height.unwrap().0) ) ); - assert!(transaction_status_nonbest_chain.is_empty()); } } diff --git a/zaino-state/src/chain_index/tests/finalised_state/v0.rs b/zaino-state/src/chain_index/tests/finalised_state/v0.rs index 66595bfa5..d4094895a 100644 --- a/zaino-state/src/chain_index/tests/finalised_state/v0.rs +++ b/zaino-state/src/chain_index/tests/finalised_state/v0.rs @@ -5,6 +5,7 @@ use tempfile::TempDir; use zaino_common::network::ActivationHeights; use zaino_common::{DatabaseConfig, Network, StorageConfig}; +use zaino_proto::proto::utils::PoolTypeFilter; use crate::chain_index::finalised_state::reader::DbReader; use crate::chain_index::finalised_state::ZainoDB; @@ -14,6 +15,7 @@ use crate::chain_index::tests::vectors::{ build_mockchain_source, load_test_vectors, TestVectorBlockData, TestVectorData, }; use crate::error::FinalisedStateError; +use crate::local_cache::compact_block_with_pool_types; use crate::{BlockCacheConfig, BlockMetadata, BlockWithMetadata, ChainWork, Height, IndexedBlock}; pub(crate) async fn spawn_v0_zaino_db( @@ -262,8 +264,77 @@ async fn get_compact_blocks() { parent_chain_work = *chain_block.index().chainwork(); - let reader_compact_block = db_reader.get_compact_block(Height(*height)).await.unwrap(); - assert_eq!(compact_block, reader_compact_block); + let reader_compact_block_default = db_reader + .get_compact_block(Height(*height), PoolTypeFilter::default()) + .await + .unwrap(); + let default_compact_block = compact_block_with_pool_types( + compact_block.clone(), + &PoolTypeFilter::default().to_pool_types_vector(), + ); + assert_eq!(default_compact_block, reader_compact_block_default); + + let reader_compact_block_all_data = db_reader + .get_compact_block(Height(*height), PoolTypeFilter::includes_all()) + .await + .unwrap(); + let all_data_compact_block = compact_block_with_pool_types( + compact_block, + &PoolTypeFilter::includes_all().to_pool_types_vector(), + ); + assert_eq!(all_data_compact_block, reader_compact_block_all_data); + println!("CompactBlock at height {height} OK"); } } + +#[tokio::test(flavor = "multi_thread")] +async fn get_compact_block_stream() { + use futures::StreamExt; + + init_tracing(); + + let (TestVectorData { blocks, .. 
}, _db_dir, _zaino_db, db_reader) = + load_vectors_v0db_and_reader().await; + + let start_height = Height(blocks.first().unwrap().height); + let end_height = Height(blocks.last().unwrap().height); + + for pool_type_filter in [PoolTypeFilter::default(), PoolTypeFilter::includes_all()] { + let compact_block_stream = db_reader + .get_compact_block_stream(start_height, end_height, pool_type_filter.clone()) + .await + .unwrap(); + + futures::pin_mut!(compact_block_stream); + + let mut expected_next_height_u32: u32 = start_height.0; + let mut streamed_block_count: usize = 0; + + while let Some(block_result) = compact_block_stream.next().await { + let streamed_compact_block = block_result.unwrap(); + + let streamed_height_u32: u32 = u32::try_from(streamed_compact_block.height).unwrap(); + + assert_eq!(streamed_height_u32, expected_next_height_u32); + + let singular_compact_block = db_reader + .get_compact_block(Height(streamed_height_u32), pool_type_filter.clone()) + .await + .unwrap(); + + assert_eq!(singular_compact_block, streamed_compact_block); + + expected_next_height_u32 = expected_next_height_u32.saturating_add(1); + streamed_block_count = streamed_block_count.saturating_add(1); + } + + let expected_block_count: usize = (end_height + .0 + .saturating_sub(start_height.0) + .saturating_add(1)) as usize; + + assert_eq!(streamed_block_count, expected_block_count); + assert_eq!(expected_next_height_u32, end_height.0.saturating_add(1)); + } +} diff --git a/zaino-state/src/chain_index/tests/finalised_state/v1.rs b/zaino-state/src/chain_index/tests/finalised_state/v1.rs index 4ca42a83b..bb25a45ca 100644 --- a/zaino-state/src/chain_index/tests/finalised_state/v1.rs +++ b/zaino-state/src/chain_index/tests/finalised_state/v1.rs @@ -5,6 +5,7 @@ use tempfile::TempDir; use zaino_common::network::ActivationHeights; use zaino_common::{DatabaseConfig, Network, StorageConfig}; +use zaino_proto::proto::utils::PoolTypeFilter; use crate::chain_index::finalised_state::capability::IndexedBlockExt; use crate::chain_index::finalised_state::db::DbBackend; @@ -17,6 +18,7 @@ use crate::chain_index::tests::vectors::{ }; use crate::chain_index::types::TransactionHash; use crate::error::FinalisedStateError; +use crate::local_cache::compact_block_with_pool_types; use crate::{ AddrScript, BlockCacheConfig, BlockMetadata, BlockWithMetadata, ChainWork, Height, IndexedBlock, Outpoint, @@ -236,7 +238,7 @@ async fn load_db_backend_from_file() { assert_eq!(prev_hash, block.index().parent_hash); } prev_hash = Some(block.index().hash); - assert_eq!(block.index.height, Some(Height(height))); + assert_eq!(block.index.height, Height(height)); } assert!(finalized_state_backend .get_chain_block(Height(101)) @@ -281,7 +283,7 @@ async fn try_write_invalid_block() { let mut chain_block = IndexedBlock::try_from(BlockWithMetadata::new(&zebra_block, metadata)).unwrap(); - chain_block.index.height = Some(crate::Height(height + 1)); + chain_block.index.height = crate::Height(height + 1); dbg!(chain_block.index.height); let db_err = dbg!(zaino_db.write_block(chain_block).await); @@ -437,12 +439,81 @@ async fn get_compact_blocks() { parent_chain_work = *chain_block.index().chainwork(); - let reader_compact_block = db_reader.get_compact_block(Height(*height)).await.unwrap(); - assert_eq!(compact_block, reader_compact_block); + let reader_compact_block_default = db_reader + .get_compact_block(Height(*height), PoolTypeFilter::default()) + .await + .unwrap(); + let default_compact_block = compact_block_with_pool_types( + compact_block.clone(), + 
&PoolTypeFilter::default().to_pool_types_vector(), + ); + assert_eq!(default_compact_block, reader_compact_block_default); + + let reader_compact_block_all_data = db_reader + .get_compact_block(Height(*height), PoolTypeFilter::includes_all()) + .await + .unwrap(); + let all_data_compact_block = compact_block_with_pool_types( + compact_block, + &PoolTypeFilter::includes_all().to_pool_types_vector(), + ); + assert_eq!(all_data_compact_block, reader_compact_block_all_data); + println!("CompactBlock at height {height} OK"); } } +#[tokio::test(flavor = "multi_thread")] +async fn get_compact_block_stream() { + use futures::StreamExt; + + init_tracing(); + + let (TestVectorData { blocks, .. }, _db_dir, _zaino_db, db_reader) = + load_vectors_v1db_and_reader().await; + + let start_height = Height(blocks.first().unwrap().height); + let end_height = Height(blocks.last().unwrap().height); + + for pool_type_filter in [PoolTypeFilter::default(), PoolTypeFilter::includes_all()] { + let compact_block_stream = db_reader + .get_compact_block_stream(start_height, end_height, pool_type_filter.clone()) + .await + .unwrap(); + + futures::pin_mut!(compact_block_stream); + + let mut expected_next_height_u32: u32 = start_height.0; + let mut streamed_block_count: usize = 0; + + while let Some(block_result) = compact_block_stream.next().await { + let streamed_compact_block = block_result.unwrap(); + + let streamed_height_u32: u32 = u32::try_from(streamed_compact_block.height).unwrap(); + + assert_eq!(streamed_height_u32, expected_next_height_u32); + + let singular_compact_block = db_reader + .get_compact_block(Height(streamed_height_u32), pool_type_filter.clone()) + .await + .unwrap(); + + assert_eq!(singular_compact_block, streamed_compact_block); + + expected_next_height_u32 = expected_next_height_u32.saturating_add(1); + streamed_block_count = streamed_block_count.saturating_add(1); + } + + let expected_block_count: usize = (end_height + .0 + .saturating_sub(start_height.0) + .saturating_add(1)) as usize; + + assert_eq!(streamed_block_count, expected_block_count); + assert_eq!(expected_next_height_u32, end_height.0.saturating_add(1)); + } +} + #[tokio::test(flavor = "multi_thread")] async fn get_faucet_txids() { init_tracing(); @@ -912,7 +983,7 @@ async fn check_faucet_spent_map() { .find(|tx| { let (block_height, tx_idx) = (spender_index.block_height(), spender_index.tx_index()); - chain_block.index().height() == Some(Height(block_height)) + chain_block.index().height() == Height(block_height) && tx.index() == tx_idx as u64 }) .cloned() @@ -1081,7 +1152,7 @@ async fn check_recipient_spent_map() { .find(|tx| { let (block_height, tx_idx) = (spender_index.block_height(), spender_index.tx_index()); - chain_block.index().height() == Some(Height(block_height)) + chain_block.index().height() == Height(block_height) && tx.index() == tx_idx as u64 }) .cloned() diff --git a/zaino-state/src/chain_index/tests/proptest_blockgen.rs b/zaino-state/src/chain_index/tests/proptest_blockgen.rs new file mode 100644 index 000000000..0d81fe38d --- /dev/null +++ b/zaino-state/src/chain_index/tests/proptest_blockgen.rs @@ -0,0 +1,428 @@ +use std::{sync::Arc, time::Duration}; + +use proptest::{ + prelude::{Arbitrary as _, BoxedStrategy, Just}, + strategy::Strategy, +}; +use rand::seq::SliceRandom; +use tonic::async_trait; +use zaino_common::{network::ActivationHeights, DatabaseConfig, Network, StorageConfig}; +use zebra_chain::{ + block::arbitrary::{self, LedgerStateOverride}, + fmt::SummaryDebug, + LedgerState, +}; +use 
zebra_state::{FromDisk, HashOrHeight, IntoDisk as _}; + +use crate::{ + chain_index::{ + source::BlockchainSourceResult, + tests::{init_tracing, proptest_blockgen::proptest_helpers::add_segment}, + NonFinalizedSnapshot, + }, + BlockCacheConfig, BlockHash, BlockchainSource, ChainIndex, NodeBackedChainIndex, + TransactionHash, +}; + +#[test] +fn make_chain() { + init_tracing(); + let network = Network::Regtest(ActivationHeights::default()); + // The length of the initial segment, and of the branches + // TODO: it would be useful to allow branches of different lengths. + let segment_length = 12; + + // The number of separate branches, after the branching point at the tip + // of the initial segment. + let branch_count = 2; + + // default is 256. As each case takes multiple seconds, this seems too many. + // TODO: this should be higher than 1. Currently set to 1 for ease of iteration + proptest::proptest!(proptest::test_runner::Config::with_cases(1), |(segments in make_branching_chain(2, segment_length, network))| { + let runtime = tokio::runtime::Builder::new_multi_thread().worker_threads(2).enable_time().build().unwrap(); + runtime.block_on(async { + let (genesis_segment, branching_segments) = segments; + let mockchain = ProptestMockchain { + genesis_segment, + branching_segments, + }; + let temp_dir: tempfile::TempDir = tempfile::tempdir().unwrap(); + let db_path: std::path::PathBuf = temp_dir.path().to_path_buf(); + + let config = BlockCacheConfig { + storage: StorageConfig { + database: DatabaseConfig { + path: db_path, + ..Default::default() + }, + ..Default::default() + }, + db_version: 1, + network, + + }; + + let indexer = NodeBackedChainIndex::new(mockchain.clone(), config) + .await + .unwrap(); + tokio::time::sleep(Duration::from_secs(5)).await; + let index_reader = indexer.subscriber(); + let snapshot = index_reader.snapshot_nonfinalized_state(); + let best_tip_hash = snapshot.best_chaintip().blockhash; + let best_tip_block = snapshot + .get_chainblock_by_hash(&best_tip_hash) + .unwrap(); + for (hash, block) in &snapshot.blocks { + if hash != &best_tip_hash { + assert!(block.chainwork().to_u256() <= best_tip_block.chainwork().to_u256()); + if snapshot.heights_to_hashes.get(&block.height()) == Some(block.hash()) { + assert_eq!(index_reader.find_fork_point(&snapshot, hash).unwrap().unwrap().0, *hash); + } else { + assert_ne!(index_reader.find_fork_point(&snapshot, hash).unwrap().unwrap().0, *hash); + } + } + } + assert_eq!(snapshot.heights_to_hashes.len(), (segment_length * 2) + 2); + assert_eq!( + snapshot.blocks.len(), + (segment_length * (branch_count + 1)) + 2 + ); + }); + }); +} + +#[derive(Clone)] +struct ProptestMockchain { + genesis_segment: ChainSegment, + branching_segments: Vec, +} + +impl ProptestMockchain { + fn best_branch(&self) -> SummaryDebug>> { + let mut best_branch_and_work = None; + for branch in self.branching_segments.clone() { + let branch_chainwork: u128 = branch + .iter() + .map(|block| { + block + .header + .difficulty_threshold + .to_work() + .unwrap() + .as_u128() + }) + .sum(); + match best_branch_and_work { + Some((ref _b, w)) => { + if w < branch_chainwork { + best_branch_and_work = Some((branch, branch_chainwork)) + } + } + None => best_branch_and_work = Some((branch, branch_chainwork)), + } + } + best_branch_and_work.unwrap().0 + } + + fn all_blocks_arb_branch_order(&self) -> impl Iterator> { + self.genesis_segment.iter().chain( + self.branching_segments + .iter() + .flat_map(|branch| branch.iter()), + ) + } + + fn get_block_and_all_preceeding( + 
&self, + // This probably doesn't need to allow FnMut closures (Fn should suffice) + // but there's no cost to allowing it + mut block_identifier: impl FnMut(&zebra_chain::block::Block) -> bool, + ) -> std::option::Option>> { + let mut blocks = Vec::new(); + for block in self.genesis_segment.iter() { + blocks.push(block); + if block_identifier(block) { + return Some(blocks); + } + } + for branch in self.branching_segments.iter() { + let mut branch_blocks = Vec::new(); + for block in branch.iter() { + branch_blocks.push(block); + if block_identifier(block) { + blocks.extend_from_slice(&branch_blocks); + return Some(blocks); + } + } + } + + None + } +} + +#[async_trait] +impl BlockchainSource for ProptestMockchain { + /// Returns the block by hash or height + async fn get_block( + &self, + id: HashOrHeight, + ) -> BlockchainSourceResult>> { + match id { + HashOrHeight::Hash(hash) => { + let matches_hash = |block: &&Arc| block.hash() == hash; + Ok(self + .genesis_segment + .iter() + .find(matches_hash) + .or_else(|| { + self.branching_segments + .iter() + .flat_map(|vec| vec.iter()) + .find(matches_hash) + }) + .cloned()) + } + // This implementation selects a block from a random branch instead + // of the best branch. This is intended to simulate reorgs + HashOrHeight::Height(height) => Ok(self + .genesis_segment + .iter() + .find(|block| block.coinbase_height().unwrap() == height) + .cloned() + .or_else(|| { + self.branching_segments + .choose(&mut rand::thread_rng()) + .unwrap() + .iter() + .find(|block| block.coinbase_height().unwrap() == height) + .cloned() + })), + } + } + + /// Returns the block commitment tree data by hash + async fn get_commitment_tree_roots( + &self, + id: BlockHash, + ) -> BlockchainSourceResult<( + Option<(zebra_chain::sapling::tree::Root, u64)>, + Option<(zebra_chain::orchard::tree::Root, u64)>, + )> { + let Some(chain_up_to_block) = + self.get_block_and_all_preceeding(|block| block.hash().0 == id.0) + else { + return Ok((None, None)); + }; + + let (sapling, orchard) = + chain_up_to_block + .iter() + .fold((None, None), |(mut sapling, mut orchard), block| { + for transaction in &block.transactions { + for sap_commitment in transaction.sapling_note_commitments() { + let sap_commitment = + sapling_crypto::Node::from_bytes(sap_commitment.to_bytes()) + .unwrap(); + + sapling = Some(sapling.unwrap_or_else(|| { + incrementalmerkletree::frontier::Frontier::<_, 32>::empty() + })); + + sapling = sapling.map(|mut tree| { + tree.append(sap_commitment); + tree + }); + } + for orc_commitment in transaction.orchard_note_commitments() { + let orc_commitment = + zebra_chain::orchard::tree::Node::from(*orc_commitment); + + orchard = Some(orchard.unwrap_or_else(|| { + incrementalmerkletree::frontier::Frontier::<_, 32>::empty() + })); + + orchard = orchard.map(|mut tree| { + tree.append(orc_commitment); + tree + }); + } + } + (sapling, orchard) + }); + Ok(( + sapling.map(|sap_front| { + ( + zebra_chain::sapling::tree::Root::from_bytes(sap_front.root().to_bytes()), + sap_front.tree_size(), + ) + }), + orchard.map(|orc_front| { + ( + zebra_chain::orchard::tree::Root::from_bytes(orc_front.root().as_bytes()), + orc_front.tree_size(), + ) + }), + )) + } + + /// Returns the sapling and orchard treestate by hash + async fn get_treestate( + &self, + _id: BlockHash, + ) -> BlockchainSourceResult<(Option>, Option>)> { + // I don't think this is used for sync? + unimplemented!() + } + + /// Returns the complete list of txids currently in the mempool. 
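
Note that the mockchain's `best_branch` above selects the branch whose *summed per-block work* is greatest, not the longest branch; that distinction is what the reorg assertions in `make_chain` rely on. A toy version with plain `u128` work values makes it concrete:

```rust
/// Pick the branch with the greatest cumulative work, mirroring
/// `best_branch` above (which sums `difficulty_threshold.to_work()`).
fn best_branch(branches: &[Vec<u128>]) -> Option<&Vec<u128>> {
    branches
        .iter()
        .max_by_key(|branch| branch.iter().sum::<u128>())
}

fn main() {
    let branches = vec![vec![3, 3, 3], vec![2, 2, 2, 2, 2]];
    // 10 > 9: the longer but lower-difficulty branch wins here.
    assert_eq!(best_branch(&branches), Some(&branches[1]));
    println!("best branch has {} blocks", best_branch(&branches).unwrap().len());
}
```
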
+ async fn get_mempool_txids( + &self, + ) -> BlockchainSourceResult>> { + Ok(Some(Vec::new())) + } + + /// Returns the transaction by txid + async fn get_transaction( + &self, + txid: TransactionHash, + ) -> BlockchainSourceResult>> { + Ok(self.all_blocks_arb_branch_order().find_map(|block| { + block + .transactions + .iter() + .find(|transaction| transaction.hash() == txid.into()) + .cloned() + })) + } + + /// Returns the hash of the block at the tip of the best chain. + async fn get_best_block_hash( + &self, + ) -> BlockchainSourceResult> { + Ok(Some(self.best_branch().last().unwrap().hash())) + } + + /// Get a listener for new nonfinalized blocks, + /// if supported + async fn nonfinalized_listener( + &self, + ) -> Result< + Option< + tokio::sync::mpsc::Receiver<(zebra_chain::block::Hash, Arc)>, + >, + Box, + > { + let (sender, receiver) = tokio::sync::mpsc::channel(1_000); + let self_clone = self.clone(); + tokio::task::spawn(async move { + for block in self_clone.all_blocks_arb_branch_order() { + sender.send((block.hash(), block.clone())).await.unwrap() + } + // don't drop the sender + std::mem::forget(sender); + }) + .await + .unwrap(); + Ok(Some(receiver)) + } +} + +type ChainSegment = SummaryDebug>>; +fn make_branching_chain( + num_branches: usize, + chain_size: usize, + network_override: Network, +) -> BoxedStrategy<(ChainSegment, Vec)> { + let network_override = Some(network_override.to_zebra_network()); + // these feel like they shouldn't be needed. The closure lifetimes are fighting me + let n_o_clone = network_override.clone(); + let n_o_clone_2 = network_override.clone(); + add_segment(SummaryDebug(Vec::new()), network_override.clone(), 1) + .prop_flat_map(move |segment| add_segment(segment, n_o_clone.clone(), 1)) + .prop_flat_map(move |segment| add_segment(segment, n_o_clone_2.clone(), chain_size)) + .prop_flat_map(move |segment| { + ( + Just(segment.clone()), + LedgerState::arbitrary_with(LedgerStateOverride { + height_override: segment.last().unwrap().coinbase_height().unwrap() + 1, + previous_block_hash_override: Some(segment.last().unwrap().hash()), + network_upgrade_override: None, + transaction_version_override: None, + transaction_has_valid_network_upgrade: true, + always_has_coinbase: true, + network_override: network_override.clone(), + }), + ) + }) + .prop_flat_map(move |(segment, ledger)| { + ( + Just(segment), + std::iter::repeat_with(|| { + zebra_chain::block::Block::partial_chain_strategy( + ledger.clone(), + chain_size, + arbitrary::allow_all_transparent_coinbase_spends, + true, + ) + }) + .take(num_branches) + .collect::>(), + ) + }) + .boxed() +} + +mod proptest_helpers { + + use proptest::prelude::{Arbitrary, BoxedStrategy, Strategy}; + use zebra_chain::{ + block::{ + arbitrary::{allow_all_transparent_coinbase_spends, LedgerStateOverride}, + Block, Height, + }, + parameters::{Network, GENESIS_PREVIOUS_BLOCK_HASH}, + LedgerState, + }; + + use super::ChainSegment; + + pub(super) fn add_segment( + previous_chain: ChainSegment, + network_override: Option, + segment_length: usize, + ) -> BoxedStrategy { + LedgerState::arbitrary_with(LedgerStateOverride { + height_override: Some( + previous_chain + .last() + .map(|block| (block.coinbase_height().unwrap() + 1).unwrap()) + .unwrap_or(Height(0)), + ), + previous_block_hash_override: Some( + previous_chain + .last() + .map(|block| block.hash()) + .unwrap_or(GENESIS_PREVIOUS_BLOCK_HASH), + ), + network_upgrade_override: None, + transaction_version_override: None, + transaction_has_valid_network_upgrade: true, + 
always_has_coinbase: true, + network_override, + }) + .prop_flat_map(move |ledger| { + Block::partial_chain_strategy( + ledger, + segment_length, + allow_all_transparent_coinbase_spends, + true, + ) + }) + .prop_map(move |new_segment| { + let mut full_chain = previous_chain.clone(); + full_chain.extend_from_slice(&new_segment); + full_chain + }) + .boxed() + } +} diff --git a/zaino-state/src/chain_index/types/db/legacy.rs b/zaino-state/src/chain_index/types/db/legacy.rs index 59ceb5a3b..64dedbc15 100644 --- a/zaino-state/src/chain_index/types/db/legacy.rs +++ b/zaino-state/src/chain_index/types/db/legacy.rs @@ -634,8 +634,8 @@ pub struct BlockIndex { pub parent_hash: BlockHash, /// The cumulative proof-of-work of the blockchain up to this block, used for chain selection. pub chainwork: ChainWork, - /// The height of this block if it's in the current best chain. None if it's part of a fork. - pub height: Option, + /// The height of this block. + pub height: Height, } impl BlockIndex { @@ -644,7 +644,7 @@ impl BlockIndex { hash: BlockHash, parent_hash: BlockHash, chainwork: ChainWork, - height: Option, + height: Height, ) -> Self { Self { hash, @@ -670,7 +670,7 @@ impl BlockIndex { } /// Returns the height of this block if it’s part of the best chain. - pub fn height(&self) -> Option { + pub fn height(&self) -> Height { self.height } } @@ -685,7 +685,7 @@ impl ZainoVersionedSerde for BlockIndex { self.parent_hash.serialize(&mut w)?; self.chainwork.serialize(&mut w)?; - write_option(&mut w, &self.height, |w, h| h.serialize(w)) + write_option(&mut w, &Some(self.height), |w, h| h.serialize(w)) } fn decode_latest(r: &mut R) -> io::Result { @@ -699,7 +699,12 @@ impl ZainoVersionedSerde for BlockIndex { let chainwork = ChainWork::deserialize(&mut r)?; let height = read_option(&mut r, |r| Height::deserialize(r))?; - Ok(BlockIndex::new(hash, parent_hash, chainwork, height)) + Ok(BlockIndex::new( + hash, + parent_hash, + chainwork, + height.expect("blocks always have height"), + )) } } @@ -1132,7 +1137,7 @@ impl IndexedBlock { } /// Returns the block height if available. - pub fn height(&self) -> Option { + pub fn height(&self) -> Height { self.index.height() } @@ -1147,9 +1152,15 @@ impl IndexedBlock { } /// Converts this `IndexedBlock` into a CompactBlock protobuf message using proto v4 format. + /// + /// NOTE: This method currently includes transparent tx data in the compact block produced; + /// `zaino_state::local_cache::compact_block_with_pool_types` should be used to selectively + /// remove tx data by pool type. Alternatively, a `zaino_proto::proto::utils::PoolTypeFilter` + /// could be added as an input to this method, with tx data being added selectively here. pub fn to_compact_block(&self) -> zaino_proto::proto::compact_formats::CompactBlock { - // NOTE: Returns u64::MAX if the block is not in the best chain.
- let height: u64 = self.height().map(|h| h.0.into()).unwrap_or(u64::MAX); + let height: u64 = self.height().0.into(); let hash = self.hash().0.to_vec(); let prev_hash = self.index().parent_hash().0.to_vec(); @@ -1157,17 +1168,7 @@ impl IndexedBlock { let vtx: Vec = self .transactions() .iter() - .filter_map(|tx| { - let has_shielded = !tx.sapling().spends().is_empty() - || !tx.sapling().outputs().is_empty() - || !tx.orchard().actions().is_empty(); - - if !has_shielded { - return None; - } - - Some(tx.to_compact_tx(None)) - }) + .map(|tx| tx.to_compact_tx(None)) .collect(); let sapling_commitment_tree_size = self.commitment_tree_data().sizes().sapling(); @@ -1344,7 +1345,7 @@ impl BlockHash::from(hash), BlockHash::from(parent_hash), chainwork, - Some(height), + height, ); Ok(IndexedBlock::new( @@ -1465,13 +1466,19 @@ impl CompactTxData { ) .collect(); + let vout = self.transparent().compact_vout(); + + let vin = self.transparent().compact_vin(); + zaino_proto::proto::compact_formats::CompactTx { index: self.index(), - hash: self.txid().0.to_vec(), + txid: self.txid().0.to_vec(), fee, spends, outputs, actions, + vin, + vout, } } } @@ -1674,6 +1681,23 @@ impl TransparentCompactTx { pub fn outputs(&self) -> &[TxOutCompact] { &self.vout } + + /// Returns Proto CompactTxIn values, omitting the null prevout used by coinbase. + pub fn compact_vin(&self) -> Vec { + self.inputs() + .iter() + .filter(|txin| !txin.is_null_prevout()) + .map(|txin| txin.to_compact()) + .collect() + } + + /// Returns Proto TxOut values. + pub fn compact_vout(&self) -> Vec { + self.outputs() + .iter() + .map(|txout| txout.to_compact()) + .collect() + } } /// A compact reference to a previously created transparent UTXO being spent. @@ -1713,11 +1737,19 @@ impl TxInCompact { self.prevout_index } - /// `true` iff this input is the special “null” out-point used by a + /// `true` if this input is the special “null” out-point used by a /// coinbase transaction (all-zero txid, index 0xffff_ffff). pub fn is_null_prevout(&self) -> bool { self.prevout_txid == [0u8; 32] && self.prevout_index == u32::MAX } + + /// Creates a Proto CompactTxIn from this record. + pub fn to_compact(&self) -> zaino_proto::proto::compact_formats::CompactTxIn { + zaino_proto::proto::compact_formats::CompactTxIn { + prevout_txid: self.prevout_txid.to_vec(), + prevout_index: self.prevout_index, + } + } } impl ZainoVersionedSerde for TxInCompact { @@ -1909,6 +1941,22 @@ impl TxOutCompact { pub fn script_type_enum(&self) -> Option { ScriptType::try_from(self.script_type).ok() } + + /// Creates a Proto TxOut from this record. + /// + /// Note: this reconstructs standard P2PKH / P2SH scripts. For NonStandard outputs, + /// this returns an empty script_pub_key. 
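
`TxOut::to_compact` below relies on a `build_standard_script` helper that this diff does not show. A hypothetical sketch of what such a helper could look like for the two standard cases, using the canonical P2PKH and P2SH script templates (the enum name and signature here are assumptions):

```rust
enum ScriptKind {
    P2pkh,
    P2sh,
}

/// Rebuild the canonical scriptPubKey from a stored 20-byte hash.
fn build_standard_script(hash: [u8; 20], kind: ScriptKind) -> Vec<u8> {
    match kind {
        // OP_DUP OP_HASH160 <20 bytes> OP_EQUALVERIFY OP_CHECKSIG
        ScriptKind::P2pkh => {
            let mut script = vec![0x76, 0xa9, 0x14];
            script.extend_from_slice(&hash);
            script.extend_from_slice(&[0x88, 0xac]);
            script
        }
        // OP_HASH160 <20 bytes> OP_EQUAL
        ScriptKind::P2sh => {
            let mut script = vec![0xa9, 0x14];
            script.extend_from_slice(&hash);
            script.push(0x87);
            script
        }
    }
}
```
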
+ pub fn to_compact(&self) -> zaino_proto::proto::compact_formats::TxOut { + let script_pub_key = self + .script_type_enum() + .and_then(|script_type| build_standard_script(self.script_hash, script_type)) + .unwrap_or_default(); + + zaino_proto::proto::compact_formats::TxOut { + value: self.value, + script_pub_key, + } + } } impl> TryFrom<(u64, T)> for TxOutCompact { diff --git a/zaino-state/src/chain_index/types/helpers.rs b/zaino-state/src/chain_index/types/helpers.rs index b1f31f4a6..99c88caff 100644 --- a/zaino-state/src/chain_index/types/helpers.rs +++ b/zaino-state/src/chain_index/types/helpers.rs @@ -32,9 +32,7 @@ pub enum BestChainLocation { #[derive(Debug, PartialEq, Eq, Hash)] pub enum NonBestChainLocation { /// the block containing the transaction - // TODO: in this case, returning a consensus branch - // ID would be useful - Block(BlockHash), + Block(BlockHash, Height), /// if the transaction is in the mempool /// but the mempool does not match the /// snapshot's chaintip, return the target height if known @@ -44,27 +42,6 @@ pub enum NonBestChainLocation { Mempool(Option), } -impl TryFrom<&IndexedBlock> for NonBestChainLocation { - type Error = (); - - fn try_from(value: &IndexedBlock) -> Result { - match value.height() { - Some(_) => Err(()), - None => Ok(NonBestChainLocation::Block(*value.hash())), - } - } -} -impl TryFrom<&IndexedBlock> for BestChainLocation { - type Error = (); - - fn try_from(value: &IndexedBlock) -> Result { - match value.height() { - None => Err(()), - Some(height) => Ok(BestChainLocation::Block(*value.hash(), height)), - } - } -} - /// Wrapper for optional commitment tree roots from blockchain source #[derive(Clone)] pub struct TreeRootData { @@ -295,7 +272,10 @@ impl<'a> BlockWithMetadata<'a> { let block = self.block; let hash = BlockHash::from(block.hash()); let parent_hash = BlockHash::from(block.header.previous_block_hash); - let height = block.coinbase_height().map(|height| Height(height.0)); + let height = block + .coinbase_height() + .map(|height| Height(height.0)) + .ok_or_else(|| String::from("Any valid block has a coinbase height"))?; let block_work = block.header.difficulty_threshold.to_work().ok_or_else(|| { "Failed to calculate block work from difficulty threshold".to_string() diff --git a/zaino-state/src/config.rs b/zaino-state/src/config.rs index 523f89118..a7edf5ce5 100644 --- a/zaino-state/src/config.rs +++ b/zaino-state/src/config.rs @@ -3,13 +3,14 @@ use std::path::PathBuf; use zaino_common::{Network, ServiceConfig, StorageConfig}; -#[derive(Debug, Clone, serde::Deserialize, PartialEq, Copy)] +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, serde::Deserialize, serde::Serialize)] #[serde(rename_all = "lowercase")] /// Type of backend to be used. pub enum BackendType { /// Uses ReadStateService (Zebrad) State, /// Uses JsonRPC client (Zcashd. Zainod) + #[default] Fetch, } @@ -29,9 +30,9 @@ pub enum BackendConfig { pub struct StateServiceConfig { /// Zebra [`zebra_state::ReadStateService`] config data pub validator_state_config: zebra_state::Config, - /// Validator JsonRPC address. - pub validator_rpc_address: std::net::SocketAddr, - /// Validator gRPC address. + /// Validator JsonRPC address (supports hostname:port or ip:port format). + pub validator_rpc_address: String, + /// Validator gRPC address (requires ip:port format for Zebra state sync). pub validator_grpc_address: std::net::SocketAddr, /// Validator cookie auth. 
pub validator_cookie_auth: bool, @@ -56,7 +57,7 @@ impl StateServiceConfig { // TODO: replace with struct-literal init only? pub fn new( validator_state_config: zebra_state::Config, - validator_rpc_address: std::net::SocketAddr, + validator_rpc_address: String, validator_grpc_address: std::net::SocketAddr, validator_cookie_auth: bool, validator_cookie_path: Option, @@ -89,8 +90,8 @@ impl StateServiceConfig { #[derive(Debug, Clone)] #[deprecated] pub struct FetchServiceConfig { - /// Validator JsonRPC address. - pub validator_rpc_address: std::net::SocketAddr, + /// Validator JsonRPC address (supports hostname:port or ip:port format). + pub validator_rpc_address: String, /// Enable validator rpc cookie authentification with Some: path to the validator cookie file. pub validator_cookie_path: Option, /// Validator JsonRPC user. @@ -110,7 +111,7 @@ impl FetchServiceConfig { /// Returns a new instance of [`FetchServiceConfig`]. #[allow(clippy::too_many_arguments)] pub fn new( - validator_rpc_address: std::net::SocketAddr, + validator_rpc_address: String, validator_cookie_path: Option, validator_rpc_user: Option, validator_rpc_password: Option, diff --git a/zaino-state/src/error.rs b/zaino-state/src/error.rs index 2ba5e7176..3229bbab7 100644 --- a/zaino-state/src/error.rs +++ b/zaino-state/src/error.rs @@ -8,6 +8,7 @@ use crate::BlockHash; use std::{any::type_name, fmt::Display}; use zaino_fetch::jsonrpsee::connector::RpcRequestError; +use zaino_proto::proto::utils::GetBlockRangeError; /// Errors related to the `StateService`. // #[deprecated] @@ -79,6 +80,32 @@ pub enum StateServiceError { }, } +impl From for StateServiceError { + fn from(value: GetBlockRangeError) -> Self { + match value { + GetBlockRangeError::StartHeightOutOfRange => { + Self::TonicStatusError(tonic::Status::out_of_range( + "Error: Start height out of range. Failed to convert to u32.", + )) + } + GetBlockRangeError::NoStartHeightProvided => { + Self::TonicStatusError(tonic::Status::out_of_range("Error: No start height given")) + } + GetBlockRangeError::EndHeightOutOfRange => { + Self::TonicStatusError(tonic::Status::out_of_range( + "Error: End height out of range. Failed to convert to u32.", + )) + } + GetBlockRangeError::NoEndHeightProvided => { + Self::TonicStatusError(tonic::Status::out_of_range("Error: No end height given.")) + } + GetBlockRangeError::PoolTypeArgumentError(_) => { + Self::TonicStatusError(tonic::Status::invalid_argument("Error: invalid pool type")) + } + } + } +} + #[allow(deprecated)] impl From for tonic::Status { fn from(error: StateServiceError) -> Self { @@ -229,95 +256,62 @@ impl From> for FetchServiceError { } } -// /// Errors related to the `FetchService`. -// #[deprecated] -// #[derive(Debug, thiserror::Error)] -// pub enum FetchServiceError { -// /// Critical Errors, Restart Zaino. -// #[error("Critical error: {0}")] -// Critical(String), - -// /// Error from JsonRpcConnector. -// #[error("JsonRpcConnector error: {0}")] -// JsonRpcConnectorError(#[from] zaino_fetch::jsonrpsee::error::TransportError), - -// /// Error from the block cache. -// #[error("Mempool error: {0}")] -// BlockCacheError(#[from] BlockCacheError), - -// /// Error from the mempool. -// #[error("Mempool error: {0}")] -// MempoolError(#[from] MempoolError), - -// /// RPC error in compatibility with zcashd. -// #[error("RPC error: {0:?}")] -// RpcError(#[from] zaino_fetch::jsonrpsee::connector::RpcError), - -// /// Tonic gRPC error. 
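
Moving `validator_rpc_address` from `SocketAddr` to `String` admits hostnames, which means the address must now be resolved, and resolution can fail, wherever a concrete socket address is required. A minimal sketch using the standard library's resolver; the function name and error type are illustrative:

```rust
use std::net::{SocketAddr, ToSocketAddrs};

/// Accepts "localhost:18232" as well as "127.0.0.1:18232" and resolves to
/// a concrete SocketAddr, taking the first result.
fn resolve_rpc_address(addr: &str) -> Result<SocketAddr, String> {
    addr.to_socket_addrs()
        .map_err(|e| format!("cannot resolve {addr}: {e}"))?
        .next()
        .ok_or_else(|| format!("{addr} resolved to no addresses"))
}
```
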
-// #[error("Tonic status error: {0}")] -// TonicStatusError(#[from] tonic::Status), - -// /// Serialization error. -// #[error("Serialization error: {0}")] -// SerializationError(#[from] zebra_chain::serialization::SerializationError), -// } - -// #[allow(deprecated)] -// impl From for tonic::Status { -// fn from(error: FetchServiceError) -> Self { -// match error { -// FetchServiceError::Critical(message) => tonic::Status::internal(message), -// FetchServiceError::JsonRpcConnectorError(err) => { -// tonic::Status::internal(format!("JsonRpcConnector error: {err}")) -// } -// FetchServiceError::BlockCacheError(err) => { -// tonic::Status::internal(format!("BlockCache error: {err}")) -// } -// FetchServiceError::MempoolError(err) => { -// tonic::Status::internal(format!("Mempool error: {err}")) -// } -// FetchServiceError::RpcError(err) => { -// tonic::Status::internal(format!("RPC error: {err:?}")) -// } -// FetchServiceError::TonicStatusError(err) => err, -// FetchServiceError::SerializationError(err) => { -// tonic::Status::internal(format!("Serialization error: {err}")) -// } -// } -// } -// } -// /// These aren't the best conversions, but the MempoolError should go away -// /// in favor of a new type with the new chain cache is complete -// impl From> for MempoolError { -// fn from(value: RpcRequestError) -> Self { -// match value { -// RpcRequestError::Transport(transport_error) => { -// MempoolError::JsonRpcConnectorError(transport_error) -// } -// RpcRequestError::JsonRpc(error) => { -// MempoolError::Critical(format!("argument failed to serialze: {error}")) -// } -// RpcRequestError::InternalUnrecoverable(e) => { -// MempoolError::Critical(format!("Internal unrecoverable error: {e}")) -// } -// RpcRequestError::ServerWorkQueueFull => MempoolError::Critical( -// "Server queue full. Handling for this not yet implemented".to_string(), -// ), -// RpcRequestError::Method(e) => MempoolError::Critical(format!( -// "unhandled rpc-specific {} error: {}", -// type_name::(), -// e.to_string() -// )), -// RpcRequestError::UnexpectedErrorResponse(error) => MempoolError::Critical(format!( -// "unhandled rpc-specific {} error: {}", -// type_name::(), -// error -// )), -// } -// } -// } - -// >>>>>>> replace_block_generation_delay_with_polling +impl From for FetchServiceError { + fn from(value: GetBlockRangeError) -> Self { + match value { + GetBlockRangeError::StartHeightOutOfRange => { + FetchServiceError::TonicStatusError(tonic::Status::out_of_range( + "Error: Start height out of range. Failed to convert to u32.", + )) + } + GetBlockRangeError::NoStartHeightProvided => FetchServiceError::TonicStatusError( + tonic::Status::out_of_range("Error: No start height given"), + ), + GetBlockRangeError::EndHeightOutOfRange => { + FetchServiceError::TonicStatusError(tonic::Status::out_of_range( + "Error: End height out of range. 
Failed to convert to u32.", + )) + } + GetBlockRangeError::NoEndHeightProvided => FetchServiceError::TonicStatusError( + tonic::Status::out_of_range("Error: No end height given."), + ), + GetBlockRangeError::PoolTypeArgumentError(_) => FetchServiceError::TonicStatusError( + tonic::Status::invalid_argument("Error: invalid pool type"), + ), + } + } +} + +/// These aren't the best conversions, but the MempoolError should go away +/// in favor of a new type when the new chain cache is complete +impl From> for MempoolError { + fn from(value: RpcRequestError) -> Self { + match value { + RpcRequestError::Transport(transport_error) => { + MempoolError::JsonRpcConnectorError(transport_error) + } + RpcRequestError::JsonRpc(error) => { + MempoolError::Critical(format!("argument failed to serialize: {error}")) + } + RpcRequestError::InternalUnrecoverable(e) => { + MempoolError::Critical(format!("Internal unrecoverable error: {e}")) + } + RpcRequestError::ServerWorkQueueFull => MempoolError::Critical( + "Server queue full. Handling for this not yet implemented".to_string(), + ), + RpcRequestError::Method(e) => MempoolError::Critical(format!( + "unhandled rpc-specific {} error: {}", + type_name::(), + e.to_string() + )), + RpcRequestError::UnexpectedErrorResponse(error) => MempoolError::Critical(format!( + "unhandled rpc-specific {} error: {}", + type_name::(), + error + )), + } + } +} /// Errors related to the `Mempool`. #[derive(Debug, thiserror::Error)] @@ -352,37 +346,6 @@ pub enum MempoolError { StatusError(StatusError), } -/// These aren't the best conversions, but the MempoolError should go away -/// in favor of a new type with the new chain cache is complete -impl From> for MempoolError { - fn from(value: RpcRequestError) -> Self { - match value { - RpcRequestError::Transport(transport_error) => { - MempoolError::JsonRpcConnectorError(transport_error) - } - RpcRequestError::JsonRpc(error) => { - MempoolError::Critical(format!("argument failed to serialze: {error}")) - } - RpcRequestError::InternalUnrecoverable(e) => { - MempoolError::Critical(format!("Internal unrecoverable error: {e}")) - } - RpcRequestError::ServerWorkQueueFull => MempoolError::Critical( - "Server queue full. Handling for this not yet implemented".to_string(), - ), - RpcRequestError::Method(e) => MempoolError::Critical(format!( - "unhandled rpc-specific {} error: {}", - type_name::(), - e.to_string() - )), - RpcRequestError::UnexpectedErrorResponse(error) => MempoolError::Critical(format!( - "unhandled rpc-specific {} error: {}", - type_name::(), - error - )), - } - } -} - /// Errors related to the `BlockCache`.
#[derive(Debug, thiserror::Error)] pub enum BlockCacheError { diff --git a/zaino-state/src/indexer.rs b/zaino-state/src/indexer.rs index fc69b2681..164f5d3ff 100644 --- a/zaino-state/src/indexer.rs +++ b/zaino-state/src/indexer.rs @@ -16,10 +16,10 @@ use zaino_fetch::jsonrpsee::response::{ use zaino_proto::proto::{ compact_formats::CompactBlock, service::{ - AddressList, Balance, BlockId, BlockRange, Duration, Exclude, GetAddressUtxosArg, - GetAddressUtxosReplyList, GetSubtreeRootsArg, LightdInfo, PingResponse, RawTransaction, - SendResponse, ShieldedProtocol, SubtreeRoot, TransparentAddressBlockFilter, TreeState, - TxFilter, + AddressList, Balance, BlockId, BlockRange, Duration, GetAddressUtxosArg, + GetAddressUtxosReplyList, GetMempoolTxRequest, GetSubtreeRootsArg, LightdInfo, + PingResponse, RawTransaction, SendResponse, ShieldedProtocol, SubtreeRoot, + TransparentAddressBlockFilter, TreeState, TxFilter, }, }; use zebra_chain::{ @@ -603,7 +603,15 @@ pub trait LightWalletIndexer: Send + Sync + Clone + ZcashIndexer + 'static { /// Submit the given transaction to the Zcash network async fn send_transaction(&self, request: RawTransaction) -> Result; + /// Return the transactions corresponding to the given t-address within the given block range + async fn get_taddress_transactions( + &self, + request: TransparentAddressBlockFilter, + ) -> Result; + /// Return the txids corresponding to the given t-address within the given block range + /// Note: This function is misnamed, it returns complete `RawTransaction` values, not TxIds. + /// Note: this method is deprecated, please use GetTaddressTransactions instead. async fn get_taddress_txids( &self, request: TransparentAddressBlockFilter, @@ -620,18 +628,21 @@ pub trait LightWalletIndexer: Send + Sync + Clone + ZcashIndexer + 'static { request: AddressStream, ) -> Result; - /// Return the compact transactions currently in the mempool; the results - /// can be a few seconds out of date. If the Exclude list is empty, return - /// all transactions; otherwise return all *except* those in the Exclude list - /// (if any); this allows the client to avoid receiving transactions that it - /// already has (from an earlier call to this rpc). The transaction IDs in the - /// Exclude list can be shortened to any number of bytes to make the request - /// more bandwidth-efficient; if two or more transactions in the mempool - /// match a shortened txid, they are all sent (none is excluded). Transactions - /// in the exclude list that don't exist in the mempool are ignored. + /// Returns a stream of the compact transaction representation for transactions + /// currently in the mempool. The results of this operation may be a few + /// seconds out of date. If the `exclude_txid_suffixes` list is empty, + /// return all transactions; otherwise return all *except* those in the + /// `exclude_txid_suffixes` list (if any); this allows the client to avoid + /// receiving transactions that it already has (from an earlier call to this + /// RPC). The transaction IDs in the `exclude_txid_suffixes` list can be + /// shortened to any number of bytes to make the request more + /// bandwidth-efficient; if two or more transactions in the mempool match a + /// txid suffix, none of the matching transactions are excluded. Txid + /// suffixes in the exclude list that don't match any transactions in the + /// mempool are ignored. async fn get_mempool_tx( &self, - request: Exclude, + request: GetMempoolTxRequest, ) -> Result; /// Return a stream of current Mempool transactions. 
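
The exclusion rule documented for `get_mempool_tx` above is easy to get wrong: a shortened txid excludes a transaction only when it matches exactly one mempool entry; an ambiguous suffix excludes nothing. A toy model of that rule, assuming txids and suffixes as raw byte vectors:

```rust
/// Return the mempool txids that should still be sent, given a list of
/// (possibly shortened) txid suffixes the client already has.
fn txids_to_send<'a>(mempool: &'a [Vec<u8>], exclude_suffixes: &[Vec<u8>]) -> Vec<&'a Vec<u8>> {
    let mut excluded = vec![false; mempool.len()];
    for suffix in exclude_suffixes {
        let matches: Vec<usize> = mempool
            .iter()
            .enumerate()
            .filter(|(_, txid)| txid.ends_with(suffix))
            .map(|(i, _)| i)
            .collect();
        if let [only] = matches.as_slice() {
            excluded[*only] = true; // unique match: safe to exclude
        }
        // zero matches: ignored; two or more: all matching txs still sent
    }
    mempool
        .iter()
        .zip(&excluded)
        .filter(|(_, skip)| !**skip)
        .map(|(txid, _)| txid)
        .collect()
}
```
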
/// Return a stream of current Mempool transactions. This will keep the output stream open while diff --git a/zaino-state/src/local_cache.rs b/zaino-state/src/local_cache.rs index 58b52b655..967b9bbf0 100644 --- a/zaino-state/src/local_cache.rs +++ b/zaino-state/src/local_cache.rs @@ -21,7 +21,11 @@ use zaino_fetch::{ response::{GetBlockError, GetBlockResponse}, }, }; -use zaino_proto::proto::compact_formats::{ChainMetadata, CompactBlock, CompactOrchardAction}; +use zaino_proto::proto::{ + compact_formats::{ChainMetadata, CompactBlock, CompactOrchardAction}, + service::PoolType, + utils::PoolTypeFilter, +}; use zebra_chain::{ block::{Hash, Height}, parameters::Network, @@ -278,9 +282,10 @@ async fn try_state_path( block_hex.as_ref(), Some(display_txids_to_server(txid_strings)?), )? - .into_compact( + .into_compact_block( u32::try_from(trees.sapling())?, u32::try_from(trees.orchard())?, + PoolTypeFilter::includes_all(), )?, )) } @@ -331,7 +336,7 @@ async fn try_fetcher_path( type_name::(), )) })? - .into_compact( + .into_compact_block( u32::try_from(trees.sapling()).map_err(|e| { RpcRequestError::Transport(TransportError::BadNodeData( Box::new(e), @@ -344,6 +349,7 @@ async fn try_fetcher_path( type_name::(), )) })?, + PoolTypeFilter::includes_all(), ) .map_err(|e| { RpcRequestError::Transport(TransportError::BadNodeData( @@ -372,6 +378,56 @@ pub(crate) fn display_txids_to_server(txids: Vec) -> Result> .collect::>, _>>() } +/// Prunes a compact block of transaction information related to pools not included in the +/// `pool_types` slice. +/// Note: for backwards compatibility, an empty slice returns Sapling and Orchard tx info. +pub(crate) fn compact_block_with_pool_types( + mut block: CompactBlock, + pool_types: &[PoolType], +) -> CompactBlock { + if pool_types.is_empty() { + for compact_tx in &mut block.vtx { + // Strip transparent inputs and outputs; the legacy default omits them. + compact_tx.vin.clear(); + compact_tx.vout.clear(); + } + + // Omit transactions that have no Sapling/Orchard elements. + block.vtx.retain(|compact_tx| { + !compact_tx.spends.is_empty() + || !compact_tx.outputs.is_empty() + || !compact_tx.actions.is_empty() + }); + } else { + for compact_tx in &mut block.vtx { + // Strip out transparent inputs and outputs if not requested. + if !pool_types.contains(&PoolType::Transparent) { + compact_tx.vin.clear(); + compact_tx.vout.clear(); + } + // Strip out Sapling spends and outputs if not requested. + if !pool_types.contains(&PoolType::Sapling) { + compact_tx.spends.clear(); + compact_tx.outputs.clear(); + } + // Strip out Orchard actions if not requested. + if !pool_types.contains(&PoolType::Orchard) { + compact_tx.actions.clear(); + } + } + + // Omit transactions that have no elements in any requested pool type. + block.vtx.retain(|compact_tx| { + !compact_tx.vin.is_empty() + || !compact_tx.vout.is_empty() + || !compact_tx.spends.is_empty() + || !compact_tx.outputs.is_empty() + || !compact_tx.actions.is_empty() + }); + } + + block +}
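// Editor's usage sketch (not part of this diff): requesting only the
// transparent pool strips shielded data and drops transactions that carry no
// transparent inputs or outputs, while an empty slice keeps the legacy
// Sapling-and-Orchard behaviour. `demo_pool_filter` is a hypothetical helper
// and `block` is an arbitrary `CompactBlock`.
fn demo_pool_filter(block: CompactBlock) {
    let transparent_only =
        compact_block_with_pool_types(block.clone(), &[PoolType::Transparent]);
    // All shielded elements were cleared by the filter.
    assert!(transparent_only
        .vtx
        .iter()
        .all(|tx| tx.spends.is_empty() && tx.outputs.is_empty() && tx.actions.is_empty()));

    // Backwards-compatible default: shielded data only, transparent cleared.
    let legacy = compact_block_with_pool_types(block, &[]);
    assert!(legacy
        .vtx
        .iter()
        .all(|tx| tx.vin.is_empty() && tx.vout.is_empty()));
}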
/// Strips the outputs from all transactions, retains only /// the nullifier from all orchard actions, and clears the chain /// metadata from the block diff --git a/zaino-state/src/stream.rs b/zaino-state/src/stream.rs index 1cf5db873..7816f47fd 100644 --- a/zaino-state/src/stream.rs +++ b/zaino-state/src/stream.rs @@ -7,6 +7,7 @@ use zaino_proto::proto::{ }; /// Stream of RawTransactions, output type of get_taddress_txids. +#[derive(Debug)] pub struct RawTransactionStream { inner: ReceiverStream>, } diff --git a/zaino-state/src/utils.rs b/zaino-state/src/utils.rs index 2ab1f4280..70c935f48 100644 --- a/zaino-state/src/utils.rs +++ b/zaino-state/src/utils.rs @@ -1,10 +1,6 @@ //! Contains utility functionality for Zaino-State. - use std::fmt; - -use zaino_proto::proto::service::BlockId; -use zebra_chain::{block::Height, parameters::Network}; -use zebra_state::HashOrHeight; +use zebra_chain::parameters::Network; // *** Metadata structs *** @@ -117,41 +113,3 @@ impl fmt::Display for ServiceMetadata { writeln!(f, "Zebra Subversion: {}", self.zebra_subversion) } } - -// *** Data transforms *** - -pub(crate) fn blockid_to_hashorheight(block_id: BlockId) -> Option { - <[u8; 32]>::try_from(block_id.hash) - .map(zebra_chain::block::Hash) - .map(HashOrHeight::from) - .or_else(|_| { - block_id - .height - .try_into() - .map(|height| HashOrHeight::Height(Height(height))) - }) - .ok() -} - -/// Strips the ouputs and from all transactions, retains only -/// the nullifier from all orcard actions, and clears the chain -/// metadata from the block -pub(crate) fn compact_block_to_nullifiers( - mut block: zaino_proto::proto::compact_formats::CompactBlock, -) -> zaino_proto::proto::compact_formats::CompactBlock { - for ctransaction in &mut block.vtx { - ctransaction.outputs = Vec::new(); - for caction in &mut ctransaction.actions { - *caction = zaino_proto::proto::compact_formats::CompactOrchardAction { - nullifier: caction.nullifier.clone(), - ..Default::default() - } - } - } - - block.chain_metadata = Some(zaino_proto::proto::compact_formats::ChainMetadata { - sapling_commitment_tree_size: 0, - orchard_commitment_tree_size: 0, - }); - block -} diff --git a/zaino-testutils/src/lib.rs b/zaino-testutils/src/lib.rs index c11c7353e..40cc887d8 100644 --- a/zaino-testutils/src/lib.rs +++ b/zaino-testutils/src/lib.rs @@ -240,13 +240,15 @@ impl ValidatorExt for Zebrad { ) -> Result<(Self, ValidatorConfig), LaunchError> { let zebrad = Zebrad::launch(config).await?; let validator_config = ValidatorConfig { - validator_jsonrpc_listen_address: SocketAddr::new( - IpAddr::V4(Ipv4Addr::LOCALHOST), - zebrad.rpc_listen_port(), + validator_jsonrpc_listen_address: format!( + "{}:{}", + Ipv4Addr::LOCALHOST, + zebrad.rpc_listen_port() ), - validator_grpc_listen_address: Some(SocketAddr::new( - IpAddr::V4(Ipv4Addr::LOCALHOST), - zebrad.indexer_listen_port(), + validator_grpc_listen_address: Some(format!( + "{}:{}", + Ipv4Addr::LOCALHOST, + zebrad.indexer_listen_port() )), validator_cookie_path: None, validator_user: Some("xxxxxx".to_string()), @@ -262,10 +264,7 @@ impl ValidatorExt for Zcashd { ) -> Result<(Self, ValidatorConfig), LaunchError> { let zcashd = Zcashd::launch(config).await?; let validator_config = ValidatorConfig { - validator_jsonrpc_listen_address: SocketAddr::new( - IpAddr::V4(Ipv4Addr::LOCALHOST), - zcashd.port(), - ), + validator_jsonrpc_listen_address: format!("{}:{}", Ipv4Addr::LOCALHOST, zcashd.port()), validator_grpc_listen_address: None, validator_cookie_path: None, validator_user: Some("xxxxxx".to_string()), @@ -279,7 +278,7 @@ impl TestManager where C: ValidatorExt, Service: LightWalletService + Send + Sync + 'static, - Service::Config: From, + Service::Config: TryFrom, IndexerError: From<<::Subscriber as ZcashIndexer>::Error>, { /// Launches zcash-local-net.
@@ -401,7 +400,8 @@ where }; let (handle, service_subscriber) = Indexer::::launch_inner( - Service::Config::from(indexer_config.clone()), + Service::Config::try_from(indexer_config.clone()) + .expect("Failed to convert ZainodConfig to service config"), indexer_config, ) .await @@ -461,6 +461,8 @@ where full_node_rpc_listen_address, full_node_grpc_listen_address: validator_settings .validator_grpc_listen_address + .as_ref() + .and_then(|addr| addr.parse().ok()) .unwrap_or(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0)), zaino_handle, zaino_json_rpc_listen_address: zaino_json_listen_address, diff --git a/zainod/Cargo.toml b/zainod/Cargo.toml index cdcef6c51..66d3eb804 100644 --- a/zainod/Cargo.toml +++ b/zainod/Cargo.toml @@ -49,4 +49,5 @@ thiserror = { workspace = true } # Formats toml = { workspace = true } -figment= { workspace = true, features = ["toml", "env", "test"] } +config = { workspace = true } +tempfile = { workspace = true } diff --git a/zainod/src/config.rs b/zainod/src/config.rs index 004bcf825..dcee228e4 100644 --- a/zainod/src/config.rs +++ b/zainod/src/config.rs @@ -1,58 +1,42 @@ //! Zaino config. -use figment::{ - providers::{Format, Serialized, Toml}, - Figment, -}; + use std::{ - net::{IpAddr, SocketAddr, ToSocketAddrs}, + net::{IpAddr, SocketAddr}, path::PathBuf, }; -// Added for Serde deserialization helpers -use crate::error::IndexerError; -use serde::{ - de::{self, Deserializer}, - Deserialize, Serialize, -}; + +use serde::{Deserialize, Serialize}; +use tracing::info; #[cfg(feature = "no_tls_use_unencrypted_traffic")] use tracing::warn; -use tracing::{error, info}; + +use crate::error::IndexerError; use zaino_common::{ - CacheConfig, DatabaseConfig, DatabaseSize, Network, ServiceConfig, StorageConfig, - ValidatorConfig, + try_resolve_address, AddressResolution, CacheConfig, DatabaseConfig, DatabaseSize, Network, + ServiceConfig, StorageConfig, ValidatorConfig, }; use zaino_serve::server::config::{GrpcServerConfig, JsonRpcServerConfig}; - #[allow(deprecated)] -use zaino_state::{BackendConfig, FetchServiceConfig, StateServiceConfig}; - -/// Custom deserialization function for `BackendType` from a String. -/// Used by Serde's `deserialize_with`. -fn deserialize_backendtype_from_string<'de, D>( - deserializer: D, -) -> Result -where - D: Deserializer<'de>, -{ - let s = String::deserialize(deserializer)?; - match s.to_lowercase().as_str() { - "state" => Ok(zaino_state::BackendType::State), - "fetch" => Ok(zaino_state::BackendType::Fetch), - _ => Err(de::Error::custom(format!( - "Invalid backend type '{s}', valid options are 'state' or 'fetch'" - ))), - } +use zaino_state::{BackendType, FetchServiceConfig, StateServiceConfig}; + +/// Sensitive key suffixes that should not be set via environment variables. +const SENSITIVE_KEY_SUFFIXES: [&str; 5] = ["password", "secret", "token", "cookie", "private_key"]; + +/// Checks if a key is sensitive and should not be set via environment variables. +fn is_sensitive_leaf_key(leaf_key: &str) -> bool { + let key = leaf_key.to_ascii_lowercase(); + SENSITIVE_KEY_SUFFIXES + .iter() + .any(|suffix| key.ends_with(suffix)) }
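// Editor's illustration (values taken from the tests below): with the
// "ZAINO_" prefix and the "__" nesting separator used by
// `load_config_with_env`, the leaf key is the segment after the last "__":
//
//   ZAINO_VALIDATOR_SETTINGS__VALIDATOR_PASSWORD -> leaf "VALIDATOR_PASSWORD" (blocked)
//   ZAINO_VALIDATOR_SETTINGS__VALIDATOR_USER     -> leaf "VALIDATOR_USER"     (allowed)
//
// which matches the suffix check above:
//
//   assert!(is_sensitive_leaf_key("validator_password"));
//   assert!(!is_sensitive_leaf_key("validator_user"));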
/// Config information required for Zaino. #[derive(Debug, Clone, Deserialize, Serialize)] -#[serde(default)] +#[serde(deny_unknown_fields, default)] pub struct ZainodConfig { /// Type of backend to be used. - #[serde(deserialize_with = "deserialize_backendtype_from_string")] - #[serde(serialize_with = "serialize_backendtype_to_string")] - pub backend: zaino_state::BackendType, + pub backend: BackendType, /// Enables the JsonRPC server when set to a `Some` value. - #[serde(default)] pub json_server_settings: Option, /// gRPC server settings including listen addr, tls status, key and cert. pub grpc_settings: GrpcServerConfig, @@ -62,9 +46,7 @@ pub struct ZainodConfig { pub service: ServiceConfig, /// Storage configuration (cache and database). pub storage: StorageConfig, - /// Block Cache database file path. - /// - /// ZebraDB location. + /// Block Cache database file path (ZebraDB location). pub zebra_db_path: PathBuf, /// Network chain type. pub network: Network, @@ -73,33 +55,21 @@ impl ZainodConfig { /// Performs checks on config data. pub(crate) fn check_config(&self) -> Result<(), IndexerError> { - // Network type is validated at the type level via Network enum. // Check TLS settings. if self.grpc_settings.tls.is_some() { - // then check if cert path exists or return error - let c_path = &self - .grpc_settings - .tls - .as_ref() - .expect("to be Some") - .cert_path; - if !std::path::Path::new(&c_path).exists() { + let tls = self.grpc_settings.tls.as_ref().expect("to be Some"); + + if !std::path::Path::new(&tls.cert_path).exists() { return Err(IndexerError::ConfigError(format!( "TLS is enabled, but certificate path {:?} does not exist.", - c_path + tls.cert_path ))); } - let k_path = &self - .grpc_settings - .tls - .as_ref() - .expect("to be Some") - .key_path; - if !std::path::Path::new(&k_path).exists() { + if !std::path::Path::new(&tls.key_path).exists() { return Err(IndexerError::ConfigError(format!( "TLS is enabled, but key path {:?} does not exist.", - k_path + tls.key_path ))); } } @@ -107,9 +77,10 @@ impl ZainodConfig { // Check validator cookie authentication settings if let Some(ref cookie_path) = self.validator_settings.validator_cookie_path { if !std::path::Path::new(cookie_path).exists() { - return Err(IndexerError::ConfigError( - format!("Validator cookie authentication is enabled, but cookie path '{:?}' does not exist.", cookie_path), - )); + return Err(IndexerError::ConfigError(format!( + "Validator cookie authentication is enabled, but cookie path '{:?}' does not exist.", + cookie_path + ))); } } @@ -117,18 +88,36 @@ impl ZainodConfig { let grpc_addr = fetch_socket_addr_from_hostname(&self.grpc_settings.listen_address.to_string())?; - let validator_addr = fetch_socket_addr_from_hostname( - &self - .validator_settings - .validator_jsonrpc_listen_address - .to_string(), - )?; - - // Ensure validator listen address is private. - if !is_private_listen_addr(&validator_addr) { - return Err(IndexerError::ConfigError( - "Zaino may only connect to Zebra with private IP addresses.".to_string(), - )); + // Validate the validator address using the richer result type that distinguishes + // between format errors (always fail) and DNS lookup failures (can defer for Docker).
+ let validator_addr_result = + try_resolve_address(&self.validator_settings.validator_jsonrpc_listen_address); + + // Validator address validation: + // - Resolved IPs: must be private (RFC1918/ULA) + // - Hostnames: validated at connection time (supports Docker/K8s service discovery) + // - Cookie auth: determined by validator_cookie_path config, not enforced by address type + match validator_addr_result { + AddressResolution::Resolved(validator_addr) => { + if !is_private_listen_addr(&validator_addr) { + return Err(IndexerError::ConfigError( + "Zaino may only connect to Zebra with private IP addresses.".to_string(), + )); + } + } + AddressResolution::UnresolvedHostname { ref address, .. } => { + info!( + "Validator address '{}' cannot be resolved at config time; it will be validated at connection time.", + address + ); + } + AddressResolution::InvalidFormat { address, reason } => { + // Invalid address format - always fail immediately. + return Err(IndexerError::ConfigError(format!( + "Invalid validator address '{}': {}", + address, reason + ))); + } } #[cfg(not(feature = "no_tls_use_unencrypted_traffic"))] @@ -139,16 +128,6 @@ impl ZainodConfig { "TLS required when connecting to external addresses.".to_string(), )); } - - // Ensure validator rpc cookie authentication is used when connecting to non-loopback addresses. - if !is_loopback_listen_addr(&validator_addr) - && self.validator_settings.validator_cookie_path.is_none() - { - return Err(IndexerError::ConfigError( - "Validator listen address is not loopback, so cookie authentication must be enabled." - .to_string(), - )); - } } #[cfg(feature = "no_tls_use_unencrypted_traffic")] @@ -159,17 +138,13 @@ impl ZainodConfig { } // Check that the gRPC and JsonRPC servers are not listening on the same address. - if self.json_server_settings.is_some() - && self - .json_server_settings - .as_ref() - .expect("json_server_settings to be Some") - .json_rpc_listen_address - == self.grpc_settings.listen_address - { - return Err(IndexerError::ConfigError( - "gRPC server and JsonRPC server must listen on different addresses.".to_string(), - )); + if let Some(ref json_settings) = self.json_server_settings { + if json_settings.json_rpc_listen_address == self.grpc_settings.listen_address { + return Err(IndexerError::ConfigError( + "gRPC server and JsonRPC server must listen on different addresses." + .to_string(), + )); + } } Ok(()) } @@ -184,15 +159,15 @@ impl ZainodConfig { impl Default for ZainodConfig { fn default() -> Self { Self { - backend: zaino_state::BackendType::Fetch, + backend: BackendType::default(), json_server_settings: None, grpc_settings: GrpcServerConfig { listen_address: "127.0.0.1:8137".parse().unwrap(), tls: None, }, validator_settings: ValidatorConfig { - validator_grpc_listen_address: Some("127.0.0.1:18230".parse().unwrap()), - validator_jsonrpc_listen_address: "127.0.0.1:18232".parse().unwrap(), + validator_grpc_listen_address: Some("127.0.0.1:18230".to_string()), + validator_jsonrpc_listen_address: "127.0.0.1:18232".to_string(), validator_cookie_path: None, validator_user: Some("xxxxxx".to_string()), validator_password: Some("xxxxxx".to_string()), @@ -240,26 +215,13 @@ pub fn default_zebra_db_path() -> Result { /// Resolves a hostname to a SocketAddr. fn fetch_socket_addr_from_hostname(address: &str) -> Result { - address.parse::().or_else(|_| { - let addrs: Vec<_> = address - .to_socket_addrs() - .map_err(|e| IndexerError::ConfigError(format!("Invalid address '{address}': {e}")))?
- .collect(); - if let Some(ipv4_addr) = addrs.iter().find(|addr| addr.is_ipv4()) { - Ok(*ipv4_addr) - } else { - addrs.into_iter().next().ok_or_else(|| { - IndexerError::ConfigError(format!("Unable to resolve address '{address}'")) - }) - } - }) + zaino_common::net::resolve_socket_addr(address) + .map_err(|e| IndexerError::ConfigError(format!("Invalid address '{address}': {e}"))) } /// Validates that the configured `address` is either: /// - An RFC1918 (private) IPv4 address, or -/// - An IPv6 Unique Local Address (ULA) (using `is_unique_local()`) -/// -/// Returns `Ok(BindAddress)` if valid. +/// - An IPv6 Unique Local Address (ULA) pub(crate) fn is_private_listen_addr(addr: &SocketAddr) -> bool { let ip = addr.ip(); match ip { @@ -268,94 +230,113 @@ pub(crate) fn is_private_listen_addr(addr: &SocketAddr) -> bool { } } -/// Validates that the configured `address` is a loopback address. +/// Loads configuration from a TOML file with optional environment variable overrides. /// -/// Returns `Ok(BindAddress)` if valid. -#[cfg_attr(feature = "no_tls_use_unencrypted_traffic", allow(dead_code))] -pub(crate) fn is_loopback_listen_addr(addr: &SocketAddr) -> bool { - let ip = addr.ip(); - match ip { - IpAddr::V4(ipv4) => ipv4.is_loopback(), - IpAddr::V6(ipv6) => ipv6.is_loopback(), - } +/// Configuration is layered: Defaults → TOML file → Environment variables (prefix: ZAINO_). +/// Sensitive keys (password, secret, token, cookie, private_key) are blocked from env vars. +pub fn load_config(file_path: &std::path::Path) -> Result { + load_config_with_env(file_path, "ZAINO") } -/// Attempts to load config data from a TOML file at the specified path. -/// -/// If the file cannot be read, or if its contents cannot be parsed into `ZainodConfig`, -/// a warning is logged, and a default configuration is returned. -/// Finally, there is an override of the config using environmental variables. -/// The loaded or default configuration undergoes further checks and finalization. -pub fn load_config(file_path: &PathBuf) -> Result { - // Configuration sources are layered: Env > TOML > Defaults. - let figment = Figment::new() - // 1. Base defaults from `ZainodConfig::default()`. - .merge(Serialized::defaults(ZainodConfig::default())) - // 2. Override with values from the TOML configuration file. - .merge(Toml::file(file_path)) - // 3. Override with values from environment variables prefixed with "ZAINO_". - .merge(figment::providers::Env::prefixed("ZAINO_").split("__")); - - match figment.extract::() { - Ok(mut parsed_config) => { - if parsed_config - .json_server_settings - .clone() - .is_some_and(|json_settings| { - json_settings.cookie_dir.is_some() - && json_settings - .cookie_dir - .expect("cookie_dir to be Some") - .as_os_str() - // if the assigned pathbuf is empty (cookies enabled but no path defined). - .is_empty() - }) - { - if let Some(ref mut json_config) = parsed_config.json_server_settings { - json_config.cookie_dir = Some(default_ephemeral_cookie_path());
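// Editor's illustration of the layering (mirrors `test_env_override_toml_and_defaults` below):
// with a TOML file that sets `network = "Testnet"`, exporting
//
//   ZAINO_NETWORK=Mainnet
//   ZAINO_STORAGE__CACHE__CAPACITY=12345
//
// before calling `load_config` yields `Network::Mainnet` and a cache capacity
// of 12345, because the environment layer sits above the TOML layer and "__"
// separates nested keys.
+/// Loads configuration with a custom environment variable prefix.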
+pub fn load_config_with_env( + file_path: &std::path::Path, + env_prefix: &str, +) -> Result { + // Check for sensitive keys in environment variables before loading + let required_prefix = format!("{}_", env_prefix); + for (key, _) in std::env::vars() { + if let Some(without_prefix) = key.strip_prefix(&required_prefix) { + if let Some(leaf) = without_prefix.split("__").last() { + if is_sensitive_leaf_key(leaf) { + return Err(IndexerError::ConfigError(format!( + "Environment variable '{}' contains sensitive key '{}' - use config file instead", + key, leaf + ))); } - }; - - parsed_config.check_config()?; - info!( - "Successfully loaded and validated config. Base TOML file checked: '{}'", - file_path.display() - ); - Ok(parsed_config) - } - Err(figment_error) => { - error!( - "Failed to extract configuration using figment: {}", - figment_error - ); - Err(IndexerError::ConfigError(format!( - "Zaino configuration loading failed during figment extract '{}' (could be TOML file or environment variables). Details: {}", - file_path.display(), figment_error - ))) + } } } -} -impl TryFrom for BackendConfig { - type Error = IndexerError; - - #[allow(deprecated)] - fn try_from(cfg: ZainodConfig) -> Result { - match cfg.backend { - zaino_state::BackendType::State => { - Ok(BackendConfig::State(StateServiceConfig::from(cfg))) - } - zaino_state::BackendType::Fetch => { - Ok(BackendConfig::Fetch(FetchServiceConfig::from(cfg))) - } + let mut builder = config::Config::builder() + .set_default("backend", "fetch") + .map_err(|e| IndexerError::ConfigError(e.to_string()))?; + + // Add TOML file source + builder = builder.add_source( + config::File::from(file_path) + .format(config::FileFormat::Toml) + .required(true), + ); + + // Add environment variable source with ZAINO_ prefix and __ separator for nesting + // Note: config-rs lowercases all env var keys after stripping the prefix + builder = builder.add_source( + config::Environment::with_prefix(env_prefix) + .prefix_separator("_") + .separator("__") + .try_parsing(true), + ); + + let settings = builder + .build() + .map_err(|e| IndexerError::ConfigError(format!("Configuration loading failed: {}", e)))?; + + let mut parsed_config: ZainodConfig = settings + .try_deserialize() + .map_err(|e| IndexerError::ConfigError(format!("Configuration parsing failed: {}", e)))?; + + // Handle empty cookie_dir: if json_server_settings exists with empty cookie_dir, set default + if parsed_config + .json_server_settings + .as_ref() + .is_some_and(|json_settings| { + json_settings + .cookie_dir + .as_ref() + .is_some_and(|dir| dir.as_os_str().is_empty()) + }) + { + if let Some(ref mut json_config) = parsed_config.json_server_settings { + json_config.cookie_dir = Some(default_ephemeral_cookie_path()); } } + + parsed_config.check_config()?; + info!( + "Successfully loaded and validated config. 
Base TOML file checked: '{}'", + file_path.display() + ); + Ok(parsed_config) } #[allow(deprecated)] -impl From for StateServiceConfig { - fn from(cfg: ZainodConfig) -> Self { - StateServiceConfig { +impl TryFrom for StateServiceConfig { + type Error = IndexerError; + + fn try_from(cfg: ZainodConfig) -> Result { + let grpc_listen_address = cfg + .validator_settings + .validator_grpc_listen_address + .as_ref() + .ok_or_else(|| { + IndexerError::ConfigError( + "Missing validator_grpc_listen_address in configuration".to_string(), + ) + })?; + + let validator_grpc_address = + fetch_socket_addr_from_hostname(grpc_listen_address).map_err(|e| { + let msg = match e { + IndexerError::ConfigError(msg) => msg, + other => other.to_string(), + }; + IndexerError::ConfigError(format!( + "Invalid validator_grpc_listen_address '{grpc_listen_address}': {msg}" + )) + })?; + + Ok(StateServiceConfig { validator_state_config: zebra_state::Config { cache_dir: cfg.zebra_db_path.clone(), ephemeral: false, @@ -364,11 +345,11 @@ impl From for StateServiceConfig { debug_validity_check_interval: None, should_backup_non_finalized_state: true, }, - validator_rpc_address: cfg.validator_settings.validator_jsonrpc_listen_address, - validator_grpc_address: cfg + validator_rpc_address: cfg .validator_settings - .validator_grpc_listen_address - .expect("Zebra config with no grpc_listen_address"), + .validator_jsonrpc_listen_address + .clone(), + validator_grpc_address, validator_cookie_auth: cfg.validator_settings.validator_cookie_path.is_some(), validator_cookie_path: cfg.validator_settings.validator_cookie_path, validator_rpc_user: cfg @@ -382,14 +363,16 @@ impl From for StateServiceConfig { service: cfg.service, storage: cfg.storage, network: cfg.network, - } + }) } } #[allow(deprecated)] -impl From for FetchServiceConfig { - fn from(cfg: ZainodConfig) -> Self { - FetchServiceConfig { +impl TryFrom for FetchServiceConfig { + type Error = IndexerError; + + fn try_from(cfg: ZainodConfig) -> Result { + Ok(FetchServiceConfig { validator_rpc_address: cfg.validator_settings.validator_jsonrpc_listen_address, validator_cookie_path: cfg.validator_settings.validator_cookie_path, validator_rpc_user: cfg @@ -403,586 +386,586 @@ impl From for FetchServiceConfig { service: cfg.service, storage: cfg.storage, network: cfg.network, - } + }) } } -/// Custom serializer for BackendType -fn serialize_backendtype_to_string( - backend_type: &zaino_state::BackendType, - serializer: S, -) -> Result -where - S: serde::Serializer, -{ - serializer.serialize_str(match backend_type { - zaino_state::BackendType::State => "state", - zaino_state::BackendType::Fetch => "fetch", - }) -} #[cfg(test)] -mod test { - use crate::error::IndexerError; +mod tests { + use super::*; + use std::{env, sync::Mutex}; + use tempfile::TempDir; + + const ZAINO_ENV_PREFIX: &str = "ZAINO_"; + static TEST_MUTEX: Mutex<()> = Mutex::new(()); + + /// RAII guard for managing environment variables in tests. + /// Ensures test isolation by clearing ZAINO_* vars before tests + /// and restoring original values after. 
+ struct EnvGuard { + _guard: std::sync::MutexGuard<'static, ()>, + original_vars: Vec<(String, String)>, + } - use super::ZainodConfig; + impl EnvGuard { + fn new() -> Self { + let guard = TEST_MUTEX.lock().unwrap_or_else(|e| e.into_inner()); + let original_vars: Vec<_> = env::vars() + .filter(|(k, _)| k.starts_with(ZAINO_ENV_PREFIX)) + .collect(); + // Clear all ZAINO_* vars for test isolation + for (key, _) in &original_vars { + env::remove_var(key); + } + Self { + _guard: guard, + original_vars, + } + } - use super::load_config; + fn set_var(&self, key: &str, value: &str) { + env::set_var(key, value); + } + } - use figment::Jail; + impl Drop for EnvGuard { + fn drop(&mut self) { + // Clear test vars + for (k, _) in env::vars().filter(|(k, _)| k.starts_with(ZAINO_ENV_PREFIX)) { + env::remove_var(&k); + } + // Restore originals + for (k, v) in &self.original_vars { + env::set_var(k, v); + } + } + } - use std::path::PathBuf; + fn create_test_config_file(dir: &TempDir, content: &str, filename: &str) -> PathBuf { + let path = dir.path().join(filename); + std::fs::write(&path, content).unwrap(); + path + } - use zaino_common::{DatabaseSize, Network}; + #[test] + fn test_deserialize_full_valid_config() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + // Create mock files + let cert_file = temp_dir.path().join("test_cert.pem"); + let key_file = temp_dir.path().join("test_key.pem"); + let validator_cookie_file = temp_dir.path().join("validator.cookie"); + let zaino_cookie_dir = temp_dir.path().join("zaino_cookies_dir"); + let zaino_db_dir = temp_dir.path().join("zaino_db_dir"); + let zebra_db_dir = temp_dir.path().join("zebra_db_dir"); + + std::fs::write(&cert_file, "mock cert content").unwrap(); + std::fs::write(&key_file, "mock key content").unwrap(); + std::fs::write(&validator_cookie_file, "mock validator cookie content").unwrap(); + std::fs::create_dir_all(&zaino_cookie_dir).unwrap(); + std::fs::create_dir_all(&zaino_db_dir).unwrap(); + std::fs::create_dir_all(&zebra_db_dir).unwrap(); + + let toml_content = format!( + r#" +backend = "fetch" +zebra_db_path = "{}" +network = "Mainnet" + +[storage.database] +path = "{}" + +[validator_settings] +validator_jsonrpc_listen_address = "192.168.1.10:18232" +validator_cookie_path = "{}" +validator_user = "user" +validator_password = "password" + +[json_server_settings] +json_rpc_listen_address = "127.0.0.1:8000" +cookie_dir = "{}" + +[grpc_settings] +listen_address = "0.0.0.0:9000" + +[grpc_settings.tls] +cert_path = "{}" +key_path = "{}" +"#, + zebra_db_dir.display(), + zaino_db_dir.display(), + validator_cookie_file.display(), + zaino_cookie_dir.display(), + cert_file.display(), + key_file.display(), + ); + + let config_path = create_test_config_file(&temp_dir, &toml_content, "full_config.toml"); + let config = load_config(&config_path).expect("load_config failed"); + + assert_eq!(config.backend, BackendType::Fetch); + assert!(config.json_server_settings.is_some()); + assert_eq!( + config + .json_server_settings + .as_ref() + .unwrap() + .json_rpc_listen_address, + "127.0.0.1:8000".parse().unwrap() + ); + assert_eq!(config.network, Network::Mainnet); + assert_eq!( + config.grpc_settings.listen_address, + "0.0.0.0:9000".parse().unwrap() + ); + assert!(config.grpc_settings.tls.is_some()); + assert_eq!( + config.validator_settings.validator_user, + Some("user".to_string()) + ); + assert_eq!( + config.validator_settings.validator_password, + Some("password".to_string()) + ); + } - // Use the explicit library name 
`zainodlib` as defined in Cargo.toml [lib] name. + #[test] + fn test_deserialize_optional_fields_missing() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +backend = "state" +network = "Testnet" +zebra_db_path = "/opt/zebra/data" + +[storage.database] +path = "/opt/zaino/data" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "optional_missing.toml"); + let config = load_config(&config_path).expect("load_config failed"); + let default_values = ZainodConfig::default(); + + assert_eq!(config.backend, BackendType::State); + assert!(config.json_server_settings.is_none()); + assert_eq!( + config.validator_settings.validator_user, + default_values.validator_settings.validator_user + ); + assert_eq!( + config.storage.cache.capacity, + default_values.storage.cache.capacity + ); + } - // If BackendType is used directly in assertions beyond what IndexerConfig holds: - use zaino_state::BackendType as ZainoBackendType; + #[test] + fn test_cookie_dir_logic() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + // Scenario 1: auth enabled, cookie_dir empty (should use default ephemeral path) + let toml_content = r#" +backend = "fetch" +network = "Testnet" +zebra_db_path = "/zebra/db" + +[storage.database] +path = "/zaino/db" + +[json_server_settings] +json_rpc_listen_address = "127.0.0.1:8237" +cookie_dir = "" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "s1.toml"); + let config = load_config(&config_path).expect("Config S1 failed"); + assert!(config.json_server_settings.is_some()); + assert!(config + .json_server_settings + .as_ref() + .unwrap() + .cookie_dir + .is_some()); + + // Scenario 2: auth enabled, cookie_dir specified + let toml_content2 = r#" +backend = "fetch" +network = "Testnet" +zebra_db_path = "/zebra/db" + +[storage.database] +path = "/zaino/db" + +[json_server_settings] +json_rpc_listen_address = "127.0.0.1:8237" +cookie_dir = "/my/cookie/path" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path2 = create_test_config_file(&temp_dir, toml_content2, "s2.toml"); + let config2 = load_config(&config_path2).expect("Config S2 failed"); + assert_eq!( + config2.json_server_settings.as_ref().unwrap().cookie_dir, + Some(PathBuf::from("/my/cookie/path")) + ); + + // Scenario 3: cookie_dir not specified (should be None) + let toml_content3 = r#" +backend = "fetch" +network = "Testnet" +zebra_db_path = "/zebra/db" + +[storage.database] +path = "/zaino/db" + +[json_server_settings] +json_rpc_listen_address = "127.0.0.1:8237" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path3 = create_test_config_file(&temp_dir, toml_content3, "s3.toml"); + let config3 = load_config(&config_path3).expect("Config S3 failed"); + assert!(config3.json_server_settings.unwrap().cookie_dir.is_none()); + } #[test] - // Validates loading a valid configuration via `load_config`, - // ensuring fields are parsed and `check_config` passes with mocked prerequisite files. 
- pub(crate) fn test_deserialize_full_valid_config() { - Jail::expect_with(|jail| { - // Define RELATIVE paths/filenames for use within the jail - let cert_file_name = "test_cert.pem"; - let key_file_name = "test_key.pem"; - let validator_cookie_file_name = "validator.cookie"; - let zaino_cookie_dir_name = "zaino_cookies_dir"; - let zaino_db_dir_name = "zaino_db_dir"; - let zebra_db_dir_name = "zebra_db_dir"; - - // Create the directories within the jail FIRST - jail.create_dir(zaino_cookie_dir_name)?; - jail.create_dir(zaino_db_dir_name)?; - jail.create_dir(zebra_db_dir_name)?; - - // Use relative paths in the TOML string - let toml_str = format!( - r#" - backend = "fetch" - storage.database.path = "{zaino_db_dir_name}" - zebra_db_path = "{zebra_db_dir_name}" - db_size = 100 - network = "Mainnet" - no_db = false - slow_sync = false - - [validator_settings] - validator_jsonrpc_listen_address = "192.168.1.10:18232" - validator_cookie_path = "{validator_cookie_file_name}" - validator_user = "user" - validator_password = "password" - - [json_server_settings] - json_rpc_listen_address = "127.0.0.1:8000" - cookie_dir = "{zaino_cookie_dir_name}" - - [grpc_settings] - listen_address = "0.0.0.0:9000" - - [grpc_settings.tls] - cert_path = "{cert_file_name}" - key_path = "{key_file_name}" - "# - ); + fn test_deserialize_empty_string_yields_default() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + // Minimal valid config + let toml_content = r#" +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "empty.toml"); + let config = load_config(&config_path).expect("Empty TOML load failed"); + let default_config = ZainodConfig::default(); + + assert_eq!(config.network, default_config.network); + assert_eq!(config.backend, default_config.backend); + assert_eq!( + config.storage.cache.capacity, + default_config.storage.cache.capacity + ); + } + + #[test] + fn test_deserialize_invalid_backend_type() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); - let temp_toml_path = jail.directory().join("full_config.toml"); - jail.create_file(&temp_toml_path, &toml_str)?; + let toml_content = r#" +backend = "invalid_type" - // Create the actual mock files within the jail using the relative names - jail.create_file(cert_file_name, "mock cert content")?; - jail.create_file(key_file_name, "mock key content")?; - jail.create_file(validator_cookie_file_name, "mock validator cookie content")?; +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" - let config_result = load_config(&temp_toml_path); +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "invalid_backend.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); + if let Err(IndexerError::ConfigError(msg)) = result { assert!( - config_result.is_ok(), - "load_config failed: {:?}", - config_result.err() - ); - let finalized_config = config_result.unwrap(); - - assert_eq!(finalized_config.backend, ZainoBackendType::Fetch); - assert!(finalized_config.json_server_settings.is_some()); - assert_eq!( - finalized_config - .json_server_settings - .as_ref() - .expect("json settings to be Some") - .json_rpc_listen_address, - "127.0.0.1:8000".parse().unwrap() - ); - 
assert_eq!( - finalized_config - .json_server_settings - .as_ref() - .expect("json settings to be Some") - .cookie_dir, - Some(PathBuf::from(zaino_cookie_dir_name)) - ); - assert_eq!( - finalized_config - .clone() - .grpc_settings - .tls - .expect("tls to be Some in finalized conifg") - .cert_path, - PathBuf::from(cert_file_name) - ); - assert_eq!( - finalized_config - .clone() - .grpc_settings - .tls - .expect("tls to be Some in finalized_conifg") - .key_path, - PathBuf::from(key_file_name) - ); - assert_eq!( - finalized_config.validator_settings.validator_cookie_path, - Some(PathBuf::from(validator_cookie_file_name)) - ); - assert_eq!( - finalized_config.storage.database.path, - PathBuf::from(zaino_db_dir_name) + msg.contains("unknown variant") || msg.contains("invalid_type"), + "Unexpected error message: {}", + msg ); - assert_eq!( - finalized_config.zebra_db_path, - PathBuf::from(zebra_db_dir_name) - ); - assert_eq!(finalized_config.network, Network::Mainnet); - assert_eq!( - finalized_config.grpc_settings.listen_address, - "0.0.0.0:9000".parse().unwrap() - ); - assert!(finalized_config.grpc_settings.tls.is_some()); - assert_eq!( - finalized_config.validator_settings.validator_user, - Some("user".to_string()) - ); - assert_eq!( - finalized_config.validator_settings.validator_password, - Some("password".to_string()) - ); - assert_eq!(finalized_config.storage.cache.capacity, 10000); - assert_eq!(finalized_config.storage.cache.shard_count(), 16); - assert_eq!( - finalized_config.storage.database.size.to_byte_count(), - 128 * 1024 * 1024 * 1024 - ); - assert!(match finalized_config.storage.database.size { - DatabaseSize::Gb(0) => false, - DatabaseSize::Gb(_) => true, - }); - - Ok(()) - }); + } } #[test] - // Verifies that when optional fields are omitted from TOML, `load_config` ensures they correctly adopt default values. 
- pub(crate) fn test_deserialize_optional_fields_missing() { - Jail::expect_with(|jail| { - let toml_str = r#" - backend = "state" - json_rpc_listen_address = "127.0.0.1:8237" - grpc_listen_address = "127.0.0.1:8137" - validator_listen_address = "127.0.0.1:18232" - zaino_db_path = "/opt/zaino/data" - zebra_db_path = "/opt/zebra/data" - network = "Testnet" - "#; - let temp_toml_path = jail.directory().join("optional_missing.toml"); - jail.create_file(&temp_toml_path, toml_str)?; - - let config = load_config(&temp_toml_path).expect("load_config failed"); - let default_values = ZainodConfig::default(); - - assert_eq!(config.backend, ZainoBackendType::State); - assert_eq!( - config.json_server_settings.is_some(), - default_values.json_server_settings.is_some() - ); - assert_eq!( - config.validator_settings.validator_user, - default_values.validator_settings.validator_user - ); - assert_eq!( - config.validator_settings.validator_password, - default_values.validator_settings.validator_password - ); - assert_eq!( - config.storage.cache.capacity, - default_values.storage.cache.capacity - ); - assert_eq!( - config.storage.cache.shard_count(), - default_values.storage.cache.shard_count(), - ); - assert_eq!( - config.storage.database.size, - default_values.storage.database.size - ); - assert_eq!( - config.storage.database.size.to_byte_count(), - default_values.storage.database.size.to_byte_count() - ); - Ok(()) - }); + fn test_deserialize_invalid_socket_address() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +[json_server_settings] +json_rpc_listen_address = "not-a-valid-address" +cookie_dir = "" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "invalid_socket.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); } #[test] - // Tests the logic (via `load_config` and its internal call to `finalize_config_logic`) - // for setting `cookie_dir` based on `enable_cookie_auth`. 
- pub(crate) fn test_cookie_dir_logic() { - Jail::expect_with(|jail| { - // Scenario 1: auth enabled, cookie_dir missing (should use default ephemeral path) - let s1_path = jail.directory().join("s1.toml"); - jail.create_file( - &s1_path, - r#" - backend = "fetch" - - [json_server_settings] - json_rpc_listen_address = "127.0.0.1:8237" - cookie_dir = "" - - grpc_listen_address = "127.0.0.1:8137" - validator_listen_address = "127.0.0.1:18232" - zaino_db_path = "/zaino/db" - zebra_db_path = "/zebra/db" - network = "Testnet" - "#, - )?; - - let config1 = load_config(&s1_path).expect("Config S1 failed"); - assert!(config1.json_server_settings.is_some()); - assert!(config1 - .json_server_settings - .as_ref() - .expect("json settings is Some") - .cookie_dir - .is_some()); - - // Scenario 2: auth enabled, cookie_dir specified - let s2_path = jail.directory().join("s2.toml"); - jail.create_file( - &s2_path, - r#" - backend = "fetch" - - [json_server_settings] - json_rpc_listen_address = "127.0.0.1:8237" - cookie_dir = "/my/cookie/path" - - grpc_listen_address = "127.0.0.1:8137" - validator_listen_address = "127.0.0.1:18232" - zaino_db_path = "/zaino/db" - zebra_db_path = "/zebra/db" - network = "Testnet" - "#, - )?; - let config2 = load_config(&s2_path).expect("Config S2 failed"); - assert!(config2.json_server_settings.is_some()); - assert_eq!( - config2 - .json_server_settings - .as_ref() - .expect("json settings to be Some") - .cookie_dir, - Some(PathBuf::from("/my/cookie/path")) - ); - let s3_path = jail.directory().join("s3.toml"); - jail.create_file( - &s3_path, - r#" - backend = "fetch" - - [json_server_settings] - json_rpc_listen_address = "127.0.0.1:8237" - - grpc_listen_address = "127.0.0.1:8137" - validator_listen_address = "127.0.0.1:18232" - zaino_db_path = "/zaino/db" - zebra_db_path = "/zebra/db" - network = "Testnet" - "#, - )?; - let config3 = load_config(&s3_path).expect("Config S3 failed"); - assert!(config3 - .json_server_settings - .expect("json server settings to unwrap in config S3") - .cookie_dir - .is_none()); - Ok(()) - }); + fn test_parse_zindexer_toml_integration() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + let zindexer_toml_content = include_str!("../zindexer.toml"); + + let config_path = + create_test_config_file(&temp_dir, zindexer_toml_content, "zindexer_test.toml"); + let config = load_config(&config_path).expect("load_config failed to parse zindexer.toml"); + let defaults = ZainodConfig::default(); + + assert_eq!(config.backend, BackendType::Fetch); + assert_eq!( + config.validator_settings.validator_user, + defaults.validator_settings.validator_user + ); } #[test] - pub(crate) fn test_string_none_as_path_for_cookie_dir() { - Jail::expect_with(|jail| { - let toml_auth_enabled_path = jail.directory().join("auth_enabled.toml"); - // cookie auth on but no dir assigned - jail.create_file( - &toml_auth_enabled_path, - r#" - backend = "fetch" - grpc_listen_address = "127.0.0.1:8137" - validator_listen_address = "127.0.0.1:18232" - zaino_db_path = "/zaino/db" - zebra_db_path = "/zebra/db" - network = "Testnet" - - [json_server_settings] - json_rpc_listen_address = "127.0.0.1:8237" - cookie_dir = "" - "#, - )?; - let config_auth_enabled = - load_config(&toml_auth_enabled_path).expect("Auth enabled failed"); - assert!(config_auth_enabled.json_server_settings.is_some()); - assert!(config_auth_enabled - .json_server_settings - .as_ref() - .expect("json settings to be Some") - .cookie_dir - .is_some()); - - // omitting cookie_dir will set it 
to None - let toml_auth_disabled_path = jail.directory().join("auth_disabled.toml"); - jail.create_file( - &toml_auth_disabled_path, - r#" - backend = "fetch" - - [json_server_settings] - json_rpc_listen_address = "127.0.0.1:8237" - - grpc_listen_address = "127.0.0.1:8137" - validator_listen_address = "127.0.0.1:18232" - zaino_db_path = "/zaino/db" - zebra_db_path = "/zebra/db" - network = "Testnet" - "#, - )?; - let config_auth_disabled = - load_config(&toml_auth_disabled_path).expect("Auth disabled failed"); - assert!(config_auth_disabled.json_server_settings.is_some()); - assert_eq!( - config_auth_disabled - .json_server_settings - .as_ref() - .expect("json settings to be Some") - .cookie_dir, - None - ); - Ok(()) - }); + fn test_env_override_toml_and_defaults() { + let guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +network = "Testnet" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + guard.set_var("ZAINO_NETWORK", "Mainnet"); + guard.set_var( + "ZAINO_JSON_SERVER_SETTINGS__JSON_RPC_LISTEN_ADDRESS", + "127.0.0.1:0", + ); + guard.set_var("ZAINO_JSON_SERVER_SETTINGS__COOKIE_DIR", "/env/cookie/path"); + guard.set_var("ZAINO_STORAGE__CACHE__CAPACITY", "12345"); + + let config_path = create_test_config_file(&temp_dir, toml_content, "test_config.toml"); + let config = load_config(&config_path).expect("load_config should succeed"); + + assert_eq!(config.network, Network::Mainnet); + assert_eq!(config.storage.cache.capacity, 12345); + assert!(config.json_server_settings.is_some()); + assert_eq!( + config.json_server_settings.as_ref().unwrap().cookie_dir, + Some(PathBuf::from("/env/cookie/path")) + ); } #[test] - // Checks that `load_config` with an empty TOML string results in the default `IndexerConfig` values. 
- pub(crate) fn test_deserialize_empty_string_yields_default() { - Jail::expect_with(|jail| { - let empty_toml_path = jail.directory().join("empty.toml"); - jail.create_file(&empty_toml_path, "")?; - let config = load_config(&empty_toml_path).expect("Empty TOML load failed"); - let default_config = ZainodConfig::default(); - // Compare relevant fields that should come from default - assert_eq!(config.network, default_config.network); - assert_eq!(config.backend, default_config.backend); - assert_eq!( - config.json_server_settings.is_some(), - default_config.json_server_settings.is_some() - ); - assert_eq!( - config.validator_settings.validator_user, - default_config.validator_settings.validator_user - ); - assert_eq!( - config.validator_settings.validator_password, - default_config.validator_settings.validator_password - ); - assert_eq!( - config.storage.cache.capacity, - default_config.storage.cache.capacity - ); - assert_eq!( - config.storage.cache.shard_count(), - default_config.storage.cache.shard_count() - ); - assert_eq!( - config.storage.database.size, - default_config.storage.database.size - ); - assert_eq!( - config.storage.database.size.to_byte_count(), - default_config.storage.database.size.to_byte_count() - ); - Ok(()) - }); + fn test_toml_overrides_defaults() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + // json_server_settings without a listening address is forbidden + let toml_content = r#" +network = "Regtest" + +[json_server_settings] +json_rpc_listen_address = "" +cookie_dir = "" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "test_config.toml"); + assert!(load_config(&config_path).is_err()); } #[test] - // Ensures `load_config` returns an error for an invalid `backend` type string in TOML. - pub(crate) fn test_deserialize_invalid_backend_type() { - Jail::expect_with(|jail| { - let invalid_toml_path = jail.directory().join("invalid_backend.toml"); - jail.create_file(&invalid_toml_path, r#"backend = "invalid_type""#)?; - let result = load_config(&invalid_toml_path); - assert!(result.is_err()); - if let Err(IndexerError::ConfigError(msg)) = result { - assert!(msg.contains("Invalid backend type")); - } - Ok(()) - }); + fn test_invalid_env_var_type() { + let guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + guard.set_var("ZAINO_STORAGE__CACHE__CAPACITY", "not_a_number"); + + let config_path = create_test_config_file(&temp_dir, toml_content, "test_config.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); } #[test] - // Ensures `load_config` returns an error for an invalid socket address string in TOML. 
- pub(crate) fn test_deserialize_invalid_socket_address() { - Jail::expect_with(|jail| { - let invalid_toml_path = jail.directory().join("invalid_socket.toml"); - jail.create_file( - &invalid_toml_path, - r#" - [json_server_settings] - json_rpc_listen_address = "not-a-valid-address" - cookie_dir = "" - "#, - )?; - let result = load_config(&invalid_toml_path); - assert!(result.is_err()); - if let Err(IndexerError::ConfigError(msg)) = result { - assert!(msg.contains("invalid socket address syntax")); - } - Ok(()) - }); + fn test_cookie_auth_not_forced_for_non_loopback_ip() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +backend = "fetch" +network = "Testnet" + +[validator_settings] +validator_jsonrpc_listen_address = "192.168.1.10:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "no_cookie_auth.toml"); + let config_result = load_config(&config_path); + assert!( + config_result.is_ok(), + "Non-loopback IP without cookie auth should succeed. Error: {:?}", + config_result.err() + ); + + let config = config_result.unwrap(); + assert!(config.validator_settings.validator_cookie_path.is_none()); } #[test] - // Validates that the actual zindexer.toml file (with optional values commented out) - // is parsed correctly by `load_config`, applying defaults for missing optional fields. - pub(crate) fn test_parse_zindexer_toml_integration() { - let zindexer_toml_content = include_str!("../zindexer.toml"); + fn test_public_ip_still_rejected() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); - Jail::expect_with(|jail| { - let temp_toml_path = jail.directory().join("zindexer_test.toml"); - jail.create_file(&temp_toml_path, zindexer_toml_content)?; + let toml_content = r#" +backend = "fetch" +network = "Testnet" - let config_result = load_config(&temp_toml_path); - assert!( - config_result.is_ok(), - "load_config failed to parse zindexer.toml: {:?}", - config_result.err() - ); - let config = config_result.unwrap(); - let defaults = ZainodConfig::default(); +[validator_settings] +validator_jsonrpc_listen_address = "8.8.8.8:18232" - assert_eq!(config.backend, ZainoBackendType::Fetch); - assert_eq!( - config.validator_settings.validator_user, - defaults.validator_settings.validator_user - ); +[storage.database] +path = "/zaino/db" - Ok(()) - }); - } +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; - // Figment-specific tests below are generally self-descriptive by name - #[test] - pub(crate) fn test_figment_env_override_toml_and_defaults() { - Jail::expect_with(|jail| { - jail.create_file( - "test_config.toml", - r#" - network = "Testnet" - "#, - )?; - jail.set_env("ZAINO_NETWORK", "Mainnet"); - jail.set_env( - "ZAINO_JSON_SERVER_SETTINGS__JSON_RPC_LISTEN_ADDRESS", - "127.0.0.1:0", - ); - jail.set_env("ZAINO_JSON_SERVER_SETTINGS__COOKIE_DIR", "/env/cookie/path"); - jail.set_env("ZAINO_STORAGE__CACHE__CAPACITY", "12345"); - - let temp_toml_path = jail.directory().join("test_config.toml"); - let config = load_config(&temp_toml_path).expect("load_config should succeed"); - - assert_eq!(config.network, Network::Mainnet); - assert_eq!(config.storage.cache.capacity, 12345); - assert!(config.json_server_settings.is_some()); - assert_eq!( - config - .json_server_settings - .as_ref() - .expect("json settings to be Some") - .cookie_dir, - Some(PathBuf::from("/env/cookie/path")) - ); - 
assert!(config.grpc_settings.tls.is_none()); - Ok(()) - }); + let config_path = create_test_config_file(&temp_dir, toml_content, "public_ip.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); + + if let Err(IndexerError::ConfigError(msg)) = result { + assert!(msg.contains("private IP")); + } } #[test] - pub(crate) fn test_figment_toml_overrides_defaults() { - Jail::expect_with(|jail| { - jail.create_file( - "test_config.toml", - r#" - network = "Regtest" - - [json_server_settings] - json_rpc_listen_address = "" - cookie_dir = "" - "#, - )?; - let temp_toml_path = jail.directory().join("test_config.toml"); - // a json_server_setting without a listening address is forbidden - assert!(load_config(&temp_toml_path).is_err()); - Ok(()) - }); + fn test_sensitive_env_var_blocked() { + let guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + guard.set_var("ZAINO_VALIDATOR_SETTINGS__VALIDATOR_PASSWORD", "secret123"); + + let config_path = + create_test_config_file(&temp_dir, toml_content, "sensitive_env_test.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); + + if let Err(IndexerError::ConfigError(msg)) = result { + assert!(msg.contains("sensitive key")); + assert!(msg.contains("VALIDATOR_PASSWORD")); + } } #[test] - pub(crate) fn test_figment_all_defaults() { - Jail::expect_with(|jail| { - jail.create_file("empty_config.toml", "")?; - let temp_toml_path = jail.directory().join("empty_config.toml"); - let config = - load_config(&temp_toml_path).expect("load_config should succeed with empty toml"); - let defaults = ZainodConfig::default(); - assert_eq!(config.network, defaults.network); - assert_eq!( - config.json_server_settings.is_some(), - defaults.json_server_settings.is_some() - ); - assert_eq!( - config.storage.cache.capacity, - defaults.storage.cache.capacity - ); - Ok(()) - }); + fn test_sensitive_key_detection() { + assert!(is_sensitive_leaf_key("password")); + assert!(is_sensitive_leaf_key("PASSWORD")); + assert!(is_sensitive_leaf_key("validator_password")); + assert!(is_sensitive_leaf_key("VALIDATOR_PASSWORD")); + assert!(is_sensitive_leaf_key("secret")); + assert!(is_sensitive_leaf_key("api_token")); + assert!(is_sensitive_leaf_key("cookie")); + assert!(is_sensitive_leaf_key("private_key")); + + assert!(!is_sensitive_leaf_key("username")); + assert!(!is_sensitive_leaf_key("address")); + assert!(!is_sensitive_leaf_key("network")); } #[test] - pub(crate) fn test_figment_invalid_env_var_type() { - Jail::expect_with(|jail| { - jail.create_file("test_config.toml", "")?; - jail.set_env("ZAINO_STORAGE__CACHE__CAPACITY", "not_a_number"); - let temp_toml_path = jail.directory().join("test_config.toml"); - let result = load_config(&temp_toml_path); - assert!(result.is_err()); - if let Err(IndexerError::ConfigError(msg)) = result { - assert!(msg.to_lowercase().contains("storage.cache.capacity") && msg.contains("invalid type"), - "Error message should mention 'map_capacity' (case-insensitive) and 'invalid type'. 
Got: {msg}"); - } else { - panic!("Expected ConfigError, got {result:?}"); - } - Ok(()) - }); + fn test_unknown_fields_rejected() { + let _guard = EnvGuard::new(); + let temp_dir = TempDir::new().unwrap(); + + let toml_content = r#" +unknown_field = "value" + +[validator_settings] +validator_jsonrpc_listen_address = "127.0.0.1:18232" + +[storage.database] +path = "/zaino/db" + +[grpc_settings] +listen_address = "127.0.0.1:8137" +"#; + + let config_path = create_test_config_file(&temp_dir, toml_content, "unknown_fields.toml"); + let result = load_config(&config_path); + assert!(result.is_err()); } } diff --git a/zainod/src/indexer.rs b/zainod/src/indexer.rs index a67ce43fd..cc4154f8b 100644 --- a/zainod/src/indexer.rs +++ b/zainod/src/indexer.rs @@ -5,11 +5,10 @@ use tracing::info; use zaino_fetch::jsonrpsee::connector::test_node_and_return_url; use zaino_serve::server::{config::GrpcServerConfig, grpc::TonicServer, jsonrpc::JsonRpcServer}; - #[allow(deprecated)] use zaino_state::{ - BackendConfig, FetchService, IndexerService, LightWalletService, StateService, StatusType, - ZcashIndexer, ZcashService, + BackendType, FetchService, FetchServiceConfig, IndexerService, LightWalletService, + StateService, StateServiceConfig, StatusType, ZcashIndexer, ZcashService, }; use crate::{config::ZainodConfig, error::IndexerError}; @@ -38,14 +37,13 @@ pub async fn start_indexer( } /// Spawns a new Indexer server. -#[allow(deprecated)] pub async fn spawn_indexer( config: ZainodConfig, ) -> Result>, IndexerError> { config.check_config()?; info!("Checking connection with node.."); let zebrad_uri = test_node_and_return_url( - config.validator_settings.validator_jsonrpc_listen_address, + &config.validator_settings.validator_jsonrpc_listen_address, config.validator_settings.validator_cookie_path.clone(), config.validator_settings.validator_user.clone(), config.validator_settings.validator_password.clone(), @@ -56,18 +54,21 @@ pub async fn spawn_indexer( " - Connected to node using JsonRPSee at address {}.", zebrad_uri ); - match BackendConfig::try_from(config.clone()) { - Ok(BackendConfig::State(state_service_config)) => { - Indexer::::launch_inner(state_service_config, config) + + #[allow(deprecated)] + match config.backend { + BackendType::State => { + let state_config = StateServiceConfig::try_from(config.clone())?; + Indexer::::launch_inner(state_config, config) .await .map(|res| res.0) } - Ok(BackendConfig::Fetch(fetch_service_config)) => { - Indexer::::launch_inner(fetch_service_config, config) + BackendType::Fetch => { + let fetch_config = FetchServiceConfig::try_from(config.clone())?; + Indexer::::launch_inner(fetch_config, config) .await .map(|res| res.0) } - Err(e) => Err(e), } } diff --git a/zainod/zindexer.toml b/zainod/zindexer.toml index e856aa6a7..24c1cb210 100644 --- a/zainod/zindexer.toml +++ b/zainod/zindexer.toml @@ -40,13 +40,11 @@ backend = "fetch" # Validator config: # Required for valid zainod config. [validator_settings] - # Full node / validator listen address. + # Full node / validator gRPC listen address (Zebra only). # - # Must be a "private" address as defined in [IETF RFC 1918] for ipv4 addreses and [IETF RFC 4193] for ipv6 addreses. - # - # Must use validator rpc cookie authentication when connecting to non localhost addresses. - # Required - validator_grpc_listen_address = "127.0.0.1:18232" + # Must be a "private" address as defined in [IETF RFC 1918] for IPv4 or [IETF RFC 4193] for IPv6. + # Cookie or user/password authentication is recommended for non-localhost addresses. 
+ validator_grpc_listen_address = "127.0.0.1:18230" # SocketAddr, Required. validator_jsonrpc_listen_address = "127.0.0.1:18232"
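// Editor's sketch of the address policy the comments above describe, using
// values the tests exercise: RFC1918 addresses are accepted, public addresses
// are rejected at config time, and hostnames are deferred to connection time.
fn demo_address_policy() {
    use std::net::{Ipv4Addr, SocketAddr};

    // RFC1918 addresses such as 192.168.1.10 are accepted.
    assert!(Ipv4Addr::new(192, 168, 1, 10).is_private());
    // Public addresses such as 8.8.8.8 are rejected.
    assert!(!Ipv4Addr::new(8, 8, 8, 8).is_private());
    // A Docker-style hostname is not a SocketAddr, so format validation passes
    // it through and resolution happens when the connection is opened.
    assert!("zebra:18232".parse::<SocketAddr>().is_err());
}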