From 7c8466f4bac8a861add219483fc8e6543867924c Mon Sep 17 00:00:00 2001 From: Boog900 Date: Tue, 2 Jul 2024 22:08:19 +0000 Subject: [PATCH 001/104] Storage: add blockchain history requests (#206) * Add database requests for chain history * misc fixes * review comments * fix clippy * add link and fix typo * Apply suggestions from code review Co-authored-by: hinto-janai * add comment --------- Co-authored-by: hinto-janai --- Cargo.lock | 1 + p2p/p2p/src/block_downloader.rs | 2 +- p2p/p2p/src/block_downloader/request_chain.rs | 14 +-- p2p/p2p/src/block_downloader/tests.rs | 4 +- storage/blockchain/Cargo.toml | 2 +- storage/blockchain/src/service/free.rs | 69 ++++++++++++- storage/blockchain/src/service/read.rs | 99 +++++++++++++++++-- types/src/blockchain.rs | 30 +++++- 8 files changed, 198 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 68ccc3a..d862821 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -506,6 +506,7 @@ dependencies = [ "monero-serai", "paste", "pretty_assertions", + "proptest", "rayon", "tempfile", "thread_local", diff --git a/p2p/p2p/src/block_downloader.rs b/p2p/p2p/src/block_downloader.rs index 7d0ab7e..81640e9 100644 --- a/p2p/p2p/src/block_downloader.rs +++ b/p2p/p2p/src/block_downloader.rs @@ -121,7 +121,7 @@ pub enum ChainSvcResponse { /// The response for [`ChainSvcRequest::FindFirstUnknown`]. /// /// Contains the index of the first unknown block and its expected height. - FindFirstUnknown(usize, u64), + FindFirstUnknown(Option<(usize, u64)>), /// The response for [`ChainSvcRequest::CumulativeDifficulty`]. /// /// The current cumulative difficulty of our chain. 
diff --git a/p2p/p2p/src/block_downloader/request_chain.rs b/p2p/p2p/src/block_downloader/request_chain.rs index f8b5319..471635b 100644 --- a/p2p/p2p/src/block_downloader/request_chain.rs +++ b/p2p/p2p/src/block_downloader/request_chain.rs @@ -198,7 +198,7 @@ where tracing::debug!("Highest chin entry contained {} block Ids", hashes.len()); // Find the first unknown block in the batch. - let ChainSvcResponse::FindFirstUnknown(first_unknown, expected_height) = our_chain_svc + let ChainSvcResponse::FindFirstUnknown(first_unknown_ret) = our_chain_svc .ready() .await? .call(ChainSvcRequest::FindFirstUnknown(hashes.clone())) @@ -207,18 +207,18 @@ where panic!("chain service sent wrong response."); }; + // We know all the blocks already + // TODO: The peer could still be on a different chain, however the chain might just be too far split. + let Some((first_unknown, expected_height)) = first_unknown_ret else { + return Err(BlockDownloadError::FailedToFindAChainToFollow); + }; + // The peer must send at least one block we already know. if first_unknown == 0 { peer_handle.ban_peer(MEDIUM_BAN); return Err(BlockDownloadError::PeerSentNoOverlappingBlocks); } - // We know all the blocks already - // TODO: The peer could still be on a different chain, however the chain might just be too far split. 
- if first_unknown == hashes.len() { - return Err(BlockDownloadError::FailedToFindAChainToFollow); - } - let previous_id = hashes[first_unknown - 1]; let first_entry = ChainEntry { diff --git a/p2p/p2p/src/block_downloader/tests.rs b/p2p/p2p/src/block_downloader/tests.rs index 5d4225c..bf34272 100644 --- a/p2p/p2p/src/block_downloader/tests.rs +++ b/p2p/p2p/src/block_downloader/tests.rs @@ -314,7 +314,9 @@ impl Service for OurChainSvc { block_ids: vec![genesis], cumulative_difficulty: 1, }, - ChainSvcRequest::FindFirstUnknown(_) => ChainSvcResponse::FindFirstUnknown(1, 1), + ChainSvcRequest::FindFirstUnknown(_) => { + ChainSvcResponse::FindFirstUnknown(Some((1, 1))) + } ChainSvcRequest::CumulativeDifficulty => ChainSvcResponse::CumulativeDifficulty(1), }) } diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index bab582d..8a88214 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -45,8 +45,8 @@ rayon = { workspace = true, optional = true } cuprate-helper = { path = "../../helper", features = ["thread"] } cuprate-test-utils = { path = "../../test-utils" } -bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } tempfile = { version = "3.10.0" } pretty_assertions = { workspace = true } +proptest = { workspace = true } hex = { workspace = true } hex-literal = { workspace = true } diff --git a/storage/blockchain/src/service/free.rs b/storage/blockchain/src/service/free.rs index 3ff8d6e..3701f66 100644 --- a/storage/blockchain/src/service/free.rs +++ b/storage/blockchain/src/service/free.rs @@ -33,8 +33,69 @@ pub fn init(config: Config) -> Result<(DatabaseReadHandle, DatabaseWriteHandle), Ok((readers, writer)) } -//---------------------------------------------------------------------------------------------------- Tests -#[cfg(test)] -mod test { - // use super::*; 
+//---------------------------------------------------------------------------------------------------- Compact history +/// Given a position in the compact history, returns the height offset that should be in that position. +/// +/// The height offset is the difference between the top block's height and the block height that should be in that position. +#[inline] +pub(super) const fn compact_history_index_to_height_offset( + i: u64, +) -> u64 { + // If the position is below the initial blocks just return the position back + if i <= INITIAL_BLOCKS { + i + } else { + // Otherwise we go with power of 2 offsets, the same as monerod. + // So (INITIAL_BLOCKS + 2), (INITIAL_BLOCKS + 2 + 4), (INITIAL_BLOCKS + 2 + 4 + 8) + // ref: + INITIAL_BLOCKS + (2 << (i - INITIAL_BLOCKS)) - 2 + } +} + +/// Returns if the genesis block was _NOT_ included when calculating the height offsets. +/// +/// The genesis must always be included in the compact history. +#[inline] +pub(super) const fn compact_history_genesis_not_included( + top_block_height: u64, +) -> bool { + // If the top block height is less than the initial blocks then it will always be included. + // Otherwise, we use the fact that to reach the genesis block this statement must be true (for a + // single `i`): + // + // `top_block_height - INITIAL_BLOCKS - 2^i + 2 == 0` + // which then means: + // `top_block_height - INITIAL_BLOCKS + 2 == 2^i` + // So if `top_block_height - INITIAL_BLOCKS + 2` is a power of 2 then the genesis block is in + // the compact history already. + top_block_height > INITIAL_BLOCKS && !(top_block_height - INITIAL_BLOCKS + 2).is_power_of_two() +} + +//---------------------------------------------------------------------------------------------------- Tests + +#[cfg(test)] +mod tests { + use proptest::prelude::*; + + use super::*; + + proptest! { + #[test] + fn compact_history(top_height in 0_u64..500_000_000) { + let mut heights = (0..) 
+ .map(compact_history_index_to_height_offset::<11>) + .map_while(|i| top_height.checked_sub(i)) + .collect::>(); + + if compact_history_genesis_not_included::<11>(top_height) { + heights.push(0); + } + + // Make sure the genesis and top block are always included. + assert_eq!(*heights.last().unwrap(), 0); + assert_eq!(*heights.first().unwrap(), top_height); + + heights.windows(2).for_each(|window| assert_ne!(window[0], window[1])); + } + } } diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index 20aebf9..7f856cc 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -14,7 +14,7 @@ use tokio::sync::{OwnedSemaphorePermit, Semaphore}; use tokio_util::sync::PollSemaphore; use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError}; -use cuprate_helper::asynch::InfallibleOneshotReceiver; +use cuprate_helper::{asynch::InfallibleOneshotReceiver, map::combine_low_high_bits_to_u128}; use cuprate_types::{ blockchain::{BCReadRequest, BCResponse}, ExtendedBlockHeader, OutputOnChain, @@ -23,17 +23,20 @@ use cuprate_types::{ use crate::{ config::ReaderThreads, open_tables::OpenTables, - ops::block::block_exists, ops::{ - block::{get_block_extended_header_from_height, get_block_info}, + block::{ + block_exists, get_block_extended_header_from_height, get_block_height, get_block_info, + }, blockchain::{cumulative_generated_coins, top_block_height}, key_image::key_image_exists, output::id_to_output_on_chain, }, - service::types::{ResponseReceiver, ResponseResult, ResponseSender}, + service::{ + free::{compact_history_genesis_not_included, compact_history_index_to_height_offset}, + types::{ResponseReceiver, ResponseResult, ResponseSender}, + }, tables::{BlockHeights, BlockInfos, Tables}, - types::BlockHash, - types::{Amount, AmountIndex, BlockHeight, KeyImage, PreRctOutputId}, + types::{Amount, AmountIndex, BlockHash, BlockHeight, KeyImage, PreRctOutputId}, }; 
//---------------------------------------------------------------------------------------------------- DatabaseReadHandle @@ -204,13 +207,15 @@ fn map_request( let response = match request { R::BlockExtendedHeader(block) => block_extended_header(env, block), R::BlockHash(block) => block_hash(env, block), - R::FilterUnknownHashes(hashes) => filter_unknown_hahses(env, hashes), + R::FilterUnknownHashes(hashes) => filter_unknown_hashes(env, hashes), R::BlockExtendedHeaderInRange(range) => block_extended_header_in_range(env, range), R::ChainHeight => chain_height(env), R::GeneratedCoins => generated_coins(env), R::Outputs(map) => outputs(env, map), R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec), R::KeyImagesSpent(set) => key_images_spent(env, set), + R::CompactChainHistory => compact_chain_history(env), + R::FindFirstUnknown(block_ids) => find_first_unknown(env, &block_ids), }; if let Err(e) = response_sender.send(response) { @@ -320,7 +325,7 @@ fn block_hash(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult { /// [`BCReadRequest::FilterUnknownHashes`]. #[inline] -fn filter_unknown_hahses(env: &ConcreteEnv, mut hashes: HashSet) -> ResponseResult { +fn filter_unknown_hashes(env: &ConcreteEnv, mut hashes: HashSet) -> ResponseResult { // Single-threaded, no `ThreadLocal` required. let env_inner = env.env_inner(); let tx_ro = env_inner.tx_ro()?; @@ -525,3 +530,81 @@ fn key_images_spent(env: &ConcreteEnv, key_images: HashSet) -> Respons Some(Err(e)) => Err(e), // A database error occurred. 
} } + +/// [`BCReadRequest::CompactChainHistory`] +fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult { + let env_inner = env.env_inner(); + let tx_ro = env_inner.tx_ro()?; + + let table_block_heights = env_inner.open_db_ro::(&tx_ro)?; + let table_block_infos = env_inner.open_db_ro::(&tx_ro)?; + + let top_block_height = top_block_height(&table_block_heights)?; + + let top_block_info = get_block_info(&top_block_height, &table_block_infos)?; + let cumulative_difficulty = combine_low_high_bits_to_u128( + top_block_info.cumulative_difficulty_low, + top_block_info.cumulative_difficulty_high, + ); + + /// The amount of top block IDs in the compact chain. + const INITIAL_BLOCKS: u64 = 11; + + // rayon is not used here because the amount of block IDs is expected to be small. + let mut block_ids = (0..) + .map(compact_history_index_to_height_offset::) + .map_while(|i| top_block_height.checked_sub(i)) + .map(|height| Ok(get_block_info(&height, &table_block_infos)?.block_hash)) + .collect::, RuntimeError>>()?; + + if compact_history_genesis_not_included::(top_block_height) { + block_ids.push(get_block_info(&0, &table_block_infos)?.block_hash); + } + + Ok(BCResponse::CompactChainHistory { + cumulative_difficulty, + block_ids, + }) +} + +/// [`BCReadRequest::FindFirstUnknown`] +/// +/// # Invariant +/// `block_ids` must be sorted in chronological block order, or else +/// the returned result is unspecified and meaningless, as this function +/// performs a binary search. +fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseResult { + let env_inner = env.env_inner(); + let tx_ro = env_inner.tx_ro()?; + + let table_block_heights = env_inner.open_db_ro::(&tx_ro)?; + + let mut err = None; + + // Do a binary search to find the first unknown block in the batch. 
+ let idx = + block_ids.partition_point( + |block_id| match block_exists(block_id, &table_block_heights) { + Ok(exists) => exists, + Err(e) => { + err.get_or_insert(e); + // if this happens the search is scrapped, just return `false` back. + false + } + }, + ); + + if let Some(e) = err { + return Err(e); + } + + Ok(if idx == block_ids.len() { + BCResponse::FindFirstUnknown(None) + } else if idx == 0 { + BCResponse::FindFirstUnknown(Some((0, 0))) + } else { + let last_known_height = get_block_height(&block_ids[idx - 1], &table_block_heights)?; + + BCResponse::FindFirstUnknown(Some((idx, last_known_height + 1))) + }) +} diff --git a/types/src/blockchain.rs b/types/src/blockchain.rs index 42390f9..5a09ca3 100644 --- a/types/src/blockchain.rs +++ b/types/src/blockchain.rs @@ -83,10 +83,21 @@ pub enum BCReadRequest { /// The input is a list of output amounts. NumberOutputsWithAmount(Vec), - /// Check that all key images within a set arer not spent. + /// Check that all key images within a set are not spent. /// /// Input is a set of key images. KeyImagesSpent(HashSet<[u8; 32]>), + + /// A request for the compact chain history. + CompactChainHistory, + + /// A request to find the first unknown block ID in a list of block IDs. + //// + /// # Invariant + /// The [`Vec`] containing the block IDs must be sorted in chronological block + /// order, or else the returned response is unspecified and meaningless, + /// as this request performs a binary search. + FindFirstUnknown(Vec<[u8; 32]>), } //---------------------------------------------------------------------------------------------------- WriteRequest @@ -164,6 +175,23 @@ pub enum BCResponse { /// The inner value is `false` if _none_ of the key images were spent. KeyImagesSpent(bool), + /// Response to [`BCReadRequest::CompactChainHistory`]. + CompactChainHistory { + /// A list of blocks IDs in our chain, starting with the most recent block, all the way to the genesis block. 
+ /// + /// These blocks should be in reverse chronological order, not every block is needed. + block_ids: Vec<[u8; 32]>, + /// The current cumulative difficulty of the chain. + cumulative_difficulty: u128, + }, + + /// The response for [`BCReadRequest::FindFirstUnknown`]. + /// + /// Contains the index of the first unknown block and its expected height. + /// + /// This will be [`None`] if all blocks were known. + FindFirstUnknown(Option<(usize, u64)>), + //------------------------------------------------------ Writes /// Response to [`BCWriteRequest::WriteBlock`]. /// From a8b58fa4db363736e26f74baf17b031f122722ee Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 3 Jul 2024 17:38:47 -0400 Subject: [PATCH 002/104] ci: add `doc.yml` (#203) * ci: add `doc.yml` * update `{CONTRIBUTING,README}.md` * readme: formatting --- .github/workflows/doc.yml | 74 +++++++++++++++++++++++++++++++++++++++ CONTRIBUTING.md | 4 +-- README.md | 2 +- 3 files changed, 77 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/doc.yml diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml new file mode 100644 index 0000000..340ec59 --- /dev/null +++ b/.github/workflows/doc.yml @@ -0,0 +1,74 @@ +# This builds `cargo doc` and uploads it to the repo's GitHub Pages. + +name: Doc + +on: + push: + branches: [ "main" ] # Only deploy if `main` changes. + workflow_dispatch: + +env: + # Show colored output in CI. + CARGO_TERM_COLOR: always + # Fail on documentation warnings, generate an index page. + RUSTDOCFLAGS: '-D warnings --cfg docsrs --show-type-layout --enable-index-page -Zunstable-options' + +jobs: + # Build documentation. + build: + # FIXME: how to build and merge Windows + macOS docs + # with Linux's? Similar to the OS toggle on docs.rs. 
+ runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + # Nightly required for some `cargo doc` settings. + toolchain: nightly + + - name: Cache + uses: actions/cache@v4 + with: + # Don't cache actual doc files, just build files. + # This is so that removed crates don't show up. + path: target/debug + key: doc + + # Packages other than `Boost` used by `Monero` are listed here. + # https://github.com/monero-project/monero/blob/c444a7e002036e834bfb4c68f04a121ce1af5825/.github/workflows/build.yml#L71 + + - name: Install dependencies (Linux) + run: sudo apt install -y libboost-dev + + - name: Documentation + run: cargo +nightly doc --workspace --all-features + + - name: Upload documentation + uses: actions/upload-pages-artifact@v3 + with: + path: target/doc/ + + # Deployment job. + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + needs: build + + # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages + permissions: + contents: read + pages: write + id-token: write + + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0c9c1f0..1b66a58 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -216,9 +216,9 @@ The description of pull requests should generally follow the template laid out i If your pull request is long and/or has sections that need clarifying, consider leaving a review on your own PR with comments explaining the changes. ## 5. Documentation -Cuprate's crates (libraries) have inline documentation. +Cuprate's crates (libraries) have inline documentation, they are published from the `main` branch at https://doc.cuprate.org. -These can be built and viewed using the `cargo` tool. 
For example, to build and view a specific crate's documentation, run the following command at the repository's root: +Documentation can be built and viewed using the `cargo` tool. For example, to build and view a specific crate's documentation, run the following command at the repository's root: ```bash cargo doc --open --package $CRATE ``` diff --git a/README.md b/README.md index 100900d..a9050d5 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ Cuprate maintains various documentation books: | [Monero's protocol book](https://monero-book.cuprate.org) | Documents the Monero protocol | | [Cuprate's user book](https://user.cuprate.org) | Practical user-guide for using `cuprated` | -For crate (library) documentation, see the `Documentation` section in [`CONTRIBUTING.md`](CONTRIBUTING.md). +For crate (library) documentation, see: https://doc.cuprate.org. This site holds documentation for Cuprate's crates and all dependencies. All Cuprate crates start with `cuprate_`, for example: [`cuprate_database`](https://doc.cuprate.org/cuprate_database). ## Contributing From 71131a4836c107ead37305da167ca54ef86081c7 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Thu, 4 Jul 2024 10:52:51 -0400 Subject: [PATCH 003/104] ci: fix `doc.yml` (#213) * types: remove borsh/serde * blockchain: re-add optional serde * doc.yml: remove `-D warnings` --- .github/workflows/doc.yml | 4 ++-- Cargo.lock | 1 + storage/blockchain/Cargo.toml | 1 + types/src/blockchain.rs | 5 ----- 4 files changed, 4 insertions(+), 7 deletions(-) diff --git a/.github/workflows/doc.yml b/.github/workflows/doc.yml index 340ec59..8ed932a 100644 --- a/.github/workflows/doc.yml +++ b/.github/workflows/doc.yml @@ -10,8 +10,8 @@ on: env: # Show colored output in CI. CARGO_TERM_COLOR: always - # Fail on documentation warnings, generate an index page. - RUSTDOCFLAGS: '-D warnings --cfg docsrs --show-type-layout --enable-index-page -Zunstable-options' + # Generate an index page. 
+ RUSTDOCFLAGS: '--cfg docsrs --show-type-layout --enable-index-page -Zunstable-options' jobs: # Build documentation. diff --git a/Cargo.lock b/Cargo.lock index d862821..809bdf2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -508,6 +508,7 @@ dependencies = [ "pretty_assertions", "proptest", "rayon", + "serde", "tempfile", "thread_local", "tokio", diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index 8a88214..fe242bc 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -31,6 +31,7 @@ curve25519-dalek = { workspace = true } cuprate-pruning = { path = "../../pruning" } monero-serai = { workspace = true, features = ["std"] } paste = { workspace = true } +serde = { workspace = true, optional = true } # `service` feature. crossbeam = { workspace = true, features = ["std"], optional = true } diff --git a/types/src/blockchain.rs b/types/src/blockchain.rs index 5a09ca3..4a280be 100644 --- a/types/src/blockchain.rs +++ b/types/src/blockchain.rs @@ -9,11 +9,6 @@ use std::{ ops::Range, }; -#[cfg(feature = "borsh")] -use borsh::{BorshDeserialize, BorshSerialize}; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - use crate::types::{ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation}; //---------------------------------------------------------------------------------------------------- ReadRequest From d5c8eba1d850a944b23dd8b1973ccc9b1654de60 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Thu, 4 Jul 2024 20:05:22 +0000 Subject: [PATCH 004/104] P2P: API Improvements (#168) * start handshaker builder * finish builder * remove borsh requirement * fix poll_ready :/ * add more docs to builder * more docs * fix clippy * merge fixes * fix doc test * fix imports * cargo fmt * split `PeerRequest` and `PeerResponse` enums up. 
This splits them both into a protocol and admin enum * add request handler file * integrate request handler into connection * fix docs * doc updates * add more docs * fix docs * misc changes * review fixes * fix merge * add dummy file * fix docs * Update p2p/dandelion-tower/src/router.rs * fix docs --- Cargo.lock | 3 + net/wire/src/lib.rs | 4 +- net/wire/src/p2p.rs | 74 +++-- net/wire/src/p2p/protocol.rs | 2 +- p2p/address-book/src/book.rs | 11 +- p2p/address-book/src/book/tests.rs | 14 +- p2p/address-book/src/lib.rs | 23 +- p2p/address-book/src/store.rs | 8 +- p2p/dandelion-tower/src/config.rs | 1 - p2p/dandelion-tower/src/pool.rs | 1 - p2p/dandelion-tower/src/router.rs | 1 - p2p/p2p-core/Cargo.toml | 2 + p2p/p2p-core/src/client.rs | 6 +- p2p/p2p-core/src/client/connection.rs | 42 +-- p2p/p2p-core/src/client/connector.rs | 29 +- p2p/p2p-core/src/client/handshaker.rs | 194 +++++++----- p2p/p2p-core/src/client/handshaker/builder.rs | 292 ++++++++++++++++++ .../src/client/handshaker/builder/dummy.rs | 151 +++++++++ p2p/p2p-core/src/client/request_handler.rs | 144 +++++++++ p2p/p2p-core/src/client/timeout_monitor.rs | 8 +- p2p/p2p-core/src/handles.rs | 10 +- p2p/p2p-core/src/lib.rs | 167 +++++----- p2p/p2p-core/src/protocol.rs | 111 ++++--- p2p/p2p-core/src/protocol/try_from.rs | 214 +++++-------- p2p/p2p-core/src/services.rs | 13 +- p2p/p2p-core/tests/fragmented_handshake.rs | 43 +-- p2p/p2p-core/tests/handles.rs | 12 +- p2p/p2p-core/tests/handshake.rs | 77 ++--- p2p/p2p-core/tests/sending_receiving.rs | 38 +-- p2p/p2p-core/tests/utils.rs | 110 ------- p2p/p2p/Cargo.toml | 1 + p2p/p2p/src/block_downloader/block_queue.rs | 8 +- .../src/block_downloader/download_batch.rs | 22 +- p2p/p2p/src/block_downloader/request_chain.rs | 20 +- p2p/p2p/src/block_downloader/tests.rs | 51 +-- p2p/p2p/src/broadcast.rs | 8 +- p2p/p2p/src/client_pool.rs | 1 - p2p/p2p/src/connection_maintainer.rs | 14 +- p2p/p2p/src/inbound_server.rs | 4 +- p2p/p2p/src/lib.rs | 43 ++- 
p2p/p2p/src/sync_states.rs | 15 +- 41 files changed, 1209 insertions(+), 783 deletions(-) create mode 100644 p2p/p2p-core/src/client/handshaker/builder.rs create mode 100644 p2p/p2p-core/src/client/handshaker/builder/dummy.rs create mode 100644 p2p/p2p-core/src/client/request_handler.rs delete mode 100644 p2p/p2p-core/tests/utils.rs diff --git a/Cargo.lock b/Cargo.lock index 809bdf2..d3f4503 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -688,6 +688,7 @@ dependencies = [ name = "cuprate-p2p" version = "0.1.0" dependencies = [ + "borsh", "bytes", "cuprate-address-book", "cuprate-async-buffer", @@ -728,9 +729,11 @@ dependencies = [ "cuprate-wire", "futures", "hex", + "hex-literal", "thiserror", "tokio", "tokio-stream", + "tokio-test", "tokio-util", "tower", "tracing", diff --git a/net/wire/src/lib.rs b/net/wire/src/lib.rs index 45a2405..674a2e9 100644 --- a/net/wire/src/lib.rs +++ b/net/wire/src/lib.rs @@ -13,10 +13,10 @@ // copies or substantial portions of the Software. // -//! # Monero Wire +//! # Cuprate Wire //! //! A crate defining Monero network messages and network addresses, -//! built on top of the levin-cuprate crate. +//! built on top of the [`cuprate_levin`] crate. //! //! ## License //! 
diff --git a/net/wire/src/p2p.rs b/net/wire/src/p2p.rs index 0d448e4..9743109 100644 --- a/net/wire/src/p2p.rs +++ b/net/wire/src/p2p.rs @@ -177,6 +177,7 @@ fn build_message( Ok(()) } +#[derive(Debug, Clone)] pub enum ProtocolMessage { NewBlock(NewBlock), NewFluffyBlock(NewFluffyBlock), @@ -255,22 +256,23 @@ impl ProtocolMessage { } } -pub enum RequestMessage { +#[derive(Debug, Clone)] +pub enum AdminRequestMessage { Handshake(HandshakeRequest), Ping, SupportFlags, TimedSync(TimedSyncRequest), } -impl RequestMessage { +impl AdminRequestMessage { pub fn command(&self) -> LevinCommand { use LevinCommand as C; match self { - RequestMessage::Handshake(_) => C::Handshake, - RequestMessage::Ping => C::Ping, - RequestMessage::SupportFlags => C::SupportFlags, - RequestMessage::TimedSync(_) => C::TimedSync, + AdminRequestMessage::Handshake(_) => C::Handshake, + AdminRequestMessage::Ping => C::Ping, + AdminRequestMessage::SupportFlags => C::SupportFlags, + AdminRequestMessage::TimedSync(_) => C::TimedSync, } } @@ -278,19 +280,19 @@ impl RequestMessage { use LevinCommand as C; Ok(match command { - C::Handshake => decode_message(RequestMessage::Handshake, buf)?, - C::TimedSync => decode_message(RequestMessage::TimedSync, buf)?, + C::Handshake => decode_message(AdminRequestMessage::Handshake, buf)?, + C::TimedSync => decode_message(AdminRequestMessage::TimedSync, buf)?, C::Ping => { cuprate_epee_encoding::from_bytes::(buf) .map_err(|e| BucketError::BodyDecodingError(e.into()))?; - RequestMessage::Ping + AdminRequestMessage::Ping } C::SupportFlags => { cuprate_epee_encoding::from_bytes::(buf) .map_err(|e| BucketError::BodyDecodingError(e.into()))?; - RequestMessage::SupportFlags + AdminRequestMessage::SupportFlags } _ => return Err(BucketError::UnknownCommand), }) @@ -300,31 +302,34 @@ impl RequestMessage { use LevinCommand as C; match self { - RequestMessage::Handshake(val) => build_message(C::Handshake, val, builder)?, - RequestMessage::TimedSync(val) => 
build_message(C::TimedSync, val, builder)?, - RequestMessage::Ping => build_message(C::Ping, EmptyMessage, builder)?, - RequestMessage::SupportFlags => build_message(C::SupportFlags, EmptyMessage, builder)?, + AdminRequestMessage::Handshake(val) => build_message(C::Handshake, val, builder)?, + AdminRequestMessage::TimedSync(val) => build_message(C::TimedSync, val, builder)?, + AdminRequestMessage::Ping => build_message(C::Ping, EmptyMessage, builder)?, + AdminRequestMessage::SupportFlags => { + build_message(C::SupportFlags, EmptyMessage, builder)? + } } Ok(()) } } -pub enum ResponseMessage { +#[derive(Debug, Clone)] +pub enum AdminResponseMessage { Handshake(HandshakeResponse), Ping(PingResponse), SupportFlags(SupportFlagsResponse), TimedSync(TimedSyncResponse), } -impl ResponseMessage { +impl AdminResponseMessage { pub fn command(&self) -> LevinCommand { use LevinCommand as C; match self { - ResponseMessage::Handshake(_) => C::Handshake, - ResponseMessage::Ping(_) => C::Ping, - ResponseMessage::SupportFlags(_) => C::SupportFlags, - ResponseMessage::TimedSync(_) => C::TimedSync, + AdminResponseMessage::Handshake(_) => C::Handshake, + AdminResponseMessage::Ping(_) => C::Ping, + AdminResponseMessage::SupportFlags(_) => C::SupportFlags, + AdminResponseMessage::TimedSync(_) => C::TimedSync, } } @@ -332,10 +337,10 @@ impl ResponseMessage { use LevinCommand as C; Ok(match command { - C::Handshake => decode_message(ResponseMessage::Handshake, buf)?, - C::TimedSync => decode_message(ResponseMessage::TimedSync, buf)?, - C::Ping => decode_message(ResponseMessage::Ping, buf)?, - C::SupportFlags => decode_message(ResponseMessage::SupportFlags, buf)?, + C::Handshake => decode_message(AdminResponseMessage::Handshake, buf)?, + C::TimedSync => decode_message(AdminResponseMessage::TimedSync, buf)?, + C::Ping => decode_message(AdminResponseMessage::Ping, buf)?, + C::SupportFlags => decode_message(AdminResponseMessage::SupportFlags, buf)?, _ => return 
Err(BucketError::UnknownCommand), }) } @@ -344,18 +349,21 @@ impl ResponseMessage { use LevinCommand as C; match self { - ResponseMessage::Handshake(val) => build_message(C::Handshake, val, builder)?, - ResponseMessage::TimedSync(val) => build_message(C::TimedSync, val, builder)?, - ResponseMessage::Ping(val) => build_message(C::Ping, val, builder)?, - ResponseMessage::SupportFlags(val) => build_message(C::SupportFlags, val, builder)?, + AdminResponseMessage::Handshake(val) => build_message(C::Handshake, val, builder)?, + AdminResponseMessage::TimedSync(val) => build_message(C::TimedSync, val, builder)?, + AdminResponseMessage::Ping(val) => build_message(C::Ping, val, builder)?, + AdminResponseMessage::SupportFlags(val) => { + build_message(C::SupportFlags, val, builder)? + } } Ok(()) } } +#[derive(Debug, Clone)] pub enum Message { - Request(RequestMessage), - Response(ResponseMessage), + Request(AdminRequestMessage), + Response(AdminResponseMessage), Protocol(ProtocolMessage), } @@ -390,8 +398,10 @@ impl LevinBody for Message { command: LevinCommand, ) -> Result { Ok(match typ { - MessageType::Request => Message::Request(RequestMessage::decode(body, command)?), - MessageType::Response => Message::Response(ResponseMessage::decode(body, command)?), + MessageType::Request => Message::Request(AdminRequestMessage::decode(body, command)?), + MessageType::Response => { + Message::Response(AdminResponseMessage::decode(body, command)?) + } MessageType::Notification => Message::Protocol(ProtocolMessage::decode(body, command)?), }) } diff --git a/net/wire/src/p2p/protocol.rs b/net/wire/src/p2p/protocol.rs index 5e95a4f..a385099 100644 --- a/net/wire/src/p2p/protocol.rs +++ b/net/wire/src/p2p/protocol.rs @@ -61,7 +61,7 @@ epee_object!( /// A Request For Blocks #[derive(Debug, Clone, PartialEq, Eq)] pub struct GetObjectsRequest { - /// Block hashes we want + /// Block hashes wanted. 
pub blocks: ByteArrayVec<32>, /// Pruned pub pruned: bool, diff --git a/p2p/address-book/src/book.rs b/p2p/address-book/src/book.rs index b6ab07a..4b5a1e7 100644 --- a/p2p/address-book/src/book.rs +++ b/p2p/address-book/src/book.rs @@ -27,7 +27,10 @@ use cuprate_p2p_core::{ }; use cuprate_pruning::PruningSeed; -use crate::{peer_list::PeerList, store::save_peers_to_disk, AddressBookConfig, AddressBookError}; +use crate::{ + peer_list::PeerList, store::save_peers_to_disk, AddressBookConfig, AddressBookError, + BorshNetworkZone, +}; #[cfg(test)] mod tests; @@ -45,7 +48,7 @@ pub struct ConnectionPeerEntry { rpc_credits_per_hash: u32, } -pub struct AddressBook { +pub struct AddressBook { /// Our white peers - the peers we have previously connected to. white_list: PeerList, /// Our gray peers - the peers we have been told about but haven't connected to. @@ -66,7 +69,7 @@ pub struct AddressBook { cfg: AddressBookConfig, } -impl AddressBook { +impl AddressBook { pub fn new( cfg: AddressBookConfig, white_peers: Vec>, @@ -351,7 +354,7 @@ impl AddressBook { } } -impl Service> for AddressBook { +impl Service> for AddressBook { type Response = AddressBookResponse; type Error = AddressBookError; type Future = Ready>; diff --git a/p2p/address-book/src/book/tests.rs b/p2p/address-book/src/book/tests.rs index 11f3186..1abea04 100644 --- a/p2p/address-book/src/book/tests.rs +++ b/p2p/address-book/src/book/tests.rs @@ -1,7 +1,7 @@ -use std::{path::PathBuf, sync::Arc, time::Duration}; +use std::{path::PathBuf, time::Duration}; use futures::StreamExt; -use tokio::{sync::Semaphore, time::interval}; +use tokio::time::interval; use cuprate_p2p_core::handles::HandleBuilder; use cuprate_pruning::PruningSeed; @@ -78,11 +78,7 @@ async fn get_white_peers() { async fn add_new_peer_already_connected() { let mut address_book = make_fake_address_book(0, 0); - let semaphore = Arc::new(Semaphore::new(10)); - - let (_, handle) = HandleBuilder::default() - 
.with_permit(semaphore.clone().try_acquire_owned().unwrap()) - .build(); + let (_, handle) = HandleBuilder::default().build(); address_book .handle_new_connection( @@ -98,9 +94,7 @@ async fn add_new_peer_already_connected() { ) .unwrap(); - let (_, handle) = HandleBuilder::default() - .with_permit(semaphore.try_acquire_owned().unwrap()) - .build(); + let (_, handle) = HandleBuilder::default().build(); assert_eq!( address_book.handle_new_connection( diff --git a/p2p/address-book/src/lib.rs b/p2p/address-book/src/lib.rs index 1ce659f..c090348 100644 --- a/p2p/address-book/src/lib.rs +++ b/p2p/address-book/src/lib.rs @@ -10,10 +10,9 @@ //! clear net peers getting linked to their dark counterparts //! and so peers will only get told about peers they can //! connect to. -//! use std::{io::ErrorKind, path::PathBuf, time::Duration}; -use cuprate_p2p_core::NetworkZone; +use cuprate_p2p_core::{NetZoneAddress, NetworkZone}; mod book; mod peer_list; @@ -61,7 +60,7 @@ pub enum AddressBookError { } /// Initializes the P2P address book for a specific network zone. -pub async fn init_address_book( +pub async fn init_address_book( cfg: AddressBookConfig, ) -> Result, std::io::Error> { tracing::info!( @@ -82,3 +81,21 @@ pub async fn init_address_book( Ok(address_book) } + +use sealed::BorshNetworkZone; +mod sealed { + use super::*; + + /// An internal trait for the address book for a [`NetworkZone`] that adds the requirement of [`borsh`] traits + /// onto the network address. 
+ pub trait BorshNetworkZone: NetworkZone { + type BorshAddr: NetZoneAddress + borsh::BorshDeserialize + borsh::BorshSerialize; + } + + impl BorshNetworkZone for T + where + T::Addr: borsh::BorshDeserialize + borsh::BorshSerialize, + { + type BorshAddr = T::Addr; + } +} diff --git a/p2p/address-book/src/store.rs b/p2p/address-book/src/store.rs index 94b0ec2..abc42d6 100644 --- a/p2p/address-book/src/store.rs +++ b/p2p/address-book/src/store.rs @@ -3,9 +3,9 @@ use std::fs; use borsh::{from_slice, to_vec, BorshDeserialize, BorshSerialize}; use tokio::task::{spawn_blocking, JoinHandle}; -use cuprate_p2p_core::{services::ZoneSpecificPeerListEntryBase, NetZoneAddress, NetworkZone}; +use cuprate_p2p_core::{services::ZoneSpecificPeerListEntryBase, NetZoneAddress}; -use crate::{peer_list::PeerList, AddressBookConfig}; +use crate::{peer_list::PeerList, AddressBookConfig, BorshNetworkZone}; // TODO: store anchor and ban list. @@ -21,7 +21,7 @@ struct DeserPeerDataV1 { gray_list: Vec>, } -pub fn save_peers_to_disk( +pub fn save_peers_to_disk( cfg: &AddressBookConfig, white_list: &PeerList, gray_list: &PeerList, @@ -38,7 +38,7 @@ pub fn save_peers_to_disk( spawn_blocking(move || fs::write(&file, &data)) } -pub async fn read_peers_from_disk( +pub async fn read_peers_from_disk( cfg: &AddressBookConfig, ) -> Result< ( diff --git a/p2p/dandelion-tower/src/config.rs b/p2p/dandelion-tower/src/config.rs index 71a4e5b..6266d60 100644 --- a/p2p/dandelion-tower/src/config.rs +++ b/p2p/dandelion-tower/src/config.rs @@ -42,7 +42,6 @@ pub enum Graph { /// `(-k*(k-1)*hop)/(2*log(1-ep))` /// /// Where `k` is calculated from the fluff probability, `hop` is `time_between_hop` and `ep` is fixed at `0.1`. -/// #[derive(Debug, Clone, Copy)] pub struct DandelionConfig { /// The time it takes for a stem transaction to pass through a node, including network latency. 
diff --git a/p2p/dandelion-tower/src/pool.rs b/p2p/dandelion-tower/src/pool.rs index eddcc67..68f7945 100644 --- a/p2p/dandelion-tower/src/pool.rs +++ b/p2p/dandelion-tower/src/pool.rs @@ -16,7 +16,6 @@ //! //! When using your handle to the backing store it must be remembered to keep transactions in the stem pool hidden. //! So handle any requests to the tx-pool like the stem side of the pool does not exist. -//! use std::{ collections::{HashMap, HashSet}, future::Future, diff --git a/p2p/dandelion-tower/src/router.rs b/p2p/dandelion-tower/src/router.rs index a64819a..c118c0b 100644 --- a/p2p/dandelion-tower/src/router.rs +++ b/p2p/dandelion-tower/src/router.rs @@ -7,7 +7,6 @@ //! //! It does not handle anything to do with keeping transactions long term, i.e. embargo timers and handling //! loops in the stem. It is up to implementers to do this if they decide not to use [`DandelionPool`](crate::pool::DandelionPool) -//! use std::{ collections::HashMap, hash::Hash, diff --git a/p2p/p2p-core/Cargo.toml b/p2p/p2p-core/Cargo.toml index f434d51..9ef8e24 100644 --- a/p2p/p2p-core/Cargo.toml +++ b/p2p/p2p-core/Cargo.toml @@ -23,6 +23,7 @@ tower = { workspace = true, features = ["util", "tracing"] } thiserror = { workspace = true } tracing = { workspace = true, features = ["std", "attributes"] } +hex-literal = { workspace = true } borsh = { workspace = true, features = ["derive", "std"], optional = true } @@ -31,4 +32,5 @@ cuprate-test-utils = {path = "../../test-utils"} hex = { workspace = true, features = ["std"] } tokio = { workspace = true, features = ["net", "rt-multi-thread", "rt", "macros"]} +tokio-test = { workspace = true } tracing-subscriber = { workspace = true } diff --git a/p2p/p2p-core/src/client.rs b/p2p/p2p-core/src/client.rs index 0e81d96..662a8ee 100644 --- a/p2p/p2p-core/src/client.rs +++ b/p2p/p2p-core/src/client.rs @@ -24,10 +24,11 @@ use crate::{ mod connection; mod connector; pub mod handshaker; +mod request_handler; mod timeout_monitor; pub use 
connector::{ConnectRequest, Connector}; -pub use handshaker::{DoHandshakeRequest, HandShaker, HandshakeError}; +pub use handshaker::{DoHandshakeRequest, HandshakeError, HandshakerBuilder}; /// An internal identifier for a given peer, will be their address if known /// or a random u128 if not. @@ -188,7 +189,8 @@ pub fn mock_client( mut request_handler: S, ) -> Client where - S: crate::PeerRequestHandler, + S: Service + Send + 'static, + S::Future: Send + 'static, { let (tx, mut rx) = mpsc::channel(1); diff --git a/p2p/p2p-core/src/client/connection.rs b/p2p/p2p-core/src/client/connection.rs index 341d8c0..f3f3f6b 100644 --- a/p2p/p2p-core/src/client/connection.rs +++ b/p2p/p2p-core/src/client/connection.rs @@ -2,7 +2,6 @@ //! //! This module handles routing requests from a [`Client`](crate::client::Client) or a broadcast channel to //! a peer. This module also handles routing requests from the connected peer to a request handler. -//! use std::pin::Pin; use futures::{ @@ -15,15 +14,15 @@ use tokio::{ time::{sleep, timeout, Sleep}, }; use tokio_stream::wrappers::ReceiverStream; -use tower::ServiceExt; use cuprate_wire::{LevinCommand, Message, ProtocolMessage}; +use crate::client::request_handler::PeerRequestHandler; use crate::{ constants::{REQUEST_TIMEOUT, SENDING_TIMEOUT}, handles::ConnectionGuard, - BroadcastMessage, MessageID, NetworkZone, PeerError, PeerRequest, PeerRequestHandler, - PeerResponse, SharedError, + AddressBook, BroadcastMessage, CoreSyncSvc, MessageID, NetworkZone, PeerError, PeerRequest, + PeerResponse, PeerSyncSvc, ProtocolRequestHandler, ProtocolResponse, SharedError, }; /// A request to the connection task from a [`Client`](crate::client::Client). @@ -72,7 +71,7 @@ fn levin_command_response(message_id: &MessageID, command: LevinCommand) -> bool } /// This represents a connection to a peer. -pub struct Connection { +pub struct Connection { /// The peer sink - where we send messages to the peer. 
peer_sink: Z::Sink, @@ -87,7 +86,7 @@ pub struct Connection { broadcast_stream: Pin>, /// The inner handler for any requests that come from the requested peer. - peer_request_handler: ReqHndlr, + peer_request_handler: PeerRequestHandler, /// The connection guard which will send signals to other parts of Cuprate when this connection is dropped. connection_guard: ConnectionGuard, @@ -95,9 +94,13 @@ pub struct Connection { error: SharedError, } -impl Connection +impl Connection where - ReqHndlr: PeerRequestHandler, + Z: NetworkZone, + A: AddressBook, + CS: CoreSyncSvc, + PS: PeerSyncSvc, + PR: ProtocolRequestHandler, BrdcstStrm: Stream + Send + 'static, { /// Create a new connection struct. @@ -105,10 +108,10 @@ where peer_sink: Z::Sink, client_rx: mpsc::Receiver, broadcast_stream: BrdcstStrm, - peer_request_handler: ReqHndlr, + peer_request_handler: PeerRequestHandler, connection_guard: ConnectionGuard, error: SharedError, - ) -> Connection { + ) -> Connection { Connection { peer_sink, state: State::WaitingForRequest, @@ -175,7 +178,9 @@ where return Err(e); } else { // We still need to respond even if the response is this. 
- let _ = req.response_channel.send(Ok(PeerResponse::NA)); + let _ = req + .response_channel + .send(Ok(PeerResponse::Protocol(ProtocolResponse::NA))); } Ok(()) @@ -185,17 +190,14 @@ where async fn handle_peer_request(&mut self, req: PeerRequest) -> Result<(), PeerError> { tracing::debug!("Received peer request: {:?}", req.id()); - let ready_svc = self.peer_request_handler.ready().await?; - let res = ready_svc.call(req).await?; - if matches!(res, PeerResponse::NA) { - return Ok(()); + let res = self.peer_request_handler.handle_peer_request(req).await?; + + // This will be an error if a response does not need to be sent + if let Ok(res) = res.try_into() { + self.send_message_to_peer(res).await?; } - self.send_message_to_peer( - res.try_into() - .expect("We just checked if the response was `NA`"), - ) - .await + Ok(()) } /// Handles a message from a peer when we are in [`State::WaitingForResponse`]. diff --git a/p2p/p2p-core/src/client/connector.rs b/p2p/p2p-core/src/client/connector.rs index 278d740..d937165 100644 --- a/p2p/p2p-core/src/client/connector.rs +++ b/p2p/p2p-core/src/client/connector.rs @@ -4,7 +4,6 @@ //! perform a handshake and create a [`Client`]. //! //! This is where outbound connections are created. -//! use std::{ future::Future, pin::Pin, @@ -16,9 +15,9 @@ use tokio::sync::OwnedSemaphorePermit; use tower::{Service, ServiceExt}; use crate::{ - client::{Client, DoHandshakeRequest, HandShaker, HandshakeError, InternalPeerID}, - AddressBook, BroadcastMessage, ConnectionDirection, CoreSyncSvc, NetworkZone, - PeerRequestHandler, PeerSyncSvc, + client::{handshaker::HandShaker, Client, DoHandshakeRequest, HandshakeError, InternalPeerID}, + AddressBook, BroadcastMessage, ConnectionDirection, CoreSyncSvc, NetworkZone, PeerSyncSvc, + ProtocolRequestHandler, }; /// A request to connect to a peer. 
@@ -27,30 +26,32 @@ pub struct ConnectRequest { pub addr: Z::Addr, /// A permit which will be held be the connection allowing you to set limits on the number of /// connections. - pub permit: OwnedSemaphorePermit, + /// + /// This doesn't have to be set. + pub permit: Option, } /// The connector service, this service connects to peer and returns the [`Client`]. -pub struct Connector { - handshaker: HandShaker, +pub struct Connector { + handshaker: HandShaker, } -impl - Connector +impl + Connector { /// Create a new connector from a handshaker. - pub fn new(handshaker: HandShaker) -> Self { + pub fn new(handshaker: HandShaker) -> Self { Self { handshaker } } } -impl - Service> for Connector +impl + Service> for Connector where AdrBook: AddressBook + Clone, CSync: CoreSyncSvc + Clone, PSync: PeerSyncSvc + Clone, - ReqHdlr: PeerRequestHandler + Clone, + ProtoHdlr: ProtocolRequestHandler + Clone, BrdcstStrm: Stream + Send + 'static, BrdcstStrmMkr: Fn(InternalPeerID) -> BrdcstStrm + Clone + Send + 'static, { @@ -74,7 +75,7 @@ where permit: req.permit, peer_stream, peer_sink, - direction: ConnectionDirection::OutBound, + direction: ConnectionDirection::Outbound, }; handshaker.ready().await?.call(req).await } diff --git a/p2p/p2p-core/src/client/handshaker.rs b/p2p/p2p-core/src/client/handshaker.rs index 1071b33..67a58d4 100644 --- a/p2p/p2p-core/src/client/handshaker.rs +++ b/p2p/p2p-core/src/client/handshaker.rs @@ -18,7 +18,7 @@ use tokio::{ time::{error::Elapsed, timeout}, }; use tower::{Service, ServiceExt}; -use tracing::{info_span, Instrument}; +use tracing::{info_span, Instrument, Span}; use cuprate_pruning::{PruningError, PruningSeed}; use cuprate_wire::{ @@ -27,13 +27,13 @@ use cuprate_wire::{ PING_OK_RESPONSE_STATUS_TEXT, }, common::PeerSupportFlags, - BasicNodeData, BucketError, LevinCommand, Message, RequestMessage, ResponseMessage, + AdminRequestMessage, AdminResponseMessage, BasicNodeData, BucketError, LevinCommand, Message, }; use crate::{ client::{ - 
connection::Connection, timeout_monitor::connection_timeout_monitor_task, Client, - InternalPeerID, PeerInformation, + connection::Connection, request_handler::PeerRequestHandler, + timeout_monitor::connection_timeout_monitor_task, Client, InternalPeerID, PeerInformation, }, constants::{ HANDSHAKE_TIMEOUT, MAX_EAGER_PROTOCOL_MESSAGES, MAX_PEERS_IN_PEER_LIST_MESSAGE, @@ -43,9 +43,12 @@ use crate::{ services::PeerSyncRequest, AddressBook, AddressBookRequest, AddressBookResponse, BroadcastMessage, ConnectionDirection, CoreSyncDataRequest, CoreSyncDataResponse, CoreSyncSvc, NetZoneAddress, NetworkZone, - PeerRequestHandler, PeerSyncSvc, SharedError, + PeerSyncSvc, ProtocolRequestHandler, SharedError, }; +pub mod builder; +pub use builder::HandshakerBuilder; + #[derive(Debug, thiserror::Error)] pub enum HandshakeError { #[error("The handshake timed out")] @@ -78,21 +81,21 @@ pub struct DoHandshakeRequest { pub peer_sink: Z::Sink, /// The direction of the connection. pub direction: ConnectionDirection, - /// A permit for this connection. - pub permit: OwnedSemaphorePermit, + /// An [`Option`]al permit for this connection. + pub permit: Option, } /// The peer handshaking service. #[derive(Debug, Clone)] -pub struct HandShaker { +pub struct HandShaker { /// The address book service. address_book: AdrBook, /// The core sync data service. core_sync_svc: CSync, /// The peer sync service. peer_sync_svc: PSync, - /// The peer request handler service. - peer_request_svc: ReqHdlr, + /// The protocol request handler service. + protocol_request_svc: ProtoHdlr, /// Our [`BasicNodeData`] our_basic_node_data: BasicNodeData, @@ -100,42 +103,46 @@ pub struct HandShaker, } -impl - HandShaker +impl + HandShaker { /// Creates a new handshaker. 
- pub fn new( + fn new( address_book: AdrBook, peer_sync_svc: PSync, core_sync_svc: CSync, - peer_request_svc: ReqHdlr, + protocol_request_svc: ProtoHdlr, broadcast_stream_maker: BrdcstStrmMkr, - our_basic_node_data: BasicNodeData, + connection_parent_span: Span, ) -> Self { Self { address_book, peer_sync_svc, core_sync_svc, - peer_request_svc, + protocol_request_svc, broadcast_stream_maker, our_basic_node_data, + connection_parent_span, _zone: PhantomData, } } } -impl - Service> for HandShaker +impl + Service> + for HandShaker where AdrBook: AddressBook + Clone, CSync: CoreSyncSvc + Clone, PSync: PeerSyncSvc + Clone, - ReqHdlr: PeerRequestHandler + Clone, + ProtoHdlr: ProtocolRequestHandler + Clone, BrdcstStrm: Stream + Send + 'static, BrdcstStrmMkr: Fn(InternalPeerID) -> BrdcstStrm + Clone + Send + 'static, { @@ -152,12 +159,14 @@ where let broadcast_stream_maker = self.broadcast_stream_maker.clone(); let address_book = self.address_book.clone(); - let peer_request_svc = self.peer_request_svc.clone(); + let protocol_request_svc = self.protocol_request_svc.clone(); let core_sync_svc = self.core_sync_svc.clone(); let peer_sync_svc = self.peer_sync_svc.clone(); let our_basic_node_data = self.our_basic_node_data.clone(); - let span = info_span!(parent: &tracing::Span::current(), "handshaker", addr=%req.addr); + let connection_parent_span = self.connection_parent_span.clone(); + + let span = info_span!(parent: &Span::current(), "handshaker", addr=%req.addr); async move { timeout( @@ -168,8 +177,9 @@ where address_book, core_sync_svc, peer_sync_svc, - peer_request_svc, + protocol_request_svc, our_basic_node_data, + connection_parent_span, ), ) .await? 
@@ -190,11 +200,11 @@ pub async fn ping(addr: N::Addr) -> Result tracing::debug!("Made outbound connection to peer, sending ping."); peer_sink - .send(Message::Request(RequestMessage::Ping).into()) + .send(Message::Request(AdminRequestMessage::Ping).into()) .await?; if let Some(res) = peer_stream.next().await { - if let Message::Response(ResponseMessage::Ping(ping)) = res? { + if let Message::Response(AdminResponseMessage::Ping(ping)) = res? { if ping.status == PING_OK_RESPONSE_STATUS_TEXT { tracing::debug!("Ping successful."); return Ok(ping.peer_id); @@ -220,7 +230,8 @@ pub async fn ping(addr: N::Addr) -> Result } /// This function completes a handshake with the requested peer. -async fn handshake( +#[allow(clippy::too_many_arguments)] +async fn handshake( req: DoHandshakeRequest, broadcast_stream_maker: BrdcstStrmMkr, @@ -228,14 +239,15 @@ async fn handshake Result, HandshakeError> where - AdrBook: AddressBook, - CSync: CoreSyncSvc, - PSync: PeerSyncSvc, - ReqHdlr: PeerRequestHandler, + AdrBook: AddressBook + Clone, + CSync: CoreSyncSvc + Clone, + PSync: PeerSyncSvc + Clone, + ProtoHdlr: ProtocolRequestHandler, BrdcstStrm: Stream + Send + 'static, BrdcstStrmMkr: Fn(InternalPeerID) -> BrdcstStrm + Send + 'static, { @@ -252,19 +264,20 @@ where let mut eager_protocol_messages = Vec::new(); let (peer_core_sync, peer_node_data) = match direction { - ConnectionDirection::InBound => { + ConnectionDirection::Inbound => { // Inbound handshake the peer sends the request. tracing::debug!("waiting for handshake request."); - let Message::Request(RequestMessage::Handshake(handshake_req)) = wait_for_message::( - LevinCommand::Handshake, - true, - &mut peer_sink, - &mut peer_stream, - &mut eager_protocol_messages, - &our_basic_node_data, - ) - .await? 
+ let Message::Request(AdminRequestMessage::Handshake(handshake_req)) = + wait_for_message::( + LevinCommand::Handshake, + true, + &mut peer_sink, + &mut peer_stream, + &mut eager_protocol_messages, + &our_basic_node_data, + ) + .await? else { panic!("wait_for_message returned ok with wrong message."); }; @@ -273,7 +286,7 @@ where // We will respond to the handshake request later. (handshake_req.payload_data, handshake_req.node_data) } - ConnectionDirection::OutBound => { + ConnectionDirection::Outbound => { // Outbound handshake, we send the request. send_hs_request::( &mut peer_sink, @@ -283,7 +296,7 @@ where .await?; // Wait for the handshake response. - let Message::Response(ResponseMessage::Handshake(handshake_res)) = + let Message::Response(AdminResponseMessage::Handshake(handshake_res)) = wait_for_message::( LevinCommand::Handshake, false, @@ -373,13 +386,13 @@ where // public_address, if Some, is the reachable address of the node. let public_address = 'check_out_addr: { match direction { - ConnectionDirection::InBound => { + ConnectionDirection::Inbound => { // First send the handshake response. send_hs_response::( &mut peer_sink, &mut core_sync_svc, &mut address_book, - our_basic_node_data, + our_basic_node_data.clone(), ) .await?; @@ -411,7 +424,7 @@ where // The peer did not specify a reachable port or the ping was not successful. None } - ConnectionDirection::OutBound => { + ConnectionDirection::Outbound => { let InternalPeerID::KnownAddr(outbound_addr) = addr else { unreachable!("How could we make an outbound connection to an unknown address"); }; @@ -424,37 +437,7 @@ where tracing::debug!("Handshake complete."); - // Set up the connection data. 
- let error_slot = SharedError::new(); let (connection_guard, handle) = HandleBuilder::new().with_permit(permit).build(); - let (connection_tx, client_rx) = mpsc::channel(1); - - let connection = Connection::::new( - peer_sink, - client_rx, - broadcast_stream_maker(addr), - peer_request_svc, - connection_guard, - error_slot.clone(), - ); - - let connection_span = tracing::error_span!(parent: &tracing::Span::none(), "connection", %addr); - let connection_handle = tokio::spawn( - connection - .run(peer_stream.fuse(), eager_protocol_messages) - .instrument(connection_span), - ); - - // Tell the core sync service about the new peer. - peer_sync_svc - .ready() - .await? - .call(PeerSyncRequest::IncomingCoreSyncData( - addr, - handle.clone(), - peer_core_sync, - )) - .await?; // Tell the address book about the new connection. address_book @@ -471,6 +454,21 @@ where }) .await?; + // Tell the core sync service about the new peer. + peer_sync_svc + .ready() + .await? + .call(PeerSyncRequest::IncomingCoreSyncData( + addr, + handle.clone(), + peer_core_sync, + )) + .await?; + + // Set up the connection data. 
+ let error_slot = SharedError::new(); + let (connection_tx, client_rx) = mpsc::channel(1); + let info = PeerInformation { id: addr, handle, @@ -478,6 +476,32 @@ where pruning_seed, }; + let request_handler = PeerRequestHandler { + address_book_svc: address_book.clone(), + our_sync_svc: core_sync_svc.clone(), + peer_sync_svc: peer_sync_svc.clone(), + protocol_request_handler, + our_basic_node_data, + peer_info: info.clone(), + }; + + let connection = Connection::::new( + peer_sink, + client_rx, + broadcast_stream_maker(addr), + request_handler, + connection_guard, + error_slot.clone(), + ); + + let connection_span = + tracing::error_span!(parent: &connection_parent_span, "connection", %addr); + let connection_handle = tokio::spawn( + connection + .run(peer_stream.fuse(), eager_protocol_messages) + .instrument(connection_span), + ); + let semaphore = Arc::new(Semaphore::new(1)); let timeout_handle = tokio::spawn(connection_timeout_monitor_task( @@ -502,7 +526,7 @@ where Ok(client) } -/// Sends a [`RequestMessage::Handshake`] down the peer sink. +/// Sends a [`AdminRequestMessage::Handshake`] down the peer sink. async fn send_hs_request( peer_sink: &mut Z::Sink, core_sync_svc: &mut CSync, @@ -525,13 +549,13 @@ where tracing::debug!("Sending handshake request."); peer_sink - .send(Message::Request(RequestMessage::Handshake(req)).into()) + .send(Message::Request(AdminRequestMessage::Handshake(req)).into()) .await?; Ok(()) } -/// Sends a [`ResponseMessage::Handshake`] down the peer sink. +/// Sends a [`AdminResponseMessage::Handshake`] down the peer sink. 
async fn send_hs_response( peer_sink: &mut Z::Sink, core_sync_svc: &mut CSync, @@ -568,7 +592,7 @@ where tracing::debug!("Sending handshake response."); peer_sink - .send(Message::Response(ResponseMessage::Handshake(res)).into()) + .send(Message::Response(AdminResponseMessage::Handshake(res)).into()) .await?; Ok(()) @@ -619,7 +643,7 @@ async fn wait_for_message( } match req_message { - RequestMessage::SupportFlags => { + AdminRequestMessage::SupportFlags => { if !allow_support_flag_req { return Err(HandshakeError::PeerSentInvalidMessage( "Peer sent 2 support flag requests", @@ -631,7 +655,7 @@ async fn wait_for_message( allow_support_flag_req = false; continue; } - RequestMessage::Ping => { + AdminRequestMessage::Ping => { if !allow_ping { return Err(HandshakeError::PeerSentInvalidMessage( "Peer sent 2 ping requests", @@ -674,7 +698,7 @@ async fn wait_for_message( )))? } -/// Sends a [`ResponseMessage::SupportFlags`] down the peer sink. +/// Sends a [`AdminResponseMessage::SupportFlags`] down the peer sink. async fn send_support_flags( peer_sink: &mut Z::Sink, support_flags: PeerSupportFlags, @@ -682,7 +706,7 @@ async fn send_support_flags( tracing::debug!("Sending support flag response."); Ok(peer_sink .send( - Message::Response(ResponseMessage::SupportFlags(SupportFlagsResponse { + Message::Response(AdminResponseMessage::SupportFlags(SupportFlagsResponse { support_flags, })) .into(), @@ -690,7 +714,7 @@ async fn send_support_flags( .await?) } -/// Sends a [`ResponseMessage::Ping`] down the peer sink. +/// Sends a [`AdminResponseMessage::Ping`] down the peer sink. 
async fn send_ping_response( peer_sink: &mut Z::Sink, peer_id: u64, @@ -698,7 +722,7 @@ async fn send_ping_response( tracing::debug!("Sending ping response."); Ok(peer_sink .send( - Message::Response(ResponseMessage::Ping(PingResponse { + Message::Response(AdminResponseMessage::Ping(PingResponse { status: PING_OK_RESPONSE_STATUS_TEXT, peer_id, })) diff --git a/p2p/p2p-core/src/client/handshaker/builder.rs b/p2p/p2p-core/src/client/handshaker/builder.rs new file mode 100644 index 0000000..a40f396 --- /dev/null +++ b/p2p/p2p-core/src/client/handshaker/builder.rs @@ -0,0 +1,292 @@ +use std::marker::PhantomData; + +use futures::{stream, Stream}; +use tracing::Span; + +use cuprate_wire::BasicNodeData; + +use crate::{ + client::{handshaker::HandShaker, InternalPeerID}, + AddressBook, BroadcastMessage, CoreSyncSvc, NetworkZone, PeerSyncSvc, ProtocolRequestHandler, +}; + +mod dummy; +pub use dummy::{ + DummyAddressBook, DummyCoreSyncSvc, DummyPeerSyncSvc, DummyProtocolRequestHandler, +}; + +/// A [`HandShaker`] [`Service`](tower::Service) builder. +/// +/// This builder applies default values to make usage easier, behaviour and drawbacks of the defaults are documented +/// on the `with_*` method to change it, for example [`HandshakerBuilder::with_protocol_request_handler`]. +/// +/// If you want to use any network other than [`Mainnet`](crate::Network::Mainnet) +/// you will need to change the core sync service with [`HandshakerBuilder::with_core_sync_svc`], +/// see that method for details. +#[derive(Debug, Clone)] +pub struct HandshakerBuilder< + N: NetworkZone, + AdrBook = DummyAddressBook, + CSync = DummyCoreSyncSvc, + PSync = DummyPeerSyncSvc, + ProtoHdlr = DummyProtocolRequestHandler, + BrdcstStrmMkr = fn( + InternalPeerID<::Addr>, + ) -> stream::Pending, +> { + /// The address book service. + address_book: AdrBook, + /// The core sync data service. + core_sync_svc: CSync, + /// The peer sync service. + peer_sync_svc: PSync, + /// The protocol request service. 
+ protocol_request_svc: ProtoHdlr, + /// Our [`BasicNodeData`] + our_basic_node_data: BasicNodeData, + /// A function that returns a stream that will give items to be broadcast by a connection. + broadcast_stream_maker: BrdcstStrmMkr, + /// The [`Span`] that will set as the parent to the connection [`Span`]. + connection_parent_span: Option, + + /// The network zone. + _zone: PhantomData, +} + +impl HandshakerBuilder { + /// Creates a new builder with our node's basic node data. + pub fn new(our_basic_node_data: BasicNodeData) -> Self { + Self { + address_book: DummyAddressBook, + core_sync_svc: DummyCoreSyncSvc::static_mainnet_genesis(), + peer_sync_svc: DummyPeerSyncSvc, + protocol_request_svc: DummyProtocolRequestHandler, + our_basic_node_data, + broadcast_stream_maker: |_| stream::pending(), + connection_parent_span: None, + _zone: PhantomData, + } + } +} + +impl + HandshakerBuilder +{ + /// Changes the address book to the provided one. + /// + /// ## Default Address Book + /// + /// The default address book is used if this function is not called. + /// + /// The default address book's only drawback is that it does not keep track of peers and therefore + /// bans. + pub fn with_address_book( + self, + new_address_book: NAdrBook, + ) -> HandshakerBuilder + where + NAdrBook: AddressBook + Clone, + { + let HandshakerBuilder { + core_sync_svc, + peer_sync_svc, + protocol_request_svc, + our_basic_node_data, + broadcast_stream_maker, + connection_parent_span, + _zone, + .. + } = self; + + HandshakerBuilder { + address_book: new_address_book, + core_sync_svc, + peer_sync_svc, + protocol_request_svc, + our_basic_node_data, + broadcast_stream_maker, + connection_parent_span, + _zone, + } + } + + /// Changes the core sync service to the provided one. + /// + /// The core sync service should keep track of our nodes core sync data. + /// + /// ## Default Core Sync Service + /// + /// The default core sync service is used if this method is not called. 
+ /// + /// The default core sync service will just use the mainnet genesis block, to use other network's + /// genesis see [`DummyCoreSyncSvc::static_stagenet_genesis`] and [`DummyCoreSyncSvc::static_testnet_genesis`]. + /// The drawbacks to keeping this the default is that it will always return the mainnet genesis as our nodes + /// sync info, which means peers won't know our actual chain height, this may or may not be a problem for + /// different use cases. + pub fn with_core_sync_svc( + self, + new_core_sync_svc: NCSync, + ) -> HandshakerBuilder + where + NCSync: CoreSyncSvc + Clone, + { + let HandshakerBuilder { + address_book, + peer_sync_svc, + protocol_request_svc, + our_basic_node_data, + broadcast_stream_maker, + connection_parent_span, + _zone, + .. + } = self; + + HandshakerBuilder { + address_book, + core_sync_svc: new_core_sync_svc, + peer_sync_svc, + protocol_request_svc, + our_basic_node_data, + broadcast_stream_maker, + connection_parent_span, + _zone, + } + } + + /// Changes the peer sync service, which keeps track of peers sync states. + /// + /// ## Default Peer Sync Service + /// + /// The default peer sync service will be used if this method is not called. + /// + /// The default peer sync service will not keep track of peers sync states. + pub fn with_peer_sync_svc( + self, + new_peer_sync_svc: NPSync, + ) -> HandshakerBuilder + where + NPSync: PeerSyncSvc + Clone, + { + let HandshakerBuilder { + address_book, + core_sync_svc, + protocol_request_svc, + our_basic_node_data, + broadcast_stream_maker, + connection_parent_span, + _zone, + .. + } = self; + + HandshakerBuilder { + address_book, + core_sync_svc, + peer_sync_svc: new_peer_sync_svc, + protocol_request_svc, + our_basic_node_data, + broadcast_stream_maker, + connection_parent_span, + _zone, + } + } + + /// Changes the protocol request handler, which handles [`ProtocolRequest`](crate::ProtocolRequest)s to our node. 
+ /// + /// ## Default Protocol Request Handler + /// + /// The default protocol request handler will not respond to any protocol requests, this should not + /// be an issue as long as peers do not think we are ahead of them, if they do they will send requests + /// for our blocks, and we won't respond which will cause them to disconnect. + pub fn with_protocol_request_handler( + self, + new_protocol_handler: NProtoHdlr, + ) -> HandshakerBuilder + where + NProtoHdlr: ProtocolRequestHandler + Clone, + { + let HandshakerBuilder { + address_book, + core_sync_svc, + peer_sync_svc, + our_basic_node_data, + broadcast_stream_maker, + connection_parent_span, + _zone, + .. + } = self; + + HandshakerBuilder { + address_book, + core_sync_svc, + peer_sync_svc, + protocol_request_svc: new_protocol_handler, + our_basic_node_data, + broadcast_stream_maker, + connection_parent_span, + _zone, + } + } + + /// Changes the broadcast stream maker, which is used to create streams that yield messages to broadcast. + /// + /// ## Default Broadcast Stream Maker + /// + /// The default broadcast stream maker just returns [`stream::Pending`], i.e. the returned stream will not + /// produce any messages to broadcast, this is not a problem if your use case does not require broadcasting + /// messages. + pub fn with_broadcast_stream_maker( + self, + new_broadcast_stream_maker: NBrdcstStrmMkr, + ) -> HandshakerBuilder + where + BrdcstStrm: Stream + Send + 'static, + NBrdcstStrmMkr: Fn(InternalPeerID) -> BrdcstStrm + Clone + Send + 'static, + { + let HandshakerBuilder { + address_book, + core_sync_svc, + peer_sync_svc, + protocol_request_svc, + our_basic_node_data, + connection_parent_span, + _zone, + .. 
+ } = self; + + HandshakerBuilder { + address_book, + core_sync_svc, + peer_sync_svc, + protocol_request_svc, + our_basic_node_data, + broadcast_stream_maker: new_broadcast_stream_maker, + connection_parent_span, + _zone, + } + } + + /// Changes the parent [`Span`] of the connection task to the one provided. + /// + /// ## Default Connection Parent Span + /// + /// The default connection span will be [`Span::none`]. + pub fn with_connection_parent_span(self, connection_parent_span: Span) -> Self { + Self { + connection_parent_span: Some(connection_parent_span), + ..self + } + } + + /// Builds the [`HandShaker`]. + pub fn build(self) -> HandShaker { + HandShaker::new( + self.address_book, + self.peer_sync_svc, + self.core_sync_svc, + self.protocol_request_svc, + self.broadcast_stream_maker, + self.our_basic_node_data, + self.connection_parent_span.unwrap_or(Span::none()), + ) + } +} diff --git a/p2p/p2p-core/src/client/handshaker/builder/dummy.rs b/p2p/p2p-core/src/client/handshaker/builder/dummy.rs new file mode 100644 index 0000000..ae97cdc --- /dev/null +++ b/p2p/p2p-core/src/client/handshaker/builder/dummy.rs @@ -0,0 +1,151 @@ +use std::{ + future::{ready, Ready}, + task::{Context, Poll}, +}; + +use tower::Service; + +use cuprate_wire::CoreSyncData; + +use crate::{ + services::{ + AddressBookRequest, AddressBookResponse, CoreSyncDataRequest, CoreSyncDataResponse, + PeerSyncRequest, PeerSyncResponse, + }, + NetworkZone, ProtocolRequest, ProtocolResponse, +}; + +/// A dummy peer sync service, that doesn't actually keep track of peers sync states. +#[derive(Debug, Clone)] +pub struct DummyPeerSyncSvc; + +impl Service> for DummyPeerSyncSvc { + type Response = PeerSyncResponse; + type Error = tower::BoxError; + type Future = Ready>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: PeerSyncRequest) -> Self::Future { + ready(Ok(match req { + PeerSyncRequest::PeersToSyncFrom { .. 
} => PeerSyncResponse::PeersToSyncFrom(vec![]), + PeerSyncRequest::IncomingCoreSyncData(_, _, _) => PeerSyncResponse::Ok, + })) + } +} + +/// A dummy core sync service that just returns static [`CoreSyncData`]. +#[derive(Debug, Clone)] +pub struct DummyCoreSyncSvc(CoreSyncData); + +impl DummyCoreSyncSvc { + /// Returns a [`DummyCoreSyncSvc`] that will just return the mainnet genesis [`CoreSyncData`]. + pub fn static_mainnet_genesis() -> DummyCoreSyncSvc { + DummyCoreSyncSvc(CoreSyncData { + cumulative_difficulty: 1, + cumulative_difficulty_top64: 0, + current_height: 1, + pruning_seed: 0, + top_id: hex_literal::hex!( + "418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3" + ), + top_version: 1, + }) + } + + /// Returns a [`DummyCoreSyncSvc`] that will just return the testnet genesis [`CoreSyncData`]. + pub fn static_testnet_genesis() -> DummyCoreSyncSvc { + DummyCoreSyncSvc(CoreSyncData { + cumulative_difficulty: 1, + cumulative_difficulty_top64: 0, + current_height: 1, + pruning_seed: 0, + top_id: hex_literal::hex!( + "48ca7cd3c8de5b6a4d53d2861fbdaedca141553559f9be9520068053cda8430b" + ), + top_version: 1, + }) + } + + /// Returns a [`DummyCoreSyncSvc`] that will just return the stagenet genesis [`CoreSyncData`]. + pub fn static_stagenet_genesis() -> DummyCoreSyncSvc { + DummyCoreSyncSvc(CoreSyncData { + cumulative_difficulty: 1, + cumulative_difficulty_top64: 0, + current_height: 1, + pruning_seed: 0, + top_id: hex_literal::hex!( + "76ee3cc98646292206cd3e86f74d88b4dcc1d937088645e9b0cbca84b7ce74eb" + ), + top_version: 1, + }) + } + + /// Returns a [`DummyCoreSyncSvc`] that will return the provided [`CoreSyncData`]. 
+ pub fn static_custom(data: CoreSyncData) -> DummyCoreSyncSvc { + DummyCoreSyncSvc(data) + } +} + +impl Service for DummyCoreSyncSvc { + type Response = CoreSyncDataResponse; + type Error = tower::BoxError; + type Future = Ready>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _: CoreSyncDataRequest) -> Self::Future { + ready(Ok(CoreSyncDataResponse(self.0.clone()))) + } +} + +/// A dummy address book that doesn't actually keep track of peers. +#[derive(Debug, Clone)] +pub struct DummyAddressBook; + +impl Service> for DummyAddressBook { + type Response = AddressBookResponse; + type Error = tower::BoxError; + type Future = Ready>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: AddressBookRequest) -> Self::Future { + ready(Ok(match req { + AddressBookRequest::GetWhitePeers(_) => AddressBookResponse::Peers(vec![]), + AddressBookRequest::TakeRandomGrayPeer { .. } + | AddressBookRequest::TakeRandomPeer { .. } + | AddressBookRequest::TakeRandomWhitePeer { .. } => { + return ready(Err("dummy address book does not hold peers".into())); + } + AddressBookRequest::NewConnection { .. } | AddressBookRequest::IncomingPeerList(_) => { + AddressBookResponse::Ok + } + AddressBookRequest::IsPeerBanned(_) => AddressBookResponse::IsPeerBanned(false), + })) + } +} + +/// A dummy protocol request handler. 
+#[derive(Debug, Clone)] +pub struct DummyProtocolRequestHandler; + +impl Service for DummyProtocolRequestHandler { + type Response = ProtocolResponse; + type Error = tower::BoxError; + type Future = Ready>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _: ProtocolRequest) -> Self::Future { + ready(Ok(ProtocolResponse::NA)) + } +} diff --git a/p2p/p2p-core/src/client/request_handler.rs b/p2p/p2p-core/src/client/request_handler.rs new file mode 100644 index 0000000..284f954 --- /dev/null +++ b/p2p/p2p-core/src/client/request_handler.rs @@ -0,0 +1,144 @@ +use futures::TryFutureExt; +use tower::ServiceExt; + +use cuprate_wire::{ + admin::{ + PingResponse, SupportFlagsResponse, TimedSyncRequest, TimedSyncResponse, + PING_OK_RESPONSE_STATUS_TEXT, + }, + AdminRequestMessage, AdminResponseMessage, BasicNodeData, +}; + +use crate::{ + client::PeerInformation, + constants::MAX_PEERS_IN_PEER_LIST_MESSAGE, + services::{ + AddressBookRequest, AddressBookResponse, CoreSyncDataRequest, CoreSyncDataResponse, + PeerSyncRequest, + }, + AddressBook, CoreSyncSvc, NetworkZone, PeerRequest, PeerResponse, PeerSyncSvc, + ProtocolRequestHandler, +}; + +#[derive(thiserror::Error, Debug, Copy, Clone, Eq, PartialEq)] +enum PeerRequestHandlerError { + #[error("Received a handshake request during a connection.")] + ReceivedHandshakeDuringConnection, +} + +/// The peer request handler, handles incoming [`PeerRequest`]s to our node. +#[derive(Debug, Clone)] +pub(crate) struct PeerRequestHandler { + /// The address book service. + pub address_book_svc: A, + /// Our core sync service. + pub our_sync_svc: CS, + /// The peer sync service. + pub peer_sync_svc: PS, + + /// The handler for [`ProtocolRequest`](crate::ProtocolRequest)s to our node. + pub protocol_request_handler: PR, + + /// The basic node data of our node. + pub our_basic_node_data: BasicNodeData, + + /// The information on the connected peer. 
+ pub peer_info: PeerInformation, +} + +impl PeerRequestHandler +where + Z: NetworkZone, + A: AddressBook, + CS: CoreSyncSvc, + PS: PeerSyncSvc, + PR: ProtocolRequestHandler, +{ + /// Handles an incoming [`PeerRequest`] to our node. + pub async fn handle_peer_request( + &mut self, + req: PeerRequest, + ) -> Result { + match req { + PeerRequest::Admin(admin_req) => match admin_req { + AdminRequestMessage::Handshake(_) => { + Err(PeerRequestHandlerError::ReceivedHandshakeDuringConnection.into()) + } + AdminRequestMessage::SupportFlags => { + let support_flags = self.our_basic_node_data.support_flags; + + Ok(PeerResponse::Admin(AdminResponseMessage::SupportFlags( + SupportFlagsResponse { support_flags }, + ))) + } + AdminRequestMessage::Ping => Ok(PeerResponse::Admin(AdminResponseMessage::Ping( + PingResponse { + peer_id: self.our_basic_node_data.peer_id, + status: PING_OK_RESPONSE_STATUS_TEXT, + }, + ))), + AdminRequestMessage::TimedSync(timed_sync_req) => { + let res = self.handle_timed_sync_request(timed_sync_req).await?; + + Ok(PeerResponse::Admin(AdminResponseMessage::TimedSync(res))) + } + }, + + PeerRequest::Protocol(protocol_req) => { + // TODO: add limits here + + self.protocol_request_handler + .ready() + .await? + .call(protocol_req) + .map_ok(PeerResponse::Protocol) + .await + } + } + } + + /// Handles a [`TimedSyncRequest`] to our node. + async fn handle_timed_sync_request( + &mut self, + req: TimedSyncRequest, + ) -> Result { + // TODO: add a limit on the amount of these requests in a certain time period. + + let peer_id = self.peer_info.id; + let handle = self.peer_info.handle.clone(); + + self.peer_sync_svc + .ready() + .await? + .call(PeerSyncRequest::IncomingCoreSyncData( + peer_id, + handle, + req.payload_data, + )) + .await?; + + let AddressBookResponse::Peers(peers) = self + .address_book_svc + .ready() + .await? + .call(AddressBookRequest::GetWhitePeers( + MAX_PEERS_IN_PEER_LIST_MESSAGE, + )) + .await? 
+ else { + panic!("Address book sent incorrect response!"); + }; + + let CoreSyncDataResponse(core_sync_data) = self + .our_sync_svc + .ready() + .await? + .call(CoreSyncDataRequest) + .await?; + + Ok(TimedSyncResponse { + payload_data: core_sync_data, + local_peerlist_new: peers.into_iter().map(Into::into).collect(), + }) + } +} diff --git a/p2p/p2p-core/src/client/timeout_monitor.rs b/p2p/p2p-core/src/client/timeout_monitor.rs index db261b4..5228ede 100644 --- a/p2p/p2p-core/src/client/timeout_monitor.rs +++ b/p2p/p2p-core/src/client/timeout_monitor.rs @@ -12,7 +12,7 @@ use tokio::{ use tower::ServiceExt; use tracing::instrument; -use cuprate_wire::admin::TimedSyncRequest; +use cuprate_wire::{admin::TimedSyncRequest, AdminRequestMessage, AdminResponseMessage}; use crate::{ client::{connection::ConnectionTaskRequest, InternalPeerID}, @@ -87,15 +87,15 @@ where tracing::debug!(parent: &ping_span, "Sending timed sync to peer"); connection_tx .send(ConnectionTaskRequest { - request: PeerRequest::TimedSync(TimedSyncRequest { + request: PeerRequest::Admin(AdminRequestMessage::TimedSync(TimedSyncRequest { payload_data: core_sync_data, - }), + })), response_channel: tx, permit: Some(permit), }) .await?; - let PeerResponse::TimedSync(timed_sync) = rx.await?? else { + let PeerResponse::Admin(AdminResponseMessage::TimedSync(timed_sync)) = rx.await?? else { panic!("Connection task returned wrong response!"); }; diff --git a/p2p/p2p-core/src/handles.rs b/p2p/p2p-core/src/handles.rs index f383170..da47b65 100644 --- a/p2p/p2p-core/src/handles.rs +++ b/p2p/p2p-core/src/handles.rs @@ -23,10 +23,8 @@ impl HandleBuilder { } /// Sets the permit for this connection. - /// - /// This must be called at least once. 
- pub fn with_permit(mut self, permit: OwnedSemaphorePermit) -> Self { - self.permit = Some(permit); + pub fn with_permit(mut self, permit: Option) -> Self { + self.permit = permit; self } @@ -39,7 +37,7 @@ impl HandleBuilder { ( ConnectionGuard { token: token.clone(), - _permit: self.permit.expect("connection permit was not set!"), + _permit: self.permit, }, ConnectionHandle { token: token.clone(), @@ -56,7 +54,7 @@ pub struct BanPeer(pub Duration); /// A struct given to the connection task. pub struct ConnectionGuard { token: CancellationToken, - _permit: OwnedSemaphorePermit, + _permit: Option, } impl ConnectionGuard { diff --git a/p2p/p2p-core/src/lib.rs b/p2p/p2p-core/src/lib.rs index 8703d59..83cc4d2 100644 --- a/p2p/p2p-core/src/lib.rs +++ b/p2p/p2p-core/src/lib.rs @@ -1,4 +1,4 @@ -//! # Monero P2P +//! # Cuprate P2P Core //! //! This crate is general purpose P2P networking library for working with Monero. This is a low level //! crate, which means it may seem verbose for a lot of use cases, if you want a crate that handles @@ -6,13 +6,57 @@ //! //! # Network Zones //! -//! This crate abstracts over network zones, Tor/I2p/clearnet with the [NetworkZone] trait. Currently only clearnet is implemented: [ClearNet](network_zones::ClearNet). +//! This crate abstracts over network zones, Tor/I2p/clearnet with the [NetworkZone] trait. Currently only clearnet is implemented: [ClearNet]. //! //! # Usage //! -//! TODO +//! ## Connecting to a peer //! -use std::{fmt::Debug, future::Future, hash::Hash, pin::Pin}; +//! ```rust +//! # use std::{net::SocketAddr, str::FromStr}; +//! # +//! # use tower::ServiceExt; +//! # +//! # use cuprate_p2p_core::{ +//! # client::{ConnectRequest, Connector, HandshakerBuilder}, +//! # ClearNet, Network, +//! # }; +//! # use cuprate_wire::{common::PeerSupportFlags, BasicNodeData}; +//! # use cuprate_test_utils::monerod::monerod; +//! # +//! # tokio_test::block_on(async move { +//! # +//! # let _monerod = monerod::<&str>([]).await; +//! 
# let addr = _monerod.p2p_addr(); +//! # +//! // The information about our local node. +//! let our_basic_node_data = BasicNodeData { +//! my_port: 0, +//! network_id: Network::Mainnet.network_id(), +//! peer_id: 0, +//! support_flags: PeerSupportFlags::FLUFFY_BLOCKS, +//! rpc_port: 0, +//! rpc_credits_per_hash: 0, +//! }; +//! +//! // See [`HandshakerBuilder`] for information about the default values set, they may not be +//! // appropriate for every use case. +//! let handshaker = HandshakerBuilder::::new(our_basic_node_data).build(); +//! +//! // The outbound connector. +//! let mut connector = Connector::new(handshaker); +//! +//! // The connection. +//! let connection = connector +//! .oneshot(ConnectRequest { +//! addr, +//! permit: None, +//! }) +//! .await +//! .unwrap(); +//! # }); +//! ``` +use std::{fmt::Debug, future::Future, hash::Hash}; use futures::{Sink, Stream}; @@ -25,21 +69,27 @@ pub mod client; mod constants; pub mod error; pub mod handles; -pub mod network_zones; +mod network_zones; pub mod protocol; pub mod services; pub use error::*; +pub use network_zones::{ClearNet, ClearNetServerCfg}; pub use protocol::*; use services::*; +//re-export +pub use cuprate_helper::network::Network; +/// The direction of a connection. #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum ConnectionDirection { - InBound, - OutBound, + /// An inbound connection to our node. + Inbound, + /// An outbound connection from our node. + Outbound, } -#[cfg(not(feature = "borsh"))] +/// An address on a specific [`NetworkZone`]. pub trait NetZoneAddress: TryFrom + Into @@ -56,46 +106,19 @@ pub trait NetZoneAddress: /// that include the port, to be able to facilitate this network addresses must have a ban ID /// which for hidden services could just be the address it self but for clear net addresses will /// be the IP address. - /// TODO: IP zone banning? - type BanID: Debug + Hash + Eq + Clone + Copy + Send + 'static; - - /// Changes the port of this address to `port`. 
- fn set_port(&mut self, port: u16); - - fn make_canonical(&mut self); - - fn ban_id(&self) -> Self::BanID; - - fn should_add_to_peer_list(&self) -> bool; -} - -#[cfg(feature = "borsh")] -pub trait NetZoneAddress: - TryFrom - + Into - + std::fmt::Display - + borsh::BorshSerialize - + borsh::BorshDeserialize - + Hash - + Eq - + Copy - + Send - + Sync - + Unpin - + 'static -{ - /// Cuprate needs to be able to ban peers by IP addresses and not just by SocketAddr as - /// that include the port, to be able to facilitate this network addresses must have a ban ID - /// which for hidden services could just be the address it self but for clear net addresses will - /// be the IP address. - /// TODO: IP zone banning? + /// + /// - TODO: IP zone banning? + /// - TODO: rename this to Host. + type BanID: Debug + Hash + Eq + Clone + Copy + Send + 'static; /// Changes the port of this address to `port`. fn set_port(&mut self, port: u16); + /// Turns this address into its canonical form. fn make_canonical(&mut self); + /// Returns the [`Self::BanID`] for this address. fn ban_id(&self) -> Self::BanID; fn should_add_to_peer_list(&self) -> bool; @@ -136,6 +159,15 @@ pub trait NetworkZone: Clone + Copy + Send + 'static { /// Config used to start a server which listens for incoming connections. type ServerCfg: Clone + Debug + Send + 'static; + /// Connects to a peer with the given address. + /// + ///
+ /// + /// This does not complete a handshake with the peer, to do that see the [crate](crate) docs. + /// + ///
+ /// + /// Returns the [`Self::Stream`] and [`Self::Sink`] to send messages to the peer. async fn connect_to_peer( addr: Self::Addr, ) -> Result<(Self::Stream, Self::Sink), std::io::Error>; @@ -206,55 +238,48 @@ pub trait CoreSyncSvc: CoreSyncDataRequest, Response = CoreSyncDataResponse, Error = tower::BoxError, - Future = Pin< - Box< - dyn Future> + Send + 'static, - >, - >, + Future = Self::Future2, > + Send + 'static { + // This allows us to put more restrictive bounds on the future without defining the future here + // explicitly. + type Future2: Future> + Send + 'static; } -impl CoreSyncSvc for T where +impl CoreSyncSvc for T +where T: tower::Service< CoreSyncDataRequest, Response = CoreSyncDataResponse, Error = tower::BoxError, - Future = Pin< - Box< - dyn Future> - + Send - + 'static, - >, - >, > + Send - + 'static + + 'static, + T::Future: Future> + Send + 'static, { + type Future2 = T::Future; } -pub trait PeerRequestHandler: +pub trait ProtocolRequestHandler: tower::Service< - PeerRequest, - Response = PeerResponse, + ProtocolRequest, + Response = ProtocolResponse, Error = tower::BoxError, - Future = Pin< - Box> + Send + 'static>, - >, + Future = Self::Future2, > + Send + 'static { + // This allows us to put more restrictive bounds on the future without defining the future here + // explicitly. + type Future2: Future> + Send + 'static; } -impl PeerRequestHandler for T where - T: tower::Service< - PeerRequest, - Response = PeerResponse, - Error = tower::BoxError, - Future = Pin< - Box> + Send + 'static>, - >, - > + Send - + 'static +impl ProtocolRequestHandler for T +where + T: tower::Service + + Send + + 'static, + T::Future: Future> + Send + 'static, { + type Future2 = T::Future; } diff --git a/p2p/p2p-core/src/protocol.rs b/p2p/p2p-core/src/protocol.rs index 172038f..5e4f4d7 100644 --- a/p2p/p2p-core/src/protocol.rs +++ b/p2p/p2p-core/src/protocol.rs @@ -1,13 +1,16 @@ -//! This module defines InternalRequests and InternalResponses. 
Cuprate's P2P works by translating network messages into an internal -//! request/ response, this is easy for levin "requests" and "responses" (admin messages) but takes a bit more work with "notifications" +//! This module defines [`PeerRequest`] and [`PeerResponse`]. Cuprate's P2P crates works by translating network messages into an internal +//! request/response enums, this is easy for levin "requests" and "responses" (admin messages) but takes a bit more work with "notifications" //! (protocol messages). //! -//! Some notifications are easy to translate, like `GetObjectsRequest` is obviously a request but others like `NewFluffyBlock` are a -//! bit tri cker. To translate a `NewFluffyBlock` into a request/ response we will have to look to see if we asked for `FluffyMissingTransactionsRequest` -//! if we have we interpret `NewFluffyBlock` as a response if not its a request that doesn't require a response. +//! Some notifications are easy to translate, like [`GetObjectsRequest`] is obviously a request but others like [`NewFluffyBlock`] are a +//! bit tricker. To translate a [`NewFluffyBlock`] into a request/ response we will have to look to see if we asked for [`FluffyMissingTransactionsRequest`], +//! if we have, we interpret [`NewFluffyBlock`] as a response, if not, it's a request that doesn't require a response. //! -//! Here is every P2P request/ response. *note admin messages are already request/ response so "Handshake" is actually made of a HandshakeRequest & HandshakeResponse +//! Here is every P2P request/response. //! +//! *note admin messages are already request/response so "Handshake" is actually made of a HandshakeRequest & HandshakeResponse +//! +//! ```md //! Admin: //! Handshake, //! TimedSync, @@ -21,16 +24,14 @@ //! Request: NewBlock, Response: None, //! Request: NewFluffyBlock, Response: None, //! Request: NewTransactions, Response: None +//!``` //! 
use cuprate_wire::{ - admin::{ - HandshakeRequest, HandshakeResponse, PingResponse, SupportFlagsResponse, TimedSyncRequest, - TimedSyncResponse, - }, protocol::{ ChainRequest, ChainResponse, FluffyMissingTransactionsRequest, GetObjectsRequest, GetObjectsResponse, GetTxPoolCompliment, NewBlock, NewFluffyBlock, NewTransactions, }, + AdminRequestMessage, AdminResponseMessage, }; mod try_from; @@ -60,12 +61,7 @@ pub enum BroadcastMessage { } #[derive(Debug, Clone)] -pub enum PeerRequest { - Handshake(HandshakeRequest), - TimedSync(TimedSyncRequest), - Ping, - SupportFlags, - +pub enum ProtocolRequest { GetObjects(GetObjectsRequest), GetChain(ChainRequest), FluffyMissingTxs(FluffyMissingTransactionsRequest), @@ -75,41 +71,47 @@ pub enum PeerRequest { NewTransactions(NewTransactions), } +#[derive(Debug, Clone)] +pub enum PeerRequest { + Admin(AdminRequestMessage), + Protocol(ProtocolRequest), +} + impl PeerRequest { pub fn id(&self) -> MessageID { match self { - PeerRequest::Handshake(_) => MessageID::Handshake, - PeerRequest::TimedSync(_) => MessageID::TimedSync, - PeerRequest::Ping => MessageID::Ping, - PeerRequest::SupportFlags => MessageID::SupportFlags, - - PeerRequest::GetObjects(_) => MessageID::GetObjects, - PeerRequest::GetChain(_) => MessageID::GetChain, - PeerRequest::FluffyMissingTxs(_) => MessageID::FluffyMissingTxs, - PeerRequest::GetTxPoolCompliment(_) => MessageID::GetTxPoolCompliment, - PeerRequest::NewBlock(_) => MessageID::NewBlock, - PeerRequest::NewFluffyBlock(_) => MessageID::NewFluffyBlock, - PeerRequest::NewTransactions(_) => MessageID::NewTransactions, + PeerRequest::Admin(admin_req) => match admin_req { + AdminRequestMessage::Handshake(_) => MessageID::Handshake, + AdminRequestMessage::TimedSync(_) => MessageID::TimedSync, + AdminRequestMessage::Ping => MessageID::Ping, + AdminRequestMessage::SupportFlags => MessageID::SupportFlags, + }, + PeerRequest::Protocol(protocol_request) => match protocol_request { + ProtocolRequest::GetObjects(_) => 
MessageID::GetObjects, + ProtocolRequest::GetChain(_) => MessageID::GetChain, + ProtocolRequest::FluffyMissingTxs(_) => MessageID::FluffyMissingTxs, + ProtocolRequest::GetTxPoolCompliment(_) => MessageID::GetTxPoolCompliment, + ProtocolRequest::NewBlock(_) => MessageID::NewBlock, + ProtocolRequest::NewFluffyBlock(_) => MessageID::NewFluffyBlock, + ProtocolRequest::NewTransactions(_) => MessageID::NewTransactions, + }, } } pub fn needs_response(&self) -> bool { !matches!( self, - PeerRequest::NewBlock(_) - | PeerRequest::NewFluffyBlock(_) - | PeerRequest::NewTransactions(_) + PeerRequest::Protocol( + ProtocolRequest::NewBlock(_) + | ProtocolRequest::NewFluffyBlock(_) + | ProtocolRequest::NewTransactions(_) + ) ) } } #[derive(Debug, Clone)] -pub enum PeerResponse { - Handshake(HandshakeResponse), - TimedSync(TimedSyncResponse), - Ping(PingResponse), - SupportFlags(SupportFlagsResponse), - +pub enum ProtocolResponse { GetObjects(GetObjectsResponse), GetChain(ChainResponse), NewFluffyBlock(NewFluffyBlock), @@ -117,20 +119,29 @@ pub enum PeerResponse { NA, } +#[derive(Debug, Clone)] +pub enum PeerResponse { + Admin(AdminResponseMessage), + Protocol(ProtocolResponse), +} + impl PeerResponse { - pub fn id(&self) -> MessageID { - match self { - PeerResponse::Handshake(_) => MessageID::Handshake, - PeerResponse::TimedSync(_) => MessageID::TimedSync, - PeerResponse::Ping(_) => MessageID::Ping, - PeerResponse::SupportFlags(_) => MessageID::SupportFlags, + pub fn id(&self) -> Option { + Some(match self { + PeerResponse::Admin(admin_res) => match admin_res { + AdminResponseMessage::Handshake(_) => MessageID::Handshake, + AdminResponseMessage::TimedSync(_) => MessageID::TimedSync, + AdminResponseMessage::Ping(_) => MessageID::Ping, + AdminResponseMessage::SupportFlags(_) => MessageID::SupportFlags, + }, + PeerResponse::Protocol(protocol_res) => match protocol_res { + ProtocolResponse::GetObjects(_) => MessageID::GetObjects, + ProtocolResponse::GetChain(_) => MessageID::GetChain, 
+ ProtocolResponse::NewFluffyBlock(_) => MessageID::NewBlock, + ProtocolResponse::NewTransactions(_) => MessageID::NewFluffyBlock, - PeerResponse::GetObjects(_) => MessageID::GetObjects, - PeerResponse::GetChain(_) => MessageID::GetChain, - PeerResponse::NewFluffyBlock(_) => MessageID::NewBlock, - PeerResponse::NewTransactions(_) => MessageID::NewFluffyBlock, - - PeerResponse::NA => panic!("Can't get message ID for a non existent response"), - } + ProtocolResponse::NA => return None, + }, + }) } } diff --git a/p2p/p2p-core/src/protocol/try_from.rs b/p2p/p2p-core/src/protocol/try_from.rs index 8e3d026..8a0b67d 100644 --- a/p2p/p2p-core/src/protocol/try_from.rs +++ b/p2p/p2p-core/src/protocol/try_from.rs @@ -1,150 +1,111 @@ //! This module contains the implementations of [`TryFrom`] and [`From`] to convert between //! [`Message`], [`PeerRequest`] and [`PeerResponse`]. -use cuprate_wire::{Message, ProtocolMessage, RequestMessage, ResponseMessage}; +use cuprate_wire::{Message, ProtocolMessage}; -use super::{PeerRequest, PeerResponse}; +use crate::{PeerRequest, PeerResponse, ProtocolRequest, ProtocolResponse}; #[derive(Debug)] pub struct MessageConversionError; -macro_rules! match_body { - (match $value: ident {$($body:tt)*} ($left:pat => $right_ty:expr) $($todo:tt)*) => { - match_body!( match $value { - $left => $right_ty, - $($body)* - } $($todo)* ) - }; - (match $value: ident {$($body:tt)*}) => { - match $value { - $($body)* - } - }; -} - -macro_rules! from { - ($left_ty:ident, $right_ty:ident, {$($left:ident $(($val: ident))? = $right:ident $(($vall: ident))?,)+}) => { - impl From<$left_ty> for $right_ty { - fn from(value: $left_ty) -> Self { - match_body!( match value {} - $(($left_ty::$left$(($val))? 
=> $right_ty::$right$(($vall))?))+ - ) +impl From for ProtocolMessage { + fn from(value: ProtocolRequest) -> Self { + match value { + ProtocolRequest::GetObjects(val) => ProtocolMessage::GetObjectsRequest(val), + ProtocolRequest::GetChain(val) => ProtocolMessage::ChainRequest(val), + ProtocolRequest::FluffyMissingTxs(val) => { + ProtocolMessage::FluffyMissingTransactionsRequest(val) } + ProtocolRequest::GetTxPoolCompliment(val) => ProtocolMessage::GetTxPoolCompliment(val), + ProtocolRequest::NewBlock(val) => ProtocolMessage::NewBlock(val), + ProtocolRequest::NewFluffyBlock(val) => ProtocolMessage::NewFluffyBlock(val), + ProtocolRequest::NewTransactions(val) => ProtocolMessage::NewTransactions(val), } - }; + } } -macro_rules! try_from { - ($left_ty:ident, $right_ty:ident, {$($left:ident $(($val: ident))? = $right:ident $(($vall: ident))?,)+}) => { - impl TryFrom<$left_ty> for $right_ty { - type Error = MessageConversionError; - - fn try_from(value: $left_ty) -> Result { - Ok(match_body!( match value { - _ => return Err(MessageConversionError) - } - $(($left_ty::$left$(($val))? => $right_ty::$right$(($vall))?))+ - )) - } - } - }; -} - -macro_rules! from_try_from { - ($left_ty:ident, $right_ty:ident, {$($left:ident $(($val: ident))? = $right:ident $(($vall: ident))?,)+}) => { - try_from!($left_ty, $right_ty, {$($left $(($val))? = $right $(($vall))?,)+}); - from!($right_ty, $left_ty, {$($right $(($val))? = $left $(($vall))?,)+}); - }; -} - -macro_rules! try_from_try_from { - ($left_ty:ident, $right_ty:ident, {$($left:ident $(($val: ident))? = $right:ident $(($vall: ident))?,)+}) => { - try_from!($left_ty, $right_ty, {$($left $(($val))? = $right $(($vall))?,)+}); - try_from!($right_ty, $left_ty, {$($right $(($val))? 
= $left $(($val))?,)+}); - }; -} - -from_try_from!(PeerRequest, RequestMessage,{ - Handshake(val) = Handshake(val), - Ping = Ping, - SupportFlags = SupportFlags, - TimedSync(val) = TimedSync(val), -}); - -try_from_try_from!(PeerRequest, ProtocolMessage,{ - NewBlock(val) = NewBlock(val), - NewFluffyBlock(val) = NewFluffyBlock(val), - GetObjects(val) = GetObjectsRequest(val), - GetChain(val) = ChainRequest(val), - NewTransactions(val) = NewTransactions(val), - FluffyMissingTxs(val) = FluffyMissingTransactionsRequest(val), - GetTxPoolCompliment(val) = GetTxPoolCompliment(val), -}); - -impl TryFrom for PeerRequest { +impl TryFrom for ProtocolRequest { type Error = MessageConversionError; - fn try_from(value: Message) -> Result { - match value { - Message::Request(req) => Ok(req.into()), - Message::Protocol(pro) => pro.try_into(), - _ => Err(MessageConversionError), - } + fn try_from(value: ProtocolMessage) -> Result { + Ok(match value { + ProtocolMessage::GetObjectsRequest(val) => ProtocolRequest::GetObjects(val), + ProtocolMessage::ChainRequest(val) => ProtocolRequest::GetChain(val), + ProtocolMessage::FluffyMissingTransactionsRequest(val) => { + ProtocolRequest::FluffyMissingTxs(val) + } + ProtocolMessage::GetTxPoolCompliment(val) => ProtocolRequest::GetTxPoolCompliment(val), + ProtocolMessage::NewBlock(val) => ProtocolRequest::NewBlock(val), + ProtocolMessage::NewFluffyBlock(val) => ProtocolRequest::NewFluffyBlock(val), + ProtocolMessage::NewTransactions(val) => ProtocolRequest::NewTransactions(val), + ProtocolMessage::GetObjectsResponse(_) | ProtocolMessage::ChainEntryResponse(_) => { + return Err(MessageConversionError) + } + }) } } impl From for Message { fn from(value: PeerRequest) -> Self { match value { - PeerRequest::Handshake(val) => Message::Request(RequestMessage::Handshake(val)), - PeerRequest::Ping => Message::Request(RequestMessage::Ping), - PeerRequest::SupportFlags => Message::Request(RequestMessage::SupportFlags), - PeerRequest::TimedSync(val) => 
Message::Request(RequestMessage::TimedSync(val)), - - PeerRequest::NewBlock(val) => Message::Protocol(ProtocolMessage::NewBlock(val)), - PeerRequest::NewFluffyBlock(val) => { - Message::Protocol(ProtocolMessage::NewFluffyBlock(val)) - } - PeerRequest::GetObjects(val) => { - Message::Protocol(ProtocolMessage::GetObjectsRequest(val)) - } - PeerRequest::GetChain(val) => Message::Protocol(ProtocolMessage::ChainRequest(val)), - PeerRequest::NewTransactions(val) => { - Message::Protocol(ProtocolMessage::NewTransactions(val)) - } - PeerRequest::FluffyMissingTxs(val) => { - Message::Protocol(ProtocolMessage::FluffyMissingTransactionsRequest(val)) - } - PeerRequest::GetTxPoolCompliment(val) => { - Message::Protocol(ProtocolMessage::GetTxPoolCompliment(val)) - } + PeerRequest::Admin(val) => Message::Request(val), + PeerRequest::Protocol(val) => Message::Protocol(val.into()), } } } -from_try_from!(PeerResponse, ResponseMessage,{ - Handshake(val) = Handshake(val), - Ping(val) = Ping(val), - SupportFlags(val) = SupportFlags(val), - TimedSync(val) = TimedSync(val), -}); +impl TryFrom for PeerRequest { + type Error = MessageConversionError; -try_from_try_from!(PeerResponse, ProtocolMessage,{ - NewFluffyBlock(val) = NewFluffyBlock(val), - GetObjects(val) = GetObjectsResponse(val), - GetChain(val) = ChainEntryResponse(val), - NewTransactions(val) = NewTransactions(val), + fn try_from(value: Message) -> Result { + match value { + Message::Request(req) => Ok(PeerRequest::Admin(req)), + Message::Protocol(pro) => Ok(PeerRequest::Protocol(pro.try_into()?)), + Message::Response(_) => Err(MessageConversionError), + } + } +} -}); +impl TryFrom for ProtocolMessage { + type Error = MessageConversionError; + + fn try_from(value: ProtocolResponse) -> Result { + Ok(match value { + ProtocolResponse::NewTransactions(val) => ProtocolMessage::NewTransactions(val), + ProtocolResponse::NewFluffyBlock(val) => ProtocolMessage::NewFluffyBlock(val), + ProtocolResponse::GetChain(val) => 
ProtocolMessage::ChainEntryResponse(val), + ProtocolResponse::GetObjects(val) => ProtocolMessage::GetObjectsResponse(val), + ProtocolResponse::NA => return Err(MessageConversionError), + }) + } +} + +impl TryFrom for ProtocolResponse { + type Error = MessageConversionError; + + fn try_from(value: ProtocolMessage) -> Result { + Ok(match value { + ProtocolMessage::NewTransactions(val) => ProtocolResponse::NewTransactions(val), + ProtocolMessage::NewFluffyBlock(val) => ProtocolResponse::NewFluffyBlock(val), + ProtocolMessage::ChainEntryResponse(val) => ProtocolResponse::GetChain(val), + ProtocolMessage::GetObjectsResponse(val) => ProtocolResponse::GetObjects(val), + ProtocolMessage::ChainRequest(_) + | ProtocolMessage::FluffyMissingTransactionsRequest(_) + | ProtocolMessage::GetObjectsRequest(_) + | ProtocolMessage::GetTxPoolCompliment(_) + | ProtocolMessage::NewBlock(_) => return Err(MessageConversionError), + }) + } +} impl TryFrom for PeerResponse { type Error = MessageConversionError; fn try_from(value: Message) -> Result { match value { - Message::Response(res) => Ok(res.into()), - Message::Protocol(pro) => pro.try_into(), - _ => Err(MessageConversionError), + Message::Response(res) => Ok(PeerResponse::Admin(res)), + Message::Protocol(pro) => Ok(PeerResponse::Protocol(pro.try_into()?)), + Message::Request(_) => Err(MessageConversionError), } } } @@ -154,27 +115,8 @@ impl TryFrom for Message { fn try_from(value: PeerResponse) -> Result { Ok(match value { - PeerResponse::Handshake(val) => Message::Response(ResponseMessage::Handshake(val)), - PeerResponse::Ping(val) => Message::Response(ResponseMessage::Ping(val)), - PeerResponse::SupportFlags(val) => { - Message::Response(ResponseMessage::SupportFlags(val)) - } - PeerResponse::TimedSync(val) => Message::Response(ResponseMessage::TimedSync(val)), - - PeerResponse::NewFluffyBlock(val) => { - Message::Protocol(ProtocolMessage::NewFluffyBlock(val)) - } - PeerResponse::GetObjects(val) => { - 
Message::Protocol(ProtocolMessage::GetObjectsResponse(val)) - } - PeerResponse::GetChain(val) => { - Message::Protocol(ProtocolMessage::ChainEntryResponse(val)) - } - PeerResponse::NewTransactions(val) => { - Message::Protocol(ProtocolMessage::NewTransactions(val)) - } - - PeerResponse::NA => return Err(MessageConversionError), + PeerResponse::Admin(val) => Message::Response(val), + PeerResponse::Protocol(val) => Message::Protocol(val.try_into()?), }) } } diff --git a/p2p/p2p-core/src/services.rs b/p2p/p2p-core/src/services.rs index 6fd6c15..b01bde0 100644 --- a/p2p/p2p-core/src/services.rs +++ b/p2p/p2p-core/src/services.rs @@ -6,6 +6,7 @@ use crate::{ NetworkZone, }; +/// A request to the service that keeps track of peers sync states. pub enum PeerSyncRequest { /// Request some peers to sync from. /// @@ -15,10 +16,11 @@ pub enum PeerSyncRequest { current_cumulative_difficulty: u128, block_needed: Option, }, - /// Add/update a peers core sync data to the sync state service. + /// Add/update a peer's core sync data. IncomingCoreSyncData(InternalPeerID, ConnectionHandle, CoreSyncData), } +/// A response from the service that keeps track of peers sync states. pub enum PeerSyncResponse { /// The return value of [`PeerSyncRequest::PeersToSyncFrom`]. PeersToSyncFrom(Vec>), @@ -26,10 +28,16 @@ pub enum PeerSyncResponse { Ok, } +/// A request to the core sync service for our node's [`CoreSyncData`]. pub struct CoreSyncDataRequest; +/// A response from the core sync service containing our [`CoreSyncData`]. pub struct CoreSyncDataResponse(pub CoreSyncData); +/// A [`NetworkZone`] specific [`PeerListEntryBase`]. +/// +/// Using this type instead of [`PeerListEntryBase`] in the address book makes +/// usage easier for the rest of the P2P code as we can guarantee only the correct addresses will be stored and returned. 
#[derive(Debug, Copy, Clone, Eq, PartialEq)] #[cfg_attr( feature = "borsh", @@ -57,6 +65,7 @@ impl From> for cuprate_wire: } } +/// An error converting a [`PeerListEntryBase`] into a [`ZoneSpecificPeerListEntryBase`]. #[derive(Debug, thiserror::Error)] pub enum PeerListConversionError { #[error("Address is in incorrect zone")] @@ -82,6 +91,7 @@ impl TryFrom } } +/// A request to the address book service. pub enum AddressBookRequest { /// Tells the address book that we have connected or received a connection from a peer. NewConnection { @@ -123,6 +133,7 @@ pub enum AddressBookRequest { IsPeerBanned(Z::Addr), } +/// A response from the address book service. pub enum AddressBookResponse { Ok, Peer(ZoneSpecificPeerListEntryBase), diff --git a/p2p/p2p-core/tests/fragmented_handshake.rs b/p2p/p2p-core/tests/fragmented_handshake.rs index 2e96574..c19a2a6 100644 --- a/p2p/p2p-core/tests/fragmented_handshake.rs +++ b/p2p/p2p-core/tests/fragmented_handshake.rs @@ -2,7 +2,6 @@ use std::{ net::SocketAddr, pin::Pin, - sync::Arc, task::{Context, Poll}, time::Duration, }; @@ -13,7 +12,6 @@ use tokio::{ tcp::{OwnedReadHalf, OwnedWriteHalf}, TcpListener, TcpStream, }, - sync::Semaphore, time::timeout, }; use tokio_util::{ @@ -24,9 +22,11 @@ use tower::{Service, ServiceExt}; use cuprate_helper::network::Network; use cuprate_p2p_core::{ - client::{ConnectRequest, Connector, DoHandshakeRequest, HandShaker, InternalPeerID}, - network_zones::ClearNetServerCfg, - ConnectionDirection, NetworkZone, + client::{ + handshaker::HandshakerBuilder, ConnectRequest, Connector, DoHandshakeRequest, + InternalPeerID, + }, + ClearNetServerCfg, ConnectionDirection, NetworkZone, }; use cuprate_wire::{ common::PeerSupportFlags, @@ -36,9 +36,6 @@ use cuprate_wire::{ use cuprate_test_utils::monerod::monerod; -mod utils; -use utils::*; - /// A network zone equal to clear net where every message sent is turned into a fragmented message. /// Does not support sending fragmented or dummy messages manually. 
#[derive(Clone, Copy)] @@ -135,9 +132,6 @@ impl Encoder> for FragmentCodec { #[tokio::test] async fn fragmented_handshake_cuprate_to_monerod() { - let semaphore = Arc::new(Semaphore::new(10)); - let permit = semaphore.acquire_owned().await.unwrap(); - let monerod = monerod(["--fixed-difficulty=1", "--out-peers=0"]).await; let our_basic_node_data = BasicNodeData { @@ -149,14 +143,7 @@ async fn fragmented_handshake_cuprate_to_monerod() { rpc_credits_per_hash: 0, }; - let handshaker = HandShaker::::new( - DummyAddressBook, - DummyPeerSyncSvc, - DummyCoreSyncSvc, - DummyPeerRequestHandlerSvc, - |_| futures::stream::pending(), - our_basic_node_data, - ); + let handshaker = HandshakerBuilder::::new(our_basic_node_data).build(); let mut connector = Connector::new(handshaker); @@ -166,7 +153,7 @@ async fn fragmented_handshake_cuprate_to_monerod() { .unwrap() .call(ConnectRequest { addr: monerod.p2p_addr(), - permit, + permit: None, }) .await .unwrap(); @@ -174,9 +161,6 @@ async fn fragmented_handshake_cuprate_to_monerod() { #[tokio::test] async fn fragmented_handshake_monerod_to_cuprate() { - let semaphore = Arc::new(Semaphore::new(10)); - let permit = semaphore.acquire_owned().await.unwrap(); - let our_basic_node_data = BasicNodeData { my_port: 18081, network_id: Network::Mainnet.network_id(), @@ -186,14 +170,7 @@ async fn fragmented_handshake_monerod_to_cuprate() { rpc_credits_per_hash: 0, }; - let mut handshaker = HandShaker::::new( - DummyAddressBook, - DummyPeerSyncSvc, - DummyCoreSyncSvc, - DummyPeerRequestHandlerSvc, - |_| futures::stream::pending(), - our_basic_node_data, - ); + let mut handshaker = HandshakerBuilder::::new(our_basic_node_data).build(); let ip = "127.0.0.1".parse().unwrap(); @@ -215,8 +192,8 @@ async fn fragmented_handshake_monerod_to_cuprate() { addr: InternalPeerID::KnownAddr(addr.unwrap()), // This is clear net all addresses are known. 
peer_stream: stream, peer_sink: sink, - direction: ConnectionDirection::InBound, - permit, + direction: ConnectionDirection::Inbound, + permit: None, }) .await .unwrap(); diff --git a/p2p/p2p-core/tests/handles.rs b/p2p/p2p-core/tests/handles.rs index e98cd2d..47d70b0 100644 --- a/p2p/p2p-core/tests/handles.rs +++ b/p2p/p2p-core/tests/handles.rs @@ -6,10 +6,7 @@ use cuprate_p2p_core::handles::HandleBuilder; #[test] fn send_ban_signal() { - let semaphore = Arc::new(Semaphore::new(5)); - let (guard, mut connection_handle) = HandleBuilder::default() - .with_permit(semaphore.try_acquire_owned().unwrap()) - .build(); + let (guard, mut connection_handle) = HandleBuilder::default().build(); connection_handle.ban_peer(Duration::from_secs(300)); @@ -28,10 +25,7 @@ fn send_ban_signal() { #[test] fn multiple_ban_signals() { - let semaphore = Arc::new(Semaphore::new(5)); - let (guard, mut connection_handle) = HandleBuilder::default() - .with_permit(semaphore.try_acquire_owned().unwrap()) - .build(); + let (guard, mut connection_handle) = HandleBuilder::default().build(); connection_handle.ban_peer(Duration::from_secs(300)); connection_handle.ban_peer(Duration::from_secs(301)); @@ -55,7 +49,7 @@ fn multiple_ban_signals() { fn dropped_guard_sends_disconnect_signal() { let semaphore = Arc::new(Semaphore::new(5)); let (guard, connection_handle) = HandleBuilder::default() - .with_permit(semaphore.try_acquire_owned().unwrap()) + .with_permit(Some(semaphore.try_acquire_owned().unwrap())) .build(); assert!(!connection_handle.is_closed()); diff --git a/p2p/p2p-core/tests/handshake.rs b/p2p/p2p-core/tests/handshake.rs index f979248..5ce6153 100644 --- a/p2p/p2p-core/tests/handshake.rs +++ b/p2p/p2p-core/tests/handshake.rs @@ -1,9 +1,8 @@ -use std::{sync::Arc, time::Duration}; +use std::time::Duration; use futures::StreamExt; use tokio::{ io::{duplex, split}, - sync::Semaphore, time::timeout, }; use tokio_util::codec::{FramedRead, FramedWrite}; @@ -13,9 +12,11 @@ use 
cuprate_helper::network::Network; use cuprate_wire::{common::PeerSupportFlags, BasicNodeData, MoneroWireCodec}; use cuprate_p2p_core::{ - client::{ConnectRequest, Connector, DoHandshakeRequest, HandShaker, InternalPeerID}, - network_zones::{ClearNet, ClearNetServerCfg}, - ConnectionDirection, NetworkZone, + client::{ + handshaker::HandshakerBuilder, ConnectRequest, Connector, DoHandshakeRequest, + InternalPeerID, + }, + ClearNet, ClearNetServerCfg, ConnectionDirection, NetworkZone, }; use cuprate_test_utils::{ @@ -23,18 +24,10 @@ use cuprate_test_utils::{ test_netzone::{TestNetZone, TestNetZoneAddr}, }; -mod utils; -use utils::*; - #[tokio::test] async fn handshake_cuprate_to_cuprate() { // Tests a Cuprate <-> Cuprate handshake by making 2 handshake services and making them talk to // each other. - - let semaphore = Arc::new(Semaphore::new(10)); - let permit_1 = semaphore.clone().acquire_owned().await.unwrap(); - let permit_2 = semaphore.acquire_owned().await.unwrap(); - let our_basic_node_data_1 = BasicNodeData { my_port: 0, network_id: Network::Mainnet.network_id(), @@ -48,23 +41,11 @@ async fn handshake_cuprate_to_cuprate() { let mut our_basic_node_data_2 = our_basic_node_data_1.clone(); our_basic_node_data_2.peer_id = 2344; - let mut handshaker_1 = HandShaker::, _, _, _, _, _>::new( - DummyAddressBook, - DummyPeerSyncSvc, - DummyCoreSyncSvc, - DummyPeerRequestHandlerSvc, - |_| futures::stream::pending(), - our_basic_node_data_1, - ); + let mut handshaker_1 = + HandshakerBuilder::>::new(our_basic_node_data_1).build(); - let mut handshaker_2 = HandShaker::, _, _, _, _, _>::new( - DummyAddressBook, - DummyPeerSyncSvc, - DummyCoreSyncSvc, - DummyPeerRequestHandlerSvc, - |_| futures::stream::pending(), - our_basic_node_data_2, - ); + let mut handshaker_2 = + HandshakerBuilder::>::new(our_basic_node_data_2).build(); let (p1, p2) = duplex(50_000); @@ -75,16 +56,16 @@ async fn handshake_cuprate_to_cuprate() { addr: InternalPeerID::KnownAddr(TestNetZoneAddr(888)), 
peer_stream: FramedRead::new(p2_receiver, MoneroWireCodec::default()), peer_sink: FramedWrite::new(p2_sender, MoneroWireCodec::default()), - direction: ConnectionDirection::OutBound, - permit: permit_1, + direction: ConnectionDirection::Outbound, + permit: None, }; let p2_handshake_req = DoHandshakeRequest { addr: InternalPeerID::KnownAddr(TestNetZoneAddr(444)), peer_stream: FramedRead::new(p1_receiver, MoneroWireCodec::default()), peer_sink: FramedWrite::new(p1_sender, MoneroWireCodec::default()), - direction: ConnectionDirection::InBound, - permit: permit_2, + direction: ConnectionDirection::Inbound, + permit: None, }; let p1 = tokio::spawn(async move { @@ -114,9 +95,6 @@ async fn handshake_cuprate_to_cuprate() { #[tokio::test] async fn handshake_cuprate_to_monerod() { - let semaphore = Arc::new(Semaphore::new(10)); - let permit = semaphore.acquire_owned().await.unwrap(); - let monerod = monerod(["--fixed-difficulty=1", "--out-peers=0"]).await; let our_basic_node_data = BasicNodeData { @@ -128,14 +106,7 @@ async fn handshake_cuprate_to_monerod() { rpc_credits_per_hash: 0, }; - let handshaker = HandShaker::::new( - DummyAddressBook, - DummyPeerSyncSvc, - DummyCoreSyncSvc, - DummyPeerRequestHandlerSvc, - |_| futures::stream::pending(), - our_basic_node_data, - ); + let handshaker = HandshakerBuilder::::new(our_basic_node_data).build(); let mut connector = Connector::new(handshaker); @@ -145,7 +116,7 @@ async fn handshake_cuprate_to_monerod() { .unwrap() .call(ConnectRequest { addr: monerod.p2p_addr(), - permit, + permit: None, }) .await .unwrap(); @@ -153,9 +124,6 @@ async fn handshake_cuprate_to_monerod() { #[tokio::test] async fn handshake_monerod_to_cuprate() { - let semaphore = Arc::new(Semaphore::new(10)); - let permit = semaphore.acquire_owned().await.unwrap(); - let our_basic_node_data = BasicNodeData { my_port: 18081, network_id: Network::Mainnet.network_id(), @@ -165,14 +133,7 @@ async fn handshake_monerod_to_cuprate() { rpc_credits_per_hash: 0, }; - let 
mut handshaker = HandShaker::::new( - DummyAddressBook, - DummyPeerSyncSvc, - DummyCoreSyncSvc, - DummyPeerRequestHandlerSvc, - |_| futures::stream::pending(), - our_basic_node_data, - ); + let mut handshaker = HandshakerBuilder::::new(our_basic_node_data).build(); let ip = "127.0.0.1".parse().unwrap(); @@ -194,8 +155,8 @@ async fn handshake_monerod_to_cuprate() { addr: InternalPeerID::KnownAddr(addr.unwrap()), // This is clear net all addresses are known. peer_stream: stream, peer_sink: sink, - direction: ConnectionDirection::InBound, - permit, + direction: ConnectionDirection::Inbound, + permit: None, }) .await .unwrap(); diff --git a/p2p/p2p-core/tests/sending_receiving.rs b/p2p/p2p-core/tests/sending_receiving.rs index b4c42e2..e035daf 100644 --- a/p2p/p2p-core/tests/sending_receiving.rs +++ b/p2p/p2p-core/tests/sending_receiving.rs @@ -1,27 +1,18 @@ -use std::sync::Arc; - -use tokio::sync::Semaphore; use tower::{Service, ServiceExt}; use cuprate_helper::network::Network; use cuprate_wire::{common::PeerSupportFlags, protocol::GetObjectsRequest, BasicNodeData}; use cuprate_p2p_core::{ - client::{ConnectRequest, Connector, HandShaker}, - network_zones::ClearNet, + client::{handshaker::HandshakerBuilder, ConnectRequest, Connector}, protocol::{PeerRequest, PeerResponse}, + ClearNet, ProtocolRequest, ProtocolResponse, }; use cuprate_test_utils::monerod::monerod; -mod utils; -use utils::*; - #[tokio::test] async fn get_single_block_from_monerod() { - let semaphore = Arc::new(Semaphore::new(10)); - let permit = semaphore.acquire_owned().await.unwrap(); - let monerod = monerod(["--out-peers=0"]).await; let our_basic_node_data = BasicNodeData { @@ -33,14 +24,7 @@ async fn get_single_block_from_monerod() { rpc_credits_per_hash: 0, }; - let handshaker = HandShaker::::new( - DummyAddressBook, - DummyPeerSyncSvc, - DummyCoreSyncSvc, - DummyPeerRequestHandlerSvc, - |_| futures::stream::pending(), - our_basic_node_data, - ); + let handshaker = 
HandshakerBuilder::::new(our_basic_node_data).build(); let mut connector = Connector::new(handshaker); @@ -50,22 +34,26 @@ async fn get_single_block_from_monerod() { .unwrap() .call(ConnectRequest { addr: monerod.p2p_addr(), - permit, + permit: None, }) .await .unwrap(); - let PeerResponse::GetObjects(obj) = connected_peer + let PeerResponse::Protocol(ProtocolResponse::GetObjects(obj)) = connected_peer .ready() .await .unwrap() - .call(PeerRequest::GetObjects(GetObjectsRequest { - blocks: hex::decode("418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3") + .call(PeerRequest::Protocol(ProtocolRequest::GetObjects( + GetObjectsRequest { + blocks: hex::decode( + "418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3", + ) .unwrap() .try_into() .unwrap(), - pruned: false, - })) + pruned: false, + }, + ))) .await .unwrap() else { diff --git a/p2p/p2p-core/tests/utils.rs b/p2p/p2p-core/tests/utils.rs deleted file mode 100644 index 9587bb5..0000000 --- a/p2p/p2p-core/tests/utils.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::{ - future::Future, - pin::Pin, - task::{Context, Poll}, -}; - -use futures::FutureExt; -use tower::Service; - -use cuprate_p2p_core::{ - services::{ - AddressBookRequest, AddressBookResponse, CoreSyncDataRequest, CoreSyncDataResponse, - PeerSyncRequest, PeerSyncResponse, - }, - NetworkZone, PeerRequest, PeerResponse, -}; - -#[derive(Clone)] -pub struct DummyAddressBook; - -impl Service> for DummyAddressBook { - type Response = AddressBookResponse; - type Error = tower::BoxError; - type Future = - Pin> + Send + 'static>>; - - fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, req: AddressBookRequest) -> Self::Future { - async move { - Ok(match req { - AddressBookRequest::GetWhitePeers(_) => AddressBookResponse::Peers(vec![]), - _ => AddressBookResponse::Ok, - }) - } - .boxed() - } -} - -#[derive(Clone)] -pub struct DummyCoreSyncSvc; - -impl Service for DummyCoreSyncSvc { - 
type Response = CoreSyncDataResponse; - type Error = tower::BoxError; - type Future = - Pin> + Send + 'static>>; - - fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, _: CoreSyncDataRequest) -> Self::Future { - async move { - Ok(CoreSyncDataResponse(cuprate_wire::CoreSyncData { - cumulative_difficulty: 1, - cumulative_difficulty_top64: 0, - current_height: 1, - pruning_seed: 0, - top_id: hex::decode( - "418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3", - ) - .unwrap() - .try_into() - .unwrap(), - top_version: 1, - })) - } - .boxed() - } -} - -#[derive(Clone)] -pub struct DummyPeerSyncSvc; - -impl Service> for DummyPeerSyncSvc { - type Error = tower::BoxError; - type Future = - Pin> + Send + 'static>>; - - type Response = PeerSyncResponse; - - fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, _: PeerSyncRequest) -> Self::Future { - async { Ok(PeerSyncResponse::Ok) }.boxed() - } -} - -#[derive(Clone)] -pub struct DummyPeerRequestHandlerSvc; - -impl Service for DummyPeerRequestHandlerSvc { - type Response = PeerResponse; - type Error = tower::BoxError; - type Future = - Pin> + Send + 'static>>; - - fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, _: PeerRequest) -> Self::Future { - async move { Ok(PeerResponse::NA) }.boxed() - } -} diff --git a/p2p/p2p/Cargo.toml b/p2p/p2p/Cargo.toml index 507d362..e9b03d2 100644 --- a/p2p/p2p/Cargo.toml +++ b/p2p/p2p/Cargo.toml @@ -31,6 +31,7 @@ rand = { workspace = true, features = ["std", "std_rng"] } rand_distr = { workspace = true, features = ["std"] } hex = { workspace = true, features = ["std"] } tracing = { workspace = true, features = ["std", "attributes"] } +borsh = { workspace = true, features = ["derive", "std"] } [dev-dependencies] cuprate-test-utils = { path = "../../test-utils" } diff --git a/p2p/p2p/src/block_downloader/block_queue.rs 
b/p2p/p2p/src/block_downloader/block_queue.rs index b03d847..d846c22 100644 --- a/p2p/p2p/src/block_downloader/block_queue.rs +++ b/p2p/p2p/src/block_downloader/block_queue.rs @@ -113,11 +113,10 @@ impl BlockQueue { #[cfg(test)] mod tests { - use futures::StreamExt; - use std::{collections::BTreeSet, sync::Arc}; + use std::collections::BTreeSet; + use futures::StreamExt; use proptest::{collection::vec, prelude::*}; - use tokio::sync::Semaphore; use tokio_test::block_on; use cuprate_p2p_core::handles::HandleBuilder; @@ -126,8 +125,7 @@ mod tests { prop_compose! { fn ready_batch_strategy()(start_height in 0_u64..500_000_000) -> ReadyQueueBatch { - // TODO: The permit will not be needed here when - let (_, peer_handle) = HandleBuilder::new().with_permit(Arc::new(Semaphore::new(1)).try_acquire_owned().unwrap()).build(); + let (_, peer_handle) = HandleBuilder::new().build(); ReadyQueueBatch { start_height, diff --git a/p2p/p2p/src/block_downloader/download_batch.rs b/p2p/p2p/src/block_downloader/download_batch.rs index e9dfcb4..fbf33b1 100644 --- a/p2p/p2p/src/block_downloader/download_batch.rs +++ b/p2p/p2p/src/block_downloader/download_batch.rs @@ -8,7 +8,10 @@ use tracing::instrument; use cuprate_fixed_bytes::ByteArrayVec; use cuprate_helper::asynch::rayon_spawn_async; -use cuprate_p2p_core::{handles::ConnectionHandle, NetworkZone, PeerRequest, PeerResponse}; +use cuprate_p2p_core::{ + handles::ConnectionHandle, NetworkZone, PeerRequest, PeerResponse, ProtocolRequest, + ProtocolResponse, +}; use cuprate_wire::protocol::{GetObjectsRequest, GetObjectsResponse}; use crate::{ @@ -50,16 +53,15 @@ async fn request_batch_from_peer( previous_id: [u8; 32], expected_start_height: u64, ) -> Result<(ClientPoolDropGuard, BlockBatch), BlockDownloadError> { - // Request the blocks. 
+ let request = PeerRequest::Protocol(ProtocolRequest::GetObjects(GetObjectsRequest { + blocks: ids.clone(), + pruned: false, + })); + + // Request the blocks and add a timeout to the request let blocks_response = timeout(BLOCK_DOWNLOADER_REQUEST_TIMEOUT, async { - let PeerResponse::GetObjects(blocks_response) = client - .ready() - .await? - .call(PeerRequest::GetObjects(GetObjectsRequest { - blocks: ids.clone(), - pruned: false, - })) - .await? + let PeerResponse::Protocol(ProtocolResponse::GetObjects(blocks_response)) = + client.ready().await?.call(request).await? else { panic!("Connection task returned wrong response."); }; diff --git a/p2p/p2p/src/block_downloader/request_chain.rs b/p2p/p2p/src/block_downloader/request_chain.rs index 471635b..4b0b47e 100644 --- a/p2p/p2p/src/block_downloader/request_chain.rs +++ b/p2p/p2p/src/block_downloader/request_chain.rs @@ -10,7 +10,7 @@ use cuprate_p2p_core::{ client::InternalPeerID, handles::ConnectionHandle, services::{PeerSyncRequest, PeerSyncResponse}, - NetworkZone, PeerRequest, PeerResponse, PeerSyncSvc, + NetworkZone, PeerRequest, PeerResponse, PeerSyncSvc, ProtocolRequest, ProtocolResponse, }; use cuprate_wire::protocol::{ChainRequest, ChainResponse}; @@ -34,13 +34,15 @@ pub async fn request_chain_entry_from_peer( mut client: ClientPoolDropGuard, short_history: [[u8; 32]; 2], ) -> Result<(ClientPoolDropGuard, ChainEntry), BlockDownloadError> { - let PeerResponse::GetChain(chain_res) = client + let PeerResponse::Protocol(ProtocolResponse::GetChain(chain_res)) = client .ready() .await? - .call(PeerRequest::GetChain(ChainRequest { - block_ids: short_history.into(), - prune: true, - })) + .call(PeerRequest::Protocol(ProtocolRequest::GetChain( + ChainRequest { + block_ids: short_history.into(), + prune: true, + }, + ))) .await? 
else { panic!("Connection task returned wrong response!"); @@ -132,10 +134,10 @@ where let mut futs = JoinSet::new(); - let req = PeerRequest::GetChain(ChainRequest { + let req = PeerRequest::Protocol(ProtocolRequest::GetChain(ChainRequest { block_ids: block_ids.into(), prune: false, - }); + })); tracing::debug!("Sending requests for chain entries."); @@ -149,7 +151,7 @@ where futs.spawn(timeout( BLOCK_DOWNLOADER_REQUEST_TIMEOUT, async move { - let PeerResponse::GetChain(chain_res) = + let PeerResponse::Protocol(ProtocolResponse::GetChain(chain_res)) = next_peer.ready().await?.call(cloned_req).await? else { panic!("connection task returned wrong response!"); diff --git a/p2p/p2p/src/block_downloader/tests.rs b/p2p/p2p/src/block_downloader/tests.rs index bf34272..981c557 100644 --- a/p2p/p2p/src/block_downloader/tests.rs +++ b/p2p/p2p/src/block_downloader/tests.rs @@ -15,15 +15,15 @@ use monero_serai::{ transaction::{Input, Timelock, Transaction, TransactionPrefix}, }; use proptest::{collection::vec, prelude::*}; -use tokio::{sync::Semaphore, time::timeout}; +use tokio::time::timeout; use tower::{service_fn, Service}; use cuprate_fixed_bytes::ByteArrayVec; use cuprate_p2p_core::{ client::{mock_client, Client, InternalPeerID, PeerInformation}, - network_zones::ClearNet, services::{PeerSyncRequest, PeerSyncResponse}, - ConnectionDirection, NetworkZone, PeerRequest, PeerResponse, + ClearNet, ConnectionDirection, NetworkZone, PeerRequest, PeerResponse, ProtocolRequest, + ProtocolResponse, }; use cuprate_pruning::PruningSeed; use cuprate_wire::{ @@ -182,18 +182,15 @@ prop_compose! 
{ } fn mock_block_downloader_client(blockchain: Arc) -> Client { - let semaphore = Arc::new(Semaphore::new(1)); - - let (connection_guard, connection_handle) = cuprate_p2p_core::handles::HandleBuilder::new() - .with_permit(semaphore.try_acquire_owned().unwrap()) - .build(); + let (connection_guard, connection_handle) = + cuprate_p2p_core::handles::HandleBuilder::new().build(); let request_handler = service_fn(move |req: PeerRequest| { let bc = blockchain.clone(); async move { match req { - PeerRequest::GetChain(chain_req) => { + PeerRequest::Protocol(ProtocolRequest::GetChain(chain_req)) => { let mut i = 0; while !bc.blocks.contains_key(&chain_req.block_ids[i]) { i += 1; @@ -215,18 +212,20 @@ fn mock_block_downloader_client(blockchain: Arc) -> Client>(); - Ok(PeerResponse::GetChain(ChainResponse { - start_height: 0, - total_height: 0, - cumulative_difficulty_low64: 1, - cumulative_difficulty_top64: 0, - m_block_ids: block_ids.into(), - m_block_weights: vec![], - first_block: Default::default(), - })) + Ok(PeerResponse::Protocol(ProtocolResponse::GetChain( + ChainResponse { + start_height: 0, + total_height: 0, + cumulative_difficulty_low64: 1, + cumulative_difficulty_top64: 0, + m_block_ids: block_ids.into(), + m_block_weights: vec![], + first_block: Default::default(), + }, + ))) } - PeerRequest::GetObjects(obj) => { + PeerRequest::Protocol(ProtocolRequest::GetObjects(obj)) => { let mut res = Vec::with_capacity(obj.blocks.len()); for i in 0..obj.blocks.len() { @@ -249,11 +248,13 @@ fn mock_block_downloader_client(blockchain: Arc) -> Client panic!(), } @@ -264,7 +265,7 @@ fn mock_block_downloader_client(blockchain: Arc) -> Client Service> for BroadcastSvc { // An error here means _all_ receivers were dropped which we assume will never happen. 
let _ = match direction { - Some(ConnectionDirection::InBound) => { + Some(ConnectionDirection::Inbound) => { self.tx_broadcast_channel_inbound.send(nex_tx_info) } - Some(ConnectionDirection::OutBound) => { + Some(ConnectionDirection::Outbound) => { self.tx_broadcast_channel_outbound.send(nex_tx_info) } None => { @@ -428,7 +428,7 @@ mod tests { .unwrap() .call(BroadcastRequest::Transaction { tx_bytes: Bytes::from_static(&[1]), - direction: Some(ConnectionDirection::OutBound), + direction: Some(ConnectionDirection::Outbound), received_from: None, }) .await @@ -440,7 +440,7 @@ mod tests { .unwrap() .call(BroadcastRequest::Transaction { tx_bytes: Bytes::from_static(&[2]), - direction: Some(ConnectionDirection::InBound), + direction: Some(ConnectionDirection::Inbound), received_from: None, }) .await diff --git a/p2p/p2p/src/client_pool.rs b/p2p/p2p/src/client_pool.rs index 711491d..51f57e9 100644 --- a/p2p/p2p/src/client_pool.rs +++ b/p2p/p2p/src/client_pool.rs @@ -9,7 +9,6 @@ //! //! Internally the pool is a [`DashMap`] which means care should be taken in `async` code //! as internally this uses blocking RwLocks. -//! use std::sync::Arc; use dashmap::DashMap; diff --git a/p2p/p2p/src/connection_maintainer.rs b/p2p/p2p/src/connection_maintainer.rs index 8e5c9bc..2bcf270 100644 --- a/p2p/p2p/src/connection_maintainer.rs +++ b/p2p/p2p/src/connection_maintainer.rs @@ -106,10 +106,6 @@ where panic!("No seed nodes available to get peers from"); } - // This isn't really needed here to limit connections as the seed nodes will be dropped when we have got - // peers from them. 
- let semaphore = Arc::new(Semaphore::new(seeds.len())); - let mut allowed_errors = seeds.len(); let mut handshake_futs = JoinSet::new(); @@ -125,10 +121,7 @@ where .expect("Connector had an error in `poll_ready`") .call(ConnectRequest { addr: *seed, - permit: semaphore - .clone() - .try_acquire_owned() - .expect("This must have enough permits as we just set the amount."), + permit: None, }), ); // Spawn the handshake on a separate task with a timeout, so we don't get stuck connecting to a peer. @@ -157,7 +150,10 @@ where .ready() .await .expect("Connector had an error in `poll_ready`") - .call(ConnectRequest { addr, permit }); + .call(ConnectRequest { + addr, + permit: Some(permit), + }); tokio::spawn( async move { diff --git a/p2p/p2p/src/inbound_server.rs b/p2p/p2p/src/inbound_server.rs index 6bc1e6d..aa971a5 100644 --- a/p2p/p2p/src/inbound_server.rs +++ b/p2p/p2p/src/inbound_server.rs @@ -87,8 +87,8 @@ where addr, peer_stream, peer_sink, - direction: ConnectionDirection::InBound, - permit, + direction: ConnectionDirection::Inbound, + permit: Some(permit), }); let cloned_pool = client_pool.clone(); diff --git a/p2p/p2p/src/lib.rs b/p2p/p2p/src/lib.rs index 95154ec..be18c2a 100644 --- a/p2p/p2p/src/lib.rs +++ b/p2p/p2p/src/lib.rs @@ -4,7 +4,6 @@ //! 
a certain [`NetworkZone`] use std::sync::Arc; -use cuprate_async_buffer::BufferStream; use futures::FutureExt; use tokio::{ sync::{mpsc, watch}, @@ -14,11 +13,12 @@ use tokio_stream::wrappers::WatchStream; use tower::{buffer::Buffer, util::BoxCloneService, Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; +use cuprate_async_buffer::BufferStream; use cuprate_p2p_core::{ client::Connector, client::InternalPeerID, services::{AddressBookRequest, AddressBookResponse, PeerSyncRequest}, - CoreSyncSvc, NetworkZone, PeerRequestHandler, + CoreSyncSvc, NetworkZone, ProtocolRequestHandler, }; mod block_downloader; @@ -42,17 +42,18 @@ use connection_maintainer::MakeConnectionRequest; /// /// # Usage /// You must provide: -/// - A peer request handler, which is given to each connection +/// - A protocol request handler, which is given to each connection /// - A core sync service, which keeps track of the sync state of our node #[instrument(level = "debug", name = "net", skip_all, fields(zone = N::NAME))] -pub async fn initialize_network( - peer_req_handler: R, +pub async fn initialize_network( + protocol_request_handler: PR, core_sync_svc: CS, config: P2PConfig, ) -> Result, tower::BoxError> where N: NetworkZone, - R: PeerRequestHandler + Clone, + N::Addr: borsh::BorshDeserialize + borsh::BorshSerialize, + PR: ProtocolRequestHandler + Clone, CS: CoreSyncSvc + Clone, { let address_book = @@ -79,23 +80,21 @@ where basic_node_data.peer_id = 1; } - let outbound_handshaker = cuprate_p2p_core::client::HandShaker::new( - address_book.clone(), - sync_states_svc.clone(), - core_sync_svc.clone(), - peer_req_handler.clone(), - outbound_mkr, - basic_node_data.clone(), - ); + let outbound_handshaker_builder = + cuprate_p2p_core::client::HandshakerBuilder::new(basic_node_data) + .with_address_book(address_book.clone()) + .with_peer_sync_svc(sync_states_svc.clone()) + .with_core_sync_svc(core_sync_svc) + .with_protocol_request_handler(protocol_request_handler) + 
.with_broadcast_stream_maker(outbound_mkr) + .with_connection_parent_span(Span::current()); - let inbound_handshaker = cuprate_p2p_core::client::HandShaker::new( - address_book.clone(), - sync_states_svc.clone(), - core_sync_svc.clone(), - peer_req_handler, - inbound_mkr, - basic_node_data, - ); + let inbound_handshaker = outbound_handshaker_builder + .clone() + .with_broadcast_stream_maker(inbound_mkr) + .build(); + + let outbound_handshaker = outbound_handshaker_builder.build(); let client_pool = client_pool::ClientPool::new(); diff --git a/p2p/p2p/src/sync_states.rs b/p2p/p2p/src/sync_states.rs index 1b4e81a..1484941 100644 --- a/p2p/p2p/src/sync_states.rs +++ b/p2p/p2p/src/sync_states.rs @@ -238,9 +238,6 @@ impl Service> for PeerSyncSvc { #[cfg(test)] mod tests { - use std::sync::Arc; - - use tokio::sync::Semaphore; use tower::{Service, ServiceExt}; use cuprate_p2p_core::{ @@ -255,11 +252,7 @@ mod tests { #[tokio::test] async fn top_sync_channel_updates() { - let semaphore = Arc::new(Semaphore::new(1)); - - let (_g, handle) = HandleBuilder::new() - .with_permit(semaphore.try_acquire_owned().unwrap()) - .build(); + let (_g, handle) = HandleBuilder::new().build(); let (mut svc, mut watch) = PeerSyncSvc::>::new(); @@ -336,11 +329,7 @@ mod tests { #[tokio::test] async fn peer_sync_info_updates() { - let semaphore = Arc::new(Semaphore::new(1)); - - let (_g, handle) = HandleBuilder::new() - .with_permit(semaphore.try_acquire_owned().unwrap()) - .build(); + let (_g, handle) = HandleBuilder::new().build(); let (mut svc, _watch) = PeerSyncSvc::>::new(); From a82c08cc80ed1d3d6b4a0898807665b1e377d51e Mon Sep 17 00:00:00 2001 From: Boog900 Date: Sat, 6 Jul 2024 12:21:46 +0000 Subject: [PATCH 005/104] Storage: fix lifetimes (#215) * fix db lifetimes * fix redb * fix blockchain with redb * add docs --- storage/blockchain/src/free.rs | 14 ++-------- storage/blockchain/src/open_tables.rs | 35 ++++++++++++------------ storage/database/Cargo.toml | 2 +- 
storage/database/src/backend/heed/env.rs | 22 +++++++-------- storage/database/src/backend/redb/env.rs | 14 ++++++---- storage/database/src/env.rs | 33 ++++++++++++---------- 6 files changed, 59 insertions(+), 61 deletions(-) diff --git a/storage/blockchain/src/free.rs b/storage/blockchain/src/free.rs index 255860a..bcbb897 100644 --- a/storage/blockchain/src/free.rs +++ b/storage/blockchain/src/free.rs @@ -50,20 +50,12 @@ pub fn open(config: Config) -> Result { // we want since it is agnostic, so we are responsible for this. { let env_inner = env.env_inner(); - let tx_rw = env_inner.tx_rw(); - let tx_rw = match tx_rw { - Ok(tx_rw) => tx_rw, - Err(e) => return Err(runtime_to_init_error(e)), - }; + let tx_rw = env_inner.tx_rw().map_err(runtime_to_init_error)?; // Create all tables. - if let Err(e) = OpenTables::create_tables(&env_inner, &tx_rw) { - return Err(runtime_to_init_error(e)); - }; + OpenTables::create_tables(&env_inner, &tx_rw).map_err(runtime_to_init_error)?; - if let Err(e) = tx_rw.commit() { - return Err(runtime_to_init_error(e)); - } + TxRw::commit(tx_rw).map_err(runtime_to_init_error)?; } Ok(env) diff --git a/storage/blockchain/src/open_tables.rs b/storage/blockchain/src/open_tables.rs index 4b265e8..b37d260 100644 --- a/storage/blockchain/src/open_tables.rs +++ b/storage/blockchain/src/open_tables.rs @@ -1,7 +1,7 @@ //! TODO //---------------------------------------------------------------------------------------------------- Import -use cuprate_database::{EnvInner, RuntimeError, TxRo, TxRw}; +use cuprate_database::{EnvInner, RuntimeError}; use crate::tables::{TablesIter, TablesMut}; @@ -84,12 +84,12 @@ pub(crate) use call_fn_on_all_tables_or_early_return; /// let mut tables = env_inner.open_tables_mut(&tx_rw)?; /// # Ok(()) } /// ``` -pub trait OpenTables<'env, Ro, Rw> -where - Self: 'env, - Ro: TxRo<'env>, - Rw: TxRw<'env>, -{ +pub trait OpenTables<'env> { + /// The read-only transaction type of the backend. 
+ type Ro<'a>; + /// The read-write transaction type of the backend. + type Rw<'a>; + /// Open all tables in read/iter mode. /// /// This calls [`EnvInner::open_db_ro`] on all database tables @@ -100,7 +100,7 @@ where /// /// As all tables are created upon [`crate::open`], /// this function will never error because a table doesn't exist. - fn open_tables(&'env self, tx_ro: &Ro) -> Result; + fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result; /// Open all tables in read-write mode. /// @@ -109,7 +109,7 @@ where /// /// # Errors /// This will only return [`RuntimeError::Io`] on errors. - fn open_tables_mut(&'env self, tx_rw: &Rw) -> Result; + fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result; /// Create all database tables. /// @@ -118,28 +118,29 @@ where /// /// # Errors /// This will only return [`RuntimeError::Io`] on errors. - fn create_tables(&'env self, tx_rw: &Rw) -> Result<(), RuntimeError>; + fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError>; } -impl<'env, Ei, Ro, Rw> OpenTables<'env, Ro, Rw> for Ei +impl<'env, Ei> OpenTables<'env> for Ei where - Ei: EnvInner<'env, Ro, Rw>, - Ro: TxRo<'env>, - Rw: TxRw<'env>, + Ei: EnvInner<'env>, { - fn open_tables(&'env self, tx_ro: &Ro) -> Result { + type Ro<'a> = >::Ro<'a>; + type Rw<'a> = >::Rw<'a>; + + fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result { call_fn_on_all_tables_or_early_return! { Self::open_db_ro(self, tx_ro) } } - fn open_tables_mut(&'env self, tx_rw: &Rw) -> Result { + fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result { call_fn_on_all_tables_or_early_return! { Self::open_db_rw(self, tx_rw) } } - fn create_tables(&'env self, tx_rw: &Rw) -> Result<(), RuntimeError> { + fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError> { match call_fn_on_all_tables_or_early_return! 
{ Self::create_db(self, tx_rw) } { diff --git a/storage/database/Cargo.toml b/storage/database/Cargo.toml index 887f1b6..e2dad70 100644 --- a/storage/database/Cargo.toml +++ b/storage/database/Cargo.toml @@ -9,7 +9,7 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/storage/database" keywords = ["cuprate", "database"] [features] -default = ["heed"] +# default = ["heed"] # default = ["redb"] # default = ["redb-memory"] heed = ["dep:heed"] diff --git a/storage/database/src/backend/heed/env.rs b/storage/database/src/backend/heed/env.rs index 69e3b17..0c2847f 100644 --- a/storage/database/src/backend/heed/env.rs +++ b/storage/database/src/backend/heed/env.rs @@ -244,25 +244,28 @@ impl Env for ConcreteEnv { } //---------------------------------------------------------------------------------------------------- EnvInner Impl -impl<'env> EnvInner<'env, heed::RoTxn<'env>, RefCell>> - for RwLockReadGuard<'env, heed::Env> +impl<'env> EnvInner<'env> for RwLockReadGuard<'env, heed::Env> where Self: 'env, { + type Ro<'a> = heed::RoTxn<'a>; + + type Rw<'a> = RefCell>; + #[inline] - fn tx_ro(&'env self) -> Result, RuntimeError> { + fn tx_ro(&self) -> Result, RuntimeError> { Ok(self.read_txn()?) } #[inline] - fn tx_rw(&'env self) -> Result>, RuntimeError> { + fn tx_rw(&self) -> Result, RuntimeError> { Ok(RefCell::new(self.write_txn()?)) } #[inline] fn open_db_ro( &self, - tx_ro: &heed::RoTxn<'env>, + tx_ro: &Self::Ro<'_>, ) -> Result + DatabaseIter, RuntimeError> { // Open up a read-only database using our table's const metadata. // @@ -280,7 +283,7 @@ where #[inline] fn open_db_rw( &self, - tx_rw: &RefCell>, + tx_rw: &Self::Rw<'_>, ) -> Result, RuntimeError> { // Open up a read/write database using our table's const metadata. // @@ -293,7 +296,7 @@ where }) } - fn create_db(&self, tx_rw: &RefCell>) -> Result<(), RuntimeError> { + fn create_db(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError> { // Create a database using our: // - [`Table`]'s const metadata. 
// - (potentially) our [`Key`] comparison function @@ -325,10 +328,7 @@ where } #[inline] - fn clear_db( - &self, - tx_rw: &mut RefCell>, - ) -> Result<(), RuntimeError> { + fn clear_db(&self, tx_rw: &mut Self::Rw<'_>) -> Result<(), RuntimeError> { let tx_rw = tx_rw.get_mut(); // Open the table. We don't care about flags or key diff --git a/storage/database/src/backend/redb/env.rs b/storage/database/src/backend/redb/env.rs index 65e3e05..4a178ad 100644 --- a/storage/database/src/backend/redb/env.rs +++ b/storage/database/src/backend/redb/env.rs @@ -118,18 +118,20 @@ impl Env for ConcreteEnv { } //---------------------------------------------------------------------------------------------------- EnvInner Impl -impl<'env> EnvInner<'env, redb::ReadTransaction, redb::WriteTransaction> - for (&'env redb::Database, redb::Durability) +impl<'env> EnvInner<'env> for (&'env redb::Database, redb::Durability) where Self: 'env, { + type Ro<'a> = redb::ReadTransaction; + type Rw<'a> = redb::WriteTransaction; + #[inline] - fn tx_ro(&'env self) -> Result { + fn tx_ro(&self) -> Result { Ok(self.0.begin_read()?) } #[inline] - fn tx_rw(&'env self) -> Result { + fn tx_rw(&self) -> Result { // `redb` has sync modes on the TX level, unlike heed, // which sets it at the Environment level. // @@ -142,7 +144,7 @@ where #[inline] fn open_db_ro( &self, - tx_ro: &redb::ReadTransaction, + tx_ro: &Self::Ro<'_>, ) -> Result + DatabaseIter, RuntimeError> { // Open up a read-only database using our `T: Table`'s const metadata. let table: redb::TableDefinition<'static, StorableRedb, StorableRedb> = @@ -154,7 +156,7 @@ where #[inline] fn open_db_rw( &self, - tx_rw: &redb::WriteTransaction, + tx_rw: &Self::Rw<'_>, ) -> Result, RuntimeError> { // Open up a read/write database using our `T: Table`'s const metadata. 
let table: redb::TableDefinition<'static, StorableRedb, StorableRedb> = diff --git a/storage/database/src/env.rs b/storage/database/src/env.rs index 291ac9d..de094a9 100644 --- a/storage/database/src/env.rs +++ b/storage/database/src/env.rs @@ -62,17 +62,17 @@ pub trait Env: Sized { // For `heed`, this is just `heed::Env`, for `redb` this is // `(redb::Database, redb::Durability)` as each transaction // needs the sync mode set during creation. - type EnvInner<'env>: EnvInner<'env, Self::TxRo<'env>, Self::TxRw<'env>> + type EnvInner<'env>: EnvInner<'env> where Self: 'env; /// The read-only transaction type of the backend. - type TxRo<'env>: TxRo<'env> + 'env + type TxRo<'env>: TxRo<'env> where Self: 'env; /// The read/write transaction type of the backend. - type TxRw<'env>: TxRw<'env> + 'env + type TxRw<'env>: TxRw<'env> where Self: 'env; @@ -209,23 +209,23 @@ Subsequent table opens will follow the flags/ordering, but only if /// /// # Invariant #[doc = doc_heed_create_db_invariant!()] -pub trait EnvInner<'env, Ro, Rw> -where - Self: 'env, - Ro: TxRo<'env>, - Rw: TxRw<'env>, -{ +pub trait EnvInner<'env> { + /// The read-only transaction type of the backend. + type Ro<'a>: TxRo<'a>; + /// The read-write transaction type of the backend. + type Rw<'a>: TxRw<'a>; + /// Create a read-only transaction. /// /// # Errors /// This will only return [`RuntimeError::Io`] if it errors. - fn tx_ro(&'env self) -> Result; + fn tx_ro(&self) -> Result, RuntimeError>; /// Create a read/write transaction. /// /// # Errors /// This will only return [`RuntimeError::Io`] if it errors. - fn tx_rw(&'env self) -> Result; + fn tx_rw(&self) -> Result, RuntimeError>; /// Open a database in read-only mode. /// @@ -252,7 +252,7 @@ where #[doc = doc_heed_create_db_invariant!()] fn open_db_ro( &self, - tx_ro: &Ro, + tx_ro: &Self::Ro<'_>, ) -> Result + DatabaseIter, RuntimeError>; /// Open a database in read/write mode. 
@@ -271,7 +271,10 @@ where /// /// # Invariant #[doc = doc_heed_create_db_invariant!()] - fn open_db_rw(&self, tx_rw: &Rw) -> Result, RuntimeError>; + fn open_db_rw( + &self, + tx_rw: &Self::Rw<'_>, + ) -> Result, RuntimeError>; /// Create a database table. /// @@ -282,7 +285,7 @@ where /// /// # Invariant #[doc = doc_heed_create_db_invariant!()] - fn create_db(&self, tx_rw: &Rw) -> Result<(), RuntimeError>; + fn create_db(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError>; /// Clear all `(key, value)`'s from a database table. /// @@ -297,5 +300,5 @@ where /// /// If the specified table is not created upon before this function is called, /// this will return [`RuntimeError::TableNotFound`]. - fn clear_db(&self, tx_rw: &mut Rw) -> Result<(), RuntimeError>; + fn clear_db(&self, tx_rw: &mut Self::Rw<'_>) -> Result<(), RuntimeError>; } From 136abf7edda052dff8fb777efb6f51d70b029759 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Tue, 9 Jul 2024 17:58:02 -0400 Subject: [PATCH 006/104] rpc: feature flags, macro changes, misc setup (#218) * `serde/epee` feature flags * modify type generator macros * add `defaults.rs` * add `free.rs` * add `misc` module * modify `base.rs`, `contants.rs` * remove `binary_string.rs`, `status.rs` * fix macro usage * base: re-add `AccessRequestBase` * fix default functions * tx_entry: fix link --- rpc/types/Cargo.toml | 8 +- rpc/types/README.md | 49 +- rpc/types/src/base.rs | 76 +-- rpc/types/src/constants.rs | 22 +- rpc/types/src/defaults.rs | 70 +++ rpc/types/src/free.rs | 18 + rpc/types/src/json.rs | 118 ++-- rpc/types/src/lib.rs | 36 +- rpc/types/src/macros.rs | 413 ++++++++------ rpc/types/src/{ => misc}/binary_string.rs | 6 +- rpc/types/src/misc/block_complete_entry.rs | 37 ++ rpc/types/src/misc/key_image_spent_status.rs | 48 ++ rpc/types/src/misc/misc.rs | 539 +++++++++++++++++++ rpc/types/src/misc/mod.rs | 34 ++ rpc/types/src/misc/pool_info_extent.rs | 49 ++ rpc/types/src/{ => misc}/status.rs | 29 +- 
rpc/types/src/misc/tx_entry.rs | 59 ++ rpc/types/src/other.rs | 12 +- 18 files changed, 1259 insertions(+), 364 deletions(-) create mode 100644 rpc/types/src/defaults.rs create mode 100644 rpc/types/src/free.rs rename rpc/types/src/{ => misc}/binary_string.rs (80%) create mode 100644 rpc/types/src/misc/block_complete_entry.rs create mode 100644 rpc/types/src/misc/key_image_spent_status.rs create mode 100644 rpc/types/src/misc/misc.rs create mode 100644 rpc/types/src/misc/mod.rs create mode 100644 rpc/types/src/misc/pool_info_extent.rs rename rpc/types/src/{ => misc}/status.rs (88%) create mode 100644 rpc/types/src/misc/tx_entry.rs diff --git a/rpc/types/Cargo.toml b/rpc/types/Cargo.toml index 30e4aa9..c088e4d 100644 --- a/rpc/types/Cargo.toml +++ b/rpc/types/Cargo.toml @@ -9,14 +9,16 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/rpc/types" keywords = ["cuprate", "rpc", "types", "monero"] [features] -default = [] +default = ["serde", "epee"] +serde = ["dep:serde"] +epee = ["dep:cuprate-epee-encoding"] [dependencies] -cuprate-epee-encoding = { path = "../../net/epee-encoding" } +cuprate-epee-encoding = { path = "../../net/epee-encoding", optional = true } monero-serai = { workspace = true } paste = { workspace = true } -serde = { workspace = true } +serde = { workspace = true, optional = true } [dev-dependencies] serde_json = { workspace = true } diff --git a/rpc/types/README.md b/rpc/types/README.md index 65b6d90..21905fa 100644 --- a/rpc/types/README.md +++ b/rpc/types/README.md @@ -10,13 +10,14 @@ This crate ports the types used in Monero's RPC interface, including: # Modules This crate's types are split in the following manner: -This crate has 4 modules: -- The root module; `cuprate_rpc_types` -- [`json`] module; JSON types from the `/json_rpc` endpoint -- [`bin`] module; Binary types from the binary endpoints -- [`other`] module; Misc JSON types from other endpoints - -Miscellaneous types are found in the root module, e.g. [`crate::Status`]. 
+| Module | Purpose | +|--------|---------| +| The root module | Miscellaneous items, e.g. constants. +| [`json`] | Contains JSON request/response (some mixed with binary) that all share the common `/json_rpc` endpoint. | +| [`bin`] | Contains request/response types that are expected to be fully in binary (`cuprate_epee_encoding`) in `monerod` and `cuprated`'s RPC interface. These are called at a custom endpoint instead of `/json_rpc`, e.g. `/get_blocks.bin`. | +| [`other`] | Contains request/response types that are JSON, but aren't called at `/json_rpc` (e.g. [`crate::other::GetHeightRequest`]). | +| [`misc`] | Contains miscellaneous types, e.g. [`crate::misc::Status`]. Many of types here are found and used in request/response types, for example, [`crate::misc::BlockHeader`] is used in [`crate::json::GetLastBlockHeaderResponse`]. | +| [`base`] | Contains base types flattened into many request/response types. Each type in `{json,bin,other}` come in pairs and have identical names, but are suffixed with either `Request` or `Response`. e.g. [`GetBlockCountRequest`](crate::json::GetBlockCountRequest) & [`GetBlockCountResponse`](crate::json::GetBlockCountResponse). @@ -30,23 +31,21 @@ However, each type will document: # Naming The naming for types within `{json,bin,other}` follow the following scheme: -- Convert the endpoint or method name into `UpperCamelCase` -- Remove any suffix extension +1. Convert the endpoint or method name into `UpperCamelCase` +1. Remove any suffix extension +1. 
Add `Request/Response` suffix For example: | Endpoint/method | Crate location and name | |-----------------|-------------------------| | [`get_block_count`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_block_count) | [`json::GetBlockCountRequest`] & [`json::GetBlockCountResponse`] -| [`/get_blocks.bin`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_blockbin) | `bin::GetBlocksRequest` & `bin::GetBlocksResponse` -| [`/get_height`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_height) | `other::GetHeightRequest` & `other::GetHeightResponse` - -TODO: fix doc links when types are ready. +| [`/get_blocks.bin`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_blockbin) | [`bin::GetBlocksRequest`] & [`bin::GetBlocksResponse`] +| [`/get_height`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_height) | [`other::GetHeightRequest`] & [`other::GetHeightResponse`] # Mixed types -Note that some types within [`other`] mix JSON & binary together, i.e., -the message overall is JSON, however some fields contain binary -values inside JSON strings, for example: +Note that some types mix JSON & binary together, i.e., the message overall is JSON, +however some fields contain binary values inside JSON strings, for example: ```json { @@ -57,6 +56,20 @@ values inside JSON strings, for example: } ``` -`binary` here is (de)serialized as a normal [`String`]. In order to be clear on which fields contain binary data, the struct fields that have them will use [`crate::BinaryString`] instead of [`String`]. +`binary` here is (de)serialized as a normal [`String`]. In order to be clear on which fields contain binary data, the struct fields that have them will use [`crate::misc::BinaryString`] instead of [`String`]. -TODO: list the specific types. 
\ No newline at end of file +These mixed types are: +- [`crate::json::GetTransactionPoolBacklogResponse`] +- [`crate::json::GetOutputDistributionResponse`] + +TODO: we need to figure out a type that (de)serializes correctly, `String` errors with `serde_json` + +# Feature flags +List of feature flags for `cuprate-rpc-types`. + +All are enabled by default. + +| Feature flag | Does what | +|--------------|-----------| +| `serde` | Implements `serde` on all types +| `epee` | Implements `cuprate_epee_encoding` on all types \ No newline at end of file diff --git a/rpc/types/src/base.rs b/rpc/types/src/base.rs index 6a29367..f13ac40 100644 --- a/rpc/types/src/base.rs +++ b/rpc/types/src/base.rs @@ -10,76 +10,44 @@ //! - //! - //! - +//! +//! Note that this library doesn't use [`AccessRequestBase`](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h#L114-L122) found in `monerod` +//! as the type is practically deprecated. +//! +//! Although, [`AccessResponseBase`] still exists as to allow +//! outputting the same JSON fields as `monerod` (even if deprecated). //---------------------------------------------------------------------------------------------------- Import +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +#[cfg(feature = "epee")] use cuprate_epee_encoding::epee_object; -use crate::Status; - -//---------------------------------------------------------------------------------------------------- Macro -/// Link the original `monerod` definition for RPC base types. -macro_rules! monero_rpc_base_link { - ($start:literal..=$end:literal) => { - concat!( - "[Definition](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h#L", - stringify!($start), - "-L", - stringify!($end), - ")." 
- ) - }; -} +use crate::{macros::monero_definition_link, misc::Status}; //---------------------------------------------------------------------------------------------------- Requests -/// The most common base for responses (nothing). -/// -#[doc = monero_rpc_base_link!(95..=99)] -#[derive( - Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, -)] -pub struct EmptyRequestBase; - -cuprate_epee_encoding::epee_object! { - EmptyRequestBase, -} - /// A base for RPC request types that support RPC payment. /// -#[doc = monero_rpc_base_link!(114..=122)] -#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[doc = monero_definition_link!(cc73fe71162d564ffda8e549b79a350bca53c454, "rpc/core_rpc_server_commands_defs.h", 114..=122)] +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct AccessRequestBase { /// The RPC payment client. pub client: String, } -cuprate_epee_encoding::epee_object! { +#[cfg(feature = "epee")] +epee_object! { AccessRequestBase, client: String, } //---------------------------------------------------------------------------------------------------- Responses -/// An empty response base. -/// -/// This is for response types that do not contain -/// any extra fields, e.g. TODO. -// [`CalcPowResponse`](crate::json::CalcPowResponse). -#[derive( - Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, -)] -pub struct EmptyResponseBase; - -cuprate_epee_encoding::epee_object! { - EmptyResponseBase, -} - +#[doc = monero_definition_link!(cc73fe71162d564ffda8e549b79a350bca53c454, "rpc/core_rpc_server_commands_defs.h", 101..=112)] /// The most common base for responses. 
-/// -#[doc = monero_rpc_base_link!(101..=112)] -#[derive( - Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, -)] +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct ResponseBase { /// General RPC error code. [`Status::Ok`] means everything looks good. pub status: Status, @@ -89,19 +57,20 @@ pub struct ResponseBase { pub untrusted: bool, } +#[cfg(feature = "epee")] epee_object! { ResponseBase, status: Status, untrusted: bool, } +#[doc = monero_definition_link!(cc73fe71162d564ffda8e549b79a350bca53c454, "rpc/core_rpc_server_commands_defs.h", 124..=136)] /// A base for RPC response types that support RPC payment. -/// -#[doc = monero_rpc_base_link!(124..=136)] -#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct AccessResponseBase { /// A flattened [`ResponseBase`]. - #[serde(flatten)] + #[cfg_attr(feature = "serde", serde(flatten))] pub response_base: ResponseBase, /// If payment for RPC is enabled, the number of credits /// available to the requesting client. Otherwise, `0`. @@ -111,6 +80,7 @@ pub struct AccessResponseBase { pub top_hash: String, } +#[cfg(feature = "epee")] epee_object! { AccessResponseBase, credits: u64, diff --git a/rpc/types/src/constants.rs b/rpc/types/src/constants.rs index 2d5266f..e580283 100644 --- a/rpc/types/src/constants.rs +++ b/rpc/types/src/constants.rs @@ -15,6 +15,7 @@ // What this means for Cuprate: just follow `monerod`. 
//---------------------------------------------------------------------------------------------------- Import +use crate::macros::monero_definition_link; //---------------------------------------------------------------------------------------------------- Status // Common RPC status strings: @@ -23,39 +24,32 @@ // Note that these are _distinct_ from the ones in ZMQ: // . -/// +#[doc = monero_definition_link!(cc73fe71162d564ffda8e549b79a350bca53c454, "/rpc/core_rpc_server_commands_defs.h", 78)] pub const CORE_RPC_STATUS_OK: &str = "OK"; -/// +#[doc = monero_definition_link!(cc73fe71162d564ffda8e549b79a350bca53c454, "/rpc/core_rpc_server_commands_defs.h", 79)] pub const CORE_RPC_STATUS_BUSY: &str = "BUSY"; -/// +#[doc = monero_definition_link!(cc73fe71162d564ffda8e549b79a350bca53c454, "/rpc/core_rpc_server_commands_defs.h", 80)] pub const CORE_RPC_STATUS_NOT_MINING: &str = "NOT MINING"; -/// +#[doc = monero_definition_link!(cc73fe71162d564ffda8e549b79a350bca53c454, "/rpc/core_rpc_server_commands_defs.h", 81)] pub const CORE_RPC_STATUS_PAYMENT_REQUIRED: &str = "PAYMENT REQUIRED"; /// Custom `CORE_RPC_STATUS` for usage in Cuprate. pub const CORE_RPC_STATUS_UNKNOWN: &str = "UNKNOWN"; //---------------------------------------------------------------------------------------------------- Versions +#[doc = monero_definition_link!(cc73fe71162d564ffda8e549b79a350bca53c454, "/rpc/core_rpc_server_commands_defs.h", 90)] /// RPC major version. -/// -/// See: . pub const CORE_RPC_VERSION_MAJOR: u32 = 3; +#[doc = monero_definition_link!(cc73fe71162d564ffda8e549b79a350bca53c454, "/rpc/core_rpc_server_commands_defs.h", 91)] /// RPC miror version. -/// -/// See: . pub const CORE_RPC_VERSION_MINOR: u32 = 14; +#[doc = monero_definition_link!(cc73fe71162d564ffda8e549b79a350bca53c454, "/rpc/core_rpc_server_commands_defs.h", 92..=93)] /// RPC version. -/// -/// See: . 
-/// -/// ```rust -/// assert_eq!(cuprate_rpc_types::CORE_RPC_VERSION, 196_622); -/// ``` pub const CORE_RPC_VERSION: u32 = (CORE_RPC_VERSION_MAJOR << 16) | CORE_RPC_VERSION_MINOR; //---------------------------------------------------------------------------------------------------- Tests diff --git a/rpc/types/src/defaults.rs b/rpc/types/src/defaults.rs new file mode 100644 index 0000000..9366a26 --- /dev/null +++ b/rpc/types/src/defaults.rs @@ -0,0 +1,70 @@ +//! These functions define the default values +//! of optional fields in request/response types. +//! +//! For example, [`crate::json::GetBlockRequest`] +//! has a [`crate::json::GetBlockRequest::height`] +//! field and a [`crate::json::GetBlockRequest::hash`] +//! field, when the RPC interface reads JSON without +//! `height`, it will use [`default_height`] to fill that in. + +//---------------------------------------------------------------------------------------------------- Import +use std::borrow::Cow; + +//---------------------------------------------------------------------------------------------------- TODO +/// Default [`bool`] type used in request/response types, `false`. +#[inline] +pub(crate) const fn default_false() -> bool { + false +} + +/// Default [`bool`] type used in _some_ request/response types, `true`. +#[inline] +pub(crate) const fn default_true() -> bool { + true +} + +/// Default `Cow<'static, str` type used in request/response types. +#[inline] +pub(crate) const fn default_cow_str() -> Cow<'static, str> { + Cow::Borrowed("") +} + +/// Default [`String`] type used in request/response types. +#[inline] +pub(crate) const fn default_string() -> String { + String::new() +} + +/// Default block height used in request/response types. +#[inline] +pub(crate) const fn default_height() -> u64 { + 0 +} + +/// Default [`Vec`] used in request/response types. +#[inline] +pub(crate) const fn default_vec() -> Vec { + Vec::new() +} + +/// Default `0` value used in request/response types. 
+#[inline] +pub(crate) fn default_zero>() -> T { + T::from(0) +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + use super::*; + + /// Tests that [`default_zero`] returns `0` on all unsigned numbers. + #[test] + fn zero() { + assert_eq!(default_zero::(), 0); + assert_eq!(default_zero::(), 0); + assert_eq!(default_zero::(), 0); + assert_eq!(default_zero::(), 0); + assert_eq!(default_zero::(), 0); + } +} diff --git a/rpc/types/src/free.rs b/rpc/types/src/free.rs new file mode 100644 index 0000000..043a520 --- /dev/null +++ b/rpc/types/src/free.rs @@ -0,0 +1,18 @@ +//! Free functions. + +//---------------------------------------------------------------------------------------------------- Serde +// These are functions used for conditionally (de)serialization. + +/// Returns `true` if the input `u` is equal to `0`. +#[inline] +#[allow(clippy::trivially_copy_pass_by_ref)] // serde needs `&` +pub(crate) const fn is_zero(u: &u64) -> bool { + *u == 0 +} + +/// Returns `true` the input `u` is equal to `1`. +#[inline] +#[allow(clippy::trivially_copy_pass_by_ref)] // serde needs `&` +pub(crate) const fn is_one(u: &u64) -> bool { + *u == 1 +} diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index 5f5f8ff..c258d59 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -3,10 +3,7 @@ //! . //---------------------------------------------------------------------------------------------------- Import -use crate::{ - base::{EmptyRequestBase, EmptyResponseBase, ResponseBase}, - macros::define_request_and_response, -}; +use crate::{base::ResponseBase, macros::define_request_and_response}; //---------------------------------------------------------------------------------------------------- Struct definitions // This generates 2 structs: @@ -26,38 +23,43 @@ define_request_and_response! { // The base type name. GetBlockTemplate, - // The base request type. 
+ // The request type. // - // This must be a type found in [`crate::base`]. - // It acts as a "base" that gets flattened into - // the actually request type. + // If `Request {/* fields */}` is provided, a struct is generate as-is. // - // "Flatten" means the field(s) of a struct gets inlined - // directly into the struct during (de)serialization, see: - // . - // - // For example here, we're using [`crate::base::EmptyRequestBase`], - // which means that there is no extra fields flattened. - // - // If a request is not specified here, it will create a `type alias YOUR_REQUEST_TYPE = ()` + // If `Request {}` is specified here, it will create a `pub type YOUR_REQUEST_TYPE = ()` // instead of a `struct`, see below in other macro definitions for an example. - EmptyRequestBase { - reserve_size: u64, - wallet_address: String, - prev_block: String, - extra_nonce: String, - }, - - // The base response type. - // - // This is the same as the request base type, - // it must be a type found in [`crate::base`]. // // If there are any additional attributes (`/// docs` or `#[derive]`s) // for the struct, they go here, e.g.: // #[derive(Copy)] + Request { + // Within the `{}` is an infinite matching pattern of: + // ``` + // $ATTRIBUTES + // $FIELD_NAME: $FIELD_TYPE, + // ``` + // The struct generated and all fields are `pub`. + extra_nonce: String, + prev_block: String, + reserve_size: u64, + wallet_address: String, + }, + + // The response type. + // + // If `Response {/* fields */}` is used, + // this will generate a struct as-is. + // + // If a type found in [`crate::base`] is used, + // It acts as a "base" that gets flattened into + // the actual request type. + // + // "Flatten" means the field(s) of a struct gets inlined + // directly into the struct during (de)serialization, see: + // . 
ResponseBase { - // This is using `crate::base::ResponseBase`, + // This is using [`crate::base::ResponseBase`], // so the type we generate will contain this field: // ``` // base: crate::base::ResponseBase, @@ -69,56 +71,18 @@ define_request_and_response! { // status: crate::Status, // untrusted: bool, // ``` - - // Within the `{}` is an infinite matching pattern of: - // ``` - // $ATTRIBUTES - // $FIELD_NAME: $FIELD_TYPE, - // ``` - // The struct generated and all fields are `pub`. - difficulty: u64, - wide_difficulty: String, - difficulty_top64: u64, - height: u64, - reserved_offset: u64, - expected_reward: u64, - prev_hash: String, - seed_height: u64, - seed_hash: String, - next_seed_hash: String, - blocktemplate_blob: String, blockhashing_blob: String, - } -} - -define_request_and_response! { - get_block_count, - cc73fe71162d564ffda8e549b79a350bca53c454 => - core_rpc_server_commands_defs.h => 919..=933, - GetBlockCount, - - // There is no request type specified, - // this will cause the macro to generate a - // type alias to `()` instead of a `struct`. - - ResponseBase { - count: u64, - } -} - -define_request_and_response! 
{ - on_get_block_hash, - cc73fe71162d564ffda8e549b79a350bca53c454 => - core_rpc_server_commands_defs.h => 935..=939, - OnGetBlockHash, - #[derive(Copy)] - EmptyRequestBase { - #[serde(flatten)] - block_height: u64, - }, - EmptyResponseBase { - #[serde(flatten)] - block_hash: String, + blocktemplate_blob: String, + difficulty_top64: u64, + difficulty: u64, + expected_reward: u64, + height: u64, + next_seed_hash: String, + prev_hash: String, + reserved_offset: u64, + seed_hash: String, + seed_height: u64, + wide_difficulty: String, } } diff --git a/rpc/types/src/lib.rs b/rpc/types/src/lib.rs index 780208b..45cca69 100644 --- a/rpc/types/src/lib.rs +++ b/rpc/types/src/lib.rs @@ -1,4 +1,5 @@ #![doc = include_str!("../README.md")] +#![cfg_attr(docsrs, feature(doc_cfg))] //---------------------------------------------------------------------------------------------------- Lints // Forbid lints. // Our code, and code generated (e.g macros) cannot overrule these. @@ -13,7 +14,6 @@ unused_allocation, coherence_leak_check, while_true, - clippy::missing_docs_in_private_items, // Maybe can be put into `#[deny]`. unconditional_recursion, @@ -82,7 +82,15 @@ clippy::option_if_let_else, )] // Allow some lints when running in debug mode. -#![cfg_attr(debug_assertions, allow(clippy::todo, clippy::multiple_crate_versions))] +#![cfg_attr( + debug_assertions, + allow( + clippy::todo, + clippy::multiple_crate_versions, + unused_imports, + unused_variables + ) +)] // Allow some lints in tests. 
#![cfg_attr( test, @@ -94,23 +102,25 @@ ) )] // TODO: remove me after finishing impl -#![allow(dead_code)] +#![allow( + dead_code, + rustdoc::broken_intra_doc_links // TODO: remove after `{bin,json,other}.rs` gets merged +)] -//---------------------------------------------------------------------------------------------------- Use -mod binary_string; +//---------------------------------------------------------------------------------------------------- Mod mod constants; +mod defaults; +mod free; mod macros; -mod status; -pub use binary_string::BinaryString; +pub mod base; +pub mod bin; +pub mod json; +pub mod misc; +pub mod other; + pub use constants::{ CORE_RPC_STATUS_BUSY, CORE_RPC_STATUS_NOT_MINING, CORE_RPC_STATUS_OK, CORE_RPC_STATUS_PAYMENT_REQUIRED, CORE_RPC_STATUS_UNKNOWN, CORE_RPC_VERSION, CORE_RPC_VERSION_MAJOR, CORE_RPC_VERSION_MINOR, }; -pub use status::Status; - -pub mod base; -pub mod bin; -pub mod json; -pub mod other; diff --git a/rpc/types/src/macros.rs b/rpc/types/src/macros.rs index 2728800..31bc6be 100644 --- a/rpc/types/src/macros.rs +++ b/rpc/types/src/macros.rs @@ -1,14 +1,12 @@ //! Macros. -//---------------------------------------------------------------------------------------------------- Struct definition -/// A template for generating 2 `struct`s with a bunch of information filled out. -/// -/// These are the RPC request and response `struct`s. +//---------------------------------------------------------------------------------------------------- define_request_and_response +/// A template for generating the RPC request and response `struct`s. 
/// /// These `struct`s automatically implement: /// - `Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash` /// - `serde::{Serialize, Deserialize}` -/// - `epee_encoding::EpeeObject` +/// - `cuprate_epee_encoding::EpeeObject` /// /// It's best to see the output of this macro via the documentation /// of the generated structs via `cargo doc`s to see which parts @@ -17,110 +15,35 @@ /// See the [`crate::json`] module for example usage. /// /// # Macro internals -/// This macro has 2 branches with almost the same output: -/// 1. An empty `Request` type -/// 2. An `Request` type with fields +/// This macro uses: +/// - [`__define_request`] +/// - [`__define_response`] +/// - [`__define_request_and_response_doc`] /// -/// The first branch is the same as the second with the exception -/// that if the caller of this macro provides no fields, it will -/// generate: +/// # `__define_request` +/// This macro has 2 branches. If the caller provides +/// `Request {}`, i.e. no fields, it will generate: /// ``` /// pub type Request = (); /// ``` -/// instead of: +/// If they _did_ specify fields, it will generate: /// ``` /// pub struct Request {/* fields */} /// ``` -/// /// This is because having a bunch of types that are all empty structs /// means they are not compatible and it makes it cumbersome for end-users. /// Really, they semantically are empty types, so `()` is used. /// -/// Again, other than this, the 2 branches do (should) not differ. +/// # `__define_response` +/// This macro has 2 branches. If the caller provides `Response` +/// it will generate a normal struct with no additional fields. /// -/// FIXME: there's probably a less painful way to branch here on input -/// without having to duplicate 80% of the macro. Sub-macros were attempted -/// but they ended up unreadable. So for now, make sure to fix the other -/// branch as well when making changes. The only de-duplicated part is -/// the doc generation with [`define_request_and_response_doc`]. 
+/// If the caller provides a base type from [`crate::base`], it will
+/// flatten that into the response type automatically.
- pub type [<$type_name Request>] = (); - - #[allow(dead_code)] - #[allow(missing_docs)] - #[derive(serde::Serialize, serde::Deserialize)] - #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] - $( #[$response_type_attr] )* - #[doc = $crate::macros::define_request_and_response_doc!( - "request", - $monero_daemon_rpc_doc_link, - $monero_code_commit, - $monero_code_filename, - $monero_code_filename_extension, - $monero_code_line_start, - $monero_code_line_end, - [<$type_name Response>], - )] - pub struct [<$type_name Response>] { - #[serde(flatten)] - pub base: $response_base_type, - - $( - $( #[$response_field_attr] )* - pub $response_field: $response_field_type, - )* - } - - ::cuprate_epee_encoding::epee_object! { - [<$type_name Response>], - $( - $response_field: $response_field_type, - )* - !flatten: base: $response_base_type, - } - }}; - - //------------------------------------------------------------------------------ - // This version of the macro expects a `Request` type with fields. ( // The markdown tag for Monero RPC documentation. Not necessarily the endpoint. $monero_daemon_rpc_doc_link:ident, @@ -134,15 +57,18 @@ macro_rules! define_request_and_response { $monero_code_line_end:literal, // The base `struct` name. + // Attributes added here will apply to _both_ + // request and response types. + $( #[$type_attr:meta] )* $type_name:ident, // The request type (and any doc comments, derives, etc). $( #[$request_type_attr:meta] )* - $request_base_type:ty { + Request { // And any fields. $( $( #[$request_field_attr:meta] )* - $request_field:ident: $request_field_type:ty, + $request_field:ident: $request_field_type:ty $(= $request_field_type_default:expr)?, )* }, @@ -152,84 +78,213 @@ macro_rules! define_request_and_response { // And any fields. 
$( $( #[$response_field_attr:meta] )* - $response_field:ident: $response_field_type:ty, + $response_field:ident: $response_field_type:ty $(= $response_field_type_default:expr)?, )* } ) => { paste::paste! { - #[allow(dead_code)] - #[allow(missing_docs)] - #[derive(serde::Serialize, serde::Deserialize)] - #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] - $( #[$request_type_attr] )* - #[doc = $crate::macros::define_request_and_response_doc!( - "response", - $monero_daemon_rpc_doc_link, - $monero_code_commit, - $monero_code_filename, - $monero_code_filename_extension, - $monero_code_line_start, - $monero_code_line_end, - [<$type_name Request>], - )] - pub struct [<$type_name Request>] { - #[serde(flatten)] - pub base: $request_base_type, - - $( - $( #[$request_field_attr] )* - pub $request_field: $request_field_type, - )* + $crate::macros::__define_request! { + #[doc = $crate::macros::__define_request_and_response_doc!( + "response" => [<$type_name Response>], + $monero_daemon_rpc_doc_link, + $monero_code_commit, + $monero_code_filename, + $monero_code_filename_extension, + $monero_code_line_start, + $monero_code_line_end, + )] + /// + $( #[$type_attr] )* + /// + $( #[$request_type_attr] )* + [<$type_name Request>] { + $( + $( #[$request_field_attr] )* + $request_field: $request_field_type $(= $request_field_type_default)?, + )* + } } - ::cuprate_epee_encoding::epee_object! 
{ - [<$type_name Request>], - $( - $request_field: $request_field_type, - )* - !flatten: base: $request_base_type, - } - - #[allow(dead_code)] - #[allow(missing_docs)] - #[derive(serde::Serialize, serde::Deserialize)] - #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] - $( #[$response_type_attr] )* - #[doc = $crate::macros::define_request_and_response_doc!( - "request", - $monero_daemon_rpc_doc_link, - $monero_code_commit, - $monero_code_filename, - $monero_code_filename_extension, - $monero_code_line_start, - $monero_code_line_end, - [<$type_name Response>], - )] - pub struct [<$type_name Response>] { - #[serde(flatten)] - pub base: $response_base_type, - - $( - $( #[$response_field_attr] )* - pub $response_field: $response_field_type, - )* - } - - ::cuprate_epee_encoding::epee_object! { - [<$type_name Response>], - $( - $response_field: $response_field_type, - )* - !flatten: base: $response_base_type, + $crate::macros::__define_response! { + #[allow(dead_code)] + #[allow(missing_docs)] + #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] + #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] + #[doc = $crate::macros::__define_request_and_response_doc!( + "request" => [<$type_name Request>], + $monero_daemon_rpc_doc_link, + $monero_code_commit, + $monero_code_filename, + $monero_code_filename_extension, + $monero_code_line_start, + $monero_code_line_end, + )] + /// + $( #[$type_attr] )* + /// + $( #[$response_type_attr] )* + $response_base_type => [<$type_name Response>] { + $( + $( #[$response_field_attr] )* + $response_field: $response_field_type $(= $response_field_type_default)?, + )* + } } }}; } pub(crate) use define_request_and_response; +//---------------------------------------------------------------------------------------------------- define_request +/// Define a request type. +/// +/// This is only used in [`define_request_and_response`], see it for docs. 
+/// +/// `__` is used to notate that this shouldn't be called directly. +macro_rules! __define_request { + //------------------------------------------------------------------------------ + // This branch will generate a type alias to `()` if only given `{}` as input. + ( + // Any doc comments, derives, etc. + $( #[$attr:meta] )* + // The response type. + $t:ident {} + ) => { + $( #[$attr] )* + /// + /// This request has no inputs. + pub type $t = (); + }; + + //------------------------------------------------------------------------------ + // This branch of the macro expects fields within the `{}`, + // and will generate a `struct` + ( + // Any doc comments, derives, etc. + $( #[$attr:meta] )* + // The response type. + $t:ident { + // And any fields. + $( + $( #[$field_attr:meta] )* // field attributes + // field_name: FieldType + $field:ident: $field_type:ty $(= $field_default:expr)?, + // The $field_default is an optional extra token that represents + // a default value to pass to [`cuprate_epee_encoding::epee_object`], + // see it for usage. + )* + } + ) => { + #[allow(dead_code, missing_docs)] + #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] + #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] + $( #[$attr] )* + pub struct $t { + $( + $( #[$field_attr] )* + pub $field: $field_type, + )* + } + + #[cfg(feature = "epee")] + ::cuprate_epee_encoding::epee_object! { + $t, + $( + $field: $field_type $(= $field_default)?, + )* + } + }; +} +pub(crate) use __define_request; + +//---------------------------------------------------------------------------------------------------- define_response +/// Define a response type. +/// +/// This is only used in [`define_request_and_response`], see it for docs. +/// +/// `__` is used to notate that this shouldn't be called directly. +macro_rules! 
__define_response { + //------------------------------------------------------------------------------ + // This version of the macro expects the literal ident + // `Response` => $response_type_name. + // + // It will create a `struct` that _doesn't_ use a base from [`crate::base`], + // for example, [`crate::json::BannedResponse`] doesn't use a base, so it + // uses this branch. + ( + // Any doc comments, derives, etc. + $( #[$attr:meta] )* + // The response type. + Response => $t:ident { + // And any fields. + // See [`__define_request`] for docs, this does the same thing. + $( + $( #[$field_attr:meta] )* + $field:ident: $field_type:ty $(= $field_default:expr)?, + )* + } + ) => { + $( #[$attr] )* + pub struct $t { + $( + $( #[$field_attr] )* + pub $field: $field_type, + )* + } + + #[cfg(feature = "epee")] + ::cuprate_epee_encoding::epee_object! { + $t, + $( + $field: $field_type $($field_default)?, + )* + } + }; + + //------------------------------------------------------------------------------ + // This version of the macro expects a `Request` base type from [`crate::bases`]. + ( + // Any doc comments, derives, etc. + $( #[$attr:meta] )* + // The response base type => actual name of the struct + $base:ty => $t:ident { + // And any fields. + // See [`__define_request`] for docs, this does the same thing. + $( + $( #[$field_attr:meta] )* + $field:ident: $field_type:ty $(= $field_default:expr)?, + )* + } + ) => { + $( #[$attr] )* + pub struct $t { + #[cfg_attr(feature = "serde", serde(flatten))] + pub base: $base, + + $( + $( #[$field_attr] )* + pub $field: $field_type, + )* + } + + #[cfg(feature = "epee")] + ::cuprate_epee_encoding::epee_object! 
{ + $t, + $( + $field: $field_type $(= $field_default)?, + )* + !flatten: base: $base, + } + }; +} +pub(crate) use __define_response; + +//---------------------------------------------------------------------------------------------------- define_request_and_response_doc /// Generate documentation for the types generated -/// by the [`define_request_and_response`] macro. +/// by the [`__define_request_and_response`] macro. /// /// See it for more info on inputs. -macro_rules! define_request_and_response_doc { +/// +/// `__` is used to notate that this shouldn't be called directly. +macro_rules! __define_request_and_response_doc { ( // This labels the last `[request]` or `[response]` // hyperlink in documentation. Input is either: @@ -239,7 +294,7 @@ macro_rules! define_request_and_response_doc { // Remember this is linking to the _other_ type, // so if defining a `Request` type, input should // be "response". - $request_or_response:literal, + $request_or_response:literal => $request_or_response_type:ident, $monero_daemon_rpc_doc_link:ident, $monero_code_commit:ident, @@ -247,7 +302,6 @@ macro_rules! define_request_and_response_doc { $monero_code_filename_extension:ident, $monero_code_line_start:literal, $monero_code_line_end:literal, - $type_name:ident, ) => { concat!( "", @@ -269,9 +323,34 @@ macro_rules! define_request_and_response_doc { "), [", $request_or_response, "](", - stringify!($type_name), + stringify!($request_or_response_type), ")." ) }; } -pub(crate) use define_request_and_response_doc; +pub(crate) use __define_request_and_response_doc; + +//---------------------------------------------------------------------------------------------------- Macro +/// Output a string link to `monerod` source code. +macro_rules! monero_definition_link { + ( + $commit:ident, // Git commit hash + $file_path:literal, // File path within `monerod`'s `src/`, e.g. `rpc/core_rpc_server_commands_defs.h` + $start:literal$(..=$end:literal)? // File lines, e.g. 
`0..=123` or `0` + ) => { + concat!( + "[Definition](https://github.com/monero-project/monero/blob/", + stringify!($commit), + "/src/", + $file_path, + "#L", + stringify!($start), + $( + "-L", + stringify!($end), + )? + ")." + ) + }; +} +pub(crate) use monero_definition_link; diff --git a/rpc/types/src/binary_string.rs b/rpc/types/src/misc/binary_string.rs similarity index 80% rename from rpc/types/src/binary_string.rs rename to rpc/types/src/misc/binary_string.rs index b644ad3..5c3908d 100644 --- a/rpc/types/src/binary_string.rs +++ b/rpc/types/src/misc/binary_string.rs @@ -1,14 +1,14 @@ -//! TODO +//! JSON string containing binary data. //---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- BinaryString -/// TODO +/// TODO: we need to figure out a type that (de)serializes correctly, `String` errors with `serde_json` /// /// ```rust /// use serde::Deserialize; /// use serde_json::from_str; -/// use cuprate_rpc_types::BinaryString; +/// use cuprate_rpc_types::misc::BinaryString; /// /// #[derive(Deserialize)] /// struct Key { diff --git a/rpc/types/src/misc/block_complete_entry.rs b/rpc/types/src/misc/block_complete_entry.rs new file mode 100644 index 0000000..ca791b0 --- /dev/null +++ b/rpc/types/src/misc/block_complete_entry.rs @@ -0,0 +1,37 @@ +//! 
TODO + +//---------------------------------------------------------------------------------------------------- Use +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "epee")] +use cuprate_epee_encoding::epee_object; + +use crate::misc::TxBlobEntry; + +//---------------------------------------------------------------------------------------------------- BlockCompleteEntry +#[doc = crate::macros::monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 210..=221 +)] +/// Used in [`crate::bin::GetBlocksResponse`]. +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct BlockCompleteEntry { + pub pruned: bool, + pub block: String, + pub block_weight: u64, + pub txs: Vec, +} + +// TODO: custom epee +// +#[cfg(feature = "epee")] +epee_object! { + BlockCompleteEntry, + pruned: bool, + block: String, + block_weight: u64, + txs: Vec, +} diff --git a/rpc/types/src/misc/key_image_spent_status.rs b/rpc/types/src/misc/key_image_spent_status.rs new file mode 100644 index 0000000..d075e64 --- /dev/null +++ b/rpc/types/src/misc/key_image_spent_status.rs @@ -0,0 +1,48 @@ +//! TODO + +//---------------------------------------------------------------------------------------------------- Use +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "epee")] +use cuprate_epee_encoding::{ + macros::bytes::{Buf, BufMut}, + EpeeValue, Marker, +}; + +//---------------------------------------------------------------------------------------------------- KeyImageSpentStatus +#[doc = crate::macros::monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 456..=460 +)] +/// Used in [`crate::other::IsKeyImageSpentResponse`]. 
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[repr(u8)] +pub enum KeyImageSpentStatus { + Unspent = 0, + SpentInBlockchain = 1, + SpentInPool = 2, +} + +#[cfg(feature = "epee")] +impl EpeeValue for KeyImageSpentStatus { + const MARKER: Marker = ::MARKER; + + fn read(r: &mut B, marker: &Marker) -> cuprate_epee_encoding::Result { + todo!() + } + + fn should_write(&self) -> bool { + todo!() + } + + fn epee_default_value() -> Option { + todo!() + } + + fn write(self, w: &mut B) -> cuprate_epee_encoding::Result<()> { + todo!() + } +} diff --git a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs new file mode 100644 index 0000000..31719a3 --- /dev/null +++ b/rpc/types/src/misc/misc.rs @@ -0,0 +1,539 @@ +//! Miscellaneous types. +//! +//! These are `struct`s that appear in request/response types. +//! For example, [`crate::json::GetConnectionsResponse`] contains +//! the [`crate::misc::ConnectionInfo`] struct defined here. + +//---------------------------------------------------------------------------------------------------- Import +use std::fmt::Display; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "epee")] +use cuprate_epee_encoding::{ + epee_object, + macros::bytes::{Buf, BufMut}, + EpeeValue, Marker, +}; + +use crate::{ + constants::{ + CORE_RPC_STATUS_BUSY, CORE_RPC_STATUS_NOT_MINING, CORE_RPC_STATUS_OK, + CORE_RPC_STATUS_PAYMENT_REQUIRED, CORE_RPC_STATUS_UNKNOWN, + }, + defaults::default_zero, + macros::monero_definition_link, +}; + +//---------------------------------------------------------------------------------------------------- Macros +/// This macro (local to this file) defines all the misc types. +/// +/// This macro: +/// 1. Defines a `pub struct` with all `pub` fields +/// 2. Implements `serde` on the struct +/// 3. 
Implements `epee` on the struct +/// +/// When using, consider documenting: +/// - The original Monero definition site with [`monero_definition_link`] +/// - The request/responses where the `struct` is used +macro_rules! define_struct_and_impl_epee { + ( + // Optional `struct` attributes. + $( #[$struct_attr:meta] )* + // The `struct`'s name. + $struct_name:ident { + // And any fields. + $( + $( #[$field_attr:meta] )* // Field attributes + // Field name => the type => optional `epee_object` default value. + $field_name:ident: $field_type:ty $(= $field_default:expr)?, + )* + } + ) => { + $( #[$struct_attr] )* + #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] + #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + pub struct $struct_name { + $( + $( #[$field_attr] )* + pub $field_name: $field_type, + )* + } + + #[cfg(feature = "epee")] + epee_object! { + $struct_name, + $( + $field_name: $field_type $(= $field_default)?, + )* + } + }; +} + +//---------------------------------------------------------------------------------------------------- Type Definitions +define_struct_and_impl_epee! 
{ + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 1163..=1212 + )] + /// + /// Used in: + /// - [`crate::json::GetLastBlockHeaderResponse`] + /// - [`crate::json::GetBlockHeaderByHashResponse`] + /// - [`crate::json::GetBlockHeaderByHeightResponse`] + /// - [`crate::json::GetBlockHeadersRangeResponse`] + /// - [`crate::json::GetBlockResponse`] + BlockHeader { + block_size: u64, + block_weight: u64, + cumulative_difficulty_top64: u64, + cumulative_difficulty: u64, + depth: u64, + difficulty_top64: u64, + difficulty: u64, + hash: String, + height: u64, + long_term_weight: u64, + major_version: u8, + miner_tx_hash: String, + minor_version: u8, + nonce: u32, + num_txes: u64, + orphan_status: bool, + pow_hash: String, + prev_hash: String, + reward: u64, + timestamp: u64, + wide_cumulative_difficulty: String, + wide_difficulty: String, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "cryptonote_protocol/cryptonote_protocol_defs.h", + 47..=116 + )] + /// Used in [`crate::json::GetConnectionsResponse`]. + ConnectionInfo { + address: String, + address_type: u8, + avg_download: u64, + avg_upload: u64, + connection_id: String, + current_download: u64, + current_upload: u64, + height: u64, + host: String, + incoming: bool, + ip: String, + live_time: u64, + localhost: bool, + local_ip: bool, + peer_id: String, + port: String, + pruning_seed: u32, + recv_count: u64, + recv_idle_time: u64, + rpc_credits_per_hash: u32, + rpc_port: u16, + send_count: u64, + send_idle_time: u64, + ssl: bool, + state: String, + support_flags: u32, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 2034..=2047 + )] + /// Used in [`crate::json::SetBansRequest`]. 
+ SetBan { + host: String, + ip: u32, + ban: bool, + seconds: u32, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 1999..=2010 + )] + /// Used in [`crate::json::GetBansResponse`]. + GetBan { + host: String, + ip: u32, + seconds: u32, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 2139..=2156 + )] + #[derive(Copy)] + /// Used in [`crate::json::GetOutputHistogramResponse`]. + HistogramEntry { + amount: u64, + total_instances: u64, + unlocked_instances: u64, + recent_instances: u64, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 2180..=2191 + )] + #[derive(Copy)] + /// Used in [`crate::json::GetVersionResponse`]. + HardforkEntry { + height: u64, + hf_version: u8, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 2289..=2310 + )] + /// Used in [`crate::json::GetAlternateChainsResponse`]. + ChainInfo { + block_hash: String, + block_hashes: Vec, + difficulty: u64, + difficulty_top64: u64, + height: u64, + length: u64, + main_chain_parent_block: String, + wide_difficulty: String, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 2393..=2400 + )] + /// Used in [`crate::json::SyncInfoResponse`]. + SyncInfoPeer { + info: ConnectionInfo, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 2402..=2421 + )] + /// Used in [`crate::json::SyncInfoResponse`]. 
+ Span { + connection_id: String, + nblocks: u64, + rate: u32, + remote_address: String, + size: u64, + speed: u32, + start_block_height: u64, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 1637..=1642 + )] + #[derive(Copy)] + /// Used in [`crate::json::GetTransactionPoolBacklogResponse`]. + TxBacklogEntry { + weight: u64, + fee: u64, + time_in_pool: u64, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/rpc_handler.h", + 45..=50 + )] + /// Used in [`crate::json::GetOutputDistributionResponse`]. + OutputDistributionData { + distribution: Vec, + start_height: u64, + base: u64, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 1016..=1027 + )] + /// Used in [`crate::json::GetMinerDataResponse`]. + /// + /// Note that this is different than [`crate::misc::TxBacklogEntry`]. + GetMinerDataTxBacklogEntry { + id: String, + weight: u64, + fee: u64, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 1070..=1079 + )] + /// Used in [`crate::json::AddAuxPowRequest`]. + AuxPow { + id: String, + hash: String, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 192..=199 + )] + /// Used in [`crate::bin::GetBlocksResponse`]. + TxOutputIndices { + indices: Vec, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 201..=208 + )] + /// Used in [`crate::bin::GetBlocksResponse`]. + BlockOutputIndices { + indices: Vec, + } +} + +define_struct_and_impl_epee! 
{ + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 210..=221 + )] + /// Used in [`crate::bin::GetBlocksResponse`]. + PoolTxInfo { + tx_hash: [u8; 32], + tx_blob: String, + double_spend_seen: bool, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "cryptonote_protocol/cryptonote_protocol_defs.h", + 121..=131 + )] + /// Used in [`crate::bin::GetBlocksResponse`]. + TxBlobEntry { + blob: String, + prunable_hash: [u8; 32], + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 512..=521 + )] + #[derive(Copy)] + /// + /// Used in: + /// - [`crate::bin::GetOutsRequest`] + /// - [`crate::other::GetOutsRequest`] + GetOutputsOut { + amount: u64, + index: u64, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 538..=553 + )] + #[derive(Copy)] + /// Used in [`crate::bin::GetOutsRequest`]. + OutKeyBin { + key: [u8; 32], + mask: [u8; 32], + unlocked: bool, + height: u64, + txid: [u8; 32], + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 1335..=1367 + )] + /// Used in [`crate::other::GetPeerListResponse`]. + Peer { + id: u64, + host: String, + ip: u32, + port: u16, + #[cfg_attr(feature = "serde", serde(default = "default_zero"))] + rpc_port: u16 = default_zero::(), + #[cfg_attr(feature = "serde", serde(default = "default_zero"))] + rpc_credits_per_hash: u32 = default_zero::(), + last_seen: u64, + #[cfg_attr(feature = "serde", serde(default = "default_zero"))] + pruning_seed: u32 = default_zero::(), + } +} + +define_struct_and_impl_epee! 
{ + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 1398..=1417 + )] + /// + /// Used in: + /// - [`crate::other::GetPeerListResponse`] + /// - [`crate::other::GetPublicNodesResponse`] + PublicNode { + host: String, + last_seen: u64, + rpc_port: u16, + rpc_credits_per_hash: u32, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 1519..=1556 + )] + /// Used in [`crate::other::GetTransactionPoolResponse`]. + TxInfo { + blob_size: u64, + do_not_relay: bool, + double_spend_seen: bool, + fee: u64, + id_hash: String, + kept_by_block: bool, + last_failed_height: u64, + last_failed_id_hash: String, + last_relayed_time: u64, + max_used_block_height: u64, + max_used_block_id_hash: String, + receive_time: u64, + relayed: bool, + tx_blob: String, + tx_json: String, // TODO: this should be another struct + #[cfg_attr(feature = "serde", serde(default = "default_zero"))] + weight: u64 = default_zero::(), + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 1558..=1567 + )] + /// Used in [`crate::other::GetTransactionPoolResponse`]. + SpentKeyImageInfo { + id_hash: String, + txs_hashes: Vec, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 1666..=1675 + )] + #[derive(Copy)] + /// Used in [`crate::other::GetTransactionPoolStatsResponse`]. + TxpoolHisto { + txs: u32, + bytes: u64, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 1677..=1710 + )] + /// Used in [`crate::other::GetTransactionPoolStatsResponse`]. 
+ TxpoolStats { + bytes_max: u32, + bytes_med: u32, + bytes_min: u32, + bytes_total: u64, + fee_total: u64, + histo_98pc: u64, + histo: Vec, + num_10m: u32, + num_double_spends: u32, + num_failing: u32, + num_not_relayed: u32, + oldest: u64, + txs_total: u32, + } +} + +define_struct_and_impl_epee! { + #[doc = monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 582..=597 + )] + /// Used in [`crate::other::GetOutsResponse`]. + OutKey { + key: String, + mask: String, + unlocked: bool, + height: u64, + txid: String, + } +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test {} diff --git a/rpc/types/src/misc/mod.rs b/rpc/types/src/misc/mod.rs new file mode 100644 index 0000000..31dba35 --- /dev/null +++ b/rpc/types/src/misc/mod.rs @@ -0,0 +1,34 @@ +//! Miscellaneous types. +//! +//! These are data types that appear in request/response types. +//! +//! For example, [`crate::json::GetConnectionsResponse`] contains +//! the [`crate::misc::ConnectionInfo`] struct defined here. 
+ +//---------------------------------------------------------------------------------------------------- Lints +#![allow( + missing_docs, // Docs are at: + clippy::struct_excessive_bools, // hey man, tell that to the people who wrote `monerod` +)] + +//---------------------------------------------------------------------------------------------------- Mod +mod binary_string; +mod block_complete_entry; +mod key_image_spent_status; +mod misc; +mod pool_info_extent; +mod status; +mod tx_entry; + +pub use binary_string::BinaryString; +pub use block_complete_entry::BlockCompleteEntry; +pub use key_image_spent_status::KeyImageSpentStatus; +pub use misc::{ + AuxPow, BlockHeader, BlockOutputIndices, ChainInfo, ConnectionInfo, GetBan, + GetMinerDataTxBacklogEntry, GetOutputsOut, HardforkEntry, HistogramEntry, OutKey, OutKeyBin, + OutputDistributionData, Peer, PoolTxInfo, PublicNode, SetBan, Span, SpentKeyImageInfo, + SyncInfoPeer, TxBacklogEntry, TxBlobEntry, TxInfo, TxOutputIndices, TxpoolHisto, TxpoolStats, +}; +pub use pool_info_extent::PoolInfoExtent; +pub use status::Status; +pub use tx_entry::TxEntry; diff --git a/rpc/types/src/misc/pool_info_extent.rs b/rpc/types/src/misc/pool_info_extent.rs new file mode 100644 index 0000000..09b6c96 --- /dev/null +++ b/rpc/types/src/misc/pool_info_extent.rs @@ -0,0 +1,49 @@ +//! TODO + +//---------------------------------------------------------------------------------------------------- Use +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "epee")] +use cuprate_epee_encoding::{ + macros::bytes::{Buf, BufMut}, + EpeeValue, Marker, +}; + +//---------------------------------------------------------------------------------------------------- PoolInfoExtent +#[doc = crate::macros::monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 223..=228 +)] +/// Used in [`crate::bin::GetBlocksResponse`]. 
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[repr(u8)] +pub enum PoolInfoExtent { + None = 0, + Incremental = 1, + Full = 2, +} + +// +#[cfg(feature = "epee")] +impl EpeeValue for PoolInfoExtent { + const MARKER: Marker = ::MARKER; + + fn read(r: &mut B, marker: &Marker) -> cuprate_epee_encoding::Result { + todo!() + } + + fn should_write(&self) -> bool { + todo!() + } + + fn epee_default_value() -> Option { + todo!() + } + + fn write(self, w: &mut B) -> cuprate_epee_encoding::Result<()> { + todo!() + } +} diff --git a/rpc/types/src/status.rs b/rpc/types/src/misc/status.rs similarity index 88% rename from rpc/types/src/status.rs rename to rpc/types/src/misc/status.rs index e8ac6ce..f2dff1a 100644 --- a/rpc/types/src/status.rs +++ b/rpc/types/src/misc/status.rs @@ -3,8 +3,10 @@ //---------------------------------------------------------------------------------------------------- Import use std::fmt::Display; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +#[cfg(feature = "epee")] use cuprate_epee_encoding::{ macros::bytes::{Buf, BufMut}, EpeeValue, Marker, @@ -16,18 +18,20 @@ use crate::constants::{ }; //---------------------------------------------------------------------------------------------------- Status +// TODO: this type needs to expand more. +// There are a lot of RPC calls that will return a random +// string inside, which isn't compatible with [`Status`]. + /// RPC response status. /// /// This type represents `monerod`'s frequently appearing string field, `status`. /// -/// This field appears within RPC [JSON response](crate::json) types. -/// /// Reference: . 
/// /// ## Serialization and string formatting /// ```rust /// use cuprate_rpc_types::{ -/// Status, +/// misc::Status, /// CORE_RPC_STATUS_BUSY, CORE_RPC_STATUS_NOT_MINING, CORE_RPC_STATUS_OK, /// CORE_RPC_STATUS_PAYMENT_REQUIRED, CORE_RPC_STATUS_UNKNOWN /// }; @@ -59,28 +63,27 @@ use crate::constants::{ /// assert_eq!(format!("{:?}", Status::PaymentRequired), "PaymentRequired"); /// assert_eq!(format!("{:?}", unknown), "Unknown"); /// ``` -#[derive( - Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, -)] +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum Status { // FIXME: // `#[serde(rename = "")]` only takes raw string literals? // We have to re-type the constants here... /// Successful RPC response, everything is OK; [`CORE_RPC_STATUS_OK`]. - #[serde(rename = "OK")] + #[cfg_attr(feature = "serde", serde(rename = "OK"))] #[default] Ok, /// The daemon is busy, try later; [`CORE_RPC_STATUS_BUSY`]. - #[serde(rename = "BUSY")] + #[cfg_attr(feature = "serde", serde(rename = "BUSY"))] Busy, /// The daemon is not mining; [`CORE_RPC_STATUS_NOT_MINING`]. - #[serde(rename = "NOT MINING")] + #[cfg_attr(feature = "serde", serde(rename = "NOT MINING"))] NotMining, /// Payment is required for RPC; [`CORE_RPC_STATUS_PAYMENT_REQUIRED`]. - #[serde(rename = "PAYMENT REQUIRED")] + #[cfg_attr(feature = "serde", serde(rename = "PAYMENT REQUIRED"))] PaymentRequired, /// Some unknown other string; [`CORE_RPC_STATUS_UNKNOWN`]. @@ -91,8 +94,8 @@ pub enum Status { /// The reason this isn't `Unknown(String)` is because that /// disallows [`Status`] to be [`Copy`], and thus other types /// that contain it. 
- #[serde(other)] - #[serde(rename = "UNKNOWN")] + #[cfg_attr(feature = "serde", serde(other))] + #[cfg_attr(feature = "serde", serde(rename = "UNKNOWN"))] Unknown, } @@ -132,6 +135,7 @@ impl Display for Status { // // See below for more impl info: // . +#[cfg(feature = "epee")] impl EpeeValue for Status { const MARKER: Marker = ::MARKER; @@ -161,6 +165,7 @@ mod test { // Test epee (de)serialization works. #[test] + #[cfg(feature = "epee")] fn epee() { for status in [ Status::Ok, diff --git a/rpc/types/src/misc/tx_entry.rs b/rpc/types/src/misc/tx_entry.rs new file mode 100644 index 0000000..70fbdff --- /dev/null +++ b/rpc/types/src/misc/tx_entry.rs @@ -0,0 +1,59 @@ +//! TODO + +//---------------------------------------------------------------------------------------------------- Use +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "epee")] +use cuprate_epee_encoding::{ + epee_object, + macros::bytes::{Buf, BufMut}, + EpeeValue, Marker, +}; + +//---------------------------------------------------------------------------------------------------- TxEntry +#[doc = crate::macros::monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 389..=428 +)] +/// Used in [`crate::other::GetTransactionsResponse`]. +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct TxEntry { + pub as_hex: String, + pub as_json: String, + pub block_height: u64, + pub block_timestamp: u64, + pub confirmations: u64, + pub double_spend_seen: bool, + pub in_pool: bool, + pub output_indices: Vec, + pub prunable_as_hex: String, + pub prunable_hash: String, + pub pruned_as_hex: String, + pub received_timestamp: u64, + pub relayed: bool, + pub tx_hash: String, +} + +// TODO: custom epee +// +#[cfg(feature = "epee")] +epee_object! 
{ + TxEntry, + as_hex: String, + as_json: String, // TODO: should be its own struct + block_height: u64, + block_timestamp: u64, + confirmations: u64, + double_spend_seen: bool, + in_pool: bool, + output_indices: Vec, + prunable_as_hex: String, + prunable_hash: String, + pruned_as_hex: String, + received_timestamp: u64, + relayed: bool, + tx_hash: String, +} diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index 22547ed..66b7197 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -7,11 +7,15 @@ use crate::{base::ResponseBase, macros::define_request_and_response}; //---------------------------------------------------------------------------------------------------- TODO define_request_and_response! { - save_bc, + get_height, cc73fe71162d564ffda8e549b79a350bca53c454 => - core_rpc_server_commands_defs.h => 898..=916, - SaveBc, - ResponseBase {} + core_rpc_server_commands_defs.h => 138..=160, + GetHeight, + Request {}, + ResponseBase { + hash: String, + height: u64, + } } //---------------------------------------------------------------------------------------------------- Tests From 03815efe29d45fb16f07673eb9a10a4d5f80fb26 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Jul 2024 23:01:31 +0100 Subject: [PATCH 007/104] build(deps): bump zerovec-derive from 0.10.2 to 0.10.3 (#223) Bumps [zerovec-derive](https://github.com/unicode-org/icu4x) from 0.10.2 to 0.10.3. - [Release notes](https://github.com/unicode-org/icu4x/releases) - [Changelog](https://github.com/unicode-org/icu4x/blob/main/CHANGELOG.md) - [Commits](https://github.com/unicode-org/icu4x/commits/ind/zerovec-derive@0.10.3) --- updated-dependencies: - dependency-name: zerovec-derive dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d3f4503..5eacb0b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3244,9 +3244,9 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97cf56601ee5052b4417d90c8755c6683473c926039908196cf35d99f893ebe7" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", From 0d7b86abe3a6b26a49d6a696329a0a6deda7f854 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Jul 2024 23:03:13 +0100 Subject: [PATCH 008/104] build(deps): bump zerovec from 0.10.2 to 0.10.4 (#217) Bumps [zerovec](https://github.com/unicode-org/icu4x) from 0.10.2 to 0.10.4. - [Release notes](https://github.com/unicode-org/icu4x/releases) - [Changelog](https://github.com/unicode-org/icu4x/blob/main/CHANGELOG.md) - [Commits](https://github.com/unicode-org/icu4x/commits/ind/zerovec@0.10.4) --- updated-dependencies: - dependency-name: zerovec dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5eacb0b..e5b795e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3233,9 +3233,9 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.10.2" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2cc8827d6c0994478a15c53f374f46fbd41bea663d809b14744bc42e6b109c" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" dependencies = [ "yoke", "zerofrom", From 5c3258a6e38072ac93bad808a8242e6899461c32 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Tue, 9 Jul 2024 20:32:23 -0400 Subject: [PATCH 009/104] workspace: add lints (#133) add lints to workspace --- Cargo.toml | 204 +++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 200 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b00a4b9..9b090ba 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -101,7 +101,203 @@ tokio-test = { version = "0.4.4" } # regex = { version = "1.10.2" } # Regular expressions | https://github.com/rust-lang/regex # ryu = { version = "1.0.15" } # Fast float to string formatting | https://github.com/dtolnay/ryu -# Maybe one day. 
-# disk = { version = "*" } # (De)serialization to/from disk with various file formats | https://github.com/hinto-janai/disk -# readable = { version = "*" } # Stack-based string formatting utilities | https://github.com/hinto-janai/readable -# json-rpc = { git = "https://github.com/hinto-janai/json-rpc" } # JSON-RPC 2.0 types +# Lints: cold, warm, hot: +[workspace.lints.clippy] +# Cold +borrow_as_ptr = "deny" +case_sensitive_file_extension_comparisons = "deny" +cast_lossless = "deny" +cast_ptr_alignment = "deny" +checked_conversions = "deny" +cloned_instead_of_copied = "deny" +doc_link_with_quotes = "deny" +empty_enum = "deny" +enum_glob_use = "deny" +expl_impl_clone_on_copy = "deny" +explicit_into_iter_loop = "deny" +filter_map_next = "deny" +flat_map_option = "deny" +from_iter_instead_of_collect = "deny" +if_not_else = "deny" +ignored_unit_patterns = "deny" +inconsistent_struct_constructor = "deny" +index_refutable_slice = "deny" +inefficient_to_string = "deny" +invalid_upcast_comparisons = "deny" +iter_filter_is_ok = "deny" +iter_filter_is_some = "deny" +implicit_clone = "deny" +manual_c_str_literals = "deny" +manual_instant_elapsed = "deny" +manual_is_variant_and = "deny" +manual_let_else = "deny" +manual_ok_or = "deny" +manual_string_new = "deny" +map_unwrap_or = "deny" +match_bool = "deny" +match_same_arms = "deny" +match_wildcard_for_single_variants = "deny" +mismatching_type_param_order = "deny" +mut_mut = "deny" +needless_bitwise_bool = "deny" +needless_continue = "deny" +needless_for_each = "deny" +needless_raw_string_hashes = "deny" +no_effect_underscore_binding = "deny" +no_mangle_with_rust_abi = "deny" +option_as_ref_cloned = "deny" +option_option = "deny" +ptr_as_ptr = "deny" +ptr_cast_constness = "deny" +pub_underscore_fields = "deny" +redundant_closure_for_method_calls = "deny" +ref_as_ptr = "deny" +ref_option_ref = "deny" +same_functions_in_if_condition = "deny" +semicolon_if_nothing_returned = "deny" +trivially_copy_pass_by_ref = "deny" 
+uninlined_format_args = "deny" +unnecessary_join = "deny" +unnested_or_patterns = "deny" +unused_async = "deny" +unused_self = "deny" +used_underscore_binding = "deny" +zero_sized_map_values = "deny" +as_ptr_cast_mut = "deny" +clear_with_drain = "deny" +collection_is_never_read = "deny" +debug_assert_with_mut_call = "deny" +derive_partial_eq_without_eq = "deny" +empty_line_after_doc_comments = "deny" +empty_line_after_outer_attr = "deny" +equatable_if_let = "deny" +iter_on_empty_collections = "deny" +iter_on_single_items = "deny" +iter_with_drain = "deny" +needless_collect = "deny" +needless_pass_by_ref_mut = "deny" +negative_feature_names = "deny" +non_send_fields_in_send_ty = "deny" +nonstandard_macro_braces = "deny" +path_buf_push_overwrite = "deny" +read_zero_byte_vec = "deny" +redundant_clone = "deny" +redundant_feature_names = "deny" +trailing_empty_array = "deny" +trait_duplication_in_bounds = "deny" +type_repetition_in_bounds = "deny" +uninhabited_references = "deny" +unnecessary_struct_initialization = "deny" +unused_peekable = "deny" +unused_rounding = "deny" +use_self = "deny" +useless_let_if_seq = "deny" +wildcard_dependencies = "deny" +unseparated_literal_suffix = "deny" +unnecessary_safety_doc = "deny" +unnecessary_safety_comment = "deny" +unnecessary_self_imports = "deny" +tests_outside_test_module = "deny" +string_to_string = "deny" +rest_pat_in_fully_bound_structs = "deny" +redundant_type_annotations = "deny" +infinite_loop = "deny" + +# Warm +cast_possible_truncation = "deny" +cast_possible_wrap = "deny" +cast_precision_loss = "deny" +cast_sign_loss = "deny" +copy_iterator = "deny" +doc_markdown = "deny" +explicit_deref_methods = "deny" +explicit_iter_loop = "deny" +float_cmp = "deny" +fn_params_excessive_bools = "deny" +into_iter_without_iter = "deny" +iter_without_into_iter = "deny" +iter_not_returning_iterator = "deny" +large_digit_groups = "deny" +large_types_passed_by_value = "deny" +manual_assert = "deny" +maybe_infinite_iter = "deny" 
+missing_fields_in_debug = "deny" +needless_pass_by_value = "deny" +range_minus_one = "deny" +range_plus_one = "deny" +redundant_else = "deny" +ref_binding_to_reference = "deny" +return_self_not_must_use = "deny" +single_match_else = "deny" +string_add_assign = "deny" +transmute_ptr_to_ptr = "deny" +unchecked_duration_subtraction = "deny" +unnecessary_box_returns = "deny" +unnecessary_wraps = "deny" +branches_sharing_code = "deny" +fallible_impl_from = "deny" +missing_const_for_fn = "deny" +significant_drop_in_scrutinee = "deny" +significant_drop_tightening = "deny" +try_err = "deny" +lossy_float_literal = "deny" +let_underscore_must_use = "deny" +iter_over_hash_type = "deny" +impl_trait_in_params = "deny" +get_unwrap = "deny" +error_impl_error = "deny" +empty_structs_with_brackets = "deny" +empty_enum_variants_with_brackets = "deny" +empty_drop = "deny" +clone_on_ref_ptr = "deny" + +# Hot +# inline_always = "deny" +# large_futures = "deny" +# large_stack_arrays = "deny" +# linkedlist = "deny" +# missing_errors_doc = "deny" +# missing_panics_doc = "deny" +# should_panic_without_expect = "deny" +# similar_names = "deny" +# too_many_lines = "deny" +# unreadable_literal = "deny" +# wildcard_imports = "deny" +# allow_attributes_without_reason = "deny" +# missing_assert_message = "deny" +# missing_docs_in_private_items = "deny" +# undocumented_unsafe_blocks = "deny" +# multiple_unsafe_ops_per_block = "deny" +# single_char_lifetime_names = "deny" +# wildcard_enum_match_arm = "deny" + +[workspace.lints.rust] +# Cold +absolute_paths_not_starting_with_crate = "deny" +explicit_outlives_requirements = "deny" +keyword_idents = "deny" +missing_abi = "deny" +non_ascii_idents = "deny" +non_local_definitions = "deny" +single_use_lifetimes = "deny" +trivial_casts = "deny" +trivial_numeric_casts = "deny" +unsafe_op_in_unsafe_fn = "deny" +unused_crate_dependencies = "deny" +unused_import_braces = "deny" +unused_lifetimes = "deny" +unused_macro_rules = "deny" +ambiguous_glob_imports = 
"deny" +unused_unsafe = "deny" + +# Warm +let_underscore_drop = "deny" +unreachable_pub = "deny" +unused_qualifications = "deny" +variant_size_differences = "deny" + +# Hot +# unused_results = "deny" +# non_exhaustive_omitted_patterns = "deny" +# missing_docs = "deny" +# missing_copy_implementations = "deny" \ No newline at end of file From ecbb5ad3dcf7a3fd7ba44dca3522dd0e7457c9ee Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 10 Jul 2024 16:12:40 -0400 Subject: [PATCH 010/104] rpc: implement `/json_rpc` types (#219) * `serde/epee` feature flags * modify type generator macros * add `defaults.rs` * add `free.rs` * add `misc` module * modify `base.rs`, `contants.rs` * remove `binary_string.rs`, `status.rs` * fix macro usage * impl `json.rs` * base: re-add `AccessRequestBase` * fix default functions * tx_entry: fix link * json: fix default functions * json: fix `on_get_block_hash`, `submit_block` * json: `status` -> `block_id` * json: fix `SubmitBlockRequest` * json: fix `OnGetBlockHashResponse` The serialized `result` field will come from our `json-rpc` crate, so the inner type should be a `transparent` string. * json: `Response` -> `ResponseBase` for `SubmitBlock` * Update rpc/types/src/json.rs Co-authored-by: Boog900 * json: fix `SubmitBlockRequest` doc test --------- Co-authored-by: Boog900 --- rpc/types/src/json.rs | 567 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 565 insertions(+), 2 deletions(-) diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index c258d59..b5b53c9 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -1,9 +1,18 @@ //! JSON types from the [`/json_rpc`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#json-rpc-methods) endpoint. //! -//! . +//! All types are originally defined in [`rpc/core_rpc_server_commands_defs.h`](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h). 
//---------------------------------------------------------------------------------------------------- Import -use crate::{base::ResponseBase, macros::define_request_and_response}; +use crate::{ + base::{AccessResponseBase, ResponseBase}, + defaults::{default_false, default_height, default_string, default_vec, default_zero}, + free::{is_one, is_zero}, + macros::define_request_and_response, + misc::{ + AuxPow, BlockHeader, ChainInfo, ConnectionInfo, GetBan, HardforkEntry, HistogramEntry, + OutputDistributionData, SetBan, Span, Status, SyncInfoPeer, TxBacklogEntry, + }, +}; //---------------------------------------------------------------------------------------------------- Struct definitions // This generates 2 structs: @@ -86,6 +95,560 @@ define_request_and_response! { } } +define_request_and_response! { + get_block_count, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 919..=933, + GetBlockCount, + + // There are no request fields specified, + // this will cause the macro to generate a + // type alias to `()` instead of a `struct`. + Request {}, + + #[derive(Copy)] + ResponseBase { + count: u64, + } +} + +define_request_and_response! { + on_get_block_hash, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 935..=939, + OnGetBlockHash, + /// ```rust + /// use serde_json::*; + /// use cuprate_rpc_types::json::*; + /// + /// let x = OnGetBlockHashRequest { block_height: [3] }; + /// let x = to_string(&x).unwrap(); + /// assert_eq!(x, "[3]"); + /// ``` + #[cfg_attr(feature = "serde", serde(transparent))] + #[repr(transparent)] + #[derive(Copy)] + Request { + // This is `std::vector` in `monerod` but + // it must be a 1 length array or else it will error. 
+ block_height: [u64; 1], + }, + /// ```rust + /// use serde_json::*; + /// use cuprate_rpc_types::json::*; + /// + /// let x = OnGetBlockHashResponse { block_hash: String::from("asdf") }; + /// let x = to_string(&x).unwrap(); + /// assert_eq!(x, "\"asdf\""); + /// ``` + #[cfg_attr(feature = "serde", serde(transparent))] + #[repr(transparent)] + Response { + block_hash: String, + } +} + +define_request_and_response! { + submit_block, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1114..=1128, + SubmitBlock, + /// ```rust + /// use serde_json::*; + /// use cuprate_rpc_types::json::*; + /// + /// let x = SubmitBlockRequest { block_blob: ["a".into()] }; + /// let x = to_string(&x).unwrap(); + /// assert_eq!(x, r#"["a"]"#); + /// ``` + #[cfg_attr(feature = "serde", serde(transparent))] + #[repr(transparent)] + Request { + // This is `std::vector` in `monerod` but + // it must be a 1 length array or else it will error. + block_blob: [String; 1], + }, + ResponseBase { + block_id: String, + } +} + +define_request_and_response! { + generateblocks, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1130..=1161, + GenerateBlocks, + Request { + amount_of_blocks: u64, + prev_block: String, + starting_nonce: u32, + wallet_address: String, + }, + ResponseBase { + blocks: Vec, + height: u64, + } +} + +define_request_and_response! { + get_last_block_header, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1214..=1238, + GetLastBlockHeader, + #[derive(Copy)] + Request { + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + fill_pow_hash: bool = default_false(), + }, + AccessResponseBase { + block_header: BlockHeader, + } +} + +define_request_and_response! 
{ + get_block_header_by_hash, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1240..=1269, + GetBlockHeaderByHash, + Request { + hash: String, + hashes: Vec, + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + fill_pow_hash: bool = default_false(), + }, + AccessResponseBase { + block_header: BlockHeader, + block_headers: Vec, + } +} + +define_request_and_response! { + get_block_header_by_height, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1271..=1296, + GetBlockHeaderByHeight, + #[derive(Copy)] + Request { + height: u64, + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + fill_pow_hash: bool = default_false(), + }, + AccessResponseBase { + block_header: BlockHeader, + } +} + +define_request_and_response! { + get_block_headers_range, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1756..=1783, + GetBlockHeadersRange, + #[derive(Copy)] + Request { + start_height: u64, + end_height: u64, + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + fill_pow_hash: bool = default_false(), + }, + AccessResponseBase { + headers: Vec, + } +} + +define_request_and_response! { + get_block, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1298..=1313, + GetBlock, + Request { + // `monerod` has both `hash` and `height` fields. + // In the RPC handler, if `hash.is_empty()`, it will use it, else, it uses `height`. + // + #[cfg_attr(feature = "serde", serde(default = "default_string"))] + hash: String = default_string(), + #[cfg_attr(feature = "serde", serde(default = "default_height"))] + height: u64 = default_height(), + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + fill_pow_hash: bool = default_false(), + }, + AccessResponseBase { + blob: String, + block_header: BlockHeader, + json: String, // TODO: this should be defined in a struct, it has many fields. 
+ miner_tx_hash: String, + tx_hashes: Vec, + } +} + +define_request_and_response! { + get_connections, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1734..=1754, + GetConnections, + Request {}, + ResponseBase { + // TODO: This is a `std::list` in `monerod` because...? + connections: Vec, + } +} + +define_request_and_response! { + get_info, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 693..=789, + GetInfo, + Request {}, + AccessResponseBase { + adjusted_time: u64, + alt_blocks_count: u64, + block_size_limit: u64, + block_size_median: u64, + block_weight_limit: u64, + block_weight_median: u64, + bootstrap_daemon_address: String, + busy_syncing: bool, + cumulative_difficulty_top64: u64, + cumulative_difficulty: u64, + database_size: u64, + difficulty_top64: u64, + difficulty: u64, + free_space: u64, + grey_peerlist_size: u64, + height: u64, + height_without_bootstrap: u64, + incoming_connections_count: u64, + mainnet: bool, + nettype: String, + offline: bool, + outgoing_connections_count: u64, + restricted: bool, + rpc_connections_count: u64, + stagenet: bool, + start_time: u64, + synchronized: bool, + target_height: u64, + target: u64, + testnet: bool, + top_block_hash: String, + tx_count: u64, + tx_pool_size: u64, + update_available: bool, + version: String, + was_bootstrap_ever_used: bool, + white_peerlist_size: u64, + wide_cumulative_difficulty: String, + wide_difficulty: String, + } +} + +define_request_and_response! { + hard_fork_info, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1958..=1995, + HardForkInfo, + Request {}, + AccessResponseBase { + earliest_height: u64, + enabled: bool, + state: u32, + threshold: u32, + version: u8, + votes: u32, + voting: u8, + window: u32, + } +} + +define_request_and_response! 
{ + set_bans, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2032..=2067, + SetBans, + Request { + bans: Vec, + }, + ResponseBase {} +} + +define_request_and_response! { + get_bans, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1997..=2030, + GetBans, + Request {}, + ResponseBase { + bans: Vec, + } +} + +define_request_and_response! { + banned, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2069..=2094, + Banned, + #[cfg_attr(feature = "serde", serde(transparent))] + #[repr(transparent)] + Request { + address: String, + }, + #[derive(Copy)] + Response { + banned: bool, + seconds: u32, + status: Status, + } +} + +define_request_and_response! { + flush_txpool, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2096..=2116, + FlushTransactionPool, + Request { + #[cfg_attr(feature = "serde", serde(default = "default_vec"))] + txids: Vec = default_vec::(), + }, + #[derive(Copy)] + #[cfg_attr(feature = "serde", serde(transparent))] + #[repr(transparent)] + Response { + status: Status, + } +} + +define_request_and_response! { + get_output_histogram, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2118..=2168, + GetOutputHistogram, + Request { + amounts: Vec, + min_count: u64, + max_count: u64, + unlocked: bool, + recent_cutoff: u64, + }, + AccessResponseBase { + histogram: Vec, + } +} + +define_request_and_response! { + get_coinbase_tx_sum, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2213..=2248, + GetCoinbaseTxSum, + Request { + height: u64, + count: u64, + }, + AccessResponseBase { + emission_amount: u64, + emission_amount_top64: u64, + fee_amount: u64, + fee_amount_top64: u64, + wide_emission_amount: String, + wide_fee_amount: String, + } +} + +define_request_and_response! 
{ + get_version, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2170..=2211, + GetVersion, + Request {}, + ResponseBase { + version: u32, + release: bool, + #[serde(skip_serializing_if = "is_zero", default = "default_zero")] + current_height: u64 = default_zero(), + #[serde(skip_serializing_if = "is_zero", default = "default_zero")] + target_height: u64 = default_zero(), + #[serde(skip_serializing_if = "Vec::is_empty", default = "default_vec")] + hard_forks: Vec = default_vec(), + } +} + +define_request_and_response! { + get_fee_estimate, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2250..=2277, + GetFeeEstimate, + Request {}, + AccessResponseBase { + fee: u64, + fees: Vec, + #[serde(skip_serializing_if = "is_one")] + quantization_mask: u64, + } +} + +define_request_and_response! { + get_alternate_chains, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2279..=2310, + GetAlternateChains, + Request {}, + ResponseBase { + chains: Vec, + } +} + +define_request_and_response! { + relay_tx, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2361..=2381, + RelayTx, + Request { + txids: Vec, + }, + #[derive(Copy)] + #[cfg_attr(feature = "serde", serde(transparent))] + #[repr(transparent)] + Response { + status: Status, + } +} + +define_request_and_response! { + sync_info, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2383..=2443, + SyncInfo, + Request {}, + AccessResponseBase { + height: u64, + next_needed_pruning_seed: u32, + overview: String, + // TODO: This is a `std::list` in `monerod` because...? + peers: Vec, + // TODO: This is a `std::list` in `monerod` because...? + spans: Vec, + target_height: u64, + } +} + +define_request_and_response! 
{ + get_txpool_backlog, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1637..=1664, + GetTransactionPoolBacklog, + Request {}, + ResponseBase { + // TODO: this is a [`BinaryString`]. + backlog: Vec, + } +} + +define_request_and_response! { + get_output_distribution, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2445..=2520, + /// This type is also used in the (undocumented) + /// [`/get_output_distribution.bin`](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server.h#L138) + /// binary endpoint. + GetOutputDistribution, + Request { + amounts: Vec, + binary: bool, + compress: bool, + cumulative: bool, + from_height: u64, + to_height: u64, + }, + /// TODO: this request has custom serde: + /// + AccessResponseBase { + distributions: Vec, + } +} + +define_request_and_response! { + get_miner_data, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 996..=1044, + GetMinerData, + Request {}, + ResponseBase { + major_version: u8, + height: u64, + prev_id: String, + seed_hash: String, + difficulty: String, + median_weight: u64, + already_generated_coins: u64, + } +} + +define_request_and_response! { + prune_blockchain, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2747..=2772, + PruneBlockchain, + #[derive(Copy)] + Request { + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + check: bool = default_false(), + }, + #[derive(Copy)] + ResponseBase { + pruned: bool, + pruning_seed: u32, + } +} + +define_request_and_response! 
{ + calc_pow, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1046..=1066, + CalcPow, + Request { + major_version: u8, + height: u64, + block_blob: String, + seed_hash: String, + }, + #[cfg_attr(feature = "serde", serde(transparent))] + #[repr(transparent)] + Response { + pow_hash: String, + } +} + +define_request_and_response! { + flush_cache, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2774..=2796, + FlushCache, + #[derive(Copy)] + Request { + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + bad_txs: bool = default_false(), + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + bad_blocks: bool = default_false(), + }, + ResponseBase {} +} + +define_request_and_response! { + add_aux_pow, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1068..=1112, + AddAuxPow, + Request { + blocktemplate_blob: String, + aux_pow: Vec, + }, + ResponseBase { + blocktemplate_blob: String, + blockhashing_blob: String, + merkle_root: String, + merkle_tree_depth: u64, + aux_pow: Vec, + } +} + //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test { From 303c165df73ff53bd39329cb7d507bb51a677231 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 10 Jul 2024 16:19:19 -0400 Subject: [PATCH 011/104] rpc: implement `.bin` types (#220) * `serde/epee` feature flags * modify type generator macros * add `defaults.rs` * add `free.rs` * add `misc` module * modify `base.rs`, `contants.rs` * remove `binary_string.rs`, `status.rs` * fix macro usage * impl `bin.rs` * base: re-add `AccessRequestBase` * fix default functions * tx_entry: fix link * bin: fix default functions --- rpc/types/src/bin.rs | 115 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 114 insertions(+), 1 deletion(-) diff --git a/rpc/types/src/bin.rs b/rpc/types/src/bin.rs index f327847..02be193 100644 --- 
a/rpc/types/src/bin.rs +++ b/rpc/types/src/bin.rs @@ -1,8 +1,121 @@ -//! Binary types from [binary](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_blocksbin) endpoints. +//! Binary types from [`.bin` endpoints](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_blocksbin). +//! +//! All types are originally defined in [`rpc/core_rpc_server_commands_defs.h`](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h). //---------------------------------------------------------------------------------------------------- Import +use crate::{ + base::{AccessResponseBase, ResponseBase}, + defaults::{default_false, default_height, default_string, default_vec, default_zero}, + free::{is_one, is_zero}, + macros::define_request_and_response, + misc::{ + AuxPow, BlockCompleteEntry, BlockHeader, BlockOutputIndices, ChainInfo, ConnectionInfo, + GetBan, GetOutputsOut, HardforkEntry, HistogramEntry, OutKeyBin, OutputDistributionData, + Peer, PoolTxInfo, SetBan, Span, Status, TxBacklogEntry, + }, +}; //---------------------------------------------------------------------------------------------------- TODO +define_request_and_response! { + get_blocksbin, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 162..=262, + GetBlocks, + Request { + #[cfg_attr(feature = "serde", serde(default = "default_zero"))] + requested_info: u8 = default_zero(), + // TODO: This is a `std::list` in `monerod` because...? + block_ids: Vec<[u8; 32]>, + start_height: u64, + prune: bool, + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + no_miner_tx: bool = default_false(), + #[cfg_attr(feature = "serde", serde(default = "default_zero"))] + pool_info_since: u64 = default_zero(), + }, + // TODO: this has custom epee (de)serialization. 
+ // + ResponseBase { + blocks: Vec, + start_height: u64, + current_height: u64, + output_indices: Vec, + daemon_time: u64, + pool_info_extent: u8, + added_pool_txs: Vec, + remaining_added_pool_txids: Vec<[u8; 32]>, + removed_pool_txids: Vec<[u8; 32]>, + } +} + +define_request_and_response! { + get_blocks_by_heightbin, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 264..=286, + GetBlocksByHeight, + Request { + heights: Vec, + }, + AccessResponseBase { + blocks: Vec, + } +} + +define_request_and_response! { + get_hashesbin, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 309..=338, + GetHashes, + Request { + block_ids: Vec<[u8; 32]>, + start_height: u64, + }, + AccessResponseBase { + m_blocks_ids: Vec<[u8; 32]>, + start_height: u64, + current_height: u64, + } +} + +define_request_and_response! { + get_o_indexesbin, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 487..=510, + GetOutputIndexes, + #[derive(Copy)] + Request { + txid: [u8; 32], + }, + AccessResponseBase { + o_indexes: Vec, + } +} + +define_request_and_response! { + get_outsbin, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 512..=565, + GetOuts, + Request { + outputs: Vec, + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + get_txid: bool = default_false(), + }, + AccessResponseBase { + outs: Vec, + } +} + +define_request_and_response! 
{ + get_transaction_pool_hashesbin, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1593..=1613, + GetTransactionPoolHashes, + Request {}, + AccessResponseBase { + tx_hashes: Vec<[u8; 32]>, + } +} //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] From 612938eae6965fb0ab2cffd43f657971ad9090d7 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 10 Jul 2024 17:11:09 -0400 Subject: [PATCH 012/104] database: workaround `redb` page freeing issue (#225) * redb: `SyncMode::Fast` => `redb::Durability::Eventual` * redb: fix clippy * database: document `SyncMode` --- storage/database/src/backend/redb/env.rs | 5 +++-- storage/database/src/config/sync_mode.rs | 20 ++++++++++++++------ 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/storage/database/src/backend/redb/env.rs b/storage/database/src/backend/redb/env.rs index 4a178ad..a405ea7 100644 --- a/storage/database/src/backend/redb/env.rs +++ b/storage/database/src/backend/redb/env.rs @@ -56,8 +56,9 @@ impl Env for ConcreteEnv { // // should we use that instead of Immediate? SyncMode::Safe => redb::Durability::Immediate, - SyncMode::Async => redb::Durability::Eventual, - SyncMode::Fast => redb::Durability::None, + // FIXME: `Fast` maps to `Eventual` instead of `None` because of: + // + SyncMode::Async | SyncMode::Fast => redb::Durability::Eventual, // SOMEDAY: dynamic syncs are not implemented. SyncMode::FastThenSafe | SyncMode::Threshold(_) => unimplemented!(), }; diff --git a/storage/database/src/config/sync_mode.rs b/storage/database/src/config/sync_mode.rs index 1d20339..e000462 100644 --- a/storage/database/src/config/sync_mode.rs +++ b/storage/database/src/config/sync_mode.rs @@ -84,7 +84,7 @@ pub enum SyncMode { /// /// This is expected to be very slow. 
/// - /// This matches: + /// This maps to: /// - LMDB without any special sync flags /// - [`redb::Durability::Immediate`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Immediate) Safe, @@ -96,7 +96,7 @@ pub enum SyncMode { /// each transaction commit will sync to disk, /// but only eventually, not necessarily immediately. /// - /// This matches: + /// This maps to: /// - [`MDB_MAPASYNC`](http://www.lmdb.tech/doc/group__mdb__env.html#gab034ed0d8e5938090aef5ee0997f7e94) /// - [`redb::Durability::Eventual`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Eventual) Async, @@ -115,17 +115,25 @@ pub enum SyncMode { /// This is the fastest, yet unsafest option. /// /// It will cause the database to never _actively_ sync, - /// letting the OS decide when to flush data to disk. + /// letting the OS decide when to flush data to disk[^1]. /// - /// This matches: + /// This maps to: /// - [`MDB_NOSYNC`](http://www.lmdb.tech/doc/group__mdb__env.html#ga5791dd1adb09123f82dd1f331209e12e) + [`MDB_MAPASYNC`](http://www.lmdb.tech/doc/group__mdb__env.html#gab034ed0d8e5938090aef5ee0997f7e94) - /// - [`redb::Durability::None`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.None) + /// - [`redb::Durability::Eventual`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Eventual) /// - /// `monerod` reference: + /// [`monerod` reference](https://github.com/monero-project/monero/blob/7b7958bbd9d76375c47dc418b4adabba0f0b1785/src/blockchain_db/lmdb/db_lmdb.cpp#L1380-L1381). /// /// # Corruption /// In the case of a system crash, the database /// may become corrupted when using this option. + /// + /// [^1]: + /// Semantically, this variant would actually map to + /// [`redb::Durability::None`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.None), + /// however due to [`#149`](https://github.com/Cuprate/cuprate/issues/149), + /// this is not possible. 
As such, when using the `redb` backend, + /// transaction writes "should be persistent some time after `WriteTransaction::commit` returns." + /// Thus, [`SyncMode::Async`] will map to the same `redb::Durability::Eventual` as [`SyncMode::Fast`]. // // FIXME: we could call this `unsafe` // and use that terminology in the config file From 5aeb8af4b4041c5657cd83c2a8faba476891c5d2 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 10 Jul 2024 17:31:56 -0400 Subject: [PATCH 013/104] rpc: implement other JSON types (#221) * `serde/epee` feature flags * modify type generator macros * add `defaults.rs` * add `free.rs` * add `misc` module * modify `base.rs`, `contants.rs` * remove `binary_string.rs`, `status.rs` * fix macro usage * impl `other.rs` * base: re-add `AccessRequestBase` * fix default functions * tx_entry: fix link * other: fix default functions * Update rpc/types/src/other.rs Co-authored-by: Boog900 * Update rpc/types/src/other.rs Co-authored-by: Boog900 --------- Co-authored-by: Boog900 --- rpc/types/src/other.rs | 401 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 399 insertions(+), 2 deletions(-) diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index 66b7197..03cb05d 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -1,9 +1,17 @@ //! JSON types from the [`other`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#other-daemon-rpc-calls) endpoints. //! -//! . +//! All types are originally defined in [`rpc/core_rpc_server_commands_defs.h`](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h). 
//---------------------------------------------------------------------------------------------------- Import -use crate::{base::ResponseBase, macros::define_request_and_response}; +use crate::{ + base::{AccessResponseBase, ResponseBase}, + defaults::{default_false, default_string, default_true}, + macros::define_request_and_response, + misc::{ + GetOutputsOut, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status, TxEntry, TxInfo, + TxpoolStats, + }, +}; //---------------------------------------------------------------------------------------------------- TODO define_request_and_response! { @@ -18,6 +26,395 @@ define_request_and_response! { } } +define_request_and_response! { + get_transactions, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 370..=451, + GetTransactions, + Request { + txs_hashes: Vec, + // FIXME: this is documented as optional but it isn't serialized as an optional + // but it is set _somewhere_ to false in `monerod` + // + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + decode_as_json: bool = default_false(), + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + prune: bool = default_false(), + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + split: bool = default_false(), + }, + AccessResponseBase { + txs_as_hex: Vec, + txs_as_json: Vec, + missed_tx: Vec, + txs: Vec, + } +} + +define_request_and_response! { + get_alt_blocks_hashes, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 288..=308, + GetAltBlocksHashes, + Request {}, + AccessResponseBase { + blks_hashes: Vec, + } +} + +define_request_and_response! { + is_key_image_spent, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 454..=484, + IsKeyImageSpent, + Request { + key_images: Vec, + }, + AccessResponseBase { + spent_status: Vec, // TODO: should be `KeyImageSpentStatus`. + } +} + +define_request_and_response! 
{ + send_raw_transaction, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 370..=451, + SendRawTransaction, + Request { + tx_as_hex: String, + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + do_not_relay: bool = default_false(), + #[cfg_attr(feature = "serde", serde(default = "default_true"))] + do_sanity_checks: bool = default_true(), + }, + AccessResponseBase { + double_spend: bool, + fee_too_low: bool, + invalid_input: bool, + invalid_output: bool, + low_mixin: bool, + nonzero_unlock_time: bool, + not_relayed: bool, + overspend: bool, + reason: String, + sanity_check_failed: bool, + too_big: bool, + too_few_outputs: bool, + tx_extra_too_big: bool, + } +} + +define_request_and_response! { + start_mining, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 665..=691, + StartMining, + Request { + miner_address: String, + threads_count: u64, + do_background_mining: bool, + ignore_battery: bool, + }, + ResponseBase {} +} + +define_request_and_response! { + stop_mining, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 825..=843, + StopMining, + Request {}, + ResponseBase {} +} + +define_request_and_response! { + mining_status, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 846..=895, + MiningStatus, + Request {}, + ResponseBase { + active: bool, + address: String, + bg_idle_threshold: u8, + bg_ignore_battery: bool, + bg_min_idle_seconds: u8, + bg_target: u8, + block_reward: u64, + block_target: u32, + difficulty: u64, + difficulty_top64: u64, + is_background_mining_enabled: bool, + pow_algorithm: String, + speed: u64, + threads_count: u32, + wide_difficulty: String, + } +} + +define_request_and_response! { + save_bc, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 898..=916, + SaveBc, + Request {}, + ResponseBase {} +} + +define_request_and_response! 
{ + get_peer_list, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1369..=1417, + GetPeerList, + Request { + #[cfg_attr(feature = "serde", serde(default = "default_true"))] + public_only: bool = default_true(), + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + include_blocked: bool = default_false(), + }, + ResponseBase { + white_list: Vec, + gray_list: Vec, + } +} + +define_request_and_response! { + set_log_hash_rate, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1450..=1470, + SetLogHashRate, + #[derive(Copy)] + Request { + visible: bool, + }, + ResponseBase {} +} + +define_request_and_response! { + set_log_level, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1450..=1470, + SetLogLevel, + #[derive(Copy)] + Request { + level: u8, + }, + ResponseBase {} +} + +define_request_and_response! { + set_log_categories, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1494..=1517, + SetLogCategories, + Request { + #[cfg_attr(feature = "serde", serde(default = "default_string"))] + categories: String = default_string(), + }, + ResponseBase { + categories: String, + } +} + +define_request_and_response! { + set_bootstrap_daemon, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1785..=1812, + SetBootstrapDaemon, + Request { + address: String, + username: String, + password: String, + proxy: String, + }, + #[derive(Copy)] + Response { + status: Status, + } +} + +define_request_and_response! { + get_transaction_pool, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1569..=1591, + GetTransactionPool, + Request {}, + AccessResponseBase { + transactions: Vec, + spent_key_images: Vec, + } +} + +define_request_and_response! 
{ + get_transaction_pool_stats, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1712..=1732, + GetTransactionPoolStats, + Request {}, + AccessResponseBase { + pool_stats: TxpoolStats, + } +} + +define_request_and_response! { + stop_daemon, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1814..=1831, + StopDaemon, + Request {}, + ResponseBase { + status: Status, + } +} + +define_request_and_response! { + get_limit, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1852..=1874, + GetLimit, + Request {}, + ResponseBase { + limit_down: u64, + limit_up: u64, + } +} + +define_request_and_response! { + set_limit, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1876..=1903, + SetLimit, + Request { + limit_down: i64, + limit_up: i64, + }, + ResponseBase { + limit_down: i64, + limit_up: i64, + } +} + +define_request_and_response! { + out_peers, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1876..=1903, + OutPeers, + Request { + #[cfg_attr(feature = "serde", serde(default = "default_true"))] + set: bool = default_true(), + out_peers: u32, + }, + ResponseBase { + out_peers: u32, + } +} + +define_request_and_response! { + get_net_stats, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 793..=822, + GetNetStats, + Request {}, + ResponseBase { + start_time: u64, + total_packets_in: u64, + total_bytes_in: u64, + total_packets_out: u64, + total_bytes_out: u64, + } +} + +define_request_and_response! { + get_outs, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 567..=609, + GetOuts, + Request { + outputs: Vec, + get_txid: bool, + }, + ResponseBase { + outs: Vec, + } +} + +define_request_and_response! 
{ + update, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2324..=2359, + Update, + Request { + command: String, + #[cfg_attr(feature = "serde", serde(default = "default_string"))] + path: String = default_string(), + }, + ResponseBase { + auto_uri: String, + hash: String, + path: String, + update: bool, + user_uri: String, + version: String, + } +} + +define_request_and_response! { + pop_blocks, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2722..=2745, + PopBlocks, + Request { + nblocks: u64, + }, + ResponseBase { + height: u64, + } +} + +define_request_and_response! { + UNDOCUMENTED_ENDPOINT, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2798..=2823, + GetTxIdsLoose, + Request { + txid_template: String, + num_matching_bits: u32, + }, + ResponseBase { + txids: Vec, + } +} + +define_request_and_response! { + UNDOCUMENTED_ENDPOINT, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1615..=1635, + GetTransactionPoolHashes, + Request {}, + ResponseBase { + tx_hashes: Vec, + } +} + +define_request_and_response! 
{ + UNDOCUMENTED_ENDPOINT, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1419..=1448, + GetPublicNodes, + Request { + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + gray: bool = default_false(), + #[cfg_attr(feature = "serde", serde(default = "default_true"))] + white: bool = default_true(), + #[cfg_attr(feature = "serde", serde(default = "default_false"))] + include_blocked: bool = default_false(), + }, + ResponseBase { + gray: Vec, + white: Vec, + } +} + //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test { From 824651c8cf33caada7d841d139afb8eef0c8a8ca Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 10 Jul 2024 21:00:47 -0400 Subject: [PATCH 014/104] fixed-bytes: add `serde`, document feature flags (#226) * fixed-bytes: add `serde`, document feature flags * manual impl `serde::Deserialize` * add serde tests --- Cargo.lock | 5 ++ net/fixed-bytes/Cargo.toml | 9 +++- net/fixed-bytes/README.md | 10 ++++ net/fixed-bytes/src/lib.rs | 94 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 116 insertions(+), 2 deletions(-) create mode 100644 net/fixed-bytes/README.md diff --git a/Cargo.lock b/Cargo.lock index e5b795e..bab5a38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -274,6 +274,9 @@ name = "bytes" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +dependencies = [ + "serde", +] [[package]] name = "cc" @@ -641,6 +644,8 @@ name = "cuprate-fixed-bytes" version = "0.1.0" dependencies = [ "bytes", + "serde", + "serde_json", "thiserror", ] diff --git a/net/fixed-bytes/Cargo.toml b/net/fixed-bytes/Cargo.toml index b592a09..e9985e8 100644 --- a/net/fixed-bytes/Cargo.toml +++ b/net/fixed-bytes/Cargo.toml @@ -6,9 +6,14 @@ license = "MIT" authors = ["Boog900"] [features] -default = ["std"] +default = ["std", "serde"] std = 
["bytes/std", "dep:thiserror"] +serde = ["bytes/serde", "dep:serde"] [dependencies] thiserror = { workspace = true, optional = true } -bytes = { workspace = true } \ No newline at end of file +bytes = { workspace = true } +serde = { workspace = true, features = ["derive"], optional = true } + +[dev-dependencies] +serde_json = { workspace = true, features = ["std"] } \ No newline at end of file diff --git a/net/fixed-bytes/README.md b/net/fixed-bytes/README.md new file mode 100644 index 0000000..b96c9fc --- /dev/null +++ b/net/fixed-bytes/README.md @@ -0,0 +1,10 @@ +# `cuprate-fixed-bytes` +TODO + +# Feature flags +| Feature flag | Does what | +|--------------|-----------| +| `std` | TODO +| `serde` | Enables `serde` on applicable types + +`serde` is enabled by default. \ No newline at end of file diff --git a/net/fixed-bytes/src/lib.rs b/net/fixed-bytes/src/lib.rs index 8776d30..370b881 100644 --- a/net/fixed-bytes/src/lib.rs +++ b/net/fixed-bytes/src/lib.rs @@ -1,3 +1,5 @@ +#![doc = include_str!("../README.md")] + use core::{ fmt::{Debug, Formatter}, ops::{Deref, Index}, @@ -5,7 +7,11 @@ use core::{ use bytes::{BufMut, Bytes, BytesMut}; +#[cfg(feature = "serde")] +use serde::{Deserialize, Deserializer, Serialize}; + #[cfg_attr(feature = "std", derive(thiserror::Error))] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] pub enum FixedByteError { #[cfg_attr( feature = "std", @@ -43,8 +49,30 @@ impl Debug for FixedByteError { /// Internally this is just a wrapper around [`Bytes`], with the constructors checking that the length is equal to `N`. /// This implements [`Deref`] with the target being `[u8; N]`. 
#[derive(Debug, Clone, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize))] +#[cfg_attr(feature = "serde", serde(transparent))] +#[repr(transparent)] pub struct ByteArray(Bytes); +#[cfg(feature = "serde")] +impl<'de, const N: usize> Deserialize<'de> for ByteArray { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let bytes = Bytes::deserialize(deserializer)?; + let len = bytes.len(); + if len == N { + Ok(Self(bytes)) + } else { + Err(serde::de::Error::invalid_length( + len, + &N.to_string().as_str(), + )) + } + } +} + impl ByteArray { pub fn take_bytes(self) -> Bytes { self.0 @@ -88,8 +116,30 @@ impl TryFrom> for ByteArray { } #[derive(Debug, Clone, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(Serialize))] +#[cfg_attr(feature = "serde", serde(transparent))] +#[repr(transparent)] pub struct ByteArrayVec(Bytes); +#[cfg(feature = "serde")] +impl<'de, const N: usize> Deserialize<'de> for ByteArrayVec { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let bytes = Bytes::deserialize(deserializer)?; + let len = bytes.len(); + if len % N == 0 { + Ok(Self(bytes)) + } else { + Err(serde::de::Error::invalid_length( + len, + &N.to_string().as_str(), + )) + } + } +} + impl ByteArrayVec { pub fn len(&self) -> usize { self.0.len() / N @@ -197,6 +247,8 @@ impl Index for ByteArrayVec { #[cfg(test)] mod tests { + use serde_json::{from_str, to_string}; + use super::*; #[test] @@ -207,4 +259,46 @@ mod tests { assert_eq!(bytes.len(), 100); let _ = bytes[99]; } + + /// Tests that `serde` works on [`ByteArray`]. + #[test] + #[cfg(feature = "serde")] + fn byte_array_serde() { + let b = ByteArray::from([1, 0, 0, 0, 1]); + let string = to_string(&b).unwrap(); + assert_eq!(string, "[1,0,0,0,1]"); + let b2 = from_str::>(&string).unwrap(); + assert_eq!(b, b2); + } + + /// Tests that `serde` works on [`ByteArrayVec`]. 
+ #[test] + #[cfg(feature = "serde")] + fn byte_array_vec_serde() { + let b = ByteArrayVec::from([1, 0, 0, 0, 1]); + let string = to_string(&b).unwrap(); + assert_eq!(string, "[1,0,0,0,1]"); + let b2 = from_str::>(&string).unwrap(); + assert_eq!(b, b2); + } + + /// Tests that bad input `serde` fails on [`ByteArray`]. + #[test] + #[cfg(feature = "serde")] + #[should_panic( + expected = r#"called `Result::unwrap()` on an `Err` value: Error("invalid length 4, expected 5", line: 0, column: 0)"# + )] + fn byte_array_bad_deserialize() { + from_str::>("[1,0,0,0]").unwrap(); + } + + /// Tests that bad input `serde` fails on [`ByteArrayVec`]. + #[test] + #[cfg(feature = "serde")] + #[should_panic( + expected = r#"called `Result::unwrap()` on an `Err` value: Error("invalid length 4, expected 5", line: 0, column: 0)"# + )] + fn byte_array_vec_bad_deserialize() { + from_str::>("[1,0,0,0]").unwrap(); + } } From fbae3df203213093561961f2d03245505dc5332f Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Thu, 11 Jul 2024 09:20:56 -0400 Subject: [PATCH 015/104] storage: move table generator macro `blockchain` -> `database` (#222) * move table generator macro `blockchain` -> `database` * blockchain: fix imports * docs * fix import ordering --- Cargo.lock | 2 +- storage/blockchain/Cargo.toml | 1 - storage/blockchain/README.md | 3 +- storage/blockchain/src/free.rs | 2 +- storage/blockchain/src/lib.rs | 13 +- storage/blockchain/src/open_tables.rs | 190 ---------- storage/blockchain/src/ops/block.rs | 2 +- storage/blockchain/src/ops/blockchain.rs | 3 +- storage/blockchain/src/ops/key_image.rs | 3 +- storage/blockchain/src/ops/mod.rs | 3 +- storage/blockchain/src/ops/output.rs | 3 +- storage/blockchain/src/ops/tx.rs | 3 +- storage/blockchain/src/service/read.rs | 2 +- storage/blockchain/src/service/tests.rs | 3 +- storage/blockchain/src/service/write.rs | 2 +- storage/blockchain/src/tables.rs | 331 +----------------- storage/blockchain/src/tests.rs | 5 +- storage/database/Cargo.toml | 1 + 
storage/database/README.md | 9 + storage/database/src/lib.rs | 35 +- storage/database/src/tables.rs | 427 +++++++++++++++++++++++ 21 files changed, 491 insertions(+), 552 deletions(-) delete mode 100644 storage/blockchain/src/open_tables.rs create mode 100644 storage/database/src/tables.rs diff --git a/Cargo.lock b/Cargo.lock index bab5a38..fbb68b0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -507,7 +507,6 @@ dependencies = [ "hex", "hex-literal", "monero-serai", - "paste", "pretty_assertions", "proptest", "rayon", @@ -601,6 +600,7 @@ dependencies = [ "cfg-if", "heed", "page_size", + "paste", "redb", "serde", "tempfile", diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index fe242bc..79d0dc4 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -30,7 +30,6 @@ bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min curve25519-dalek = { workspace = true } cuprate-pruning = { path = "../../pruning" } monero-serai = { workspace = true, features = ["std"] } -paste = { workspace = true } serde = { workspace = true, optional = true } # `service` feature. 
diff --git a/storage/blockchain/README.md b/storage/blockchain/README.md index 8a2162c..58c06e0 100644 --- a/storage/blockchain/README.md +++ b/storage/blockchain/README.md @@ -67,8 +67,7 @@ use cuprate_blockchain::{ DatabaseRo, DatabaseRw, TxRo, TxRw, }, config::ConfigBuilder, - tables::{Tables, TablesMut}, - OpenTables, + tables::{Tables, TablesMut, OpenTables}, }; # fn main() -> Result<(), Box> { diff --git a/storage/blockchain/src/free.rs b/storage/blockchain/src/free.rs index bcbb897..87e63d7 100644 --- a/storage/blockchain/src/free.rs +++ b/storage/blockchain/src/free.rs @@ -3,7 +3,7 @@ //---------------------------------------------------------------------------------------------------- Import use cuprate_database::{ConcreteEnv, Env, EnvInner, InitError, RuntimeError, TxRw}; -use crate::{config::Config, open_tables::OpenTables}; +use crate::{config::Config, tables::OpenTables}; //---------------------------------------------------------------------------------------------------- Free functions /// Open the blockchain database, using the passed [`Config`]. diff --git a/storage/blockchain/src/lib.rs b/storage/blockchain/src/lib.rs index ad33e2a..9db0862 100644 --- a/storage/blockchain/src/lib.rs +++ b/storage/blockchain/src/lib.rs @@ -114,23 +114,18 @@ compile_error!("Cuprate is only compatible with 64-bit CPUs"); // // Documentation for each module is located in the respective file. 
-pub mod config; - mod constants; -pub use constants::{DATABASE_CORRUPT_MSG, DATABASE_VERSION}; - -mod open_tables; -pub use open_tables::OpenTables; - mod free; + +pub use constants::{DATABASE_CORRUPT_MSG, DATABASE_VERSION}; +pub use cuprate_database; pub use free::open; +pub mod config; pub mod ops; pub mod tables; pub mod types; -pub use cuprate_database; - //---------------------------------------------------------------------------------------------------- Feature-gated #[cfg(feature = "service")] pub mod service; diff --git a/storage/blockchain/src/open_tables.rs b/storage/blockchain/src/open_tables.rs deleted file mode 100644 index b37d260..0000000 --- a/storage/blockchain/src/open_tables.rs +++ /dev/null @@ -1,190 +0,0 @@ -//! TODO - -//---------------------------------------------------------------------------------------------------- Import -use cuprate_database::{EnvInner, RuntimeError}; - -use crate::tables::{TablesIter, TablesMut}; - -//---------------------------------------------------------------------------------------------------- Table function macro -/// `crate`-private macro for callings functions on all tables. -/// -/// This calls the function `$fn` with the optional -/// arguments `$args` on all tables - returning early -/// (within whatever scope this is called) if any -/// of the function calls error. -/// -/// Else, it evaluates to an `Ok((tuple, of, all, table, types, ...))`, -/// i.e., an `impl Table[Mut]` wrapped in `Ok`. -macro_rules! call_fn_on_all_tables_or_early_return { - ( - $($fn:ident $(::)?)* - ( - $($arg:ident),* $(,)? 
- ) - ) => {{ - Ok(( - $($fn ::)*<$crate::tables::BlockInfos>($($arg),*)?, - $($fn ::)*<$crate::tables::BlockBlobs>($($arg),*)?, - $($fn ::)*<$crate::tables::BlockHeights>($($arg),*)?, - $($fn ::)*<$crate::tables::KeyImages>($($arg),*)?, - $($fn ::)*<$crate::tables::NumOutputs>($($arg),*)?, - $($fn ::)*<$crate::tables::PrunedTxBlobs>($($arg),*)?, - $($fn ::)*<$crate::tables::PrunableHashes>($($arg),*)?, - $($fn ::)*<$crate::tables::Outputs>($($arg),*)?, - $($fn ::)*<$crate::tables::PrunableTxBlobs>($($arg),*)?, - $($fn ::)*<$crate::tables::RctOutputs>($($arg),*)?, - $($fn ::)*<$crate::tables::TxBlobs>($($arg),*)?, - $($fn ::)*<$crate::tables::TxIds>($($arg),*)?, - $($fn ::)*<$crate::tables::TxHeights>($($arg),*)?, - $($fn ::)*<$crate::tables::TxOutputs>($($arg),*)?, - $($fn ::)*<$crate::tables::TxUnlockTime>($($arg),*)?, - )) - }}; -} -pub(crate) use call_fn_on_all_tables_or_early_return; - -//---------------------------------------------------------------------------------------------------- OpenTables -/// Open all tables at once. -/// -/// This trait encapsulates the functionality of opening all tables at once. -/// It can be seen as the "constructor" for the [`Tables`](crate::tables::Tables) object. -/// -/// Note that this is already implemented on [`cuprate_database::EnvInner`], thus: -/// - You don't need to implement this -/// - It can be called using `env_inner.open_tables()` notation -/// -/// # Example -/// ```rust -/// use cuprate_blockchain::{ -/// cuprate_database::{Env, EnvInner}, -/// config::ConfigBuilder, -/// tables::{Tables, TablesMut}, -/// OpenTables, -/// }; -/// -/// # fn main() -> Result<(), Box> { -/// // Create a configuration for the database environment. -/// let tmp_dir = tempfile::tempdir()?; -/// let db_dir = tmp_dir.path().to_owned(); -/// let config = ConfigBuilder::new() -/// .db_directory(db_dir.into()) -/// .build(); -/// -/// // Initialize the database environment. 
-/// let env = cuprate_blockchain::open(config)?; -/// -/// // Open up a transaction. -/// let env_inner = env.env_inner(); -/// let tx_rw = env_inner.tx_rw()?; -/// -/// // Open _all_ tables in write mode using [`OpenTables::open_tables_mut`]. -/// // Note how this is being called on `env_inner`. -/// // | -/// // v -/// let mut tables = env_inner.open_tables_mut(&tx_rw)?; -/// # Ok(()) } -/// ``` -pub trait OpenTables<'env> { - /// The read-only transaction type of the backend. - type Ro<'a>; - /// The read-write transaction type of the backend. - type Rw<'a>; - - /// Open all tables in read/iter mode. - /// - /// This calls [`EnvInner::open_db_ro`] on all database tables - /// and returns a structure that allows access to all tables. - /// - /// # Errors - /// This will only return [`RuntimeError::Io`] if it errors. - /// - /// As all tables are created upon [`crate::open`], - /// this function will never error because a table doesn't exist. - fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result; - - /// Open all tables in read-write mode. - /// - /// This calls [`EnvInner::open_db_rw`] on all database tables - /// and returns a structure that allows access to all tables. - /// - /// # Errors - /// This will only return [`RuntimeError::Io`] on errors. - fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result; - - /// Create all database tables. - /// - /// This will create all the [`Table`](cuprate_database::Table)s - /// found in [`tables`](crate::tables). - /// - /// # Errors - /// This will only return [`RuntimeError::Io`] on errors. - fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError>; -} - -impl<'env, Ei> OpenTables<'env> for Ei -where - Ei: EnvInner<'env>, -{ - type Ro<'a> = >::Ro<'a>; - type Rw<'a> = >::Rw<'a>; - - fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result { - call_fn_on_all_tables_or_early_return! 
{ - Self::open_db_ro(self, tx_ro) - } - } - - fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result { - call_fn_on_all_tables_or_early_return! { - Self::open_db_rw(self, tx_rw) - } - } - - fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError> { - match call_fn_on_all_tables_or_early_return! { - Self::create_db(self, tx_rw) - } { - Ok(_) => Ok(()), - Err(e) => Err(e), - } - } -} - -//---------------------------------------------------------------------------------------------------- Tests -#[cfg(test)] -mod test { - use std::borrow::Cow; - - use cuprate_database::{Env, EnvInner}; - - use crate::{config::ConfigBuilder, tests::tmp_concrete_env}; - - use super::*; - - /// Tests that [`crate::open`] creates all tables. - #[test] - fn test_all_tables_are_created() { - let (env, _tmp) = tmp_concrete_env(); - let env_inner = env.env_inner(); - let tx_ro = env_inner.tx_ro().unwrap(); - env_inner.open_tables(&tx_ro).unwrap(); - } - - /// Tests that direct usage of - /// [`cuprate_database::ConcreteEnv`] - /// does NOT create all tables. 
- #[test] - #[should_panic(expected = "`Result::unwrap()` on an `Err` value: TableNotFound")] - fn test_no_tables_are_created() { - let tempdir = tempfile::tempdir().unwrap(); - let config = ConfigBuilder::new() - .db_directory(Cow::Owned(tempdir.path().into())) - .low_power() - .build(); - let env = cuprate_database::ConcreteEnv::open(config.db_config).unwrap(); - - let env_inner = env.env_inner(); - let tx_ro = env_inner.tx_ro().unwrap(); - env_inner.open_tables(&tx_ro).unwrap(); - } -} diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index 9097f0e..7e0284d 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -271,8 +271,8 @@ mod test { use super::*; use crate::{ - open_tables::OpenTables, ops::tx::{get_tx, tx_exists}, + tables::OpenTables, tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen}, }; diff --git a/storage/blockchain/src/ops/blockchain.rs b/storage/blockchain/src/ops/blockchain.rs index 16e0a3c..e93af3d 100644 --- a/storage/blockchain/src/ops/blockchain.rs +++ b/storage/blockchain/src/ops/blockchain.rs @@ -87,9 +87,8 @@ mod test { use super::*; use crate::{ - open_tables::OpenTables, ops::block::add_block, - tables::Tables, + tables::{OpenTables, Tables}, tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen}, }; diff --git a/storage/blockchain/src/ops/key_image.rs b/storage/blockchain/src/ops/key_image.rs index a518490..19444d6 100644 --- a/storage/blockchain/src/ops/key_image.rs +++ b/storage/blockchain/src/ops/key_image.rs @@ -52,8 +52,7 @@ mod test { use super::*; use crate::{ - open_tables::OpenTables, - tables::{Tables, TablesMut}, + tables::{OpenTables, Tables, TablesMut}, tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen}, }; diff --git a/storage/blockchain/src/ops/mod.rs b/storage/blockchain/src/ops/mod.rs index 5821120..7210ae7 100644 --- a/storage/blockchain/src/ops/mod.rs +++ 
b/storage/blockchain/src/ops/mod.rs @@ -61,9 +61,8 @@ //! Env, EnvInner, //! DatabaseRo, DatabaseRw, TxRo, TxRw, //! }, -//! OpenTables, //! config::ConfigBuilder, -//! tables::{Tables, TablesMut}, +//! tables::{Tables, TablesMut, OpenTables}, //! ops::block::{add_block, pop_block}, //! }; //! diff --git a/storage/blockchain/src/ops/output.rs b/storage/blockchain/src/ops/output.rs index f08f7b3..dfc52f2 100644 --- a/storage/blockchain/src/ops/output.rs +++ b/storage/blockchain/src/ops/output.rs @@ -254,8 +254,7 @@ mod test { use cuprate_database::{Env, EnvInner}; use crate::{ - open_tables::OpenTables, - tables::{Tables, TablesMut}, + tables::{OpenTables, Tables, TablesMut}, tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen}, types::OutputFlags, }; diff --git a/storage/blockchain/src/ops/tx.rs b/storage/blockchain/src/ops/tx.rs index 6edfbb2..f4a2675 100644 --- a/storage/blockchain/src/ops/tx.rs +++ b/storage/blockchain/src/ops/tx.rs @@ -331,8 +331,7 @@ mod test { use cuprate_test_utils::data::{tx_v1_sig0, tx_v1_sig2, tx_v2_rct3}; use crate::{ - open_tables::OpenTables, - tables::Tables, + tables::{OpenTables, Tables}, tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen}, }; diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index 7f856cc..3f0b263 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -22,7 +22,6 @@ use cuprate_types::{ use crate::{ config::ReaderThreads, - open_tables::OpenTables, ops::{ block::{ block_exists, get_block_extended_header_from_height, get_block_height, get_block_info, @@ -35,6 +34,7 @@ use crate::{ free::{compact_history_genesis_not_included, compact_history_index_to_height_offset}, types::{ResponseReceiver, ResponseResult, ResponseSender}, }, + tables::OpenTables, tables::{BlockHeights, BlockInfos, Tables}, types::{Amount, AmountIndex, BlockHash, BlockHeight, KeyImage, PreRctOutputId}, }; diff --git 
a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs index d163474..4f3fbe4 100644 --- a/storage/blockchain/src/service/tests.rs +++ b/storage/blockchain/src/service/tests.rs @@ -24,14 +24,13 @@ use cuprate_types::{ use crate::{ config::ConfigBuilder, - open_tables::OpenTables, ops::{ block::{get_block_extended_header_from_height, get_block_info}, blockchain::chain_height, output::id_to_output_on_chain, }, service::{init, DatabaseReadHandle, DatabaseWriteHandle}, - tables::{Tables, TablesIter}, + tables::{OpenTables, Tables, TablesIter}, tests::AssertTableLen, types::{Amount, AmountIndex, PreRctOutputId}, }; diff --git a/storage/blockchain/src/service/write.rs b/storage/blockchain/src/service/write.rs index 42d9694..041ae7b 100644 --- a/storage/blockchain/src/service/write.rs +++ b/storage/blockchain/src/service/write.rs @@ -16,8 +16,8 @@ use cuprate_types::{ }; use crate::{ - open_tables::OpenTables, service::types::{ResponseReceiver, ResponseResult, ResponseSender}, + tables::OpenTables, }; //---------------------------------------------------------------------------------------------------- Constants diff --git a/storage/blockchain/src/tables.rs b/storage/blockchain/src/tables.rs index 447faa6..caac787 100644 --- a/storage/blockchain/src/tables.rs +++ b/storage/blockchain/src/tables.rs @@ -4,7 +4,7 @@ //! This module contains all the table definitions used by `cuprate_blockchain`. //! //! The zero-sized structs here represents the table type; -//! they all are essentially marker types that implement [`Table`]. +//! they all are essentially marker types that implement [`cuprate_database::Table`]. //! //! Table structs are `CamelCase`, and their static string //! names used by the actual database backend are `snake_case`. @@ -14,311 +14,14 @@ //! # Traits //! This module also contains a set of traits for //! accessing _all_ tables defined here at once. -//! -//! 
For example, this is the object returned by [`OpenTables::open_tables`](crate::OpenTables::open_tables). //---------------------------------------------------------------------------------------------------- Import -use cuprate_database::{DatabaseIter, DatabaseRo, DatabaseRw, Table}; - use crate::types::{ Amount, AmountIndex, AmountIndices, BlockBlob, BlockHash, BlockHeight, BlockInfo, KeyImage, Output, PreRctOutputId, PrunableBlob, PrunableHash, PrunedBlob, RctOutput, TxBlob, TxHash, TxId, UnlockTime, }; -//---------------------------------------------------------------------------------------------------- Sealed -/// Private module, should not be accessible outside this crate. -pub(super) mod private { - /// Private sealed trait. - /// - /// Cannot be implemented outside this crate. - pub trait Sealed {} -} - -//---------------------------------------------------------------------------------------------------- `trait Tables[Mut]` -/// Creates: -/// - `pub trait Tables` -/// - `pub trait TablesIter` -/// - `pub trait TablesMut` -/// - Blanket implementation for `(tuples, containing, all, open, database, tables, ...)` -/// -/// For why this exists, see: . -macro_rules! define_trait_tables { - ($( - // The `T: Table` type The index in a tuple - // | containing all tables - // v v - $table:ident => $index:literal - ),* $(,)?) => { paste::paste! { - /// Object containing all opened [`Table`]s in read-only mode. - /// - /// This is an encapsulated object that contains all - /// available [`Table`]'s in read-only mode. - /// - /// It is a `Sealed` trait and is only implemented on a - /// `(tuple, containing, all, table, types, ...)`. - /// - /// This is used to return a _single_ object from functions like - /// [`OpenTables::open_tables`](crate::OpenTables::open_tables) rather - /// than the tuple containing the tables itself. 
- /// - /// To replace `tuple.0` style indexing, `field_accessor_functions()` - /// are provided on this trait, which essentially map the object to - /// fields containing the particular database table, for example: - /// ```rust,ignore - /// let tables = open_tables(); - /// - /// // The accessor function `block_infos()` returns the field - /// // containing an open database table for `BlockInfos`. - /// let _ = tables.block_infos(); - /// ``` - /// - /// See also: - /// - [`TablesMut`] - /// - [`TablesIter`] - pub trait Tables: private::Sealed { - // This expands to creating `fn field_accessor_functions()` - // for each passed `$table` type. - // - // It is essentially a mapping to the field - // containing the proper opened database table. - // - // The function name of the function is - // the table type in `snake_case`, e.g., `block_info_v1s()`. - $( - /// Access an opened - #[doc = concat!("[`", stringify!($table), "`]")] - /// database. - fn [<$table:snake>](&self) -> &impl DatabaseRo<$table>; - )* - - /// This returns `true` if all tables are empty. - /// - /// # Errors - /// This returns errors on regular database errors. - fn all_tables_empty(&self) -> Result; - } - - /// Object containing all opened [`Table`]s in read + iter mode. - /// - /// This is the same as [`Tables`] but includes `_iter()` variants. - /// - /// Note that this trait is a supertrait of `Tables`, - /// as in it can use all of its functions as well. - /// - /// See [`Tables`] for documentation - this trait exists for the same reasons. - pub trait TablesIter: private::Sealed + Tables { - $( - /// Access an opened read-only + iterable - #[doc = concat!("[`", stringify!($table), "`]")] - /// database. - fn [<$table:snake _iter>](&self) -> &(impl DatabaseRo<$table> + DatabaseIter<$table>); - )* - } - - /// Object containing all opened [`Table`]s in write mode. - /// - /// This is the same as [`Tables`] but for mutable accesses. 
- /// - /// Note that this trait is a supertrait of `Tables`, - /// as in it can use all of its functions as well. - /// - /// See [`Tables`] for documentation - this trait exists for the same reasons. - pub trait TablesMut: private::Sealed + Tables { - $( - /// Access an opened - #[doc = concat!("[`", stringify!($table), "`]")] - /// database. - fn [<$table:snake _mut>](&mut self) -> &mut impl DatabaseRw<$table>; - )* - } - - // Implement `Sealed` for all table types. - impl<$([<$table:upper>]),*> private::Sealed for ($([<$table:upper>]),*) {} - - // This creates a blanket-implementation for - // `(tuple, containing, all, table, types)`. - // - // There is a generic defined here _for each_ `$table` input. - // Specifically, the generic letters are just the table types in UPPERCASE. - // Concretely, this expands to something like: - // ```rust - // impl - // ``` - impl<$([<$table:upper>]),*> Tables - // We are implementing `Tables` on a tuple that - // contains all those generics specified, i.e., - // a tuple containing all open table types. - // - // Concretely, this expands to something like: - // ```rust - // (BLOCKINFOSV1S, BLOCKINFOSV2S, BLOCKINFOSV3S, [...]) - // ``` - // which is just a tuple of the generics defined above. - for ($([<$table:upper>]),*) - where - // This expands to a where bound that asserts each element - // in the tuple implements some database table type. - // - // Concretely, this expands to something like: - // ```rust - // BLOCKINFOSV1S: DatabaseRo + DatabaseIter, - // BLOCKINFOSV2S: DatabaseRo + DatabaseIter, - // [...] - // ``` - $( - [<$table:upper>]: DatabaseRo<$table>, - )* - { - $( - // The function name of the accessor function is - // the table type in `snake_case`, e.g., `block_info_v1s()`. - #[inline] - fn [<$table:snake>](&self) -> &impl DatabaseRo<$table> { - // The index of the database table in - // the tuple implements the table trait. 
- &self.$index - } - )* - - fn all_tables_empty(&self) -> Result { - $( - if !DatabaseRo::is_empty(&self.$index)? { - return Ok(false); - } - )* - Ok(true) - } - } - - // This is the same as the above - // `Tables`, but for `TablesIter`. - impl<$([<$table:upper>]),*> TablesIter - for ($([<$table:upper>]),*) - where - $( - [<$table:upper>]: DatabaseRo<$table> + DatabaseIter<$table>, - )* - { - $( - // The function name of the accessor function is - // the table type in `snake_case` + `_iter`, e.g., `block_info_v1s_iter()`. - #[inline] - fn [<$table:snake _iter>](&self) -> &(impl DatabaseRo<$table> + DatabaseIter<$table>) { - &self.$index - } - )* - } - - // This is the same as the above - // `Tables`, but for `TablesMut`. - impl<$([<$table:upper>]),*> TablesMut - for ($([<$table:upper>]),*) - where - $( - [<$table:upper>]: DatabaseRw<$table>, - )* - { - $( - // The function name of the mutable accessor function is - // the table type in `snake_case` + `_mut`, e.g., `block_info_v1s_mut()`. - #[inline] - fn [<$table:snake _mut>](&mut self) -> &mut impl DatabaseRw<$table> { - &mut self.$index - } - )* - } - }}; -} - -// Input format: $table_type => $index -// -// The $index: -// - Simply increments by 1 for each table -// - Must be 0.. -// - Must end at the total amount of table types - 1 -// -// Compile errors will occur if these aren't satisfied. -// -// $index is just the `tuple.$index`, as the above [`define_trait_tables`] -// macro has a blanket impl for `(all, table, types, ...)` and we must map -// each type to a tuple index explicitly. -// -// FIXME: there's definitely an automatic way to this :) -define_trait_tables! 
{ - BlockInfos => 0, - BlockBlobs => 1, - BlockHeights => 2, - KeyImages => 3, - NumOutputs => 4, - PrunedTxBlobs => 5, - PrunableHashes => 6, - Outputs => 7, - PrunableTxBlobs => 8, - RctOutputs => 9, - TxBlobs => 10, - TxIds => 11, - TxHeights => 12, - TxOutputs => 13, - TxUnlockTime => 14, -} - -//---------------------------------------------------------------------------------------------------- Table macro -/// Create all tables, should be used _once_. -/// -/// Generating this macro once and using `$()*` is probably -/// faster for compile times than calling the macro _per_ table. -/// -/// All tables are zero-sized table structs, and implement the `Table` trait. -/// -/// Table structs are automatically `CamelCase`, -/// and their static string names are automatically `snake_case`. -macro_rules! tables { - ( - $( - $(#[$attr:meta])* // Documentation and any `derive`'s. - $table:ident, // The table name + doubles as the table struct name. - $key:ty => // Key type. - $value:ty // Value type. - ),* $(,)? - ) => { - paste::paste! { $( - // Table struct. - $(#[$attr])* - // The below test show the `snake_case` table name in cargo docs. - #[doc = concat!("- Key: [`", stringify!($key), "`]")] - #[doc = concat!("- Value: [`", stringify!($value), "`]")] - /// - /// ## Table Name - /// ```rust - /// # use cuprate_blockchain::{*,tables::*}; - /// use cuprate_database::Table; - #[doc = concat!( - "assert_eq!(", - stringify!([<$table:camel>]), - "::NAME, \"", - stringify!([<$table:snake>]), - "\");", - )] - /// ``` - #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] - #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash)] - pub struct [<$table:camel>]; - - // Implement the `Sealed` in this file. - // Required by `Table`. - impl private::Sealed for [<$table:camel>] {} - - // Table trait impl. 
- impl Table for [<$table:camel>] { - const NAME: &'static str = stringify!([<$table:snake>]); - type Key = $key; - type Value = $value; - } - )* } - }; -} - //---------------------------------------------------------------------------------------------------- Tables // Notes: // - Keep this sorted A-Z (by table name) @@ -326,23 +29,23 @@ macro_rules! tables { // - If adding/changing a table also edit: // - the tests in `src/backend/tests.rs` // - `call_fn_on_all_tables_or_early_return!()` macro in `src/open_tables.rs` -tables! { +cuprate_database::define_tables! { /// Serialized block blobs (bytes). /// /// Contains the serialized version of all blocks. - BlockBlobs, + 0 => BlockBlobs, BlockHeight => BlockBlob, /// Block heights. /// /// Contains the height of all blocks. - BlockHeights, + 1 => BlockHeights, BlockHash => BlockHeight, /// Block information. /// /// Contains metadata of all blocks. - BlockInfos, + 2 => BlockInfos, BlockHeight => BlockInfo, /// Set of key images. @@ -351,38 +54,38 @@ tables! { /// /// This table has `()` as the value type, as in, /// it is a set of key images. - KeyImages, + 3 => KeyImages, KeyImage => (), /// Maps an output's amount to the number of outputs with that amount. /// /// For example, if there are 5 outputs with `amount = 123` /// then calling `get(123)` on this table will return 5. - NumOutputs, + 4 => NumOutputs, Amount => u64, /// Pre-RCT output data. - Outputs, + 5 => Outputs, PreRctOutputId => Output, /// Pruned transaction blobs (bytes). /// /// Contains the pruned portion of serialized transaction data. - PrunedTxBlobs, + 6 => PrunedTxBlobs, TxId => PrunedBlob, /// Prunable transaction blobs (bytes). /// /// Contains the prunable portion of serialized transaction data. // SOMEDAY: impl when `monero-serai` supports pruning - PrunableTxBlobs, + 7 => PrunableTxBlobs, TxId => PrunableBlob, /// Prunable transaction hashes. /// /// Contains the prunable portion of transaction hashes. 
// SOMEDAY: impl when `monero-serai` supports pruning - PrunableHashes, + 8 => PrunableHashes, TxId => PrunableHash, // SOMEDAY: impl a properties table: @@ -392,40 +95,40 @@ tables! { // StorableString => StorableVec, /// RCT output data. - RctOutputs, + 9 => RctOutputs, AmountIndex => RctOutput, /// Transaction blobs (bytes). /// /// Contains the serialized version of all transactions. // SOMEDAY: remove when `monero-serai` supports pruning - TxBlobs, + 10 => TxBlobs, TxId => TxBlob, /// Transaction indices. /// /// Contains the indices all transactions. - TxIds, + 11 => TxIds, TxHash => TxId, /// Transaction heights. /// /// Contains the block height associated with all transactions. - TxHeights, + 12 => TxHeights, TxId => BlockHeight, /// Transaction outputs. /// /// Contains the list of `AmountIndex`'s of the /// outputs associated with all transactions. - TxOutputs, + 13 => TxOutputs, TxId => AmountIndices, /// Transaction unlock time. /// /// Contains the unlock time of transactions IF they have one. /// Transactions without unlock times will not exist in this table. - TxUnlockTime, + 14 => TxUnlockTime, TxId => UnlockTime, } diff --git a/storage/blockchain/src/tests.rs b/storage/blockchain/src/tests.rs index ec2f18e..65527e1 100644 --- a/storage/blockchain/src/tests.rs +++ b/storage/blockchain/src/tests.rs @@ -11,7 +11,10 @@ use pretty_assertions::assert_eq; use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner}; -use crate::{config::ConfigBuilder, open_tables::OpenTables, tables::Tables}; +use crate::{ + config::ConfigBuilder, + tables::{OpenTables, Tables}, +}; //---------------------------------------------------------------------------------------------------- Struct /// Named struct to assert the length of all tables. 
diff --git a/storage/database/Cargo.toml b/storage/database/Cargo.toml index e2dad70..a70457f 100644 --- a/storage/database/Cargo.toml +++ b/storage/database/Cargo.toml @@ -21,6 +21,7 @@ bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_ bytes = { workspace = true } cfg-if = { workspace = true } page_size = { version = "0.6.0" } # Needed for database resizes, they must be a multiple of the OS page size. +paste = { workspace = true } thiserror = { workspace = true } # Optional features. diff --git a/storage/database/README.md b/storage/database/README.md index d7a9b92..5bcec60 100644 --- a/storage/database/README.md +++ b/storage/database/README.md @@ -80,6 +80,15 @@ and use `` everywhere it is stored instead. This would allow generic-backed dynamic runtime selection of the database backend, i.e. the user can select which database backend they use. --> +# Defining tables +Most likely, your crate building on-top of `cuprate_database` will +want to define all tables used at compile time. + +If this is the case, consider using the [`define_tables`] macro +to bulk generate zero-sized marker types that implement [`Table`]. + +This macro also generates other convenient traits specific to _your_ tables. + # Feature flags Different database backends are enabled by the feature flags: - `heed` (LMDB) diff --git a/storage/database/src/lib.rs b/storage/database/src/lib.rs index 31c5c95..da36b0d 100644 --- a/storage/database/src/lib.rs +++ b/storage/database/src/lib.rs @@ -78,6 +78,8 @@ clippy::module_inception, clippy::redundant_pub_crate, clippy::option_if_let_else, + + // unused_crate_dependencies, // false-positive with `paste` )] // Allow some lints when running in debug mode. #![cfg_attr( @@ -105,42 +107,39 @@ // Documentation for each module is located in the respective file. 
mod backend; -pub use backend::ConcreteEnv; +mod constants; +mod database; +mod env; +mod error; +mod key; +mod storable; +mod table; +mod tables; +mod transaction; pub mod config; +pub mod resize; -mod constants; +pub use backend::ConcreteEnv; pub use constants::{ DATABASE_BACKEND, DATABASE_CORRUPT_MSG, DATABASE_DATA_FILENAME, DATABASE_LOCK_FILENAME, }; - -mod database; pub use database::{DatabaseIter, DatabaseRo, DatabaseRw}; - -mod env; pub use env::{Env, EnvInner}; - -mod error; pub use error::{InitError, RuntimeError}; - -pub mod resize; - -mod key; pub use key::{Key, KeyCompare}; - -mod storable; pub use storable::{Storable, StorableBytes, StorableStr, StorableVec}; - -mod table; pub use table::Table; - -mod transaction; pub use transaction::{TxRo, TxRw}; //---------------------------------------------------------------------------------------------------- Private #[cfg(test)] pub(crate) mod tests; +// Used inside public facing macros. +#[doc(hidden)] +pub use paste; + //---------------------------------------------------------------------------------------------------- // HACK: needed to satisfy the `unused_crate_dependencies` lint. cfg_if::cfg_if! { diff --git a/storage/database/src/tables.rs b/storage/database/src/tables.rs new file mode 100644 index 0000000..c2ac821 --- /dev/null +++ b/storage/database/src/tables.rs @@ -0,0 +1,427 @@ +//! Database table definition macro. + +//---------------------------------------------------------------------------------------------------- Import + +//---------------------------------------------------------------------------------------------------- Table macro +/// Define all table types. +/// +/// # Purpose +/// This macro allows you to define all database tables in one place. +/// +/// A by-product of this macro is that it defines some +/// convenient traits specific to _your_ tables +/// (see [Output](#output)). +/// +/// # Inputs +/// This macro expects a list of tables, and their key/value types. 
+/// +/// This syntax is as follows: +/// +/// ```rust +/// cuprate_database::define_tables! { +/// /// Any extra attributes you'd like to add to +/// /// this table type, e.g. docs or derives. +/// +/// 0 => TableName, +/// // ▲ ▲ +/// // │ └─ Table struct name. The macro generates this for you. +/// // │ +/// // Incrementing index. This must start at 0 +/// // and increment by 1 per table added. +/// +/// u8 => u64, +/// // ▲ ▲ +/// // │ └─ Table value type. +/// // │ +/// // Table key type. +/// +/// // Another table. +/// 1 => TableName2, +/// i8 => i64, +/// } +/// ``` +/// +/// An example: +/// ```rust +/// use cuprate_database::{ +/// ConcreteEnv, Table, +/// config::ConfigBuilder, +/// Env, EnvInner, +/// DatabaseRo, DatabaseRw, TxRo, TxRw, +/// }; +/// +/// // This generates `pub struct Table{1,2,3}` +/// // where all those implement `Table` with +/// // the defined name and key/value types. +/// // +/// // It also generate traits specific to our tables. +/// cuprate_database::define_tables! { +/// 0 => Table1, +/// u32 => i32, +/// +/// /// This one has extra docs. +/// 1 => Table2, +/// u64 => (), +/// +/// 2 => Table3, +/// i32 => i32, +/// } +/// +/// # fn main() -> Result<(), Box> { +/// # let tmp_dir = tempfile::tempdir()?; +/// # let db_dir = tmp_dir.path().to_owned(); +/// # let config = ConfigBuilder::new(db_dir.into()).build(); +/// // Open the database. +/// let env = ConcreteEnv::open(config)?; +/// let env_inner = env.env_inner(); +/// +/// // Open the table we just defined. +/// { +/// let tx_rw = env_inner.tx_rw()?; +/// env_inner.create_db::(&tx_rw)?; +/// let mut table = env_inner.open_db_rw::(&tx_rw)?; +/// +/// // Write data to the table. +/// table.put(&0, &1)?; +/// +/// drop(table); +/// TxRw::commit(tx_rw)?; +/// } +/// +/// // Read the data, assert it is correct. 
+/// { +/// let tx_ro = env_inner.tx_ro()?; +/// let table = env_inner.open_db_ro::(&tx_ro)?; +/// assert_eq!(table.first()?, (0, 1)); +/// } +/// +/// // Create all tables at once using the +/// // `OpenTables` trait generated with the +/// // macro above. +/// { +/// let tx_rw = env_inner.tx_rw()?; +/// env_inner.create_tables(&tx_rw)?; +/// TxRw::commit(tx_rw)?; +/// } +/// +/// // Open all tables at once. +/// { +/// let tx_ro = env_inner.tx_ro()?; +/// let all_tables = env_inner.open_tables(&tx_ro)?; +/// } +/// # Ok(()) } +/// ``` +/// +/// # Output +/// This macro: +/// 1. Implements [`Table`](crate::Table) on all your table types +/// 1. Creates a `pub trait Tables` trait (in scope) +/// 1. Creates a `pub trait TablesIter` trait (in scope) +/// 1. Creates a `pub trait TablesMut` trait (in scope) +/// 1. Blanket implements a `(tuples, containing, all, open, database, tables, ...)` for the above traits +/// 1. Creates a `pub trait OpenTables` trait (in scope) +/// +/// All table types are zero-sized structs that implement the `Table` trait. +/// +/// Table structs are automatically `CamelCase`, and their +/// static string names are automatically `snake_case`. +/// +/// For why the table traits + blanket implementation on the tuple exists, see: +/// . +/// +/// The `OpenTables` trait lets you open all tables you've defined, at once. +/// +/// # Example +/// For examples of usage & output, see +/// [`cuprate_blockchain::tables`](https://github.com/Cuprate/cuprate/blob/main/storage/blockchain/src/tables.rs). +#[macro_export] +macro_rules! define_tables { + ( + $( + // Documentation and any `derive`'s. + $(#[$attr:meta])* + + // The table name + doubles as the table struct name. + $index:literal => $table:ident, + + // Key type => Value type. + $key:ty => $value:ty + ),* $(,)? + ) => { $crate::paste::paste! { + $( + // Table struct. 
+ $(#[$attr])* + #[doc = concat!("- Key: [`", stringify!($key), "`]")] + #[doc = concat!("- Value: [`", stringify!($value), "`]")] + #[doc = concat!("- Name: `", stringify!([<$table:snake>]), "`")] + #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] + #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash)] + pub struct [<$table:camel>]; + + // Table trait impl. + impl $crate::Table for [<$table:camel>] { + const NAME: &'static str = stringify!([<$table:snake>]); + type Key = $key; + type Value = $value; + } + )* + + /// Object containing all opened [`Table`](cuprate_database::Table)s in read-only mode. + /// + /// This is an encapsulated object that contains all + /// available `Table`'s in read-only mode. + /// + /// It is a `Sealed` trait and is only implemented on a + /// `(tuple, containing, all, table, types, ...)`. + /// + /// This is used to return a _single_ object from functions like + /// [`OpenTables::open_tables`] rather than the tuple containing the tables itself. + /// + /// To replace `tuple.0` style indexing, `field_accessor_functions()` + /// are provided on this trait, which essentially map the object to + /// fields containing the particular database table, for example: + /// ```rust,ignore + /// let tables = open_tables(); + /// + /// // The accessor function `block_infos()` returns the field + /// // containing an open database table for `BlockInfos`. + /// let _ = tables.block_infos(); + /// ``` + /// + /// See also: + /// - [`TablesMut`] + /// - [`TablesIter`] + pub trait Tables { + // This expands to creating `fn field_accessor_functions()` + // for each passed `$table` type. + // + // It is essentially a mapping to the field + // containing the proper opened database table. + // + // The function name of the function is + // the table type in `snake_case`, e.g., `block_info_v1s()`. + $( + /// Access an opened + #[doc = concat!("[`", stringify!($table), "`]")] + /// database. 
+ fn [<$table:snake>](&self) -> &impl $crate::DatabaseRo<$table>; + )* + + /// This returns `true` if all tables are empty. + /// + /// # Errors + /// This returns errors on regular database errors. + fn all_tables_empty(&self) -> Result; + } + + /// Object containing all opened [`Table`](cuprate_database::Table)s in read + iter mode. + /// + /// This is the same as [`Tables`] but includes `_iter()` variants. + /// + /// Note that this trait is a supertrait of `Tables`, + /// as in it can use all of its functions as well. + /// + /// See [`Tables`] for documentation - this trait exists for the same reasons. + pub trait TablesIter: Tables { + $( + /// Access an opened read-only + iterable + #[doc = concat!("[`", stringify!($table), "`]")] + /// database. + fn [<$table:snake _iter>](&self) -> &(impl $crate::DatabaseRo<$table> + $crate::DatabaseIter<$table>); + )* + } + + /// Object containing all opened [`Table`](cuprate_database::Table)s in write mode. + /// + /// This is the same as [`Tables`] but for mutable accesses. + /// + /// Note that this trait is a supertrait of `Tables`, + /// as in it can use all of its functions as well. + /// + /// See [`Tables`] for documentation - this trait exists for the same reasons. + pub trait TablesMut: Tables { + $( + /// Access an opened + #[doc = concat!("[`", stringify!($table), "`]")] + /// database. + fn [<$table:snake _mut>](&mut self) -> &mut impl $crate::DatabaseRw<$table>; + )* + } + + // This creates a blanket-implementation for + // `(tuple, containing, all, table, types)`. + // + // There is a generic defined here _for each_ `$table` input. + // Specifically, the generic letters are just the table types in UPPERCASE. + // Concretely, this expands to something like: + // ```rust + // impl + // ``` + impl<$([<$table:upper>]),*> Tables + // We are implementing `Tables` on a tuple that + // contains all those generics specified, i.e., + // a tuple containing all open table types. 
+ // + // Concretely, this expands to something like: + // ```rust + // (BLOCKINFOSV1S, BLOCKINFOSV2S, BLOCKINFOSV3S, [...]) + // ``` + // which is just a tuple of the generics defined above. + for ($([<$table:upper>]),*) + where + // This expands to a where bound that asserts each element + // in the tuple implements some database table type. + // + // Concretely, this expands to something like: + // ```rust + // BLOCKINFOSV1S: DatabaseRo + DatabaseIter, + // BLOCKINFOSV2S: DatabaseRo + DatabaseIter, + // [...] + // ``` + $( + [<$table:upper>]: $crate::DatabaseRo<$table>, + )* + { + $( + // The function name of the accessor function is + // the table type in `snake_case`, e.g., `block_info_v1s()`. + #[inline] + fn [<$table:snake>](&self) -> &impl $crate::DatabaseRo<$table> { + // The index of the database table in + // the tuple implements the table trait. + &self.$index + } + )* + + fn all_tables_empty(&self) -> Result { + $( + if !$crate::DatabaseRo::is_empty(&self.$index)? { + return Ok(false); + } + )* + Ok(true) + } + } + + // This is the same as the above + // `Tables`, but for `TablesIter`. + impl<$([<$table:upper>]),*> TablesIter + for ($([<$table:upper>]),*) + where + $( + [<$table:upper>]: $crate::DatabaseRo<$table> + $crate::DatabaseIter<$table>, + )* + { + $( + // The function name of the accessor function is + // the table type in `snake_case` + `_iter`, e.g., `block_info_v1s_iter()`. + #[inline] + fn [<$table:snake _iter>](&self) -> &(impl $crate::DatabaseRo<$table> + $crate::DatabaseIter<$table>) { + &self.$index + } + )* + } + + // This is the same as the above + // `Tables`, but for `TablesMut`. + impl<$([<$table:upper>]),*> TablesMut + for ($([<$table:upper>]),*) + where + $( + [<$table:upper>]: $crate::DatabaseRw<$table>, + )* + { + $( + // The function name of the mutable accessor function is + // the table type in `snake_case` + `_mut`, e.g., `block_info_v1s_mut()`. 
+ #[inline] + fn [<$table:snake _mut>](&mut self) -> &mut impl $crate::DatabaseRw<$table> { + &mut self.$index + } + )* + } + + /// Open all tables at once. + /// + /// This trait encapsulates the functionality of opening all tables at once. + /// It can be seen as the "constructor" for the [`Tables`] object. + /// + /// Note that this is already implemented on [`cuprate_database::EnvInner`], thus: + /// - You don't need to implement this + /// - It can be called using `env_inner.open_tables()` notation + pub trait OpenTables<'env> { + /// The read-only transaction type of the backend. + type Ro<'a>; + /// The read-write transaction type of the backend. + type Rw<'a>; + + /// Open all tables in read/iter mode. + /// + /// This calls [`cuprate_database::EnvInner::open_db_ro`] on all database tables + /// and returns a structure that allows access to all tables. + /// + /// # Errors + /// This will only return [`cuprate_database::RuntimeError::Io`] if it errors. + /// + /// # Invariant + /// All tables should be created with a crate-specific open function. + /// + /// TODO: explain why + fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result; + + /// Open all tables in read-write mode. + /// + /// This calls [`cuprate_database::EnvInner::open_db_rw`] on all database tables + /// and returns a structure that allows access to all tables. + /// + /// # Errors + /// This will only return [`cuprate_database::RuntimeError::Io`] on errors. + fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result; + + /// Create all database tables. + /// + /// This will create all the defined [`Table`](cuprate_database::Table)s. + /// + /// # Errors + /// This will only return [`cuprate_database::RuntimeError::Io`] on errors. 
+ fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), $crate::RuntimeError>; + } + + impl<'env, Ei> OpenTables<'env> for Ei + where + Ei: $crate::EnvInner<'env>, + { + type Ro<'a> = >::Ro<'a>; + type Rw<'a> = >::Rw<'a>; + + fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result { + Ok(($( + Self::open_db_ro::<[<$table:camel>]>(self, tx_ro)?, + )*)) + } + + fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result { + Ok(($( + Self::open_db_rw::<[<$table:camel>]>(self, tx_rw)?, + )*)) + } + + fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), $crate::RuntimeError> { + let result = Ok(($( + Self::create_db::<[<$table:camel>]>(self, tx_rw), + )*)); + + match result { + Ok(_) => Ok(()), + Err(e) => Err(e), + } + } + } + }}; +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} From 0a390a362ab8906b0c8a611d3334e262d5f1988d Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Fri, 12 Jul 2024 17:15:02 -0400 Subject: [PATCH 016/104] storage: doc fixes (#228) * database: doc fixes * blockchain: doc fixes * database: fix doc test * database: readme fixes * blockchain: ops fix * blockchain: readme fix --- storage/blockchain/README.md | 8 ++- .../blockchain/src/config/reader_threads.rs | 33 ++++++----- storage/blockchain/src/free.rs | 2 +- storage/blockchain/src/ops/block.rs | 2 +- storage/blockchain/src/ops/mod.rs | 8 +-- storage/database/README.md | 18 +++--- storage/database/src/config/config.rs | 2 +- storage/database/src/config/sync_mode.rs | 4 +- storage/database/src/env.rs | 56 ++++++++++++++----- storage/database/src/error.rs | 4 +- storage/database/src/storable.rs | 2 +- storage/database/src/table.rs | 2 + storage/database/src/tables.rs | 20 ++++--- storage/database/src/transaction.rs | 4 +- 14 files changed, 100 insertions(+), 65 deletions(-) diff --git a/storage/blockchain/README.md b/storage/blockchain/README.md index 58c06e0..4800546 100644 --- 
a/storage/blockchain/README.md +++ b/storage/blockchain/README.md @@ -5,6 +5,10 @@ This documentation is mostly for practical usage of `cuprate_blockchain`. For a high-level overview, see the database section in [Cuprate's architecture book](https://architecture.cuprate.org). +If you're looking for a database crate, consider using the lower-level +[`cuprate-database`](https://doc.cuprate.org/cuprate_database) +crate that this crate is built on-top of. + # Purpose This crate does 3 things: 1. Uses [`cuprate_database`] as a base database layer @@ -47,11 +51,11 @@ there are some things that must be kept in mind when doing so. Failing to uphold these invariants may cause panics. 1. `LMDB` requires the user to resize the memory map resizing (see [`cuprate_database::RuntimeError::ResizeNeeded`] -1. `LMDB` has a maximum reader transaction count, currently it is set to `128` +1. `LMDB` has a maximum reader transaction count, currently, [it is set to `126`](https://github.com/LMDB/lmdb/blob/b8e54b4c31378932b69f1298972de54a565185b1/libraries/liblmdb/mdb.c#L794-L799) 1. `LMDB` has [maximum key/value byte size](http://www.lmdb.tech/doc/group__internal.html#gac929399f5d93cef85f874b9e9b1d09e0) which must not be exceeded # Examples -The below is an example of using `cuprate_blockchain` +The below is an example of using `cuprate_blockchain`'s lowest API, i.e. using a mix of this crate and `cuprate_database`'s traits directly - **this is NOT recommended.** diff --git a/storage/blockchain/src/config/reader_threads.rs b/storage/blockchain/src/config/reader_threads.rs index 04216e3..d4dd6ac 100644 --- a/storage/blockchain/src/config/reader_threads.rs +++ b/storage/blockchain/src/config/reader_threads.rs @@ -20,12 +20,11 @@ use serde::{Deserialize, Serialize}; /// This controls how many reader thread `service`'s /// thread-pool will spawn to receive and send requests/responses. /// -/// It does nothing outside of `service`. 
-/// -/// It will always be at least 1, up until the amount of threads on the machine. -/// +/// # Invariant /// The main function used to extract an actual /// usable thread count out of this is [`ReaderThreads::as_threads`]. +/// +/// This will always return at least 1, up until the amount of threads on the machine. #[derive(Copy, Clone, Debug, Default, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum ReaderThreads { @@ -97,30 +96,30 @@ impl ReaderThreads { /// /// # Example /// ```rust - /// use cuprate_blockchain::config::ReaderThreads as Rt; + /// use cuprate_blockchain::config::ReaderThreads as R; /// /// let total_threads: std::num::NonZeroUsize = /// cuprate_helper::thread::threads(); /// - /// assert_eq!(Rt::OnePerThread.as_threads(), total_threads); + /// assert_eq!(R::OnePerThread.as_threads(), total_threads); /// - /// assert_eq!(Rt::One.as_threads().get(), 1); + /// assert_eq!(R::One.as_threads().get(), 1); /// - /// assert_eq!(Rt::Number(0).as_threads(), total_threads); - /// assert_eq!(Rt::Number(1).as_threads().get(), 1); - /// assert_eq!(Rt::Number(usize::MAX).as_threads(), total_threads); + /// assert_eq!(R::Number(0).as_threads(), total_threads); + /// assert_eq!(R::Number(1).as_threads().get(), 1); + /// assert_eq!(R::Number(usize::MAX).as_threads(), total_threads); /// - /// assert_eq!(Rt::Percent(0.01).as_threads().get(), 1); - /// assert_eq!(Rt::Percent(0.0).as_threads(), total_threads); - /// assert_eq!(Rt::Percent(1.0).as_threads(), total_threads); - /// assert_eq!(Rt::Percent(f32::NAN).as_threads(), total_threads); - /// assert_eq!(Rt::Percent(f32::INFINITY).as_threads(), total_threads); - /// assert_eq!(Rt::Percent(f32::NEG_INFINITY).as_threads(), total_threads); + /// assert_eq!(R::Percent(0.01).as_threads().get(), 1); + /// assert_eq!(R::Percent(0.0).as_threads(), total_threads); + /// assert_eq!(R::Percent(1.0).as_threads(), total_threads); + /// 
assert_eq!(R::Percent(f32::NAN).as_threads(), total_threads); + /// assert_eq!(R::Percent(f32::INFINITY).as_threads(), total_threads); + /// assert_eq!(R::Percent(f32::NEG_INFINITY).as_threads(), total_threads); /// /// // Percentage only works on more than 1 thread. /// if total_threads.get() > 1 { /// assert_eq!( - /// Rt::Percent(0.5).as_threads().get(), + /// R::Percent(0.5).as_threads().get(), /// (total_threads.get() as f32 / 2.0) as usize, /// ); /// } diff --git a/storage/blockchain/src/free.rs b/storage/blockchain/src/free.rs index 87e63d7..8288e65 100644 --- a/storage/blockchain/src/free.rs +++ b/storage/blockchain/src/free.rs @@ -6,7 +6,7 @@ use cuprate_database::{ConcreteEnv, Env, EnvInner, InitError, RuntimeError, TxRw use crate::{config::Config, tables::OpenTables}; //---------------------------------------------------------------------------------------------------- Free functions -/// Open the blockchain database, using the passed [`Config`]. +/// Open the blockchain database using the passed [`Config`]. /// /// This calls [`cuprate_database::Env::open`] and prepares the /// database to be ready for blockchain-related usage, e.g. diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index 7e0284d..b0eb013 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -1,4 +1,4 @@ -//! Blocks functions. +//! Block functions. //---------------------------------------------------------------------------------------------------- Import use bytemuck::TransparentWrapper; diff --git a/storage/blockchain/src/ops/mod.rs b/storage/blockchain/src/ops/mod.rs index 7210ae7..2699fc8 100644 --- a/storage/blockchain/src/ops/mod.rs +++ b/storage/blockchain/src/ops/mod.rs @@ -5,14 +5,14 @@ //! database operations. //! //! # `impl Table` -//! `ops/` functions take [`Tables`](crate::tables::Tables) and +//! Functions in this module take [`Tables`](crate::tables::Tables) and //! 
[`TablesMut`](crate::tables::TablesMut) directly - these are //! _already opened_ database tables. //! -//! As such, the function puts the responsibility -//! of transactions, tables, etc on the caller. +//! As such, the responsibility of +//! transactions, tables, etc, are on the caller. //! -//! This does mean these functions are mostly as lean +//! Notably, this means that these functions are as lean //! as possible, so calling them in a loop should be okay. //! //! # Atomicity diff --git a/storage/database/README.md b/storage/database/README.md index 5bcec60..aed738e 100644 --- a/storage/database/README.md +++ b/storage/database/README.md @@ -6,10 +6,10 @@ For a high-level overview, see the database section in [Cuprate's architecture book](https://architecture.cuprate.org). If you need blockchain specific capabilities, consider using the higher-level -`cuprate-blockchain` crate which builds upon this one. +[`cuprate-blockchain`](https://doc.cuprate.org/cuprate_blockchain) crate which builds upon this one. # Purpose -This crate abstracts various database backends with traits. The databases are: +This crate abstracts various database backends with traits. 
All backends have the following attributes: - [Embedded](https://en.wikipedia.org/wiki/Embedded_database) @@ -19,6 +19,10 @@ All backends have the following attributes: - Are table oriented (`"table_name" -> (key, value)`) - Allows concurrent readers +The currently implemented backends are: +- [`heed`](https://github.com/meilisearch/heed) (LMDB) +- [`redb`](https://github.com/cberner/redb) + # Terminology To be more clear on some terms used in this crate: @@ -26,17 +30,17 @@ To be more clear on some terms used in this crate: |------------------|--------------------------------------| | `Env` | The 1 database environment, the "whole" thing | `DatabaseR{o,w}` | A _actively open_ readable/writable `key/value` store -| `Table` | Solely the metadata of a `cuprate_database` (the `key` and `value` types, and the name) +| `Table` | Solely the metadata of a `Database` (the `key` and `value` types, and the name) | `TxR{o,w}` | A read/write transaction -| `Storable` | A data that type can be stored in the database +| `Storable` | A data type that can be stored in the database -The dataflow is `Env` -> `Tx` -> `cuprate_database` +The flow is `Env` -> `Tx` -> `Database` Which reads as: 1. You have a database `Environment` 1. You open up a `Transaction` -1. You open a particular `Table` from that `Environment`, getting a `cuprate_database` -1. You can now read/write data from/to that `cuprate_database` +1. You open a particular `Table` from that `Environment`, getting a `Database` +1. You can now read/write data from/to that `Database` # Concrete types You should _not_ rely on the concrete type of any abstracted backend. diff --git a/storage/database/src/config/config.rs b/storage/database/src/config/config.rs index a5ecbb2..8a4ddbf 100644 --- a/storage/database/src/config/config.rs +++ b/storage/database/src/config/config.rs @@ -160,7 +160,7 @@ pub struct Config { /// Set the number of slots in the reader table. /// /// This is only used in LMDB, see - /// . 
+ /// [here](https://github.com/LMDB/lmdb/blob/b8e54b4c31378932b69f1298972de54a565185b1/libraries/liblmdb/mdb.c#L794-L799). /// /// By default, this value is [`READER_THREADS_DEFAULT`]. pub reader_threads: NonZeroUsize, diff --git a/storage/database/src/config/sync_mode.rs b/storage/database/src/config/sync_mode.rs index e000462..5a0cba5 100644 --- a/storage/database/src/config/sync_mode.rs +++ b/storage/database/src/config/sync_mode.rs @@ -127,8 +127,8 @@ pub enum SyncMode { /// In the case of a system crash, the database /// may become corrupted when using this option. /// - /// [^1]: - /// Semantically, this variant would actually map to + /// + /// [^1]: Semantically, this variant would actually map to /// [`redb::Durability::None`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.None), /// however due to [`#149`](https://github.com/Cuprate/cuprate/issues/149), /// this is not possible. As such, when using the `redb` backend, diff --git a/storage/database/src/env.rs b/storage/database/src/env.rs index de094a9..cae4973 100644 --- a/storage/database/src/env.rs +++ b/storage/database/src/env.rs @@ -24,15 +24,14 @@ use crate::{ /// /// # Lifetimes /// The lifetimes associated with `Env` have a sequential flow: -/// 1. `ConcreteEnv` -/// 2. `'env` -/// 3. `'tx` -/// 4. `'db` +/// ```text +/// Env -> Tx -> Database +/// ``` /// /// As in: /// - open database tables only live as long as... /// - transactions which only live as long as the... -/// - environment ([`EnvInner`]) +/// - database environment pub trait Env: Sized { //------------------------------------------------ Constants /// Does the database backend need to be manually @@ -202,18 +201,19 @@ Subsequent table opens will follow the flags/ordering, but only if /// Note that when opening tables with [`EnvInner::open_db_ro`], /// they must be created first or else it will return error. 
/// -/// Note that when opening tables with [`EnvInner::open_db_ro`], -/// they must be created first or else it will return error. -/// /// See [`EnvInner::create_db`] for creating tables. /// /// # Invariant #[doc = doc_heed_create_db_invariant!()] pub trait EnvInner<'env> { /// The read-only transaction type of the backend. - type Ro<'a>: TxRo<'a>; + /// + /// `'tx` is the lifetime of the transaction itself. + type Ro<'tx>: TxRo<'tx>; /// The read-write transaction type of the backend. - type Rw<'a>: TxRw<'a>; + /// + /// `'tx` is the lifetime of the transaction itself. + type Rw<'tx>: TxRw<'tx>; /// Create a read-only transaction. /// @@ -235,11 +235,37 @@ pub trait EnvInner<'env> { /// This will open the database [`Table`] /// passed as a generic to this function. /// - /// ```rust,ignore - /// let db = env.open_db_ro::(&tx_ro); - /// // ^ ^ - /// // database table table metadata - /// // (name, key/value type) + /// ```rust + /// # use cuprate_database::{ + /// # ConcreteEnv, + /// # config::ConfigBuilder, + /// # Env, EnvInner, + /// # DatabaseRo, DatabaseRw, TxRo, TxRw, + /// # }; + /// # fn main() -> Result<(), Box> { + /// # let tmp_dir = tempfile::tempdir()?; + /// # let db_dir = tmp_dir.path().to_owned(); + /// # let config = ConfigBuilder::new(db_dir.into()).build(); + /// # let env = ConcreteEnv::open(config)?; + /// # + /// # struct Table; + /// # impl cuprate_database::Table for Table { + /// # const NAME: &'static str = "table"; + /// # type Key = u8; + /// # type Value = u64; + /// # } + /// # + /// # let env_inner = env.env_inner(); + /// # let tx_rw = env_inner.tx_rw()?; + /// # env_inner.create_db::
(&tx_rw)?; + /// # TxRw::commit(tx_rw); + /// # + /// # let tx_ro = env_inner.tx_ro()?; + /// let db = env_inner.open_db_ro::
(&tx_ro); + /// // ^ ^ + /// // database table table metadata + /// // (name, key/value type) + /// # Ok(()) } /// ``` /// /// # Errors diff --git a/storage/database/src/error.rs b/storage/database/src/error.rs index 386091d..3471ac7 100644 --- a/storage/database/src/error.rs +++ b/storage/database/src/error.rs @@ -59,18 +59,16 @@ pub enum InitError { } //---------------------------------------------------------------------------------------------------- RuntimeError -/// Errors that occur _after_ successful ([`Env::open`](crate::env::Env::open)). +/// Errors that occur _after_ successful [`Env::open`](crate::env::Env::open). /// /// There are no errors for: /// 1. Missing tables /// 2. (De)serialization -/// 3. Shutdown errors /// /// as `cuprate_database` upholds the invariant that: /// /// 1. All tables exist /// 2. (De)serialization never fails -/// 3. The database (thread-pool) only shuts down when all channels are dropped #[derive(thiserror::Error, Debug)] pub enum RuntimeError { /// The given key already existed in the database. diff --git a/storage/database/src/storable.rs b/storage/database/src/storable.rs index 8842af7..100ed44 100644 --- a/storage/database/src/storable.rs +++ b/storage/database/src/storable.rs @@ -129,7 +129,7 @@ where /// /// Slice types are owned both: /// - when returned from the database -/// - in `put()` +/// - in [`crate::DatabaseRw::put()`] /// /// This is needed as `impl Storable for Vec` runs into impl conflicts. /// diff --git a/storage/database/src/table.rs b/storage/database/src/table.rs index 56e84dd..3ad0e79 100644 --- a/storage/database/src/table.rs +++ b/storage/database/src/table.rs @@ -8,6 +8,8 @@ use crate::{key::Key, storable::Storable}; /// Database table metadata. /// /// Purely compile time information for database tables. +/// +/// See [`crate::define_tables`] for bulk table generation. pub trait Table: 'static { /// Name of the database table. 
const NAME: &'static str; diff --git a/storage/database/src/tables.rs b/storage/database/src/tables.rs index c2ac821..83a00e1 100644 --- a/storage/database/src/tables.rs +++ b/storage/database/src/tables.rs @@ -349,11 +349,18 @@ macro_rules! define_tables { /// Note that this is already implemented on [`cuprate_database::EnvInner`], thus: /// - You don't need to implement this /// - It can be called using `env_inner.open_tables()` notation + /// + /// # Creation before opening + /// As [`cuprate_database::EnvInner`] documentation states, + /// tables must be created before they are opened. + /// + /// I.e. [`OpenTables::create_tables`] must be called before + /// [`OpenTables::open_tables`] or else panics may occur. pub trait OpenTables<'env> { /// The read-only transaction type of the backend. - type Ro<'a>; + type Ro<'tx>; /// The read-write transaction type of the backend. - type Rw<'a>; + type Rw<'tx>; /// Open all tables in read/iter mode. /// @@ -362,11 +369,6 @@ macro_rules! define_tables { /// /// # Errors /// This will only return [`cuprate_database::RuntimeError::Io`] if it errors. - /// - /// # Invariant - /// All tables should be created with a crate-specific open function. - /// - /// TODO: explain why fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result; /// Open all tables in read-write mode. @@ -391,8 +393,8 @@ macro_rules! 
define_tables { where Ei: $crate::EnvInner<'env>, { - type Ro<'a> = >::Ro<'a>; - type Rw<'a> = >::Rw<'a>; + type Ro<'tx> = >::Ro<'tx>; + type Rw<'tx> = >::Rw<'tx>; fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result { Ok(($( diff --git a/storage/database/src/transaction.rs b/storage/database/src/transaction.rs index e4c310a..8f33983 100644 --- a/storage/database/src/transaction.rs +++ b/storage/database/src/transaction.rs @@ -11,7 +11,7 @@ use crate::error::RuntimeError; /// # Commit /// It's recommended but may not be necessary to call [`TxRo::commit`] in certain cases: /// - -pub trait TxRo<'env> { +pub trait TxRo<'tx> { /// Commit the read-only transaction. /// /// # Errors @@ -23,7 +23,7 @@ pub trait TxRo<'env> { /// Read/write database transaction. /// /// Returned from [`EnvInner::tx_rw`](crate::EnvInner::tx_rw). -pub trait TxRw<'env> { +pub trait TxRw<'tx> { /// Commit the read/write transaction. /// /// Note that this doesn't necessarily sync the database caches to disk. From 0910c0a231b4e0ef48b6cf0b9d7d48a44cc557a7 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Fri, 12 Jul 2024 20:26:11 -0400 Subject: [PATCH 017/104] rpc: use `ByteArrayVec` and `ContainerAsBlob` (#227) * fixed-bytes: add `serde`, document feature flags * fixed-bytes: add derives * rpc: add `as _` syntax to macro * rpc: use `ByteArrayVec` and `ContainerAsBlob` for binary types * fixed-bytes: re-add derives * rpc-types: dedup default value within macro * readme: fixed bytes section --- Cargo.lock | 1 + net/fixed-bytes/Cargo.toml | 2 +- net/fixed-bytes/src/lib.rs | 5 ++- rpc/types/Cargo.toml | 3 +- rpc/types/README.md | 14 +++++++ rpc/types/src/bin.rs | 43 +++++++++++++------- rpc/types/src/json.rs | 80 ++++++++++++++++++++++---------------- rpc/types/src/macros.rs | 47 ++++++++++++++++------ rpc/types/src/other.rs | 39 +++++++------------ 9 files changed, 146 insertions(+), 88 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fbb68b0..01d5329 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -762,6 +762,7 @@ name = "cuprate-rpc-types" version = "0.0.0" dependencies = [ "cuprate-epee-encoding", + "cuprate-fixed-bytes", "monero-serai", "paste", "serde", diff --git a/net/fixed-bytes/Cargo.toml b/net/fixed-bytes/Cargo.toml index e9985e8..4c5a1af 100644 --- a/net/fixed-bytes/Cargo.toml +++ b/net/fixed-bytes/Cargo.toml @@ -16,4 +16,4 @@ bytes = { workspace = true } serde = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] -serde_json = { workspace = true, features = ["std"] } \ No newline at end of file +serde_json = { workspace = true, features = ["std"] } diff --git a/net/fixed-bytes/src/lib.rs b/net/fixed-bytes/src/lib.rs index 370b881..2e8f1bc 100644 --- a/net/fixed-bytes/src/lib.rs +++ b/net/fixed-bytes/src/lib.rs @@ -12,6 +12,7 @@ use serde::{Deserialize, Deserializer, Serialize}; #[cfg_attr(feature = "std", derive(thiserror::Error))] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +#[derive(Clone, Eq, PartialEq, PartialOrd, Ord, Hash)] pub enum FixedByteError { #[cfg_attr( feature = "std", @@ -48,7 +49,7 @@ impl Debug for FixedByteError { /// /// Internally this is just a wrapper around [`Bytes`], with the constructors checking that the length is equal to `N`. /// This implements [`Deref`] with the target being `[u8; N]`. 
-#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Default, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize))] #[cfg_attr(feature = "serde", serde(transparent))] #[repr(transparent)] @@ -115,7 +116,7 @@ impl TryFrom> for ByteArray { } } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Default, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize))] #[cfg_attr(feature = "serde", serde(transparent))] #[repr(transparent)] diff --git a/rpc/types/Cargo.toml b/rpc/types/Cargo.toml index c088e4d..1176526 100644 --- a/rpc/types/Cargo.toml +++ b/rpc/types/Cargo.toml @@ -10,11 +10,12 @@ keywords = ["cuprate", "rpc", "types", "monero"] [features] default = ["serde", "epee"] -serde = ["dep:serde"] +serde = ["dep:serde", "cuprate-fixed-bytes/serde"] epee = ["dep:cuprate-epee-encoding"] [dependencies] cuprate-epee-encoding = { path = "../../net/epee-encoding", optional = true } +cuprate-fixed-bytes = { path = "../../net/fixed-bytes" } monero-serai = { workspace = true } paste = { workspace = true } diff --git a/rpc/types/README.md b/rpc/types/README.md index 21905fa..eb8da01 100644 --- a/rpc/types/README.md +++ b/rpc/types/README.md @@ -64,6 +64,20 @@ These mixed types are: TODO: we need to figure out a type that (de)serializes correctly, `String` errors with `serde_json` +# Fixed byte containers +TODO + + + # Feature flags List of feature flags for `cuprate-rpc-types`. diff --git a/rpc/types/src/bin.rs b/rpc/types/src/bin.rs index 02be193..3dcfb96 100644 --- a/rpc/types/src/bin.rs +++ b/rpc/types/src/bin.rs @@ -3,6 +3,11 @@ //! All types are originally defined in [`rpc/core_rpc_server_commands_defs.h`](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h). 
//---------------------------------------------------------------------------------------------------- Import +use cuprate_fixed_bytes::ByteArrayVec; + +#[cfg(feature = "epee")] +use cuprate_epee_encoding::container_as_blob::ContainerAsBlob; + use crate::{ base::{AccessResponseBase, ResponseBase}, defaults::{default_false, default_height, default_string, default_vec, default_zero}, @@ -22,16 +27,13 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 162..=262, GetBlocks, Request { - #[cfg_attr(feature = "serde", serde(default = "default_zero"))] - requested_info: u8 = default_zero(), - // TODO: This is a `std::list` in `monerod` because...? - block_ids: Vec<[u8; 32]>, + requested_info: u8 = default_zero(), "default_zero", + // FIXME: This is a `std::list` in `monerod` because...? + block_ids: ByteArrayVec<32>, start_height: u64, prune: bool, - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - no_miner_tx: bool = default_false(), - #[cfg_attr(feature = "serde", serde(default = "default_zero"))] - pool_info_since: u64 = default_zero(), + no_miner_tx: bool = default_false(), "default_false", + pool_info_since: u64 = default_zero(), "default_zero", }, // TODO: this has custom epee (de)serialization. // @@ -67,16 +69,17 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 309..=338, GetHashes, Request { - block_ids: Vec<[u8; 32]>, + block_ids: ByteArrayVec<32>, start_height: u64, }, AccessResponseBase { - m_blocks_ids: Vec<[u8; 32]>, + m_blocks_ids: ByteArrayVec<32>, start_height: u64, current_height: u64, } } +#[cfg(not(feature = "epee"))] define_request_and_response! { get_o_indexesbin, cc73fe71162d564ffda8e549b79a350bca53c454 => @@ -91,6 +94,21 @@ define_request_and_response! { } } +#[cfg(feature = "epee")] +define_request_and_response! 
{ + get_o_indexesbin, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 487..=510, + GetOutputIndexes, + #[derive(Copy)] + Request { + txid: [u8; 32], + }, + AccessResponseBase { + o_indexes: Vec as ContainerAsBlob, + } +} + define_request_and_response! { get_outsbin, cc73fe71162d564ffda8e549b79a350bca53c454 => @@ -98,8 +116,7 @@ define_request_and_response! { GetOuts, Request { outputs: Vec, - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - get_txid: bool = default_false(), + get_txid: bool = default_false(), "default_false", }, AccessResponseBase { outs: Vec, @@ -113,7 +130,7 @@ define_request_and_response! { GetTransactionPoolHashes, Request {}, AccessResponseBase { - tx_hashes: Vec<[u8; 32]>, + tx_hashes: ByteArrayVec<32>, } } diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index b5b53c9..2e7aa82 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -49,8 +49,33 @@ define_request_and_response! { // $FIELD_NAME: $FIELD_TYPE, // ``` // The struct generated and all fields are `pub`. - extra_nonce: String, - prev_block: String, + + // This optional expression can be placed after + // a `field: field_type`. this indicates to the + // macro to (de)serialize this field using this + // default expression if it doesn't exist in epee. + // + // See `cuprate_epee_encoding::epee_object` for info. + // + // The default function must be specified twice: + // + // 1. As an expression + // 2. As a string literal + // + // For example: `extra_nonce: String /* = default_string(), "default_string" */,` + // + // This is a HACK since `serde`'s default attribute only takes in + // string literals and macros (stringify) within attributes do not work. + extra_nonce: String /* = default_expression, "default_literal" */, + + // Another optional expression: + // This indicates to the macro to (de)serialize + // this field as another type in epee. + // + // See `cuprate_epee_encoding::epee_object` for info. 
+ prev_block: String /* as Type */, + + // Regular fields. reserve_size: u64, wallet_address: String, }, @@ -197,8 +222,7 @@ define_request_and_response! { GetLastBlockHeader, #[derive(Copy)] Request { - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - fill_pow_hash: bool = default_false(), + fill_pow_hash: bool = default_false(), "default_false", }, AccessResponseBase { block_header: BlockHeader, @@ -213,8 +237,7 @@ define_request_and_response! { Request { hash: String, hashes: Vec, - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - fill_pow_hash: bool = default_false(), + fill_pow_hash: bool = default_false(), "default_false", }, AccessResponseBase { block_header: BlockHeader, @@ -230,8 +253,7 @@ define_request_and_response! { #[derive(Copy)] Request { height: u64, - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - fill_pow_hash: bool = default_false(), + fill_pow_hash: bool = default_false(), "default_false", }, AccessResponseBase { block_header: BlockHeader, @@ -247,8 +269,7 @@ define_request_and_response! { Request { start_height: u64, end_height: u64, - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - fill_pow_hash: bool = default_false(), + fill_pow_hash: bool = default_false(), "default_false", }, AccessResponseBase { headers: Vec, @@ -264,12 +285,9 @@ define_request_and_response! { // `monerod` has both `hash` and `height` fields. // In the RPC handler, if `hash.is_empty()`, it will use it, else, it uses `height`. 
// - #[cfg_attr(feature = "serde", serde(default = "default_string"))] - hash: String = default_string(), - #[cfg_attr(feature = "serde", serde(default = "default_height"))] - height: u64 = default_height(), - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - fill_pow_hash: bool = default_false(), + hash: String = default_string(), "default_string", + height: u64 = default_height(), "default_height", + fill_pow_hash: bool = default_false(), "default_false", }, AccessResponseBase { blob: String, @@ -287,7 +305,7 @@ define_request_and_response! { GetConnections, Request {}, ResponseBase { - // TODO: This is a `std::list` in `monerod` because...? + // FIXME: This is a `std::list` in `monerod` because...? connections: Vec, } } @@ -405,8 +423,7 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 2096..=2116, FlushTransactionPool, Request { - #[cfg_attr(feature = "serde", serde(default = "default_vec"))] - txids: Vec = default_vec::(), + txids: Vec = default_vec::(), "default_vec", }, #[derive(Copy)] #[cfg_attr(feature = "serde", serde(transparent))] @@ -461,12 +478,12 @@ define_request_and_response! { ResponseBase { version: u32, release: bool, - #[serde(skip_serializing_if = "is_zero", default = "default_zero")] - current_height: u64 = default_zero(), - #[serde(skip_serializing_if = "is_zero", default = "default_zero")] - target_height: u64 = default_zero(), - #[serde(skip_serializing_if = "Vec::is_empty", default = "default_vec")] - hard_forks: Vec = default_vec(), + #[serde(skip_serializing_if = "is_zero")] + current_height: u64 = default_zero(), "default_zero", + #[serde(skip_serializing_if = "is_zero")] + target_height: u64 = default_zero(), "default_zero", + #[serde(skip_serializing_if = "Vec::is_empty")] + hard_forks: Vec = default_vec(), "default_vec", } } @@ -521,9 +538,9 @@ define_request_and_response! { height: u64, next_needed_pruning_seed: u32, overview: String, - // TODO: This is a `std::list` in `monerod` because...? 
+ // FIXME: This is a `std::list` in `monerod` because...? peers: Vec, - // TODO: This is a `std::list` in `monerod` because...? + // FIXME: This is a `std::list` in `monerod` because...? spans: Vec, target_height: u64, } @@ -588,8 +605,7 @@ define_request_and_response! { PruneBlockchain, #[derive(Copy)] Request { - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - check: bool = default_false(), + check: bool = default_false(), "default_false", }, #[derive(Copy)] ResponseBase { @@ -623,10 +639,8 @@ define_request_and_response! { FlushCache, #[derive(Copy)] Request { - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - bad_txs: bool = default_false(), - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - bad_blocks: bool = default_false(), + bad_txs: bool = default_false(), "default_false", + bad_blocks: bool = default_false(), "default_false", }, ResponseBase {} } diff --git a/rpc/types/src/macros.rs b/rpc/types/src/macros.rs index 31bc6be..e130138 100644 --- a/rpc/types/src/macros.rs +++ b/rpc/types/src/macros.rs @@ -45,7 +45,7 @@ /// would trigger the different branches. macro_rules! define_request_and_response { ( - // The markdown tag for Monero RPC documentation. Not necessarily the endpoint. + // The markdown tag for Monero daemon RPC documentation. Not necessarily the endpoint. $monero_daemon_rpc_doc_link:ident, // The commit hash and `$file.$extension` in which this type is defined in @@ -67,8 +67,10 @@ macro_rules! define_request_and_response { Request { // And any fields. $( - $( #[$request_field_attr:meta] )* - $request_field:ident: $request_field_type:ty $(= $request_field_type_default:expr)?, + $( #[$request_field_attr:meta] )* // Field attribute. + $request_field:ident: $request_field_type:ty // field_name: field type + $(as $request_field_type_as:ty)? 
// (optional) alternative type (de)serialization + $(= $request_field_type_default:expr, $request_field_type_default_string:literal)?, // (optional) default value )* }, @@ -78,7 +80,9 @@ macro_rules! define_request_and_response { // And any fields. $( $( #[$response_field_attr:meta] )* - $response_field:ident: $response_field_type:ty $(= $response_field_type_default:expr)?, + $response_field:ident: $response_field_type:ty + $(as $response_field_type_as:ty)? + $(= $response_field_type_default:expr, $response_field_type_default_string:literal)?, )* } ) => { paste::paste! { @@ -99,7 +103,9 @@ macro_rules! define_request_and_response { [<$type_name Request>] { $( $( #[$request_field_attr] )* - $request_field: $request_field_type $(= $request_field_type_default)?, + $request_field: $request_field_type + $(as $request_field_type_as)? + $(= $request_field_type_default, $request_field_type_default_string)?, )* } } @@ -125,7 +131,9 @@ macro_rules! define_request_and_response { $response_base_type => [<$type_name Response>] { $( $( #[$response_field_attr] )* - $response_field: $response_field_type $(= $response_field_type_default)?, + $response_field: $response_field_type + $(as $response_field_type_as)? + $(= $response_field_type_default, $response_field_type_default_string)?, )* } } @@ -166,7 +174,9 @@ macro_rules! __define_request { $( $( #[$field_attr:meta] )* // field attributes // field_name: FieldType - $field:ident: $field_type:ty $(= $field_default:expr)?, + $field:ident: $field_type:ty + $(as $field_as:ty)? + $(= $field_default:expr, $field_default_string:literal)?, // The $field_default is an optional extra token that represents // a default value to pass to [`cuprate_epee_encoding::epee_object`], // see it for usage. @@ -180,6 +190,7 @@ macro_rules! __define_request { pub struct $t { $( $( #[$field_attr] )* + $(#[cfg_attr(feature = "serde", serde(default = $field_default_string))])? pub $field: $field_type, )* } @@ -188,7 +199,9 @@ macro_rules! 
__define_request { ::cuprate_epee_encoding::epee_object! { $t, $( - $field: $field_type $(= $field_default)?, + $field: $field_type + $(as $field_as)? + $(= $field_default)?, )* } }; @@ -218,7 +231,9 @@ macro_rules! __define_response { // See [`__define_request`] for docs, this does the same thing. $( $( #[$field_attr:meta] )* - $field:ident: $field_type:ty $(= $field_default:expr)?, + $field:ident: $field_type:ty + $(as $field_as:ty)? + $(= $field_default:expr, $field_default_string:literal)?, )* } ) => { @@ -226,6 +241,7 @@ macro_rules! __define_response { pub struct $t { $( $( #[$field_attr] )* + $(#[cfg_attr(feature = "serde", serde(default = $field_default_string))])? pub $field: $field_type, )* } @@ -234,7 +250,9 @@ macro_rules! __define_response { ::cuprate_epee_encoding::epee_object! { $t, $( - $field: $field_type $($field_default)?, + $field: $field_type + $(as $field_as)? + $(= $field_default)?, )* } }; @@ -250,7 +268,9 @@ macro_rules! __define_response { // See [`__define_request`] for docs, this does the same thing. $( $( #[$field_attr:meta] )* - $field:ident: $field_type:ty $(= $field_default:expr)?, + $field:ident: $field_type:ty + $(as $field_as:ty)? + $(= $field_default:expr, $field_default_string:literal)?, )* } ) => { @@ -261,6 +281,7 @@ macro_rules! __define_response { $( $( #[$field_attr] )* + $(#[cfg_attr(feature = "serde", serde(default = $field_default_string))])? pub $field: $field_type, )* } @@ -269,7 +290,9 @@ macro_rules! __define_response { ::cuprate_epee_encoding::epee_object! { $t, $( - $field: $field_type $(= $field_default)?, + $field: $field_type + $(as $field_as)? + $(= $field_default)?, )* !flatten: base: $base, } diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index 03cb05d..5ad2caa 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -36,12 +36,9 @@ define_request_and_response! 
{ // FIXME: this is documented as optional but it isn't serialized as an optional // but it is set _somewhere_ to false in `monerod` // - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - decode_as_json: bool = default_false(), - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - prune: bool = default_false(), - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - split: bool = default_false(), + decode_as_json: bool = default_false(), "default_false", + prune: bool = default_false(), "default_false", + split: bool = default_false(), "default_false", }, AccessResponseBase { txs_as_hex: Vec, @@ -82,10 +79,8 @@ define_request_and_response! { SendRawTransaction, Request { tx_as_hex: String, - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - do_not_relay: bool = default_false(), - #[cfg_attr(feature = "serde", serde(default = "default_true"))] - do_sanity_checks: bool = default_true(), + do_not_relay: bool = default_false(), "default_false", + do_sanity_checks: bool = default_true(), "default_true", }, AccessResponseBase { double_spend: bool, @@ -167,10 +162,8 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 1369..=1417, GetPeerList, Request { - #[cfg_attr(feature = "serde", serde(default = "default_true"))] - public_only: bool = default_true(), - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - include_blocked: bool = default_false(), + public_only: bool = default_true(), "default_true", + include_blocked: bool = default_false(), "default_false", }, ResponseBase { white_list: Vec, @@ -208,8 +201,7 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 1494..=1517, SetLogCategories, Request { - #[cfg_attr(feature = "serde", serde(default = "default_string"))] - categories: String = default_string(), + categories: String = default_string(), "default_string", }, ResponseBase { categories: String, @@ -300,8 +292,7 @@ define_request_and_response! 
{ core_rpc_server_commands_defs.h => 1876..=1903, OutPeers, Request { - #[cfg_attr(feature = "serde", serde(default = "default_true"))] - set: bool = default_true(), + set: bool = default_true(), "default_true", out_peers: u32, }, ResponseBase { @@ -345,8 +336,7 @@ define_request_and_response! { Update, Request { command: String, - #[cfg_attr(feature = "serde", serde(default = "default_string"))] - path: String = default_string(), + path: String = default_string(), "default_string", }, ResponseBase { auto_uri: String, @@ -402,12 +392,9 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 1419..=1448, GetPublicNodes, Request { - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - gray: bool = default_false(), - #[cfg_attr(feature = "serde", serde(default = "default_true"))] - white: bool = default_true(), - #[cfg_attr(feature = "serde", serde(default = "default_false"))] - include_blocked: bool = default_false(), + gray: bool = default_false(), "default_false", + white: bool = default_true(), "default_true", + include_blocked: bool = default_false(), "default_false", }, ResponseBase { gray: Vec, From 6820da984886dab86d18753408388fcfb1db837c Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Tue, 16 Jul 2024 17:47:50 -0400 Subject: [PATCH 018/104] types: add `BlockCompleteEntry` (#230) * types: add `block_complete_entry.rs` * net: fix imports * p2p: fix imports * turn off default-features * p2p: fix imports * Update net/wire/Cargo.toml Co-authored-by: Boog900 * wire: re-export types --------- Co-authored-by: Boog900 --- Cargo.lock | 6 + net/wire/Cargo.toml | 1 + net/wire/src/p2p/common.rs | 112 +------------------ net/wire/src/p2p/protocol.rs | 4 +- p2p/p2p/Cargo.toml | 1 + p2p/p2p/src/block_downloader/tests.rs | 6 +- p2p/p2p/src/broadcast.rs | 6 +- types/Cargo.toml | 9 +- types/README.md | 25 ++--- types/src/block_complete_entry.rs | 154 ++++++++++++++++++++++++++ types/src/lib.rs | 12 +- 11 files changed, 190 insertions(+), 146 
deletions(-) create mode 100644 types/src/block_complete_entry.rs diff --git a/Cargo.lock b/Cargo.lock index 01d5329..965e2c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -702,6 +702,7 @@ dependencies = [ "cuprate-p2p-core", "cuprate-pruning", "cuprate-test-utils", + "cuprate-types", "cuprate-wire", "dashmap", "futures", @@ -800,8 +801,12 @@ version = "0.0.0" name = "cuprate-types" version = "0.0.0" dependencies = [ + "bytes", + "cuprate-epee-encoding", + "cuprate-fixed-bytes", "curve25519-dalek", "monero-serai", + "serde", ] [[package]] @@ -813,6 +818,7 @@ dependencies = [ "cuprate-epee-encoding", "cuprate-fixed-bytes", "cuprate-levin", + "cuprate-types", "hex", "thiserror", ] diff --git a/net/wire/Cargo.toml b/net/wire/Cargo.toml index c71a77b..101daa3 100644 --- a/net/wire/Cargo.toml +++ b/net/wire/Cargo.toml @@ -14,6 +14,7 @@ tracing = ["cuprate-levin/tracing"] cuprate-levin = { path = "../levin" } cuprate-epee-encoding = { path = "../epee-encoding" } cuprate-fixed-bytes = { path = "../fixed-bytes" } +cuprate-types = { path = "../../types", default-features = false, features = ["epee"] } bitflags = { workspace = true, features = ["std"] } bytes = { workspace = true, features = ["std"] } diff --git a/net/wire/src/p2p/common.rs b/net/wire/src/p2p/common.rs index 91adb90..d585d07 100644 --- a/net/wire/src/p2p/common.rs +++ b/net/wire/src/p2p/common.rs @@ -16,10 +16,9 @@ //! Common types that are used across multiple messages. use bitflags::bitflags; -use bytes::{Buf, BufMut, Bytes}; -use cuprate_epee_encoding::{epee_object, EpeeValue, InnerMarker}; -use cuprate_fixed_bytes::ByteArray; +use cuprate_epee_encoding::epee_object; +pub use cuprate_types::{BlockCompleteEntry, PrunedTxBlobEntry, TransactionBlobs}; use crate::NetworkAddress; @@ -168,113 +167,6 @@ epee_object! 
{ rpc_credits_per_hash: u32 = 0_u32, } -/// A pruned tx with the hash of the missing prunable data -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct PrunedTxBlobEntry { - /// The Tx - pub tx: Bytes, - /// The Prunable Tx Hash - pub prunable_hash: ByteArray<32>, -} - -epee_object!( - PrunedTxBlobEntry, - tx: Bytes, - prunable_hash: ByteArray<32>, -); - -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum TransactionBlobs { - Pruned(Vec), - Normal(Vec), - None, -} - -impl TransactionBlobs { - pub fn take_pruned(self) -> Option> { - match self { - TransactionBlobs::Normal(_) => None, - TransactionBlobs::Pruned(txs) => Some(txs), - TransactionBlobs::None => Some(vec![]), - } - } - - pub fn take_normal(self) -> Option> { - match self { - TransactionBlobs::Normal(txs) => Some(txs), - TransactionBlobs::Pruned(_) => None, - TransactionBlobs::None => Some(vec![]), - } - } - - pub fn len(&self) -> usize { - match self { - TransactionBlobs::Normal(txs) => txs.len(), - TransactionBlobs::Pruned(txs) => txs.len(), - TransactionBlobs::None => 0, - } - } - - pub fn is_empty(&self) -> bool { - self.len() == 0 - } -} - -/// A Block that can contain transactions -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct BlockCompleteEntry { - /// True if tx data is pruned - pub pruned: bool, - /// The Block - pub block: Bytes, - /// The Block Weight/Size - pub block_weight: u64, - /// The blocks txs - pub txs: TransactionBlobs, -} - -epee_object!( - BlockCompleteEntry, - pruned: bool = false, - block: Bytes, - block_weight: u64 = 0_u64, - txs: TransactionBlobs = TransactionBlobs::None => tx_blob_read, tx_blob_write, should_write_tx_blobs, -); - -fn tx_blob_read(b: &mut B) -> cuprate_epee_encoding::Result { - let marker = cuprate_epee_encoding::read_marker(b)?; - match marker.inner_marker { - InnerMarker::Object => Ok(TransactionBlobs::Pruned(Vec::read(b, &marker)?)), - InnerMarker::String => Ok(TransactionBlobs::Normal(Vec::read(b, &marker)?)), - _ => Err(cuprate_epee_encoding::Error::Value( 
- "Invalid marker for tx blobs".to_string(), - )), - } -} - -fn tx_blob_write( - val: TransactionBlobs, - field_name: &str, - w: &mut B, -) -> cuprate_epee_encoding::Result<()> { - if should_write_tx_blobs(&val) { - match val { - TransactionBlobs::Normal(bytes) => { - cuprate_epee_encoding::write_field(bytes, field_name, w)? - } - TransactionBlobs::Pruned(obj) => { - cuprate_epee_encoding::write_field(obj, field_name, w)? - } - TransactionBlobs::None => (), - } - } - Ok(()) -} - -fn should_write_tx_blobs(val: &TransactionBlobs) -> bool { - !val.is_empty() -} - #[cfg(test)] mod tests { diff --git a/net/wire/src/p2p/protocol.rs b/net/wire/src/p2p/protocol.rs index a385099..73694d5 100644 --- a/net/wire/src/p2p/protocol.rs +++ b/net/wire/src/p2p/protocol.rs @@ -16,14 +16,14 @@ //! This module defines Monero protocol messages //! //! Protocol message requests don't have to be responded to in order unlike -//! admin messages. +//! admin messages. use bytes::Bytes; use cuprate_epee_encoding::{container_as_blob::ContainerAsBlob, epee_object}; use cuprate_fixed_bytes::{ByteArray, ByteArrayVec}; -use super::common::BlockCompleteEntry; +use crate::p2p::common::BlockCompleteEntry; /// A block that SHOULD have transactions #[derive(Debug, Clone, PartialEq, Eq)] diff --git a/p2p/p2p/Cargo.toml b/p2p/p2p/Cargo.toml index e9b03d2..7cbbdcb 100644 --- a/p2p/p2p/Cargo.toml +++ b/p2p/p2p/Cargo.toml @@ -13,6 +13,7 @@ cuprate-address-book = { path = "../address-book" } cuprate-pruning = { path = "../../pruning" } cuprate-helper = { path = "../../helper", features = ["asynch"], default-features = false } cuprate-async-buffer = { path = "../async-buffer" } +cuprate-types = { path = "../../types", default-features = false } monero-serai = { workspace = true, features = ["std"] } diff --git a/p2p/p2p/src/block_downloader/tests.rs b/p2p/p2p/src/block_downloader/tests.rs index 981c557..f6ddbfc 100644 --- a/p2p/p2p/src/block_downloader/tests.rs +++ b/p2p/p2p/src/block_downloader/tests.rs @@ 
-26,10 +26,8 @@ use cuprate_p2p_core::{ ProtocolResponse, }; use cuprate_pruning::PruningSeed; -use cuprate_wire::{ - common::{BlockCompleteEntry, TransactionBlobs}, - protocol::{ChainResponse, GetObjectsResponse}, -}; +use cuprate_types::{BlockCompleteEntry, TransactionBlobs}; +use cuprate_wire::protocol::{ChainResponse, GetObjectsResponse}; use crate::{ block_downloader::{download_blocks, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse}, diff --git a/p2p/p2p/src/broadcast.rs b/p2p/p2p/src/broadcast.rs index cfda28b..5d7d61e 100644 --- a/p2p/p2p/src/broadcast.rs +++ b/p2p/p2p/src/broadcast.rs @@ -25,10 +25,8 @@ use tower::Service; use cuprate_p2p_core::{ client::InternalPeerID, BroadcastMessage, ConnectionDirection, NetworkZone, }; -use cuprate_wire::{ - common::{BlockCompleteEntry, TransactionBlobs}, - protocol::{NewFluffyBlock, NewTransactions}, -}; +use cuprate_types::{BlockCompleteEntry, TransactionBlobs}; +use cuprate_wire::protocol::{NewFluffyBlock, NewTransactions}; use crate::constants::{ DIFFUSION_FLUSH_AVERAGE_SECONDS_INBOUND, DIFFUSION_FLUSH_AVERAGE_SECONDS_OUTBOUND, diff --git a/types/Cargo.toml b/types/Cargo.toml index 7f6b8f8..8f16eb4 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -9,11 +9,18 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/types" keywords = ["cuprate", "types"] [features] -default = ["blockchain"] +default = ["blockchain", "epee", "serde"] blockchain = [] +epee = ["dep:cuprate-epee-encoding"] +serde = ["dep:serde"] [dependencies] +cuprate-epee-encoding = { path = "../net/epee-encoding", optional = true } +cuprate-fixed-bytes = { path = "../net/fixed-bytes" } + +bytes = { workspace = true } curve25519-dalek = { workspace = true } monero-serai = { workspace = true } +serde = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] \ No newline at end of file diff --git a/types/README.md b/types/README.md index 6a2015a..4023e9f 100644 --- a/types/README.md +++ b/types/README.md @@ 
-1,20 +1,11 @@ # `cuprate-types` -Various data types shared by Cuprate. +Shared data types within Cuprate. -- [1. File Structure](#1-file-structure) - - [1.1 `src/`](#11-src) +This crate is a kitchen-sink for data types that are shared across Cuprate. ---- - -## 1. File Structure -A quick reference of the structure of the folders & files in `cuprate-types`. - -Note that `lib.rs/mod.rs` files are purely for re-exporting/visibility/lints, and contain no code. Each sub-directory has a corresponding `mod.rs`. - -### 1.1 `src/` -The top-level `src/` files. - -| File | Purpose | -|---------------------|---------| -| `service.rs` | Types used in database requests; `enum {Request,Response}` -| `types.rs` | Various general types used by Cuprate \ No newline at end of file +# Features flags +| Feature flag | Does what | +|--------------|-----------| +| `blockchain` | Enables the `blockchain` module, containing the blockchain database request/response types +| `serde` | Enables `serde` on types where applicable +| `epee` | Enables `cuprate-epee-encoding` on types where applicable \ No newline at end of file diff --git a/types/src/block_complete_entry.rs b/types/src/block_complete_entry.rs new file mode 100644 index 0000000..ba5fc2b --- /dev/null +++ b/types/src/block_complete_entry.rs @@ -0,0 +1,154 @@ +//! Contains [`BlockCompleteEntry`] and the related types. + +//---------------------------------------------------------------------------------------------------- Import +#[cfg(feature = "epee")] +use bytes::Bytes; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +use cuprate_fixed_bytes::ByteArray; + +#[cfg(feature = "epee")] +use cuprate_epee_encoding::{ + epee_object, + macros::bytes::{Buf, BufMut}, + EpeeValue, InnerMarker, +}; + +//---------------------------------------------------------------------------------------------------- BlockCompleteEntry +/// A block that can contain transactions. 
+#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct BlockCompleteEntry { + /// `true` if transaction data is pruned. + pub pruned: bool, + /// The block. + pub block: Bytes, + /// The block weight/size. + pub block_weight: u64, + /// The block's transactions. + pub txs: TransactionBlobs, +} + +#[cfg(feature = "epee")] +epee_object!( + BlockCompleteEntry, + pruned: bool = false, + block: Bytes, + block_weight: u64 = 0_u64, + txs: TransactionBlobs = TransactionBlobs::None => + TransactionBlobs::tx_blob_read, + TransactionBlobs::tx_blob_write, + TransactionBlobs::should_write_tx_blobs, +); + +//---------------------------------------------------------------------------------------------------- TransactionBlobs +/// Transaction blobs within [`BlockCompleteEntry`]. +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum TransactionBlobs { + /// Pruned transaction blobs. + Pruned(Vec), + /// Normal transaction blobs. + Normal(Vec), + #[default] + /// No transactions. + None, +} + +impl TransactionBlobs { + /// Returns [`Some`] if `self` is [`Self::Pruned`]. + pub fn take_pruned(self) -> Option> { + match self { + Self::Normal(_) => None, + Self::Pruned(txs) => Some(txs), + Self::None => Some(vec![]), + } + } + + /// Returns [`Some`] if `self` is [`Self::Normal`]. + pub fn take_normal(self) -> Option> { + match self { + Self::Normal(txs) => Some(txs), + Self::Pruned(_) => None, + Self::None => Some(vec![]), + } + } + + /// Returns the byte length of the blob. + pub fn len(&self) -> usize { + match self { + Self::Normal(txs) => txs.len(), + Self::Pruned(txs) => txs.len(), + Self::None => 0, + } + } + + /// Returns `true` if the byte length of the blob is `0`. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Epee read function. 
+ #[cfg(feature = "epee")] + fn tx_blob_read(b: &mut B) -> cuprate_epee_encoding::Result { + let marker = cuprate_epee_encoding::read_marker(b)?; + match marker.inner_marker { + InnerMarker::Object => Ok(Self::Pruned(Vec::read(b, &marker)?)), + InnerMarker::String => Ok(Self::Normal(Vec::read(b, &marker)?)), + _ => Err(cuprate_epee_encoding::Error::Value( + "Invalid marker for tx blobs".to_string(), + )), + } + } + + /// Epee write function. + #[cfg(feature = "epee")] + fn tx_blob_write( + self, + field_name: &str, + w: &mut B, + ) -> cuprate_epee_encoding::Result<()> { + if self.should_write_tx_blobs() { + match self { + Self::Normal(bytes) => { + cuprate_epee_encoding::write_field(bytes, field_name, w)?; + } + Self::Pruned(obj) => { + cuprate_epee_encoding::write_field(obj, field_name, w)?; + } + Self::None => (), + } + } + Ok(()) + } + + /// Epee should write function. + #[cfg(feature = "epee")] + fn should_write_tx_blobs(&self) -> bool { + !self.is_empty() + } +} + +//---------------------------------------------------------------------------------------------------- PrunedTxBlobEntry +/// A pruned transaction with the hash of the missing prunable data +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct PrunedTxBlobEntry { + /// The transaction. + pub tx: Bytes, + /// The prunable transaction hash. + pub prunable_hash: ByteArray<32>, +} + +#[cfg(feature = "epee")] +epee_object!( + PrunedTxBlobEntry, + tx: Bytes, + prunable_hash: ByteArray<32>, +); + +//---------------------------------------------------------------------------------------------------- Import +#[cfg(test)] +mod tests {} diff --git a/types/src/lib.rs b/types/src/lib.rs index 2d161f7..1cdb9d5 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -1,11 +1,4 @@ -//! Cuprate shared data types. -//! -//! This crate is a kitchen-sink for data types that are shared across `Cuprate`. -//! -//! 
# Features flags -//! The [`blockchain`] module, containing the blockchain database request/response -//! types, must be enabled with the `blockchain` feature (on by default). - +#![doc = include_str!("../README.md")] //---------------------------------------------------------------------------------------------------- Lints // Forbid lints. // Our code, and code generated (e.g macros) cannot overrule these. @@ -86,7 +79,10 @@ // // Documentation for each module is located in the respective file. +mod block_complete_entry; mod types; + +pub use block_complete_entry::{BlockCompleteEntry, PrunedTxBlobEntry, TransactionBlobs}; pub use types::{ ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation, VerifiedTransactionInformation, }; From 88551c800cc9ad335b27b4eb4946cc1bb2cde4a3 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Thu, 18 Jul 2024 20:34:56 +0000 Subject: [PATCH 019/104] Books: document p2p protocol (#216) * add admin message definitions * add block complete entry * add protocol messages * add most flows * add final flows * fix typos * move link to epee * review fixes --- books/protocol/src/SUMMARY.md | 11 +- books/protocol/src/p2p_network.md | 2 +- .../protocol/src/p2p_network/common_types.md | 116 +++++++++++++++++ books/protocol/src/p2p_network/epee.md | 3 - books/protocol/src/p2p_network/levin.md | 24 +++- books/protocol/src/p2p_network/levin/admin.md | 102 +++++++++++++++ .../src/p2p_network/levin/protocol.md | 121 ++++++++++++++++++ .../protocol/src/p2p_network/message_flows.md | 19 +++ .../p2p_network/message_flows/chain_sync.md | 28 ++++ .../p2p_network/message_flows/get_blocks.md | 19 +++ .../p2p_network/message_flows/handshake.md | 51 ++++++++ .../p2p_network/message_flows/new_block.md | 29 +++++ .../message_flows/new_transactions.md | 16 +++ .../p2p_network/message_flows/timed_sync.md | 28 ++++ books/protocol/src/p2p_network/messages.md | 37 ------ 15 files changed, 559 insertions(+), 47 deletions(-) create mode 100644 
books/protocol/src/p2p_network/common_types.md delete mode 100644 books/protocol/src/p2p_network/epee.md create mode 100644 books/protocol/src/p2p_network/levin/admin.md create mode 100644 books/protocol/src/p2p_network/levin/protocol.md create mode 100644 books/protocol/src/p2p_network/message_flows.md create mode 100644 books/protocol/src/p2p_network/message_flows/chain_sync.md create mode 100644 books/protocol/src/p2p_network/message_flows/get_blocks.md create mode 100644 books/protocol/src/p2p_network/message_flows/handshake.md create mode 100644 books/protocol/src/p2p_network/message_flows/new_block.md create mode 100644 books/protocol/src/p2p_network/message_flows/new_transactions.md create mode 100644 books/protocol/src/p2p_network/message_flows/timed_sync.md delete mode 100644 books/protocol/src/p2p_network/messages.md diff --git a/books/protocol/src/SUMMARY.md b/books/protocol/src/SUMMARY.md index 1a4b1f0..682e0e7 100644 --- a/books/protocol/src/SUMMARY.md +++ b/books/protocol/src/SUMMARY.md @@ -23,5 +23,14 @@ - [Bulletproofs+](./consensus_rules/transactions/ring_ct/bulletproofs+.md) - [P2P Network](./p2p_network.md) - [Levin Protocol](./p2p_network/levin.md) - - [P2P Messages](./p2p_network/messages.md) + - [Admin Messages](./p2p_network/levin/admin.md) + - [Protocol Messages](./p2p_network/levin/protocol.md) + - [Common Types](./p2p_network/common_types.md) + - [Message Flows](./p2p_network/message_flows.md) + - [Handshake](./p2p_network/message_flows/handshake.md) + - [Timed Sync](./p2p_network/message_flows/timed_sync.md) + - [New Block](./p2p_network/message_flows/new_block.md) + - [New Transactions](./p2p_network/message_flows/new_transactions.md) + - [Chain Sync](./p2p_network/message_flows/chain_sync.md) + - [Get Blocks](./p2p_network/message_flows/get_blocks.md) - [Pruning](./pruning.md) diff --git a/books/protocol/src/p2p_network.md b/books/protocol/src/p2p_network.md index e0d9a79..89bd1be 100644 --- a/books/protocol/src/p2p_network.md +++ 
b/books/protocol/src/p2p_network.md @@ -1,3 +1,3 @@ # P2P Network -This chapter contains descriptions of Monero's peer to peer network, including messages, flows, expected responses, etc. +This chapter contains descriptions of Monero's peer to peer network, including messages, flows, etc. diff --git a/books/protocol/src/p2p_network/common_types.md b/books/protocol/src/p2p_network/common_types.md new file mode 100644 index 0000000..0bf29cb --- /dev/null +++ b/books/protocol/src/p2p_network/common_types.md @@ -0,0 +1,116 @@ +# Common P2P Types + +This chapter contains definitions of types used in multiple P2P messages. + +### Support Flags + +Support flags specify any protocol extensions the peer supports, currently only the first bit is used: + +`FLUFFY_BLOCKS = 1` - for if the peer supports receiving fluffy blocks. + +### Basic Node Data [^b-n-d] { #basic-node-data } + +| Fields | Type | Description | +|------------------------|---------------------------------------|-------------------------------------------------------------------------------------------| +| `network_id` | A UUID (epee string) | A fixed constant value for a specific network (mainnet,testnet,stagenet) | +| `my_port` | u32 | The peer's inbound port, if the peer does not want inbound connections this should be `0` | +| `rpc_port` | u16 | The peer's RPC port, if the peer does not want inbound connections this should be `0` | +| `rpc_credits_per_hash` | u32 | States how much it costs to use this node in credits per hashes, `0` being free | +| `peer_id` | u64 | A fixed ID for the node, set to 1 for anonymity networks | +| `support_flags` | [support flags](#support-flags) (u32) | Specifies any protocol extensions the peer supports | + +### Core Sync Data [^c-s-d] { #core-sync-data } + +| Fields | Type | Description | +|-------------------------------|------------------------|---------------------------------------------------------------| +| `current_height` | u64 | The current chain height | +| 
`cumulative_difficulty` | u64 | The low 64 bits of the cumulative difficulty | +| `cumulative_difficulty_top64` | u64 | The high 64 bits of the cumulative difficulty | +| `top_id` | [u8; 32] (epee string) | The hash of the top block | +| `top_version` | u8 | The hardfork version of the top block | +| `pruning_seed` | u32 | THe pruning seed of the node, `0` if the node does no pruning | + +### Network Address [^network-addr] { #network-address } + +Network addresses are serialized differently than other types, the fields needed depend on the `type` field: + +| Fields | Type | Description | +| ------ | --------------------------------------- | ---------------- | +| `type` | u8 | The address type | +| `addr` | An object whose fields depend on `type` | The address | + +#### IPv4 + +`type = 1` + +| Fields | Type | Description | +| -------- | ---- | ---------------- | +| `m_ip` | u32 | The IPv4 address | +| `m_port` | u16 | The port | + + +#### IPv6 + +`type = 2` + +| Fields | Type | Description | +| -------- | ---------------------- | ---------------- | +| `addr` | [u8; 16] (epee string) | The IPv6 address | +| `m_port` | u16 | The port | + +#### Tor + +TODO: + +#### I2p + +TODO: + +### Peer List Entry Base [^pl-entry-base] { #peer-list-entry-base } + +| Fields | Type | Description | +|------------------------|-------------------------------------|-------------------------------------------------------------------------------------------------------| +| `adr` | [Network Address](#network-address) | The address of the peer | +| `id` | u64 | The random, self assigned, ID of this node | +| `last_seen` | i64 | A field marking when this peer was last seen, although this is zeroed before sending over the network | +| `pruning_seed` | u32 | This peer's pruning seed, `0` if the peer does no pruning | +| `rpc_port` | u16 | This node's RPC port, `0` if this peer has no public RPC port. 
| +| `rpc_credits_per_hash` | u32 | States how much it costs to use this node in credits per hashes, `0` being free | + +### Tx Blob Entry [^tb-entry] { #tx-blob-entry } + +| Fields | Type | Description | +| --------------- | ---------------------- | --------------------------------------- | +| `blob` | bytes (epee string) | The pruned tx blob | +| `prunable_hash` | [u8; 32] (epee string) | The hash of the prunable part of the tx | + +### Block Complete Entry [^bc-entry] { #block-complete-entry } + +| Fields | Type | Description | +|----------------|---------------------|-----------------------------------------------------------| +| `pruned` | bool | True if the block is pruned, false otherwise | +| `block` | bytes (epee string) | The block blob | +| `block_weight` | u64 | The block's weight | +| `txs` | depends on `pruned` | The transaction blobs, the exact type depends on `pruned` | + +If `pruned` is true: + +`txs` is a vector of [Tx Blob Entry](#tx-blob-entry) + +If `pruned` is false: + +`txs` is a vector of bytes. + +--- + +[^b-n-d]: + +[^c-s-d]: + +[^network-addr]: + +[^pl-entry-base]: + +[^tb-entry]: + +[^bc-entry]: diff --git a/books/protocol/src/p2p_network/epee.md b/books/protocol/src/p2p_network/epee.md deleted file mode 100644 index 2f8161d..0000000 --- a/books/protocol/src/p2p_network/epee.md +++ /dev/null @@ -1,3 +0,0 @@ -# Epee Binary Format - -The epee binary format is described here: TODO diff --git a/books/protocol/src/p2p_network/levin.md b/books/protocol/src/p2p_network/levin.md index de74660..ec92f7d 100644 --- a/books/protocol/src/p2p_network/levin.md +++ b/books/protocol/src/p2p_network/levin.md @@ -10,16 +10,16 @@ of buckets that will be combined into a single message. 
### Bucket Format | Field | Type | Size (bytes) | -| ------ | ----------------------------- | ------------ | +|--------|-------------------------------|--------------| | Header | [BucketHeader](#bucketheader) | 33 | | Body | bytes | dynamic | ### BucketHeader -Format: +Format[^header-format]: | Field | Type | Size (bytes) | -| ---------------- | ------ | ------------ | +|------------------|--------|--------------| | Signature | LE u64 | 8 | | Size | LE u64 | 8 | | Expect Response | bool | 1 | @@ -32,7 +32,7 @@ Format: The signature field is fixed for every bucket and is used to tell apart peers running different protocols. -Its value should be `0x0101010101012101` +Its value should be `0x0101010101012101` [^signature] #### Size @@ -53,7 +53,7 @@ responses should be `1`. #### Flags -This is a bit-flag field that determines what type of bucket this is: +This is a bit-flag field that determines what type of bucket this is[^flags]: | Type | Bits set | | -------------- | ----------- | @@ -66,3 +66,17 @@ This is a bit-flag field that determines what type of bucket this is: #### Protocol Version This is a fixed value of 1. + +## Bucket Body + +All bucket bodies are serialized in the epee binary format which is described here: https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/docs/PORTABLE_STORAGE.md + +Exact message types are described in the next chapters. + +--- + +[^header-format]: + +[^signature]: + +[^flags]: diff --git a/books/protocol/src/p2p_network/levin/admin.md b/books/protocol/src/p2p_network/levin/admin.md new file mode 100644 index 0000000..6f2b716 --- /dev/null +++ b/books/protocol/src/p2p_network/levin/admin.md @@ -0,0 +1,102 @@ +# Admin Messages + +This chapter describes admin messages, and documents the current admin messages. Admin messages are a subset of messages that handle connection +creation, making sure connections are still alive, and sharing peer lists. 
+ +## Levin + +All admin messages are in the request/response levin format. This means requests will set the [expect response bit](./levin.md#expect-response) and +responses will set the return code to [`1`](./levin.md#return-code). + +## Messages + +### Handshake + +ID: `1001`[^handshake-id] + +#### Request [^handshake-req] { #handshake-request } + +| Fields | Type | Description | +|----------------|-------------------------------------------------------|--------------------------------------| +| `node_data` | [basic node data](../common_types.md#basic-node-data) | Static information about our node | +| `payload_data` | [core sync data](../common_types.md#core-sync-data) | Information on the node's sync state | + +#### Response [^handshake-res] { #handshake-response } + +| Fields | Type | Description | +|----------------------|--------------------------------------------------------------------------|-----------------------------------------| +| `node_data` | [basic node data](../common_types.md#basic-node-data) | Static information about our node | +| `payload_data` | [core sync data](../common_types.md#core-sync-data) | Information on the node's sync state | +| `local_peerlist_new` | A Vec of [peer list entry base](../common_types.md#peer-list-entry-base) | A list of peers in the node's peer list | + +### Timed Sync + +ID: `1002`[^timed-sync-id] + +#### Request [^timed-sync-req] { #timed-sync-request } + +| Fields | Type | Description | +| -------------- | --------------------------------------------------- | ------------------------------------ | +| `payload_data` | [core sync data](../common_types.md#core-sync-data) | Information on the node's sync state | + +#### Response [^timed-sync-res] { #timed-sync-response } + +| Fields | Type | Description | +|----------------------|--------------------------------------------------------------------------|-----------------------------------------| +| `payload_data` | [core sync data](../common_types.md#core-sync-data) 
| Information on the node's sync state | +| `local_peerlist_new` | A Vec of [peer list entry base](../common_types.md#peer-list-entry-base) | A list of peers in the node's peer list | + +### Ping + +ID: `1003`[^ping-id] + +#### Request [^ping-req] { #ping-request } + +No data is serialized for a ping request. + +#### Response [^ping-res] { #ping-response } + +| Fields | Type | Description | +| --------- | ------ | --------------------------------- | +| `status` | string | Will be `OK` for successful pings | +| `peer_id` | u64 | The self assigned id of the peer | + +### Request Support Flags + +ID: `1007`[^support-flags] + +#### Request [^sf-req] { #support-flags-request } + +No data is serialized for a ping request. + +#### Response [^sf-res] { #support-flags-response } + +| Fields | Type | Description | +| --------------- | ---- | ------------------------------------------------------------ | +| `support_flags` | u32 | The peer's [support flags](../common_types.md#support-flags) | + +--- + +[^handshake-id]: + +[^handshake-req]: + +[^handshake-res]: + +[^timed-sync-id]: + +[^timed-sync-req]: + +[^timed-sync-res]: + +[^ping-id]: + +[^ping-req]: + +[^ping-res]: + +[^support-flags]: + +[^sf-req]: + +[^sf-res]: diff --git a/books/protocol/src/p2p_network/levin/protocol.md b/books/protocol/src/p2p_network/levin/protocol.md new file mode 100644 index 0000000..a52ca1d --- /dev/null +++ b/books/protocol/src/p2p_network/levin/protocol.md @@ -0,0 +1,121 @@ +# Protocol Messages + +This chapter describes protocol messages, and documents the current protocol messages. Protocol messages are used to share protocol data +like blocks and transactions. + +## Levin + +All protocol messages are in the notification levin format. Although there are some messages that fall under requests/responses, levin will treat them as notifications. + +All admin messages are in the request/response levin format. 
This means requests will set the [expect response bit](../levin.md#expect-response) and +responses will set the return code to [`1`](../levin.md#return-code). + +## Messages + +### Notify New Block + +ID: `2001`[^notify-new-block-id] + +| Fields | Type | Description | +| --------------------------- | --------------------------------------------------------------- | ------------------------ | +| `b` | [Block Complete Entry](../common_types.md#block-complete-entry) | The full block | +| `current_blockchain_height` | u64 | The current chain height | + +### Notify New Transactions + +ID: `2002`[^notify-new-transactions-id] + +| Fields | Type | Description | +| ------------------- | ----------------- | ------------------------------------------------------ | +| `txs` | A vector of bytes | The txs | +| `_` | Bytes | Padding to prevent traffic volume analysis | +| `dandelionpp_fluff` | bool | True if this message contains fluff txs, false if stem | + +### Notify Request Get Objects + +ID: `2003`[^notify-request-get-objects-id] + +| Fields | Type | Description | +|----------|----------------------------------------------------|------------------------------------------------------------| +| `blocks` | A vector of [u8; 32] serialized as a single string | The block IDs requested | +| `prune` | bool | True if we want the blocks in pruned form, false otherwise | + +### Notify Response Get Objects + +ID: `2004`[^notify-response-get-objects-id] + +| Fields | Type | Description | +| --------------------------- | --------------------------------------------------------------------------- | ------------------------------ | +| `blocks` | A vector of [Block Complete Entry](../common_types.md#block-complete-entry) | The blocks that were requested | +| `missed_ids` | A vector of [u8; 32] serialized as a single string | IDs of any missed blocks | +| `current_blockchain_height` | u64 | The current blockchain height | + +### Notify Request Chain + +ID: `2006`[^notify-request-chain-id] + 
+| Fields | Type | Description | +|-------------|----------------------------------------------------|-------------------------------------------------------------------------------------------------------| +| `block_ids` | A vector of [u8; 32] serialized as a single string | A list of block IDs in reverse chronological order, the top and genesis block will always be included | +| `prune` | bool | True if we want the response to contain pruned blocks, false otherwise | + +### Notify Response Chain Entry + +ID: `2007`[^notify-response-chain-entry-id] + +| Fields | Type | Description | +|-------------------------------|----------------------------------------------------|------------------------------------------------| +| `start_height` | u64 | The start height of the entry | +| `total_height` | u64 | The height of the peer's blockchain | +| `cumulative_difficulty` | u64 | The low 64 bits of the cumulative difficulty | +| `cumulative_difficulty_top64` | u64 | The high 64 bits of the cumulative difficulty | +| `m_block_ids` | A vector of [u8; 32] serialized as a single string | The block IDs in this entry | +| `m_block_weights` | A vector of u64 serialized as a single string | The block weights | +| `first_block` | bytes (epee string) | The header of the first block in `m_block_ids` | + +### Notify New Fluffy Block + +ID: `2008`[^notify-new-fluffy-block-id] + +| Fields | Type | Description | +| --------------------------- | --------------------------------------------------------------- | ------------------------------------- | +| `b` | [Block Complete Entry](../common_types.md#block-complete-entry) | The block, may or may not contain txs | +| `current_blockchain_height` | u64 | The current chain height | + +### Notify Request Fluffy Missing Tx + +ID: `2009`[^notify-request-fluffy-missing-tx-id] + +| Fields | Type | Description | +|-----------------------------|-----------------------------------------------|--------------------------------------------| +| 
`block_hash` | [u8; 32] serialized as a string | The block hash txs are needed from | +| `current_blockchain_height` | u64 | The current chain height | +| `missing_tx_indices` | A vector of u64 serialized as a single string | The indices of the needed txs in the block | + +### Notify Get Txpool Compliment + +ID: `2010`[^notify-get-txpool-compliment-id] + +| Fields | Type | Description | +| -------- | ------------------------------------------- | ---------------------- | +| `hashes` | A vector of [u8; 32] serialized as a string | The current txpool txs | + +--- + +[^notify-new-block-id]: + +[^notify-new-transactions-id]: + +[^notify-request-get-objects-id]: + +[^notify-response-get-objects-id]: + +[^notify-request-chain-id]: + +[^notify-response-chain-entry-id]: + +[^notify-new-fluffy-block-id]: + +[^notify-request-fluffy-missing-tx-id]: + +[^notify-get-txpool-compliment-id]: diff --git a/books/protocol/src/p2p_network/message_flows.md b/books/protocol/src/p2p_network/message_flows.md new file mode 100644 index 0000000..8f1004c --- /dev/null +++ b/books/protocol/src/p2p_network/message_flows.md @@ -0,0 +1,19 @@ +# Message Flows + +Message flows are sets of messages sent between peers, that achieve an identifiable goal, like a handshake. +Some message flows are complex, involving many message types, whereas others are simple, requiring only 1. + +The message flows here are not every possible request/response. + +When documenting checks on the messages, not all checks are documented, only the ones notable. This should help +to reduce the maintenance burden. 
+
+## Different Flows
+
+- [Handshakes](./message_flows/handshake.md)
+- [Timed Sync](./message_flows/timed_sync.md)
+- [New Block](./message_flows/new_block.md)
+- [New Transactions](./message_flows/new_transactions.md)
+- [Chain Sync](./message_flows/chain_sync.md)
+- [Get Blocks](./message_flows/get_blocks.md)
+
diff --git a/books/protocol/src/p2p_network/message_flows/chain_sync.md b/books/protocol/src/p2p_network/message_flows/chain_sync.md
new file mode 100644
index 0000000..1b66132
--- /dev/null
+++ b/books/protocol/src/p2p_network/message_flows/chain_sync.md
@@ -0,0 +1,28 @@
+# Chain Sync
+
+Chain sync is the first step in syncing a peer's blockchain; it allows peers to find the split point in their chains and for the peer
+to learn about the missing block IDs.
+
+## Flow
+
+The first step is for the initiating peer to get its compact chain history. The compact chain history must be in reverse chronological
+order, with the first block being the top block and the last the genesis; if the only block is the genesis then that only needs to be included
+once. The blocks in the middle are not enforced to be at certain locations, however `monerod` will use the top 11 blocks and will then go power
+of 2 offsets from then on, i.e. `{13, 17, 25, ...}`
+
+Then, with the compact history, the initiating peer will send a [request chain](../levin/protocol.md#notify-request-chain) message; the receiving
+peer will then find the split point and return a [response chain entry](../levin/protocol.md#notify-response-chain-entry) message.
+
+The `response chain entry` will contain a list of block IDs with the first being a common ancestor and the rest being the next blocks that come after
+that block in the peer's chain.
+ +### Response Checks + +- There must be an overlapping block.[^res-overlapping-block] +- The amount of returned block IDs must be less than `25,000`.[^res-max-blocks] + +--- + +[^res-overlapping-block]: + +[^res-max-blocks]: \ No newline at end of file diff --git a/books/protocol/src/p2p_network/message_flows/get_blocks.md b/books/protocol/src/p2p_network/message_flows/get_blocks.md new file mode 100644 index 0000000..eacca7f --- /dev/null +++ b/books/protocol/src/p2p_network/message_flows/get_blocks.md @@ -0,0 +1,19 @@ +# Get Blocks + +The get block flow is used to download batches of blocks from a peer. + +## Flow + +The initiating peer needs a list of block IDs that the receiving peer has, this can be done with +the [chain sync flow](./chain_sync.md). + +With a list a block IDs the initiating peer will send a [get objects request](../levin/protocol.md#notify-request-get-objects) message, the receiving +peer will then respond with [get objects response](../levin/protocol.md#notify-response-get-objects). + +### Request Checks + +- The amount of blocks must be less than `100`.[^max-block-requests] + +--- + +[^max-block-requests]: diff --git a/books/protocol/src/p2p_network/message_flows/handshake.md b/books/protocol/src/p2p_network/message_flows/handshake.md new file mode 100644 index 0000000..2a4abe1 --- /dev/null +++ b/books/protocol/src/p2p_network/message_flows/handshake.md @@ -0,0 +1,51 @@ +# Handshakes + +Handshakes are used to establish connections to peers. + +## Flow + +The default handshake flow is made up of the connecting peer sending a [handshake request](../levin/admin.md#handshake-request) and the +receiving peer responding with a [handshake response](../levin/admin.md#handshake-response). + +It should be noted that not all other messages are banned during handshakes, for example, support flag requests and even some protocol +requests can be sent. 
+ +### Handshake Request Checks + +The receiving peer will check: + +- The `network_id` is network ID expected.[^network-id] +- The connection is an incoming connection.[^req-incoming-only] +- The peer hasn't already completed a handshake.[^double-handshake] +- If the network zone is public, then the `peer_id` must not be the same as ours.[^same-peer-id] +- The core sync data is not malformed.[^core-sync-data-checks] + +### Handshake Response Checks + +The initiating peer will check: + +- The `network_id` is network ID expected.[^res-network-id] +- The number of peers in the peer list is less than `250`.[^max-peer-list-res] +- All peers in the peer list are in the same zone.[^peers-all-in-same-zone] +- The core sync data is not malformed.[^core-sync-data-checks] +- If the network zone is public, then the `peer_id` must not be the same as ours.[^same-peer-id-res] + +--- + +[^network-id]: + +[^req-incoming-only]: + +[^double-handshake]: + +[^same-peer-id]: + +[^core-sync-data-checks]: + +[^res-network-id]: + +[^max-peer-list-res]: + +[^peers-all-in-same-zone]: + +[^same-peer-id-res]: diff --git a/books/protocol/src/p2p_network/message_flows/new_block.md b/books/protocol/src/p2p_network/message_flows/new_block.md new file mode 100644 index 0000000..452aa44 --- /dev/null +++ b/books/protocol/src/p2p_network/message_flows/new_block.md @@ -0,0 +1,29 @@ +# New Block + +This is used whenever a new block is to be sent to peers. Only the fluffy block flow is described here, as the other method is deprecated. + +## Flow + +First the peer with the new block will send a [new fluffy block](../levin/protocol.md#notify-new-fluffy-block) notification, if the receiving +peer has all the txs in the block then the flow is complete. 
Otherwise the peer sends a [fluffy missing transactions request](../levin/protocol.md#notify-request-fluffy-missing-tx) +to the first peer, the first peer will then respond with again a [new fluffy block](../levin/protocol.md#notify-new-fluffy-block) notification but +with the transactions requested. + +```bob + + ,-----------. ,----------. + | Initiator | | Receiver | + `-----+-----' `-----+----' + | New Fluffy Block | + |-------------------->| + | | + | Missing Txs Request | + |<- - - - - - - - - - | + | | + | New Fluffy Block | + | - - - - - - - - - ->| + | | + | | + V v +``` + diff --git a/books/protocol/src/p2p_network/message_flows/new_transactions.md b/books/protocol/src/p2p_network/message_flows/new_transactions.md new file mode 100644 index 0000000..2a90a3f --- /dev/null +++ b/books/protocol/src/p2p_network/message_flows/new_transactions.md @@ -0,0 +1,16 @@ +# New Transactions + +Monero uses the dandelion++ protocol to pass transactions around the network, this flow just describes the actual tx passing between nodes part. + +## Flow + +This flow is pretty simple, the txs are put into a [new transactions](../levin/protocol.md#notify-new-transactions) notification and sent to +peers. + +Hopefully in the future [this is changed](https://github.com/monero-project/monero/issues/9334). + +There must be no duplicate txs in the notification.[^duplicate-txs] + +--- + +[^duplicate-txs]: \ No newline at end of file diff --git a/books/protocol/src/p2p_network/message_flows/timed_sync.md b/books/protocol/src/p2p_network/message_flows/timed_sync.md new file mode 100644 index 0000000..4d258d7 --- /dev/null +++ b/books/protocol/src/p2p_network/message_flows/timed_sync.md @@ -0,0 +1,28 @@ +# Timed Syncs + +A timed sync request is sent every 60 seconds to make sure the connection is still live. 
+ +## Flow + +First the timed sync initiator will send a [timed sync request](../levin/admin.md#timed-sync-request), the receiver will then +respond with a [timed sync response](../levin/admin.md#timed-sync-response) + +### Timed Sync Request Checks + +- The core sync data is not malformed.[^core-sync-data-checks] + + +### Timed Sync Response Checks + +- The core sync data is not malformed.[^core-sync-data-checks] +- The number of peers in the peer list is less than `250`.[^max-peer-list-res] +- All peers in the peer list are in the same zone.[^peers-all-in-same-zone] + +--- + +[^core-sync-data-checks]: + +[^max-peer-list-res]: + +[^peers-all-in-same-zone]: + diff --git a/books/protocol/src/p2p_network/messages.md b/books/protocol/src/p2p_network/messages.md deleted file mode 100644 index c3f1828..0000000 --- a/books/protocol/src/p2p_network/messages.md +++ /dev/null @@ -1,37 +0,0 @@ -# P2P Messages - -This chapter contains every P2P message. - -## Index - -## Types - -Types used in multiple P2P messages. - -### Support Flags - -Support flags specify any protocol extensions the peer supports, currently only the first bit is used: - -`FLUFFY_BLOCKS = 1` - for if the peer supports receiving fluffy blocks. 
- -### Basic Node Data - -| Fields | Type (Epee Type) | Description | -| ---------------------- | ------------------------------------- | ---------------------------------------------------------------------------------------- | -| `network_id` | A UUID (String) | A fixed constant value for a specific network (mainnet,testnet,stagenet) | -| `my_port` | u32 (u32) | The peer's inbound port, if the peer does not want inbound connections this should be `0` | -| `rpc_port` | u16 (u16) | The peer's RPC port, if the peer does not want inbound connections this should be `0` | -| `rpc_credits_per_hash` | u32 (u32) | TODO | -| `peer_id` | u64 (u64) | A fixed ID for the node, set to 1 for anonymity networks | -| `support_flags` | [support flags](#support-flags) (u32) | Specifies any protocol extensions the peer supports | - -## Messages - -### Handshake Requests - -levin command: 1001 - -| Fields | Type (Epee Type) | Description | -| ----------- | -------------------------------------------- | ----------- | -| `node_data` | [basic node data](#basic-node-data) (Object) | | -| | | | From 0a88ea13fc853088ec6ba59c6c30eb1442993d99 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Thu, 18 Jul 2024 16:35:52 -0400 Subject: [PATCH 020/104] books: architecture skeleton (#224) * upstream https://github.com/hinto-janai/cuprate-architecture * tab -> 4 spaces --- books/architecture/README.md | 2 +- books/architecture/book.toml | 22 ++-- books/architecture/last-changed.css | 7 + books/architecture/src/SUMMARY.md | 123 +++++++++++++++++- .../src/appendix/build-targets.md | 7 + .../architecture/src/appendix/contributing.md | 2 + .../src/appendix/crate-documentation.md | 4 + books/architecture/src/appendix/intro.md | 1 + .../src/appendix/protocol-book.md | 2 + books/architecture/src/appendix/user-book.md | 1 + .../src/benchmarking/criterion.md | 1 + .../architecture/src/benchmarking/harness.md | 1 + books/architecture/src/benchmarking/intro.md | 1 + books/architecture/src/binary/cli.md | 1 + 
books/architecture/src/binary/config.md | 1 + books/architecture/src/binary/intro.md | 1 + books/architecture/src/binary/logging.md | 1 + .../src/birds-eye-view/components.md | 1 + .../architecture/src/birds-eye-view/intro.md | 1 + books/architecture/src/birds-eye-view/map.md | 1 + books/architecture/src/consensus/intro.md | 1 + books/architecture/src/consensus/todo.md | 1 + books/architecture/src/consensus/verifier.md | 1 + .../architecture/src/cuprate-architecture.md | 22 ++++ .../external-monero-libraries/cryptonight.md | 1 + .../src/external-monero-libraries/intro.md | 1 + .../external-monero-libraries/monero_serai.md | 2 + .../src/external-monero-libraries/randomx.md | 2 + books/architecture/src/foreword.md | 36 +++++ .../formats-protocols-types/cuprate-helper.md | 1 + .../formats-protocols-types/cuprate-types.md | 1 + .../src/formats-protocols-types/epee.md | 1 + .../src/formats-protocols-types/intro.md | 1 + .../src/formats-protocols-types/levin.md | 1 + .../formats-protocols-types/monero-serai.md | 1 + .../src/instrumentation/data-collection.md | 1 + .../architecture/src/instrumentation/intro.md | 2 + .../src/instrumentation/logging.md | 1 + books/architecture/src/intro.md | 4 + .../src/intro/how-to-use-this-book.md | 5 + books/architecture/src/intro/intro.md | 15 +++ .../src/intro/required-knowledge.md | 28 ++++ .../src/intro/who-this-book-is-for.md | 31 +++++ .../src/known-issues-and-tradeoffs/intro.md | 1 + .../known-issues-and-tradeoffs/networking.md | 1 + .../src/known-issues-and-tradeoffs/rpc.md | 1 + .../src/known-issues-and-tradeoffs/storage.md | 1 + .../architecture/src/networking/dandelion.md | 1 + books/architecture/src/networking/i2p.md | 1 + books/architecture/src/networking/intro.md | 1 + .../architecture/src/networking/ipv4-ipv6.md | 1 + books/architecture/src/networking/p2p.md | 1 + books/architecture/src/networking/proxy.md | 1 + books/architecture/src/networking/tor.md | 1 + .../concurrency-and-parallelism/intro.md | 32 +++++ 
.../concurrency-and-parallelism/map.md | 1 + .../the-block-downloader.md | 1 + .../the-database.md | 1 + .../the-rpc-server.md | 1 + .../the-verifier.md | 1 + .../thread-exit.md | 1 + .../src/resource-model/file-system.md | 1 + .../architecture/src/resource-model/intro.md | 1 + .../architecture/src/resource-model/memory.md | 1 + .../src/resource-model/sockets.md | 1 + books/architecture/src/rpc/handler.md | 1 + books/architecture/src/rpc/interface.md | 1 + books/architecture/src/rpc/intro.md | 3 + books/architecture/src/rpc/methods/intro.md | 1 + books/architecture/src/rpc/router.md | 1 + books/architecture/src/rpc/types/binary.md | 1 + books/architecture/src/rpc/types/intro.md | 1 + books/architecture/src/rpc/types/json.md | 1 + books/architecture/src/rpc/types/other.md | 1 + books/architecture/src/storage/blockchain.md | 1 + .../src/storage/database-abstraction.md | 1 + books/architecture/src/storage/intro.md | 1 + books/architecture/src/storage/pruning.md | 1 + .../src/storage/transaction-pool.md | 1 + books/architecture/src/testing/intro.md | 1 + books/architecture/src/testing/monero-data.md | 1 + books/architecture/src/testing/rpc-client.md | 1 + .../src/testing/spawning-monerod.md | 1 + books/architecture/src/zmq/intro.md | 1 + books/architecture/src/{ => zmq}/todo.md | 0 85 files changed, 401 insertions(+), 14 deletions(-) create mode 100644 books/architecture/last-changed.css create mode 100644 books/architecture/src/appendix/build-targets.md create mode 100644 books/architecture/src/appendix/contributing.md create mode 100644 books/architecture/src/appendix/crate-documentation.md create mode 100644 books/architecture/src/appendix/intro.md create mode 100644 books/architecture/src/appendix/protocol-book.md create mode 100644 books/architecture/src/appendix/user-book.md create mode 100644 books/architecture/src/benchmarking/criterion.md create mode 100644 books/architecture/src/benchmarking/harness.md create mode 100644 
books/architecture/src/benchmarking/intro.md create mode 100644 books/architecture/src/binary/cli.md create mode 100644 books/architecture/src/binary/config.md create mode 100644 books/architecture/src/binary/intro.md create mode 100644 books/architecture/src/binary/logging.md create mode 100644 books/architecture/src/birds-eye-view/components.md create mode 100644 books/architecture/src/birds-eye-view/intro.md create mode 100644 books/architecture/src/birds-eye-view/map.md create mode 100644 books/architecture/src/consensus/intro.md create mode 100644 books/architecture/src/consensus/todo.md create mode 100644 books/architecture/src/consensus/verifier.md create mode 100644 books/architecture/src/cuprate-architecture.md create mode 100644 books/architecture/src/external-monero-libraries/cryptonight.md create mode 100644 books/architecture/src/external-monero-libraries/intro.md create mode 100644 books/architecture/src/external-monero-libraries/monero_serai.md create mode 100644 books/architecture/src/external-monero-libraries/randomx.md create mode 100644 books/architecture/src/foreword.md create mode 100644 books/architecture/src/formats-protocols-types/cuprate-helper.md create mode 100644 books/architecture/src/formats-protocols-types/cuprate-types.md create mode 100644 books/architecture/src/formats-protocols-types/epee.md create mode 100644 books/architecture/src/formats-protocols-types/intro.md create mode 100644 books/architecture/src/formats-protocols-types/levin.md create mode 100644 books/architecture/src/formats-protocols-types/monero-serai.md create mode 100644 books/architecture/src/instrumentation/data-collection.md create mode 100644 books/architecture/src/instrumentation/intro.md create mode 100644 books/architecture/src/instrumentation/logging.md create mode 100644 books/architecture/src/intro.md create mode 100644 books/architecture/src/intro/how-to-use-this-book.md create mode 100644 books/architecture/src/intro/intro.md create mode 100644 
books/architecture/src/intro/required-knowledge.md create mode 100644 books/architecture/src/intro/who-this-book-is-for.md create mode 100644 books/architecture/src/known-issues-and-tradeoffs/intro.md create mode 100644 books/architecture/src/known-issues-and-tradeoffs/networking.md create mode 100644 books/architecture/src/known-issues-and-tradeoffs/rpc.md create mode 100644 books/architecture/src/known-issues-and-tradeoffs/storage.md create mode 100644 books/architecture/src/networking/dandelion.md create mode 100644 books/architecture/src/networking/i2p.md create mode 100644 books/architecture/src/networking/intro.md create mode 100644 books/architecture/src/networking/ipv4-ipv6.md create mode 100644 books/architecture/src/networking/p2p.md create mode 100644 books/architecture/src/networking/proxy.md create mode 100644 books/architecture/src/networking/tor.md create mode 100644 books/architecture/src/resource-model/concurrency-and-parallelism/intro.md create mode 100644 books/architecture/src/resource-model/concurrency-and-parallelism/map.md create mode 100644 books/architecture/src/resource-model/concurrency-and-parallelism/the-block-downloader.md create mode 100644 books/architecture/src/resource-model/concurrency-and-parallelism/the-database.md create mode 100644 books/architecture/src/resource-model/concurrency-and-parallelism/the-rpc-server.md create mode 100644 books/architecture/src/resource-model/concurrency-and-parallelism/the-verifier.md create mode 100644 books/architecture/src/resource-model/concurrency-and-parallelism/thread-exit.md create mode 100644 books/architecture/src/resource-model/file-system.md create mode 100644 books/architecture/src/resource-model/intro.md create mode 100644 books/architecture/src/resource-model/memory.md create mode 100644 books/architecture/src/resource-model/sockets.md create mode 100644 books/architecture/src/rpc/handler.md create mode 100644 books/architecture/src/rpc/interface.md create mode 100644 
books/architecture/src/rpc/intro.md create mode 100644 books/architecture/src/rpc/methods/intro.md create mode 100644 books/architecture/src/rpc/router.md create mode 100644 books/architecture/src/rpc/types/binary.md create mode 100644 books/architecture/src/rpc/types/intro.md create mode 100644 books/architecture/src/rpc/types/json.md create mode 100644 books/architecture/src/rpc/types/other.md create mode 100644 books/architecture/src/storage/blockchain.md create mode 100644 books/architecture/src/storage/database-abstraction.md create mode 100644 books/architecture/src/storage/intro.md create mode 100644 books/architecture/src/storage/pruning.md create mode 100644 books/architecture/src/storage/transaction-pool.md create mode 100644 books/architecture/src/testing/intro.md create mode 100644 books/architecture/src/testing/monero-data.md create mode 100644 books/architecture/src/testing/rpc-client.md create mode 100644 books/architecture/src/testing/spawning-monerod.md create mode 100644 books/architecture/src/zmq/intro.md rename books/architecture/src/{ => zmq}/todo.md (100%) diff --git a/books/architecture/README.md b/books/architecture/README.md index e487843..88e86eb 100644 --- a/books/architecture/README.md +++ b/books/architecture/README.md @@ -1,4 +1,4 @@ -## Cuprate's architecture (implementation) book +## Cuprate's architecture book This book documents Cuprate's architecture and implementation. See: diff --git a/books/architecture/book.toml b/books/architecture/book.toml index 76724aa..996f7fe 100644 --- a/books/architecture/book.toml +++ b/books/architecture/book.toml @@ -1,19 +1,17 @@ [book] -authors = ["hinto-janai"] +authors = ["Cuprate Contributors"] language = "en" multilingual = false src = "src" title = "Cuprate Architecture" git-repository-url = "https://github.com/Cuprate/architecture-book" -# TODO: fix after importing real files. 
-# -# [preprocessor.last-changed] -# command = "mdbook-last-changed" -# renderer = ["html"] -# -# [output.html] -# default-theme = "ayu" -# preferred-dark-theme = "ayu" -# git-repository-url = "https://github.com/hinto-janai/cuprate-architecture" -# additional-css = ["last-changed.css"] +[preprocessor.last-changed] +command = "mdbook-last-changed" +renderer = ["html"] + +[output.html] +default-theme = "ayu" +preferred-dark-theme = "ayu" +git-repository-url = "https://github.com/Cuprate/architecture-book" +additional-css = ["last-changed.css"] diff --git a/books/architecture/last-changed.css b/books/architecture/last-changed.css new file mode 100644 index 0000000..a9abae5 --- /dev/null +++ b/books/architecture/last-changed.css @@ -0,0 +1,7 @@ +footer { + font-size: 0.8em; + text-align: center; + border-top: 1px solid; + margin-top: 4%; + padding: 5px 0; +} \ No newline at end of file diff --git a/books/architecture/src/SUMMARY.md b/books/architecture/src/SUMMARY.md index 2b8615c..74ecda6 100644 --- a/books/architecture/src/SUMMARY.md +++ b/books/architecture/src/SUMMARY.md @@ -1,3 +1,124 @@ # Summary -- [TODO](todo.md) +[Cuprate Architecture](cuprate-architecture.md) +[🟡 Foreword](foreword.md) + +--- + +- [🟠 Intro](intro/intro.md) + - [🟡 Who this book is for](intro/who-this-book-is-for.md) + - [🔴 Required knowledge](intro/required-knowledge.md) + - [🔴 How to use this book](intro/how-to-use-this-book.md) + +--- + +- [⚪️ Bird's eye view](birds-eye-view/intro.md) + - [⚪️ Map](birds-eye-view/map.md) + - [⚪️ Components](birds-eye-view/components.md) + +--- + +- [⚪️ Formats, protocols, types](formats-protocols-types/intro.md) + - [⚪️ monero_serai](formats-protocols-types/monero-serai.md) + - [⚪️ cuprate_types](formats-protocols-types/cuprate-types.md) + - [⚪️ cuprate_helper](formats-protocols-types/cuprate-helper.md) + - [⚪️ Epee](formats-protocols-types/epee.md) + - [⚪️ Levin](formats-protocols-types/levin.md) + +--- + +- [⚪️ Storage](storage/intro.md) + - [⚪️ Database 
abstraction](storage/database-abstraction.md) + - [⚪️ Blockchain](storage/blockchain.md) + - [⚪️ Transaction pool](storage/transaction-pool.md) + - [⚪️ Pruning](storage/pruning.md) + +--- + +- [🔴 RPC](rpc/intro.md) + - [⚪️ Types](rpc/types/intro.md) + - [⚪️ JSON](rpc/types/json.md) + - [⚪️ Binary](rpc/types/binary.md) + - [⚪️ Other](rpc/types/other.md) + - [⚪️ Interface](rpc/interface.md) + - [⚪️ Router](rpc/router.md) + - [⚪️ Handler](rpc/handler.md) + - [⚪️ Methods](rpc/methods/intro.md) + +--- + +- [⚪️ ZMQ](zmq/intro.md) + - [⚪️ TODO](zmq/todo.md) + +--- + +- [⚪️ Consensus](consensus/intro.md) + - [⚪️ Verifier](consensus/verifier.md) + - [⚪️ TODO](consensus/todo.md) + +--- + +- [⚪️ Networking](networking/intro.md) + - [⚪️ P2P](networking/p2p.md) + - [⚪️ Dandelion++](networking/dandelion.md) + - [⚪️ Proxy](networking/proxy.md) + - [⚪️ Tor](networking/tor.md) + - [⚪️ i2p](networking/i2p.md) + - [⚪️ IPv4/IPv6](networking/ipv4-ipv6.md) + +--- + +- [🔴 Instrumentation](instrumentation/intro.md) + - [⚪️ Logging](instrumentation/logging.md) + - [⚪️ Data collection](instrumentation/data-collection.md) + +--- + +- [⚪️ Binary](binary/intro.md) + - [⚪️ CLI](binary/cli.md) + - [⚪️ Config](binary/config.md) + - [⚪️ Logging](binary/logging.md) + +--- + +- [⚪️ Resource model](resource-model/intro.md) + - [⚪️ File system](resource-model/file-system.md) + - [⚪️ Sockets](resource-model/sockets.md) + - [⚪️ Memory](resource-model/memory.md) + - [🟡 Concurrency and parallelism](resource-model/concurrency-and-parallelism/intro.md) + - [⚪️ Map](resource-model/concurrency-and-parallelism/map.md) + - [⚪️ The RPC server](resource-model/concurrency-and-parallelism/the-rpc-server.md) + - [⚪️ The database](resource-model/concurrency-and-parallelism/the-database.md) + - [⚪️ The block downloader](resource-model/concurrency-and-parallelism/the-block-downloader.md) + - [⚪️ The verifier](resource-model/concurrency-and-parallelism/the-verifier.md) + - [⚪️ Thread 
exit](resource-model/concurrency-and-parallelism/thread-exit.md) + +--- + +- [⚪️ External Monero libraries](external-monero-libraries/intro.md) + - [⚪️ Cryptonight](external-monero-libraries/cryptonight.md) + - [🔴 RandomX](external-monero-libraries/randomx.md) + - [🔴 monero_serai](external-monero-libraries/monero_serai.md) + +--- + +- [⚪️ Benchmarking](benchmarking/intro.md) + - [⚪️ Criterion](benchmarking/criterion.md) + - [⚪️ Harness](benchmarking/harness.md) +- [⚪️ Testing](testing/intro.md) + - [⚪️ Monero data](testing/monero-data.md) + - [⚪️ RPC client](testing/rpc-client.md) + - [⚪️ Spawning `monerod`](testing/spawning-monerod.md) +- [⚪️ Known issues and tradeoffs](known-issues-and-tradeoffs/intro.md) + - [⚪️ Networking](known-issues-and-tradeoffs/networking.md) + - [⚪️ RPC](known-issues-and-tradeoffs/rpc.md) + - [⚪️ Storage](known-issues-and-tradeoffs/storage.md) + +--- + +- [⚪️ Appendix](appendix/intro.md) + - [🔴 Contributing](appendix/contributing.md) + - [🔴 Crate documentation](appendix/crate-documentation.md) + - [🔴 Build targets](appendix/build-targets.md) + - [🔴 Protocol book](appendix/protocol-book.md) + - [⚪️ User book](appendix/user-book.md) \ No newline at end of file diff --git a/books/architecture/src/appendix/build-targets.md b/books/architecture/src/appendix/build-targets.md new file mode 100644 index 0000000..495a3d6 --- /dev/null +++ b/books/architecture/src/appendix/build-targets.md @@ -0,0 +1,7 @@ +# Build targets +- x86 +- ARM64 +- Windows +- Linux +- macOS +- FreeBSD(?) 
diff --git a/books/architecture/src/appendix/contributing.md b/books/architecture/src/appendix/contributing.md new file mode 100644 index 0000000..675937a --- /dev/null +++ b/books/architecture/src/appendix/contributing.md @@ -0,0 +1,2 @@ +# Contributing + \ No newline at end of file diff --git a/books/architecture/src/appendix/crate-documentation.md b/books/architecture/src/appendix/crate-documentation.md new file mode 100644 index 0000000..0f4d96d --- /dev/null +++ b/books/architecture/src/appendix/crate-documentation.md @@ -0,0 +1,4 @@ +# Crate documentation +```bash +cargo doc --package $CUPRATE_CRATE +``` \ No newline at end of file diff --git a/books/architecture/src/appendix/intro.md b/books/architecture/src/appendix/intro.md new file mode 100644 index 0000000..fad5ae4 --- /dev/null +++ b/books/architecture/src/appendix/intro.md @@ -0,0 +1 @@ +# Appendix diff --git a/books/architecture/src/appendix/protocol-book.md b/books/architecture/src/appendix/protocol-book.md new file mode 100644 index 0000000..a855b73 --- /dev/null +++ b/books/architecture/src/appendix/protocol-book.md @@ -0,0 +1,2 @@ +# Protocol book + \ No newline at end of file diff --git a/books/architecture/src/appendix/user-book.md b/books/architecture/src/appendix/user-book.md new file mode 100644 index 0000000..0f12476 --- /dev/null +++ b/books/architecture/src/appendix/user-book.md @@ -0,0 +1 @@ +# ⚪️ User book diff --git a/books/architecture/src/benchmarking/criterion.md b/books/architecture/src/benchmarking/criterion.md new file mode 100644 index 0000000..e9d61e6 --- /dev/null +++ b/books/architecture/src/benchmarking/criterion.md @@ -0,0 +1 @@ +# ⚪️ Criterion diff --git a/books/architecture/src/benchmarking/harness.md b/books/architecture/src/benchmarking/harness.md new file mode 100644 index 0000000..6f82b52 --- /dev/null +++ b/books/architecture/src/benchmarking/harness.md @@ -0,0 +1 @@ +# ⚪️ Harness diff --git a/books/architecture/src/benchmarking/intro.md 
b/books/architecture/src/benchmarking/intro.md new file mode 100644 index 0000000..f043a0b --- /dev/null +++ b/books/architecture/src/benchmarking/intro.md @@ -0,0 +1 @@ +# ⚪️ Benchmarking diff --git a/books/architecture/src/binary/cli.md b/books/architecture/src/binary/cli.md new file mode 100644 index 0000000..1c515f4 --- /dev/null +++ b/books/architecture/src/binary/cli.md @@ -0,0 +1 @@ +# ⚪️ CLI diff --git a/books/architecture/src/binary/config.md b/books/architecture/src/binary/config.md new file mode 100644 index 0000000..c9582d0 --- /dev/null +++ b/books/architecture/src/binary/config.md @@ -0,0 +1 @@ +# ⚪️ Config diff --git a/books/architecture/src/binary/intro.md b/books/architecture/src/binary/intro.md new file mode 100644 index 0000000..dea12fa --- /dev/null +++ b/books/architecture/src/binary/intro.md @@ -0,0 +1 @@ +# ⚪️ Binary diff --git a/books/architecture/src/binary/logging.md b/books/architecture/src/binary/logging.md new file mode 100644 index 0000000..c7c88a3 --- /dev/null +++ b/books/architecture/src/binary/logging.md @@ -0,0 +1 @@ +# ⚪️ Logging diff --git a/books/architecture/src/birds-eye-view/components.md b/books/architecture/src/birds-eye-view/components.md new file mode 100644 index 0000000..19a17e2 --- /dev/null +++ b/books/architecture/src/birds-eye-view/components.md @@ -0,0 +1 @@ +# ⚪️ Components diff --git a/books/architecture/src/birds-eye-view/intro.md b/books/architecture/src/birds-eye-view/intro.md new file mode 100644 index 0000000..5ee2eb3 --- /dev/null +++ b/books/architecture/src/birds-eye-view/intro.md @@ -0,0 +1 @@ +# ⚪️ Bird's eye view diff --git a/books/architecture/src/birds-eye-view/map.md b/books/architecture/src/birds-eye-view/map.md new file mode 100644 index 0000000..1bde994 --- /dev/null +++ b/books/architecture/src/birds-eye-view/map.md @@ -0,0 +1 @@ +# ⚪️ Map diff --git a/books/architecture/src/consensus/intro.md b/books/architecture/src/consensus/intro.md new file mode 100644 index 0000000..32013b6 --- /dev/null 
+++ b/books/architecture/src/consensus/intro.md @@ -0,0 +1 @@ +# ⚪️ Consensus diff --git a/books/architecture/src/consensus/todo.md b/books/architecture/src/consensus/todo.md new file mode 100644 index 0000000..460d445 --- /dev/null +++ b/books/architecture/src/consensus/todo.md @@ -0,0 +1 @@ +# ⚪️ TODO diff --git a/books/architecture/src/consensus/verifier.md b/books/architecture/src/consensus/verifier.md new file mode 100644 index 0000000..128a3b0 --- /dev/null +++ b/books/architecture/src/consensus/verifier.md @@ -0,0 +1 @@ +# ⚪️ Verifier diff --git a/books/architecture/src/cuprate-architecture.md b/books/architecture/src/cuprate-architecture.md new file mode 100644 index 0000000..3c6c073 --- /dev/null +++ b/books/architecture/src/cuprate-architecture.md @@ -0,0 +1,22 @@ +# Cuprate Architecture +WIP + +[Cuprate](https://github.com/Cuprate/cuprate)'s architecture book. + +Sections are notated with colors indicating how complete they are: + +| Color | Meaning | +|-------|---------| +| ⚪️ | Empty +| 🔴 | Severely lacking information +| 🟠 | Lacking some information +| 🟡 | Almost ready +| 🟢 | OK + +--- + +Continue to the next chapter by clicking the right `>` button, or by selecting it on the left side. + +All chapters are viewable by clicking the top-left `☰` button. + +The entire book can be searched by clicking the top-left 🔍 button. 
\ No newline at end of file diff --git a/books/architecture/src/external-monero-libraries/cryptonight.md b/books/architecture/src/external-monero-libraries/cryptonight.md new file mode 100644 index 0000000..80647b0 --- /dev/null +++ b/books/architecture/src/external-monero-libraries/cryptonight.md @@ -0,0 +1 @@ +# ⚪️ Cryptonight diff --git a/books/architecture/src/external-monero-libraries/intro.md b/books/architecture/src/external-monero-libraries/intro.md new file mode 100644 index 0000000..440a344 --- /dev/null +++ b/books/architecture/src/external-monero-libraries/intro.md @@ -0,0 +1 @@ +# ⚪️ External Monero libraries diff --git a/books/architecture/src/external-monero-libraries/monero_serai.md b/books/architecture/src/external-monero-libraries/monero_serai.md new file mode 100644 index 0000000..f1567b1 --- /dev/null +++ b/books/architecture/src/external-monero-libraries/monero_serai.md @@ -0,0 +1,2 @@ +# monero_serai + diff --git a/books/architecture/src/external-monero-libraries/randomx.md b/books/architecture/src/external-monero-libraries/randomx.md new file mode 100644 index 0000000..7705151 --- /dev/null +++ b/books/architecture/src/external-monero-libraries/randomx.md @@ -0,0 +1,2 @@ +# RandomX + \ No newline at end of file diff --git a/books/architecture/src/foreword.md b/books/architecture/src/foreword.md new file mode 100644 index 0000000..c85f18b --- /dev/null +++ b/books/architecture/src/foreword.md @@ -0,0 +1,36 @@ +# Foreword +Monero[^1] is a large software project, coming in at 329k lines of C++, C, headers, and make files.[^2] It is directly responsible for 2.6 billion dollars worth of value.[^3] It has had over 400 contributors, more if counting unnamed contributions.[^4] It has over 10,000 node operators and a large active userbase.[^5] + +The project wasn't always this big, but somewhere in the midst of contributors coming and going, various features being added, bugs being fixed, and celebrated cryptography being implemented - there was an 
aspect that was lost by the project that it could not easily gain again: **maintainability**. + +Within large and complicated software projects, there is an important transfer of knowledge that must occur for long-term survival. Much like an organism that must eventually pass the torch onto the next generation, projects must do the same for future contributors. + +However, newcomers often lack experience, past contributors might not be around, and current maintainers may be too busy. For whatever reason, this transfer of knowledge is not always smooth. + +There is a solution to this problem: **documentation**. + +The activity of writing the what, where, why, and how of the solutions to technical problems can be done in an author's lonesome. + +The activity of reading these ideas can be done by future readers at any time without permission. + +These readers may be new prospective contributors, it may be the current maintainers, it may be researchers, it may be users of various scale. Whoever it may be, documentation acts as the link between the past and present; a bottle of wisdom thrown into the river of time for future participants to open. + +This book is the manifestation of this will, for Cuprate[^6], an alternative Monero node. It documents Cuprate's implementation from head-to-toe such that in the case of a contributor's untimely disappearance, the project can continue. + +People come and go, documentation is forever. 
+ +— hinto-janai + +--- + +[^1]: [`monero-project/monero`](https://github.com/monero-project/monero) + +[^2]: `git ls-files | grep "\.cpp$\|\.h$\|\.c$\|CMake" | xargs cat | wc -l` on [`cc73fe7`](https://github.com/monero-project/monero/tree/cc73fe71162d564ffda8e549b79a350bca53c454) + +[^3]: 2024-05-24: $143.55 USD * 18,151,608 XMR = $2,605,663,258 + +[^4]: `git log --all --pretty="%an" | sort -u | wc -l` on [`cc73fe7`](https://github.com/monero-project/monero/tree/cc73fe71162d564ffda8e549b79a350bca53c454) + +[^5]: + +[^6]: \ No newline at end of file diff --git a/books/architecture/src/formats-protocols-types/cuprate-helper.md b/books/architecture/src/formats-protocols-types/cuprate-helper.md new file mode 100644 index 0000000..6227829 --- /dev/null +++ b/books/architecture/src/formats-protocols-types/cuprate-helper.md @@ -0,0 +1 @@ +# ⚪️ cuprate_helper diff --git a/books/architecture/src/formats-protocols-types/cuprate-types.md b/books/architecture/src/formats-protocols-types/cuprate-types.md new file mode 100644 index 0000000..1069ce5 --- /dev/null +++ b/books/architecture/src/formats-protocols-types/cuprate-types.md @@ -0,0 +1 @@ +# ⚪️ cuprate_types diff --git a/books/architecture/src/formats-protocols-types/epee.md b/books/architecture/src/formats-protocols-types/epee.md new file mode 100644 index 0000000..4c0b17e --- /dev/null +++ b/books/architecture/src/formats-protocols-types/epee.md @@ -0,0 +1 @@ +# ⚪️ Epee diff --git a/books/architecture/src/formats-protocols-types/intro.md b/books/architecture/src/formats-protocols-types/intro.md new file mode 100644 index 0000000..77052fd --- /dev/null +++ b/books/architecture/src/formats-protocols-types/intro.md @@ -0,0 +1 @@ +# ⚪️ Formats, protocols, types diff --git a/books/architecture/src/formats-protocols-types/levin.md b/books/architecture/src/formats-protocols-types/levin.md new file mode 100644 index 0000000..72a88cc --- /dev/null +++ b/books/architecture/src/formats-protocols-types/levin.md @@ -0,0 +1 @@ +# ⚪️ 
Levin diff --git a/books/architecture/src/formats-protocols-types/monero-serai.md b/books/architecture/src/formats-protocols-types/monero-serai.md new file mode 100644 index 0000000..139af8e --- /dev/null +++ b/books/architecture/src/formats-protocols-types/monero-serai.md @@ -0,0 +1 @@ +# ⚪️ monero_serai diff --git a/books/architecture/src/instrumentation/data-collection.md b/books/architecture/src/instrumentation/data-collection.md new file mode 100644 index 0000000..7ea3d9f --- /dev/null +++ b/books/architecture/src/instrumentation/data-collection.md @@ -0,0 +1 @@ +# ⚪️ Data collection diff --git a/books/architecture/src/instrumentation/intro.md b/books/architecture/src/instrumentation/intro.md new file mode 100644 index 0000000..33640dd --- /dev/null +++ b/books/architecture/src/instrumentation/intro.md @@ -0,0 +1,2 @@ +# Instrumentation +Cuprate is built with [instrumentation](https://en.wikipedia.org/wiki/Instrumentation) in mind. \ No newline at end of file diff --git a/books/architecture/src/instrumentation/logging.md b/books/architecture/src/instrumentation/logging.md new file mode 100644 index 0000000..c7c88a3 --- /dev/null +++ b/books/architecture/src/instrumentation/logging.md @@ -0,0 +1 @@ +# ⚪️ Logging diff --git a/books/architecture/src/intro.md b/books/architecture/src/intro.md new file mode 100644 index 0000000..c708d61 --- /dev/null +++ b/books/architecture/src/intro.md @@ -0,0 +1,4 @@ +# Systems +Cuprate is made up of multiple distinct internal systems that work together. + +This section provides informal specifications and implementation details about each. 
\ No newline at end of file diff --git a/books/architecture/src/intro/how-to-use-this-book.md b/books/architecture/src/intro/how-to-use-this-book.md new file mode 100644 index 0000000..7664e04 --- /dev/null +++ b/books/architecture/src/intro/how-to-use-this-book.md @@ -0,0 +1,5 @@ +# How to use this book + +## Maintainers +## Contributors +## Researchers \ No newline at end of file diff --git a/books/architecture/src/intro/intro.md b/books/architecture/src/intro/intro.md new file mode 100644 index 0000000..db2603c --- /dev/null +++ b/books/architecture/src/intro/intro.md @@ -0,0 +1,15 @@ +# Intro +[Cuprate](https://github.com/Cuprate/cuprate) is an alternative [Monero](https://getmonero.org) node implementation. + +This book describes Cuprate's architecture, ranging from small things like database pruning to larger meta-components like the networking stack. + +A brief overview of some aspects covered within this book: +- Component designs +- Implementation details +- File location and purpose +- Design decisions and tradeoffs +- Things in relation to `monerod` +- Dependency usage + +## Source code +The source files for this book can be found at: . 
\ No newline at end of file diff --git a/books/architecture/src/intro/required-knowledge.md b/books/architecture/src/intro/required-knowledge.md new file mode 100644 index 0000000..3262f05 --- /dev/null +++ b/books/architecture/src/intro/required-knowledge.md @@ -0,0 +1,28 @@ +# Required knowledge + +## General +- Rust +- Monero +- System design + +## Components +### Storage +- Embedded databases +- LMDB +- redb + +### RPC +- `axum` +- `tower` +- `async` +- JSON-RPC 2.0 +- Epee + +### Networking +- `tower` +- `tokio` +- `async` +- Levin + +### Instrumentation +- `tracing` diff --git a/books/architecture/src/intro/who-this-book-is-for.md b/books/architecture/src/intro/who-this-book-is-for.md new file mode 100644 index 0000000..4b5be2b --- /dev/null +++ b/books/architecture/src/intro/who-this-book-is-for.md @@ -0,0 +1,31 @@ +# Who this book is for + +## Maintainers +As mentioned in [`Foreword`](../foreword.md), the group of people that benefit from this book's value the most by far are the current and future Cuprate maintainers. + +Cuprate's system design is documented in this book such that if you were ever to build it again from scratch, you would have an excellent guide on how to do such, and also where improvements could be made. + +Practically, what that means for maintainers is that it acts as _the_ reference. During maintenance, it is quite valuable to have a book that contains condensed knowledge on the behavior of components, or how certain code works, or why it was built a certain way. + +## Contributors +Contributors also have access to the inner-workings of Cuprate via this book, which helps when making larger contributions. + +Design decisions and implementation details notated in this book helps answer questions such as: +- Why is it done this way? +- Why can it _not_ be done this way? +- Were other methods attempted? + +Cuprate's testing and benchmarking suites, unknown to new contributors, are also documented within this book. 
+ +## Researchers +This book contains the why, where, and how of the _implementation_ of formal research. + +Although it is an informal specification, this book still acts as a more accessible overview of Cuprate compared to examining the codebase itself. + +## Operators & users +This book is not a practical guide for using Cuprate itself. + +For configuration, data collection (also important for researchers), and other practical usage, see [Cuprate's user book](https://user.cuprate.org). + +## Observers +Anyone curious enough is free to learn the inner-workings of Cuprate via this book, and maybe even contribute someday. \ No newline at end of file diff --git a/books/architecture/src/known-issues-and-tradeoffs/intro.md b/books/architecture/src/known-issues-and-tradeoffs/intro.md new file mode 100644 index 0000000..20ab7b5 --- /dev/null +++ b/books/architecture/src/known-issues-and-tradeoffs/intro.md @@ -0,0 +1 @@ +# ⚪️ Known issues and tradeoffs diff --git a/books/architecture/src/known-issues-and-tradeoffs/networking.md b/books/architecture/src/known-issues-and-tradeoffs/networking.md new file mode 100644 index 0000000..20487cb --- /dev/null +++ b/books/architecture/src/known-issues-and-tradeoffs/networking.md @@ -0,0 +1 @@ +# ⚪️ Networking diff --git a/books/architecture/src/known-issues-and-tradeoffs/rpc.md b/books/architecture/src/known-issues-and-tradeoffs/rpc.md new file mode 100644 index 0000000..3337f37 --- /dev/null +++ b/books/architecture/src/known-issues-and-tradeoffs/rpc.md @@ -0,0 +1 @@ +# ⚪️ RPC diff --git a/books/architecture/src/known-issues-and-tradeoffs/storage.md b/books/architecture/src/known-issues-and-tradeoffs/storage.md new file mode 100644 index 0000000..214cf15 --- /dev/null +++ b/books/architecture/src/known-issues-and-tradeoffs/storage.md @@ -0,0 +1 @@ +# ⚪️ Storage diff --git a/books/architecture/src/networking/dandelion.md b/books/architecture/src/networking/dandelion.md new file mode 100644 index 0000000..30916b7 --- /dev/null +++ 
b/books/architecture/src/networking/dandelion.md @@ -0,0 +1 @@ +# ⚪️ Dandelion++ diff --git a/books/architecture/src/networking/i2p.md b/books/architecture/src/networking/i2p.md new file mode 100644 index 0000000..986ab2a --- /dev/null +++ b/books/architecture/src/networking/i2p.md @@ -0,0 +1 @@ +# ⚪️ i2p diff --git a/books/architecture/src/networking/intro.md b/books/architecture/src/networking/intro.md new file mode 100644 index 0000000..20487cb --- /dev/null +++ b/books/architecture/src/networking/intro.md @@ -0,0 +1 @@ +# ⚪️ Networking diff --git a/books/architecture/src/networking/ipv4-ipv6.md b/books/architecture/src/networking/ipv4-ipv6.md new file mode 100644 index 0000000..07339b4 --- /dev/null +++ b/books/architecture/src/networking/ipv4-ipv6.md @@ -0,0 +1 @@ +# ⚪️ IPv4/IPv6 diff --git a/books/architecture/src/networking/p2p.md b/books/architecture/src/networking/p2p.md new file mode 100644 index 0000000..11b2015 --- /dev/null +++ b/books/architecture/src/networking/p2p.md @@ -0,0 +1 @@ +# ⚪️ P2P diff --git a/books/architecture/src/networking/proxy.md b/books/architecture/src/networking/proxy.md new file mode 100644 index 0000000..bd9e5f7 --- /dev/null +++ b/books/architecture/src/networking/proxy.md @@ -0,0 +1 @@ +# ⚪️ Proxy diff --git a/books/architecture/src/networking/tor.md b/books/architecture/src/networking/tor.md new file mode 100644 index 0000000..cc0a809 --- /dev/null +++ b/books/architecture/src/networking/tor.md @@ -0,0 +1 @@ +# ⚪️ Tor diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/intro.md b/books/architecture/src/resource-model/concurrency-and-parallelism/intro.md new file mode 100644 index 0000000..2cca180 --- /dev/null +++ b/books/architecture/src/resource-model/concurrency-and-parallelism/intro.md @@ -0,0 +1,32 @@ +# Concurrency and parallelism +It is incumbent upon software like Cuprate to take advantage of today's highly parallel hardware as much as practically possible. 
+ +With that said, programs must set up guardrails when operating in a concurrent and parallel manner, [for correctness and safety](https://en.wikipedia.org/wiki/Concurrency_(computer_science)). + +There are "synchronization primitives" that help with this, common ones being: +- [Locks](https://en.wikipedia.org/wiki/Lock_(computer_science)) +- [Channels](https://en.wikipedia.org/wiki/Channel_(programming)) +- [Atomics](https://en.wikipedia.org/wiki/Linearizability#Primitive_atomic_instructions) + +These tools are relatively easy to use in isolation, but trickier to do so when considering the entire system. It is not uncommon for _the_ bottleneck to be the [poor orchestration](https://en.wikipedia.org/wiki/Starvation_(computer_science)) of these primitives. + +## Analogy +A common analogy for a parallel system is an intersection. + +Like a parallel computer system, an intersection contains: +1. **Parallelism:** multiple individual units that want to move around (cars, pedestrians, etc) +1. **Synchronization primitives:** traffic lights, car lights, walk signals + +In theory, the amount of "work" the units can do is only limited by the speed of the units themselves, but in practice, the slow cascading reaction speeds between all units, the frequent hiccups that can occur, and the synchronization primitives themselves become bottlenecks far before the maximum speed of any unit is reached. + +A car that hogs the middle of the intersection on the wrong light is akin to a system thread holding onto a lock longer than it should be - it degrades total system output. + +Unlike humans however, computer systems at least have the potential to move at lightning speeds, but only if the above synchronization primitives are used correctly. + +## Goal +To aid the long-term maintenance of highly concurrent and parallel code, this section documents: +1. All system threads spawned and maintained +1. All major sections where synchronization primitives are used +1. 
The asynchronous behavior of some components + +and how these compose together efficiently in Cuprate. \ No newline at end of file diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/map.md b/books/architecture/src/resource-model/concurrency-and-parallelism/map.md new file mode 100644 index 0000000..1bde994 --- /dev/null +++ b/books/architecture/src/resource-model/concurrency-and-parallelism/map.md @@ -0,0 +1 @@ +# ⚪️ Map diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/the-block-downloader.md b/books/architecture/src/resource-model/concurrency-and-parallelism/the-block-downloader.md new file mode 100644 index 0000000..c0dccba --- /dev/null +++ b/books/architecture/src/resource-model/concurrency-and-parallelism/the-block-downloader.md @@ -0,0 +1 @@ +# ⚪️ The block downloader diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/the-database.md b/books/architecture/src/resource-model/concurrency-and-parallelism/the-database.md new file mode 100644 index 0000000..32e4b62 --- /dev/null +++ b/books/architecture/src/resource-model/concurrency-and-parallelism/the-database.md @@ -0,0 +1 @@ +# ⚪️ The database diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/the-rpc-server.md b/books/architecture/src/resource-model/concurrency-and-parallelism/the-rpc-server.md new file mode 100644 index 0000000..cd654cf --- /dev/null +++ b/books/architecture/src/resource-model/concurrency-and-parallelism/the-rpc-server.md @@ -0,0 +1 @@ +# ⚪️ The RPC server diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/the-verifier.md b/books/architecture/src/resource-model/concurrency-and-parallelism/the-verifier.md new file mode 100644 index 0000000..eeaedc6 --- /dev/null +++ b/books/architecture/src/resource-model/concurrency-and-parallelism/the-verifier.md @@ -0,0 +1 @@ +# ⚪️ The verifier diff --git 
a/books/architecture/src/resource-model/concurrency-and-parallelism/thread-exit.md b/books/architecture/src/resource-model/concurrency-and-parallelism/thread-exit.md new file mode 100644 index 0000000..3925975 --- /dev/null +++ b/books/architecture/src/resource-model/concurrency-and-parallelism/thread-exit.md @@ -0,0 +1 @@ +# ⚪️ Thread exit diff --git a/books/architecture/src/resource-model/file-system.md b/books/architecture/src/resource-model/file-system.md new file mode 100644 index 0000000..b67ca07 --- /dev/null +++ b/books/architecture/src/resource-model/file-system.md @@ -0,0 +1 @@ +# ⚪️ File system diff --git a/books/architecture/src/resource-model/intro.md b/books/architecture/src/resource-model/intro.md new file mode 100644 index 0000000..28d1dd6 --- /dev/null +++ b/books/architecture/src/resource-model/intro.md @@ -0,0 +1 @@ +# ⚪️ Resource model diff --git a/books/architecture/src/resource-model/memory.md b/books/architecture/src/resource-model/memory.md new file mode 100644 index 0000000..e3624b5 --- /dev/null +++ b/books/architecture/src/resource-model/memory.md @@ -0,0 +1 @@ +# ⚪️ Memory diff --git a/books/architecture/src/resource-model/sockets.md b/books/architecture/src/resource-model/sockets.md new file mode 100644 index 0000000..0d590ca --- /dev/null +++ b/books/architecture/src/resource-model/sockets.md @@ -0,0 +1 @@ +# ⚪️ Sockets diff --git a/books/architecture/src/rpc/handler.md b/books/architecture/src/rpc/handler.md new file mode 100644 index 0000000..fffa45f --- /dev/null +++ b/books/architecture/src/rpc/handler.md @@ -0,0 +1 @@ +# ⚪️ Handler diff --git a/books/architecture/src/rpc/interface.md b/books/architecture/src/rpc/interface.md new file mode 100644 index 0000000..541b744 --- /dev/null +++ b/books/architecture/src/rpc/interface.md @@ -0,0 +1 @@ +# ⚪️ Interface diff --git a/books/architecture/src/rpc/intro.md b/books/architecture/src/rpc/intro.md new file mode 100644 index 0000000..dcfc82b --- /dev/null +++ 
b/books/architecture/src/rpc/intro.md @@ -0,0 +1,3 @@ +# RPC +- +- \ No newline at end of file diff --git a/books/architecture/src/rpc/methods/intro.md b/books/architecture/src/rpc/methods/intro.md new file mode 100644 index 0000000..d4a3a15 --- /dev/null +++ b/books/architecture/src/rpc/methods/intro.md @@ -0,0 +1 @@ +# ⚪️ Methods diff --git a/books/architecture/src/rpc/router.md b/books/architecture/src/rpc/router.md new file mode 100644 index 0000000..1827dd3 --- /dev/null +++ b/books/architecture/src/rpc/router.md @@ -0,0 +1 @@ +# ⚪️ Router diff --git a/books/architecture/src/rpc/types/binary.md b/books/architecture/src/rpc/types/binary.md new file mode 100644 index 0000000..dea12fa --- /dev/null +++ b/books/architecture/src/rpc/types/binary.md @@ -0,0 +1 @@ +# ⚪️ Binary diff --git a/books/architecture/src/rpc/types/intro.md b/books/architecture/src/rpc/types/intro.md new file mode 100644 index 0000000..22e430c --- /dev/null +++ b/books/architecture/src/rpc/types/intro.md @@ -0,0 +1 @@ +# ⚪️ Types diff --git a/books/architecture/src/rpc/types/json.md b/books/architecture/src/rpc/types/json.md new file mode 100644 index 0000000..0bf9351 --- /dev/null +++ b/books/architecture/src/rpc/types/json.md @@ -0,0 +1 @@ +# ⚪️ JSON diff --git a/books/architecture/src/rpc/types/other.md b/books/architecture/src/rpc/types/other.md new file mode 100644 index 0000000..49a36cc --- /dev/null +++ b/books/architecture/src/rpc/types/other.md @@ -0,0 +1 @@ +# ⚪️ Other diff --git a/books/architecture/src/storage/blockchain.md b/books/architecture/src/storage/blockchain.md new file mode 100644 index 0000000..6046687 --- /dev/null +++ b/books/architecture/src/storage/blockchain.md @@ -0,0 +1 @@ +# ⚪️ Blockchain diff --git a/books/architecture/src/storage/database-abstraction.md b/books/architecture/src/storage/database-abstraction.md new file mode 100644 index 0000000..b21a192 --- /dev/null +++ b/books/architecture/src/storage/database-abstraction.md @@ -0,0 +1 @@ +# ⚪️ Database 
abstraction diff --git a/books/architecture/src/storage/intro.md b/books/architecture/src/storage/intro.md new file mode 100644 index 0000000..214cf15 --- /dev/null +++ b/books/architecture/src/storage/intro.md @@ -0,0 +1 @@ +# ⚪️ Storage diff --git a/books/architecture/src/storage/pruning.md b/books/architecture/src/storage/pruning.md new file mode 100644 index 0000000..cfeee69 --- /dev/null +++ b/books/architecture/src/storage/pruning.md @@ -0,0 +1 @@ +# ⚪️ Pruning diff --git a/books/architecture/src/storage/transaction-pool.md b/books/architecture/src/storage/transaction-pool.md new file mode 100644 index 0000000..4eb139b --- /dev/null +++ b/books/architecture/src/storage/transaction-pool.md @@ -0,0 +1 @@ +# ⚪️ Transaction pool diff --git a/books/architecture/src/testing/intro.md b/books/architecture/src/testing/intro.md new file mode 100644 index 0000000..397ae90 --- /dev/null +++ b/books/architecture/src/testing/intro.md @@ -0,0 +1 @@ +# ⚪️ Testing diff --git a/books/architecture/src/testing/monero-data.md b/books/architecture/src/testing/monero-data.md new file mode 100644 index 0000000..915af28 --- /dev/null +++ b/books/architecture/src/testing/monero-data.md @@ -0,0 +1 @@ +# ⚪️ Monero data diff --git a/books/architecture/src/testing/rpc-client.md b/books/architecture/src/testing/rpc-client.md new file mode 100644 index 0000000..5a373c2 --- /dev/null +++ b/books/architecture/src/testing/rpc-client.md @@ -0,0 +1 @@ +# ⚪️ RPC client diff --git a/books/architecture/src/testing/spawning-monerod.md b/books/architecture/src/testing/spawning-monerod.md new file mode 100644 index 0000000..1552266 --- /dev/null +++ b/books/architecture/src/testing/spawning-monerod.md @@ -0,0 +1 @@ +# ⚪️ Spawning monerod diff --git a/books/architecture/src/zmq/intro.md b/books/architecture/src/zmq/intro.md new file mode 100644 index 0000000..0b668b3 --- /dev/null +++ b/books/architecture/src/zmq/intro.md @@ -0,0 +1 @@ +# ⚪️ ZMQ diff --git a/books/architecture/src/todo.md 
b/books/architecture/src/zmq/todo.md similarity index 100% rename from books/architecture/src/todo.md rename to books/architecture/src/zmq/todo.md From aa718e224f480849d2ed665cabe0794875231abf Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Thu, 18 Jul 2024 19:50:27 -0400 Subject: [PATCH 021/104] test-utils: add `crate::rpc::data` module (#231) * test-utils: add `crate::rpc::types` module * test-utils: conditional json doc-tests * json: add test data, fix macro doc tests * json: add all data * other: add all data * bin: add skeleton * docs * move type to correct file * rpc: `client/{client,constants}.rs` -> `client.rs` * lib.rs: remove `clippy::module_inception` --- Cargo.lock | 1 + test-utils/Cargo.toml | 34 +- test-utils/README.md | 1 + test-utils/src/data/free.rs | 2 +- test-utils/src/rpc/client.rs | 4 +- test-utils/src/rpc/constants.rs | 7 - test-utils/src/rpc/data/bin.rs | 55 ++ test-utils/src/rpc/data/json.rs | 1292 +++++++++++++++++++++++++++++ test-utils/src/rpc/data/macros.rs | 168 ++++ test-utils/src/rpc/data/mod.rs | 18 + test-utils/src/rpc/data/other.rs | 839 +++++++++++++++++++ test-utils/src/rpc/mod.rs | 27 +- typos.toml | 1 + 13 files changed, 2400 insertions(+), 49 deletions(-) delete mode 100644 test-utils/src/rpc/constants.rs create mode 100644 test-utils/src/rpc/data/bin.rs create mode 100644 test-utils/src/rpc/data/json.rs create mode 100644 test-utils/src/rpc/data/macros.rs create mode 100644 test-utils/src/rpc/data/mod.rs create mode 100644 test-utils/src/rpc/data/other.rs diff --git a/Cargo.lock b/Cargo.lock index 965e2c6..426ccc2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -785,6 +785,7 @@ dependencies = [ "hex", "hex-literal", "monero-serai", + "paste", "pretty_assertions", "serde", "serde_json", diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index f9a5c6d..dd24fd5 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -1,30 +1,30 @@ [package] -name = "cuprate-test-utils" +name = "cuprate-test-utils" version = 
"0.1.0" edition = "2021" license = "MIT" authors = ["Boog900", "hinto-janai"] [dependencies] -cuprate-types = { path = "../types" } -cuprate-helper = { path = "../helper", features = ["map"] } -cuprate-wire = { path = "../net/wire" } +cuprate-types = { path = "../types" } +cuprate-helper = { path = "../helper", features = ["map"] } +cuprate-wire = { path = "../net/wire" } cuprate-p2p-core = { path = "../p2p/p2p-core", features = ["borsh"] } -hex = { workspace = true } -hex-literal = { workspace = true } +hex = { workspace = true } +hex-literal = { workspace = true } monero-serai = { workspace = true, features = ["std", "http-rpc"] } -futures = { workspace = true, features = ["std"] } -async-trait = { workspace = true } -tokio = { workspace = true, features = ["full"] } -tokio-util = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -bytes = { workspace = true, features = ["std"] } -tempfile = { workspace = true } - -borsh = { workspace = true, features = ["derive"]} +futures = { workspace = true, features = ["std"] } +async-trait = { workspace = true } +tokio = { workspace = true, features = ["full"] } +tokio-util = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +bytes = { workspace = true, features = ["std"] } +tempfile = { workspace = true } +paste = { workspace = true } +borsh = { workspace = true, features = ["derive"]} [dev-dependencies] -hex = { workspace = true } +hex = { workspace = true } pretty_assertions = { workspace = true } \ No newline at end of file diff --git a/test-utils/README.md b/test-utils/README.md index c210686..3c71c0a 100644 --- a/test-utils/README.md +++ b/test-utils/README.md @@ -7,3 +7,4 @@ It currently contains: - Code to spawn monerod instances and a testing network zone - Real raw and typed Monero data, e.g. 
`Block, Transaction` - An RPC client to generate types from `cuprate_types` +- Raw RPC request/response strings and binary data \ No newline at end of file diff --git a/test-utils/src/data/free.rs b/test-utils/src/data/free.rs index e80bdda..ee6f49a 100644 --- a/test-utils/src/data/free.rs +++ b/test-utils/src/data/free.rs @@ -292,7 +292,7 @@ mod tests { use pretty_assertions::assert_eq; - use crate::rpc::HttpRpcClient; + use crate::rpc::client::HttpRpcClient; /// Assert the defined blocks are the same compared to ones received from a local RPC call. #[ignore] // FIXME: doesn't work in CI, we need a real unrestricted node diff --git a/test-utils/src/rpc/client.rs b/test-utils/src/rpc/client.rs index 22ae11f..28c49d8 100644 --- a/test-utils/src/rpc/client.rs +++ b/test-utils/src/rpc/client.rs @@ -12,7 +12,9 @@ use monero_serai::{ use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; -use crate::rpc::constants::LOCALHOST_RPC_URL; +//---------------------------------------------------------------------------------------------------- Constants +/// The default URL used for Monero RPC connections. +pub const LOCALHOST_RPC_URL: &str = "http://127.0.0.1:18081"; //---------------------------------------------------------------------------------------------------- HttpRpcClient /// An HTTP RPC client for Monero. diff --git a/test-utils/src/rpc/constants.rs b/test-utils/src/rpc/constants.rs deleted file mode 100644 index ce44a88..0000000 --- a/test-utils/src/rpc/constants.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! RPC-related Constants. - -//---------------------------------------------------------------------------------------------------- Use - -//---------------------------------------------------------------------------------------------------- Constants -/// The default URL used for Monero RPC connections. 
-pub const LOCALHOST_RPC_URL: &str = "http://127.0.0.1:18081"; diff --git a/test-utils/src/rpc/data/bin.rs b/test-utils/src/rpc/data/bin.rs new file mode 100644 index 0000000..cf98a4a --- /dev/null +++ b/test-utils/src/rpc/data/bin.rs @@ -0,0 +1,55 @@ +//! Binary data from [`.bin` endpoints](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_blocksbin). +//! +//! TODO: Not implemented yet. + +//---------------------------------------------------------------------------------------------------- Import +use crate::rpc::data::macros::define_request_and_response; + +//---------------------------------------------------------------------------------------------------- TODO +define_request_and_response! { + get_blocksbin, + GET_BLOCKS: &[u8], + Request = &[]; + Response = &[]; +} + +define_request_and_response! { + get_blocks_by_heightbin, + GET_BLOCKS_BY_HEIGHT: &[u8], + Request = &[]; + Response = &[]; +} + +define_request_and_response! { + get_hashesbin, + GET_HASHES: &[u8], + Request = &[]; + Response = &[]; +} + +define_request_and_response! { + get_o_indexesbin, + GET_O_INDEXES: &[u8], + Request = &[]; + Response = &[]; +} + +define_request_and_response! { + get_outsbin, + GET_OUTS: &[u8], + Request = &[]; + Response = &[]; +} + +define_request_and_response! { + get_transaction_pool_hashesbin, + GET_TRANSACTION_POOL_HASHES: &[u8], + Request = &[]; + Response = &[]; +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} diff --git a/test-utils/src/rpc/data/json.rs b/test-utils/src/rpc/data/json.rs new file mode 100644 index 0000000..2463e45 --- /dev/null +++ b/test-utils/src/rpc/data/json.rs @@ -0,0 +1,1292 @@ +//! JSON data from the [`/json_rpc`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#json-rpc-methods) endpoint. 
+ +//---------------------------------------------------------------------------------------------------- Import +use crate::rpc::data::macros::define_request_and_response; + +//---------------------------------------------------------------------------------------------------- Struct definitions +// This generates 2 const strings: +// +// - `const GET_BLOCK_TEMPLATE_REQUEST: &str = "..."` +// - `const GET_BLOCK_TEMPLATE_RESPONSE: &str = "..."` +// +// with some interconnected documentation. +define_request_and_response! { + // The markdown tag for Monero RPC documentation. Not necessarily the endpoint (json). + // + // Adding `(json_rpc)` after this will trigger the macro to automatically + // add a `serde_json` test for the request/response data. + get_block_template (json_rpc), + + // The base const name: the type of the request/response. + GET_BLOCK_TEMPLATE: &str, + + // The request data. + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_block_template", + "params": { + "wallet_address": "44GBHzv6ZyQdJkjqZje6KLZ3xSyN1hBSFAnLP6EAqJtCRVzMzZmeXTC2AHKDS9aEDTRKmo6a6o9r9j86pYfhCWDkKjbtcns", + "reserve_size": 60 + } +}"#; + + // The response data. 
+ Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "blockhashing_blob": "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a00000000e0c20372be23d356347091025c5b5e8f2abf83ab618378565cce2b703491523401", + "blocktemplate_blob": "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a0000000002c681c30101ff8a81c3010180e0a596bb11033b7eedf47baf878f3490cb20b696079c34bd017fe59b0d070e74d73ffabc4bb0e05f011decb630f3148d0163b3bd39690dde4078e4cfb69fecf020d6278a27bad10c58023c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "difficulty": 283305047039, + "difficulty_top64": 0, + "expected_reward": 600000000000, + "height": 3195018, + "next_seed_hash": "", + "prev_hash": "9d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a", + "reserved_offset": 131, + "seed_hash": "e2aa0b7b55042cd48b02e395d78fa66a29815ccc1584e38db2d1f0e8485cd44f", + "seed_height": 3194880, + "status": "OK", + "untrusted": false, + "wide_difficulty": "0x41f64bf3ff" + } +}"#; +} + +define_request_and_response! { + get_block_count (json_rpc), + GET_BLOCK_COUNT: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_block_count" +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "count": 3195019, + "status": "OK", + "untrusted": false + } +}"#; +} + +define_request_and_response! { + on_get_block_hash (json_rpc), + ON_GET_BLOCK_HASH: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "on_get_block_hash", + "params": [912345] +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": "e22cf75f39ae720e8b71b3d120a5ac03f0db50bba6379e2850975b4859190bc6" +}"#; +} + +define_request_and_response! 
{ + submit_block (json_rpc), + SUBMIT_BLOCK: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "submit_block", + "params": ["0707e6bdfedc053771512f1bc27c62731ae9e8f2443db64ce742f4e57f5cf8d393de28551e441a0000000002fb830a01ffbf830a018cfe88bee283060274c0aae2ef5730e680308d9c00b6da59187ad0352efe3c71d36eeeb28782f29f2501bd56b952c3ddc3e350c2631d3a5086cac172c56893831228b17de296ff4669de020200000000"] +}"#; + Response = +r#"{ + "error": { + "code": -7, + "message": "Block not accepted" + }, + "id": "0", + "jsonrpc": "2.0" +}"#; +} + +define_request_and_response! { + generateblocks (json_rpc), + GENERATE_BLOCKS: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "generateblocks", + "params": { + "amount_of_blocks": 1, + "wallet_address": "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A", + "starting_nonce": 0 + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "blocks": ["49b712db7760e3728586f8434ee8bc8d7b3d410dac6bb6e98bf5845c83b917e4"], + "height": 9783, + "status": "OK", + "untrusted": false + } +}"#; +} + +define_request_and_response! 
{ + get_last_block_header (json_rpc), + GET_LAST_BLOCK_HEADER: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_last_block_header" +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "block_header": { + "block_size": 200419, + "block_weight": 200419, + "cumulative_difficulty": 366125734645190820, + "cumulative_difficulty_top64": 0, + "depth": 0, + "difficulty": 282052561854, + "difficulty_top64": 0, + "hash": "57238217820195ac4c08637a144a885491da167899cf1d20e8e7ce0ae0a3434e", + "height": 3195020, + "long_term_weight": 200419, + "major_version": 16, + "miner_tx_hash": "7a42667237d4f79891bb407c49c712a9299fb87fce799833a7b633a3a9377dbd", + "minor_version": 16, + "nonce": 1885649739, + "num_txes": 37, + "orphan_status": false, + "pow_hash": "", + "prev_hash": "22c72248ae9c5a2863c94735d710a3525c499f70707d1c2f395169bc5c8a0da3", + "reward": 615702960000, + "timestamp": 1721245548, + "wide_cumulative_difficulty": "0x514bd6a74a7d0a4", + "wide_difficulty": "0x41aba48bbe" + }, + "credits": 0, + "status": "OK", + "top_hash": "", + "untrusted": false + } +}"#; +} + +define_request_and_response! 
{ + get_block_header_by_hash (json_rpc), + GET_BLOCK_HEADER_BY_HASH: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_block_header_by_hash", + "params": { + "hash": "e22cf75f39ae720e8b71b3d120a5ac03f0db50bba6379e2850975b4859190bc6" + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "block_header": { + "block_size": 210, + "block_weight": 210, + "cumulative_difficulty": 754734824984346, + "cumulative_difficulty_top64": 0, + "depth": 2282676, + "difficulty": 815625611, + "difficulty_top64": 0, + "hash": "e22cf75f39ae720e8b71b3d120a5ac03f0db50bba6379e2850975b4859190bc6", + "height": 912345, + "long_term_weight": 210, + "major_version": 1, + "miner_tx_hash": "c7da3965f25c19b8eb7dd8db48dcd4e7c885e2491db77e289f0609bf8e08ec30", + "minor_version": 2, + "nonce": 1646, + "num_txes": 0, + "orphan_status": false, + "pow_hash": "", + "prev_hash": "b61c58b2e0be53fad5ef9d9731a55e8a81d972b8d90ed07c04fd37ca6403ff78", + "reward": 7388968946286, + "timestamp": 1452793716, + "wide_cumulative_difficulty": "0x2ae6d65248f1a", + "wide_difficulty": "0x309d758b" + }, + "credits": 0, + "status": "OK", + "top_hash": "", + "untrusted": false + } +}"#; +} + +define_request_and_response! 
{ + get_block_header_by_height (json_rpc), + GET_BLOCK_HEADER_BY_HEIGHT: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_block_header_by_height", + "params": { + "height": 912345 + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "block_header": { + "block_size": 210, + "block_weight": 210, + "cumulative_difficulty": 754734824984346, + "cumulative_difficulty_top64": 0, + "depth": 2282677, + "difficulty": 815625611, + "difficulty_top64": 0, + "hash": "e22cf75f39ae720e8b71b3d120a5ac03f0db50bba6379e2850975b4859190bc6", + "height": 912345, + "long_term_weight": 210, + "major_version": 1, + "miner_tx_hash": "c7da3965f25c19b8eb7dd8db48dcd4e7c885e2491db77e289f0609bf8e08ec30", + "minor_version": 2, + "nonce": 1646, + "num_txes": 0, + "orphan_status": false, + "pow_hash": "", + "prev_hash": "b61c58b2e0be53fad5ef9d9731a55e8a81d972b8d90ed07c04fd37ca6403ff78", + "reward": 7388968946286, + "timestamp": 1452793716, + "wide_cumulative_difficulty": "0x2ae6d65248f1a", + "wide_difficulty": "0x309d758b" + }, + "credits": 0, + "status": "OK", + "top_hash": "", + "untrusted": false + } +}"#; +} + +define_request_and_response! 
{ + get_block_headers_range (json_rpc), + GET_BLOCK_HEADERS_RANGE: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_block_headers_range", + "params": { + "start_height": 1545999, + "end_height": 1546000 + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "credits": 0, + "headers": [{ + "block_size": 301413, + "block_weight": 301413, + "cumulative_difficulty": 13185267971483472, + "cumulative_difficulty_top64": 0, + "depth": 1649024, + "difficulty": 134636057921, + "difficulty_top64": 0, + "hash": "86d1d20a40cefcf3dd410ff6967e0491613b77bf73ea8f1bf2e335cf9cf7d57a", + "height": 1545999, + "long_term_weight": 301413, + "major_version": 6, + "miner_tx_hash": "9909c6f8a5267f043c3b2b079fb4eacc49ef9c1dee1c028eeb1a259b95e6e1d9", + "minor_version": 6, + "nonce": 3246403956, + "num_txes": 20, + "orphan_status": false, + "pow_hash": "", + "prev_hash": "0ef6e948f77b8f8806621003f5de24b1bcbea150bc0e376835aea099674a5db5", + "reward": 5025593029981, + "timestamp": 1523002893, + "wide_cumulative_difficulty": "0x2ed7ee6db56750", + "wide_difficulty": "0x1f58ef3541" + },{ + "block_size": 13322, + "block_weight": 13322, + "cumulative_difficulty": 13185402687569710, + "cumulative_difficulty_top64": 0, + "depth": 1649023, + "difficulty": 134716086238, + "difficulty_top64": 0, + "hash": "b408bf4cfcd7de13e7e370c84b8314c85b24f0ba4093ca1d6eeb30b35e34e91a", + "height": 1546000, + "long_term_weight": 13322, + "major_version": 7, + "miner_tx_hash": "7f749c7c64acb35ef427c7454c45e6688781fbead9bbf222cb12ad1a96a4e8f6", + "minor_version": 7, + "nonce": 3737164176, + "num_txes": 1, + "orphan_status": false, + "pow_hash": "", + "prev_hash": "86d1d20a40cefcf3dd410ff6967e0491613b77bf73ea8f1bf2e335cf9cf7d57a", + "reward": 4851952181070, + "timestamp": 1523002931, + "wide_cumulative_difficulty": "0x2ed80dcb69bf2e", + "wide_difficulty": "0x1f5db457de" + }], + "status": "OK", + "top_hash": "", + "untrusted": false + } +}"#; +} + +define_request_and_response! 
{ + get_block (json_rpc), + GET_BLOCK: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_block", + "params": { + "height": 2751506 + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "blob": "1010c58bab9b06b27bdecfc6cd0a46172d136c08831cf67660377ba992332363228b1b722781e7807e07f502cef8a70101ff92f8a7010180e0a596bb1103d7cbf826b665d7a532c316982dc8dbc24f285cbc18bbcc27c7164cd9b3277a85d034019f629d8b36bd16a2bfce3ea80c31dc4d8762c67165aec21845494e32b7582fe00211000000297a787a000000000000000000000000", + "block_header": { + "block_size": 106, + "block_weight": 106, + "cumulative_difficulty": 236046001376524168, + "cumulative_difficulty_top64": 0, + "depth": 443517, + "difficulty": 313732272488, + "difficulty_top64": 0, + "hash": "43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428", + "height": 2751506, + "long_term_weight": 176470, + "major_version": 16, + "miner_tx_hash": "e49b854c5f339d7410a77f2a137281d8042a0ffc7ef9ab24cd670b67139b24cd", + "minor_version": 16, + "nonce": 4110909056, + "num_txes": 0, + "orphan_status": false, + "pow_hash": "", + "prev_hash": "b27bdecfc6cd0a46172d136c08831cf67660377ba992332363228b1b722781e7", + "reward": 600000000000, + "timestamp": 1667941829, + "wide_cumulative_difficulty": "0x3469a966eb2f788", + "wide_difficulty": "0x490be69168" + }, + "credits": 0, + "json": "{\n \"major_version\": 16, \n \"minor_version\": 16, \n \"timestamp\": 1667941829, \n \"prev_id\": \"b27bdecfc6cd0a46172d136c08831cf67660377ba992332363228b1b722781e7\", \n \"nonce\": 4110909056, \n \"miner_tx\": {\n \"version\": 2, \n \"unlock_time\": 2751566, \n \"vin\": [ {\n \"gen\": {\n \"height\": 2751506\n }\n }\n ], \n \"vout\": [ {\n \"amount\": 600000000000, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"d7cbf826b665d7a532c316982dc8dbc24f285cbc18bbcc27c7164cd9b3277a85\", \n \"view_tag\": \"d0\"\n }\n }\n }\n ], \n \"extra\": [ 1, 159, 98, 157, 139, 54, 189, 22, 162, 191, 206, 62, 168, 12, 49, 220, 77, 135, 98, 
198, 113, 101, 174, 194, 24, 69, 73, 78, 50, 183, 88, 47, 224, 2, 17, 0, 0, 0, 41, 122, 120, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n ], \n \"rct_signatures\": {\n \"type\": 0\n }\n }, \n \"tx_hashes\": [ ]\n}", + "miner_tx_hash": "e49b854c5f339d7410a77f2a137281d8042a0ffc7ef9ab24cd670b67139b24cd", + "status": "OK", + "top_hash": "", + "untrusted": false + } +}"#; +} + +define_request_and_response! { + get_block (json_rpc), + /// This is the same as [`GET_BLOCK_REQUEST`] and + /// [`GET_BLOCK_RESPONSE`] but it uses the `hash` parameter. + GET_BLOCK_HASH: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_block", + "params": { + "hash": "86d421322b700166dde2d7eba1cc8600925ef640abf6c0a2cc8ce0d6dd90abfd" + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "blob": "1010d8faa89b06f8a36d0dbe4d27d2f52160000563896048d71067c31e99a3869bf9b7142227bb5328010b02a6f6a70101ffeaf5a70101a08bc8b3bb11036d6713f5aa552a1aaf33baed7591f795b86daf339e51029a9062dfe09f0f909b312b0124d6023d591c4d434000e5e31c6db718a1e96e865939930e90a7042a1cd4cbd202083786a78452fdfc000002a89e380a44d8dfc64b551baa171447a0f9c9262255be6e8f8ef10896e36e2bf90c4d343e416e394ad9cc10b7d2df7b2f39370a554730f75dfcb04944bd62c299", + "block_header": { + "block_size": 3166, + "block_weight": 3166, + "cumulative_difficulty": 235954020187853162, + "cumulative_difficulty_top64": 0, + "depth": 443814, + "difficulty": 312527777859, + "difficulty_top64": 0, + "hash": "86d421322b700166dde2d7eba1cc8600925ef640abf6c0a2cc8ce0d6dd90abfd", + "height": 2751210, + "long_term_weight": 176470, + "major_version": 16, + "miner_tx_hash": "dabe07900d3123ed895612f4a151adb3e39681b145f0f85bfee23ea1fe47acf2", + "minor_version": 16, + "nonce": 184625235, + "num_txes": 2, + "orphan_status": false, + "pow_hash": "", + "prev_hash": "f8a36d0dbe4d27d2f52160000563896048d71067c31e99a3869bf9b7142227bb", + "reward": 600061380000, + "timestamp": 1667906904, + "wide_cumulative_difficulty": "0x34646ee649f516a", + 
"wide_difficulty": "0x48c41b7043" + }, + "credits": 0, + "json": "{\n \"major_version\": 16, \n \"minor_version\": 16, \n \"timestamp\": 1667906904, \n \"prev_id\": \"f8a36d0dbe4d27d2f52160000563896048d71067c31e99a3869bf9b7142227bb\", \n \"nonce\": 184625235, \n \"miner_tx\": {\n \"version\": 2, \n \"unlock_time\": 2751270, \n \"vin\": [ {\n \"gen\": {\n \"height\": 2751210\n }\n }\n ], \n \"vout\": [ {\n \"amount\": 600061380000, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"6d6713f5aa552a1aaf33baed7591f795b86daf339e51029a9062dfe09f0f909b\", \n \"view_tag\": \"31\"\n }\n }\n }\n ], \n \"extra\": [ 1, 36, 214, 2, 61, 89, 28, 77, 67, 64, 0, 229, 227, 28, 109, 183, 24, 161, 233, 110, 134, 89, 57, 147, 14, 144, 167, 4, 42, 28, 212, 203, 210, 2, 8, 55, 134, 167, 132, 82, 253, 252, 0\n ], \n \"rct_signatures\": {\n \"type\": 0\n }\n }, \n \"tx_hashes\": [ \"a89e380a44d8dfc64b551baa171447a0f9c9262255be6e8f8ef10896e36e2bf9\", \"0c4d343e416e394ad9cc10b7d2df7b2f39370a554730f75dfcb04944bd62c299\"\n ]\n}", + "miner_tx_hash": "dabe07900d3123ed895612f4a151adb3e39681b145f0f85bfee23ea1fe47acf2", + "status": "OK", + "top_hash": "", + "tx_hashes": ["a89e380a44d8dfc64b551baa171447a0f9c9262255be6e8f8ef10896e36e2bf9","0c4d343e416e394ad9cc10b7d2df7b2f39370a554730f75dfcb04944bd62c299"], + "untrusted": false + } +}"#; +} + +define_request_and_response! 
{ + get_connections (json_rpc), + GET_CONNECTIONS: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_connections" +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "connections": [{ + "address": "3evk3kezfjg44ma6tvesy7rbxwwpgpympj45xar5fo4qajrsmkoaqdqd.onion:18083", + "address_type": 4, + "avg_download": 0, + "avg_upload": 0, + "connection_id": "22ef856d0f1d44cc95e84fecfd065fe2", + "current_download": 0, + "current_upload": 0, + "height": 3195026, + "host": "3evk3kezfjg44ma6tvesy7rbxwwpgpympj45xar5fo4qajrsmkoaqdqd.onion", + "incoming": false, + "ip": "", + "live_time": 76651, + "local_ip": false, + "localhost": false, + "peer_id": "0000000000000001", + "port": "", + "pruning_seed": 0, + "recv_count": 240328, + "recv_idle_time": 34, + "rpc_credits_per_hash": 0, + "rpc_port": 0, + "send_count": 3406572, + "send_idle_time": 30, + "state": "normal", + "support_flags": 0 + },{ + "address": "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion:18083", + "address_type": 4, + "avg_download": 0, + "avg_upload": 0, + "connection_id": "c7734e15936f485a86d2b0534f87e499", + "current_download": 0, + "current_upload": 0, + "height": 3195024, + "host": "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion", + "incoming": false, + "ip": "", + "live_time": 76755, + "local_ip": false, + "localhost": false, + "peer_id": "0000000000000001", + "port": "", + "pruning_seed": 389, + "recv_count": 237657, + "recv_idle_time": 120, + "rpc_credits_per_hash": 0, + "rpc_port": 0, + "send_count": 3370566, + "send_idle_time": 120, + "state": "normal", + "support_flags": 0 + }], + "status": "OK", + "untrusted": false + } +}"#; +} + +define_request_and_response! 
{ + get_info (json_rpc), + GET_INFO: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_info" +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "adjusted_time": 1721245289, + "alt_blocks_count": 16, + "block_size_limit": 600000, + "block_size_median": 300000, + "block_weight_limit": 600000, + "block_weight_median": 300000, + "bootstrap_daemon_address": "", + "busy_syncing": false, + "credits": 0, + "cumulative_difficulty": 366127702242611947, + "cumulative_difficulty_top64": 0, + "database_size": 235169075200, + "difficulty": 280716748706, + "difficulty_top64": 0, + "free_space": 30521749504, + "grey_peerlist_size": 4996, + "height": 3195028, + "height_without_bootstrap": 3195028, + "incoming_connections_count": 62, + "mainnet": true, + "nettype": "mainnet", + "offline": false, + "outgoing_connections_count": 1143, + "restricted": false, + "rpc_connections_count": 1, + "stagenet": false, + "start_time": 1720462427, + "status": "OK", + "synchronized": true, + "target": 120, + "target_height": 0, + "testnet": false, + "top_block_hash": "bdf06d18ed1931a8ee62654e9b6478cc459bc7072628b8e36f4524d339552946", + "top_hash": "", + "tx_count": 43205750, + "tx_pool_size": 12, + "untrusted": false, + "update_available": false, + "version": "0.18.3.3-release", + "was_bootstrap_ever_used": false, + "white_peerlist_size": 1000, + "wide_cumulative_difficulty": "0x514bf349299d2eb", + "wide_difficulty": "0x415c05a7a2" + } +}"#; +} + +define_request_and_response! { + hard_fork_info (json_rpc), + HARD_FORK_INFO: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "hard_fork_info" +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "credits": 0, + "earliest_height": 2689608, + "enabled": true, + "state": 0, + "status": "OK", + "threshold": 0, + "top_hash": "", + "untrusted": false, + "version": 16, + "votes": 10080, + "voting": 16, + "window": 10080 + } +}"#; +} + +define_request_and_response! 
{ + set_bans (json_rpc), + SET_BANS: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "set_bans", + "params": { + "bans": [{ + "host": "192.168.1.51", + "ban": true, + "seconds": 30 + }] + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "status": "OK", + "untrusted": false + } +}"#; +} + +define_request_and_response! { + set_bans (json_rpc), + /// This is the same as [`SET_BANS_REQUEST`] and + /// [`SET_BANS_RESPONSE`] but it uses the `ip` parameter. + SET_BANS_IP: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "set_bans", + "params": { + "bans": [{ + "ip": 838969536, + "ban": true, + "seconds": 30 + }] + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "status": "OK", + "untrusted": false + } +}"#; +} + +define_request_and_response! { + get_bans (json_rpc), + GET_BANS: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_bans" +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "bans": [{ + "host": "104.248.206.131", + "ip": 2211379304, + "seconds": 689754 + },{ + "host": "209.222.252.0\/24", + "ip": 0, + "seconds": 689754 + }], + "status": "OK", + "untrusted": false + } +}"#; +} + +define_request_and_response! { + banned (json_rpc), + BANNED: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "banned", + "params": { + "address": "95.216.203.255" + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "banned": true, + "seconds": 689655, + "status": "OK" + } +}"#; +} + +define_request_and_response! { + flush_txpool (json_rpc), + FLUSH_TRANSACTION_POOL: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "flush_txpool", + "params": { + "txids": ["dc16fa8eaffe1484ca9014ea050e13131d3acf23b419f33bb4cc0b32b6c49308"] + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "status": "OK" + } +}"#; +} + +define_request_and_response! 
{ + get_output_histogram (json_rpc), + GET_OUTPUT_HISTOGRAM: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_output_histogram", + "params": { + "amounts": ["20000000000"] + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "credits": 0, + "histogram": [{ + "amount": 20000000000, + "recent_instances": 0, + "total_instances": 381490, + "unlocked_instances": 0 + }], + "status": "OK", + "top_hash": "", + "untrusted": false + } +}"#; +} + +define_request_and_response! { + get_coinbase_tx_sum (json_rpc), + GET_COINBASE_TX_SUM: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_coinbase_tx_sum", + "params": { + "height": 1563078, + "count": 2 + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "credits": 0, + "emission_amount": 9387854817320, + "emission_amount_top64": 0, + "fee_amount": 83981380000, + "fee_amount_top64": 0, + "status": "OK", + "top_hash": "", + "untrusted": false, + "wide_emission_amount": "0x889c7c06828", + "wide_fee_amount": "0x138dae29a0" + } +}"#; +} + +define_request_and_response! 
{ + get_version (json_rpc), + GET_VERSION: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_version" +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "current_height": 3195051, + "hard_forks": [{ + "height": 1, + "hf_version": 1 + },{ + "height": 1009827, + "hf_version": 2 + },{ + "height": 1141317, + "hf_version": 3 + },{ + "height": 1220516, + "hf_version": 4 + },{ + "height": 1288616, + "hf_version": 5 + },{ + "height": 1400000, + "hf_version": 6 + },{ + "height": 1546000, + "hf_version": 7 + },{ + "height": 1685555, + "hf_version": 8 + },{ + "height": 1686275, + "hf_version": 9 + },{ + "height": 1788000, + "hf_version": 10 + },{ + "height": 1788720, + "hf_version": 11 + },{ + "height": 1978433, + "hf_version": 12 + },{ + "height": 2210000, + "hf_version": 13 + },{ + "height": 2210720, + "hf_version": 14 + },{ + "height": 2688888, + "hf_version": 15 + },{ + "height": 2689608, + "hf_version": 16 + }], + "release": true, + "status": "OK", + "untrusted": false, + "version": 196621 + } +}"#; +} + +define_request_and_response! { + get_fee_estimate (json_rpc), + GET_FEE_ESTIMATE: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_fee_estimate" +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "credits": 0, + "fee": 20000, + "fees": [20000,80000,320000,4000000], + "quantization_mask": 10000, + "status": "OK", + "top_hash": "", + "untrusted": false + } +}"#; +} + +define_request_and_response! 
{ + get_alternate_chains (json_rpc), + GET_ALTERNATE_CHAINS: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_alternate_chains" +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "chains": [{ + "block_hash": "4826c7d45d7cf4f02985b5c405b0e5d7f92c8d25e015492ce19aa3b209295dce", + "block_hashes": ["4826c7d45d7cf4f02985b5c405b0e5d7f92c8d25e015492ce19aa3b209295dce"], + "difficulty": 357404825113208373, + "difficulty_top64": 0, + "height": 3167471, + "length": 1, + "main_chain_parent_block": "69b5075ea627d6ba06b1c30b7e023884eeaef5282cf58ec847dab838ddbcdd86", + "wide_difficulty": "0x4f5c1cb79e22635" + },{ + "block_hash": "33ee476f5a1c5b9d889274cbbe171f5e0112df7ed69021918042525485deb401", + "block_hashes": ["33ee476f5a1c5b9d889274cbbe171f5e0112df7ed69021918042525485deb401"], + "difficulty": 354736121711617293, + "difficulty_top64": 0, + "height": 3157465, + "length": 1, + "main_chain_parent_block": "fd522fcc4cefe5c8c0e5c5600981b3151772c285df3a4e38e5c4011cf466d2cb", + "wide_difficulty": "0x4ec469f8b9ee50d" + }], + "status": "OK", + "untrusted": false + } +}"#; +} + +define_request_and_response! { + relay_tx (json_rpc), + RELAY_TX: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "relay_tx", + "params": { + "txids": ["9fd75c429cbe52da9a52f2ffc5fbd107fe7fd2099c0d8de274dc8a67e0c98613"] + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "status": "OK" + } +}"#; +} + +define_request_and_response! 
{ + sync_info (json_rpc), + SYNC_INFO: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "sync_info" +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "credits": 0, + "height": 3195157, + "next_needed_pruning_seed": 0, + "overview": "[]", + "peers": [{ + "info": { + "address": "142.93.128.65:44986", + "address_type": 1, + "avg_download": 1, + "avg_upload": 1, + "connection_id": "a5803c4c2dac49e7b201dccdef54c862", + "current_download": 2, + "current_upload": 1, + "height": 3195157, + "host": "142.93.128.65", + "incoming": true, + "ip": "142.93.128.65", + "live_time": 18, + "local_ip": false, + "localhost": false, + "peer_id": "6830e9764d3e5687", + "port": "44986", + "pruning_seed": 0, + "recv_count": 20340, + "recv_idle_time": 0, + "rpc_credits_per_hash": 0, + "rpc_port": 18089, + "send_count": 32235, + "send_idle_time": 6, + "state": "normal", + "support_flags": 1 + } + },{ + "info": { + "address": "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion:18083", + "address_type": 4, + "avg_download": 0, + "avg_upload": 0, + "connection_id": "277f7c821bc546878c8bd29977e780f5", + "current_download": 0, + "current_upload": 0, + "height": 3195157, + "host": "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion", + "incoming": false, + "ip": "", + "live_time": 2246, + "local_ip": false, + "localhost": false, + "peer_id": "0000000000000001", + "port": "", + "pruning_seed": 389, + "recv_count": 65164, + "recv_idle_time": 15, + "rpc_credits_per_hash": 0, + "rpc_port": 0, + "send_count": 99120, + "send_idle_time": 15, + "state": "normal", + "support_flags": 0 + } + }], + "status": "OK", + "target_height": 0, + "top_hash": "", + "untrusted": false + } +}"#; +} + +// TODO: binary string. +// define_request_and_response! 
{ +// get_txpool_backlog (json_rpc), +// GET_TRANSACTION_POOL_BACKLOG: &str, +// Request = +// r#"{ +// "jsonrpc": "2.0", +// "id": "0", +// "method": "get_txpool_backlog" +// }"#; +// Response = +// r#"{ +// "id": "0", +// "jsonrpc": "2.0", +// "result": { +// "backlog": "...Binary...", +// "status": "OK", +// "untrusted": false +// } +// }"#; +// } + +define_request_and_response! { + get_output_distribution (json_rpc), + GET_OUTPUT_DISTRIBUTION: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_output_distribution", + "params": { + "amounts": [628780000], + "from_height": 1462078 + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "distributions": [{ + "amount": 2628780000, + "base": 0, + "distribution": "", + "start_height": 1462078 + }], + "status": "OK" + } +}"#; +} + +define_request_and_response! { + get_miner_data (json_rpc), + GET_MINER_DATA: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_miner_data" +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "already_generated_coins": 18186022843595960691, + "difficulty": "0x48afae42de", + "height": 2731375, + "major_version": 16, + "median_weight": 300000, + "prev_id": "78d50c5894d187c4946d54410990ca59a75017628174a9e8c7055fa4ca5c7c6d", + "seed_hash": "a6b869d50eca3a43ec26fe4c369859cf36ae37ce6ecb76457d31ffeb8a6ca8a6", + "status": "OK", + "tx_backlog": [{ + "fee": 30700000, + "id": "9868490d6bb9207fdd9cf17ca1f6c791b92ca97de0365855ea5c089f67c22208", + "weight": 1535 + },{ + "fee": 44280000, + "id": "b6000b02bbec71e18ad704bcae09fb6e5ae86d897ced14a718753e76e86c0a0a", + "weight": 2214 + }], + "untrusted": false + } +}"#; +} + +define_request_and_response! 
{ + prune_blockchain (json_rpc), + PRUNE_BLOCKCHAIN: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "prune_blockchain", + "params": { + "check": true + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "pruned": true, + "pruning_seed": 387, + "status": "OK", + "untrusted": false + } +}"#; +} + +define_request_and_response! { + calc_pow (json_rpc), + CALC_POW: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "calc_pow", + "params": { + "major_version": 14, + "height": 2286447, + "block_blob": "0e0ed286da8006ecdc1aab3033cf1716c52f13f9d8ae0051615a2453643de94643b550d543becd0000000002abc78b0101ffefc68b0101fcfcf0d4b422025014bb4a1eade6622fd781cb1063381cad396efa69719b41aa28b4fce8c7ad4b5f019ce1dc670456b24a5e03c2d9058a2df10fec779e2579753b1847b74ee644f16b023c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000051399a1bc46a846474f5b33db24eae173a26393b976054ee14f9feefe99925233802867097564c9db7a36af5bb5ed33ab46e63092bd8d32cef121608c3258edd55562812e21cc7e3ac73045745a72f7d74581d9a0849d6f30e8b2923171253e864f4e9ddea3acb5bc755f1c4a878130a70c26297540bc0b7a57affb6b35c1f03d8dbd54ece8457531f8cba15bb74516779c01193e212050423020e45aa2c15dcb", + "seed_hash": "d432f499205150873b2572b5f033c9c6e4b7c6f3394bd2dd93822cd7085e7307" + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": "d0402d6834e26fb94a9ce38c6424d27d2069896a9b8b1ce685d79936bca6e0a8" +}"#; +} + +define_request_and_response! { + flush_cache (json_rpc), + FLUSH_CACHE: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "flush_cache", + "params": { + "bad_txs": true, + "bad_blocks": true + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "status": "OK", + "untrusted": false + } +}"#; +} + +define_request_and_response! 
{ + add_aux_pow (json_rpc), + ADD_AUX_POW: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "add_aux_pow", + "params": { + "blocktemplate_blob": "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a0000000002c681c30101ff8a81c3010180e0a596bb11033b7eedf47baf878f3490cb20b696079c34bd017fe59b0d070e74d73ffabc4bb0e05f011decb630f3148d0163b3bd39690dde4078e4cfb69fecf020d6278a27bad10c58023c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "aux_pow": [{ + "id": "3200b4ea97c3b2081cd4190b58e49572b2319fed00d030ad51809dff06b5d8c8", + "hash": "7b35762de164b20885e15dbe656b1138db06bb402fa1796f5765a23933d8859a" + }] + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "aux_pow": [{ + "hash": "7b35762de164b20885e15dbe656b1138db06bb402fa1796f5765a23933d8859a", + "id": "3200b4ea97c3b2081cd4190b58e49572b2319fed00d030ad51809dff06b5d8c8" + }], + "blockhashing_blob": "1010ee97e2a106e9f8ebe8887e5b609949ac8ea6143e560ed13552b110cb009b21f0cfca1eaccf00000000b2685c1283a646bc9020c758daa443be145b7370ce5a6efacb3e614117032e2c22", + "blocktemplate_blob": "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a0000000002c681c30101ff8a81c3010180e0a596bb11033b7eedf47baf878f3490cb20b696079c34bd017fe59b0d070e74d73ffabc4bb0e05f011decb630f3148d0163b3bd39690dde4078e4cfb69fecf020d6278a27bad10c58023c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "merkle_root": "7b35762de164b20885e15dbe656b1138db06bb402fa1796f5765a23933d8859a", + "merkle_tree_depth": 0, + "status": "OK", + "untrusted": false + } +}"#; +} + +define_request_and_response! 
{ + UNDOCUMENTED_METHOD (json_rpc), + GET_TX_IDS_LOOSE: &str, + Request = +r#"{ + "jsonrpc": "2.0", + "id": "0", + "method": "get_txids_loose", + "params": { + "txid_template": "0000000000000000aea473c43708aa50b2c9eaf0e441aa209afc9b43458fb09e", + "num_matching_bits": 192 + } +}"#; + Response = +r#"{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "txids": "", + "status": "OK", + "untrusted": false + } +}"#; +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} diff --git a/test-utils/src/rpc/data/macros.rs b/test-utils/src/rpc/data/macros.rs new file mode 100644 index 0000000..632917a --- /dev/null +++ b/test-utils/src/rpc/data/macros.rs @@ -0,0 +1,168 @@ +//! Macros. + +//---------------------------------------------------------------------------------------------------- define_request_and_response +/// A template for generating the RPC request and response `const` data. +/// +/// See the [`crate::json`] module for example usage. +/// +/// # Macro internals +/// This macro uses: +/// - [`define_request_and_response_doc`] +/// - [`define_request_and_response_test`] +macro_rules! define_request_and_response { + ( + // The markdown tag for Monero daemon RPC documentation. Not necessarily the endpoint. + // + // Adding `(json)` after this will trigger the macro to automatically + // add a `serde_json` test for the request/response data. + $monero_daemon_rpc_doc_link:ident $(($test:ident))?, + + // The base name. + // Attributes added here will apply to _both_ + // request and response types. + $( #[$attr:meta] )* + $name:ident: $type:ty, + + // The request type (and any doc comments, derives, etc). + $( #[$request_attr:meta] )* + Request = $request:expr; + + // The response type (and any doc comments, derives, etc). + $( #[$response_attr:meta] )* + Response = $response:expr; + ) => { paste::paste! 
{ + #[doc = $crate::rpc::data::macros::define_request_and_response_doc!( + "response" => [<$name:upper _RESPONSE>], + $monero_daemon_rpc_doc_link, + )] + /// + $( #[$attr] )* + /// + $( #[$request_attr] )* + /// + $( + #[doc = $crate::rpc::data::macros::define_request_and_response_doc_test!([<$name:upper _REQUEST>], $test)] + )? + pub const [<$name:upper _REQUEST>]: $type = $request; + + #[doc = $crate::rpc::data::macros::define_request_and_response_doc!( + "request" => [<$name:upper _REQUEST>], + $monero_daemon_rpc_doc_link, + )] + /// + $( #[$attr] )* + /// + $( #[$response_attr] )* + /// + $( + #[doc = $crate::rpc::data::macros::define_request_and_response_doc_test!([<$name:upper _RESPONSE>], $test)] + )? + pub const [<$name:upper _RESPONSE>]: $type = $response; + }}; +} +pub(super) use define_request_and_response; + +//---------------------------------------------------------------------------------------------------- define_request_and_response_doc +/// Generate documentation for the types generated +/// by the [`define_request_and_response`] macro. +/// +/// See it for more info on inputs. +macro_rules! define_request_and_response_doc { + ( + // This labels the last `[request]` or `[response]` + // hyperlink in documentation. Input is either: + // - "request" + // - "response" + // + // Remember this is linking to the _other_ type, + // so if defining a `Request` type, input should + // be "response". + $request_or_response:literal => $request_or_response_type:ident, + $monero_daemon_rpc_doc_link:ident, + ) => { + concat!( + "", + "[Documentation](", + "https://www.getmonero.org/resources/developer-guides/daemon-rpc.html", + "#", + stringify!($monero_daemon_rpc_doc_link), + "), [", + $request_or_response, + "](", + stringify!($request_or_response_type), + ")." 
+ ) + }; +} +pub(super) use define_request_and_response_doc; + +//---------------------------------------------------------------------------------------------------- define_request_and_response_test +/// Generate documentation for the types generated +/// by the [`define_request_and_response`] macro. +/// +/// See it for more info on inputs. +macro_rules! define_request_and_response_doc_test { + // `/json_rpc` doc test. + ( + // The ident of the `const` request/response. + $name:ident, + json_rpc + ) => { + concat!( + "```rust\n", + "use cuprate_test_utils::rpc::data::json::*;\n", + "use serde_json::{to_value, Value};\n", + "\n", + "let value = serde_json::from_str::<Value>(&", + stringify!($name), + ").unwrap();\n", + "let Value::Object(map) = value else {\n", + " panic!();\n", + "};\n", + "\n", + r#"assert_eq!(map.get("jsonrpc").unwrap(), "2.0");"#, + "\n", + r#"map.get("id").unwrap();"#, + "\n\n", + r#"if map.get("method").is_some() {"#, + "\n", + r#" return;"#, + "\n", + "}\n", + "\n", + r#"if map.get("result").is_none() {"#, + "\n", + r#" map.get("error").unwrap();"#, + "\n", + "}\n", + "\n", + "```\n", + ) + }; + + // Other JSON endpoint doc test. + ( + $name:ident, + other + ) => { + concat!( + "```rust\n", + "use cuprate_test_utils::rpc::data::other::*;\n", + "use serde_json::{to_value, Value};\n", + "\n", + "let value = serde_json::from_str::<Value>(&", + stringify!($name), + ");\n", + "```\n", + ) + }; + + // No doc test. + ( + $name:ident, + $test:ident, + ) => { + "" + }; +} +pub(super) use define_request_and_response_doc_test; diff --git a/test-utils/src/rpc/data/mod.rs b/test-utils/src/rpc/data/mod.rs new file mode 100644 index 0000000..09f0d60 --- /dev/null +++ b/test-utils/src/rpc/data/mod.rs @@ -0,0 +1,18 @@ +//! Monero RPC data. +//! +//! This module contains real `monerod` RPC requests/responses +//! as `const` [`str`]s and byte arrays (binary). +//! +//! The strings include the JSON-RPC 2.0 portions of the JSON. +//! 
- Tests exist within this crate that ensure the JSON is valid +//! - Tests exist within Cuprate's `rpc/` crates that ensure these strings (de)serialize as valid types +//! +//! # Determinism +//! Note that although both request/response data is defined, +//! they aren't necessarily tied to each other, i.e. the request +//! will not deterministically lead to the response. + +pub mod bin; +pub mod json; +mod macros; +pub mod other; diff --git a/test-utils/src/rpc/data/other.rs b/test-utils/src/rpc/data/other.rs new file mode 100644 index 0000000..2559bbe --- /dev/null +++ b/test-utils/src/rpc/data/other.rs @@ -0,0 +1,839 @@ +//! JSON data from the [`other`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#other-daemon-rpc-calls) endpoints. + +//---------------------------------------------------------------------------------------------------- Import +use crate::rpc::data::macros::define_request_and_response; + +//---------------------------------------------------------------------------------------------------- TODO +define_request_and_response! { + // `(other)` adds a JSON sanity-check test. + get_height (other), + GET_HEIGHT: &str, + Request = +r#"{}"#; + Response = +r#"{ + "hash": "68bb1a1cff8e2a44c3221e8e1aff80bc6ca45d06fa8eff4d2a3a7ac31d4efe3f", + "height": 3195160, + "status": "OK", + "untrusted": false +}"#; +} + +define_request_and_response! 
{ + get_transactions (other), + GET_TRANSACTIONS: &str, + Request = +r#"{ + "txs_hashes": ["d6e48158472848e6687173a91ae6eebfa3e1d778e65252ee99d7515d63090408"] +}"#; + Response = +r#"{ + "credits": 0, + "status": "OK", + "top_hash": "", + "txs": [{ + "as_hex": "0100940102ffc7afa02501b3056ebee1b651a8da723462b4891d471b990ddc226049a0866d3029b8e2f75b70120280a0b6cef785020190dd0a200bd02b70ee707441a8863c5279b4e4d9f376dc97a140b1e5bc7d72bc5080690280c0caf384a30201d0b12b751e8f6e2e31316110fa6631bf2eb02e88ac8d778ec70d42b24ef54843fd75d90280d0dbc3f40201c498358287895f16b62a000a3f2fd8fb2e70d8e376858fb9ba7d9937d3a076e36311bb0280f092cbdd0801e5a230c6250d5835877b735c71d41587082309bf593d06a78def1b4ec57355a37838b5028080bfb59dd20d01c36c6dd3a9826658642ba4d1d586366f2782c0768c7e9fb93f32e8fdfab18c0228ed0280d0b8e1981a01bfb0158a530682f78754ab5b1b81b15891b2c7a22d4d7a929a5b51c066ffd73ac360230280f092cbdd0801f9a330a1217984cc5d31bf0e76ed4f8e3d4115f470824bc214fa84929fcede137173a60280e0bcefa75701f3910e3a8b3c031e15573f7a69db9f8dda3b3f960253099d8f844169212f2de772f6ff0280d0b8e1981a01adc1157920f2c72d6140afd4b858da3f41d07fc1655f2ebe593d32f96d5335d11711ee0280d0dbc3f40201ca8635a1373fa829f58e8f46d72c8e52aa1ce53fa1d798284ed08b44849e2e9ad79b620280a094a58d1d01faf729e5ab208fa809dd2efc6f0b74d3e7eff2a66c689a3b5c31c33c8a14e2359ac484028080e983b1de1601eced0182c8d37d77ce439824ddb3c8ff7bd60642181e183c409545c9d6f9c36683908f028080d194b57401ead50b3eefebb5303e14a5087de37ad1799a4592cf0e897eafb46d9b57257b5732949e0280a094a58d1d01d3882a1e949b2d1b6fc1fd5e44df95bae9068b090677d76b6c307188da44dd4e343cef028090cad2c60e0196c73a74a60fc4ce3a7b14d1abdf7a0c70a9efb490a9de6ec6208a846f8282d878132b028080bb8b939b4401c03dbcbfd9fb02e181d99c0093e53aceecf42bf6ccc0ec611a5093fe6f2b2738a07f0280f092cbdd0801b98d30c27f297ae4cb89fb7bb29ed11adff17db9b71d39edf736172892784897488c5d0280d0dbc3f40201da9a353d39555c27a2d620bf69136e4c665aaa19557d6fc0255cbb67ec69bf2403b63e0280b09dc2df0101f8820caab7a5e736f5445b5624837de46e9ef906cb538f7c860f688a7f7d155e19e0ac0280808d9
3f5d77101b544e62708eb27ff140b58c521e4a90acab5eca36f1ce9516a6318306f7d48beddbc0280a0b6cef7850201abdd0a5453712326722427f66b865e67f8cdb7188001aaacb70f1a018403d3289fcb130280c0caf384a30201a2b32b2cfb06b7022668b2cd5b8263a162511c03154b259ce91c6c97270e4c19efe4710280c0caf384a302018db32bda81bfbe5f9cdf94b20047d12a7fb5f097a83099fafdfedc03397826fb4d18d50280c0fc82aa0201b2e60b825e8c0360b4b44f4fe0a30f4d2f18c80d5bbb7bfc5ddf671f27b6867461c51d028080e983b1de1601b2eb0156dd7ab6dcb0970d4a5dbcb4e04281c1db350198e31893cec9b9d77863fedaf60280e08d84ddcb0101e0960fe3cafedb154111449c5112fc1d9e065222ed0243de3207c3e6f974941a66f177028080df9ad7949f01019815c8c5032f2c28e7e6c9f9c70f6fccdece659d8df53e54ad99a0f7fa5d831cf762028090dfc04a01b4fb123d97504b9d832f7041c4d1db1cda3b7a6d307194aff104ec6b711cced2b005e2028080dd9da41701bef1179c9a459e75a0c4cf4aff1a81f31f477bd682e28a155231da1a1aa7a25ef219910280d88ee16f01facd0f043485225a1e708aa89d71f951bc092724b53942a67a35b2315bfeac4e8af0eb0280d0dbc3f40201c4e634d6e1f3a1b232ef130d4a5417379c4fcc9d078f71899f0617cec8b1e72a1844b60280f092cbdd0801f6b9300c8c94a337fefc1c19f12dee0f2551a09ee0aaa954d1762c93fec8dadae2146c0280c0f9decfae0101ce8d09f26c90144257b5462791487fd1b017eb283268b1c86c859c4194bf1a987c62bf0280c0caf384a30201cead2bbb01653d0d7ff8a42958040814c3cbf228ebb772e03956b367bace3b684b9b7f0280a0e5b9c2910101c1b40c1796904ac003f7a6dd72b4845625e99ba12bdd003e65b2dd2760a4e460821178028080e983b1de160186e9013f55160cd9166756ea8e2c9af065dcdfb16a684e9376c909d18b65fd5306f9690280a0e5b9c2910101aeb70c4433f95ff4cdc4aa54a1ede9ae725cec06350db5d3056815486e761e381ae4d00280c0a8ca9a3a01ebe2139bd558b63ebb9f4d12aca270159ccf565e9cffaadd717ce200db779f202b106f0280d0dbc3f40201b9963568acf599958be4e72f71c3446332a39c815876c185198fa2dcf13877eba3627b0280c0f4c198af0b01bccc01408cbb5a210ad152bd6138639673a6161efd2f85be310b477ae14891870985f90280a0b6cef7850201cadd0a82e7950f5d9e62d14d0f7c6af84002ea9822cdeefabbb866b7a5776c6436636b028080d287e2bc2d01d888013a7146b96a7abc5ce5249b7981fb54250eef751964ff00530915084479b5d6ba028080d
287e2bc2d018c8901d8cc1933366dceb49416b2f48fd2ce297cfd8da8baadc7f63856c46130368fca0280a0b787e90501b6d242f93a6570ad920332a354b14ad93b37c0f3815bb5fa2dcc7ca5e334256bd165320280a0e5b9c2910101a3ac0c4ed5ebf0c11285c351ddfd0bb52bd225ee0f8a319025dc416a5e2ba8e84186680280c0f9decfae0101f18709daddc52dccb6e1527ac83da15e19c2272206ee0b2ac37ac478b4dd3e6bcac5dc0280f8cce2840201b7c80bc872a1e1323d61342a2a7ac480b4b061163811497e08698057a8774493a1abe50280e0bcefa75701f38b0e532d34791916b1f56c3f008b2231de5cc62bd1ec898c62d19fb1ec716d467ae20280c0fc82aa0201d6da0b7de4dc001430473852620e13e5931960c55ab6ebeff574fbddea995cbc9d7c010280c0f4c198af0b01edca017ec6af4ece2622edaf9914cfb1cc6663639285256912d7d9d70e905120a6961c028090cad2c60e01cad43abcc63a192d53fe8269ecaf2d9ca3171c2172d85956fe44fcc1ac01efe4c610dd0280809aa6eaafe30101a92fccd2bcadfa42dcbd28d483d32abde14b377b72c4e6ef31a1f1e0ff6c2c9f452f0280a0b787e9050197d9420ce413f5321a785cd5bea4952e6c32acd0b733240a2ea2835747bb916a032da7028080d287e2bc2d01c48a01a3c4afcbbdac70524b27585c68ed1f8ea3858c1c392aeb7ada3432f3eed7cab10280f092cbdd0801abb130d362484a00ea63b2a250a8ae7cf8b904522638838a460653a3c37db1b76ff3de0280e08d84ddcb0101cc980fce5c2d8b34b3039e612adeb707f9ab397c75f76c1f0da8af92c64cd021976ff3028080dd9da4170198c217e4a7b63fd645bd27aa5afc6fae7db1e66896cece0d4b84ac76428268b5c213c30280a0b6cef7850201e3e20acab086a758c56372cf461e5339c09745ee510a785b540d68c7f62c35b8b6c5c10280a094a58d1d01ab842ac0ba57e87a3f204a56cbecca405419e4c896c817c5ced715d903a104a09735660280e0a596bb1101ade921f1ef2fe56c74320ceb1f6c52506d0b835808474b928ad6309398b42434d27f3d0280d0b8e1981a01dab515d684a324cff4b00753a9ef0868f308b8121cbc79077ede84d70cf6015ec989be0280c0ee8ed20b01f99c227da8d616ce3669517282902cdf1ef75e75a427385270d1a94197b93cf6620c15028080dd9da41701d2d0175c8494f151e585f01e80c303c84eea460b69874b773ba01d20f28a05916111a0028090dfc04a01a4f312c12a9f52a99f69e43979354446fd4e2ba5e2d5fb8aaa17cd25cdf591543149da0280d0dbc3f40201969c3510fbca0efa6d5b0c45dca32a5a91b10608594a58e5154d6a453493d4a0f10cf70280f8cce284020
1ddcb0b76ca6a2df4544ea2d9634223becf72b6d6a176eae609d8a496ee7c0a45bec8240280e0bcefa7570180920e95d8d04f9f7a4b678497ded16da4caca0934fc019f582d8e1db1239920914d35028090cad2c60e01c2d43a30bbb2dcbb2b6c361dc49649a6cf733b29df5f6e7504b03a55ee707ed3db2c4e028080d287e2bc2d01c38801941b46cb00712de68cebc99945dc7b472b352c9a2e582a9217ea6d0b8c3f07590280e0a596bb1101b6eb219463e6e8aa6395eefc4e7d2d7d6484b5f684e7018fe56d3d6ddca82f4b89c5840280d0dbc3f40201db9535db1fb02f4a45c21eae26f5c40c01ab1bca304deac2fb08d2b3d9ac4f65fd10c60280a094a58d1d01948c2a413da2503eb92880f02f764c2133ed6f2951ae86e8c8c17d1e9e024ca4dc72320280c0ee8ed20b01869f22d3e106082527d6f0b052106a4850801fcd59d0b6ce61b237c2321111ed8bdf47028080d194b57401acd20b9c0e61b23698c4b4d47965a597284d409f71d7f16f4997bc04ba042d3cbe044d028090cad2c60e0194b83ac3b448f0bd45f069df6a80e49778c289edeb93b9f213039e53a828e685c270f90280a094a58d1d01bdfb2984b37167dce720d3972eaa50ba42ae1c73ce8e8bc15b5b420e55c9ae96e5ca8c028090dfc04a01abf3120595fbef2082079af5448c6d0d6491aa758576881c1839f4934fa5f6276b33810280e0a596bb1101f9ea2170a571f721540ec01ae22501138fa808045bb8d86b22b1be686b258b2cc999c5028088aca3cf02019cb60d1ffda55346c6612364a9f426a8b9942d9269bef1360f20b8f3ccf57e9996b5f70280e8eda1ba0101aff90c87588ff1bb510a30907357afbf6c3292892c2d9ff41e363889af32e70891cb9b028080d49ca7981201d65ee875df2a98544318a5f4e9aa70a799374b40cff820c132a388736b86ff6c7b7d0280c0caf384a30201dab52bbf532aa44298858b0a313d0f29953ea90efd3ac3421c674dbda79530e4a6b0060280f092cbdd0801c3ab30b0fc9f93dddc6c3e4d976e9c5e4cfee5bfd58415c96a3e7ec05a3172c29f223f0280a094a58d1d01a2812a3e0ec75af0330302c35c582d9a14c8e5f00a0bf84da22eec672c4926ca6fccb10280a094a58d1d01ca842a2b03a22e56f164bae94e43d1c353217c1a1048375731c0c47bb63216e1ef6c480280e08d84ddcb0101d68b0fb2d29505b3f25a8e36f17a2fde13bce41752ecec8c2042a7e1a7d65a0fd35cdf028090cad2c60e0199ce3afa0b62692f1b87324fd4904bf9ffd45ed169d1f5096634a3ba8602919681e5660280c0f9decfae010193ed081977c266b88f1c3cb7027c0216adb506f0e929ce650cd178b81645458c3af4c6028090cad2c60e01eec13a9cce0e675
0904e5649907b0bdc6c6963f62fef41ef3932765418d933fc1cd97a0280c0ee8ed20b019ea8228d467474d1073d5c906acdec6ee799b71e16141930c9d66b7f181dbd7a6e924a028080bb8b939b4401c23d3cb4e840ad6766bb0fd6d2b81462f1e4828d2eae341ce3bd8d7ce38b036ac6fb028080e983b1de1601b9e601e3cf485fa441836d83d1f1be6d8611599eccc29f3af832b922e45ab1cd7f31d00280f092cbdd0801fc9e30fff408d7b0c5d88f662dddb5d06ff382baa06191102278e54a0030f7e3246e7c0280d88ee16f01bfcd0f96a24f27ac225278701c6b54df41c6aa511dd86ce682516fb1824ff104c572fb0280f092cbdd0801cbb430bd5f343e45c62efcd6e0f62e77ceb3c95ef945b0cff7002872ea350b5dfffef10280c0caf384a30201bfb22b14dccbba4582da488aef91b530395979f73fa83511e3b3bcb311221c6832b18d0280a0b6cef7850201c4dc0a31fb268316ab21229072833191b7a77b9832afea700a1b93f2e86fb31be9480f028090cad2c60e01cab63a313af96a15c06fcf1f1cf10b69eae50e2c1d005357df457768d384b7a35fa0bb0280d0dbc3f40201fe9835fffd89020879ec3ca02db3eadbb81b0c34a6d77f9d1031438d55fd9c33827db00280d0dbc3f40201d19b354a25ddf2641fc7e000f9306df1c6bf731bddfe9ab148713781bbfab4814ed87e0280e08d84ddcb0101ba960fec80e1dcda9fae2d63e14500354f191f287811f5503e269c9ec1ae56cef4cd110280a0b787e90501acde42b0bdfd00ab0518c8bd6938f0a6efab1b1762704e86c71a154f6d6378dd63ce840280e0bcefa75701b5900eedc2ce12618978351788e46fefe8b9b776510ec32add7423f649c613b9db853a028080e983b1de1601edeb01d68b226cd6b71903a812aa6a6f0799381cf6f70870810df560042cd732b26526028080f696a6b6880101ca18a91fd53d6370a95ca2e0700aabc3784e355afcafb25656c70d780de90e30be31028090cad2c60e0184c03adc307ee3753a20f8f687344aae487639ab12276b604b1f74789d47f1371cac6b0280c0fc82aa0201a2dc0b000aa40a7e3e44d0181aaa5cc64df6307cf119798797fbf82421e3b78a0aa2760280e8eda1ba0101daf20caa8a7c80b400f4dd189e4a00ef1074e26fcc186fed46f0d97814c464aa7561e20280c0f9decfae0101a18b092ee7d48a9fb88cefb22874e5a1ed7a1bf99cc06e93e55c7f75ca4bf38ad185a60280a094a58d1d01dff92904ef53b1415cdb435a1589c072a7e6bd8e69a31bf31153c3beb07ebf585aa838028080bfb59dd20d01916c9d21b580aed847f256b4f507f562858396a9d392adc92b7ed3585d78acf9b38b028080a2a9eae80101fab80b153098181e5fabf10
56b4e88db7ce5ed875132e3b7d78ed3b6fc528edda921050280d88ee16f019fcf0fd5f4d68c9afe2e543c125963043024fe557e817c279dbd0602b158fe96ec4b6f0280e0bcefa75701d1910e44b59722c588c30a65b920fc72e0e58c5acc1535b4cad4fc889a89fccfa271510280d0dbc3f40201b78b358b066d485145ada1c39153caacf843fcd9c2f4681d226d210a9a9942109314d90280e0bcefa75701a88b0e5f100858379f9edbbfe92d8f3de825990af354e38edc3b0d246d8a62f01ab3220280d0dbc3f40201959135c6a904269f0bf29fbf8cef1f705dde8c7364ba4618ad9ee378b69a3d590af5680280e0a596bb1101edeb2140e07858aa36619a74b0944c52663b7d3818ab6bf9e66ee792cda1b6dd39842e0280c0a8ca9a3a01aae213a90a6708e741f688a32fb5f1b800800e64cfd341c0f82f8e1ca822336d70c78e0280c0fc82aa02018ddf0b5e03adc078c32952c9077fee655a65a933558c610f23245cd7416669da12611e0280f092cbdd0801aca0305b7157269b35d5068d64f8d43386e8463f2893695bc94f07b4a14f9f5c85e8c50280e0bcefa75701b18f0efd26a0ad840829429252c7e6db2ff0eb7980a8f4c4e400b3a68475f6831cc5f50280b09dc2df0101a6830c2b7555fd29e82d1f0cf6a00f2c671c94c3c683254853c045519d1c5d5dc314fb028080bb8b939b4401be3d76fcfea2c6216513382a75aedaba8932f339ed56f4aad33fb04565429c7f7fa50280c0ee8ed20b01b4a322218a5fd3a13ed0847e8165a28861fb3edc0e2c1603e95e042e2cbb0040e49ab50280c0caf384a30201ecb42b7c10020495d95b3c1ea45ce8709ff4a181771fc053911e5ec51d237d585f19610280f092cbdd0801eba3309533ea103add0540f9624cb24c5cecadf4b93ceb39aa2e541668a0dd23bf3f2f028090dfc04a01a6f3121520ad99387cf4dd779410542b3f5ed9991f0fadbb40f292be0057f4a1dfbf10028090cad2c60e019ac83a125492706ba043a4e3b927ab451c8dccb4b798f83312320dcf4d306bc45c3016028080a2a9eae80101b4ba0bd413da8f7f0aad9cd41d728b4fef20e31fbc61fc397a585c6755134406680b14028080d49ca798120192600ef342c8cf4c0e9ebf52986429add3de7f7757c3d5f7c951810b2fb5352aec620280a0b787e90501afe442797256544eb3515e6fa45b1785d65816dd179bd7f0372a561709f87fae7f95f10280a094a58d1d01dc882aacbd3e13a0b97c2a08b6b6deec5e9685b94409d30c774c85a373b252169d588f028090dfc04a0184f81225e7ded2e83d4f9f0ee64f60c9c8bce2dcb110fd2f3d66c17aafdff53fbf6bbe028080d287e2bc2d01d18901e2fd0eeb4fe9223b4610e05022fcc194240e8af
e5472fceda8346cb5b66a0a5902808095e789c60401cf88036cf7317af6dc47cd0ce319a51aaaf936854287c07a24afad791a1431cbd2df5c0280c0f9decfae0101999909d038b9c30a2de009813e56ba2ba17964a24d5f195aaa5f7f2f5fefacd69893e80280a0e5b9c291010199b10cf336c49e2864d07ad3c7a0b9a19e0c17aaf0e72f9fcc980180272000fe5ba1260280a0b6cef7850201a2e20a7a870af412e8fff7eba50b2f8f3be318736996a347fa1222019be9971b6f9b81028090dfc04a01bae5127889a54246328815e9819a05eea4c93bdfffaa2a2cc9747c5d8e74a9a4a8bfe10280f8cce284020191da0b24ee29cd3f554bb618f336dd2841ba23168bf123ee88ebdb48bcbb033a67a02f0280f8cce2840201e6c30b2756e87b0b6ff35103c20c1ddb3b0502f712977fd7909a0b552f1c7dfc3e0c3c028080e983b1de16018fed01a3c245ee280ff115f7e92b16dc2c25831a2da6af5321ad76a1fbbcdd6afc780c0280e0bcefa7570183920ef957193122bb2624d28c0a3cbd4370a1cfff4e1c2e0c8bb22d4c4b47e7f0a5a60280f092cbdd0801ccab30f5440aceabe0c8c408dddf755f789fae2afbf21a64bc183f2d4218a8a792f2870280e08d84ddcb0101f8870f8e26eacca06623c8291d2b29d26ca7f476f09e89c21302d0b85e144267b2712a028080aace938c0901b0b1014c9b9fab49660c2067f4b60151427cf415aa0887447da450652f83a8027524170580b09dc2df01028c792dea94dab48160e067fb681edd6247ba375281fbcfedc03cb970f3b98e2d80b081daaf14021ab33e69737e157d23e33274c42793be06a8711670e73fa10ecebc604f87cc7180a0b6cef78502020752a6308df9466f0838c978216926cb69e113761e84446d5c8453863f06a05c808095e789c60402edc8db59ee3f13d361537cb65c6c89b218e5580a8fbaf9734e9dc71c26a996d780809ce5fd9ed40a024d3ae5019faae01f3e7ae5e978ae0f6a4344976c414b617146f7e76d9c6020c52101038c6d9ccd2f949909115d5321a26e98018b4679138a0a2c06378cf27c8fdbacfd82214a59c99d9251fa00126d353f9cf502a80d8993a6c223e3c802a40ab405555637f495903d3ba558312881e586d452e6e95826d8e128345f6c0a8f9f350e8c04ef50cf34afa3a9ec19c457143496f8cf7045ed869b581f9efa2f1d65e30f1cec5272b00e9c61a34bdd3c78cf82ae8ef4df3132f70861391069b9c255cd0875496ee376e033ee44f8a2d5605a19c88c07f354483a4144f1116143bb212f02fafb5ef7190ce928d2ab32601de56eb944b76935138ca496a345b89b54526a0265614d7932ac0b99289b75b2e18eb4f2918de8cb419bf19d916569b8f90e450bb5bc0da806b7081
ecf36da21737ec52379a143632ef483633059741002ee520807713c344b38f710b904c806cf93d3f604111e6565de5f4a64e9d7ea5c24140965684e03cefcb9064ecefb46b82bb5589a6b9baac1800bed502bbc6636ad92026f57fdf2839f36726b0d69615a03b35bb182ec1ef1dcd790a259127a65208e08ea0dd55c8f8cd993c32458562638cf1fb09f77aa7f40e3fecc432f16b2396d0cb7239f7e9f5600bdface5d5f5c0285a9dca1096bd033c4ccf9ceebe063c01e0ec6e2d551189a3d70ae6214a22cd79322de7710ac834c98955d93a5aeed21f900792a98210a1a4a44a17901de0d93e20863a04903e2e77eb119b31c9971653f070ddec02bd08a577bf132323ccf763d6bfc615f1a35802877c6703b70ab7216089a3d5f9b9eacb55ba430484155cb195f736d6c094528b29d3e01032fe61c2c07da6618cf5edad594056db4f6db44adb47721616c4c70e770661634d436e6e90cbcdfdb44603948338401a6ba60c64ca6b51bbf493ecd99ccddd92e6cad20160b0b983744f90cdc4260f60b0776af7c9e664eeb5394ee1182fb6881026271db0a9aad0764782ba106074a0576239681ecae941a9ef56b7b6dda7dbf08ecafac08ab8302d52ee495e4403f2c8b9b18d53ac3863e22d4181688f2bda37943afbf04a436302498f2298b50761eb6e1f43f6354bdc79671b9e97fa239f77924683904e0cf6b1351d4535393a9352d27b007dfda7a8ae8b767e2b5241313d7b5daf20523a80dd6cc9c049da66a5d23f76c132a85d772c45b4c10f2032f58b90c862f09f625cbd18c91a37bb3fc3a413a2e081618da845910cf5b2e6bffea555e883b0bb9c5f9063380a1c33ebdb764d9ffefe9e3169de40b18eeb9bfca48296457bb0b4e29d7b2b5bc4e0021ba0a1d389f77a8e253d6db149d400f675a9330f3bcfd09c7169224a947b6b4e0745ae08cd7adea4151277a94f51f85292ba082cf28300cca233ff4966b093c9cb6abcef476026040fec2b435021aff717b8bb90a40950e010f70bb416a618dc3c5c03f590c5b7ec8e0c05b85ba94078de4817918f783022364b8aa228b5df43b38fba3060c30616f265022584ab6034ddbc832450f90047d0cf41a4af8a20fb1aa66406133a17c2e905ee28d8acd186c872859c196db0474dfaaaded2d63768143cf6b5e2e34662f7bae573a08cb15069ef881892e5a0c08b5c6c7b2e6376cd2080fb29e8d3d5aa5b853662b4f1784ba7f072130e4dc00cba3cc9278fc4213f2ce2fc82bd1ea9fa91bb17b4f7c36962c78d864eab9f30ef327039da6607962a156a05c384a4a58ddd8f51a0d4fe91f64ae7b0a5199110a66f1e676392ec8d31b20a65f7a7fcff90b37a8a3962bff0c83ee6033a70c5b0af663ca48a8f22ced255839444fc51f5
b6a6c1237eda5804289aa25fc93f14d0d4a63cecfa30d213eb3b2497af4a22396cc8c0e7c8b8bb57be8878bfc7fb29c038d39cf9fe0c964ebac13354a580527b1dbaced58500a292eb5f7cdafc772860f8d5c324a7079de9e0c1382228effaf2ac0278ebedad1117c5edacf08105a3f0905bca6e59cdf9fd074e1fbb53628a3d9bf3b7be28b33747438a12ae4fed62d035aa49965912839e41d35206a87fff7f79c686584cc23f38277db146dc4bebd0e612edf8b031021e88d1134188cde11bb6ea30883e6a0b0cc38ababe1eb55bf06f26955f25c25c93f40c77f27423131a7769719b09225723dd5283192f74c8a050829fc6fdec46e708111c2bcb1f562a00e831c804fad7a1f74a9be75a7e1720a552f8bd135b6d2b8e8e2a7712b562c33ec9e1030224c0cfc7a6f3b5dc2e6bd02a98d25c73b3a168daa768b70b8aef5bd12f362a89725571c4a82a06d55b22e071a30e15b006a8ea03012d2bb9a7c6a90b7fbd012efbb1c4fa4f35b2a2a2e4f0d54c4e125084208e096cdee54b2763c0f6fbd1f4f341d8829a2d551bfb889e30ca3af81b2fbecc10d0f106845b73475ec5033baab1bad777c23fa55704ba14e01d228597339f3ba6b6caaaa53a8c701b513c4272ff617494277baf9cdea37870ce0d3c03203f93a4ce87b63c577a9d41a7ccbf1c9d0bcdecd8b72a71e9b911b014e172ff24bc63ba064f6fde212df25c40e88257c92f8bc35c4139f058748b00fa511755d9ae5a7b2b2bdf7cdca13b3171ca85a0a1f75c3cae1983c7da7c748076a1c0d2669e7b2e6b71913677af2bc1a21f1c7c436509514320e248015798a050b2cbb1b076cd5eb72cc336d2aad290f959dc6636a050b0811933b01ea25ec006688da1b7e8b4bb963fbe8bc06b5f716a96b15e22be7f8b99b8feba54ac74f080b799ea3a7599daa723067bf837ca32d8921b7584b17d708971fb21cbb8a2808c7da811cff4967363fe7748f0f8378b7c14dd7a5bd10055c78ccb8b8e8b88206317f35dcad0cb2951e5eb7697d484c63764483f7bbc0ad3ca41630fc76a44006e310249d8a73d7f9ca7ce648b5602b331afb584a3e1db1ec9f2a1fc1d030650557c7dbc62008235911677709dea7b60c8d400c9da16b4b0a988b25e5cf26c00c3ef02812def049bb149ea635280e5b339db1035b7275e154b587cc50464a4c0bfd15c79f54faa10fbe571b73cf1aa4a20746b11c80c8c95899521fe5f0bb3104b0a050c55a79511e202fee30c005339694b18f4e18ab5e36ea21952a01864a0e067d9f19362e009a21c6c1a798f7c1325edd95e98fd1f9cb544909fdf9d076070d1233e183fb6d46a46fbc6e10452ef4c45fa0b88a84962ad6e91cbcc52bc000b12a82e93ae5998b20ee9000a8ef68ec8a44862cc108869
fd388142692be6b0657e3fe79eff0e8b72f63aeec5874acf5fb0bfc9fa22645ed6ecaaf186eca690ecdf8a71b8f4789ac41b1f4f7539e04c53dd05e67488ea5849bf069d4eefc040273f6018819fdcbaa170c2ce078062b7bbe951d2214b077c4c836db85e1b138059c382ab408a65a3b94132136945cc4a3974c0f96d88eaa1b07cce02dce04ea0126e6210a9543129bb8296839949f6c3867243d4b0e1ff32be58c188ba905d40e32c53f7871920967210de94f71709f73e826036b4e3fa3e42c23f2912f4ea50557dff78aeb34cb35444965614812cbe14068a62be075fce6bf3310b9e8b12e0dd8379104360f728d47a327c172257134e2c0e7c32e01321f4d636f9047bd750e7993eeda7d39fc16f29696b1becee4d8026e967f8149935b947fce8517b2ce02b7831a232f3a29010129c49494ed2b84c7f881b7e4b02a00ebabf5a36023c404002d6cb88cee76c8ce97b03143ca867359d7e118d54e053b02c94998e6fd8409f8d46fc1741a2e56aebb1e7dab7ca3296a2566263d9be2f4bbef4872a49ee1082cbaf86e21b0c232c4182fc660f0c0b6aaeb0393750e553bc406e2a27842bd033da45a562ed1998ef9bd83e35ed813bef00a3e6147cb363bee63c543ba5e770b043dbacc155214a2496f91879bbc9170a2a513d7b48fad40c8c2d96f951e3a0932f6d12956789198430b352803852aa9726163fbe839979b33f8dbf7f76cd50755c1ce0c40a072aeec35057d06abaf59e878000b1d796e51908bfbf23b13900dcb30f9bd52b52994e7245a7017653a404a70d1c444b8c613ff10a2b057c02d062c5faf13cdc4445809fb6e096923cdbbdca18f59318ff86c7e449f596050b404d3cde0338dfdf9b1389178b1d4c70eefa2bbd76fefc1ee1f1688ef507821e40ae31d8d8e673d183b54563e2cbd27e0e042f61b046877d37a68c1b5784830690f2dd4ebbbd2dbdb35800b9e0ba8ea985fa106dd2ce8493e845586716c538ee9008b88a7c482f3c00c14c08468230d40cdc040e145282c4d61985cb5800306e305146204f63e96ad194bcdf1338ab8480341b6fbccf18fc32145f84bece4069c09e41096e94c24fa4f0db988e860a3bff3604143f2b17e8c219f28189e4cd49a0e506fe62dc419299bcd78c6ccb107f63eb31b4bd8ea1e2fed10e3ac17341d3505019e2376b01f7a7fcea3db110fb090c681c866ac86f13e6f8d44a32861e0580def063736b5c771b2b3b9067045867b4393f3eb2a4610bd0216e29906aaac370986451c6bf78264dda7e7a5fcbcf7bd6e024ff6003c6db780d89b97765cee8d0ff3ff25d94d4b4b919f722b26a6903a017daa62af387843087680c57952de06064de05b662af87be49b6e34cf0991cec7be3396e2eec9678ba259bd8de1c1920
14d02928f9113488215658df4078ed661fa4e79e58decaeb0ee5a00488b094b0b77f083b2b7844f481e7788ffe8004b96ccdf853532bfd9632a8a652c2d97d10173c90864fbb6facf47fae415df4acc0b099140a657b35d083d74dbdfbf107303e74c64471bed4b2199f2babcb4e1fc593d6f309e21f85e68ffd9904731559d0f2b673b36d3984e5d66d897dfa17d601edef3ed78cb70dc5115d4ae240c203e031263f0cf1e98075bac0361fde24cbcb852b8055d53ae01d61a0a1e1ba423d00833747e7364df7ebfd1f84598d801c249e1805279dc37d39fc7f7e27b067e4e0287aec432ed49e4d701a0ff377e88179968430d110cb20476ed4c6bf1624d1907ef24406d3295fcacde2a102cc85f4f3d0cb87a8fae7535a06e442833e58cfc04242ff85fb654d05f9874c0a6756f542db4e9d8b0366191fbb8b09a1bbcb6af04c069978417ca80d92f442b7dbd092f74e1268aa73b54e4b64e84543449ecd30b5ea392a1669a5f441d7208925e91c75df611cd26042630c6b98f160b8c0156048108d5465b71bbc54d31a9f90e34428d97590a427e1ae618d4a35fc1022d4e007c6108dcb1672b88d43ae4d886a5adcc26faf56bc5e5a0b08342fb88263fd80940d1edf794c6ad6d339b974e164b38439e11b4fa87cc793b080b4f8bf0eb56043f79ed3911da21092475fcf8320b55b9f558f194c6c8121b2e696039340d97057be2583726d762b5ae4327e5286a2d8c14ddbe0027c75aacbf7e9de13037390df7d72e13b46bc06bad0363b070e0174d034120d7fa7b4550e7dc28f7f0241f059ae266fc13dccd1d07f744208a7d6a2e565b6613d46e4550f79ef3209c46a805b97284df558719e131f44e419e690f4fc28ee4862b9d1f8f7e1a164ac18141076087693e70ac76a10f7851530d4cbc65def90d5544671ad64249569c3abf0200d09be3c63efaa7cb723b39ccffc9b3a3ba0c8426847123d2a881efbd4937a40cb3e8011c70ba427f80b3dc91a608086f8b61f8bd86fcb482ea388299a7cfbd00a3ddfadb4b6d0e51c1369276c25889a9f3592900b6502d9af1c732e1fb7db307d71e45deb1553ba1568d0480ea9e132b52564da6ac5c56eff823e7f37976cd075ce8f7a78aaef1b87f7437a0b84035677f266f7d0596e493101fec3e14fcf80b22322454587b51fda0636231c07d4e63e007f1b89137d8a37b03bf00f3a7c10169f757d9a74b7bffba797c746e3845decc3d0559d7cf6f08f3bd67dac5f33109b212582dc7df5d561ad63dddc794f2aea4e493db1a73c702d258c9b922c35d04c47f88f87c54c821a29f04abd91a079ce8cef252a21dc72d409fd1618c9be709af029ba98b0140e74666fcb01bced4f88ab68e6b63b8ed6febc0905d22cb2200493c071ce136833
697406f8a04e77b21747dda997046cf3c7080096fe481790d77cf5904e7f7128ed95a6e576d109fdf10eb0c888db35a4a685b62253987b70fb1538e6c0932889460fa31c60d123266b7bcb828f846a35b2851127679b05f05a75266529c343a6075e54c455e02b2c83e6f7bf1ae23326506a5f532472d780815c5af425f7d8b543a8f014966e0538f48ca84d181695381a09701eb65c9ae084bf2a4dc84f1b2071be32be25d5f4fcdc59668fd800496ef7eb6dddf867ab908e543cb51f0451706cce4be6b9f68a537a79ea88e17fcd78965b3da68c0d9d30623a2a9e275e1c320f59e118e09c02eee527167bc06f7693e7b584e3b25ecc1093d46b80a1cacced87c2b32e2f90c5bbb9cd1b701aae69a04b16d535fac6eab0d091790fc5fdfa8a8842bfcb62dbf963cbf62c4afb4c468be98c770e6078b8c0a8cfcbae43dcfff17d3c6d587c3e4309fd39c66acd14781fea66fc57278b02302c0fa386280e67acff19955b6a428b0e22ceb1e54e913a37cd19eb6e9d2268a039f2b5fdda7d5804db79385f0e50082b128c952f8dfdedc4411d0675d95127f0bfc01710a869b10d7a8b9e632dad944062567439e6d192fb09329d058e87ecd0aa8981328f541e87ed02cfe4031f2d3a046ff517a2a80486b04ade31a647aec0884fb96ed753ffc47892431c6e6f08fd1c633a1a44e882d3d8b92c567e0fb8305327a354851464ca0f18d89c6ee2a91a4afef0c55883acf8fcb68c2c3b7402e005d8affc19c13f1f26fee0698dff181ab22cb84a2b31e0a6a81dc5d02e60a3c07090397ae58a985526b2ad6ee5725e82328062b68566b4871705ce3b9856e550d068c20fd9aaeb27740c07aad53d79fc20e46e40e7103e2d69626ee64b6aa600f6f1a86f37948ff4990d88f43c34994e2fe586cb779997be323da53329c10480aeb08fe440e9e4b979171371c73b94da9f928a3f6c8f6792f782f3d6432b86d06f54557327fef31fd6ae0a3f6d2f16c9ad947d132e14def33fa24cb4565370e0832fa50f5f5f93c9f3d65776cc22608b68a4f3719e9be47a19432991e4a2c49089c0ea20e7f7c73feaa47970da424d8543d80d622e2f2be9f4c65cc39dc369009a9d41a52bdea7cc0e8e04da87a633fd4f814fda1b646121a469ba0b5b8006d0e9118761d97b5d1856e2d690f27a81c42b176df853d07cf4a66ee83c9eb24ac0a382f5143a10a33ec3ddf17dcd8a8303fac8f279d31b4d04d74bd8804cefbb400c86174ad444e43ed33ee1e1e73f660b9814d5ca3cb1d650f1978a825a617bb05f84eab3b9b8359b991e1084cf4e8179ecb67f92398638e31227ff63427b67f0f232b454a341d85d4b56e31135c9035e231f7d9318ca12b5ab524f87bb0ca9b04b80effed202897ab016d5acc054c4fe
62a5f0192f136cf2cd714998a4b164b0c2cdbace52243fdc9ea879b0d247d4fe8bd80481fad6b325cb5f2cfa2534dec0e47d41b6b99352e6e5faccb5ee28ca2fe96e04f9c83a0461ba34cfb499d864f05dc734b6c8f51cc0c994290b2a868cb8312df39fb6d457a81e62d872c65d4f3007094be42663bca3d64ebbcc8401158fce4f5a38a49c20f029c338f126451820459866e77c6984a467aad571cc7452392a9cb9f8e65099fff2f2acd170a833e01ed3d22a683356ee42dcbe6bab6d05d3edda2d40e9ba53884d430c2e0cd87c0067dc8cb68c868bd9f29db1dd73703c139ffc15e4f7264e727c70560ae02da100871f30e8a7c1275805f621752d73aafecddc2a7808b6c2ecbb8d0134a644bb603f30f8d18b3fc5efaa7f206ce180bfb14f5dbd3b0115145a227113eeaf1c1ec04244227b931388e72960833a40baa4319c5cf74aa94f7e3233e1d09f0a4f74409999684ad1cc836ac74563c85b164664dfab08ea084b25e2cbd7e7b94a781a10fcd455ee38b65126dcc52016127fd195c80b05660ed931e128b0cb92868955c0d032ada9fb951210a5442d21c1718ebc4702ad4a57967e15a63ffb05e1e072a0c41ebdf1e7205373eeaf4587f695de887fa3a8c96b8deb99e040fa1fc4dc2a402a017891943d734ae2f3798b22b1269d3d9f6d65581b9c637a6896a4fb554810bbd3db5c5737391a74150b43413b2e3824490b7911cbeb845147f1a8521620b0dd31306f13a9754a01bcdbd18bfdeade06b0ec97f48df56c45d3670a1fe18d00ef13e613c8a77aeb40401a814b377137cf44f29cb2cb94186ad1161ecb05a7c07837a5ab3474e57990cff2ab16b4d99f62e646da28e8bb712a5b561cf0e25be039c3e08583c8ebc3dd2fdb8fdc6e135ecc7851c73218a70b75e697cc84ea50504b9c34a33ed52f87230b9d192a940f3b7bb6d45b58dbf52f0afeb8dac85c77b06bdf9b70a10cb81c50055c9d8cf7e3a5c4b7dfae55beabcb3e8a8a1cb822d8d0bf6c01e32056929f853021eae6c97fdb0c5031df6b2e7c57f1318866769a9cc09c38ed62d8bf4663334c0df67c47236ed73f6ce7f54e0ada9270398c1aa558d0f993b0d25d97aea77b1635ee4832362cd590bae5fc1549402ddcd42b15efc930111a01535c0242116078d6d2d53b8612d378c4370e90d0d01b01bd7da591bec07981652a98485d8ed5c8f3def2bdac7d992ee5fc6a1ec7bd36940e1bc58c7050451248fc3ee6069e6b1b0d3ef122c6ef2a9b99aa0f145fb43341c58dbb472130b51730c956273a3ef6df9e000f6a87c2bacdefcdb5daef28b6170f61bc3a9c101f439755c86e6b85ee06a7a60688b3843eb359cd4acd9221a2ee131e2fd2e190652e5c47c0b98c41010eb99a991ec48a5de99cc8f403d6d7
6f8307d6657c1e007ebd64eec7bbd0d4f1ba2db7bb0efe27c7828f053e00def775943ab01a7e33d0fffcfe6f9a7285237f2c381b638758e373f8ceac672190664bb25fb5d355c240bd1773d61bda7f7ef1f4261b80ff5058ec6f7e024ab9459b1103815624b81f80c39db2f6fecb72de452b11636b0f71b16cb55f883d93bebb94328f13ef1ab6d0df449e32d27884f5139af584035547dace65ee25ba05cc461e74760d4468af90dcaa982e52cb902e2b84b3324019da575601ca54e91655913892e703257deaa01d14fd8459ff780c724161ba4d4280b70a5039dcfb5d775560714009724cb0d0b7e178c71e777b896bcfcde7d4c9c3dc6ab819d74a1a1fda8486448b1ad79be02fb134ea93a8600f1bc2a42e68d0213ab461a07cef3ad3965bc130beb76bab409102f82bf6c4cd626f6df3388e17b87584310c50832cde3191f6557f0014bdc0a68d924119e43111043bc6f26d16a5f2612dae6ab24984e2d87a71d93d5f4670dba2176d4f16633407bf7c10b51b6842dfdbc6fe3eaa4b6a12f0550700ece070ca382dec3b587e0e1fc317a48a83754d15aaf9a6971b8cb641fd8b32846d89002e6301700a0e7056e8002d8f269d29ebaf64f4493b1f1e676fc78e673067fb00625df15dc0490235b386ee14e55b335f3bc6dcedd7d3a80fd3a6e9bc2ccf3af0d89be71b5ca92bd7a9b97b9ff8976f75702419aa5bf9be34600496ca1bfa8ad0400602a23579365574252434f2bcd7efb360b0e8a495e8f7e78923b6fbf2207049e9179f0d4d7d6b4a4a10ca10f0ef4dd6cb5a74f7574e832044d6120fbc1580a68eddfbc65ab300bed960a6f24a102dc36b72937a8be4385daf5946e81ccde0619251babbff17e5685217a134d22f6130d0322483b3475227ffd27adc73ca202a6debfa37e5731747f4449ac70a33684f460eede65918c6d89acf4b50fd28d040ffbd436a944d3be0210606bfc2301e7ac66d462dba29a0489eb55af714a760e5302592cccc726e535b945ceb6126eb84e31f0f140ff54df8be0fa3a22f418036ad996787a5616a97a42049ebce351dc11857cab3dc914ef26833b0e75653004a8cafea099fb0750135255c41ef43e2f29c75714e2f0be2545e7c109b70c43004a471daa85b47befc65907d033f133b2f3ac2ad568df630ee80506610b8dc9052d442668dc06b13ea76ab1ab7b34870341d660af5d3007c21bb72512e4f8a60d8916a037b93f9e15ac9e4a6a1246d73ebb40e5fdd5a0d6dc0cf175023b891301f69fe5a3ca6f12cb8312d16333de1cd3ebb99339ab18c0715bfcd35b8365b407ad759e2c591d8270ad335381573e27ec18af7ca157b4a2bbca921db083d9b0009dc332a79dde14354a8c18bce76a1bfc1a25a1e702ccaa0feb521ee9279b8a0
1ceab6e237bbe4128b23cb53b1e5185f3266e20670a307ea0cfb5377025e0bb0790d48f1636c8b836c1a1f69ad61265f19057197e86cd526da6ddb94fd1ece80b60852f27ef2ce56ccb5a32d8cab6d16be06f380dfde3602ea4c1ae927173b2001ff0d9e29bc66b2b2a20c3e3ac174fcba187aacab0876c1356d30d4021e6dd0048c3bdfbf254108bb09d3ca9f2be423a92408bca52fbcd68f972c46fc8d20e0350d12c2f2d6c7da85e96bcec3ce61119793d44a210f81ece859fef6360ae3b0e1af0634fc141a8b50b3b383fb264e8a4fb84ea06db6becbf5e140edf66ee190da8968da579eb349fedea45e4c252a79570501278bab5fa984d7b1179d7c2460faa7beafee153bbae0a591701632aa94839528d3ef50cf809c1f7209b9e5c99010eaff7f921c45b6546358ee7a90948e3c710cd3e1796860839a345516fdf4f07c415029627abe1273a1f510c36a662562d18169b23305b4efadfefbfbb41a400ab533e61c14cafa49bc5d2818058ee4f3e1aeb329e150820d1de1f1eaaad31051a6dfdd3a1d5cec7b16bc0ea2c649d409917faa42138b1f824b4d534a050be0a99ea6772daf0b2e58623cc7a250ef37599bd556508f08886e663ef0917ecd3077072c3268ea5b9b89cbb6b761ee9f9c4765d8b267d9eb19728a28ce67a42ed0cad142b5dc0fc5313853860ec3f0ee2bc3d47cbe12dbe9633db809967d5b8bf0e45574eac657059530c30aeeade1e4f858a4a6e79d6e441b4af0127a13340d908d48cfec849ee93d53b1564231f048d34885e791a9d40c61a7b00f12f6f72a5050bcaabcc98480170ea6e20bef6b5c6f504c808108454fe2f3c275bf8f89a5e0a3304a7c4787e6d4fbe569930f7cfd38ab7d1d2ebd599bbb411950cec3e53b90cefb82234990d353c71ce4c21ef674a1c4070f71c90e1ea7edf35f5a421118f01b49a92ea97720e2d4df6b5885c181002656629a90eaa1904fe1c379b8291480ca15d0dc2b65a20c22f1e01d612d21ecb5738e5ebfc578a4a65066ee6e913e3030d3fdfb0168fd75022492728ee82869deb9ff2827f4e10759ecddb20f67e9808e707257a74d3dc0a6068f264066f95c9f772a3dcec0b4f0a327e3745517ad60ccbc5392890d2479b724d068fcdb83607e02291c06e1a5a1dac7604889cce2500f418da2f7080a7e9a1bdf28b87028a2bbb0c14f059f10f46d46716eac2cdfc06676cbec91b8c2c0f7c9bea7e27fa5048662398b23a9b488a49e1d3330c04e60179a4492c8b836780899899d2af17e6119a94d54a890ce8c0b550e87fd54cba0821fa7c48f6e09a60dcbddf853f82b47195aa44a5ceae14a9257296acd711c8073ff3345befca5d3ebf64901b283df96395fe9785d7090176bfe5a9f13ceae701c6c93af0e13d949
bc3c7e9b06674a73e7affc508302258a27fb34569c3742201c0721aef282a31c69a5d98a67ac5c3d920d50c089896f7f8c8c237a81f803f0444f417246d695e89a3a523b62a3cd2203d42607cac7c7782dec1f9edbb806c0a7a37d1a969082a126bc726151a50233456a07d374399e74aaa8cc66821511d092615950d302e815cfcc021e1250cdea20fd9e1e4b5e88280d6e4283b918e780d12cbba59ef2ed2ce86135a48fba6c0dc2bf2efee190d9a3f9aa22a622b1953058f2bb3a371637d13e045d54e7eb54c0d25851f49283d7d34e9785d2d5c3f70086c48a8325a2083bdf5b3531fcc697cc0c9f63892a866c84585d673a2a63fd60e77995bfb0c0a44a4b63c0ff67e813027d3e84cddd393a0f4e6bc95525c5ae20eed9d0cea4a12aa748eb5209cfd75990b055f1ad0472f9f7599f569a8743a720755aa11555df4bb2e725fa93bc5dea603a964e8dc9fb1742e81825022866fc50a6b2a19b6a234a38ee27a74f2f5832b294143ca7ff8d07fd7d4e01f479e9792058871d90ee3aaa3329e82cebe41dff5e6d00a36268a7965466b80c6510ac1350cee797e1d6737f6aaff155266d2a2d611b2124affed1ac73a6a06515627b2230ce0d7fed33ecbde511f4d472cbc556cc8d9c5640e67657035112976b626847a0a4ea5fdd14d4a3eed57f0dfbe153393d8bd28c8b4f9e62940e8379790393fa20c617050c780a7d870193b4611bd7a12d26947a3cf4605e225da8b1646a76984015a5e317016a4d8301eaeec0db3ae0daa719182e2f4479154dbcccfcce1f365099de6c91934c395ce82abba8062a51d7773b418330921766cd3d275c689098e06039698db6f09accb292e7eb79e7a022d4257bb2f9ed993c519860919bc229a06ad88954c9ebf7f5b9fe95cf56e8181cb9175dac06be0be70fd28df20cdb4600ef0869668c645c9ea01360fdae7c922cb3d2b3583ae1de5ae7d899a83ff2bb00d7365c782a0fccfcba7f87bb29416469bb051f9b0755123e0f2fa76dc7644b70e452f49a84bc372b384c843b8161b7f9b63699adcadd5cb2b33b36c7eb3e1b00f25218bc16447968b939016242fceaebd796c17a24d1b9870991a9c3ae90e380302b7bb320adacc08cfb9249d29cd9275c52476dac6a7e9870ee3776cbc3352036f9c8f681d44856c6c5f90b7cde0877472ddd48719c449f59dca1f49442f7505e4809c6d323b37530ecccf3e41e19822f53d64dc90efb113405ee88799c37f0a342293b5bfc019a9057138326de6107b5613554dffc737aed7237fb16cd77e09f581d12220ac930c6ca279efd1d07a92125fb2606ec3ec35351987a15fc72806cfb3cb66fce8dcfabee5c1e586bf0f802fa12ae5ad5a708e3a5d54e1926dbd0202bf1150f1bb612b9a4590b5b520b86
a90860ec3d9c2184f9975ced15ae1300882d9918021b43a1184ba88ddd7091539fe5a7017b8708d0f5c916f9c42de5103f8116863864b508f5880ca60b7492385c16a02b6ceb64d257a4838873b85d2041517c5c7c4508e4d5a5faa72729d73af0361e11828eeca992b8f20d903a5ef065976a9f322e34bd4b3984bb09e18be40e77e833c8c1a2e80093227d3f40d4a067f5e3aee9fce9bd234bb6ff4d0c34fc060d23e86b1f5a6d8d052e53e913182052a2d9c5e97bb0e0a51bb2fafbe7346bacfcbadb00ce2ba129f29d41a11f7d105cf19bb60b5f5b0dfd6a894698ef7f56a02d69cc03eb62a56563d3a77e3ac2302", + "as_json": "", + "block_height": 993442, + "block_timestamp": 1457749396, + "confirmations": 2201720, + "double_spend_seen": false, + "in_pool": false, + "output_indices": [198769,418598,176616,50345,509], + "prunable_as_hex": "", + "prunable_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "pruned_as_hex": "", + "tx_hash": "d6e48158472848e6687173a91ae6eebfa3e1d778e65252ee99d7515d63090408" + }], + "txs_as_hex": ["0100940102ffc7afa02501b3056ebee1b651a8da723462b4891d471b990ddc226049a0866d3029b8e2f75b70120280a0b6cef785020190dd0a200bd02b70ee707441a8863c5279b4e4d9f376dc97a140b1e5bc7d72bc5080690280c0caf384a30201d0b12b751e8f6e2e31316110fa6631bf2eb02e88ac8d778ec70d42b24ef54843fd75d90280d0dbc3f40201c498358287895f16b62a000a3f2fd8fb2e70d8e376858fb9ba7d9937d3a076e36311bb0280f092cbdd0801e5a230c6250d5835877b735c71d41587082309bf593d06a78def1b4ec57355a37838b5028080bfb59dd20d01c36c6dd3a9826658642ba4d1d586366f2782c0768c7e9fb93f32e8fdfab18c0228ed0280d0b8e1981a01bfb0158a530682f78754ab5b1b81b15891b2c7a22d4d7a929a5b51c066ffd73ac360230280f092cbdd0801f9a330a1217984cc5d31bf0e76ed4f8e3d4115f470824bc214fa84929fcede137173a60280e0bcefa75701f3910e3a8b3c031e15573f7a69db9f8dda3b3f960253099d8f844169212f2de772f6ff0280d0b8e1981a01adc1157920f2c72d6140afd4b858da3f41d07fc1655f2ebe593d32f96d5335d11711ee0280d0dbc3f40201ca8635a1373fa829f58e8f46d72c8e52aa1ce53fa1d798284ed08b44849e2e9ad79b620280a094a58d1d01faf729e5ab208fa809dd2efc6f0b74d3e7eff2a66c689a3b5c31c33c8a14e2359ac484028080e983b1de1601eced0182c8d
37d77ce439824ddb3c8ff7bd60642181e183c409545c9d6f9c36683908f028080d194b57401ead50b3eefebb5303e14a5087de37ad1799a4592cf0e897eafb46d9b57257b5732949e0280a094a58d1d01d3882a1e949b2d1b6fc1fd5e44df95bae9068b090677d76b6c307188da44dd4e343cef028090cad2c60e0196c73a74a60fc4ce3a7b14d1abdf7a0c70a9efb490a9de6ec6208a846f8282d878132b028080bb8b939b4401c03dbcbfd9fb02e181d99c0093e53aceecf42bf6ccc0ec611a5093fe6f2b2738a07f0280f092cbdd0801b98d30c27f297ae4cb89fb7bb29ed11adff17db9b71d39edf736172892784897488c5d0280d0dbc3f40201da9a353d39555c27a2d620bf69136e4c665aaa19557d6fc0255cbb67ec69bf2403b63e0280b09dc2df0101f8820caab7a5e736f5445b5624837de46e9ef906cb538f7c860f688a7f7d155e19e0ac0280808d93f5d77101b544e62708eb27ff140b58c521e4a90acab5eca36f1ce9516a6318306f7d48beddbc0280a0b6cef7850201abdd0a5453712326722427f66b865e67f8cdb7188001aaacb70f1a018403d3289fcb130280c0caf384a30201a2b32b2cfb06b7022668b2cd5b8263a162511c03154b259ce91c6c97270e4c19efe4710280c0caf384a302018db32bda81bfbe5f9cdf94b20047d12a7fb5f097a83099fafdfedc03397826fb4d18d50280c0fc82aa0201b2e60b825e8c0360b4b44f4fe0a30f4d2f18c80d5bbb7bfc5ddf671f27b6867461c51d028080e983b1de1601b2eb0156dd7ab6dcb0970d4a5dbcb4e04281c1db350198e31893cec9b9d77863fedaf60280e08d84ddcb0101e0960fe3cafedb154111449c5112fc1d9e065222ed0243de3207c3e6f974941a66f177028080df9ad7949f01019815c8c5032f2c28e7e6c9f9c70f6fccdece659d8df53e54ad99a0f7fa5d831cf762028090dfc04a01b4fb123d97504b9d832f7041c4d1db1cda3b7a6d307194aff104ec6b711cced2b005e2028080dd9da41701bef1179c9a459e75a0c4cf4aff1a81f31f477bd682e28a155231da1a1aa7a25ef219910280d88ee16f01facd0f043485225a1e708aa89d71f951bc092724b53942a67a35b2315bfeac4e8af0eb0280d0dbc3f40201c4e634d6e1f3a1b232ef130d4a5417379c4fcc9d078f71899f0617cec8b1e72a1844b60280f092cbdd0801f6b9300c8c94a337fefc1c19f12dee0f2551a09ee0aaa954d1762c93fec8dadae2146c0280c0f9decfae0101ce8d09f26c90144257b5462791487fd1b017eb283268b1c86c859c4194bf1a987c62bf0280c0caf384a30201cead2bbb01653d0d7ff8a42958040814c3cbf228ebb772e03956b367bace3b684b9b7f0280a0e5b9c2910101c1b40c1796904ac003f
7a6dd72b4845625e99ba12bdd003e65b2dd2760a4e460821178028080e983b1de160186e9013f55160cd9166756ea8e2c9af065dcdfb16a684e9376c909d18b65fd5306f9690280a0e5b9c2910101aeb70c4433f95ff4cdc4aa54a1ede9ae725cec06350db5d3056815486e761e381ae4d00280c0a8ca9a3a01ebe2139bd558b63ebb9f4d12aca270159ccf565e9cffaadd717ce200db779f202b106f0280d0dbc3f40201b9963568acf599958be4e72f71c3446332a39c815876c185198fa2dcf13877eba3627b0280c0f4c198af0b01bccc01408cbb5a210ad152bd6138639673a6161efd2f85be310b477ae14891870985f90280a0b6cef7850201cadd0a82e7950f5d9e62d14d0f7c6af84002ea9822cdeefabbb866b7a5776c6436636b028080d287e2bc2d01d888013a7146b96a7abc5ce5249b7981fb54250eef751964ff00530915084479b5d6ba028080d287e2bc2d018c8901d8cc1933366dceb49416b2f48fd2ce297cfd8da8baadc7f63856c46130368fca0280a0b787e90501b6d242f93a6570ad920332a354b14ad93b37c0f3815bb5fa2dcc7ca5e334256bd165320280a0e5b9c2910101a3ac0c4ed5ebf0c11285c351ddfd0bb52bd225ee0f8a319025dc416a5e2ba8e84186680280c0f9decfae0101f18709daddc52dccb6e1527ac83da15e19c2272206ee0b2ac37ac478b4dd3e6bcac5dc0280f8cce2840201b7c80bc872a1e1323d61342a2a7ac480b4b061163811497e08698057a8774493a1abe50280e0bcefa75701f38b0e532d34791916b1f56c3f008b2231de5cc62bd1ec898c62d19fb1ec716d467ae20280c0fc82aa0201d6da0b7de4dc001430473852620e13e5931960c55ab6ebeff574fbddea995cbc9d7c010280c0f4c198af0b01edca017ec6af4ece2622edaf9914cfb1cc6663639285256912d7d9d70e905120a6961c028090cad2c60e01cad43abcc63a192d53fe8269ecaf2d9ca3171c2172d85956fe44fcc1ac01efe4c610dd0280809aa6eaafe30101a92fccd2bcadfa42dcbd28d483d32abde14b377b72c4e6ef31a1f1e0ff6c2c9f452f0280a0b787e9050197d9420ce413f5321a785cd5bea4952e6c32acd0b733240a2ea2835747bb916a032da7028080d287e2bc2d01c48a01a3c4afcbbdac70524b27585c68ed1f8ea3858c1c392aeb7ada3432f3eed7cab10280f092cbdd0801abb130d362484a00ea63b2a250a8ae7cf8b904522638838a460653a3c37db1b76ff3de0280e08d84ddcb0101cc980fce5c2d8b34b3039e612adeb707f9ab397c75f76c1f0da8af92c64cd021976ff3028080dd9da4170198c217e4a7b63fd645bd27aa5afc6fae7db1e66896cece0d4b84ac76428268b5c213c30280a0b6cef7850201e3e20acab086a75
8c56372cf461e5339c09745ee510a785b540d68c7f62c35b8b6c5c10280a094a58d1d01ab842ac0ba57e87a3f204a56cbecca405419e4c896c817c5ced715d903a104a09735660280e0a596bb1101ade921f1ef2fe56c74320ceb1f6c52506d0b835808474b928ad6309398b42434d27f3d0280d0b8e1981a01dab515d684a324cff4b00753a9ef0868f308b8121cbc79077ede84d70cf6015ec989be0280c0ee8ed20b01f99c227da8d616ce3669517282902cdf1ef75e75a427385270d1a94197b93cf6620c15028080dd9da41701d2d0175c8494f151e585f01e80c303c84eea460b69874b773ba01d20f28a05916111a0028090dfc04a01a4f312c12a9f52a99f69e43979354446fd4e2ba5e2d5fb8aaa17cd25cdf591543149da0280d0dbc3f40201969c3510fbca0efa6d5b0c45dca32a5a91b10608594a58e5154d6a453493d4a0f10cf70280f8cce2840201ddcb0b76ca6a2df4544ea2d9634223becf72b6d6a176eae609d8a496ee7c0a45bec8240280e0bcefa7570180920e95d8d04f9f7a4b678497ded16da4caca0934fc019f582d8e1db1239920914d35028090cad2c60e01c2d43a30bbb2dcbb2b6c361dc49649a6cf733b29df5f6e7504b03a55ee707ed3db2c4e028080d287e2bc2d01c38801941b46cb00712de68cebc99945dc7b472b352c9a2e582a9217ea6d0b8c3f07590280e0a596bb1101b6eb219463e6e8aa6395eefc4e7d2d7d6484b5f684e7018fe56d3d6ddca82f4b89c5840280d0dbc3f40201db9535db1fb02f4a45c21eae26f5c40c01ab1bca304deac2fb08d2b3d9ac4f65fd10c60280a094a58d1d01948c2a413da2503eb92880f02f764c2133ed6f2951ae86e8c8c17d1e9e024ca4dc72320280c0ee8ed20b01869f22d3e106082527d6f0b052106a4850801fcd59d0b6ce61b237c2321111ed8bdf47028080d194b57401acd20b9c0e61b23698c4b4d47965a597284d409f71d7f16f4997bc04ba042d3cbe044d028090cad2c60e0194b83ac3b448f0bd45f069df6a80e49778c289edeb93b9f213039e53a828e685c270f90280a094a58d1d01bdfb2984b37167dce720d3972eaa50ba42ae1c73ce8e8bc15b5b420e55c9ae96e5ca8c028090dfc04a01abf3120595fbef2082079af5448c6d0d6491aa758576881c1839f4934fa5f6276b33810280e0a596bb1101f9ea2170a571f721540ec01ae22501138fa808045bb8d86b22b1be686b258b2cc999c5028088aca3cf02019cb60d1ffda55346c6612364a9f426a8b9942d9269bef1360f20b8f3ccf57e9996b5f70280e8eda1ba0101aff90c87588ff1bb510a30907357afbf6c3292892c2d9ff41e363889af32e70891cb9b028080d49ca7981201d65ee875df2a98544318a5f4e9aa70a799374
b40cff820c132a388736b86ff6c7b7d0280c0caf384a30201dab52bbf532aa44298858b0a313d0f29953ea90efd3ac3421c674dbda79530e4a6b0060280f092cbdd0801c3ab30b0fc9f93dddc6c3e4d976e9c5e4cfee5bfd58415c96a3e7ec05a3172c29f223f0280a094a58d1d01a2812a3e0ec75af0330302c35c582d9a14c8e5f00a0bf84da22eec672c4926ca6fccb10280a094a58d1d01ca842a2b03a22e56f164bae94e43d1c353217c1a1048375731c0c47bb63216e1ef6c480280e08d84ddcb0101d68b0fb2d29505b3f25a8e36f17a2fde13bce41752ecec8c2042a7e1a7d65a0fd35cdf028090cad2c60e0199ce3afa0b62692f1b87324fd4904bf9ffd45ed169d1f5096634a3ba8602919681e5660280c0f9decfae010193ed081977c266b88f1c3cb7027c0216adb506f0e929ce650cd178b81645458c3af4c6028090cad2c60e01eec13a9cce0e6750904e5649907b0bdc6c6963f62fef41ef3932765418d933fc1cd97a0280c0ee8ed20b019ea8228d467474d1073d5c906acdec6ee799b71e16141930c9d66b7f181dbd7a6e924a028080bb8b939b4401c23d3cb4e840ad6766bb0fd6d2b81462f1e4828d2eae341ce3bd8d7ce38b036ac6fb028080e983b1de1601b9e601e3cf485fa441836d83d1f1be6d8611599eccc29f3af832b922e45ab1cd7f31d00280f092cbdd0801fc9e30fff408d7b0c5d88f662dddb5d06ff382baa06191102278e54a0030f7e3246e7c0280d88ee16f01bfcd0f96a24f27ac225278701c6b54df41c6aa511dd86ce682516fb1824ff104c572fb0280f092cbdd0801cbb430bd5f343e45c62efcd6e0f62e77ceb3c95ef945b0cff7002872ea350b5dfffef10280c0caf384a30201bfb22b14dccbba4582da488aef91b530395979f73fa83511e3b3bcb311221c6832b18d0280a0b6cef7850201c4dc0a31fb268316ab21229072833191b7a77b9832afea700a1b93f2e86fb31be9480f028090cad2c60e01cab63a313af96a15c06fcf1f1cf10b69eae50e2c1d005357df457768d384b7a35fa0bb0280d0dbc3f40201fe9835fffd89020879ec3ca02db3eadbb81b0c34a6d77f9d1031438d55fd9c33827db00280d0dbc3f40201d19b354a25ddf2641fc7e000f9306df1c6bf731bddfe9ab148713781bbfab4814ed87e0280e08d84ddcb0101ba960fec80e1dcda9fae2d63e14500354f191f287811f5503e269c9ec1ae56cef4cd110280a0b787e90501acde42b0bdfd00ab0518c8bd6938f0a6efab1b1762704e86c71a154f6d6378dd63ce840280e0bcefa75701b5900eedc2ce12618978351788e46fefe8b9b776510ec32add7423f649c613b9db853a028080e983b1de1601edeb01d68b226cd6b71903a812aa6a6f0799381cf6f7087
0810df560042cd732b26526028080f696a6b6880101ca18a91fd53d6370a95ca2e0700aabc3784e355afcafb25656c70d780de90e30be31028090cad2c60e0184c03adc307ee3753a20f8f687344aae487639ab12276b604b1f74789d47f1371cac6b0280c0fc82aa0201a2dc0b000aa40a7e3e44d0181aaa5cc64df6307cf119798797fbf82421e3b78a0aa2760280e8eda1ba0101daf20caa8a7c80b400f4dd189e4a00ef1074e26fcc186fed46f0d97814c464aa7561e20280c0f9decfae0101a18b092ee7d48a9fb88cefb22874e5a1ed7a1bf99cc06e93e55c7f75ca4bf38ad185a60280a094a58d1d01dff92904ef53b1415cdb435a1589c072a7e6bd8e69a31bf31153c3beb07ebf585aa838028080bfb59dd20d01916c9d21b580aed847f256b4f507f562858396a9d392adc92b7ed3585d78acf9b38b028080a2a9eae80101fab80b153098181e5fabf1056b4e88db7ce5ed875132e3b7d78ed3b6fc528edda921050280d88ee16f019fcf0fd5f4d68c9afe2e543c125963043024fe557e817c279dbd0602b158fe96ec4b6f0280e0bcefa75701d1910e44b59722c588c30a65b920fc72e0e58c5acc1535b4cad4fc889a89fccfa271510280d0dbc3f40201b78b358b066d485145ada1c39153caacf843fcd9c2f4681d226d210a9a9942109314d90280e0bcefa75701a88b0e5f100858379f9edbbfe92d8f3de825990af354e38edc3b0d246d8a62f01ab3220280d0dbc3f40201959135c6a904269f0bf29fbf8cef1f705dde8c7364ba4618ad9ee378b69a3d590af5680280e0a596bb1101edeb2140e07858aa36619a74b0944c52663b7d3818ab6bf9e66ee792cda1b6dd39842e0280c0a8ca9a3a01aae213a90a6708e741f688a32fb5f1b800800e64cfd341c0f82f8e1ca822336d70c78e0280c0fc82aa02018ddf0b5e03adc078c32952c9077fee655a65a933558c610f23245cd7416669da12611e0280f092cbdd0801aca0305b7157269b35d5068d64f8d43386e8463f2893695bc94f07b4a14f9f5c85e8c50280e0bcefa75701b18f0efd26a0ad840829429252c7e6db2ff0eb7980a8f4c4e400b3a68475f6831cc5f50280b09dc2df0101a6830c2b7555fd29e82d1f0cf6a00f2c671c94c3c683254853c045519d1c5d5dc314fb028080bb8b939b4401be3d76fcfea2c6216513382a75aedaba8932f339ed56f4aad33fb04565429c7f7fa50280c0ee8ed20b01b4a322218a5fd3a13ed0847e8165a28861fb3edc0e2c1603e95e042e2cbb0040e49ab50280c0caf384a30201ecb42b7c10020495d95b3c1ea45ce8709ff4a181771fc053911e5ec51d237d585f19610280f092cbdd0801eba3309533ea103add0540f9624cb24c5cecadf4b93ceb39aa2e541668a0dd2
3bf3f2f028090dfc04a01a6f3121520ad99387cf4dd779410542b3f5ed9991f0fadbb40f292be0057f4a1dfbf10028090cad2c60e019ac83a125492706ba043a4e3b927ab451c8dccb4b798f83312320dcf4d306bc45c3016028080a2a9eae80101b4ba0bd413da8f7f0aad9cd41d728b4fef20e31fbc61fc397a585c6755134406680b14028080d49ca798120192600ef342c8cf4c0e9ebf52986429add3de7f7757c3d5f7c951810b2fb5352aec620280a0b787e90501afe442797256544eb3515e6fa45b1785d65816dd179bd7f0372a561709f87fae7f95f10280a094a58d1d01dc882aacbd3e13a0b97c2a08b6b6deec5e9685b94409d30c774c85a373b252169d588f028090dfc04a0184f81225e7ded2e83d4f9f0ee64f60c9c8bce2dcb110fd2f3d66c17aafdff53fbf6bbe028080d287e2bc2d01d18901e2fd0eeb4fe9223b4610e05022fcc194240e8afe5472fceda8346cb5b66a0a5902808095e789c60401cf88036cf7317af6dc47cd0ce319a51aaaf936854287c07a24afad791a1431cbd2df5c0280c0f9decfae0101999909d038b9c30a2de009813e56ba2ba17964a24d5f195aaa5f7f2f5fefacd69893e80280a0e5b9c291010199b10cf336c49e2864d07ad3c7a0b9a19e0c17aaf0e72f9fcc980180272000fe5ba1260280a0b6cef7850201a2e20a7a870af412e8fff7eba50b2f8f3be318736996a347fa1222019be9971b6f9b81028090dfc04a01bae5127889a54246328815e9819a05eea4c93bdfffaa2a2cc9747c5d8e74a9a4a8bfe10280f8cce284020191da0b24ee29cd3f554bb618f336dd2841ba23168bf123ee88ebdb48bcbb033a67a02f0280f8cce2840201e6c30b2756e87b0b6ff35103c20c1ddb3b0502f712977fd7909a0b552f1c7dfc3e0c3c028080e983b1de16018fed01a3c245ee280ff115f7e92b16dc2c25831a2da6af5321ad76a1fbbcdd6afc780c0280e0bcefa7570183920ef957193122bb2624d28c0a3cbd4370a1cfff4e1c2e0c8bb22d4c4b47e7f0a5a60280f092cbdd0801ccab30f5440aceabe0c8c408dddf755f789fae2afbf21a64bc183f2d4218a8a792f2870280e08d84ddcb0101f8870f8e26eacca06623c8291d2b29d26ca7f476f09e89c21302d0b85e144267b2712a028080aace938c0901b0b1014c9b9fab49660c2067f4b60151427cf415aa0887447da450652f83a8027524170580b09dc2df01028c792dea94dab48160e067fb681edd6247ba375281fbcfedc03cb970f3b98e2d80b081daaf14021ab33e69737e157d23e33274c42793be06a8711670e73fa10ecebc604f87cc7180a0b6cef78502020752a6308df9466f0838c978216926cb69e113761e84446d5c8453863f06a05c808095e789c60402edc8db5
9ee3f13d361537cb65c6c89b218e5580a8fbaf9734e9dc71c26a996d780809ce5fd9ed40a024d3ae5019faae01f3e7ae5e978ae0f6a4344976c414b617146f7e76d9c6020c52101038c6d9ccd2f949909115d5321a26e98018b4679138a0a2c06378cf27c8fdbacfd82214a59c99d9251fa00126d353f9cf502a80d8993a6c223e3c802a40ab405555637f495903d3ba558312881e586d452e6e95826d8e128345f6c0a8f9f350e8c04ef50cf34afa3a9ec19c457143496f8cf7045ed869b581f9efa2f1d65e30f1cec5272b00e9c61a34bdd3c78cf82ae8ef4df3132f70861391069b9c255cd0875496ee376e033ee44f8a2d5605a19c88c07f354483a4144f1116143bb212f02fafb5ef7190ce928d2ab32601de56eb944b76935138ca496a345b89b54526a0265614d7932ac0b99289b75b2e18eb4f2918de8cb419bf19d916569b8f90e450bb5bc0da806b7081ecf36da21737ec52379a143632ef483633059741002ee520807713c344b38f710b904c806cf93d3f604111e6565de5f4a64e9d7ea5c24140965684e03cefcb9064ecefb46b82bb5589a6b9baac1800bed502bbc6636ad92026f57fdf2839f36726b0d69615a03b35bb182ec1ef1dcd790a259127a65208e08ea0dd55c8f8cd993c32458562638cf1fb09f77aa7f40e3fecc432f16b2396d0cb7239f7e9f5600bdface5d5f5c0285a9dca1096bd033c4ccf9ceebe063c01e0ec6e2d551189a3d70ae6214a22cd79322de7710ac834c98955d93a5aeed21f900792a98210a1a4a44a17901de0d93e20863a04903e2e77eb119b31c9971653f070ddec02bd08a577bf132323ccf763d6bfc615f1a35802877c6703b70ab7216089a3d5f9b9eacb55ba430484155cb195f736d6c094528b29d3e01032fe61c2c07da6618cf5edad594056db4f6db44adb47721616c4c70e770661634d436e6e90cbcdfdb44603948338401a6ba60c64ca6b51bbf493ecd99ccddd92e6cad20160b0b983744f90cdc4260f60b0776af7c9e664eeb5394ee1182fb6881026271db0a9aad0764782ba106074a0576239681ecae941a9ef56b7b6dda7dbf08ecafac08ab8302d52ee495e4403f2c8b9b18d53ac3863e22d4181688f2bda37943afbf04a436302498f2298b50761eb6e1f43f6354bdc79671b9e97fa239f77924683904e0cf6b1351d4535393a9352d27b007dfda7a8ae8b767e2b5241313d7b5daf20523a80dd6cc9c049da66a5d23f76c132a85d772c45b4c10f2032f58b90c862f09f625cbd18c91a37bb3fc3a413a2e081618da845910cf5b2e6bffea555e883b0bb9c5f9063380a1c33ebdb764d9ffefe9e3169de40b18eeb9bfca48296457bb0b4e29d7b2b5bc4e0021ba0a1d389f77a8e253d6db149d400f675a9330f3bcfd09c
7169224a947b6b4e0745ae08cd7adea4151277a94f51f85292ba082cf28300cca233ff4966b093c9cb6abcef476026040fec2b435021aff717b8bb90a40950e010f70bb416a618dc3c5c03f590c5b7ec8e0c05b85ba94078de4817918f783022364b8aa228b5df43b38fba3060c30616f265022584ab6034ddbc832450f90047d0cf41a4af8a20fb1aa66406133a17c2e905ee28d8acd186c872859c196db0474dfaaaded2d63768143cf6b5e2e34662f7bae573a08cb15069ef881892e5a0c08b5c6c7b2e6376cd2080fb29e8d3d5aa5b853662b4f1784ba7f072130e4dc00cba3cc9278fc4213f2ce2fc82bd1ea9fa91bb17b4f7c36962c78d864eab9f30ef327039da6607962a156a05c384a4a58ddd8f51a0d4fe91f64ae7b0a5199110a66f1e676392ec8d31b20a65f7a7fcff90b37a8a3962bff0c83ee6033a70c5b0af663ca48a8f22ced255839444fc51f5b6a6c1237eda5804289aa25fc93f14d0d4a63cecfa30d213eb3b2497af4a22396cc8c0e7c8b8bb57be8878bfc7fb29c038d39cf9fe0c964ebac13354a580527b1dbaced58500a292eb5f7cdafc772860f8d5c324a7079de9e0c1382228effaf2ac0278ebedad1117c5edacf08105a3f0905bca6e59cdf9fd074e1fbb53628a3d9bf3b7be28b33747438a12ae4fed62d035aa49965912839e41d35206a87fff7f79c686584cc23f38277db146dc4bebd0e612edf8b031021e88d1134188cde11bb6ea30883e6a0b0cc38ababe1eb55bf06f26955f25c25c93f40c77f27423131a7769719b09225723dd5283192f74c8a050829fc6fdec46e708111c2bcb1f562a00e831c804fad7a1f74a9be75a7e1720a552f8bd135b6d2b8e8e2a7712b562c33ec9e1030224c0cfc7a6f3b5dc2e6bd02a98d25c73b3a168daa768b70b8aef5bd12f362a89725571c4a82a06d55b22e071a30e15b006a8ea03012d2bb9a7c6a90b7fbd012efbb1c4fa4f35b2a2a2e4f0d54c4e125084208e096cdee54b2763c0f6fbd1f4f341d8829a2d551bfb889e30ca3af81b2fbecc10d0f106845b73475ec5033baab1bad777c23fa55704ba14e01d228597339f3ba6b6caaaa53a8c701b513c4272ff617494277baf9cdea37870ce0d3c03203f93a4ce87b63c577a9d41a7ccbf1c9d0bcdecd8b72a71e9b911b014e172ff24bc63ba064f6fde212df25c40e88257c92f8bc35c4139f058748b00fa511755d9ae5a7b2b2bdf7cdca13b3171ca85a0a1f75c3cae1983c7da7c748076a1c0d2669e7b2e6b71913677af2bc1a21f1c7c436509514320e248015798a050b2cbb1b076cd5eb72cc336d2aad290f959dc6636a050b0811933b01ea25ec006688da1b7e8b4bb963fbe8bc06b5f716a96b15e22be7f8b99b8feba54ac74f080b799ea3a7599daa7
23067bf837ca32d8921b7584b17d708971fb21cbb8a2808c7da811cff4967363fe7748f0f8378b7c14dd7a5bd10055c78ccb8b8e8b88206317f35dcad0cb2951e5eb7697d484c63764483f7bbc0ad3ca41630fc76a44006e310249d8a73d7f9ca7ce648b5602b331afb584a3e1db1ec9f2a1fc1d030650557c7dbc62008235911677709dea7b60c8d400c9da16b4b0a988b25e5cf26c00c3ef02812def049bb149ea635280e5b339db1035b7275e154b587cc50464a4c0bfd15c79f54faa10fbe571b73cf1aa4a20746b11c80c8c95899521fe5f0bb3104b0a050c55a79511e202fee30c005339694b18f4e18ab5e36ea21952a01864a0e067d9f19362e009a21c6c1a798f7c1325edd95e98fd1f9cb544909fdf9d076070d1233e183fb6d46a46fbc6e10452ef4c45fa0b88a84962ad6e91cbcc52bc000b12a82e93ae5998b20ee9000a8ef68ec8a44862cc108869fd388142692be6b0657e3fe79eff0e8b72f63aeec5874acf5fb0bfc9fa22645ed6ecaaf186eca690ecdf8a71b8f4789ac41b1f4f7539e04c53dd05e67488ea5849bf069d4eefc040273f6018819fdcbaa170c2ce078062b7bbe951d2214b077c4c836db85e1b138059c382ab408a65a3b94132136945cc4a3974c0f96d88eaa1b07cce02dce04ea0126e6210a9543129bb8296839949f6c3867243d4b0e1ff32be58c188ba905d40e32c53f7871920967210de94f71709f73e826036b4e3fa3e42c23f2912f4ea50557dff78aeb34cb35444965614812cbe14068a62be075fce6bf3310b9e8b12e0dd8379104360f728d47a327c172257134e2c0e7c32e01321f4d636f9047bd750e7993eeda7d39fc16f29696b1becee4d8026e967f8149935b947fce8517b2ce02b7831a232f3a29010129c49494ed2b84c7f881b7e4b02a00ebabf5a36023c404002d6cb88cee76c8ce97b03143ca867359d7e118d54e053b02c94998e6fd8409f8d46fc1741a2e56aebb1e7dab7ca3296a2566263d9be2f4bbef4872a49ee1082cbaf86e21b0c232c4182fc660f0c0b6aaeb0393750e553bc406e2a27842bd033da45a562ed1998ef9bd83e35ed813bef00a3e6147cb363bee63c543ba5e770b043dbacc155214a2496f91879bbc9170a2a513d7b48fad40c8c2d96f951e3a0932f6d12956789198430b352803852aa9726163fbe839979b33f8dbf7f76cd50755c1ce0c40a072aeec35057d06abaf59e878000b1d796e51908bfbf23b13900dcb30f9bd52b52994e7245a7017653a404a70d1c444b8c613ff10a2b057c02d062c5faf13cdc4445809fb6e096923cdbbdca18f59318ff86c7e449f596050b404d3cde0338dfdf9b1389178b1d4c70eefa2bbd76fefc1ee1f1688ef507821e40ae31d8d8e673d183b54563e2cbd27e0e04
2f61b046877d37a68c1b5784830690f2dd4ebbbd2dbdb35800b9e0ba8ea985fa106dd2ce8493e845586716c538ee9008b88a7c482f3c00c14c08468230d40cdc040e145282c4d61985cb5800306e305146204f63e96ad194bcdf1338ab8480341b6fbccf18fc32145f84bece4069c09e41096e94c24fa4f0db988e860a3bff3604143f2b17e8c219f28189e4cd49a0e506fe62dc419299bcd78c6ccb107f63eb31b4bd8ea1e2fed10e3ac17341d3505019e2376b01f7a7fcea3db110fb090c681c866ac86f13e6f8d44a32861e0580def063736b5c771b2b3b9067045867b4393f3eb2a4610bd0216e29906aaac370986451c6bf78264dda7e7a5fcbcf7bd6e024ff6003c6db780d89b97765cee8d0ff3ff25d94d4b4b919f722b26a6903a017daa62af387843087680c57952de06064de05b662af87be49b6e34cf0991cec7be3396e2eec9678ba259bd8de1c192014d02928f9113488215658df4078ed661fa4e79e58decaeb0ee5a00488b094b0b77f083b2b7844f481e7788ffe8004b96ccdf853532bfd9632a8a652c2d97d10173c90864fbb6facf47fae415df4acc0b099140a657b35d083d74dbdfbf107303e74c64471bed4b2199f2babcb4e1fc593d6f309e21f85e68ffd9904731559d0f2b673b36d3984e5d66d897dfa17d601edef3ed78cb70dc5115d4ae240c203e031263f0cf1e98075bac0361fde24cbcb852b8055d53ae01d61a0a1e1ba423d00833747e7364df7ebfd1f84598d801c249e1805279dc37d39fc7f7e27b067e4e0287aec432ed49e4d701a0ff377e88179968430d110cb20476ed4c6bf1624d1907ef24406d3295fcacde2a102cc85f4f3d0cb87a8fae7535a06e442833e58cfc04242ff85fb654d05f9874c0a6756f542db4e9d8b0366191fbb8b09a1bbcb6af04c069978417ca80d92f442b7dbd092f74e1268aa73b54e4b64e84543449ecd30b5ea392a1669a5f441d7208925e91c75df611cd26042630c6b98f160b8c0156048108d5465b71bbc54d31a9f90e34428d97590a427e1ae618d4a35fc1022d4e007c6108dcb1672b88d43ae4d886a5adcc26faf56bc5e5a0b08342fb88263fd80940d1edf794c6ad6d339b974e164b38439e11b4fa87cc793b080b4f8bf0eb56043f79ed3911da21092475fcf8320b55b9f558f194c6c8121b2e696039340d97057be2583726d762b5ae4327e5286a2d8c14ddbe0027c75aacbf7e9de13037390df7d72e13b46bc06bad0363b070e0174d034120d7fa7b4550e7dc28f7f0241f059ae266fc13dccd1d07f744208a7d6a2e565b6613d46e4550f79ef3209c46a805b97284df558719e131f44e419e690f4fc28ee4862b9d1f8f7e1a164ac18141076087693e70ac76a10f7851530d4cbc65def90d5544671ad64
249569c3abf0200d09be3c63efaa7cb723b39ccffc9b3a3ba0c8426847123d2a881efbd4937a40cb3e8011c70ba427f80b3dc91a608086f8b61f8bd86fcb482ea388299a7cfbd00a3ddfadb4b6d0e51c1369276c25889a9f3592900b6502d9af1c732e1fb7db307d71e45deb1553ba1568d0480ea9e132b52564da6ac5c56eff823e7f37976cd075ce8f7a78aaef1b87f7437a0b84035677f266f7d0596e493101fec3e14fcf80b22322454587b51fda0636231c07d4e63e007f1b89137d8a37b03bf00f3a7c10169f757d9a74b7bffba797c746e3845decc3d0559d7cf6f08f3bd67dac5f33109b212582dc7df5d561ad63dddc794f2aea4e493db1a73c702d258c9b922c35d04c47f88f87c54c821a29f04abd91a079ce8cef252a21dc72d409fd1618c9be709af029ba98b0140e74666fcb01bced4f88ab68e6b63b8ed6febc0905d22cb2200493c071ce136833697406f8a04e77b21747dda997046cf3c7080096fe481790d77cf5904e7f7128ed95a6e576d109fdf10eb0c888db35a4a685b62253987b70fb1538e6c0932889460fa31c60d123266b7bcb828f846a35b2851127679b05f05a75266529c343a6075e54c455e02b2c83e6f7bf1ae23326506a5f532472d780815c5af425f7d8b543a8f014966e0538f48ca84d181695381a09701eb65c9ae084bf2a4dc84f1b2071be32be25d5f4fcdc59668fd800496ef7eb6dddf867ab908e543cb51f0451706cce4be6b9f68a537a79ea88e17fcd78965b3da68c0d9d30623a2a9e275e1c320f59e118e09c02eee527167bc06f7693e7b584e3b25ecc1093d46b80a1cacced87c2b32e2f90c5bbb9cd1b701aae69a04b16d535fac6eab0d091790fc5fdfa8a8842bfcb62dbf963cbf62c4afb4c468be98c770e6078b8c0a8cfcbae43dcfff17d3c6d587c3e4309fd39c66acd14781fea66fc57278b02302c0fa386280e67acff19955b6a428b0e22ceb1e54e913a37cd19eb6e9d2268a039f2b5fdda7d5804db79385f0e50082b128c952f8dfdedc4411d0675d95127f0bfc01710a869b10d7a8b9e632dad944062567439e6d192fb09329d058e87ecd0aa8981328f541e87ed02cfe4031f2d3a046ff517a2a80486b04ade31a647aec0884fb96ed753ffc47892431c6e6f08fd1c633a1a44e882d3d8b92c567e0fb8305327a354851464ca0f18d89c6ee2a91a4afef0c55883acf8fcb68c2c3b7402e005d8affc19c13f1f26fee0698dff181ab22cb84a2b31e0a6a81dc5d02e60a3c07090397ae58a985526b2ad6ee5725e82328062b68566b4871705ce3b9856e550d068c20fd9aaeb27740c07aad53d79fc20e46e40e7103e2d69626ee64b6aa600f6f1a86f37948ff4990d88f43c34994e2fe586cb779997be323da53329c10480ae
b08fe440e9e4b979171371c73b94da9f928a3f6c8f6792f782f3d6432b86d06f54557327fef31fd6ae0a3f6d2f16c9ad947d132e14def33fa24cb4565370e0832fa50f5f5f93c9f3d65776cc22608b68a4f3719e9be47a19432991e4a2c49089c0ea20e7f7c73feaa47970da424d8543d80d622e2f2be9f4c65cc39dc369009a9d41a52bdea7cc0e8e04da87a633fd4f814fda1b646121a469ba0b5b8006d0e9118761d97b5d1856e2d690f27a81c42b176df853d07cf4a66ee83c9eb24ac0a382f5143a10a33ec3ddf17dcd8a8303fac8f279d31b4d04d74bd8804cefbb400c86174ad444e43ed33ee1e1e73f660b9814d5ca3cb1d650f1978a825a617bb05f84eab3b9b8359b991e1084cf4e8179ecb67f92398638e31227ff63427b67f0f232b454a341d85d4b56e31135c9035e231f7d9318ca12b5ab524f87bb0ca9b04b80effed202897ab016d5acc054c4fe62a5f0192f136cf2cd714998a4b164b0c2cdbace52243fdc9ea879b0d247d4fe8bd80481fad6b325cb5f2cfa2534dec0e47d41b6b99352e6e5faccb5ee28ca2fe96e04f9c83a0461ba34cfb499d864f05dc734b6c8f51cc0c994290b2a868cb8312df39fb6d457a81e62d872c65d4f3007094be42663bca3d64ebbcc8401158fce4f5a38a49c20f029c338f126451820459866e77c6984a467aad571cc7452392a9cb9f8e65099fff2f2acd170a833e01ed3d22a683356ee42dcbe6bab6d05d3edda2d40e9ba53884d430c2e0cd87c0067dc8cb68c868bd9f29db1dd73703c139ffc15e4f7264e727c70560ae02da100871f30e8a7c1275805f621752d73aafecddc2a7808b6c2ecbb8d0134a644bb603f30f8d18b3fc5efaa7f206ce180bfb14f5dbd3b0115145a227113eeaf1c1ec04244227b931388e72960833a40baa4319c5cf74aa94f7e3233e1d09f0a4f74409999684ad1cc836ac74563c85b164664dfab08ea084b25e2cbd7e7b94a781a10fcd455ee38b65126dcc52016127fd195c80b05660ed931e128b0cb92868955c0d032ada9fb951210a5442d21c1718ebc4702ad4a57967e15a63ffb05e1e072a0c41ebdf1e7205373eeaf4587f695de887fa3a8c96b8deb99e040fa1fc4dc2a402a017891943d734ae2f3798b22b1269d3d9f6d65581b9c637a6896a4fb554810bbd3db5c5737391a74150b43413b2e3824490b7911cbeb845147f1a8521620b0dd31306f13a9754a01bcdbd18bfdeade06b0ec97f48df56c45d3670a1fe18d00ef13e613c8a77aeb40401a814b377137cf44f29cb2cb94186ad1161ecb05a7c07837a5ab3474e57990cff2ab16b4d99f62e646da28e8bb712a5b561cf0e25be039c3e08583c8ebc3dd2fdb8fdc6e135ecc7851c73218a70b75e697cc84ea50504b9c34a33ed52f8723
0b9d192a940f3b7bb6d45b58dbf52f0afeb8dac85c77b06bdf9b70a10cb81c50055c9d8cf7e3a5c4b7dfae55beabcb3e8a8a1cb822d8d0bf6c01e32056929f853021eae6c97fdb0c5031df6b2e7c57f1318866769a9cc09c38ed62d8bf4663334c0df67c47236ed73f6ce7f54e0ada9270398c1aa558d0f993b0d25d97aea77b1635ee4832362cd590bae5fc1549402ddcd42b15efc930111a01535c0242116078d6d2d53b8612d378c4370e90d0d01b01bd7da591bec07981652a98485d8ed5c8f3def2bdac7d992ee5fc6a1ec7bd36940e1bc58c7050451248fc3ee6069e6b1b0d3ef122c6ef2a9b99aa0f145fb43341c58dbb472130b51730c956273a3ef6df9e000f6a87c2bacdefcdb5daef28b6170f61bc3a9c101f439755c86e6b85ee06a7a60688b3843eb359cd4acd9221a2ee131e2fd2e190652e5c47c0b98c41010eb99a991ec48a5de99cc8f403d6d76f8307d6657c1e007ebd64eec7bbd0d4f1ba2db7bb0efe27c7828f053e00def775943ab01a7e33d0fffcfe6f9a7285237f2c381b638758e373f8ceac672190664bb25fb5d355c240bd1773d61bda7f7ef1f4261b80ff5058ec6f7e024ab9459b1103815624b81f80c39db2f6fecb72de452b11636b0f71b16cb55f883d93bebb94328f13ef1ab6d0df449e32d27884f5139af584035547dace65ee25ba05cc461e74760d4468af90dcaa982e52cb902e2b84b3324019da575601ca54e91655913892e703257deaa01d14fd8459ff780c724161ba4d4280b70a5039dcfb5d775560714009724cb0d0b7e178c71e777b896bcfcde7d4c9c3dc6ab819d74a1a1fda8486448b1ad79be02fb134ea93a8600f1bc2a42e68d0213ab461a07cef3ad3965bc130beb76bab409102f82bf6c4cd626f6df3388e17b87584310c50832cde3191f6557f0014bdc0a68d924119e43111043bc6f26d16a5f2612dae6ab24984e2d87a71d93d5f4670dba2176d4f16633407bf7c10b51b6842dfdbc6fe3eaa4b6a12f0550700ece070ca382dec3b587e0e1fc317a48a83754d15aaf9a6971b8cb641fd8b32846d89002e6301700a0e7056e8002d8f269d29ebaf64f4493b1f1e676fc78e673067fb00625df15dc0490235b386ee14e55b335f3bc6dcedd7d3a80fd3a6e9bc2ccf3af0d89be71b5ca92bd7a9b97b9ff8976f75702419aa5bf9be34600496ca1bfa8ad0400602a23579365574252434f2bcd7efb360b0e8a495e8f7e78923b6fbf2207049e9179f0d4d7d6b4a4a10ca10f0ef4dd6cb5a74f7574e832044d6120fbc1580a68eddfbc65ab300bed960a6f24a102dc36b72937a8be4385daf5946e81ccde0619251babbff17e5685217a134d22f6130d0322483b3475227ffd27adc73ca202a6debfa37e5731747f4449ac70a33684f
460eede65918c6d89acf4b50fd28d040ffbd436a944d3be0210606bfc2301e7ac66d462dba29a0489eb55af714a760e5302592cccc726e535b945ceb6126eb84e31f0f140ff54df8be0fa3a22f418036ad996787a5616a97a42049ebce351dc11857cab3dc914ef26833b0e75653004a8cafea099fb0750135255c41ef43e2f29c75714e2f0be2545e7c109b70c43004a471daa85b47befc65907d033f133b2f3ac2ad568df630ee80506610b8dc9052d442668dc06b13ea76ab1ab7b34870341d660af5d3007c21bb72512e4f8a60d8916a037b93f9e15ac9e4a6a1246d73ebb40e5fdd5a0d6dc0cf175023b891301f69fe5a3ca6f12cb8312d16333de1cd3ebb99339ab18c0715bfcd35b8365b407ad759e2c591d8270ad335381573e27ec18af7ca157b4a2bbca921db083d9b0009dc332a79dde14354a8c18bce76a1bfc1a25a1e702ccaa0feb521ee9279b8a01ceab6e237bbe4128b23cb53b1e5185f3266e20670a307ea0cfb5377025e0bb0790d48f1636c8b836c1a1f69ad61265f19057197e86cd526da6ddb94fd1ece80b60852f27ef2ce56ccb5a32d8cab6d16be06f380dfde3602ea4c1ae927173b2001ff0d9e29bc66b2b2a20c3e3ac174fcba187aacab0876c1356d30d4021e6dd0048c3bdfbf254108bb09d3ca9f2be423a92408bca52fbcd68f972c46fc8d20e0350d12c2f2d6c7da85e96bcec3ce61119793d44a210f81ece859fef6360ae3b0e1af0634fc141a8b50b3b383fb264e8a4fb84ea06db6becbf5e140edf66ee190da8968da579eb349fedea45e4c252a79570501278bab5fa984d7b1179d7c2460faa7beafee153bbae0a591701632aa94839528d3ef50cf809c1f7209b9e5c99010eaff7f921c45b6546358ee7a90948e3c710cd3e1796860839a345516fdf4f07c415029627abe1273a1f510c36a662562d18169b23305b4efadfefbfbb41a400ab533e61c14cafa49bc5d2818058ee4f3e1aeb329e150820d1de1f1eaaad31051a6dfdd3a1d5cec7b16bc0ea2c649d409917faa42138b1f824b4d534a050be0a99ea6772daf0b2e58623cc7a250ef37599bd556508f08886e663ef0917ecd3077072c3268ea5b9b89cbb6b761ee9f9c4765d8b267d9eb19728a28ce67a42ed0cad142b5dc0fc5313853860ec3f0ee2bc3d47cbe12dbe9633db809967d5b8bf0e45574eac657059530c30aeeade1e4f858a4a6e79d6e441b4af0127a13340d908d48cfec849ee93d53b1564231f048d34885e791a9d40c61a7b00f12f6f72a5050bcaabcc98480170ea6e20bef6b5c6f504c808108454fe2f3c275bf8f89a5e0a3304a7c4787e6d4fbe569930f7cfd38ab7d1d2ebd599bbb411950cec3e53b90cefb82234990d353c71ce4c21ef674a1c4070f71c90e1ea7ed
f35f5a421118f01b49a92ea97720e2d4df6b5885c181002656629a90eaa1904fe1c379b8291480ca15d0dc2b65a20c22f1e01d612d21ecb5738e5ebfc578a4a65066ee6e913e3030d3fdfb0168fd75022492728ee82869deb9ff2827f4e10759ecddb20f67e9808e707257a74d3dc0a6068f264066f95c9f772a3dcec0b4f0a327e3745517ad60ccbc5392890d2479b724d068fcdb83607e02291c06e1a5a1dac7604889cce2500f418da2f7080a7e9a1bdf28b87028a2bbb0c14f059f10f46d46716eac2cdfc06676cbec91b8c2c0f7c9bea7e27fa5048662398b23a9b488a49e1d3330c04e60179a4492c8b836780899899d2af17e6119a94d54a890ce8c0b550e87fd54cba0821fa7c48f6e09a60dcbddf853f82b47195aa44a5ceae14a9257296acd711c8073ff3345befca5d3ebf64901b283df96395fe9785d7090176bfe5a9f13ceae701c6c93af0e13d949bc3c7e9b06674a73e7affc508302258a27fb34569c3742201c0721aef282a31c69a5d98a67ac5c3d920d50c089896f7f8c8c237a81f803f0444f417246d695e89a3a523b62a3cd2203d42607cac7c7782dec1f9edbb806c0a7a37d1a969082a126bc726151a50233456a07d374399e74aaa8cc66821511d092615950d302e815cfcc021e1250cdea20fd9e1e4b5e88280d6e4283b918e780d12cbba59ef2ed2ce86135a48fba6c0dc2bf2efee190d9a3f9aa22a622b1953058f2bb3a371637d13e045d54e7eb54c0d25851f49283d7d34e9785d2d5c3f70086c48a8325a2083bdf5b3531fcc697cc0c9f63892a866c84585d673a2a63fd60e77995bfb0c0a44a4b63c0ff67e813027d3e84cddd393a0f4e6bc95525c5ae20eed9d0cea4a12aa748eb5209cfd75990b055f1ad0472f9f7599f569a8743a720755aa11555df4bb2e725fa93bc5dea603a964e8dc9fb1742e81825022866fc50a6b2a19b6a234a38ee27a74f2f5832b294143ca7ff8d07fd7d4e01f479e9792058871d90ee3aaa3329e82cebe41dff5e6d00a36268a7965466b80c6510ac1350cee797e1d6737f6aaff155266d2a2d611b2124affed1ac73a6a06515627b2230ce0d7fed33ecbde511f4d472cbc556cc8d9c5640e67657035112976b626847a0a4ea5fdd14d4a3eed57f0dfbe153393d8bd28c8b4f9e62940e8379790393fa20c617050c780a7d870193b4611bd7a12d26947a3cf4605e225da8b1646a76984015a5e317016a4d8301eaeec0db3ae0daa719182e2f4479154dbcccfcce1f365099de6c91934c395ce82abba8062a51d7773b418330921766cd3d275c689098e06039698db6f09accb292e7eb79e7a022d4257bb2f9ed993c519860919bc229a06ad88954c9ebf7f5b9fe95cf56e8181cb9175dac06be0be70fd28df20cdb4600ef
0869668c645c9ea01360fdae7c922cb3d2b3583ae1de5ae7d899a83ff2bb00d7365c782a0fccfcba7f87bb29416469bb051f9b0755123e0f2fa76dc7644b70e452f49a84bc372b384c843b8161b7f9b63699adcadd5cb2b33b36c7eb3e1b00f25218bc16447968b939016242fceaebd796c17a24d1b9870991a9c3ae90e380302b7bb320adacc08cfb9249d29cd9275c52476dac6a7e9870ee3776cbc3352036f9c8f681d44856c6c5f90b7cde0877472ddd48719c449f59dca1f49442f7505e4809c6d323b37530ecccf3e41e19822f53d64dc90efb113405ee88799c37f0a342293b5bfc019a9057138326de6107b5613554dffc737aed7237fb16cd77e09f581d12220ac930c6ca279efd1d07a92125fb2606ec3ec35351987a15fc72806cfb3cb66fce8dcfabee5c1e586bf0f802fa12ae5ad5a708e3a5d54e1926dbd0202bf1150f1bb612b9a4590b5b520b86a90860ec3d9c2184f9975ced15ae1300882d9918021b43a1184ba88ddd7091539fe5a7017b8708d0f5c916f9c42de5103f8116863864b508f5880ca60b7492385c16a02b6ceb64d257a4838873b85d2041517c5c7c4508e4d5a5faa72729d73af0361e11828eeca992b8f20d903a5ef065976a9f322e34bd4b3984bb09e18be40e77e833c8c1a2e80093227d3f40d4a067f5e3aee9fce9bd234bb6ff4d0c34fc060d23e86b1f5a6d8d052e53e913182052a2d9c5e97bb0e0a51bb2fafbe7346bacfcbadb00ce2ba129f29d41a11f7d105cf19bb60b5f5b0dfd6a894698ef7f56a02d69cc03eb62a56563d3a77e3ac2302"], + "untrusted": false +}"#; +} + +define_request_and_response! { + get_alt_blocks_hashes (other), + GET_ALT_BLOCKS_HASHES: &str, + Request = +r#"{}"#; + Response = +r#"{ + "blks_hashes": ["8ee10db35b1baf943f201b303890a29e7d45437bd76c2bd4df0d2f2ee34be109"], + "credits": 0, + "status": "OK", + "top_hash": "", + "untrusted": false +}"#; +} + +define_request_and_response! { + is_key_image_spent (other), + IS_KEY_IMAGE_SPENT: &str, + Request = +r#"{ + "key_images": [ + "8d1bd8181bf7d857bdb281e0153d84cd55a3fcaa57c3e570f4a49f935850b5e3", + "7319134bfc50668251f5b899c66b005805ee255c136f0e1cecbb0f3a912e09d4" + ] +}"#; + Response = +r#"{ + "credits": 0, + "spent_status": [1,1], + "status": "OK", + "top_hash": "", + "untrusted": false +}"#; +} + +define_request_and_response! 
{ + send_raw_transaction (other), + SEND_RAW_TRANSACTION: &str, + Request = +r#"{ + "tx_as_hex": "dc16fa8eaffe1484ca9014ea050e13131d3acf23b419f33bb4cc0b32b6c49308", + "do_not_relay": false +}"#; + Response = +r#"{ + "credits": 0, + "double_spend": false, + "fee_too_low": false, + "invalid_input": false, + "invalid_output": false, + "low_mixin": false, + "not_relayed": false, + "overspend": false, + "reason": "", + "sanity_check_failed": false, + "status": "Failed", + "too_big": false, + "too_few_outputs": false, + "top_hash": "", + "tx_extra_too_big": false, + "untrusted": false +}"#; +} + +define_request_and_response! { + start_mining (other), + START_MINING: &str, + Request = +r#"{ + "do_background_mining": false, + "ignore_battery": true, + "miner_address": "47xu3gQpF569au9C2ajo5SSMrWji6xnoE5vhr94EzFRaKAGw6hEGFXYAwVADKuRpzsjiU1PtmaVgcjUJF89ghGPhUXkndHc", + "threads_count": 1 +}"#; + Response = +r#"{ + "status": "OK", + "untrusted": false +}"#; +} + +define_request_and_response! { + stop_mining (other), + STOP_MINING: &str, + Request = +r#"{}"#; + Response = +r#"{ + "status": "OK", + "untrusted": false +}"#; +} + +define_request_and_response! { + mining_status (other), + MINING_STATUS: &str, + Request = +r#"{}"#; + Response = +r#"{ + "active": false, + "address": "", + "bg_idle_threshold": 0, + "bg_ignore_battery": false, + "bg_min_idle_seconds": 0, + "bg_target": 0, + "block_reward": 0, + "block_target": 120, + "difficulty": 292022797663, + "difficulty_top64": 0, + "is_background_mining_enabled": false, + "pow_algorithm": "RandomX", + "speed": 0, + "status": "OK", + "threads_count": 0, + "untrusted": false, + "wide_difficulty": "0x43fdea455f" +}"#; +} + +define_request_and_response! { + save_bc (other), + SAVE_BC: &str, + Request = +r#"{}"#; + Response = +r#"{ + "status": "OK", + "untrusted": false +}"#; +} + +define_request_and_response! 
{ + get_peer_list (other), + GET_PEER_LIST: &str, + Request = +r#"{}"#; + Response = +r#"{ + "gray_list": [{ + "host": "161.97.193.0", + "id": 18269586253849566614, + "ip": 12673441, + "last_seen": 0, + "port": 18080 + },{ + "host": "193.142.4.2", + "id": 10865563782170056467, + "ip": 33853121, + "last_seen": 0, + "port": 18085, + "pruning_seed": 387, + "rpc_port": 19085 + }], + "status": "OK", + "untrusted": false, + "white_list": [{ + "host": "78.27.98.0", + "id": 11368279936682035606, + "ip": 6429518, + "last_seen": 1721246387, + "port": 18080, + "pruning_seed": 384 + },{ + "host": "67.4.163.2", + "id": 16545113262826842499, + "ip": 44237891, + "last_seen": 1721246387, + "port": 18080 + },{ + "host": "70.52.75.3", + "id": 3863337548778177169, + "ip": 55260230, + "last_seen": 1721246387, + "port": 18080, + "rpc_port": 18081 + }] +}"#; +} + +define_request_and_response! { + set_log_hash_rate (other), + SET_LOG_HASH_RATE: &str, + Request = +r#"{}"#; + Response = +r#" +{ + "status": "OK" + "untrusted": false +}"#; +} + +define_request_and_response! { + set_log_level (other), + SET_LOG_LEVEL: &str, + Request = +r#"{ + "level": 1 +}"#; + Response = +r#"{ + "status": "OK" + "untrusted": false +}"#; +} + +define_request_and_response! { + set_log_categories (other), + SET_LOG_CATEGORIES: &str, + Request = +r#"{ + "categories": "*:INFO" +}"#; + Response = +r#" +{ + "categories": "*:INFO", + "status": "OK", + "untrusted": false +}"#; +} + +define_request_and_response! { + set_bootstrap_daemon (other), + SET_BOOTSTRAP_DAEMON: &str, + Request = +r#"{ + "address": "http://getmonero.org:18081" +}"#; + Response = +r#"{ + "status": "OK" +}"#; +} + +define_request_and_response! 
{ + get_transaction_pool (other), + GET_TRANSACTION_POOL: &str, + Request = +r#"{}"#; + Response = +r#"{ + "credits": 0, + "spent_key_images": [{ + "id_hash": "563cd0f22a17177353e494beb070af0f53ed6d003ada32123c7ec3c23f681393", + "txs_hashes": ["63b7d903d41ab2605043be9df08eb45b752727bf7a02d0d686c823d5863d7d83"] + },{ + "id_hash": "913f889441c829e62c741c27614cdbb6278555b768fbd583424e1bb45c65e43b", + "txs_hashes": ["3fd963b931b1ac20e3709ba0249143fe8cff4856200055336ba9330970e6306a"] + },{ + "id_hash": "0007a41ed49aa2f094518d30db5442accaa7d3632381474d649644678b6d23c0", + "txs_hashes": ["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "05138378dedfae3adbd844cf76c060226aaeddcd4450c67178e41085d0ae9e53", + "txs_hashes": ["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "1cccfcece29fbd7a28052821fdd7aac6548212cab0d679dd779a37799111f9ec", + "txs_hashes": ["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "1eda8e08b1024028064450019b924eca2e3b3e3446d1ac58d0b8e89dc4ba980d", + "txs_hashes": ["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "38d739cfb68aba73f0f451c7d8d8e51ae8821e17b275d03214054cc1fe4f72d6", + "txs_hashes": ["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "40e57cb9a9f313f864eef7bf70dea07c2636952f3cbff30385ac26ee244a4349", + "txs_hashes": ["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "52418ac25be58fbfcc8bd35c9833532d0fa911c875fa34b53118df5be0b3ba48", + "txs_hashes": ["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "65bb760c9a31da39911fa6d0e918e884538f0a218d479f84a1c9cca2f9a5f500", + "txs_hashes": ["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "7d805459f05d89c92443f43863fa5a4d17241d936fc042cc9847a33a461090c5", + "txs_hashes": 
["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "88f7594b26dcbaff22f7e7569473462c49d8fb845aa916d7a7663be8b85b8553", + "txs_hashes": ["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "a2b08a090f611ea1097622cc63a49256a2d94a90b8dbaaa5e53a85001c86d55a", + "txs_hashes": ["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "a5ebf4914f887ecdfde8e7ef303a7f2cc20521a2a305ba9a618e63d95debfb22", + "txs_hashes": ["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "c5b7d94e661c5eb09714b243f3854cc06531b1085442834c9e870501031b73da", + "txs_hashes": ["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "987605d678e8bfb17e8d2651e8dd5c69c73c705d003c82e4e35d2b5b89c9ebe3", + "txs_hashes": ["7c32ac906393a55797b17efef623ca9577ba5e3d26c1cf54231dcf06459eff81"] + },{ + "id_hash": "ca559feaf79de4445ca4d2bcc05883b25ecff2f6dd8fd02a9a14adea4849f06f", + "txs_hashes": ["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "d656ac13a64576e7af5ca416d99b899b0bafef5e71d50e349e467fa463b13600", + "txs_hashes": ["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "dc006e92fc1e623298b3415ddccfc96a8cae64cb7c9199505a767a16ddd39bb9", + "txs_hashes": ["b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986"] + },{ + "id_hash": "fb3e7cc08761a6037ca29965f27d2a145f045da5a1018ca7e6a5a5a93dbbd33d", + "txs_hashes": ["c072513a1e96497ad7a99c2cc39182bcb4f820e42cce0f04718048424713d9b1"] + },{ + "id_hash": "4ffd1487bf46e5a1929ca0dd48077cb8ddbff923e74517f1aeb7c54317c0fd68", + "txs_hashes": ["88504bd6a72b26bccbc7563efe365baeedb295011a4022089bdc735f508a9412"] + },{ + "id_hash": "f64056280ede74b3b1fe275cf9b9aa1feda77b3b5fd5218d6a765384e3d180ff", + "txs_hashes": ["88504bd6a72b26bccbc7563efe365baeedb295011a4022089bdc735f508a9412"] + },{ + "id_hash": 
"d2ed8513f48724933df6229a9fb6ededdcf5d0963280ee44fa9216ceebe7941f", + "txs_hashes": ["a60834967cc6d22e61acbc298d5d2c725cbf5c8c492b999f3420da126f41b6c7"] + },{ + "id_hash": "428be79097b510e49fe5b25804029ac8bfa5e2a640a8b0e3e0a8199b1d26f22f", + "txs_hashes": ["d696e0a07d4a5315239fda1d2fec3fa94c7f87148e254a2e6ce8a648bed86bb3"] + },{ + "id_hash": "368fbc77179fb30bf07073783f6ef08bfb1a8c096e9bd60bb57aead3b0f3663d", + "txs_hashes": ["9d1bcbdb17d24a4e615a9af7100da671ab34bffc808da978004dcef86ddf831e"] + },{ + "id_hash": "45a88adb7fcac982f5f4d8367f84e0f205235f58ad997f5dfa4707192fd3d9e0", + "txs_hashes": ["9d1bcbdb17d24a4e615a9af7100da671ab34bffc808da978004dcef86ddf831e"] + },{ + "id_hash": "6d80d9c12f1439b0a994f767d71d98d2d2cde1a54c6a6134a00c2f07135d98cf", + "txs_hashes": ["dbabb82a5f97d9da58d587c179b5861576144ea0cd14b96bef62b83d8838f363"] + },{ + "id_hash": "2c479dbff819502441604a914af485db2f795b7f5bc0eab877d60a1419ee5498", + "txs_hashes": ["aef60754dc1b2cd788faf23dd3c62afd3a0ac14e088cd6c8d22f1597860e47cd"] + },{ + "id_hash": "a7f204f932169b1b056fc63be06db8ec91a436f7188a30545bcd6a8bae817ca4", + "txs_hashes": ["8a6ebea82ede84b743c256c050a40ae7999e67b443c06ccb2322db626d62d970"] + }], + "status": "OK", + "top_hash": "", + "transactions": [{ + "blob_size": 2221, + "do_not_relay": false, + "double_spend_seen": false, + "fee": 44420000, + "id_hash": "88504bd6a72b26bccbc7563efe365baeedb295011a4022089bdc735f508a9412", + "kept_by_block": false, + "last_failed_height": 0, + "last_failed_id_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "last_relayed_time": 1721261656, + "max_used_block_height": 3195160, + "max_used_block_id_hash": "2f7b8ca3dbd64cb33f428ece414b2b1cef405cfcd85fab1a70383490cc7ed603", + "receive_time": 1721261656, + "relayed": true, + "tx_blob": 
"020002020010f0bcd533b7d71bdb8915caa004b3a214f99f0993a303fd9804d1f101aa6c870de32a932ab774f80fc92cf64056280ede74b3b1fe275cf9b9aa1feda77b3b5fd5218d6a765384e3d180ff020010bc85cd27e4bfb407c598a104bc5e8e8c2d9bfc40add114c0e501b09e04edb204d1a901d2f4019603d50d9f07c4354ffd1487bf46e5a1929ca0dd48077cb8ddbff923e74517f1aeb7c54317c0fd68020003bbb37ea2c935e3c7245150d154748b59bdf280d557779b6cf8063165a7d9b5b9210003ffc23770cf9e4536c0db95978dbc937d1de339cb8dedf909f5adf7ce6fb8c4ea5b2c01d3b65a92cbd04597cc4f3da6a003ca5309af343f2d0f190102fdd7ed13e5b9ee020901d979920cfa70c6ef06a0979715e6d4eb51a032a1511050389f0d8eafbe5f91af7e37907d37de11bae31af65af35e1fabe13cece1d503a9987d7f4813c8ad2b6ef2b5e77281113637ace74d9ac41f16f431a1a49e6d8de1b73fe877c5c301fcaac875ca31629041923a1bb014f86a232227c41e1611f5961a0e095c6201ea34a7b7367b0045c3a841d57ebac2a9b323ab21f6d954f4441fc79fd98414e5c4acdbff571108da31d0face012eb149b24bc16de858b4cb8d96fdc0bb614cc2895a6859cdeb086e647983308714da41be9ada21abfbec1ed4d224315017dacf01c5b2a59d18ac5c3b81c9bfd5c031b9929c1dc802a22593bdea39612039601c0e09f64702dde1507e3daef5655b0f1f32e19fcbcbaeea6b495fed05543cbb65010730de65cd66a314cfdbe7474a387045b3000dd43eedc021ed492075d314da6d8c6a3905275d41cdc8758e258c4a71a64d2ba1aec68b7ad68018aa8fdcf97538898c61392ded8e0715ddd471638be54eda62622f5787cafc577da4ff7dda01578982328c51f59ad3d9218eb0d3201d1136d54e7567e15c3f8bc956772bee20f5976b0f343096ab4a0c2b68099bca4d61eff7a078c91875483213f4cd226b587b5c12bf7a41abc9079e274e6229187f4c3cc1a8579f60f2a8112aafa78eaefa765ed7588be97d471720979fa5b907c5b83be30d62d5a2b0b9a59f1330dface4cddd07f591829caac227efef5fe5076e3fa93dc9a787be8f57c3d2ec216342784321c80b956f44dec2d484500371f9a4fdad1de571f16d2cccca13f2f3bb65718dc4a861276d08d11bc72536b787537aa0b26d68462500baa1b5b47a1ff669346481ac5c0d2199d6197dfc9c74cdd6adf13e06223af430e48bcafce9cd8765ae6411d5a3ff2c8827ca2fb9ec63cfd0c84c2e1cc76d2fb0a3f9619034adc3d0fa60b729fa3352433a1f4f2c7bbb51fc61673b61833f70d8700446442d57e0a6fde600fc1cd0d659f8b6b6ca8e320395b831d2b79b95d006c2fc5afb72635
535ce1e953d9e70a0022ac9091cb5810450d72edf9bff63c2b64933e0d69881b6ae9c9bc402b11bcd2ca24cea5171ce4040398ba42f87dec9791ac7376adce1cb47be22bc0ba083395b214bf88c5e81357eb95b461c1ea4c814357b5fa7dda0b7083a3360089af604f25927d738dfb3806a559285f04435a9245356051d27cbb0f5c020f40612a4ed7e2d124a8d6dc7d2e39127f5d66bbcad0bd8af4cf173b89283d09c610ec53ffb5b2e0aaedad8700de5555decda90d9b1f022ec0f1cabe3627ea89bb80420d73a60f7f33886541626e5aa0cb758bc9775a80c2427bd9fd373ce1492f90e93d0cd063f1233975d5ae4d732970c686b21850cd4146e5775af2a3b48f6920f021bc91e59b79200bacd2ebef0e2045ae01d7287f14a3ef08de813492f92c6034a7ef9b7a74660125a9761379d3495eb8cbd5e0461a0ae90ac7bb4dd6ee57902158dab0fbe495663b5536ac17e444ea5d6a5ec67f2145d5ac6eb033fe3ed88023133d514073957b3ab9506b375b52ab4e25df97c81f210d2a5d0ac2fd625c00522ee9cecb7dc0c84a47eef9338472bfa766f0bf5919be6a5b56bf5b84f1be1058d570d7ea8f372c5c253c189f006b314d377610bbc9a41fc162b3df3d860d904c74d0451fccecdcb8c0fdb66f55f10a955f49406f16c6ce397b78af25dcbfa001a1cc63df1dfcd2918dba5e64532af7a24f3a95c722815ad2192f488fe8da9080f8c295fdf955dbfe98666d411605e11598745385d7b639d8aed5b5499ffd007143ff548f1f2956da85253ed716d16f7ed1ba3ed100426e2a81dfa2bdd952f06997389359aef4673cff1fcf634c4261c3f8a028c25712896381ef8e88b53ce0996cd93d9bfc6fa1a578554d1b0767962bbfb88f553d5bb129cf18ed93685b50d60d8e13ef8c06f14e7f4fb212ac28be059f83bd3c375220c4368d405ecc9f601a31d48f081ab49014d562c39b464f850af6679daffddb75935f4bdf2d8735a013df11848f92dca8088339595f99024bb766c19e863175c0234157738925e4c0f4b5a83686667e9711547b3a2a96946fc126a026cdfc477de41e6c85835dad80a1a370f59950b9c9759595425609d6371d41801098202cb87ff96fcfb0247730bb2178497eeb94f794d151fd5393082ec7a0359b409b7508303493f749723780159badacf201cb6bf41691ba3ed894dc4a3b22a3a829ff13a349256379e65b108b53c62247b27176ae5d22295393e1856372f1a89fa7d364173647ba296b76c04950e768eec5f38634e8cf3beff55bd7ce266b5bebe5b854a02cad0307b0b670433bdbf8bc2631430773465cb091c7a666709285816e6d503acd5e649045d3c07baad6c71df6b89a0481a1fe7f45f6aa8837625ccb43ed5f9e8e9f7daf2cdee0e7f1ba
a61b625d9f5d0c9ee49605d403afa625927e9bf41d7e5d48b454dc1520b19e1c35dd25fc5dff641ea05bc2b6b5697485f96bd3664f90077c567923d4f0404107251310935d78e2d06471962f23277b5207500917d528aeef43e2b670c03e614bd3ee3fd58b486a3d0a2494916785325e6546dec8fb880cc401319a7f90b4d3832853ea6e0ae698543e20975eaa9c6606068f2465bade18d110a6937e199229fc569e4dbb09f253fdc89279b76b70f3bf61d6808d7fa8ff438c795464101a97d68d18f240c9d7137f2db1d38013ba94cf478338fa0353b2a5cbf937f00bb", + "tx_json": "{\n \"version\": 2, \n \"unlock_time\": 0, \n \"vin\": [ {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 108355184, 453559, 345307, 69706, 332083, 151545, 53651, 68733, 30929, 13866, 1671, 5475, 5395, 14903, 2040, 5705\n ], \n \"k_image\": \"f64056280ede74b3b1fe275cf9b9aa1feda77b3b5fd5218d6a765384e3d180ff\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 83051196, 15540196, 8932421, 12092, 738830, 1064475, 338093, 29376, 69424, 72045, 21713, 31314, 406, 1749, 927, 6852\n ], \n \"k_image\": \"4ffd1487bf46e5a1929ca0dd48077cb8ddbff923e74517f1aeb7c54317c0fd68\"\n }\n }\n ], \n \"vout\": [ {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"bbb37ea2c935e3c7245150d154748b59bdf280d557779b6cf8063165a7d9b5b9\", \n \"view_tag\": \"21\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"ffc23770cf9e4536c0db95978dbc937d1de339cb8dedf909f5adf7ce6fb8c4ea\", \n \"view_tag\": \"5b\"\n }\n }\n }\n ], \n \"extra\": [ 1, 211, 182, 90, 146, 203, 208, 69, 151, 204, 79, 61, 166, 160, 3, 202, 83, 9, 175, 52, 63, 45, 15, 25, 1, 2, 253, 215, 237, 19, 229, 185, 238, 2, 9, 1, 217, 121, 146, 12, 250, 112, 198, 239\n ], \n \"rct_signatures\": {\n \"type\": 6, \n \"txnFee\": 44420000, \n \"ecdhInfo\": [ {\n \"amount\": \"e6d4eb51a032a151\"\n }, {\n \"amount\": \"1050389f0d8eafbe\"\n }], \n \"outPk\": [ \"5f91af7e37907d37de11bae31af65af35e1fabe13cece1d503a9987d7f4813c8\", \"ad2b6ef2b5e77281113637ace74d9ac41f16f431a1a49e6d8de1b73fe877c5c3\"]\n }, \n \"rctsig_prunable\": {\n 
\"nbp\": 1, \n \"bpp\": [ {\n \"A\": \"fcaac875ca31629041923a1bb014f86a232227c41e1611f5961a0e095c6201ea\", \n \"A1\": \"34a7b7367b0045c3a841d57ebac2a9b323ab21f6d954f4441fc79fd98414e5c4\", \n \"B\": \"acdbff571108da31d0face012eb149b24bc16de858b4cb8d96fdc0bb614cc289\", \n \"r1\": \"5a6859cdeb086e647983308714da41be9ada21abfbec1ed4d224315017dacf01\", \n \"s1\": \"c5b2a59d18ac5c3b81c9bfd5c031b9929c1dc802a22593bdea39612039601c0e\", \n \"d1\": \"09f64702dde1507e3daef5655b0f1f32e19fcbcbaeea6b495fed05543cbb6501\", \n \"L\": [ \"30de65cd66a314cfdbe7474a387045b3000dd43eedc021ed492075d314da6d8c\", \"6a3905275d41cdc8758e258c4a71a64d2ba1aec68b7ad68018aa8fdcf9753889\", \"8c61392ded8e0715ddd471638be54eda62622f5787cafc577da4ff7dda015789\", \"82328c51f59ad3d9218eb0d3201d1136d54e7567e15c3f8bc956772bee20f597\", \"6b0f343096ab4a0c2b68099bca4d61eff7a078c91875483213f4cd226b587b5c\", \"12bf7a41abc9079e274e6229187f4c3cc1a8579f60f2a8112aafa78eaefa765e\", \"d7588be97d471720979fa5b907c5b83be30d62d5a2b0b9a59f1330dface4cddd\"\n ], \n \"R\": [ \"f591829caac227efef5fe5076e3fa93dc9a787be8f57c3d2ec216342784321c8\", \"0b956f44dec2d484500371f9a4fdad1de571f16d2cccca13f2f3bb65718dc4a8\", \"61276d08d11bc72536b787537aa0b26d68462500baa1b5b47a1ff669346481ac\", \"5c0d2199d6197dfc9c74cdd6adf13e06223af430e48bcafce9cd8765ae6411d5\", \"a3ff2c8827ca2fb9ec63cfd0c84c2e1cc76d2fb0a3f9619034adc3d0fa60b729\", \"fa3352433a1f4f2c7bbb51fc61673b61833f70d8700446442d57e0a6fde600fc\", \"1cd0d659f8b6b6ca8e320395b831d2b79b95d006c2fc5afb72635535ce1e953d\"\n ]\n }\n ], \n \"CLSAGs\": [ {\n \"s\": [ \"9e70a0022ac9091cb5810450d72edf9bff63c2b64933e0d69881b6ae9c9bc402\", \"b11bcd2ca24cea5171ce4040398ba42f87dec9791ac7376adce1cb47be22bc0b\", \"a083395b214bf88c5e81357eb95b461c1ea4c814357b5fa7dda0b7083a336008\", \"9af604f25927d738dfb3806a559285f04435a9245356051d27cbb0f5c020f406\", \"12a4ed7e2d124a8d6dc7d2e39127f5d66bbcad0bd8af4cf173b89283d09c610e\", \"c53ffb5b2e0aaedad8700de5555decda90d9b1f022ec0f1cabe3627ea89bb804\", 
\"20d73a60f7f33886541626e5aa0cb758bc9775a80c2427bd9fd373ce1492f90e\", \"93d0cd063f1233975d5ae4d732970c686b21850cd4146e5775af2a3b48f6920f\", \"021bc91e59b79200bacd2ebef0e2045ae01d7287f14a3ef08de813492f92c603\", \"4a7ef9b7a74660125a9761379d3495eb8cbd5e0461a0ae90ac7bb4dd6ee57902\", \"158dab0fbe495663b5536ac17e444ea5d6a5ec67f2145d5ac6eb033fe3ed8802\", \"3133d514073957b3ab9506b375b52ab4e25df97c81f210d2a5d0ac2fd625c005\", \"22ee9cecb7dc0c84a47eef9338472bfa766f0bf5919be6a5b56bf5b84f1be105\", \"8d570d7ea8f372c5c253c189f006b314d377610bbc9a41fc162b3df3d860d904\", \"c74d0451fccecdcb8c0fdb66f55f10a955f49406f16c6ce397b78af25dcbfa00\", \"1a1cc63df1dfcd2918dba5e64532af7a24f3a95c722815ad2192f488fe8da908\"], \n \"c1\": \"0f8c295fdf955dbfe98666d411605e11598745385d7b639d8aed5b5499ffd007\", \n \"D\": \"143ff548f1f2956da85253ed716d16f7ed1ba3ed100426e2a81dfa2bdd952f06\"\n }, {\n \"s\": [ \"997389359aef4673cff1fcf634c4261c3f8a028c25712896381ef8e88b53ce09\", \"96cd93d9bfc6fa1a578554d1b0767962bbfb88f553d5bb129cf18ed93685b50d\", \"60d8e13ef8c06f14e7f4fb212ac28be059f83bd3c375220c4368d405ecc9f601\", \"a31d48f081ab49014d562c39b464f850af6679daffddb75935f4bdf2d8735a01\", \"3df11848f92dca8088339595f99024bb766c19e863175c0234157738925e4c0f\", \"4b5a83686667e9711547b3a2a96946fc126a026cdfc477de41e6c85835dad80a\", \"1a370f59950b9c9759595425609d6371d41801098202cb87ff96fcfb0247730b\", \"b2178497eeb94f794d151fd5393082ec7a0359b409b7508303493f7497237801\", \"59badacf201cb6bf41691ba3ed894dc4a3b22a3a829ff13a349256379e65b108\", \"b53c62247b27176ae5d22295393e1856372f1a89fa7d364173647ba296b76c04\", \"950e768eec5f38634e8cf3beff55bd7ce266b5bebe5b854a02cad0307b0b6704\", \"33bdbf8bc2631430773465cb091c7a666709285816e6d503acd5e649045d3c07\", \"baad6c71df6b89a0481a1fe7f45f6aa8837625ccb43ed5f9e8e9f7daf2cdee0e\", \"7f1baa61b625d9f5d0c9ee49605d403afa625927e9bf41d7e5d48b454dc1520b\", \"19e1c35dd25fc5dff641ea05bc2b6b5697485f96bd3664f90077c567923d4f04\", \"04107251310935d78e2d06471962f23277b5207500917d528aeef43e2b670c03\"], 
\n \"c1\": \"e614bd3ee3fd58b486a3d0a2494916785325e6546dec8fb880cc401319a7f90b\", \n \"D\": \"4d3832853ea6e0ae698543e20975eaa9c6606068f2465bade18d110a6937e199\"\n }], \n \"pseudoOuts\": [ \"229fc569e4dbb09f253fdc89279b76b70f3bf61d6808d7fa8ff438c795464101\", \"a97d68d18f240c9d7137f2db1d38013ba94cf478338fa0353b2a5cbf937f00bb\"]\n }\n}", + "weight": 2221 + },{ + "blob_size": 2348, + "do_not_relay": false, + "double_spend_seen": false, + "fee": 56160000, + "id_hash": "9d1bcbdb17d24a4e615a9af7100da671ab34bffc808da978004dcef86ddf831e", + "kept_by_block": false, + "last_failed_height": 0, + "last_failed_id_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "last_relayed_time": 1721261653, + "max_used_block_height": 3195160, + "max_used_block_id_hash": "2f7b8ca3dbd64cb33f428ece414b2b1cef405cfcd85fab1a70383490cc7ed603", + "receive_time": 1721261653, + "relayed": true, + "tx_blob": "0200020200108df79b209386c61387df3decb508a9bb05acc3028d9902deaf01bf208d05ac06ac09b014fd05b501de0d45a88adb7fcac982f5f4d8367f84e0f205235f58ad997f5dfa4707192fd3d9e0020010ace4d528efdb8009c5f8ca01a6f859b4a204c6bc24e3c306d68a01b1e203d401cbc6038103cb61840ad204a40f368fbc77179fb30bf07073783f6ef08bfb1a8c096e9bd60bb57aead3b0f3663d03000300b4750401a83c37a01ad5e0a9404faacc1356e0b553d17f31e16e8b8570e86cea0003befd3d4f80d897085c68417d4af03a4c2520b5ff75c400d1d018ec0d4617e6770a00037b2e69e0187086e600aa2ee921c61ff6847ebf80512e5f86c0500a889da2e8205c210147aa7f30ddab6c818f008f074a4a20528d522fbed0be3f581a8f574e697876a00680dee31a0a24de88aa0fcb7e567bad3c08c3a7247b06d68e46efce9940d6036d4b2b7e03c383c26989ac5176cef29aa6592d688747a7bc1989d88311aaad63d60db25d64d04ed7d40ebbbd4b6adeff521ddc60c9b00594381c89b9d9e4afabcff8a906fe5120b70bd38328753d4cee997f92552087ba220aa427758f010c8c7c9eb2e43443be439372d50e3cad3141de924fa118bad635f8105a086eb741d4b609c9a7fca073984dd6b9561b8a5dd0bc1fefd32a839ba25fa34b2c3d5021cd4b2157a936ecd28cbd4ad243876c84a0c09447b0ecfcf216c7c9f7ae6ec8c58d694f185dc9c4d4115e5d8d58dbffec407c39cd455
c5842410c92ff9c0107359836b8b462f81e20852f2b14ad81c2931a6bc41097d824d175310f15d7890a99ac8ab0cf3e9cf0048da193a3a7706c824dbc98d3be3c7aee69d296c519380d085c64adc25186867f568afbd0ab351aeaebda94ff0832ea71771c255ff165669495211b42b692e6f6f745365f45baa6af535b6bf4868eb9699d7fd7b5d8a00a4c35bb4a24827cfc8d6f77f56a907a20dc80437a3f9cd3a4a889ad30eae65bfe3481af3a7f398933896be7bb06bce96c64113c3da8732c4f28f0a83b12ebc7ec329627128e8f6332b0c89fc1f18850786f594a7ef566a2970a4c308d5a0a7649dd81890916da41de7d8f74349fdea603f529d6aa77747ef2bda70d02688b4f121399f25ddbed7f3d2fe2cb136daa069b3548c9f0736057b68e249165662f8782f6cdbfe3e6a9eee2623dcb25eb492ad6ad825ae9917453c2772643e8bcb44b8122085661c1ea7185fdabc273bd62dffd938e1bee1b95c6a5940900331254bdb7f0361d66af8aa68615444bc98631b9cbee7ad6fb2c64fa92e42b472b7098acf2564206e152560f919105aee65e4116fd730f6201e639b585a0886262aef0e65db50b3413e399527b6a031081a272c0bdc89516b55107d186038fd8b2641690dbed6148e3084de6599e25bda1c719290dc31742b6863d3fcc57980ccb808215dd17a4b949f725231f2c29eabf7487e7c93bdf92cc5786c5614e77b2d2d10d9b9ccd475a2bf8992c52219480706e5676b40c203ee6799bca4636aeb7011875eb13dc2da7e6a777c7ccf246a19195e7aa4d7a617f0e5b7797a13228abfa6d288799b17702e263aa90ab2883591877afd734279ccf3442e40437401e4a6875b6603bb10db2503ff62c27a6ee89ba54efff4a15c149aae234cbdd165b6acd87f8f68d0408c3357e22b6ac83ea40186091d41ee82a467e6eedfb81065e1a6261493572820bb3a02732c48afe03a04344e108a16c6ed9b57f8bc70c0373c14e02aa58c39d081b9d63df16411a24065419e149dd5cf0d16c361d52077e372668658217ae0008052f944bf07460dc2650b271abbd3470f9d9f1151915b47f4e843c2cb66e8104b954b9b11711f668d700a7e8e73bba0e1f3325c7ee168c2e52fbbc42a9a37c0a5acfbd247117d89d3dec678cd2f1e5a358adba45cd96eb83bdefa574fe7ee70462e723e878f558b4ff133353015d12039d88ff4e4a42542274a29eec9d48f606f819b255f796a383db4db031d966f0512c0be65bcb570be40c5afeb301ced60bbd96cb11101317b582c5a4938f4b42aa4e4764923917fe03a849783821052701b601b95768bf304145239a0dcad54850a49431706b58139f92ab1aff4083ce021da8fbb5e19ef43bdcb40a61b67d615fdcc7d1ef9937efe0f64c65a6f
c7d3a0f656bc19201f87d279c7dec96160b8cd0001f5064609b3561259728e19b5da20e57181f2d5e36ab9edc5fe93b71fde30f262d4e60525df697dcd66d45f079da0e5d95974c8bff7e89d06a64c25714eaa1b65522218c996c7ddb6cf902519f340524e084e6b195f994bc9be4cbfd8e6e9c5a1e00551148e1f8b6c154b2115b1f088c5ab9747f5d6f49a819f0eb06a4277090292c3b14f219196d91a3d8333b11278b5ca04440eb42f46cca567d8a2c3e9fd2880078544e47a6e932a828b494a7056038560559de1bdefeda1e9da672b6e931f22d530a7dd7ff8586184b5d593d0ebed9c5263818225fb9db1e64cbfc0174f872a3ea5e92f314c87b96daa8e7a400742ea90a1c357194d610b998a0c07dbad2bd2541158439be096b8f59ad7dcc073345072ecdb181af27eecaa21642d23abca25c1cd6a45daec5d423c8c002f60a99e4218d23ae7bd8055f9d473a8de7a36d5c00c64eea4be99c751655ab904604992a602c49416d56179835e3c6c5d0e89aca53a372dc61cfd9d105c7c0bdeb02765e41608cec6275ba3b7c19f2dcecd8cf7c02eed764b953066535c9395ba50ea8e8223b185992e646d89d4ab71009b3244dd2c2949d98557b21ad8cd2f03d0f140613a8e1366fe9d73e2282e471274b7304b94dc5a99a84411c5504b86176042351e695642aa3608fb3c413e7221b153d2ba3c0f8adca0d9636b3f95922ed040c19b92c35505041a39b8c9492c08577c9689bebbf1955827ab7298779d9d50b861f800ae0baa6761569a92e7b74d2d9dde4d8c98ffb37737edba691f4c94a0c78ef2c281ffe9c1d7f290cc7c60efd992f2b436c73b04f920984a1e8b8256a053d938f9f385b823aeee4f760e7c4346a3be6921700c433664d6806deaa3a6c039a1dc227d0fb1d07b26156b4b998053b2d07e037c024eb305212852edea364010936723eed3db539fd792c5acfe56bf2444998a107cff83eb7e37504b05c990f78111416918e784305b77030f0e2a14124afce9bf4d285c09a77264b2cf85e7fe99253575d3a7dbca07223afc25c085bc2e45aea9b399d58339b4ac1edfcd5e9ef14d6f64ab94d4f94eca4765dfabac9c91a423283387f51301a31d35de5b61d", + "tx_json": "{\n \"version\": 2, \n \"unlock_time\": 0, \n \"vin\": [ {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 67566477, 40993555, 1011591, 137964, 89513, 41388, 35981, 22494, 4159, 653, 812, 1196, 2608, 765, 181, 1758\n ], \n \"k_image\": \"45a88adb7fcac982f5f4d8367f84e0f205235f58ad997f5dfa4707192fd3d9e0\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 
85291564, 18886127, 3324997, 1473574, 69940, 597574, 106979, 17750, 61745, 212, 58187, 385, 12491, 1284, 594, 1956\n ], \n \"k_image\": \"368fbc77179fb30bf07073783f6ef08bfb1a8c096e9bd60bb57aead3b0f3663d\"\n }\n }\n ], \n \"vout\": [ {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"00b4750401a83c37a01ad5e0a9404faacc1356e0b553d17f31e16e8b8570e86c\", \n \"view_tag\": \"ea\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"befd3d4f80d897085c68417d4af03a4c2520b5ff75c400d1d018ec0d4617e677\", \n \"view_tag\": \"0a\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"7b2e69e0187086e600aa2ee921c61ff6847ebf80512e5f86c0500a889da2e820\", \n \"view_tag\": \"5c\"\n }\n }\n }\n ], \n \"extra\": [ 1, 71, 170, 127, 48, 221, 171, 108, 129, 143, 0, 143, 7, 74, 74, 32, 82, 141, 82, 47, 190, 208, 190, 63, 88, 26, 143, 87, 78, 105, 120, 118, 160\n ], \n \"rct_signatures\": {\n \"type\": 6, \n \"txnFee\": 56160000, \n \"ecdhInfo\": [ {\n \"amount\": \"0a24de88aa0fcb7e\"\n }, {\n \"amount\": \"567bad3c08c3a724\"\n }, {\n \"amount\": \"7b06d68e46efce99\"\n }], \n \"outPk\": [ \"40d6036d4b2b7e03c383c26989ac5176cef29aa6592d688747a7bc1989d88311\", \"aaad63d60db25d64d04ed7d40ebbbd4b6adeff521ddc60c9b00594381c89b9d9\", \"e4afabcff8a906fe5120b70bd38328753d4cee997f92552087ba220aa427758f\"]\n }, \n \"rctsig_prunable\": {\n \"nbp\": 1, \n \"bpp\": [ {\n \"A\": \"0c8c7c9eb2e43443be439372d50e3cad3141de924fa118bad635f8105a086eb7\", \n \"A1\": \"41d4b609c9a7fca073984dd6b9561b8a5dd0bc1fefd32a839ba25fa34b2c3d50\", \n \"B\": \"21cd4b2157a936ecd28cbd4ad243876c84a0c09447b0ecfcf216c7c9f7ae6ec8\", \n \"r1\": \"c58d694f185dc9c4d4115e5d8d58dbffec407c39cd455c5842410c92ff9c0107\", \n \"s1\": \"359836b8b462f81e20852f2b14ad81c2931a6bc41097d824d175310f15d7890a\", \n \"d1\": \"99ac8ab0cf3e9cf0048da193a3a7706c824dbc98d3be3c7aee69d296c519380d\", \n \"L\": [ \"5c64adc25186867f568afbd0ab351aeaebda94ff0832ea71771c255ff1656694\", 
\"95211b42b692e6f6f745365f45baa6af535b6bf4868eb9699d7fd7b5d8a00a4c\", \"35bb4a24827cfc8d6f77f56a907a20dc80437a3f9cd3a4a889ad30eae65bfe34\", \"81af3a7f398933896be7bb06bce96c64113c3da8732c4f28f0a83b12ebc7ec32\", \"9627128e8f6332b0c89fc1f18850786f594a7ef566a2970a4c308d5a0a7649dd\", \"81890916da41de7d8f74349fdea603f529d6aa77747ef2bda70d02688b4f1213\", \"99f25ddbed7f3d2fe2cb136daa069b3548c9f0736057b68e249165662f8782f6\", \"cdbfe3e6a9eee2623dcb25eb492ad6ad825ae9917453c2772643e8bcb44b8122\"\n ], \n \"R\": [ \"5661c1ea7185fdabc273bd62dffd938e1bee1b95c6a5940900331254bdb7f036\", \"1d66af8aa68615444bc98631b9cbee7ad6fb2c64fa92e42b472b7098acf25642\", \"06e152560f919105aee65e4116fd730f6201e639b585a0886262aef0e65db50b\", \"3413e399527b6a031081a272c0bdc89516b55107d186038fd8b2641690dbed61\", \"48e3084de6599e25bda1c719290dc31742b6863d3fcc57980ccb808215dd17a4\", \"b949f725231f2c29eabf7487e7c93bdf92cc5786c5614e77b2d2d10d9b9ccd47\", \"5a2bf8992c52219480706e5676b40c203ee6799bca4636aeb7011875eb13dc2d\", \"a7e6a777c7ccf246a19195e7aa4d7a617f0e5b7797a13228abfa6d288799b177\"\n ]\n }\n ], \n \"CLSAGs\": [ {\n \"s\": [ \"02e263aa90ab2883591877afd734279ccf3442e40437401e4a6875b6603bb10d\", \"b2503ff62c27a6ee89ba54efff4a15c149aae234cbdd165b6acd87f8f68d0408\", \"c3357e22b6ac83ea40186091d41ee82a467e6eedfb81065e1a6261493572820b\", \"b3a02732c48afe03a04344e108a16c6ed9b57f8bc70c0373c14e02aa58c39d08\", \"1b9d63df16411a24065419e149dd5cf0d16c361d52077e372668658217ae0008\", \"052f944bf07460dc2650b271abbd3470f9d9f1151915b47f4e843c2cb66e8104\", \"b954b9b11711f668d700a7e8e73bba0e1f3325c7ee168c2e52fbbc42a9a37c0a\", \"5acfbd247117d89d3dec678cd2f1e5a358adba45cd96eb83bdefa574fe7ee704\", \"62e723e878f558b4ff133353015d12039d88ff4e4a42542274a29eec9d48f606\", \"f819b255f796a383db4db031d966f0512c0be65bcb570be40c5afeb301ced60b\", \"bd96cb11101317b582c5a4938f4b42aa4e4764923917fe03a849783821052701\", \"b601b95768bf304145239a0dcad54850a49431706b58139f92ab1aff4083ce02\", 
\"1da8fbb5e19ef43bdcb40a61b67d615fdcc7d1ef9937efe0f64c65a6fc7d3a0f\", \"656bc19201f87d279c7dec96160b8cd0001f5064609b3561259728e19b5da20e\", \"57181f2d5e36ab9edc5fe93b71fde30f262d4e60525df697dcd66d45f079da0e\", \"5d95974c8bff7e89d06a64c25714eaa1b65522218c996c7ddb6cf902519f3405\"], \n \"c1\": \"24e084e6b195f994bc9be4cbfd8e6e9c5a1e00551148e1f8b6c154b2115b1f08\", \n \"D\": \"8c5ab9747f5d6f49a819f0eb06a4277090292c3b14f219196d91a3d8333b1127\"\n }, {\n \"s\": [ \"8b5ca04440eb42f46cca567d8a2c3e9fd2880078544e47a6e932a828b494a705\", \"6038560559de1bdefeda1e9da672b6e931f22d530a7dd7ff8586184b5d593d0e\", \"bed9c5263818225fb9db1e64cbfc0174f872a3ea5e92f314c87b96daa8e7a400\", \"742ea90a1c357194d610b998a0c07dbad2bd2541158439be096b8f59ad7dcc07\", \"3345072ecdb181af27eecaa21642d23abca25c1cd6a45daec5d423c8c002f60a\", \"99e4218d23ae7bd8055f9d473a8de7a36d5c00c64eea4be99c751655ab904604\", \"992a602c49416d56179835e3c6c5d0e89aca53a372dc61cfd9d105c7c0bdeb02\", \"765e41608cec6275ba3b7c19f2dcecd8cf7c02eed764b953066535c9395ba50e\", \"a8e8223b185992e646d89d4ab71009b3244dd2c2949d98557b21ad8cd2f03d0f\", \"140613a8e1366fe9d73e2282e471274b7304b94dc5a99a84411c5504b8617604\", \"2351e695642aa3608fb3c413e7221b153d2ba3c0f8adca0d9636b3f95922ed04\", \"0c19b92c35505041a39b8c9492c08577c9689bebbf1955827ab7298779d9d50b\", \"861f800ae0baa6761569a92e7b74d2d9dde4d8c98ffb37737edba691f4c94a0c\", \"78ef2c281ffe9c1d7f290cc7c60efd992f2b436c73b04f920984a1e8b8256a05\", \"3d938f9f385b823aeee4f760e7c4346a3be6921700c433664d6806deaa3a6c03\", \"9a1dc227d0fb1d07b26156b4b998053b2d07e037c024eb305212852edea36401\"], \n \"c1\": \"0936723eed3db539fd792c5acfe56bf2444998a107cff83eb7e37504b05c990f\", \n \"D\": \"78111416918e784305b77030f0e2a14124afce9bf4d285c09a77264b2cf85e7f\"\n }], \n \"pseudoOuts\": [ \"e99253575d3a7dbca07223afc25c085bc2e45aea9b399d58339b4ac1edfcd5e9\", \"ef14d6f64ab94d4f94eca4765dfabac9c91a423283387f51301a31d35de5b61d\"]\n }\n}", + "weight": 2808 + },{ + "blob_size": 2387, + "do_not_relay": false, + 
"double_spend_seen": false, + "fee": 116500000, + "id_hash": "dbabb82a5f97d9da58d587c179b5861576144ea0cd14b96bef62b83d8838f363", + "kept_by_block": false, + "last_failed_height": 0, + "last_failed_id_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "last_relayed_time": 1721261653, + "max_used_block_height": 3195160, + "max_used_block_id_hash": "2f7b8ca3dbd64cb33f428ece414b2b1cef405cfcd85fab1a70383490cc7ed603", + "receive_time": 1721261653, + "relayed": true, + "tx_blob": "020001020010df97d533d1c51185e031f69e08adce05b0aa06fb8a03b137895afb14c08e02b0549c1af60da305cb1f6d80d9c12f1439b0a994f767d71d98d2d2cde1a54c6a6134a00c2f07135d98cf0b0003e205de62fdbf4ff1b6554e3180bfd09051e368d70a5739bc039d864eea9d355da60003dacd0606c42ee249c304393e3169de7baacc28a9ff94240b603c54b0238d542d11000324dffd0f9dfb98b459a463f1c62df0d5ec76ad3781c41d5a65bdce94dc43aa5e5b0003631aff9f6291c9890f02a667c976b7e13bce27d443d00bda47d72d32ca2eacff40000360ba12b274ebbfe8f74abfe132a02a4a91e96f2f5db3b5772e5bab2e1066015959000348e42fd446643b5ab32024d193050ad976173b0ce43cc33add718167b0d99b33ac000356a51fe30876666e834c9741ee4e4fcbd08372005319f58cdc1a46bd98db5dec4b0003421637f078091694b19f1d2d3691a2ba8fe38198a808d0bd38f4202270d5b47dbb0003b42ff611a9b780b8e4afc2958d059013c55512102b3f6d7b6f30f4da75c5bbbb340003c3253ebf1a9125a52b9a7d4fca7d29cc41c701aefd7b8d5caa6ca02a128a0433cf00030fbe7d06b657e84ffa29bcb937a649a647cc1e967fdeb3a1c7a00f2e72faddbbaf21016b391fa654bc1e4141eec1d8a55bfbe363d9632b56dd4c7393b7b1f8934932e506a0ccc6376dad44ee6f7e9686a70f600a18341bd60dcbe99a71bc8a05eb8f5ad01274c1590bb0e0848ccd0872e311b24115067be37fa3244c7785ce295d3eaff88e7bd50450bcfe9aebd902dbf3fd4d3d5f5deaa8f5517e89834007556c99f923978cb868d155d761085d03d11684d6d75b79e461da935fe8b8ccff02aaba98f24626e21abd6b7cee23ff35c800e99b7b8719b7adcf2987f61a51c7e81728cfab8ac5ec5fa22a10dbd58ae64976b188860a88ea9a7ea3ce49cc5460168c355de58788139c74011a95e49b0d91e6e033be4f212573cae4ae8b77c7f59e727f4ff9ef5045de7a733c53eb98c57bf4c643a1183da886f66d798
4e4559192c1de69533b7777e5da4290431063f42b9f900f82564deffb967e214eb22c6835fda6a27569c7211b2ccf9602046049c5b72ed13a70193cec07dc1336b609c575b4dd58693376a102246f13508d44c88dec158b3a9b263bf8557818979f7a833f758602ffc62037d78994082ea998cd1b4944ebed5a3cba2f3e19dcc7f9bfc96901a60c2293b2949b0e9d3b84260d2f6085c628ba1a0fbfaab1611bdad0ec39ba599342f1de56d2beb0930aea3ef3f446ebf52bf5c0cc89566a68ca68ad05f9cb012acf619b5da2f005a99b98d6ab1244a4458435fdd4ba5aabab8884eede69ee25d063c5e054a0763bd8e6cad5859a1f54d5d280af664cd2b3ab6a8ba944e2858bc9979f63b12e75b3a647fcb31e8796502947ada7d65526c74f70e2ff4820e599a03200a8bc6475cde069c7e90a9fda27d204a8c01c10706f1fec6c791d6e6d05707f92d85119d8b54b5818e894ddc4e5ce45612e601094aca90fac825b8f13060b9cf42dc96310520835ddff845dd782508fd8b73d99613d425323b206d3ae060a6ae9c2bf3c383f11392df4b0cdb537c62e618cd97de259135abc4de9fc3fa1e9d64eaa642cdd7d6a6e3f2208ed76771b2be4a2416e53c15f1e3bd7eee3ec3e92f6ceb61873e03d5ac5f2bf583fb5f86453310e232a2620a6bfa4cfb4981b533b8625b08e0938291ba52a9850629e3097bfef1b3764c73643ade61a8d425d3f2da30df56d9fd7c736efe223b158ed49114e1eb8e616a2ef47d673c8ed52b2d94c5c25ebab35e7e0512aa8d9a48c01ece39a0dd29125cafa904e1a33cca56d64ea06551895fa1f1691674216e56eca2ac8ba84da58eac436746fe39e028815dd812f19d8236dadccbc23378bece2a78a8cf6ce42a40d7e97ac095f2fbe3c769eb5674359a42c69eba31b8952c54fcc90ef3f141acb32d9f4f7d88c0d416d541aa2300560059f8f8ed209d935b3e26185f387ebc5f0ac4a279b661a97f13da023010afcc6f7d3542ee9511acf4ff6782d5f516ebcb08f76ef089cfbca6012001893bb8ef584c963ec50d1249f063d3a138a836e98fd3c845b15d97e819e60fc8b7298bb52ac2e31690570e777e3a18b0d2d30e9219d4c53ad018c05f9150963045dfacd472e1d89929f4df50429794c4214bd02cf7ef7a1dbac9bd7d882748c36ade1c99a664e1ebf3b88c9f2af6b0facadce56c11ee6e7f9bb642d612952642a2e584bc487bae947cac5301fb28e8635b282d5aeb999ac21fe3060a351649c00ed98646e663edab73c1d545771f1e85919b2d89ba44149a086e188ecaec21f5ecebe04198c506cae6da01a2413e414d2aaa0309f16c8afb17b146577f41fc383957d8ab02506d67795f41c6709c5cba79a14fa32842c4e38e9d97b0c69500f9dffcf7
8e6aa8232f46e7f6b2e402d0a5ee548a0e503be9b789a08803e0970f2d745249fbc34dc464b3ae4d5ba8b98946e3201fb1d9185507aae2a77e129394c8b64071d658e5eff25510bd2becae400f2db0a865a4a6fd55fbe6ea016f1370eab2d0090eddd7545194e0b3fce7cab06c968a6bd17cc848f5d3d20a26f5dccd004e803839d366680832ed1290ed6469f0a75a5ef9a35c426381a9372d4f0cddd77370d506ec1d7573950192844f4f3fe1089077dc6faa84d6513eb75621f5ade8eff0bceba99fa0b988daa13fe0c7fd15d85f050a339d447581c8459db9833952e710ae6c97b50495765c8d034d821da26bb950c6d379b923ba2482ae700098579400f3337c5898f002ab7832e258e3ead08da9c71bcde6449a5cacd506b6977549b08ffcdd6d90ce4f0b06e36f0b1c61055211284292df52d88532ade8962e3679f0e313298b6a931e790e641d5176247e5dcfa715134624e4bca5dfd92259cacd907014bf9138373942890180ea291a80e2594d0c062d1826dce12d4167bb10cd30a90cd211a857b80f7c1887c1ce14aa8124ede4b989f3520cce7011978b955cf030c76902a5b22867f9c3d47e8523c9cfd8a4d97ad2592f1debea8c30a7856bf04a435b617fd6b6a78ee5bd65e354f1b6ca5a21fc66d399e4debb8c2a97acc07048967d4c0fa15746f102312f0fd1f8ea851ee133282cba0f1f439408f2742df0af040c7e3ec8e5a0deeb6e952316fa300d986c29986cfb6293b90f5dac8a9760e2c8178c818aa7a44292f3129578d6bead21638d57b020709f39379f3b579cc0ae6aa3044eb12eb2487b34119b5f3ed92fc020406eaa991cf0a51c60cb1b7789afa8610e6d02955b7063068ebc5b481d70f39e570e880cd8551eaa333670fb213", + "tx_json": "{\n \"version\": 2, \n \"unlock_time\": 0, \n \"vin\": [ {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 108350431, 287441, 815109, 135030, 91949, 103728, 50555, 7089, 11529, 2683, 34624, 10800, 3356, 1782, 675, 4043\n ], \n \"k_image\": \"6d80d9c12f1439b0a994f767d71d98d2d2cde1a54c6a6134a00c2f07135d98cf\"\n }\n }\n ], \n \"vout\": [ {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"e205de62fdbf4ff1b6554e3180bfd09051e368d70a5739bc039d864eea9d355d\", \n \"view_tag\": \"a6\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"dacd0606c42ee249c304393e3169de7baacc28a9ff94240b603c54b0238d542d\", \n \"view_tag\": \"11\"\n }\n }\n }, {\n 
\"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"24dffd0f9dfb98b459a463f1c62df0d5ec76ad3781c41d5a65bdce94dc43aa5e\", \n \"view_tag\": \"5b\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"631aff9f6291c9890f02a667c976b7e13bce27d443d00bda47d72d32ca2eacff\", \n \"view_tag\": \"40\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"60ba12b274ebbfe8f74abfe132a02a4a91e96f2f5db3b5772e5bab2e10660159\", \n \"view_tag\": \"59\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"48e42fd446643b5ab32024d193050ad976173b0ce43cc33add718167b0d99b33\", \n \"view_tag\": \"ac\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"56a51fe30876666e834c9741ee4e4fcbd08372005319f58cdc1a46bd98db5dec\", \n \"view_tag\": \"4b\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"421637f078091694b19f1d2d3691a2ba8fe38198a808d0bd38f4202270d5b47d\", \n \"view_tag\": \"bb\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"b42ff611a9b780b8e4afc2958d059013c55512102b3f6d7b6f30f4da75c5bbbb\", \n \"view_tag\": \"34\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"c3253ebf1a9125a52b9a7d4fca7d29cc41c701aefd7b8d5caa6ca02a128a0433\", \n \"view_tag\": \"cf\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"0fbe7d06b657e84ffa29bcb937a649a647cc1e967fdeb3a1c7a00f2e72faddbb\", \n \"view_tag\": \"af\"\n }\n }\n }\n ], \n \"extra\": [ 1, 107, 57, 31, 166, 84, 188, 30, 65, 65, 238, 193, 216, 165, 91, 251, 227, 99, 217, 99, 43, 86, 221, 76, 115, 147, 183, 177, 248, 147, 73, 50, 229\n ], \n \"rct_signatures\": {\n \"type\": 6, \n \"txnFee\": 116500000, \n \"ecdhInfo\": [ {\n \"amount\": \"6dad44ee6f7e9686\"\n }, {\n \"amount\": \"a70f600a18341bd6\"\n }, {\n \"amount\": \"0dcbe99a71bc8a05\"\n }, {\n \"amount\": \"eb8f5ad01274c159\"\n }, {\n 
\"amount\": \"0bb0e0848ccd0872\"\n }, {\n \"amount\": \"e311b24115067be3\"\n }, {\n \"amount\": \"7fa3244c7785ce29\"\n }, {\n \"amount\": \"5d3eaff88e7bd504\"\n }, {\n \"amount\": \"50bcfe9aebd902db\"\n }, {\n \"amount\": \"f3fd4d3d5f5deaa8\"\n }, {\n \"amount\": \"f5517e8983400755\"\n }], \n \"outPk\": [ \"6c99f923978cb868d155d761085d03d11684d6d75b79e461da935fe8b8ccff02\", \"aaba98f24626e21abd6b7cee23ff35c800e99b7b8719b7adcf2987f61a51c7e8\", \"1728cfab8ac5ec5fa22a10dbd58ae64976b188860a88ea9a7ea3ce49cc546016\", \"8c355de58788139c74011a95e49b0d91e6e033be4f212573cae4ae8b77c7f59e\", \"727f4ff9ef5045de7a733c53eb98c57bf4c643a1183da886f66d7984e4559192\", \"c1de69533b7777e5da4290431063f42b9f900f82564deffb967e214eb22c6835\", \"fda6a27569c7211b2ccf9602046049c5b72ed13a70193cec07dc1336b609c575\", \"b4dd58693376a102246f13508d44c88dec158b3a9b263bf8557818979f7a833f\", \"758602ffc62037d78994082ea998cd1b4944ebed5a3cba2f3e19dcc7f9bfc969\", \"01a60c2293b2949b0e9d3b84260d2f6085c628ba1a0fbfaab1611bdad0ec39ba\", \"599342f1de56d2beb0930aea3ef3f446ebf52bf5c0cc89566a68ca68ad05f9cb\"]\n }, \n \"rctsig_prunable\": {\n \"nbp\": 1, \n \"bpp\": [ {\n \"A\": \"2acf619b5da2f005a99b98d6ab1244a4458435fdd4ba5aabab8884eede69ee25\", \n \"A1\": \"d063c5e054a0763bd8e6cad5859a1f54d5d280af664cd2b3ab6a8ba944e2858b\", \n \"B\": \"c9979f63b12e75b3a647fcb31e8796502947ada7d65526c74f70e2ff4820e599\", \n \"r1\": \"a03200a8bc6475cde069c7e90a9fda27d204a8c01c10706f1fec6c791d6e6d05\", \n \"s1\": \"707f92d85119d8b54b5818e894ddc4e5ce45612e601094aca90fac825b8f1306\", \n \"d1\": \"0b9cf42dc96310520835ddff845dd782508fd8b73d99613d425323b206d3ae06\", \n \"L\": [ \"6ae9c2bf3c383f11392df4b0cdb537c62e618cd97de259135abc4de9fc3fa1e9\", \"d64eaa642cdd7d6a6e3f2208ed76771b2be4a2416e53c15f1e3bd7eee3ec3e92\", \"f6ceb61873e03d5ac5f2bf583fb5f86453310e232a2620a6bfa4cfb4981b533b\", \"8625b08e0938291ba52a9850629e3097bfef1b3764c73643ade61a8d425d3f2d\", \"a30df56d9fd7c736efe223b158ed49114e1eb8e616a2ef47d673c8ed52b2d94c\", 
\"5c25ebab35e7e0512aa8d9a48c01ece39a0dd29125cafa904e1a33cca56d64ea\", \"06551895fa1f1691674216e56eca2ac8ba84da58eac436746fe39e028815dd81\", \"2f19d8236dadccbc23378bece2a78a8cf6ce42a40d7e97ac095f2fbe3c769eb5\", \"674359a42c69eba31b8952c54fcc90ef3f141acb32d9f4f7d88c0d416d541aa2\", \"300560059f8f8ed209d935b3e26185f387ebc5f0ac4a279b661a97f13da02301\"\n ], \n \"R\": [ \"fcc6f7d3542ee9511acf4ff6782d5f516ebcb08f76ef089cfbca6012001893bb\", \"8ef584c963ec50d1249f063d3a138a836e98fd3c845b15d97e819e60fc8b7298\", \"bb52ac2e31690570e777e3a18b0d2d30e9219d4c53ad018c05f9150963045dfa\", \"cd472e1d89929f4df50429794c4214bd02cf7ef7a1dbac9bd7d882748c36ade1\", \"c99a664e1ebf3b88c9f2af6b0facadce56c11ee6e7f9bb642d612952642a2e58\", \"4bc487bae947cac5301fb28e8635b282d5aeb999ac21fe3060a351649c00ed98\", \"646e663edab73c1d545771f1e85919b2d89ba44149a086e188ecaec21f5ecebe\", \"04198c506cae6da01a2413e414d2aaa0309f16c8afb17b146577f41fc383957d\", \"8ab02506d67795f41c6709c5cba79a14fa32842c4e38e9d97b0c69500f9dffcf\", \"78e6aa8232f46e7f6b2e402d0a5ee548a0e503be9b789a08803e0970f2d74524\"\n ]\n }\n ], \n \"CLSAGs\": [ {\n \"s\": [ \"9fbc34dc464b3ae4d5ba8b98946e3201fb1d9185507aae2a77e129394c8b6407\", \"1d658e5eff25510bd2becae400f2db0a865a4a6fd55fbe6ea016f1370eab2d00\", \"90eddd7545194e0b3fce7cab06c968a6bd17cc848f5d3d20a26f5dccd004e803\", \"839d366680832ed1290ed6469f0a75a5ef9a35c426381a9372d4f0cddd77370d\", \"506ec1d7573950192844f4f3fe1089077dc6faa84d6513eb75621f5ade8eff0b\", \"ceba99fa0b988daa13fe0c7fd15d85f050a339d447581c8459db9833952e710a\", \"e6c97b50495765c8d034d821da26bb950c6d379b923ba2482ae700098579400f\", \"3337c5898f002ab7832e258e3ead08da9c71bcde6449a5cacd506b6977549b08\", \"ffcdd6d90ce4f0b06e36f0b1c61055211284292df52d88532ade8962e3679f0e\", \"313298b6a931e790e641d5176247e5dcfa715134624e4bca5dfd92259cacd907\", \"014bf9138373942890180ea291a80e2594d0c062d1826dce12d4167bb10cd30a\", \"90cd211a857b80f7c1887c1ce14aa8124ede4b989f3520cce7011978b955cf03\", 
\"0c76902a5b22867f9c3d47e8523c9cfd8a4d97ad2592f1debea8c30a7856bf04\", \"a435b617fd6b6a78ee5bd65e354f1b6ca5a21fc66d399e4debb8c2a97acc0704\", \"8967d4c0fa15746f102312f0fd1f8ea851ee133282cba0f1f439408f2742df0a\", \"f040c7e3ec8e5a0deeb6e952316fa300d986c29986cfb6293b90f5dac8a9760e\"], \n \"c1\": \"2c8178c818aa7a44292f3129578d6bead21638d57b020709f39379f3b579cc0a\", \n \"D\": \"e6aa3044eb12eb2487b34119b5f3ed92fc020406eaa991cf0a51c60cb1b7789a\"\n }], \n \"pseudoOuts\": [ \"fa8610e6d02955b7063068ebc5b481d70f39e570e880cd8551eaa333670fb213\"]\n }\n}", + "weight": 5817 + },{ + "blob_size": 1664, + "do_not_relay": false, + "double_spend_seen": false, + "fee": 42480000, + "id_hash": "3fd963b931b1ac20e3709ba0249143fe8cff4856200055336ba9330970e6306a", + "kept_by_block": false, + "last_failed_height": 0, + "last_failed_id_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "last_relayed_time": 1721261660, + "max_used_block_height": 3195160, + "max_used_block_id_hash": "2f7b8ca3dbd64cb33f428ece414b2b1cef405cfcd85fab1a70383490cc7ed603", + "receive_time": 1721261660, + "relayed": true, + "tx_blob": 
"020001020010cfc19c31d9c65acfdb1bcfb91dd6e00aa7c2b601e2b51489e01ab3cd038e99039aa705c0f806ba2ffc1da40ba002913f889441c829e62c741c27614cdbb6278555b768fbd583424e1bb45c65e43b030003ad696cce55ac392f061b7e0f1acccbd17cbdfc11d20805df2ee8efb087476ed59c000357483f9fa7379f9b72e6149fd2b319c4e60653a627d0107d73f5604159d597ebf000034360e4f9cfb1109148e0b8428c36fa62270e4aa68e6aabd768270363b53a195acc210173c64bf5cd3e9f401a3800c9624c45f8e9408163348c4e2c473b88f610874eff0680e3a01479bce24f3573dbafaae47cc85d8723551119c984e226cab7daa8193188e5b32952ea0a464812c5f1189db475fe78ad4a6351e7d335a79163d9bd1c7b680e62a0ef36e83b122965bf6042bef9cf535da9286ea586681c4bef225c49f8f54b6b1a92a4a2d53c699fd021e1d2cf9ae75729ba7cf244a0264cf901fd2a5b8b2a2016880735d531b55fa6e206cc0c0d5177c93d10e2f60fadde3bebebf7ff3558e9949869a8401f4174a6e2e6092074d44b2665008391bcef3be3b38dcbb6f45f57379ba40acdffb81ffc8a3d312c6b728eff17a8c8f15d49f1517aca615e2f8bc5257f7b7a27cd455c633a400b6ff63f58d7c66c1172e1e56ef30e4edf1f17a43bd06754bcc657391acef09cbc15d6b827e61cfa8f6e33a4f7ce0837c0edf5a192c739ca6c8da976af9a1fa2ca1649024639f6c26e09dd01489b0a08e64e75e0dd89bd999cdaf72f1334263b2daa2b27b593951c8ea7781d7f4479fa8b24c4e3e7d02de85eb0144aa0e26be30c4a283db27c5b488ee4895345f81a51b54741caf457eef595c9753aa58d82d45182325aea6d77198090e8e14fd413620fcb3a7750dcb87178431ed8b4b23f39fd893c1e500dce31de0bf1861540e212e1263d567951d7c73e3f082a9fd7086aba9b58fbbbf61783fda9d037afa96250fe234734c186422495f5d5db92690958e43e5fd5ec5e302537d3353950f5a8e2a65a13ea58137481ebc02768f12ba06d4c3a58cc3f2725567a544434acc7934c87eff281268d9495670dc73032cab96056aeeda57fb16e153074dbe5b490ee2608d4ee6684886fec29e4f03741b6cfeb7bf468eca760fdf20085df50309f6942585ded84a963d18448029d3990a43ad3ba730b70193b7195961375390863ef0e721f26495ea684c129a7bfe0b58837269c2f3b9ed5697b4e5f50205e1699086db0d89e441c287e7aff567135d73a6724f206a15909cf4505c99a1ece8d2d44197ec9c2d28ca55008ba1819f7092e694e0048502dd937674b825e8325ccf06d2257e50dccf6d450f07a040def2b0af67d7a9ca8d54179bf474b11f3e0342b92e6f0d5c2c5b01dd7769
f3651c5a5b7e4af34428cfd9429ecf373cc409db223a3f81075b8d10b0e409336f9922b0acc60780ed8c7e6916eafa813e486f678133c7c287040847e31fae2ed636fc2830394ecfef0c93ae4a380394907018dfdcf9814083efcaee92919e11f36ec098917436902e5752d62d43e054178b9cb6a66e7d4087b234f9f3fff46cdec0fd531ab50b82ffbbcf22171250fd8ce8015e6ccb940022ee8c5d7a0d10186b416c51c3370b0300f54651accfbebd3ac3e4a22ebbf2409b5e7ebc40dae1be188c5208668d6216d1cdb0552541c27fc6b9f770aa6fa2e0f50eedef2110162cd67b027348beb3865d40b71dfcd4bb02caa5d85454868f8071d6fda35f3729b3fdf7041ae02302733ed00dafdbb91e81d304daa7a99e0350ed8967cea2637fddda63bb273ed406be7ee694bff1fb5a0cdbf792fe25da78d0be68bff0add3f8868ece66332a446415da39994ed247606a7951ccbe639006008a2796c33bb8d53f64de030b7618deac7e15fc6b53bea8f04b30d244007e6b805a5c6a79cfa5e75c89eb96b298fe5f3722c9ab5219b839a6a88999d288913d9025d07125c47cc210f19bf42dc7edd84d62fd4d6ebfba9d865c8909477d5aa98047b12e6430d55c9f19d9ee5fe6020cb0fd4604ff5999ef3e8904d6016f99334035c1a93efd452b75925209e0eabbefade7d92f8ed581135d87f791170b3dd7b024eb818dba8ccfb23e897c6edc986ac47cd95292156e50c06797f9d9e552e3e01ac75a709909f7650bb41125bda22a768d3673bb7a28cf418514fbde82c3cc7035c483ea223a5c017b42873aca5381b7d4926c47d6ac62e8289a4bfe1f72751049b04cbacc98c9796300170897711eab28348eb4c73fc31ba3fc82d0043a67d628e9f3dc440a410d0dbe2c8c3c46caf95165cc82f3716f56fd0a6c3b187e1f892", + "tx_json": "{\n \"version\": 2, \n \"unlock_time\": 0, \n \"vin\": [ {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 103227599, 1483609, 454095, 482511, 176214, 2990375, 334562, 438281, 59059, 52366, 86938, 113728, 6074, 3836, 1444, 288\n ], \n \"k_image\": \"913f889441c829e62c741c27614cdbb6278555b768fbd583424e1bb45c65e43b\"\n }\n }\n ], \n \"vout\": [ {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"ad696cce55ac392f061b7e0f1acccbd17cbdfc11d20805df2ee8efb087476ed5\", \n \"view_tag\": \"9c\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": 
\"57483f9fa7379f9b72e6149fd2b319c4e60653a627d0107d73f5604159d597eb\", \n \"view_tag\": \"f0\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"4360e4f9cfb1109148e0b8428c36fa62270e4aa68e6aabd768270363b53a195a\", \n \"view_tag\": \"cc\"\n }\n }\n }\n ], \n \"extra\": [ 1, 115, 198, 75, 245, 205, 62, 159, 64, 26, 56, 0, 201, 98, 76, 69, 248, 233, 64, 129, 99, 52, 140, 78, 44, 71, 59, 136, 246, 16, 135, 78, 255\n ], \n \"rct_signatures\": {\n \"type\": 6, \n \"txnFee\": 42480000, \n \"ecdhInfo\": [ {\n \"amount\": \"79bce24f3573dbaf\"\n }, {\n \"amount\": \"aae47cc85d872355\"\n }, {\n \"amount\": \"1119c984e226cab7\"\n }], \n \"outPk\": [ \"daa8193188e5b32952ea0a464812c5f1189db475fe78ad4a6351e7d335a79163\", \"d9bd1c7b680e62a0ef36e83b122965bf6042bef9cf535da9286ea586681c4bef\", \"225c49f8f54b6b1a92a4a2d53c699fd021e1d2cf9ae75729ba7cf244a0264cf9\"]\n }, \n \"rctsig_prunable\": {\n \"nbp\": 1, \n \"bpp\": [ {\n \"A\": \"fd2a5b8b2a2016880735d531b55fa6e206cc0c0d5177c93d10e2f60fadde3beb\", \n \"A1\": \"ebf7ff3558e9949869a8401f4174a6e2e6092074d44b2665008391bcef3be3b3\", \n \"B\": \"8dcbb6f45f57379ba40acdffb81ffc8a3d312c6b728eff17a8c8f15d49f1517a\", \n \"r1\": \"ca615e2f8bc5257f7b7a27cd455c633a400b6ff63f58d7c66c1172e1e56ef30e\", \n \"s1\": \"4edf1f17a43bd06754bcc657391acef09cbc15d6b827e61cfa8f6e33a4f7ce08\", \n \"d1\": \"37c0edf5a192c739ca6c8da976af9a1fa2ca1649024639f6c26e09dd01489b0a\", \n \"L\": [ \"e64e75e0dd89bd999cdaf72f1334263b2daa2b27b593951c8ea7781d7f4479fa\", \"8b24c4e3e7d02de85eb0144aa0e26be30c4a283db27c5b488ee4895345f81a51\", \"b54741caf457eef595c9753aa58d82d45182325aea6d77198090e8e14fd41362\", \"0fcb3a7750dcb87178431ed8b4b23f39fd893c1e500dce31de0bf1861540e212\", \"e1263d567951d7c73e3f082a9fd7086aba9b58fbbbf61783fda9d037afa96250\", \"fe234734c186422495f5d5db92690958e43e5fd5ec5e302537d3353950f5a8e2\", \"a65a13ea58137481ebc02768f12ba06d4c3a58cc3f2725567a544434acc7934c\", 
\"87eff281268d9495670dc73032cab96056aeeda57fb16e153074dbe5b490ee26\"\n ], \n \"R\": [ \"d4ee6684886fec29e4f03741b6cfeb7bf468eca760fdf20085df50309f694258\", \"5ded84a963d18448029d3990a43ad3ba730b70193b7195961375390863ef0e72\", \"1f26495ea684c129a7bfe0b58837269c2f3b9ed5697b4e5f50205e1699086db0\", \"d89e441c287e7aff567135d73a6724f206a15909cf4505c99a1ece8d2d44197e\", \"c9c2d28ca55008ba1819f7092e694e0048502dd937674b825e8325ccf06d2257\", \"e50dccf6d450f07a040def2b0af67d7a9ca8d54179bf474b11f3e0342b92e6f0\", \"d5c2c5b01dd7769f3651c5a5b7e4af34428cfd9429ecf373cc409db223a3f810\", \"75b8d10b0e409336f9922b0acc60780ed8c7e6916eafa813e486f678133c7c28\"\n ]\n }\n ], \n \"CLSAGs\": [ {\n \"s\": [ \"7040847e31fae2ed636fc2830394ecfef0c93ae4a380394907018dfdcf981408\", \"3efcaee92919e11f36ec098917436902e5752d62d43e054178b9cb6a66e7d408\", \"7b234f9f3fff46cdec0fd531ab50b82ffbbcf22171250fd8ce8015e6ccb94002\", \"2ee8c5d7a0d10186b416c51c3370b0300f54651accfbebd3ac3e4a22ebbf2409\", \"b5e7ebc40dae1be188c5208668d6216d1cdb0552541c27fc6b9f770aa6fa2e0f\", \"50eedef2110162cd67b027348beb3865d40b71dfcd4bb02caa5d85454868f807\", \"1d6fda35f3729b3fdf7041ae02302733ed00dafdbb91e81d304daa7a99e0350e\", \"d8967cea2637fddda63bb273ed406be7ee694bff1fb5a0cdbf792fe25da78d0b\", \"e68bff0add3f8868ece66332a446415da39994ed247606a7951ccbe639006008\", \"a2796c33bb8d53f64de030b7618deac7e15fc6b53bea8f04b30d244007e6b805\", \"a5c6a79cfa5e75c89eb96b298fe5f3722c9ab5219b839a6a88999d288913d902\", \"5d07125c47cc210f19bf42dc7edd84d62fd4d6ebfba9d865c8909477d5aa9804\", \"7b12e6430d55c9f19d9ee5fe6020cb0fd4604ff5999ef3e8904d6016f9933403\", \"5c1a93efd452b75925209e0eabbefade7d92f8ed581135d87f791170b3dd7b02\", \"4eb818dba8ccfb23e897c6edc986ac47cd95292156e50c06797f9d9e552e3e01\", \"ac75a709909f7650bb41125bda22a768d3673bb7a28cf418514fbde82c3cc703\"], \n \"c1\": \"5c483ea223a5c017b42873aca5381b7d4926c47d6ac62e8289a4bfe1f7275104\", \n \"D\": \"9b04cbacc98c9796300170897711eab28348eb4c73fc31ba3fc82d0043a67d62\"\n }], \n \"pseudoOuts\": [ 
\"8e9f3dc440a410d0dbe2c8c3c46caf95165cc82f3716f56fd0a6c3b187e1f892\"]\n }\n}", + "weight": 2124 + },{ + "blob_size": 1537, + "do_not_relay": false, + "double_spend_seen": false, + "fee": 491840000, + "id_hash": "8a6ebea82ede84b743c256c050a40ae7999e67b443c06ccb2322db626d62d970", + "kept_by_block": false, + "last_failed_height": 0, + "last_failed_id_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "last_relayed_time": 1721261651, + "max_used_block_height": 3195153, + "max_used_block_id_hash": "8d15c2bf99e9a1c5a0513dca2106c5bcd81d94aa07c1c2d17d7ea96883cd1158", + "receive_time": 1721261651, + "relayed": true, + "tx_blob": "020001020010f783dd2ee9ec9804c7ca2ed7dd3aea990fd3fa0ae8c00edcfe1bee950493de03ba8302b494048114ab6ac7b70110a7f204f932169b1b056fc63be06db8ec91a436f7188a30545bcd6a8bae817ca4020003aa62b55146dd60777113d2c62c8bc41c9339b63e758513d57d5ada453786995ab00003b71531f0476ff381570af6bff1ca3bc8bf8286a098a939e32eaee36450895bef412c019ee82c35546584a78c1713407439e107e98c4da5e6735d77f5d61a369bbf4e2b020901c8c9a284894d6ce90680c4c3ea011d42ea4108a9eb4c47d3233193a71c74ed6c841d99c6eaad2a605daac8f0cdcb6b75c30f2a4aec0e955b2fd93b2cf65bb62d9b9ae29de54a88a5157484d03bd04b300ad7516fb0dc7c647b3ce7f2b2c301e106566731515046dbcf50b5e8f6edeea6bbd2e945b4eb49bc121da137e276481e0265fb162e11730623ead0848e5abfb5007e24282d4a5dc95ad1332e383bfd3d403de9cc283ef16eac799ea9ecfffd24b0a0faed955a5ce82f2a717f5b30bba3cc22335eb0edcfc3b2af623db3309981e8f39722accfc7fd215bd3f1f6a60f476ca5eb56798e464d5791a1cd5d57fbe2d2fbf1c6824a3e80e48d3fa807240fa78029fdf2795b8bc4f0b5d5336ebaa65c179534ec8038dec26c8b5cc103230607d277a8b482900c60791c03c6a072eb576cdf8532b2da3bb493872c264559a7832f46f076e6b541c665df3baaf4486d9466655a26e3aea8f9d6e4d3f5613f4dbcb5c1cc78c9698551ed28ebaccebba9ffe10b38c4791afa73c620cf433342e45733b26f192642f6498a0d482df12e605b91957614d4a6707d0c103883104210508812038852ef55bf0ceed35382c508d19b82bacb5891bf9070c4634bbdb229c22c4e1296d55ccad33aac94342e8f75acee853fa05e69fe1f545c3b4ef839b401f
7dd53846c9a9d8b1b0b04282f802fa451475b86d39edc01368a86cf9382a037075bba459244e0dc6488730bc628330b776bf8c6ababb3046ee3b103fcc2d9d544cd92ea335770c68c0e6f385105299bffd1ae17a12fe4b40e6880a9c6f5038daed1464b0fee68ecdfcdc712405936422918197dfec8f4351b82e7da04f6bcb9c765924afbd077c168ee0306cf456e1f4606086e2155260b3f5e9d8443533de0bb2fa410fc5465603b16c0137a236f761fb254e7b53c8667d8ba931e41bf6ed5cd131a4171a265158843415ca2f5c1690c9ec431091f3312b7b308420878e7e9118bed3c3f7df4e5acc3bd7d209d3539ce182e551af5f5e6369d29367dfa97c8951cfa0ba11a23b5c5e1bf6cd59db4b658b1437652f6e3f25717c9f4f522b37a0767c193fdbbdb9dada9d5d46e38a7d8a42460c0bf37c7adc0a317fd3e4a29390de895b526f0368b4737af0746647e2025a10cae52d452400710c9709e2b375c09c73b2529c97d87b8e62f15d0c776dbb0f5564cde6fb5872c124193d47d92ac007bfab088ebd9a44f194597802213475438f21112a50cb4f02c7f650ecfd3290f1c9bffdd70877dbc77cd5485cfbec143687cf605938787c1426ed42646a1f0080757ba370ba2b36b492a71708e36736db7857405cae3b128879d8e9c7dff030b3fb734e6faf49c85ed5d7940b400375b12674b3858d31b7ab56222e82ab03e00ce265d7d694571d3db08a7f23baf68007c4aa67f31185aeaba276eb3688b26018d8c904b9deea0fd68ceeebdf1aac9d967aec1ebaa4d3c6df2c3ce7c874ace05a3cb5af380be76fd67a7fc64d7da5a3ad94350b295a86cf7943c05c12f8267017e2beada8abf3e0fbb104492c222bbd8a6058dbdb05042213ed077c0d36cbf0cc6f1333a49e977ef452cfba812ba428b19cca4ea461681abce4f707e8f691b06fa6c56667fae356bcfa47f8f74f1db6bb470ce8fb96815e3fa2aaf2d22d1b00f4487e8e83e98b7d879c1699fd6f1092b1a9b5827effc22f14e3c7bf59ef2fc0b9eaf15677934da8c39016b3f771dd9227e76f541818cd4ca03d3bc78bd18cc0ee9dea19ceb194f06dc893c192932468ef107461f70a03b4d9137b72958ef68031ae7ae8bfa3fcc4d0f335959b7ac162dd86aae3da1580817d5fa6f932fe3c5511d1585d0f1383e6c1035c5ace104f8b03f27958c9160375a80498f8b73f861c4", + "tx_json": "{\n \"version\": 2, \n \"unlock_time\": 0, \n \"vin\": [ {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 97993207, 8795753, 763207, 962263, 249066, 179539, 237672, 458588, 68334, 61203, 33210, 68148, 2561, 13611, 23495, 16\n ], \n \"k_image\": 
\"a7f204f932169b1b056fc63be06db8ec91a436f7188a30545bcd6a8bae817ca4\"\n }\n }\n ], \n \"vout\": [ {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"aa62b55146dd60777113d2c62c8bc41c9339b63e758513d57d5ada453786995a\", \n \"view_tag\": \"b0\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"b71531f0476ff381570af6bff1ca3bc8bf8286a098a939e32eaee36450895bef\", \n \"view_tag\": \"41\"\n }\n }\n }\n ], \n \"extra\": [ 1, 158, 232, 44, 53, 84, 101, 132, 167, 140, 23, 19, 64, 116, 57, 225, 7, 233, 140, 77, 165, 230, 115, 93, 119, 245, 214, 26, 54, 155, 191, 78, 43, 2, 9, 1, 200, 201, 162, 132, 137, 77, 108, 233\n ], \n \"rct_signatures\": {\n \"type\": 6, \n \"txnFee\": 491840000, \n \"ecdhInfo\": [ {\n \"amount\": \"1d42ea4108a9eb4c\"\n }, {\n \"amount\": \"47d3233193a71c74\"\n }], \n \"outPk\": [ \"ed6c841d99c6eaad2a605daac8f0cdcb6b75c30f2a4aec0e955b2fd93b2cf65b\", \"b62d9b9ae29de54a88a5157484d03bd04b300ad7516fb0dc7c647b3ce7f2b2c3\"]\n }, \n \"rctsig_prunable\": {\n \"nbp\": 1, \n \"bpp\": [ {\n \"A\": \"e106566731515046dbcf50b5e8f6edeea6bbd2e945b4eb49bc121da137e27648\", \n \"A1\": \"1e0265fb162e11730623ead0848e5abfb5007e24282d4a5dc95ad1332e383bfd\", \n \"B\": \"3d403de9cc283ef16eac799ea9ecfffd24b0a0faed955a5ce82f2a717f5b30bb\", \n \"r1\": \"a3cc22335eb0edcfc3b2af623db3309981e8f39722accfc7fd215bd3f1f6a60f\", \n \"s1\": \"476ca5eb56798e464d5791a1cd5d57fbe2d2fbf1c6824a3e80e48d3fa807240f\", \n \"d1\": \"a78029fdf2795b8bc4f0b5d5336ebaa65c179534ec8038dec26c8b5cc1032306\", \n \"L\": [ \"d277a8b482900c60791c03c6a072eb576cdf8532b2da3bb493872c264559a783\", \"2f46f076e6b541c665df3baaf4486d9466655a26e3aea8f9d6e4d3f5613f4dbc\", \"b5c1cc78c9698551ed28ebaccebba9ffe10b38c4791afa73c620cf433342e457\", \"33b26f192642f6498a0d482df12e605b91957614d4a6707d0c10388310421050\", \"8812038852ef55bf0ceed35382c508d19b82bacb5891bf9070c4634bbdb229c2\", \"2c4e1296d55ccad33aac94342e8f75acee853fa05e69fe1f545c3b4ef839b401\", 
\"f7dd53846c9a9d8b1b0b04282f802fa451475b86d39edc01368a86cf9382a037\"\n ], \n \"R\": [ \"5bba459244e0dc6488730bc628330b776bf8c6ababb3046ee3b103fcc2d9d544\", \"cd92ea335770c68c0e6f385105299bffd1ae17a12fe4b40e6880a9c6f5038dae\", \"d1464b0fee68ecdfcdc712405936422918197dfec8f4351b82e7da04f6bcb9c7\", \"65924afbd077c168ee0306cf456e1f4606086e2155260b3f5e9d8443533de0bb\", \"2fa410fc5465603b16c0137a236f761fb254e7b53c8667d8ba931e41bf6ed5cd\", \"131a4171a265158843415ca2f5c1690c9ec431091f3312b7b308420878e7e911\", \"8bed3c3f7df4e5acc3bd7d209d3539ce182e551af5f5e6369d29367dfa97c895\"\n ]\n }\n ], \n \"CLSAGs\": [ {\n \"s\": [ \"1cfa0ba11a23b5c5e1bf6cd59db4b658b1437652f6e3f25717c9f4f522b37a07\", \"67c193fdbbdb9dada9d5d46e38a7d8a42460c0bf37c7adc0a317fd3e4a29390d\", \"e895b526f0368b4737af0746647e2025a10cae52d452400710c9709e2b375c09\", \"c73b2529c97d87b8e62f15d0c776dbb0f5564cde6fb5872c124193d47d92ac00\", \"7bfab088ebd9a44f194597802213475438f21112a50cb4f02c7f650ecfd3290f\", \"1c9bffdd70877dbc77cd5485cfbec143687cf605938787c1426ed42646a1f008\", \"0757ba370ba2b36b492a71708e36736db7857405cae3b128879d8e9c7dff030b\", \"3fb734e6faf49c85ed5d7940b400375b12674b3858d31b7ab56222e82ab03e00\", \"ce265d7d694571d3db08a7f23baf68007c4aa67f31185aeaba276eb3688b2601\", \"8d8c904b9deea0fd68ceeebdf1aac9d967aec1ebaa4d3c6df2c3ce7c874ace05\", \"a3cb5af380be76fd67a7fc64d7da5a3ad94350b295a86cf7943c05c12f826701\", \"7e2beada8abf3e0fbb104492c222bbd8a6058dbdb05042213ed077c0d36cbf0c\", \"c6f1333a49e977ef452cfba812ba428b19cca4ea461681abce4f707e8f691b06\", \"fa6c56667fae356bcfa47f8f74f1db6bb470ce8fb96815e3fa2aaf2d22d1b00f\", \"4487e8e83e98b7d879c1699fd6f1092b1a9b5827effc22f14e3c7bf59ef2fc0b\", \"9eaf15677934da8c39016b3f771dd9227e76f541818cd4ca03d3bc78bd18cc0e\"], \n \"c1\": \"e9dea19ceb194f06dc893c192932468ef107461f70a03b4d9137b72958ef6803\", \n \"D\": \"1ae7ae8bfa3fcc4d0f335959b7ac162dd86aae3da1580817d5fa6f932fe3c551\"\n }], \n \"pseudoOuts\": [ \"1d1585d0f1383e6c1035c5ace104f8b03f27958c9160375a80498f8b73f861c4\"]\n 
}\n}", + "weight": 1537 + },{ + "blob_size": 1534, + "do_not_relay": false, + "double_spend_seen": false, + "fee": 122720000, + "id_hash": "7c32ac906393a55797b17efef623ca9577ba5e3d26c1cf54231dcf06459eff81", + "kept_by_block": false, + "last_failed_height": 0, + "last_failed_id_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "last_relayed_time": 1721261673, + "max_used_block_height": 3195144, + "max_used_block_id_hash": "464cb0e47663a64ee8eaf483c46d6584e9a7945a0c792b19cdbde426ec3a5034", + "receive_time": 1721261673, + "relayed": true, + "tx_blob": "0200010200108088a024a3deeb0ef5939001e7eb0fc19401cab201aeaf01e08001cd16f4a601f52a8c8d01a106d811c83e5e987605d678e8bfb17e8d2651e8dd5c69c73c705d003c82e4e35d2b5b89c9ebe3020003507533a540f57548b44e011305bd39d6a6a64e5ccc82bac1833b6ededb9121f3690003b32a2175124a8e0a0fd95a37d51a290a397dc74637a8cdd73ba4dfdec91b2c0ec42c0159ef5288bf4ea3ba0f6780416feedec329416b7fd78af51f494f3eb89db900a7020901810cdc8e9ce7315906809ec23ab7e164d1d01bfc65b2f30e77c390a5fd68d73b4565628faee8a1f9bd7cffe31a3bdb37ef7fc670e18f6190cc20459a32ac6e15f56e257ed3471f4bbf5394501db831fa2d05b784f3e5091f322c1df20601326379bcc24fdcb0e68dbca4cbfac5fc5238e31b4ea8619c5eff8776a42e9c9a88287c0c5fc24c5a1872237e971762f9ec12853ca27eb3b1e611d3713bdd4778bc8585ef0adb3e8f144518a22d2eb4e77c526335c5ad9f5c4754740eaba4623f59cce4ea9eab890ec0976f3e03bc08f3218192c56d82c36ddc4692f5a96f8b056f11c7f15635ba12a22274a0a171999444f21079496b016867b7dac9f05f5f0141b181816c004f0f3033d7e0a9819f5a8623a637dc7d949b34c0da6eba6a7d07073c7219d079608c6ff8d04b0d4bb425b73142e84750e7e5548ef86cc02ac0b91132c3e4df24b11a32f8c30a483bcd5b903b2cd87197d8172bd4bd190c4034322dabd82600f852c346716518c93c439a799a2763ff9d2457c47f96e22371440cc1db354184fddabde2d51512556b7d05ad6be9f44fbf1671834f6fd45a6f8f1f09edf96551160fe83c207fb8eabab30ed294aca1287ea196f1640af3a183cc7891a680264c41b34d56ab4219e8175b1d847de3174d4c298cb5ac40e1c84169b0a5fbee401b237ad92093cb34b6752229e03cc7ceb2546102360ca5fe823192024b0775b50b22fe9e7
cfb29fec3400ea8a03b047ca1c92af76d14da40589415116b922b080f61446f1e5a11aaaa84acd4bee2e67f25c8a0c77db1e3cf8d9a2f57a1f6a622a4c76475ecb20fb1b6221caa4be32876414e6d6b0375582fbf9c1d50403981d303b53d877af580443431499ed7a030d01a618c37139ac4fec11b3a2afa1aee51a3605abd36c3cc05c02348430bdec52a481e04249a7adde0a4b718d459ee1aa67e4ce05980a451753b0e9b7dd543b373a137cd900f81929699bfa4fd5bd51d096c93673bc035031bf18d1e153a5b62e5965f4865827d7c871403fcda46a7e38ec6b2c7c7de6f6e88e7e32514fdbcf04bfdc851a1a052ad32052ca4a74f039ca030a488b3f3160043add8d6b6f7f5275c49ca6c5f3e6556641b5d08c80f053adcb2928105c91712723592a72f37c0da5ec12f62325cf8ce9a9d98244c830715d805c79b3e0d09ff094563869a28864beaf3d2e4a257f2ac7e05b253f84801fa53fc02c27fb618a582eeb68261458ff5a24f19db62368984e35c13a9b3b10de92a20b1f757574390e8a9e6cf58a33e6f2d7425dd5cff36f15b5992cca1690cedb97159b128e4e509bb7735bbfab47d0e925103bd69585fd60a772b9fbf300bde8d80256510c267a7fba688969b4e89e06d8faaa7b1369cdbba78b19520fd0a8e3e0858e4ca6457e9444053ef08c62efdf1eb2176e81261c5f2b1febf72f4047ef2981ea380eea8a4429fc107fa6f96bb9a60feb70db7cda063be92d3f650098f85e14b1a45dab1687f1e2d4e3b11099286945c997c1cc2f9f908367ce5af0d7fc4f2f770cce3184f98be164dfcd15b896c5290e46dbc2e1458683022d9980d87afe5ff98453b33f1ce50617b33ebe0565bfe0166c1af28cee2ff788e623c0614b3db3ca8b04ae0ca9fc6a036ba40269a3da9cbfd2cd1ce2c8690a6df13ed00cc5527e60edd183603ffc5fb3fec14ee3e6c24bd708192756a2f10d6c3d54102ba4b38a06f9ca37fe3e10c68a4a6bd3858015f2b18f82e7e446a347c488890046b48a02d406ce2f4ae39ab70fa019f4c5086aa861f73f8b343eb433873c52e0901d1794138c64ea8943d6763d334920c5d57014a4ff9b2e163abcd9af5da269cb8dd672e123f737a896c99551b4c610bc68bb68b30ba5206881e18fd2288a42f", + "tx_json": "{\n \"version\": 2, \n \"unlock_time\": 0, \n \"vin\": [ {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 76022784, 31125283, 2361845, 259559, 19009, 22858, 22446, 16480, 2893, 21364, 5493, 18060, 801, 2264, 8008, 94\n ], \n \"k_image\": \"987605d678e8bfb17e8d2651e8dd5c69c73c705d003c82e4e35d2b5b89c9ebe3\"\n }\n }\n ], \n 
\"vout\": [ {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"507533a540f57548b44e011305bd39d6a6a64e5ccc82bac1833b6ededb9121f3\", \n \"view_tag\": \"69\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"b32a2175124a8e0a0fd95a37d51a290a397dc74637a8cdd73ba4dfdec91b2c0e\", \n \"view_tag\": \"c4\"\n }\n }\n }\n ], \n \"extra\": [ 1, 89, 239, 82, 136, 191, 78, 163, 186, 15, 103, 128, 65, 111, 238, 222, 195, 41, 65, 107, 127, 215, 138, 245, 31, 73, 79, 62, 184, 157, 185, 0, 167, 2, 9, 1, 129, 12, 220, 142, 156, 231, 49, 89\n ], \n \"rct_signatures\": {\n \"type\": 6, \n \"txnFee\": 122720000, \n \"ecdhInfo\": [ {\n \"amount\": \"b7e164d1d01bfc65\"\n }, {\n \"amount\": \"b2f30e77c390a5fd\"\n }], \n \"outPk\": [ \"68d73b4565628faee8a1f9bd7cffe31a3bdb37ef7fc670e18f6190cc20459a32\", \"ac6e15f56e257ed3471f4bbf5394501db831fa2d05b784f3e5091f322c1df206\"]\n }, \n \"rctsig_prunable\": {\n \"nbp\": 1, \n \"bpp\": [ {\n \"A\": \"326379bcc24fdcb0e68dbca4cbfac5fc5238e31b4ea8619c5eff8776a42e9c9a\", \n \"A1\": \"88287c0c5fc24c5a1872237e971762f9ec12853ca27eb3b1e611d3713bdd4778\", \n \"B\": \"bc8585ef0adb3e8f144518a22d2eb4e77c526335c5ad9f5c4754740eaba4623f\", \n \"r1\": \"59cce4ea9eab890ec0976f3e03bc08f3218192c56d82c36ddc4692f5a96f8b05\", \n \"s1\": \"6f11c7f15635ba12a22274a0a171999444f21079496b016867b7dac9f05f5f01\", \n \"d1\": \"41b181816c004f0f3033d7e0a9819f5a8623a637dc7d949b34c0da6eba6a7d07\", \n \"L\": [ \"3c7219d079608c6ff8d04b0d4bb425b73142e84750e7e5548ef86cc02ac0b911\", \"32c3e4df24b11a32f8c30a483bcd5b903b2cd87197d8172bd4bd190c4034322d\", \"abd82600f852c346716518c93c439a799a2763ff9d2457c47f96e22371440cc1\", \"db354184fddabde2d51512556b7d05ad6be9f44fbf1671834f6fd45a6f8f1f09\", \"edf96551160fe83c207fb8eabab30ed294aca1287ea196f1640af3a183cc7891\", \"a680264c41b34d56ab4219e8175b1d847de3174d4c298cb5ac40e1c84169b0a5\", \"fbee401b237ad92093cb34b6752229e03cc7ceb2546102360ca5fe823192024b\"\n ], \n \"R\": [ 
\"75b50b22fe9e7cfb29fec3400ea8a03b047ca1c92af76d14da40589415116b92\", \"2b080f61446f1e5a11aaaa84acd4bee2e67f25c8a0c77db1e3cf8d9a2f57a1f6\", \"a622a4c76475ecb20fb1b6221caa4be32876414e6d6b0375582fbf9c1d504039\", \"81d303b53d877af580443431499ed7a030d01a618c37139ac4fec11b3a2afa1a\", \"ee51a3605abd36c3cc05c02348430bdec52a481e04249a7adde0a4b718d459ee\", \"1aa67e4ce05980a451753b0e9b7dd543b373a137cd900f81929699bfa4fd5bd5\", \"1d096c93673bc035031bf18d1e153a5b62e5965f4865827d7c871403fcda46a7\"\n ]\n }\n ], \n \"CLSAGs\": [ {\n \"s\": [ \"e38ec6b2c7c7de6f6e88e7e32514fdbcf04bfdc851a1a052ad32052ca4a74f03\", \"9ca030a488b3f3160043add8d6b6f7f5275c49ca6c5f3e6556641b5d08c80f05\", \"3adcb2928105c91712723592a72f37c0da5ec12f62325cf8ce9a9d98244c8307\", \"15d805c79b3e0d09ff094563869a28864beaf3d2e4a257f2ac7e05b253f84801\", \"fa53fc02c27fb618a582eeb68261458ff5a24f19db62368984e35c13a9b3b10d\", \"e92a20b1f757574390e8a9e6cf58a33e6f2d7425dd5cff36f15b5992cca1690c\", \"edb97159b128e4e509bb7735bbfab47d0e925103bd69585fd60a772b9fbf300b\", \"de8d80256510c267a7fba688969b4e89e06d8faaa7b1369cdbba78b19520fd0a\", \"8e3e0858e4ca6457e9444053ef08c62efdf1eb2176e81261c5f2b1febf72f404\", \"7ef2981ea380eea8a4429fc107fa6f96bb9a60feb70db7cda063be92d3f65009\", \"8f85e14b1a45dab1687f1e2d4e3b11099286945c997c1cc2f9f908367ce5af0d\", \"7fc4f2f770cce3184f98be164dfcd15b896c5290e46dbc2e1458683022d9980d\", \"87afe5ff98453b33f1ce50617b33ebe0565bfe0166c1af28cee2ff788e623c06\", \"14b3db3ca8b04ae0ca9fc6a036ba40269a3da9cbfd2cd1ce2c8690a6df13ed00\", \"cc5527e60edd183603ffc5fb3fec14ee3e6c24bd708192756a2f10d6c3d54102\", \"ba4b38a06f9ca37fe3e10c68a4a6bd3858015f2b18f82e7e446a347c48889004\"], \n \"c1\": \"6b48a02d406ce2f4ae39ab70fa019f4c5086aa861f73f8b343eb433873c52e09\", \n \"D\": \"01d1794138c64ea8943d6763d334920c5d57014a4ff9b2e163abcd9af5da269c\"\n }], \n \"pseudoOuts\": [ \"b8dd672e123f737a896c99551b4c610bc68bb68b30ba5206881e18fd2288a42f\"]\n }\n}", + "weight": 1534 + },{ + "blob_size": 1535, + "do_not_relay": false, + 
"double_spend_seen": false, + "fee": 491200000, + "id_hash": "63b7d903d41ab2605043be9df08eb45b752727bf7a02d0d686c823d5863d7d83", + "kept_by_block": false, + "last_failed_height": 0, + "last_failed_id_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "last_relayed_time": 1721261665, + "max_used_block_height": 3195155, + "max_used_block_id_hash": "c8ad671ebe68cc5244ea7aed3a70f13682c3e93fc21321abcb20609d42a5b6e7", + "receive_time": 1721261665, + "relayed": true, + "tx_blob": "020001020010d6e18c2ffcc1e40483f90be9b30b9af70af8ac0dc3bc099313ccbb01c79f04aa25946bc7a102ac37c30a8d1d563cd0f22a17177353e494beb070af0f53ed6d003ada32123c7ec3c23f681393020003416ee9d85c13be6a1ad2a0b9a5c3fad790bc3c266cd0eb55f1d38959a2eef8a49b0003d6292302f486945eb9bab6beaa2564a4dbe09cf8e92a107eb8734f0f8a09a1c9052c017ce27d3675d2db5af9968b93733350b10749e9fa4a0c1bfcfdc86c550088fdd1020901c6821af937903f5f0680bc9cea01e240eebed50fd3fd162b7b3b54185d5ecb8253de6123d50449a2746c6a82023347509df7efd5f8106b4e3d60dd0d7f86958d05ee5bc8a6963af900849d2a4118e031bcaeb2788aa2a1e56b036ebadab201d92af1585cb9cb3db78d3da41c66221180e1c8128a35b304ae96de328ce538d160dd66e00a9b57988589eb48c4e131b24bb266de41540a164b34008c591871cd93fe7c3fa4aacd67f35315c0927e7b4add9c63e94a95732b7f12cf1346b7b26e1d1d893d582b2b0787f00f331e787749b14a3e0cb5363537cb4654d90a4c90001e08066b76e5b7cb3d4a9a58b88c613f5b4ca2fb0a875becee6a26f287d14906f3be11a310ff9a3bdcb579818bb958229c1b0ab3d4ce9b935723bdf8c888190d071089e5724f75ce14fe58360ba419f7e7bda58a0175b9ce9b12627b599ae896d060e7a21f3b9d65cf4d386aa6ac044e2283c64b92f4b3f4234ad7e1036fa96f53f187ed53d0c753805df8de748339f72e901ca157aeabff55f14c451f0084e725a55ffc85d7ba5d26a7f678bbbcd1e40e8a3a5400620ea4eee86796b7ec2266b20f8fd361a94759654d7d39ba8cdd6b3a002f140dcefe0f498a6b33655bfb2981ac38db4dd0cc6b7c25d408bbe69fb7cb86114fd7aea4136aacfe15475f20f3d4f7cd51eedec4e4a1890fcc0d29f18d6270a84d9c4aa6ba6bf9ec3900016057240713ad161ed5d9dd3a852ad64f7183a57b83015ab461f36e5ac1ed0bb0d2f7e1ff6c37cb4ef43c6e65f11858490
d29b6fff50e91acfb2799fa5b8c0c9e9f913f2941ffa2414ca459db6de293f9e8a231152f6fbdb1dcb2a79bebb5687870f69ec96254f56d963bc8e283a7fdf1fbdf4f60d5d97d4224d9f1a9d4c52c14ba4ffabf5dd9955c0e075da49a8f8d84686d4213331b64a5f770fa35260ff1e1e13ec9da2b7aa35b728810febb734410cb117e37040d2a3c3198b816272e8e76fca9199064b01511b86f7578718590b6ffedb78a5ae175e4efeb3ed71028913deb2e52e5f17d3792990cec53ff4b834616d77cd50e32c84b95422b8436645c304b22c2018c6308ab20f4f7e06c5f67bd59c6f106fabe2b14dc1713a4b13f6522aa74f40410fd14ef905febb4e95ccd80265803c37e285ed3939d43e368b20b9e49b955041dd3d895f4885e8264d99f574d4da3e94b785e5679e300b817e15f1c60228f0745d6faf106d0d36d000be6d5779d6d83165dec7ae167ed7fe6ef7a688f391b0e93c15448214aa121b110d4573e9f386b432451f5689b29a81cec1497ba9c17022f3c310407533b549210dc47186bf117beb821a3ac0d255a31bc57c795b4bd02982a42b4ce5d5a86dade14e37ef9a9a4870687800d2d8ae637cad458e3c7a80ed3e47cc7f6af7523f1ed6bdbbd3f4eaeb6fb9cc9ae13a905618821ca41f13902430b77134bc29f5319b96b17c9fda24cc72af61a890b422bbcd10fd323001605dd25ae17c9c1492fb180e3ac206e780db485c3f8940e1d80301e962fb3384d05d187fdf65cf5b1e2ad3e379f5a765773a3f196bc0b835ad305946328ace26502dc43c5790a5ca6c3076206b56bb1c4bc53bfb4b0fba11dfa24809087e6a6760d7475a3c2465b02f2068d356289d4f17ee35fc0956deb061a737fb4bde998320cfd3e85d3e5113062b0b49f860318d909c9cb714758e6203a2af2ee69e5bf58032014aafdf920276f5a50ca4c3a1b4b7c7f7099ea414e0982bc806beeb68bd303b51723220ce63175d0ba8577400c59109d97357692b9ace8ec0a57992585ed07bea256456a8600ec85716f9bf7dae9cd0cf88ccc2c542600e12c41c7403cf4056db57e4fe2422adcc28c3ec0a02dbb6b1b5926d4012ec3f27ad757faeae60cad", + "tx_json": "{\n \"version\": 2, \n \"unlock_time\": 0, \n \"vin\": [ {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 98775254, 10035452, 195715, 186857, 179098, 218744, 155203, 2451, 24012, 69575, 4778, 13716, 37063, 7084, 1347, 3725\n ], \n \"k_image\": \"563cd0f22a17177353e494beb070af0f53ed6d003ada32123c7ec3c23f681393\"\n }\n }\n ], \n \"vout\": [ {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n 
\"key\": \"416ee9d85c13be6a1ad2a0b9a5c3fad790bc3c266cd0eb55f1d38959a2eef8a4\", \n \"view_tag\": \"9b\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"d6292302f486945eb9bab6beaa2564a4dbe09cf8e92a107eb8734f0f8a09a1c9\", \n \"view_tag\": \"05\"\n }\n }\n }\n ], \n \"extra\": [ 1, 124, 226, 125, 54, 117, 210, 219, 90, 249, 150, 139, 147, 115, 51, 80, 177, 7, 73, 233, 250, 74, 12, 27, 252, 253, 200, 108, 85, 0, 136, 253, 209, 2, 9, 1, 198, 130, 26, 249, 55, 144, 63, 95\n ], \n \"rct_signatures\": {\n \"type\": 6, \n \"txnFee\": 491200000, \n \"ecdhInfo\": [ {\n \"amount\": \"e240eebed50fd3fd\"\n }, {\n \"amount\": \"162b7b3b54185d5e\"\n }], \n \"outPk\": [ \"cb8253de6123d50449a2746c6a82023347509df7efd5f8106b4e3d60dd0d7f86\", \"958d05ee5bc8a6963af900849d2a4118e031bcaeb2788aa2a1e56b036ebadab2\"]\n }, \n \"rctsig_prunable\": {\n \"nbp\": 1, \n \"bpp\": [ {\n \"A\": \"d92af1585cb9cb3db78d3da41c66221180e1c8128a35b304ae96de328ce538d1\", \n \"A1\": \"60dd66e00a9b57988589eb48c4e131b24bb266de41540a164b34008c591871cd\", \n \"B\": \"93fe7c3fa4aacd67f35315c0927e7b4add9c63e94a95732b7f12cf1346b7b26e\", \n \"r1\": \"1d1d893d582b2b0787f00f331e787749b14a3e0cb5363537cb4654d90a4c9000\", \n \"s1\": \"1e08066b76e5b7cb3d4a9a58b88c613f5b4ca2fb0a875becee6a26f287d14906\", \n \"d1\": \"f3be11a310ff9a3bdcb579818bb958229c1b0ab3d4ce9b935723bdf8c888190d\", \n \"L\": [ \"1089e5724f75ce14fe58360ba419f7e7bda58a0175b9ce9b12627b599ae896d0\", \"60e7a21f3b9d65cf4d386aa6ac044e2283c64b92f4b3f4234ad7e1036fa96f53\", \"f187ed53d0c753805df8de748339f72e901ca157aeabff55f14c451f0084e725\", \"a55ffc85d7ba5d26a7f678bbbcd1e40e8a3a5400620ea4eee86796b7ec2266b2\", \"0f8fd361a94759654d7d39ba8cdd6b3a002f140dcefe0f498a6b33655bfb2981\", \"ac38db4dd0cc6b7c25d408bbe69fb7cb86114fd7aea4136aacfe15475f20f3d4\", \"f7cd51eedec4e4a1890fcc0d29f18d6270a84d9c4aa6ba6bf9ec390001605724\"\n ], \n \"R\": [ \"13ad161ed5d9dd3a852ad64f7183a57b83015ab461f36e5ac1ed0bb0d2f7e1ff\", 
\"6c37cb4ef43c6e65f11858490d29b6fff50e91acfb2799fa5b8c0c9e9f913f29\", \"41ffa2414ca459db6de293f9e8a231152f6fbdb1dcb2a79bebb5687870f69ec9\", \"6254f56d963bc8e283a7fdf1fbdf4f60d5d97d4224d9f1a9d4c52c14ba4ffabf\", \"5dd9955c0e075da49a8f8d84686d4213331b64a5f770fa35260ff1e1e13ec9da\", \"2b7aa35b728810febb734410cb117e37040d2a3c3198b816272e8e76fca91990\", \"64b01511b86f7578718590b6ffedb78a5ae175e4efeb3ed71028913deb2e52e5\"\n ]\n }\n ], \n \"CLSAGs\": [ {\n \"s\": [ \"f17d3792990cec53ff4b834616d77cd50e32c84b95422b8436645c304b22c201\", \"8c6308ab20f4f7e06c5f67bd59c6f106fabe2b14dc1713a4b13f6522aa74f404\", \"10fd14ef905febb4e95ccd80265803c37e285ed3939d43e368b20b9e49b95504\", \"1dd3d895f4885e8264d99f574d4da3e94b785e5679e300b817e15f1c60228f07\", \"45d6faf106d0d36d000be6d5779d6d83165dec7ae167ed7fe6ef7a688f391b0e\", \"93c15448214aa121b110d4573e9f386b432451f5689b29a81cec1497ba9c1702\", \"2f3c310407533b549210dc47186bf117beb821a3ac0d255a31bc57c795b4bd02\", \"982a42b4ce5d5a86dade14e37ef9a9a4870687800d2d8ae637cad458e3c7a80e\", \"d3e47cc7f6af7523f1ed6bdbbd3f4eaeb6fb9cc9ae13a905618821ca41f13902\", \"430b77134bc29f5319b96b17c9fda24cc72af61a890b422bbcd10fd323001605\", \"dd25ae17c9c1492fb180e3ac206e780db485c3f8940e1d80301e962fb3384d05\", \"d187fdf65cf5b1e2ad3e379f5a765773a3f196bc0b835ad305946328ace26502\", \"dc43c5790a5ca6c3076206b56bb1c4bc53bfb4b0fba11dfa24809087e6a6760d\", \"7475a3c2465b02f2068d356289d4f17ee35fc0956deb061a737fb4bde998320c\", \"fd3e85d3e5113062b0b49f860318d909c9cb714758e6203a2af2ee69e5bf5803\", \"2014aafdf920276f5a50ca4c3a1b4b7c7f7099ea414e0982bc806beeb68bd303\"], \n \"c1\": \"b51723220ce63175d0ba8577400c59109d97357692b9ace8ec0a57992585ed07\", \n \"D\": \"bea256456a8600ec85716f9bf7dae9cd0cf88ccc2c542600e12c41c7403cf405\"\n }], \n \"pseudoOuts\": [ \"6db57e4fe2422adcc28c3ec0a02dbb6b1b5926d4012ec3f27ad757faeae60cad\"]\n }\n}", + "weight": 1535 + },{ + "blob_size": 11843, + "do_not_relay": false, + "double_spend_seen": false, + "fee": 236860000, + "id_hash": 
"b8a15acb832330b5070c7615fa1bb5142e8a45ecca022c4136f61dcbcc493986", + "kept_by_block": false, + "last_failed_height": 0, + "last_failed_id_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "last_relayed_time": 1721261659, + "max_used_block_height": 3195160, + "max_used_block_id_hash": "2f7b8ca3dbd64cb33f428ece414b2b1cef405cfcd85fab1a70383490cc7ed603", + "receive_time": 1721261659, + "relayed": true, + "tx_blob": "020010020010f081a521c5a1d41186eb5698f516b5e818bbba1bedb705a5d00df89f039357c73b890bf54f9217b344cb19dc006e92fc1e623298b3415ddccfc96a8cae64cb7c9199505a767a16ddd39bb9020010f3eab531b38b98029ff116d1ec1faad00187d521d51bc7e80487c304913cbaac01629510e861df07da0fd656ac13a64576e7af5ca416d99b899b0bafef5e71d50e349e467fa463b13600020010e6cf81308cc3e702cbd661ecd64b8650a7bd029cdb05b7e5019ae20cd8f801e87ca479ba3c8a24d08c01b667ca559feaf79de4445ca4d2bcc05883b25ecff2f6dd8fd02a9a14adea4849f06f020010d5989d32d582698beb2499c931dcb2379ae705fcad02ac9e0e85a902f2f30193ed019c0796d302a85ec50e872bc5b7d94e661c5eb09714b243f3854cc06531b1085442834c9e870501031b73da02001084f7ef27b2c2e20acbd767c49a16a092298fe60a8a880286d903aa8d02b2bb1ecb8d02d4d306a814d612f25ba605a5ebf4914f887ecdfde8e7ef303a7f2cc20521a2a305ba9a618e63d95debfb22020010ea98ad1ad2e88618ddf413bbbc9b01feba1d908508938e15eca804fbd803f4f50bc154c661fd60a706be1dae03a2b08a090f611ea1097622cc63a49256a2d94a90b8dbaaa5e53a85001c86d55a0200108acfff31e1c28e02bfd8139bd603e09604e26387af0586f501c79401a059800a9e24e10d9629fb010288f7594b26dcbaff22f7e7569473462c49d8fb845aa916d7a7663be8b85b85530200109c83ed30a19a25d3a9bf01a08c1cbccd14fbc90cccb59701b953c0e403df41e69608bc1dae33c40edc2482037d805459f05d89c92443f43863fa5a4d17241d936fc042cc9847a33a461090c5020010d4a2f828fda6f308b19f8101eacab90199cc02ffae01eff701a4c005d320b32cc10bca9001b101ff59a906cd0365bb760c9a31da39911fa6d0e918e884538f0a218d479f84a1c9cca2f9a5f500020010b8b18c2f80a3c704dfa34e808b02d0a704daa501ecb2039a29eef503a19102ef07669343c20ed525a10452418ac25be58fbfcc8bd35c9833532d0fa911c8
75fa34b53118df5be0b3ba480200108cf3cc3296f472e2e75184ee0aa3cf0483bb01fec70a96c602c562cb59f6cf01da07d861c14ac411ea0c40e57cb9a9f313f864eef7bf70dea07c2636952f3cbff30385ac26ee244a4349020010b9a4e12cfa958d02b4e18705e6831da49807c8a007b7c202d3ba04b40ba7ca02edb402b4b803d38c01b431ecdb01e50438d739cfb68aba73f0f451c7d8d8e51ae8821e17b275d03214054cc1fe4f72d60200109ccba132acadc401c98731a0ef0ce461b9ad0895f702a24ec955c68401f975da32f616bf07e530c81c1eda8e08b1024028064450019b924eca2e3b3e3446d1ac58d0b8e89dc4ba980d020010cf888128c1f8fb0ac0f221efe24fd7bb14e3a40fddca0bfdcc0fb78f02d7ed02b5a0018e09be10cd1eb10d89051cccfcece29fbd7a28052821fdd7aac6548212cab0d679dd779a37799111f9ec020010db90af22e0eaf40bcbd19b04b6d0b401aec905f48402e5bc0f94cb08bf9808b9db13809701b817d49902fe629903db1b05138378dedfae3adbd844cf76c060226aaeddcd4450c67178e41085d0ae9e53020010a7bbf4299ff7fa05ff9bd201b1fee801e1d529b38341e8900f81a908c78b03b0e101b89201fe6faa5aa701d501ce050007a41ed49aa2f094518d30db5442accaa7d3632381474d649644678b6d23c00200038c2ba531d06e4ac990213d765751e89981303d4714d81229ac09e385c8ac1cd3c900030c9d3c41171e04e42f6ed61a932c2ecdb5c0103d7cdd909fa0ab15881469c66af52c01b848c2cbf8e0ee4a984bf645c0e6118450971072a18d23b6cf716f0bf681fd40020901d0fecbabcf50dc5706e0e4f870dc9c59cdde692b10e76b3405d76aec5c479df6fa3357cbddcb22e20d2b458011dd31dfc1c78316bad7ed9ffd39ba12b86a8ad571f7f5aa9668806d10d2b03c2c84c90690e4ddeb3dc6f870fbd5df5b0901c55abb99bdda211773d14dcf7cd68ca2c2e1df64896976c36da0b72b9b0b5abbf68499752558d3b7b342a554f91ea3171cba93075d35968565e37020bd60f6bf0086548c6fd600cc04f704607fd008bdd1ef83ed341109114d9301c30a86d3d87b9a9a3a6cf62d4aedea2cbe4f48af3a830c33486b3e3c7942ad7b4dfbc322076cbaf3b5e0afffc36549482dc4e5220ad7be9284470405e2a889dc4b0eda950b9eacce92f4b6fe9bbb9f853cd5a68b1ea882a66fc1f86df6b7f21ed49e21fb07070c2330673beaf7961285d721e00c55e6d97b77efb12cfcdac429cbe92fe7ed641ae9878d713edc784573be96c47e80dec59c487590f721dda777066a66a388a8cbd007967d90e323d91cae997f9449518ca71dd5e0473cd88d52a1b63f4994cb355fc68976cdff244295ea25e9d579ee
ee92211f52ed942307827480bf639e9b945e1f12d5a0369f990945dc143dcbb3e7bb7eada7b4e10ccf65128ab3a203d27441035465a96cac073e12db24bb696f38587021576582596bd0351ea8dcd30123af3e518560a843188e19aa666d9b70d7c55b2072c3813f05084de714988a9a07e21b543d2d50a2b7949e9c6ee3638731aa1f19909ac0978ef10ebe83e52558f6e96814d9b64fb0344d8a37e43e9933a9bcb087173ad895400a680f3f7919a585572b4c0639855b4a146492b00c82eb9d7bd9566aa94387e79f80561676a045e4be44d3a42061a710a8c5be6ac2af7531504911a4e17d27249f7395c40ca90ac64e8a0eaee615e0a23be18c2a2b74e5afea867b60acf8ddf7301055d2e9b6556e2b8750caaddea43764d08b5dbe1497cc7db2be2ccee30629fb313078fdb9fc4f9f01b9d7e2e9cb90fdf2654411be9914ac32fe797c427f2a3f5e0ab974226f243363e401c6214a87d6d33bd5d66a6f74f273c9eaeba1210b64089c7ca7418d0ae9ea96a6e3608b004341be48b3fb658836e80e9c3c2786764db9e239d115c206e2c5d846b20c33614aa533f068a9363f9e78b728faedd5fd5ac6c2b427dd0b00278d9000c2872ff0d2506b842847a65eaf93d1f5ed3267d63cf9128158e1d307aa6a0c69a73c3a1b9914c285981e40de53329e666a98e065354dae1c92765d0af2a1b19b7a3f102fe8a7ba53487879d77c8df704ee4785bd27a6083ab60ec00681a849c4a7064e6265c60e06924fe32cec542a40b1c89d81356cdb200e8429033f75ee3e3749acce3d130cca4e7892f68cd7cdfa8e122bc78530b2e8d0176d07ebae1b0281338c2b2ec14d71d211f1e5500ea3bf298e88b2bb39edc1a4614d0d5048bbe60650d351ec2ff4cafb7f29ed646885572bab7ca50503bd7cb1a6e40775276f4e3914a473acc0a6520ea22947173052c525ff7f69b4161bab3f4e55097adc053156eda0d8d255298da99b7612756be610b5d4c2d4c5622c2e3d45a00a2847e2e122a38f797072440f3b2860dc4a184d42f64b1a3db4c26d43cc3c6a040a1df4fe0cb81346469ee48b4c64ca3a163a234dd107cfa155d610e859d04405d90e1d6a6c4f5b5cd93fa65fc80abff6607d8dc1dd932abb1a760e5a05b8d90003f7429d83df3df24cad2012bd01134cf6ac445145c54434b6e12aea99ed030e66902fd5bbbefe4ec9d59d9e38da2e3e182da804b9cd69fc6516e5c42d12e409807e7e4661a8300cc35c9a5aaf05c74f315d301d3b58af8423e8b3a9dab2a5f47cb7bac961689da9c8480fdcd7699abfc7b1846b5406095930e305225408a80fff283d2ff7b05766873ea25e37a51af6e80fe42c040c09e1254fec0c561d7005078faf699880bad1bff0adcaf0fbbed6a6b708e993738f
cf3f51307f49427b0dca0a31c36ab9453b909cc184d4c63908b94427deeaac5521a838ce0e7c6547082dac768455bb669ab5297535a143999b247d8999b9c8d041a2eb3bc3d152c50ae586639c731aa945cc0ee851bcef18a27d942354cb8d66ea356d8c2c04eb490e54f4fd74727b0237fee689ea8802a8d8c885884c216702e32c8e8769430e8709954ea828466f98d09e756e5ed6c12ef5eff3a4ee36c990542f935a460da469037bb52fb8e07f50801ab6a9b81b8eab75b73a7fe93b9d65b75679b3103d5e880aa1d7c47088f50c690def8612384bfd2f01dc17e184a9d0529735d34bba8e0704c1d20036078d9394a593de43e28afeed8e9f9e37a283de8bfa383f333281b502baab61d08ee55dc83a7a7bf7b9be015aa90cfaa3cf2c7976eb1616b6a53277079c48c0cd37af84fd9804839055052758c47dcbf861d6f481edd26426abec9b0f23396a7e14bd9f6ce9168c88eb5cc00c16e92c7133ef6cf66c16ab51d6209401f49e4a023ecaac0ec7ffac5f168d4e7d4ccf098bf7c59a769430b9131bad6500fe9c562cafea11ce248b40a51faf4a9a380b9abf6407f1184c05ab6f1c2caf0d83d399b9afb3f50c7d1b305bb321c14df0787e226ecd2def7bfc6b7c312dca0b255e606d60022c07faf74422223d45772072dc8a7452802ec6e0a208deee8d7da374add905f27f162b82bd779d88d0f533b9cb550de4b7a51679ba3469bf5b0d003038393e91408af3ab39972995190a27c377fd568695b06cf0a2c0f374680c7e6b2a71d81eb28f336edf96840978b03a9c42bc72c9004dea1ed37d1208670da75ed7d6209b278e9362c880ad85e32253589c0ba3fb6a613df14128040e4d0b37dcdb26edc3ed3d88b1c2608439d2b930cfff4eea3c5386d27928ad0f8af70b6e81677aa7c2ceb9280628cd810e5b7e175393342d5cd560ca14ead9574bc20cbf0d7268a249e56526af3029493eb31fd3d20160aa32d30a0f53c44f9d82970105a30031d96eb6efa0bca4d61a6739c37cde501a33ed7de3b8f1fc3ea696830c52fa1350698901ad198fa276f73406da4a2d4cac670860fd0da77c42d9c67305e4044076d2fb8bf2765ecb825e468e698130674102dc58f3c7fcb8194a6ad60546555d48c63f1354f34db8b6c6a381a4052cf4fd6a30ee2274af2da5684e3d0d32f2ad376e21aed1ffd8b3c0e48032a16283c3ce6f12ac2cc90b389e81f59c053fc9310d8c947f78ea37f66d2df64ec8c4e04b8bc58801a6a362802ad8c26106743aa699d067999feb437d92b438ca45bf1a8d1ca5106f6c8609f277a360f70b9237b6b9a393af4c7af6da7647c4bcb7b50da483f82478af3d99bcdbe4ef5e00464e5e541e187854dc98a69aa48fc444019a803cb71719f44b175e96833085
0f4cc9c96f6c2c24075edcb03b79a8d82d83b49db4a33b7eb993d48e4ece3d2a0f524c847d7f15c03e03708b96b6ea8eaca6a4657c3af0244e6401779a0ee21221fa92f13efd7721c5cc4500e7cdb8254997d8b73a7b2243f580a5e91cc246910e7fa7d13567b4164e6911f6722ed13aadd2a76f127a3435a4bd919836c363cb09ebf110b68ed56b28b4e8bd5d3e4568adfa509e914d98eb21aeaafb6734e42f0710aaa746fe4ea17c423f38ac951dd1007dc17486d2b823ecebf9fb9dcb62dd0de523f315c266de82c74f0378fcbfd2b030c0e1e30b034ec8f7d27946b8c86403863350520c89e34975e27b05df44cbcb017e0abbb6a756faf0b34f43f27529079fa6b98c8b7a26493a215c954448e1958f6f1aa558cee11352fceac48273440c199f152d34e7d3c805b35e61a2ce914b5e03dc5d7858ec15c44413e5b2569c00f31567df8324d01565d4e4e9e04d86f75f24dea9d9d24568cc0ff02740c50c06e0c1712ec5b711aa78a21e7433da47430ed76a9e5a20748ff5df632a9cfdce0d1e54ab98f2bf59db008b5c6a03c2d47fa12540d1c927fde4da851325e28afc0efc1fdd8a0cb2544e57306a0b5f486299a713a8cd4aad16b38f3c1b789937690b35287cdb337f33ce203d6303892c64e8be17016436717ff0c5c1af6ad7082e0b0bb3ca7bdf4a6369598e25d28c42189fc879daef22144424854ba28fef20680b5d32e9ae7872d9c134d6d9fe5b0ffe9352ceabd3805a6af5ac2b7b6234af960a77523d6e80b27849bd7f784a98d2422ff79c902f9dcbe4609c429e5b4a9fc90b15bac05d97ba07fe3734d3f7491c6a962ae942f382a7d28ea1fa5302c51bb40ccfdbd190f3286cf798bb345b51723f6e8ea8515534773a340613d8612db8f9adfff3278d190f7894a6ae534e2ef7a0a8b7b2fae577aaf438b62ff0e4d79b8a0856a76827f3e29042f3cb444e67dc16633c30d8db17c541ab4db7d81fc5a32f00b3609aa0b8d2544843206117a826e456a41aa87d80d320c09f1ac0f93ed8e30a22b70ab8b6be5dd1288e09f9c20906d6bca5f0eff1e2ed57da9465ce60ecc00faeebd450c3c3ad2453b75c18bdb725cca06d8cee657c77564ef03cda16d06f0d8e5ac1cce961abddbb5879738d04997263622263d9f27ee8d5868814afc7a2024e8807bfbf6943fd0792d13b9bef4ff83ed82f0c34df5f9c42aa472cc2c1ac023e152321855eb77e8f762d5b6937d9e7cb3a9f3c2419b806203aa2deb57ae90680a3c83cdcee63d0bf296027ef22cfac0938f9b115359e6c6d91306dadb2fb0441107ab8a88b18f0bde29fd68432767c7ef193cb6815c4456eb0d917b9a9e80f4cf02dee189d4b8accb839002f8b6eba365c9f3f693e156932c09941bfc64600fa9a1ed117e990
68c3220c9f0e94b9d7c122c6c391e8f6dc0ddd0ec8097f4d0da7e3987fc86c5523fc788c68b25f86ff5e347849e71171ff0f5c6b15a0b501076b1dcd3e4ba22639beba069d6c813548c99db963077c1f2bcdd173f38e79bb05b450a4748c5959889f22d53f62476626e22587a023dfc4517d49396b0c2efc05817d8187aee15d20ef0f623615bf4da108d2429e596dce7d8e9fe6e8596c06009824b7bc0d7dc58a097658babc3701f19a7210f2c197eea886c5d00dfc50c108570002d437713df562fa1e3d67f70450db016570e1119532ed0e3e1bdcfd3155792e33a1110e87d24c8eedbd1b5d731c4fdea67a7d31168432e7d7cfd6564706d6a5c45ffc307daa0cbc600a5e6e3c9c82d493388ada75deffd3c83906dba60d6de0af97e952ddad74e59b92013a6e55d63a0289e09feb33641ae2bc1b2fd60e33ebb741b522f6c0d47a331846edd4f4d82223d193e0e5aae4b88750280b5702afb63b883a67e6a442ebaf9c7c60be8037a040c0a6e1814696ca2238717ecc0ef670db23a3063db45d74557d715d643255f5cc2b02ce024a42a2c4f48d0a020c4fe82d187b3b6edb7e2e9444cb2cb3b8862523febcd7bd5e7549b5031eb8c90c27907eadeef4328d325dfa862735dc9ba8ae26df2fbf9712ac4fc813122ea10c0ffd43cf70c217fbfed3edd012d66d959d03aba43873f87634a3ec91efdc920986321b3ae6152c4e1688b81775c069fd5a41e71e2dae304bd5c479e3247ee20a94266393a0761debae592b659f32e71c39343636ac1ab2d3732af1fa9778d701fceb0b71bad10c860e51e637430e6520d5639c6a1c430aaad3802c30a92b500fe55616cfb92ae1ce65f3eba6285da428c7849d24918f6d3bf5bcd8b37db6a403a3220064e82e94bc8a3df93e39332b133bda28c866f30e45c824039e448ef70459086810004d7613201583e9497b5c17e878238bc1280c87ed2381b8fd30860656df5be83e02f18e2308f2546a553299ca285b53c7f66ad0297c907e01dccb021d0e21631768d826068eeb457b877f4009977de0fda3d981dc3887515060c50b6cbeb0704498abea125f623107f97b33daf3ba02d619b6edb8223167fcc1df4c1909cb506805d1f464b1ebeaa69a1b1a0cf02387504296c7ce63c61e0385f00036dde496366c9988b48d904c4111379548fa829d55787ed9c629ca873862a40648f6f8e24387826471f8e6047baf0babff8f21cdfb81d4930749638856f2340d306f636d277ae1792f84a293e4f66350c75023a1b5967eb40a43c440a246c60342d8e7ebaa4ee68b8551b9807d7b308574a90a7c54134112d6fdb3151af2c90a2463bad24e336dd34bffcb34713b6802e711a168e4afd30bfc3dba82550fba036a8d9f5070875b622865ce43fa773e
77ebcca52dd7fb846aca9c133eb93bfd08030bf7d2f95f541cee1f8ede2d0099177bc447d8c67a6d3b05ee3183498b1b0a9e8c271f40e36941df4924502e31d8122d28fef01063e9692152371ffbd44d0d54b527ba4a550b6191a40192e1735f9e9634df5647c3580179ed7f11d893890e968845f97aa642bccfb6b9e84a4d68189d59fc729f23059dead12b5bce438403aa7a1ef89da4ac0e4617a978acd99ce57213509702e31941dcab7530c902690a7da1e1c0ae121c31fa19cf8889ecd8b61cf086be7cfe695b172a36cdc326d504e55918e88a21efc120a54ffb6f852d5b97ccd7cba3ed1e44b6fda91be7b5de0589ee42794cc797c25249a9a6d40f873e52b491389065a950c8aed5b6675f5c066d1dce8ae2cee39a8f7b1c4adf5fba44fbb41a81b06a5ca9b597ff6081d89a0edf9d832bc866de4095119e7fb0eaeb8930b705d0712aa17e11754ee27da80105befbd6dfc2d3e6e8a040c080a7d4508621474e11a3ba7fee3a2d6ed428324c22d723f61c46b0dedf57ed8a1f5b4588f9922fa5bcbfe9cb9d97d558a4030e7809c1057838edc60be81f357bf466bc5b9fe05d58ce8009fa509db7d24e0404f609b7ade4df5611ef00761069d18aec275e33941db96feb81abcf87171b7d54f0013d88f8d39dbcb57df1b0a3a3463bc75995e8193afb08815de576c1de957b700aba945788360754f9575a3d9c3909e56b171e193da4dec2789cc41ce933c82904e5c4c01c218680c44c46ff1d501ffef20d6a5b067c523ea161e115e7b0179007835c82d226c6d3f7615d13ee729f7c926d429a33302cc61febb9ec53d24dcc00322740613d4714cdf380dec0673929b91f0c173455c1d5f605b3c2a2998e970309c59130a49a6dfe6b9f56d4ae04e21e9b14034e54e0b8745ba2c918f39d2f0f95e48323f664da395a8e2d6e294c202a51e5cb513a664f947deb2ce6c270000632cf0c9decc4857d801b939cb2e97498832d469e9487978a447e40b25904f80ce5d6e7d6a32ea5fdbaa470d134b318b41c8ab7e8cdffec513ad4cc2abe280406081827c389a2c247d0ee40235dd62f1ffecbf6aa3389abda43298ab752f9480f898f59005b4d8a2fdc4631bf7f2cb10a6cb789dd028e5ce71f0cb3512b3ed30a497bedd241b6bbef8bc099340e859f20d47d928fd1bd804eed2b41e266dcb00e3ba7ab8b1d442ef9200458654d613fa08ced2b8d86cbc27260e099bea23e4904e5ce096899cee734b4a524e714c1b98e9acf571563328ef71d45e3bf6fca2d0bafe7b32ad8919db80210eaef8c2c89bcbbafabcd6d5292cdfe439c1a9f1fae66090411640c6ccf905547102b49f7a025a3c7e095e9f7eadd457c147d96210208962f737018ec057b427e2d31ca7eb58b25e9fc4cf7cd95
50129035d6b98da60ba7919d4fe9b99e4da7598fe85015c43be4eca45d1913458e9db9bb8a155c1a0eb64774939673b8914c6bfe98348182ec3a222da702175ebd2b8c970bb878d807858ebb7d35f6675bd2f4ab35af156defc38ca3fe0ec5b305f2d13481bdfaf40b09d0076aa6d6e2de0a284fc8027f7ef58df0eba11a524764b4e939371566a50a2f1c2171207626099ea70cbb62112001ce82353e7640b91370b2a277047b9903e6aac7bdc67db5d1e316b7ecd9d26d9efa52a1427aef62ffe91d19e5549f9106059a55d8085e937920beceb69b6233a950913aedb6c9913853257d80a7f2df09240a0b0e2265f861f25f3b3f020295e4c3a557d8f210a19713fbea955e2fbc00aee13bb44e9d935d547cf7b071b05d94292a4d753018e4b2058fe919e98fca0209f9e3815440b6b34990f49af2210cb6074f74dc7bbf5804a6a2986c2a1fe60c70493fb69d8a0a256d3ad34313bb2d88cd9e3dbee169bd656369c2ad0b598e03e2d43843317dcd53af92eaf7de73f89d0aae108da35e9639a951cc4e6bb6bc08478d9108aa862add6731184656515718c3bbe4c666bd3730ecbb150447fb2a0f6a1cd0c4c26f836163f5bd045d30243b49e10bedad9c3a974faed2469b128d02025202ee0e8e262e50081c08bf278b9de30afe9fa80ed99f0b50915bb83752051ce7b914a5b6c8eb0e4a2de2f412a377ebfbe8e3f409ef8580b4878a380c11e476588b8804b369130947a7b805aaf563125b08d79fc2a7a6dd59edbfcac7aa08de4ec98ad49fa4c88de7702238c883b2790c56b5752326843113fc7a3e860e058ac662acfecfe2f803960414b6c5a3f0e16ba8bc45d2e9442b74d13f7774f30af7f4cbb7eb1a2545914dd2562cd1cc69ca12a46b76e4b3d0fdaf520e4a4a7f0dd8ce412d8b0567f994a8c1684e5549402078b59200b8b697c776e6e58d34cd0b797938f4eba967963055852af7194e1d776f455e2e84402598e1ce415f8720000531b2935021bad2348c4b5976d3cf2d89626138a5f204102559c592a33be507dd596db1cbc03ab91f1f5a80729619bfb1bc830d88188e3f9593fb430d4050092f9064ce03ebf328aa662c7adf612240056e0418f531fe086f19509d44cd8e0c83393059bff678437c3d61acafab248c7b41119bb777354b836e9752849c80054fcad749c5660627ac7a24adce564b62426563b0caeb16769ffd5afae42643071d211d06f860445b0a384ea4275a1f6f2b0caf11ec2fead1b8a8abd71215fb0ba65cee1f28aeaf4fb54685c4b9c1d4583d05be800fb3b392c9c9b16d45b4740589c45a1d7adc9fbc4a8a91a6d95a256954408f1e1794b9b73c7fa92d9d1d3d0d62e035bd6a3c91f354608ac7d002f1ec1481b92fe1efb47dd717c58388432e
0821b4642894dc1d95bbcb4bea812edd2be509e3723eb27dab02f0aa5174bfda011bac757ac05198bfae729198b21ad83ed77d7099933ca8d438223eb5172e29005f8d4bb439e9f1fd7d9319bc235a800ad8cea46cf72aa161403411fcd239c2ccc1caf86b559d263ec57ba80793cb6045195b2306dc2d3d1545b4f9f628733f0150a751cc8468d64e4fd87d8f6808c387112ea6cbd2747347a2702a1c53d036061c256f19897dced055dedc2f6000444b891b2f0f103ec52169bef5620ecc8802b9c40573c76e1a18907d9aaa82cfde4f69fbe29a031ae39e9d47161b52561104b9c4272fd23a758596329701d313cf2a2a5a0783b9f39b0d9d376b1a53d9aa02008e31c110ea76d7f3dd3b6119d18436d7d87638aaa30c633eef1fcd5923ef0f702f504ae3e3157382c50c022725f50e0de515d1bef117ca08d832e098b0240e6c2be19c7e25a8f32f876f4ea74809a659e02b8f2ec908dbd119ccf0edcd790994de8034f11691ac2cd6500c32e20287e968196dfd93de98d31a5733b19e7a0735c541ce2da123a622e10b79abd68ed242218fc198d3fd9bbe3dced2aabf070750593ef0ddbcf3da82d63d30faf24289e054434fe9b2547de07056c2ae927f00ddf5820c7620d2b9cc0f5047fe4a74d163cc3813f5465bcad682ea0be2d0070d34b1b382dc98e343b261a1a605ce916498f476b2131ff2353e1f9a3ce3e71902f2820f6f514b72f32b2a3e2d50fad8c47129849e2643ffd978f3d6a8893a6a0c1dfb540d9405f41c1b62bba4b0893273535b223deb4bfbf64c2a8b6ac040f00610d5d9beda68f687ebd4035c39c45cd80cef2c15ab5f35f24af9c0560ae08f07b50751c330e15f3ee3a8e07783dc00ca6b8f78d465d7dea2c8c83de74d5f000e240a2046c4d4346fde8802305e568074e94ff89cf0a3d5346eb77564c35b15b6b5746f3412f0a78c40323c98a985c255d63656bf1115c4c826622d462b26f608130852be14f740b8570f5f4a5511b86a784c51a5e4fb8e945e31c8040628da04fcadb8cc0c23e4b24bf2222932c7893ca6b9904dca2ff8a4968d35a0d368ac017496afccec6a815cb767f0b779ae95e3ba5260724a34f18cbc9204840414e60344dedaec24d73ccdbe4807b8f1555c074c94f53d94177fd1698997c431bd100d9a501dbae938247f5b77eb42505ef9cefefd4e7f42287903b316f036dacf80089174621ca177d5a71c9aed771de721522b0f72c815e97b130f1cbcb83877010f56c9926851bccc1f48abeb9972a7cd765435ed59666c643e8bf9f6f0dbf91e0c32da0d9416c4c27b002fe1547ad2406532ecf44c89e6245ac041ac0ce4e4eb0d7ca53b3bab439481930d4e214b3b8460354614c9fc4a2c5035671c4fbc522d0b0ea983f506dcea
d626b4f7932bc9b7f497f8233a1556478c0786218bac666e0ea9e7ac6a29eba7c27edc4f25dbf6906883bbf4ec47d1ebf38a3fb70c0df46f0b0541a8b5e21a9ebfa0a08a660daf10a2dac0d7c8bba437f3b931c96455b58805b0d522d58f81354805dc073ae2dfd82bc827ae72807fd533687f78a450eba70ef15be34213fc5ca3b3762944f1d018c46e7bb0b9829132633e54528c7178b7000c3852eff49d00338308b216cf77fa36f67130ca7424138560ff9adbbec1ac0f3c300aa9725662614ee790040e3668b7b512e4953b5a68a3df04f881978dac0e77be03f07de3ea414ab5de8b4148df7b2c7548de2392fed2d26dcb61deeb329a434e34961ba8fc0a8533731fbf13923c60ecaece5ac0633100fd18b1a47ab40a9c3d10e14fdd5f1c448479b933db813f16c87d74e925e91a370be9d5cc5f550f3bec745edc71c836a48d90e51777f2c480f5338917fa9ab9ce258bcf6eae6708542e1d2ac11c524ee41c307b7deac63d13dcea39ca2a3fe38f4fe0ba59a6f4052485619c52efba1b68fb0f0ef5aa921cc57fd1d341f3556a1d6b73d17f6db60b496621f542826cddad8de2e517258ab046842f696b9c4f2835506522f9d2b50093ed535605c4815979bac5627c103f51e4ad001e6a600f0d0b4ca4b81f06390b14ad02ea0c3b0349824c1e0573549bd853d20cad9fa1edcca9bc566014f0af06528ffe2ecbaa066240c9bff71fd5ddeff863b27cb00a74fccfb1db5499a8bc06eb01cdeb3b01b261e61f3fdf0f3973a4e1104fb997c8b6e3ae436f1ce0353c028c292daef3187740a653da65cf9610bc22e0fdd7dd5ae16293b057f31e8db002edb9e8b76d70aeb7d1aafa772a757c790590260f51c7671871f0df38f6cb230d84ab59ad81a924c2c8e28a7537ada86ea383b9476311c1ba4fc3ab2cdfec0205637c21cdb13444bc4b3a74e18f399b62fdabea074a3fbf866486b4ed86a6a4068b85c656cca6ed3e68b35175e1924973e82d462e4c2704e06609d552f5e81203d04fbbdf96911fd3fef07f359a8ea935d5a3657bb773783cdc060395d0081102f50063354fae3f6e43ab851dc5c29fc36d48b1e654b09260aa5e62c743cc6b0861f9bfa62752561e2a80f8275dc0e2abaf8f65572fe4fe8e8ccc2d91bdd7c6468783932798cf483e2b6793f0fbc08495d135ae16abd9d334a58bc97de071d10bf13f1dd379ef24f8e41f63787166417e18e5960f38163fb0b92a7127fa22ee03db3562c139a0887a8820e0f85f1714fd29a59d500474f92e1187bf9729c7210cf85a5b1f13d581dd7b8b25a709888f147420a493cc6e036ab9c6366c0e4ebb07026b0e3fd5b34eee445669452cf92bbab5fad18b9b164179b2bd489155c017091a448388d6ca40cea2523618c4a992
c37d3c20a7139b940417cdda9c7852bd082bc30446d38f51d4d3ef9b4a8d2482dc05a1efe359d65e9fac6d673fc3a9570b123f5a609851be3b90461ccdd602d85a251164e1c757ed256c4d92b10bec9302a5e9f89069c73bbd2266ab755a4575254a43604b69965de8d131cb812e6a6e0c56a983915850fe148d329c29d15eec67464427da9d763b89c52995b32c1c500ff81d35fa348ec90c2f4680c1e073fb259df6a4dd145b8951e017948c2976050784ed0e7207997ffc619ef2ee4e53fa88af9aea63cd2f1a761151bcab58025a07e3a2b2ce40f9c25e0a88d82efd67c0fefa4cab7cfaa290865508e2e17edee80898d4eed281cd7a8d0e4090a97ff3cbc0ac62712528b367a32bea105840701e0c6588cd1a118bc1b8c4625a3e48f69b779f0f575cd2e505b362051021df8c1b04fb273a52cfe42e7fb16164d3218777622f89580bea14ed586073df23c6c9da01574ae621f62af3728281a4c3cffcfd6f192324ea9eece0755366bde98b5daf0e2b9c58587c15f7b76023125dfbd5fadb374d71481f4427e8a15eb189ee2bc4cbe3bb3b0fdcac5fce8f46a31e9a33a1ce34b8dcf22990f9e15218f67d8ce5470ae819fbd3781424adda93c5a443cbb6c209b9ca424c0eee28ab168f05331b610606f7a9f33ed3ea477451373c0f3578017beacc7b15fae66c0b11abace6d3860bc48977b4d2a9ecaca3b0e3f43b4583f86d560ae1ff0e2cb77edb5419deea3f0cacba39780fc58f3ce0c29c66a3591892c176e1a7d3bb54fbcec8f79fa2bbc004229afd6fdc18c4cb89edf1459ee314763cafe1e75287615c3dbb7f5b05fbab0a9adf444c01a3227b51509dc5bcbf4e6a2eec548d68d1fc6d1bb52673c7efe303c80b9b1cacaac0cfd73dcca1618f30116410c8e6a1475fc191bd6a080b0166087aa6531e59f9d96dc30101580f2cac5efda8fcb7996e085fac2ae03f7b0a460d5cab710600c77c4d3f7f6af775a422b9af2b687d6cab4ddff8e6190e0d9985035f6529e1a277a089ad1b5894cd8f1261918587b89e66dd9bf5891943a319b607ebf127991ac2145696f4b8489d14fe076983381b0ceb983014bea17aed90d90a79d2c9c6e58453483488bf5c892cce23273e98ba4847f39250b126885643ed00540fcb83defd5588e0a342a4d5f73b815e8b18c7d0c7a9aa32b86e65783d2b07e2465870e9a8e1364e6baa88dea32a47f215f661a692d52c0ddada551bbc8d0db7a4e1e987f9f330b0abfcab7318c30fcb4d801f008bbadad93172b8d8f0ce085648a5b033e7f5921900756f12d59d70334cfe24a848e1b85d83e316d303a100add604567b2f5703238410cb63177a22dc934864af6763498346f6039ff360352519cca87dbad1d3f8003bb46a9072b8b396114731d0e1
f0df03fcdfee3a0401435fcd82fb448d96064ca438928ab3f2dbc705ef16e9b9dfd65ae8d800a49c0162c35f8ab595be1da87c6c3a8e10886b16519589f22a260d1ef3c2ab11ee9204bda92882ad266b29b1eb64f959c3cf828b27afe849a61195870c65d34ea70706c0944f8091ea483dbc06745a8658dcf5749cbb4887a079399af13cfa69a306088e5bbc7389fd33bbedc0f8b2bcd730b8fa0fb7d5c957a4551e9dce29c4b15f04e057cd99771aab75a7887b82cb12453cea07f785b4ef560e654eac26a079e90a9d3a60c39c1a3a301b9b3a2a0bb7c7071c96ce37bf59b1b847632ceba30b5d044c532e896c29fb2b3769aa0f7495f97a3475a718ce17479f805ede3c893c8d0c0e362207bf9a4c5a48ca988550185e3d010a52b4ad65bce64419142d7238c80020e43b99cb80ceb18bc6a2b045333f8ec169211dd26dc8dfa398691f1d76d30f1b4204a9113a1e353594033e50c908cae266ccd449bae6e030a0acb6619b360f92833140fe243c5be4bd1fda19455a96818f3cbee2232383fbb909c615d1050b687f4451524d507cc9130149899e040927a553b90dd324d57db73c08a5fd0e013f08be740ff23243f87b2c0f4fab5d65edf4e7297ed774852e423e9412e7be0f99a677719928d879cd56bcabfd4dce3ff96a7c5ce34f23471268f42d4b73990295b03026c77645eccf963218535168fd08ab0969ba97b812b6bf3a598e710f09450dd5123579658951b21eb143e4b76ee660a32654a0723e48d2069dcb52edde0690dbbb2bd0c2de2536eb40c89238127c48085de1eb89c8f5bd3208e2c2292422bf268ade80cb5331cbf74cb7338f5d8c99b106b837d0b7679b40907ce4fc4a054bf6418951192254108b6765270f70cecbc01f3e145ca05f1123ad79f8f09bbf7320af0e1802202b1fccb190fa8750d845bd8ea324bcbf558233ae7b65835aa84a144c11883a3a19c084b9c9a9e4a4fbc4a722294a0bd35a8374fa6910fe4b635c8ab7bc1a7951f0f16cfea09a52ab31e5c85848273df135cfacf630e2e8db9728ef733e2adde7dc65b29d6816743ee4b57cdbcc507ae8156edc3f5548e616439cc02daa04a7e0edfc5755b0463072cd9f1480c33e7730e098e6201dd68c35574847b37652d8dfc7ab8eb88788fcc3a54304f69287396bd6783c33dd2f9d92e6bf19bd48cd062b5a5b222e8cc9a84883593261ee7308701453e7250749a7b0f26a28be17dbcbe096e717476b12c1093da5718771a43de20d9b51b76c3a206946f097408973c8441e5a94cd160192ef7e6aac57b8b82ca809f7e504fb6a48dc58a18f5fa5952de6df1f3af19d3041b908946d480ad2328876bc22fc174762e475a5502d85e878e536a78f06574df2119e5c514f8e78910659bf079b189725
d9ee0918829b2772828ea6691664868283974656827c44687c47b6164aca68817a888859e80063da37ac442386181fa238fb2db123b49d3f158c2b8a444c80c9c9", + "tx_json": "{\n \"version\": 2, \n \"unlock_time\": 0, \n \"vin\": [ {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 69812464, 37032133, 1422726, 375448, 406581, 449851, 89069, 223269, 53240, 11155, 7623, 1417, 10229, 2962, 8755, 3275\n ], \n \"k_image\": \"dc006e92fc1e623298b3415ddccfc96a8cae64cb7c9199505a767a16ddd39bb9\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 103642483, 4588979, 374943, 521809, 26666, 551559, 3541, 78919, 74119, 7697, 22074, 98, 2069, 12520, 991, 2010\n ], \n \"k_image\": \"d656ac13a64576e7af5ca416d99b899b0bafef5e71d50e349e467fa463b13600\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 100689894, 5890444, 1600331, 1239916, 10246, 40615, 93596, 29367, 209178, 31832, 15976, 15524, 7738, 4618, 18000, 13238\n ], \n \"k_image\": \"ca559feaf79de4445ca4d2bcc05883b25ecff2f6dd8fd02a9a14adea4849f06f\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 105335893, 1720661, 603531, 812185, 907612, 95130, 38652, 233260, 38021, 31218, 30355, 924, 43414, 12072, 1861, 5511\n ], \n \"k_image\": \"c5b7d94e661c5eb09714b243f3854cc06531b1085442834c9e870501031b73da\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 83622788, 22585650, 1698763, 363844, 674080, 176911, 33802, 60550, 34474, 499122, 34507, 109012, 2600, 2390, 11762, 678\n ], \n \"k_image\": \"a5ebf4914f887ecdfde8e7ef303a7f2cc20521a2a305ba9a618e63d95debfb22\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 55266410, 50443346, 326237, 2547259, 482686, 131728, 345875, 70764, 60539, 195316, 10817, 12486, 12413, 807, 3774, 430\n ], \n \"k_image\": \"a2b08a090f611ea1097622cc63a49256a2d94a90b8dbaaa5e53a85001c86d55a\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 104851338, 4432225, 322623, 60187, 68448, 12770, 87943, 31366, 19015, 11424, 1280, 4638, 1761, 5270, 251, 2\n ], \n 
\"k_image\": \"88f7594b26dcbaff22f7e7569473462c49d8fb845aa916d7a7663be8b85b8553\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 102449564, 609569, 3134675, 460320, 337596, 206075, 2480844, 10681, 62016, 8415, 133990, 3772, 6574, 1860, 4700, 386\n ], \n \"k_image\": \"7d805459f05d89c92443f43863fa5a4d17241d936fc042cc9847a33a461090c5\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 85856596, 18666365, 2117553, 3040618, 42521, 22399, 31727, 90148, 4179, 5683, 1473, 18506, 177, 11519, 809, 461\n ], \n \"k_image\": \"65bb760c9a31da39911fa6d0e918e884538f0a218d479f84a1c9cca2f9a5f500\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 98769080, 9556352, 1282527, 34176, 70608, 21210, 55660, 5274, 64238, 34977, 1007, 102, 8595, 1858, 4821, 545\n ], \n \"k_image\": \"52418ac25be58fbfcc8bd35c9833532d0fa911c875fa34b53118df5be0b3ba48\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 106117516, 1882646, 1340386, 177924, 75683, 23939, 173054, 41750, 12613, 11467, 26614, 986, 12504, 9537, 2244, 1642\n ], \n \"k_image\": \"40e57cb9a9f313f864eef7bf70dea07c2636952f3cbff30385ac26ee244a4349\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 93868601, 4410106, 10612916, 475622, 117796, 118856, 41271, 73043, 1460, 42279, 39533, 56372, 18003, 6324, 28140, 613\n ], \n \"k_image\": \"38d739cfb68aba73f0f451c7d8d8e51ae8821e17b275d03214054cc1fe4f72d6\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 105407900, 3217068, 803785, 210848, 12516, 136889, 48021, 10018, 10953, 16966, 15097, 6490, 2934, 959, 6245, 3656\n ], \n \"k_image\": \"1eda8e08b1024028064450019b924eca2e3b3e3446d1ac58d0b8e89dc4ba980d\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 83903567, 23002177, 555328, 1306991, 335319, 250467, 189789, 255613, 34743, 46807, 20533, 1166, 2110, 3917, 1713, 649\n ], \n \"k_image\": \"1cccfcece29fbd7a28052821fdd7aac6548212cab0d679dd779a37799111f9ec\"\n }\n }, {\n \"key\": {\n \"amount\": 0, 
\n \"key_offsets\": [ 72075355, 24982880, 8841419, 2959414, 91310, 33396, 253541, 140692, 134207, 323001, 19328, 3000, 36052, 12670, 409, 3547\n ], \n \"k_image\": \"05138378dedfae3adbd844cf76c060226aaeddcd4450c67178e41085d0ae9e53\"\n }\n }, {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 87891367, 12499871, 3444223, 3817265, 682721, 1065395, 247912, 136321, 50631, 28848, 18744, 14334, 11562, 167, 213, 718\n ], \n \"k_image\": \"0007a41ed49aa2f094518d30db5442accaa7d3632381474d649644678b6d23c0\"\n }\n }\n ], \n \"vout\": [ {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"8c2ba531d06e4ac990213d765751e89981303d4714d81229ac09e385c8ac1cd3\", \n \"view_tag\": \"c9\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"0c9d3c41171e04e42f6ed61a932c2ecdb5c0103d7cdd909fa0ab15881469c66a\", \n \"view_tag\": \"f5\"\n }\n }\n }\n ], \n \"extra\": [ 1, 184, 72, 194, 203, 248, 224, 238, 74, 152, 75, 246, 69, 192, 230, 17, 132, 80, 151, 16, 114, 161, 141, 35, 182, 207, 113, 111, 11, 246, 129, 253, 64, 2, 9, 1, 208, 254, 203, 171, 207, 80, 220, 87\n ], \n \"rct_signatures\": {\n \"type\": 6, \n \"txnFee\": 236860000, \n \"ecdhInfo\": [ {\n \"amount\": \"dc9c59cdde692b10\"\n }, {\n \"amount\": \"e76b3405d76aec5c\"\n }], \n \"outPk\": [ \"479df6fa3357cbddcb22e20d2b458011dd31dfc1c78316bad7ed9ffd39ba12b8\", \"6a8ad571f7f5aa9668806d10d2b03c2c84c90690e4ddeb3dc6f870fbd5df5b09\"]\n }, \n \"rctsig_prunable\": {\n \"nbp\": 1, \n \"bpp\": [ {\n \"A\": \"c55abb99bdda211773d14dcf7cd68ca2c2e1df64896976c36da0b72b9b0b5abb\", \n \"A1\": \"f68499752558d3b7b342a554f91ea3171cba93075d35968565e37020bd60f6bf\", \n \"B\": \"0086548c6fd600cc04f704607fd008bdd1ef83ed341109114d9301c30a86d3d8\", \n \"r1\": \"7b9a9a3a6cf62d4aedea2cbe4f48af3a830c33486b3e3c7942ad7b4dfbc32207\", \n \"s1\": \"6cbaf3b5e0afffc36549482dc4e5220ad7be9284470405e2a889dc4b0eda950b\", \n \"d1\": \"9eacce92f4b6fe9bbb9f853cd5a68b1ea882a66fc1f86df6b7f21ed49e21fb07\", \n \"L\": [ 
\"0c2330673beaf7961285d721e00c55e6d97b77efb12cfcdac429cbe92fe7ed64\", \"1ae9878d713edc784573be96c47e80dec59c487590f721dda777066a66a388a8\", \"cbd007967d90e323d91cae997f9449518ca71dd5e0473cd88d52a1b63f4994cb\", \"355fc68976cdff244295ea25e9d579eeee92211f52ed942307827480bf639e9b\", \"945e1f12d5a0369f990945dc143dcbb3e7bb7eada7b4e10ccf65128ab3a203d2\", \"7441035465a96cac073e12db24bb696f38587021576582596bd0351ea8dcd301\", \"23af3e518560a843188e19aa666d9b70d7c55b2072c3813f05084de714988a9a\"\n ], \n \"R\": [ \"e21b543d2d50a2b7949e9c6ee3638731aa1f19909ac0978ef10ebe83e52558f6\", \"e96814d9b64fb0344d8a37e43e9933a9bcb087173ad895400a680f3f7919a585\", \"572b4c0639855b4a146492b00c82eb9d7bd9566aa94387e79f80561676a045e4\", \"be44d3a42061a710a8c5be6ac2af7531504911a4e17d27249f7395c40ca90ac6\", \"4e8a0eaee615e0a23be18c2a2b74e5afea867b60acf8ddf7301055d2e9b6556e\", \"2b8750caaddea43764d08b5dbe1497cc7db2be2ccee30629fb313078fdb9fc4f\", \"9f01b9d7e2e9cb90fdf2654411be9914ac32fe797c427f2a3f5e0ab974226f24\"\n ]\n }\n ], \n \"CLSAGs\": [ {\n \"s\": [ \"3363e401c6214a87d6d33bd5d66a6f74f273c9eaeba1210b64089c7ca7418d0a\", \"e9ea96a6e3608b004341be48b3fb658836e80e9c3c2786764db9e239d115c206\", \"e2c5d846b20c33614aa533f068a9363f9e78b728faedd5fd5ac6c2b427dd0b00\", \"278d9000c2872ff0d2506b842847a65eaf93d1f5ed3267d63cf9128158e1d307\", \"aa6a0c69a73c3a1b9914c285981e40de53329e666a98e065354dae1c92765d0a\", \"f2a1b19b7a3f102fe8a7ba53487879d77c8df704ee4785bd27a6083ab60ec006\", \"81a849c4a7064e6265c60e06924fe32cec542a40b1c89d81356cdb200e842903\", \"3f75ee3e3749acce3d130cca4e7892f68cd7cdfa8e122bc78530b2e8d0176d07\", \"ebae1b0281338c2b2ec14d71d211f1e5500ea3bf298e88b2bb39edc1a4614d0d\", \"5048bbe60650d351ec2ff4cafb7f29ed646885572bab7ca50503bd7cb1a6e407\", \"75276f4e3914a473acc0a6520ea22947173052c525ff7f69b4161bab3f4e5509\", \"7adc053156eda0d8d255298da99b7612756be610b5d4c2d4c5622c2e3d45a00a\", \"2847e2e122a38f797072440f3b2860dc4a184d42f64b1a3db4c26d43cc3c6a04\", 
\"0a1df4fe0cb81346469ee48b4c64ca3a163a234dd107cfa155d610e859d04405\", \"d90e1d6a6c4f5b5cd93fa65fc80abff6607d8dc1dd932abb1a760e5a05b8d900\", \"03f7429d83df3df24cad2012bd01134cf6ac445145c54434b6e12aea99ed030e\"], \n \"c1\": \"66902fd5bbbefe4ec9d59d9e38da2e3e182da804b9cd69fc6516e5c42d12e409\", \n \"D\": \"807e7e4661a8300cc35c9a5aaf05c74f315d301d3b58af8423e8b3a9dab2a5f4\"\n }, {\n \"s\": [ \"7cb7bac961689da9c8480fdcd7699abfc7b1846b5406095930e305225408a80f\", \"ff283d2ff7b05766873ea25e37a51af6e80fe42c040c09e1254fec0c561d7005\", \"078faf699880bad1bff0adcaf0fbbed6a6b708e993738fcf3f51307f49427b0d\", \"ca0a31c36ab9453b909cc184d4c63908b94427deeaac5521a838ce0e7c654708\", \"2dac768455bb669ab5297535a143999b247d8999b9c8d041a2eb3bc3d152c50a\", \"e586639c731aa945cc0ee851bcef18a27d942354cb8d66ea356d8c2c04eb490e\", \"54f4fd74727b0237fee689ea8802a8d8c885884c216702e32c8e8769430e8709\", \"954ea828466f98d09e756e5ed6c12ef5eff3a4ee36c990542f935a460da46903\", \"7bb52fb8e07f50801ab6a9b81b8eab75b73a7fe93b9d65b75679b3103d5e880a\", \"a1d7c47088f50c690def8612384bfd2f01dc17e184a9d0529735d34bba8e0704\", \"c1d20036078d9394a593de43e28afeed8e9f9e37a283de8bfa383f333281b502\", \"baab61d08ee55dc83a7a7bf7b9be015aa90cfaa3cf2c7976eb1616b6a5327707\", \"9c48c0cd37af84fd9804839055052758c47dcbf861d6f481edd26426abec9b0f\", \"23396a7e14bd9f6ce9168c88eb5cc00c16e92c7133ef6cf66c16ab51d6209401\", \"f49e4a023ecaac0ec7ffac5f168d4e7d4ccf098bf7c59a769430b9131bad6500\", \"fe9c562cafea11ce248b40a51faf4a9a380b9abf6407f1184c05ab6f1c2caf0d\"], \n \"c1\": \"83d399b9afb3f50c7d1b305bb321c14df0787e226ecd2def7bfc6b7c312dca0b\", \n \"D\": \"255e606d60022c07faf74422223d45772072dc8a7452802ec6e0a208deee8d7d\"\n }, {\n \"s\": [ \"a374add905f27f162b82bd779d88d0f533b9cb550de4b7a51679ba3469bf5b0d\", \"003038393e91408af3ab39972995190a27c377fd568695b06cf0a2c0f374680c\", \"7e6b2a71d81eb28f336edf96840978b03a9c42bc72c9004dea1ed37d1208670d\", \"a75ed7d6209b278e9362c880ad85e32253589c0ba3fb6a613df14128040e4d0b\", 
\"37dcdb26edc3ed3d88b1c2608439d2b930cfff4eea3c5386d27928ad0f8af70b\", \"6e81677aa7c2ceb9280628cd810e5b7e175393342d5cd560ca14ead9574bc20c\", \"bf0d7268a249e56526af3029493eb31fd3d20160aa32d30a0f53c44f9d829701\", \"05a30031d96eb6efa0bca4d61a6739c37cde501a33ed7de3b8f1fc3ea696830c\", \"52fa1350698901ad198fa276f73406da4a2d4cac670860fd0da77c42d9c67305\", \"e4044076d2fb8bf2765ecb825e468e698130674102dc58f3c7fcb8194a6ad605\", \"46555d48c63f1354f34db8b6c6a381a4052cf4fd6a30ee2274af2da5684e3d0d\", \"32f2ad376e21aed1ffd8b3c0e48032a16283c3ce6f12ac2cc90b389e81f59c05\", \"3fc9310d8c947f78ea37f66d2df64ec8c4e04b8bc58801a6a362802ad8c26106\", \"743aa699d067999feb437d92b438ca45bf1a8d1ca5106f6c8609f277a360f70b\", \"9237b6b9a393af4c7af6da7647c4bcb7b50da483f82478af3d99bcdbe4ef5e00\", \"464e5e541e187854dc98a69aa48fc444019a803cb71719f44b175e968330850f\"], \n \"c1\": \"4cc9c96f6c2c24075edcb03b79a8d82d83b49db4a33b7eb993d48e4ece3d2a0f\", \n \"D\": \"524c847d7f15c03e03708b96b6ea8eaca6a4657c3af0244e6401779a0ee21221\"\n }, {\n \"s\": [ \"fa92f13efd7721c5cc4500e7cdb8254997d8b73a7b2243f580a5e91cc246910e\", \"7fa7d13567b4164e6911f6722ed13aadd2a76f127a3435a4bd919836c363cb09\", \"ebf110b68ed56b28b4e8bd5d3e4568adfa509e914d98eb21aeaafb6734e42f07\", \"10aaa746fe4ea17c423f38ac951dd1007dc17486d2b823ecebf9fb9dcb62dd0d\", \"e523f315c266de82c74f0378fcbfd2b030c0e1e30b034ec8f7d27946b8c86403\", \"863350520c89e34975e27b05df44cbcb017e0abbb6a756faf0b34f43f2752907\", \"9fa6b98c8b7a26493a215c954448e1958f6f1aa558cee11352fceac48273440c\", \"199f152d34e7d3c805b35e61a2ce914b5e03dc5d7858ec15c44413e5b2569c00\", \"f31567df8324d01565d4e4e9e04d86f75f24dea9d9d24568cc0ff02740c50c06\", \"e0c1712ec5b711aa78a21e7433da47430ed76a9e5a20748ff5df632a9cfdce0d\", \"1e54ab98f2bf59db008b5c6a03c2d47fa12540d1c927fde4da851325e28afc0e\", \"fc1fdd8a0cb2544e57306a0b5f486299a713a8cd4aad16b38f3c1b789937690b\", \"35287cdb337f33ce203d6303892c64e8be17016436717ff0c5c1af6ad7082e0b\", \"0bb3ca7bdf4a6369598e25d28c42189fc879daef22144424854ba28fef20680b\", 
\"5d32e9ae7872d9c134d6d9fe5b0ffe9352ceabd3805a6af5ac2b7b6234af960a\", \"77523d6e80b27849bd7f784a98d2422ff79c902f9dcbe4609c429e5b4a9fc90b\"], \n \"c1\": \"15bac05d97ba07fe3734d3f7491c6a962ae942f382a7d28ea1fa5302c51bb40c\", \n \"D\": \"cfdbd190f3286cf798bb345b51723f6e8ea8515534773a340613d8612db8f9ad\"\n }, {\n \"s\": [ \"fff3278d190f7894a6ae534e2ef7a0a8b7b2fae577aaf438b62ff0e4d79b8a08\", \"56a76827f3e29042f3cb444e67dc16633c30d8db17c541ab4db7d81fc5a32f00\", \"b3609aa0b8d2544843206117a826e456a41aa87d80d320c09f1ac0f93ed8e30a\", \"22b70ab8b6be5dd1288e09f9c20906d6bca5f0eff1e2ed57da9465ce60ecc00f\", \"aeebd450c3c3ad2453b75c18bdb725cca06d8cee657c77564ef03cda16d06f0d\", \"8e5ac1cce961abddbb5879738d04997263622263d9f27ee8d5868814afc7a202\", \"4e8807bfbf6943fd0792d13b9bef4ff83ed82f0c34df5f9c42aa472cc2c1ac02\", \"3e152321855eb77e8f762d5b6937d9e7cb3a9f3c2419b806203aa2deb57ae906\", \"80a3c83cdcee63d0bf296027ef22cfac0938f9b115359e6c6d91306dadb2fb04\", \"41107ab8a88b18f0bde29fd68432767c7ef193cb6815c4456eb0d917b9a9e80f\", \"4cf02dee189d4b8accb839002f8b6eba365c9f3f693e156932c09941bfc64600\", \"fa9a1ed117e99068c3220c9f0e94b9d7c122c6c391e8f6dc0ddd0ec8097f4d0d\", \"a7e3987fc86c5523fc788c68b25f86ff5e347849e71171ff0f5c6b15a0b50107\", \"6b1dcd3e4ba22639beba069d6c813548c99db963077c1f2bcdd173f38e79bb05\", \"b450a4748c5959889f22d53f62476626e22587a023dfc4517d49396b0c2efc05\", \"817d8187aee15d20ef0f623615bf4da108d2429e596dce7d8e9fe6e8596c0600\"], \n \"c1\": \"9824b7bc0d7dc58a097658babc3701f19a7210f2c197eea886c5d00dfc50c108\", \n \"D\": \"570002d437713df562fa1e3d67f70450db016570e1119532ed0e3e1bdcfd3155\"\n }, {\n \"s\": [ \"792e33a1110e87d24c8eedbd1b5d731c4fdea67a7d31168432e7d7cfd6564706\", \"d6a5c45ffc307daa0cbc600a5e6e3c9c82d493388ada75deffd3c83906dba60d\", \"6de0af97e952ddad74e59b92013a6e55d63a0289e09feb33641ae2bc1b2fd60e\", \"33ebb741b522f6c0d47a331846edd4f4d82223d193e0e5aae4b88750280b5702\", \"afb63b883a67e6a442ebaf9c7c60be8037a040c0a6e1814696ca2238717ecc0e\", 
\"f670db23a3063db45d74557d715d643255f5cc2b02ce024a42a2c4f48d0a020c\", \"4fe82d187b3b6edb7e2e9444cb2cb3b8862523febcd7bd5e7549b5031eb8c90c\", \"27907eadeef4328d325dfa862735dc9ba8ae26df2fbf9712ac4fc813122ea10c\", \"0ffd43cf70c217fbfed3edd012d66d959d03aba43873f87634a3ec91efdc9209\", \"86321b3ae6152c4e1688b81775c069fd5a41e71e2dae304bd5c479e3247ee20a\", \"94266393a0761debae592b659f32e71c39343636ac1ab2d3732af1fa9778d701\", \"fceb0b71bad10c860e51e637430e6520d5639c6a1c430aaad3802c30a92b500f\", \"e55616cfb92ae1ce65f3eba6285da428c7849d24918f6d3bf5bcd8b37db6a403\", \"a3220064e82e94bc8a3df93e39332b133bda28c866f30e45c824039e448ef704\", \"59086810004d7613201583e9497b5c17e878238bc1280c87ed2381b8fd308606\", \"56df5be83e02f18e2308f2546a553299ca285b53c7f66ad0297c907e01dccb02\"], \n \"c1\": \"1d0e21631768d826068eeb457b877f4009977de0fda3d981dc3887515060c50b\", \n \"D\": \"6cbeb0704498abea125f623107f97b33daf3ba02d619b6edb8223167fcc1df4c\"\n }, {\n \"s\": [ \"1909cb506805d1f464b1ebeaa69a1b1a0cf02387504296c7ce63c61e0385f000\", \"36dde496366c9988b48d904c4111379548fa829d55787ed9c629ca873862a406\", \"48f6f8e24387826471f8e6047baf0babff8f21cdfb81d4930749638856f2340d\", \"306f636d277ae1792f84a293e4f66350c75023a1b5967eb40a43c440a246c603\", \"42d8e7ebaa4ee68b8551b9807d7b308574a90a7c54134112d6fdb3151af2c90a\", \"2463bad24e336dd34bffcb34713b6802e711a168e4afd30bfc3dba82550fba03\", \"6a8d9f5070875b622865ce43fa773e77ebcca52dd7fb846aca9c133eb93bfd08\", \"030bf7d2f95f541cee1f8ede2d0099177bc447d8c67a6d3b05ee3183498b1b0a\", \"9e8c271f40e36941df4924502e31d8122d28fef01063e9692152371ffbd44d0d\", \"54b527ba4a550b6191a40192e1735f9e9634df5647c3580179ed7f11d893890e\", \"968845f97aa642bccfb6b9e84a4d68189d59fc729f23059dead12b5bce438403\", \"aa7a1ef89da4ac0e4617a978acd99ce57213509702e31941dcab7530c902690a\", \"7da1e1c0ae121c31fa19cf8889ecd8b61cf086be7cfe695b172a36cdc326d504\", \"e55918e88a21efc120a54ffb6f852d5b97ccd7cba3ed1e44b6fda91be7b5de05\", \"89ee42794cc797c25249a9a6d40f873e52b491389065a950c8aed5b6675f5c06\", 
\"6d1dce8ae2cee39a8f7b1c4adf5fba44fbb41a81b06a5ca9b597ff6081d89a0e\"], \n \"c1\": \"df9d832bc866de4095119e7fb0eaeb8930b705d0712aa17e11754ee27da80105\", \n \"D\": \"befbd6dfc2d3e6e8a040c080a7d4508621474e11a3ba7fee3a2d6ed428324c22\"\n }, {\n \"s\": [ \"d723f61c46b0dedf57ed8a1f5b4588f9922fa5bcbfe9cb9d97d558a4030e7809\", \"c1057838edc60be81f357bf466bc5b9fe05d58ce8009fa509db7d24e0404f609\", \"b7ade4df5611ef00761069d18aec275e33941db96feb81abcf87171b7d54f001\", \"3d88f8d39dbcb57df1b0a3a3463bc75995e8193afb08815de576c1de957b700a\", \"ba945788360754f9575a3d9c3909e56b171e193da4dec2789cc41ce933c82904\", \"e5c4c01c218680c44c46ff1d501ffef20d6a5b067c523ea161e115e7b0179007\", \"835c82d226c6d3f7615d13ee729f7c926d429a33302cc61febb9ec53d24dcc00\", \"322740613d4714cdf380dec0673929b91f0c173455c1d5f605b3c2a2998e9703\", \"09c59130a49a6dfe6b9f56d4ae04e21e9b14034e54e0b8745ba2c918f39d2f0f\", \"95e48323f664da395a8e2d6e294c202a51e5cb513a664f947deb2ce6c2700006\", \"32cf0c9decc4857d801b939cb2e97498832d469e9487978a447e40b25904f80c\", \"e5d6e7d6a32ea5fdbaa470d134b318b41c8ab7e8cdffec513ad4cc2abe280406\", \"081827c389a2c247d0ee40235dd62f1ffecbf6aa3389abda43298ab752f9480f\", \"898f59005b4d8a2fdc4631bf7f2cb10a6cb789dd028e5ce71f0cb3512b3ed30a\", \"497bedd241b6bbef8bc099340e859f20d47d928fd1bd804eed2b41e266dcb00e\", \"3ba7ab8b1d442ef9200458654d613fa08ced2b8d86cbc27260e099bea23e4904\"], \n \"c1\": \"e5ce096899cee734b4a524e714c1b98e9acf571563328ef71d45e3bf6fca2d0b\", \n \"D\": \"afe7b32ad8919db80210eaef8c2c89bcbbafabcd6d5292cdfe439c1a9f1fae66\"\n }, {\n \"s\": [ \"090411640c6ccf905547102b49f7a025a3c7e095e9f7eadd457c147d96210208\", \"962f737018ec057b427e2d31ca7eb58b25e9fc4cf7cd9550129035d6b98da60b\", \"a7919d4fe9b99e4da7598fe85015c43be4eca45d1913458e9db9bb8a155c1a0e\", \"b64774939673b8914c6bfe98348182ec3a222da702175ebd2b8c970bb878d807\", \"858ebb7d35f6675bd2f4ab35af156defc38ca3fe0ec5b305f2d13481bdfaf40b\", \"09d0076aa6d6e2de0a284fc8027f7ef58df0eba11a524764b4e939371566a50a\", 
\"2f1c2171207626099ea70cbb62112001ce82353e7640b91370b2a277047b9903\", \"e6aac7bdc67db5d1e316b7ecd9d26d9efa52a1427aef62ffe91d19e5549f9106\", \"059a55d8085e937920beceb69b6233a950913aedb6c9913853257d80a7f2df09\", \"240a0b0e2265f861f25f3b3f020295e4c3a557d8f210a19713fbea955e2fbc00\", \"aee13bb44e9d935d547cf7b071b05d94292a4d753018e4b2058fe919e98fca02\", \"09f9e3815440b6b34990f49af2210cb6074f74dc7bbf5804a6a2986c2a1fe60c\", \"70493fb69d8a0a256d3ad34313bb2d88cd9e3dbee169bd656369c2ad0b598e03\", \"e2d43843317dcd53af92eaf7de73f89d0aae108da35e9639a951cc4e6bb6bc08\", \"478d9108aa862add6731184656515718c3bbe4c666bd3730ecbb150447fb2a0f\", \"6a1cd0c4c26f836163f5bd045d30243b49e10bedad9c3a974faed2469b128d02\"], \n \"c1\": \"025202ee0e8e262e50081c08bf278b9de30afe9fa80ed99f0b50915bb8375205\", \n \"D\": \"1ce7b914a5b6c8eb0e4a2de2f412a377ebfbe8e3f409ef8580b4878a380c11e4\"\n }, {\n \"s\": [ \"76588b8804b369130947a7b805aaf563125b08d79fc2a7a6dd59edbfcac7aa08\", \"de4ec98ad49fa4c88de7702238c883b2790c56b5752326843113fc7a3e860e05\", \"8ac662acfecfe2f803960414b6c5a3f0e16ba8bc45d2e9442b74d13f7774f30a\", \"f7f4cbb7eb1a2545914dd2562cd1cc69ca12a46b76e4b3d0fdaf520e4a4a7f0d\", \"d8ce412d8b0567f994a8c1684e5549402078b59200b8b697c776e6e58d34cd0b\", \"797938f4eba967963055852af7194e1d776f455e2e84402598e1ce415f872000\", \"0531b2935021bad2348c4b5976d3cf2d89626138a5f204102559c592a33be507\", \"dd596db1cbc03ab91f1f5a80729619bfb1bc830d88188e3f9593fb430d405009\", \"2f9064ce03ebf328aa662c7adf612240056e0418f531fe086f19509d44cd8e0c\", \"83393059bff678437c3d61acafab248c7b41119bb777354b836e9752849c8005\", \"4fcad749c5660627ac7a24adce564b62426563b0caeb16769ffd5afae4264307\", \"1d211d06f860445b0a384ea4275a1f6f2b0caf11ec2fead1b8a8abd71215fb0b\", \"a65cee1f28aeaf4fb54685c4b9c1d4583d05be800fb3b392c9c9b16d45b47405\", \"89c45a1d7adc9fbc4a8a91a6d95a256954408f1e1794b9b73c7fa92d9d1d3d0d\", \"62e035bd6a3c91f354608ac7d002f1ec1481b92fe1efb47dd717c58388432e08\", \"21b4642894dc1d95bbcb4bea812edd2be509e3723eb27dab02f0aa5174bfda01\"], 
\n \"c1\": \"1bac757ac05198bfae729198b21ad83ed77d7099933ca8d438223eb5172e2900\", \n \"D\": \"5f8d4bb439e9f1fd7d9319bc235a800ad8cea46cf72aa161403411fcd239c2cc\"\n }, {\n \"s\": [ \"c1caf86b559d263ec57ba80793cb6045195b2306dc2d3d1545b4f9f628733f01\", \"50a751cc8468d64e4fd87d8f6808c387112ea6cbd2747347a2702a1c53d03606\", \"1c256f19897dced055dedc2f6000444b891b2f0f103ec52169bef5620ecc8802\", \"b9c40573c76e1a18907d9aaa82cfde4f69fbe29a031ae39e9d47161b52561104\", \"b9c4272fd23a758596329701d313cf2a2a5a0783b9f39b0d9d376b1a53d9aa02\", \"008e31c110ea76d7f3dd3b6119d18436d7d87638aaa30c633eef1fcd5923ef0f\", \"702f504ae3e3157382c50c022725f50e0de515d1bef117ca08d832e098b0240e\", \"6c2be19c7e25a8f32f876f4ea74809a659e02b8f2ec908dbd119ccf0edcd7909\", \"94de8034f11691ac2cd6500c32e20287e968196dfd93de98d31a5733b19e7a07\", \"35c541ce2da123a622e10b79abd68ed242218fc198d3fd9bbe3dced2aabf0707\", \"50593ef0ddbcf3da82d63d30faf24289e054434fe9b2547de07056c2ae927f00\", \"ddf5820c7620d2b9cc0f5047fe4a74d163cc3813f5465bcad682ea0be2d0070d\", \"34b1b382dc98e343b261a1a605ce916498f476b2131ff2353e1f9a3ce3e71902\", \"f2820f6f514b72f32b2a3e2d50fad8c47129849e2643ffd978f3d6a8893a6a0c\", \"1dfb540d9405f41c1b62bba4b0893273535b223deb4bfbf64c2a8b6ac040f006\", \"10d5d9beda68f687ebd4035c39c45cd80cef2c15ab5f35f24af9c0560ae08f07\"], \n \"c1\": \"b50751c330e15f3ee3a8e07783dc00ca6b8f78d465d7dea2c8c83de74d5f000e\", \n \"D\": \"240a2046c4d4346fde8802305e568074e94ff89cf0a3d5346eb77564c35b15b6\"\n }, {\n \"s\": [ \"b5746f3412f0a78c40323c98a985c255d63656bf1115c4c826622d462b26f608\", \"130852be14f740b8570f5f4a5511b86a784c51a5e4fb8e945e31c8040628da04\", \"fcadb8cc0c23e4b24bf2222932c7893ca6b9904dca2ff8a4968d35a0d368ac01\", \"7496afccec6a815cb767f0b779ae95e3ba5260724a34f18cbc9204840414e603\", \"44dedaec24d73ccdbe4807b8f1555c074c94f53d94177fd1698997c431bd100d\", \"9a501dbae938247f5b77eb42505ef9cefefd4e7f42287903b316f036dacf8008\", \"9174621ca177d5a71c9aed771de721522b0f72c815e97b130f1cbcb83877010f\", 
\"56c9926851bccc1f48abeb9972a7cd765435ed59666c643e8bf9f6f0dbf91e0c\", \"32da0d9416c4c27b002fe1547ad2406532ecf44c89e6245ac041ac0ce4e4eb0d\", \"7ca53b3bab439481930d4e214b3b8460354614c9fc4a2c5035671c4fbc522d0b\", \"0ea983f506dcead626b4f7932bc9b7f497f8233a1556478c0786218bac666e0e\", \"a9e7ac6a29eba7c27edc4f25dbf6906883bbf4ec47d1ebf38a3fb70c0df46f0b\", \"0541a8b5e21a9ebfa0a08a660daf10a2dac0d7c8bba437f3b931c96455b58805\", \"b0d522d58f81354805dc073ae2dfd82bc827ae72807fd533687f78a450eba70e\", \"f15be34213fc5ca3b3762944f1d018c46e7bb0b9829132633e54528c7178b700\", \"0c3852eff49d00338308b216cf77fa36f67130ca7424138560ff9adbbec1ac0f\"], \n \"c1\": \"3c300aa9725662614ee790040e3668b7b512e4953b5a68a3df04f881978dac0e\", \n \"D\": \"77be03f07de3ea414ab5de8b4148df7b2c7548de2392fed2d26dcb61deeb329a\"\n }, {\n \"s\": [ \"434e34961ba8fc0a8533731fbf13923c60ecaece5ac0633100fd18b1a47ab40a\", \"9c3d10e14fdd5f1c448479b933db813f16c87d74e925e91a370be9d5cc5f550f\", \"3bec745edc71c836a48d90e51777f2c480f5338917fa9ab9ce258bcf6eae6708\", \"542e1d2ac11c524ee41c307b7deac63d13dcea39ca2a3fe38f4fe0ba59a6f405\", \"2485619c52efba1b68fb0f0ef5aa921cc57fd1d341f3556a1d6b73d17f6db60b\", \"496621f542826cddad8de2e517258ab046842f696b9c4f2835506522f9d2b500\", \"93ed535605c4815979bac5627c103f51e4ad001e6a600f0d0b4ca4b81f06390b\", \"14ad02ea0c3b0349824c1e0573549bd853d20cad9fa1edcca9bc566014f0af06\", \"528ffe2ecbaa066240c9bff71fd5ddeff863b27cb00a74fccfb1db5499a8bc06\", \"eb01cdeb3b01b261e61f3fdf0f3973a4e1104fb997c8b6e3ae436f1ce0353c02\", \"8c292daef3187740a653da65cf9610bc22e0fdd7dd5ae16293b057f31e8db002\", \"edb9e8b76d70aeb7d1aafa772a757c790590260f51c7671871f0df38f6cb230d\", \"84ab59ad81a924c2c8e28a7537ada86ea383b9476311c1ba4fc3ab2cdfec0205\", \"637c21cdb13444bc4b3a74e18f399b62fdabea074a3fbf866486b4ed86a6a406\", \"8b85c656cca6ed3e68b35175e1924973e82d462e4c2704e06609d552f5e81203\", \"d04fbbdf96911fd3fef07f359a8ea935d5a3657bb773783cdc060395d0081102\"], \n \"c1\": 
\"f50063354fae3f6e43ab851dc5c29fc36d48b1e654b09260aa5e62c743cc6b08\", \n \"D\": \"61f9bfa62752561e2a80f8275dc0e2abaf8f65572fe4fe8e8ccc2d91bdd7c646\"\n }, {\n \"s\": [ \"8783932798cf483e2b6793f0fbc08495d135ae16abd9d334a58bc97de071d10b\", \"f13f1dd379ef24f8e41f63787166417e18e5960f38163fb0b92a7127fa22ee03\", \"db3562c139a0887a8820e0f85f1714fd29a59d500474f92e1187bf9729c7210c\", \"f85a5b1f13d581dd7b8b25a709888f147420a493cc6e036ab9c6366c0e4ebb07\", \"026b0e3fd5b34eee445669452cf92bbab5fad18b9b164179b2bd489155c01709\", \"1a448388d6ca40cea2523618c4a992c37d3c20a7139b940417cdda9c7852bd08\", \"2bc30446d38f51d4d3ef9b4a8d2482dc05a1efe359d65e9fac6d673fc3a9570b\", \"123f5a609851be3b90461ccdd602d85a251164e1c757ed256c4d92b10bec9302\", \"a5e9f89069c73bbd2266ab755a4575254a43604b69965de8d131cb812e6a6e0c\", \"56a983915850fe148d329c29d15eec67464427da9d763b89c52995b32c1c500f\", \"f81d35fa348ec90c2f4680c1e073fb259df6a4dd145b8951e017948c29760507\", \"84ed0e7207997ffc619ef2ee4e53fa88af9aea63cd2f1a761151bcab58025a07\", \"e3a2b2ce40f9c25e0a88d82efd67c0fefa4cab7cfaa290865508e2e17edee808\", \"98d4eed281cd7a8d0e4090a97ff3cbc0ac62712528b367a32bea105840701e0c\", \"6588cd1a118bc1b8c4625a3e48f69b779f0f575cd2e505b362051021df8c1b04\", \"fb273a52cfe42e7fb16164d3218777622f89580bea14ed586073df23c6c9da01\"], \n \"c1\": \"574ae621f62af3728281a4c3cffcfd6f192324ea9eece0755366bde98b5daf0e\", \n \"D\": \"2b9c58587c15f7b76023125dfbd5fadb374d71481f4427e8a15eb189ee2bc4cb\"\n }, {\n \"s\": [ \"e3bb3b0fdcac5fce8f46a31e9a33a1ce34b8dcf22990f9e15218f67d8ce5470a\", \"e819fbd3781424adda93c5a443cbb6c209b9ca424c0eee28ab168f05331b6106\", \"06f7a9f33ed3ea477451373c0f3578017beacc7b15fae66c0b11abace6d3860b\", \"c48977b4d2a9ecaca3b0e3f43b4583f86d560ae1ff0e2cb77edb5419deea3f0c\", \"acba39780fc58f3ce0c29c66a3591892c176e1a7d3bb54fbcec8f79fa2bbc004\", \"229afd6fdc18c4cb89edf1459ee314763cafe1e75287615c3dbb7f5b05fbab0a\", \"9adf444c01a3227b51509dc5bcbf4e6a2eec548d68d1fc6d1bb52673c7efe303\", 
\"c80b9b1cacaac0cfd73dcca1618f30116410c8e6a1475fc191bd6a080b016608\", \"7aa6531e59f9d96dc30101580f2cac5efda8fcb7996e085fac2ae03f7b0a460d\", \"5cab710600c77c4d3f7f6af775a422b9af2b687d6cab4ddff8e6190e0d998503\", \"5f6529e1a277a089ad1b5894cd8f1261918587b89e66dd9bf5891943a319b607\", \"ebf127991ac2145696f4b8489d14fe076983381b0ceb983014bea17aed90d90a\", \"79d2c9c6e58453483488bf5c892cce23273e98ba4847f39250b126885643ed00\", \"540fcb83defd5588e0a342a4d5f73b815e8b18c7d0c7a9aa32b86e65783d2b07\", \"e2465870e9a8e1364e6baa88dea32a47f215f661a692d52c0ddada551bbc8d0d\", \"b7a4e1e987f9f330b0abfcab7318c30fcb4d801f008bbadad93172b8d8f0ce08\"], \n \"c1\": \"5648a5b033e7f5921900756f12d59d70334cfe24a848e1b85d83e316d303a100\", \n \"D\": \"add604567b2f5703238410cb63177a22dc934864af6763498346f6039ff36035\"\n }, {\n \"s\": [ \"2519cca87dbad1d3f8003bb46a9072b8b396114731d0e1f0df03fcdfee3a0401\", \"435fcd82fb448d96064ca438928ab3f2dbc705ef16e9b9dfd65ae8d800a49c01\", \"62c35f8ab595be1da87c6c3a8e10886b16519589f22a260d1ef3c2ab11ee9204\", \"bda92882ad266b29b1eb64f959c3cf828b27afe849a61195870c65d34ea70706\", \"c0944f8091ea483dbc06745a8658dcf5749cbb4887a079399af13cfa69a30608\", \"8e5bbc7389fd33bbedc0f8b2bcd730b8fa0fb7d5c957a4551e9dce29c4b15f04\", \"e057cd99771aab75a7887b82cb12453cea07f785b4ef560e654eac26a079e90a\", \"9d3a60c39c1a3a301b9b3a2a0bb7c7071c96ce37bf59b1b847632ceba30b5d04\", \"4c532e896c29fb2b3769aa0f7495f97a3475a718ce17479f805ede3c893c8d0c\", \"0e362207bf9a4c5a48ca988550185e3d010a52b4ad65bce64419142d7238c800\", \"20e43b99cb80ceb18bc6a2b045333f8ec169211dd26dc8dfa398691f1d76d30f\", \"1b4204a9113a1e353594033e50c908cae266ccd449bae6e030a0acb6619b360f\", \"92833140fe243c5be4bd1fda19455a96818f3cbee2232383fbb909c615d1050b\", \"687f4451524d507cc9130149899e040927a553b90dd324d57db73c08a5fd0e01\", \"3f08be740ff23243f87b2c0f4fab5d65edf4e7297ed774852e423e9412e7be0f\", \"99a677719928d879cd56bcabfd4dce3ff96a7c5ce34f23471268f42d4b739902\"], \n \"c1\": 
\"95b03026c77645eccf963218535168fd08ab0969ba97b812b6bf3a598e710f09\", \n \"D\": \"450dd5123579658951b21eb143e4b76ee660a32654a0723e48d2069dcb52edde\"\n }], \n \"pseudoOuts\": [ \"0690dbbb2bd0c2de2536eb40c89238127c48085de1eb89c8f5bd3208e2c22924\", \"22bf268ade80cb5331cbf74cb7338f5d8c99b106b837d0b7679b40907ce4fc4a\", \"054bf6418951192254108b6765270f70cecbc01f3e145ca05f1123ad79f8f09b\", \"bf7320af0e1802202b1fccb190fa8750d845bd8ea324bcbf558233ae7b65835a\", \"a84a144c11883a3a19c084b9c9a9e4a4fbc4a722294a0bd35a8374fa6910fe4b\", \"635c8ab7bc1a7951f0f16cfea09a52ab31e5c85848273df135cfacf630e2e8db\", \"9728ef733e2adde7dc65b29d6816743ee4b57cdbcc507ae8156edc3f5548e616\", \"439cc02daa04a7e0edfc5755b0463072cd9f1480c33e7730e098e6201dd68c35\", \"574847b37652d8dfc7ab8eb88788fcc3a54304f69287396bd6783c33dd2f9d92\", \"e6bf19bd48cd062b5a5b222e8cc9a84883593261ee7308701453e7250749a7b0\", \"f26a28be17dbcbe096e717476b12c1093da5718771a43de20d9b51b76c3a2069\", \"46f097408973c8441e5a94cd160192ef7e6aac57b8b82ca809f7e504fb6a48dc\", \"58a18f5fa5952de6df1f3af19d3041b908946d480ad2328876bc22fc174762e4\", \"75a5502d85e878e536a78f06574df2119e5c514f8e78910659bf079b189725d9\", \"ee0918829b2772828ea6691664868283974656827c44687c47b6164aca68817a\", \"888859e80063da37ac442386181fa238fb2db123b49d3f158c2b8a444c80c9c9\"]\n }\n}", + "weight": 11843 + },{ + "blob_size": 2320, + "do_not_relay": false, + "double_spend_seen": false, + "fee": 115000000, + "id_hash": "c072513a1e96497ad7a99c2cc39182bcb4f820e42cce0f04718048424713d9b1", + "kept_by_block": false, + "last_failed_height": 0, + "last_failed_id_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "last_relayed_time": 1721261657, + "max_used_block_height": 3195160, + "max_used_block_id_hash": "2f7b8ca3dbd64cb33f428ece414b2b1cef405cfcd85fab1a70383490cc7ed603", + "receive_time": 1721261657, + "relayed": true, + "tx_blob": 
"0200010200108fb0ad269cfd890bb8a5980195e69e0184fc06c7f705d0d21b8df305b7c601e5da08dc0197ed03e6cd06a632cc7680bf01fb3e7cc08761a6037ca29965f27d2a145f045da5a1018ca7e6a5a5a93dbbd33d0a0003dcfa3a2800cdabeea3c3206f05408adb96d2d6fdbbf704a0372bfee0535ca036f00003992edd42ab4ef6f21ac1639c8515b5930f39788788671122993164515bedfe4c8a0003e255c00974b24fbfd3cd45fd4004d6dc76201813ac827295b1eb04e7d14d10dd9500037d42e7770c696359631ae7e566fedd1417077d2f3bdee93240f544703b299ef7e40003755335d298ae54a80e4f3fca0c1066fe744d5a2719d188c0ab10e45a94ada4d3850003e5ccb6fc793c664acfd057d7f6fcc77850032b96702ce4d7112bda338c7aa576d10003f7e699af9f0b9ff887845f0b0600816a58faf680ad32c61ee4465c643e30a340d200031de678a8a50613c5db143e7b3d5aef14be9f1f5ce2805b3a3f2fabd0af950acc380003d1af1d1d909b9d07e4cbd8d4792ce3c719280a2c4a7618d8b2be89d3e2472dab56000365159da725872bbe45cd5b5a8eb8b2082c52f161c66130ecd74aa1690e31e1cf2e2101669fe64b9088044833435038ee59827b0b3f93b042e2ea7a2de803e8889a473a06c085eb36a2d1d7927d22c62713d484cfcc32ce08d57d1588f967ef3dce69f0b186e9417afe28372e3011410c15d178d85cb7992d5489eeaa56ab735167278d301a060edacb9944a0d91f4d1890403da176509a5a8fe8b16032ba1abf24aa58fcf1f37233985203599a5196678b96873b000d7e7138e1a39ec14709f7e44ed3d5a8d9be17401d2d4d362ce2798383fc2954c8f79876471eb52ad4e7cbbd0a1647771d11aef73612f15ca15c13ac18ed477bef4492b0b229ae04b00ac4a144329a0ea2f86e4decd8ab76e8f76e604a39aa8350777c113b99f7c2808a50c8556079f6b4b1e857d75ccf4fa43429e27d1bb205c9bd8dd2ccd6151c8106eaeda4ac3f28771864870d2327e129acd4a88a16e143901bdf17e2bbba7e8934c8b05bd95a35a04a020beb1298b8ae94c2b2bf7911ad173f20a9dd9a2dfd3929a3300e8522e0db027f391f8b03ae58064b76218608c02ef5771f3058852719367c5c7cef36169357e3b9ee637eecba7f28df2ac097dad28fc995a01f42f19181d1d691f92e8e2737cac55974f4d3e0f86f674c3e84cee205f60143108ec2a3072162e9b3b82aed46df599bd192c8f923293a398281f0c67e8aae7a1a1878999827176bcfc32df00414cd6be57d8bbad907564d071e77c625005d6e497cd58d30d420d5cde92a7ef8ddcdc5e953d55eb223e95cc1e9edba4049e7728542ceacaa8bceb6dc82ce3b0294eb57476103f4669b1b446
fc8b3ff970409ad02e606b08b273e39f50344351bbe3e7f1f09437982f3c9817698394df12004635da6726be57bc727d6cc2488e2d1f1a7f64cd9fffcad5281176cb43cfc36040ae73b14501d732dda57d80108176de3a885cd7ca322e7368c5d1ef63b74d38d4f94a7ea1436feead534e9bfdbe5fd186199a172462f2165026673c883f6093633257c2b940a6a5c7dc3561e7685017a7a612d59facd45ad3aedee4433d66d7f6194c0ca2b30c86c26c07ff69a8fc5c568980c3662598a19f5c63259258dacf50ee6b83395d52fba08cb0305746c9ff74c8e0431ac10b65fada1ab841921b5a70538a2a1792830106d045c51f95043ca1ee492169208ec52dd8ce307025f5297d7d40cd94668079ea06fb9b65b2ca7d8edf58062cde615fc324b921db1611f24228ff52d986087dc7c56247aff907f7434b094e2d1f6c69be514196acbf758a3ab3db25b0a80c0fc3e9076e0a934f4cf5d2bdddbd7d9af0c47fe3dfe5d8de4dc203dd4a862cde72b72f138109137de2481ea146e6758877e7bdf4d98db15e97b610acaca6ad74c33497b2ca9a3e4445dd211d89156337d0b2e6bbc5ad5f6833f36b239e79526a48fc7b86cf9c7882af011404cab69de4b9c4f2b7929e7d23763eaf5fc673c2c2755231bf1e41bf1dcd4ea549e0b610d7a449337c499817921ef2fce52795d3ac0410faea86d480fe37dfe169ec177d53a83532d27b3ccfa7663c6d727a093c62d3f7988584f579bd146fde10992155bcb47ffec0d5390e4190aaf24a8709c9954e65d370ec79a47eb973fdcee672afa283c986be21f59db0f1b914438ae83c19c43c6a0113400e6883c84aa64a788086d2e6827bf125db4f23612d7e92d8fe260b2e1e76a235ef3ab824a000713ab23f34661ccc596e2198239f29bfcc05d99a86c42fe802eed8da93490cffadfc7adc848568af250835d9e926a71c446422faa9b57cb1ee4c4b9251d8e99c43635e7ce796d0dec1918393562bb70463c67a254e58ae723dd8c2f4534ced8e8fd68c2b655a046756cd42c93b4660418b166ac1ca569cc5642462664b87d02a15cd8fcc81c335d4a131b34e880ee01241d90a51e88fbeaaeb3952f8e2c32376cb883b28445d345ccd3991632732f0b315388f9b8fc18599ac2d74bc0654e8e6f684c87a274889384dc7393b3301f04e94dc23c3c41d08ad7cebe96c4221b9c1b9f02d24ee707a28a91e7e9f9fa84073be8794c7c34beaa52086b541f9904607eb115f2111dc96b5bce5b75454214098279c030920e6d1bfbfd083421eba2365d671467b2a2a3fcba832c5a6d86a60173f228a07d33596edc8aa922a0c9b26996502b33eeec0038f74fe87485f7b6075a484e14e6c961d8417dd2cbc48029b272dd42b935f27fe7ca7afed71dceed0
b5f3a4688c6ea8b0a444b442ac0396a20784f4f9f94d025e2098c482717cf4e05a1862e45b8d12b90b19773857752fe4721a49a97eedf189d25c8b0d659883d0d3e2ce49e18a2f85ecf736191b9058f16cb65d96bf12be8300642c31400e3df05a8d748a1c84d932aa94c1337ed2c6303e4e4e1e4360cb23e4879760e60501007e2d2e18200688a0d71e5fb79211614cdf88bd2cd96252cd8742a2d8ea5914f0946752e3c0ffca147d0b962337b4e8674966b3ef8ed0f7698ea50e6dfc43a8105989c046ed697d5317e81ab30add72eadaf7d5cc74e68fd4b23e1d70a3ba43d0bd64720bb8524bf732f9f5ea45fbaf38470c693834b48658c4961e4d4987e30068b15e598afbade29176f45c702a78f0af5576519e177d355d8c17e2f2a859261369cc31747f01d8cb57916b59a11ff4aba68e8ea5ead7fa431f5389f8620f759", + "tx_json": "{\n \"version\": 2, \n \"unlock_time\": 0, \n \"vin\": [ {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 80435215, 23232156, 2495160, 2601749, 114180, 97223, 452944, 96653, 25399, 142693, 220, 63127, 108262, 6438, 15180, 24448\n ], \n \"k_image\": \"fb3e7cc08761a6037ca29965f27d2a145f045da5a1018ca7e6a5a5a93dbbd33d\"\n }\n }\n ], \n \"vout\": [ {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"dcfa3a2800cdabeea3c3206f05408adb96d2d6fdbbf704a0372bfee0535ca036\", \n \"view_tag\": \"f0\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"992edd42ab4ef6f21ac1639c8515b5930f39788788671122993164515bedfe4c\", \n \"view_tag\": \"8a\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"e255c00974b24fbfd3cd45fd4004d6dc76201813ac827295b1eb04e7d14d10dd\", \n \"view_tag\": \"95\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"7d42e7770c696359631ae7e566fedd1417077d2f3bdee93240f544703b299ef7\", \n \"view_tag\": \"e4\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"755335d298ae54a80e4f3fca0c1066fe744d5a2719d188c0ab10e45a94ada4d3\", \n \"view_tag\": \"85\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": 
\"e5ccb6fc793c664acfd057d7f6fcc77850032b96702ce4d7112bda338c7aa576\", \n \"view_tag\": \"d1\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"f7e699af9f0b9ff887845f0b0600816a58faf680ad32c61ee4465c643e30a340\", \n \"view_tag\": \"d2\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"1de678a8a50613c5db143e7b3d5aef14be9f1f5ce2805b3a3f2fabd0af950acc\", \n \"view_tag\": \"38\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"d1af1d1d909b9d07e4cbd8d4792ce3c719280a2c4a7618d8b2be89d3e2472dab\", \n \"view_tag\": \"56\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"65159da725872bbe45cd5b5a8eb8b2082c52f161c66130ecd74aa1690e31e1cf\", \n \"view_tag\": \"2e\"\n }\n }\n }\n ], \n \"extra\": [ 1, 102, 159, 230, 75, 144, 136, 4, 72, 51, 67, 80, 56, 238, 89, 130, 123, 11, 63, 147, 176, 66, 226, 234, 122, 45, 232, 3, 232, 136, 154, 71, 58\n ], \n \"rct_signatures\": {\n \"type\": 6, \n \"txnFee\": 115000000, \n \"ecdhInfo\": [ {\n \"amount\": \"a2d1d7927d22c627\"\n }, {\n \"amount\": \"13d484cfcc32ce08\"\n }, {\n \"amount\": \"d57d1588f967ef3d\"\n }, {\n \"amount\": \"ce69f0b186e9417a\"\n }, {\n \"amount\": \"fe28372e3011410c\"\n }, {\n \"amount\": \"15d178d85cb7992d\"\n }, {\n \"amount\": \"5489eeaa56ab7351\"\n }, {\n \"amount\": \"67278d301a060eda\"\n }, {\n \"amount\": \"cb9944a0d91f4d18\"\n }, {\n \"amount\": \"90403da176509a5a\"\n }], \n \"outPk\": [ \"8fe8b16032ba1abf24aa58fcf1f37233985203599a5196678b96873b000d7e71\", \"38e1a39ec14709f7e44ed3d5a8d9be17401d2d4d362ce2798383fc2954c8f798\", \"76471eb52ad4e7cbbd0a1647771d11aef73612f15ca15c13ac18ed477bef4492\", \"b0b229ae04b00ac4a144329a0ea2f86e4decd8ab76e8f76e604a39aa8350777c\", \"113b99f7c2808a50c8556079f6b4b1e857d75ccf4fa43429e27d1bb205c9bd8d\", \"d2ccd6151c8106eaeda4ac3f28771864870d2327e129acd4a88a16e143901bdf\", \"17e2bbba7e8934c8b05bd95a35a04a020beb1298b8ae94c2b2bf7911ad173f20\", 
\"a9dd9a2dfd3929a3300e8522e0db027f391f8b03ae58064b76218608c02ef577\", \"1f3058852719367c5c7cef36169357e3b9ee637eecba7f28df2ac097dad28fc9\", \"95a01f42f19181d1d691f92e8e2737cac55974f4d3e0f86f674c3e84cee205f6\"]\n }, \n \"rctsig_prunable\": {\n \"nbp\": 1, \n \"bpp\": [ {\n \"A\": \"43108ec2a3072162e9b3b82aed46df599bd192c8f923293a398281f0c67e8aae\", \n \"A1\": \"7a1a1878999827176bcfc32df00414cd6be57d8bbad907564d071e77c625005d\", \n \"B\": \"6e497cd58d30d420d5cde92a7ef8ddcdc5e953d55eb223e95cc1e9edba4049e7\", \n \"r1\": \"728542ceacaa8bceb6dc82ce3b0294eb57476103f4669b1b446fc8b3ff970409\", \n \"s1\": \"ad02e606b08b273e39f50344351bbe3e7f1f09437982f3c9817698394df12004\", \n \"d1\": \"635da6726be57bc727d6cc2488e2d1f1a7f64cd9fffcad5281176cb43cfc3604\", \n \"L\": [ \"e73b14501d732dda57d80108176de3a885cd7ca322e7368c5d1ef63b74d38d4f\", \"94a7ea1436feead534e9bfdbe5fd186199a172462f2165026673c883f6093633\", \"257c2b940a6a5c7dc3561e7685017a7a612d59facd45ad3aedee4433d66d7f61\", \"94c0ca2b30c86c26c07ff69a8fc5c568980c3662598a19f5c63259258dacf50e\", \"e6b83395d52fba08cb0305746c9ff74c8e0431ac10b65fada1ab841921b5a705\", \"38a2a1792830106d045c51f95043ca1ee492169208ec52dd8ce307025f5297d7\", \"d40cd94668079ea06fb9b65b2ca7d8edf58062cde615fc324b921db1611f2422\", \"8ff52d986087dc7c56247aff907f7434b094e2d1f6c69be514196acbf758a3ab\", \"3db25b0a80c0fc3e9076e0a934f4cf5d2bdddbd7d9af0c47fe3dfe5d8de4dc20\", \"3dd4a862cde72b72f138109137de2481ea146e6758877e7bdf4d98db15e97b61\"\n ], \n \"R\": [ \"caca6ad74c33497b2ca9a3e4445dd211d89156337d0b2e6bbc5ad5f6833f36b2\", \"39e79526a48fc7b86cf9c7882af011404cab69de4b9c4f2b7929e7d23763eaf5\", \"fc673c2c2755231bf1e41bf1dcd4ea549e0b610d7a449337c499817921ef2fce\", \"52795d3ac0410faea86d480fe37dfe169ec177d53a83532d27b3ccfa7663c6d7\", \"27a093c62d3f7988584f579bd146fde10992155bcb47ffec0d5390e4190aaf24\", \"a8709c9954e65d370ec79a47eb973fdcee672afa283c986be21f59db0f1b9144\", \"38ae83c19c43c6a0113400e6883c84aa64a788086d2e6827bf125db4f23612d7\", 
\"e92d8fe260b2e1e76a235ef3ab824a000713ab23f34661ccc596e2198239f29b\", \"fcc05d99a86c42fe802eed8da93490cffadfc7adc848568af250835d9e926a71\", \"c446422faa9b57cb1ee4c4b9251d8e99c43635e7ce796d0dec1918393562bb70\"\n ]\n }\n ], \n \"CLSAGs\": [ {\n \"s\": [ \"463c67a254e58ae723dd8c2f4534ced8e8fd68c2b655a046756cd42c93b46604\", \"18b166ac1ca569cc5642462664b87d02a15cd8fcc81c335d4a131b34e880ee01\", \"241d90a51e88fbeaaeb3952f8e2c32376cb883b28445d345ccd3991632732f0b\", \"315388f9b8fc18599ac2d74bc0654e8e6f684c87a274889384dc7393b3301f04\", \"e94dc23c3c41d08ad7cebe96c4221b9c1b9f02d24ee707a28a91e7e9f9fa8407\", \"3be8794c7c34beaa52086b541f9904607eb115f2111dc96b5bce5b7545421409\", \"8279c030920e6d1bfbfd083421eba2365d671467b2a2a3fcba832c5a6d86a601\", \"73f228a07d33596edc8aa922a0c9b26996502b33eeec0038f74fe87485f7b607\", \"5a484e14e6c961d8417dd2cbc48029b272dd42b935f27fe7ca7afed71dceed0b\", \"5f3a4688c6ea8b0a444b442ac0396a20784f4f9f94d025e2098c482717cf4e05\", \"a1862e45b8d12b90b19773857752fe4721a49a97eedf189d25c8b0d659883d0d\", \"3e2ce49e18a2f85ecf736191b9058f16cb65d96bf12be8300642c31400e3df05\", \"a8d748a1c84d932aa94c1337ed2c6303e4e4e1e4360cb23e4879760e60501007\", \"e2d2e18200688a0d71e5fb79211614cdf88bd2cd96252cd8742a2d8ea5914f09\", \"46752e3c0ffca147d0b962337b4e8674966b3ef8ed0f7698ea50e6dfc43a8105\", \"989c046ed697d5317e81ab30add72eadaf7d5cc74e68fd4b23e1d70a3ba43d0b\"], \n \"c1\": \"d64720bb8524bf732f9f5ea45fbaf38470c693834b48658c4961e4d4987e3006\", \n \"D\": \"8b15e598afbade29176f45c702a78f0af5576519e177d355d8c17e2f2a859261\"\n }], \n \"pseudoOuts\": [ \"369cc31747f01d8cb57916b59a11ff4aba68e8ea5ead7fa431f5389f8620f759\"]\n }\n}", + "weight": 5750 + },{ + "blob_size": 1879, + "do_not_relay": false, + "double_spend_seen": false, + "fee": 66240000, + "id_hash": "d696e0a07d4a5315239fda1d2fec3fa94c7f87148e254a2e6ce8a648bed86bb3", + "kept_by_block": false, + "last_failed_height": 0, + "last_failed_id_hash": "0000000000000000000000000000000000000000000000000000000000000000", + 
"last_relayed_time": 1721261654, + "max_used_block_height": 3195160, + "max_used_block_id_hash": "2f7b8ca3dbd64cb33f428ece414b2b1cef405cfcd85fab1a70383490cc7ed603", + "receive_time": 1721261653, + "relayed": true, + "tx_blob": "020001020010d4cbc025f888800aa9b3da02d483d801b4c3188ad105b38a0ee7c3089831b542af37b3f901e68006885282d2028701428be79097b510e49fe5b25804029ac8bfa5e2a640a8b0e3e0a8199b1d26f22f050003f4c8ce3812f84c4dae9fab0e120faf7e9583c5bb024674a4ceb36550a58da368690003edcedc3b21577e2cb73380c7a829cc6707109e672cee030aa007618ff033ed9e8c00037d3b66c65d73e2c172c52ef1336987487da2507fc7c6ca0579ff7e8d34c47f4d170003d423aefc2536b8839995f569dc5e61385f8a537067f5c00aac957cccfb5a4790b5000397fcebd0877bdd71d2a90aed5a58180a0deecaed068f9e43ce44df011aecae0e382101430ad497db87046e33184d9617daa64add82a53dc16982a056b682b2f9a2da980680fcca1f6a94c40de9d6fb8935d837180bad6fc5bb224d8ed00140afb3123c36a4ff194f108db26877967b024bbd0cb51d34a7f5cb26262573b9a66cd70635204820f80d01f04297474b9ca965cdc9302126eebe75aa86a0a837f9dcface5fa38ad81bdcaa40e1fe942f2fc2768f267feece915df41d9864291b57885c1e0f8e4c93a7a4610aef477cf53b805dad5a1006071fde67ca8c47f545c2fdf3202120ef652e0173b4257b91811f7f39df6d0c9c1d2e8f00074c619e9e4322dd006c41c10b67c4ed21aae62ce8748701865c7cb3df6f168e9564b1c7f0f7ac8429f2ecc311a10557feec8395cd8e03be8f38908c2fee85f557cdb534d9775ae9583f7a73024d9a7efa1d75b74f4e62b0a40954b6ca4c0c2091beb3021f21cce994f5a679c0f45126a83d9cb431ab6babb1d3272ed9456ea6af5c35037b4a89a23640c6edf1a4576d592bbc5dd0c4f40aa3e918712d4812f5bc8a01e38e6dc1d4b5dc39ccb5e26eeaf5c88b85d968c800e10bfca42e320f6b03c3065856b58908555a266b7308321ba94e80aab0956b0c09e2f1c8c5da991672dea0c43de4c27d232f50c35e3fc836ac8eec7d6466d8af2ce9277a92aca9f4d4d4230d1dfbb042871dc92498339a6225d6ac016e0e9dde852364c31878a19a2820bd196c1f837b398a56152ae01928f9f2fb80aae7301795da93006123b6cea54f34f1232a9f1ddafa340764a5b0b305af112c72c76a6ed5d5513739fbbc56bc027b4251b23a8c7727a2abf5b3e9a803aa3d144022d129fede2abec4f787d574df639bd48d0de0d1e981d88de680a7d40033b0f93b51ed6e
60b8199c2adbc7de0b8f76802d8e74d9e46d2145a61232920571bc1d23fce134c02c268b523f5d5d31876d2259b2292597c15937c3057e5c4350391327429428ac6069bdd5db4515113d046f8058506b6e04399016157964b7c7dd41b929e9fb09997f51fa6db7903465b3cab1093ad38a783b67542e75bde42bbf5c07e415c24241017f2365a3e05b1bb8eab7b4ef681d3208a98715fd84e60be86931800276ce667812e9602afa5baacb0caa1ef9fc37cbac120918564ca04d73ce130ca42c28577d504516e5f44b843a76b2c1558d142e64ce4a55c34bf197a0251022a94e973f59a4352fe5e086e79fc90319e29834ab75bb24c4438c383f19ff77463d0f322566c1b5c64f1dd5ab8472ae4e8b4ac20ab110f5281a848ceca73330fc45d3366b7bd1accb139fec1ed99f15d11ef3ec2e7207a5e7bdafa136a85ccb0a55c3e77c86a70c2496fbcd0373c8aac6067731183cf211bbefbc7d71269d6ba62c98aa791613cbd267b4424763141075bf53d6c4472d3f7685f73f726b16caff0b90183a71c42ca6a27bf6d818abea8c67eb90ce4aca1f9efcb43215214336da99b4074faba4a99c9cfb429e70759024f3d5ce150e9e97e77c4f70501efc575c485e0aea5ed0f246930d73be9d8662f41a408323beacd53135df2eeea93722a00bfc026f5827dccdb05f8e9fedbfc7632b36324d891eb8220978da3ad6e4ad1f3e3507f4b7b8f4914326b28fda56141b2be6350c9c8499afbe66da9417d03598c40f05ec043be6b64b2aa2a0a5da6ab9825d6ebc03578ef5e7370ea63d051cb9f87e01933cfe48a5b22c49d6aca2cf869f6e98bb89c51c6fc9a5cea61a03920214aa0a014a19d0f3fe97670e3990021dae0fdbb5ed4979b1e2144ec3d1ba4b68c39205a86c3350cf2315a25c4ddb21d24ff2a3bdd5e0c53556e2c917920059781700081bdc3fd3966568487382cdb556e8fce96995dc2fb2d598edd55abb423e1ab50b74fdd8568356d62c742394e7bd5e1869a8f28a8557525664b8b28ab14c3d1108e4bae38d05447408f71bdb233aab9cd9a740e242794b9c75760064fdc90c2e0b902b407f311b34d9a172731fe7aa10c9bd189071bdf6f24be900fd8711a08407052db276f4ec535ffb2341e486de7e4ef4b0bbd10de4409c80221a653455930b82edf3a40421c3c665bba621821945a05a7c6c7ca3946f0b673a7e8fa2023d0e2b05b2fc21ffbdb68b0ccb9e0aacb2623f1a843896e68eb7157f8d115a3d7e0e02fd894c154ea56eb1b0d8f296325d3a71f581b868cbade1bbb50b73bf52fa0883cf71eb20aae786f9b27637e3d19ae75ae5206c0a6f789241741bfee9ce05259fe40f2628445d7d741b152930c35ab1c40aba265817242be986971e83665677", + 
"tx_json": "{\n \"version\": 2, \n \"unlock_time\": 0, \n \"vin\": [ {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 78652884, 20972664, 5675433, 3539412, 401844, 92298, 230707, 139751, 6296, 8501, 7087, 31923, 98406, 10504, 43266, 135\n ], \n \"k_image\": \"428be79097b510e49fe5b25804029ac8bfa5e2a640a8b0e3e0a8199b1d26f22f\"\n }\n }\n ], \n \"vout\": [ {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"f4c8ce3812f84c4dae9fab0e120faf7e9583c5bb024674a4ceb36550a58da368\", \n \"view_tag\": \"69\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"edcedc3b21577e2cb73380c7a829cc6707109e672cee030aa007618ff033ed9e\", \n \"view_tag\": \"8c\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"7d3b66c65d73e2c172c52ef1336987487da2507fc7c6ca0579ff7e8d34c47f4d\", \n \"view_tag\": \"17\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"d423aefc2536b8839995f569dc5e61385f8a537067f5c00aac957cccfb5a4790\", \n \"view_tag\": \"b5\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"97fcebd0877bdd71d2a90aed5a58180a0deecaed068f9e43ce44df011aecae0e\", \n \"view_tag\": \"38\"\n }\n }\n }\n ], \n \"extra\": [ 1, 67, 10, 212, 151, 219, 135, 4, 110, 51, 24, 77, 150, 23, 218, 166, 74, 221, 130, 165, 61, 193, 105, 130, 160, 86, 182, 130, 178, 249, 162, 218, 152\n ], \n \"rct_signatures\": {\n \"type\": 6, \n \"txnFee\": 66240000, \n \"ecdhInfo\": [ {\n \"amount\": \"6a94c40de9d6fb89\"\n }, {\n \"amount\": \"35d837180bad6fc5\"\n }, {\n \"amount\": \"bb224d8ed00140af\"\n }, {\n \"amount\": \"b3123c36a4ff194f\"\n }, {\n \"amount\": \"108db26877967b02\"\n }], \n \"outPk\": [ \"4bbd0cb51d34a7f5cb26262573b9a66cd70635204820f80d01f04297474b9ca9\", \"65cdc9302126eebe75aa86a0a837f9dcface5fa38ad81bdcaa40e1fe942f2fc2\", \"768f267feece915df41d9864291b57885c1e0f8e4c93a7a4610aef477cf53b80\", \"5dad5a1006071fde67ca8c47f545c2fdf3202120ef652e0173b4257b91811f7f\", 
\"39df6d0c9c1d2e8f00074c619e9e4322dd006c41c10b67c4ed21aae62ce87487\"]\n }, \n \"rctsig_prunable\": {\n \"nbp\": 1, \n \"bpp\": [ {\n \"A\": \"865c7cb3df6f168e9564b1c7f0f7ac8429f2ecc311a10557feec8395cd8e03be\", \n \"A1\": \"8f38908c2fee85f557cdb534d9775ae9583f7a73024d9a7efa1d75b74f4e62b0\", \n \"B\": \"a40954b6ca4c0c2091beb3021f21cce994f5a679c0f45126a83d9cb431ab6bab\", \n \"r1\": \"b1d3272ed9456ea6af5c35037b4a89a23640c6edf1a4576d592bbc5dd0c4f40a\", \n \"s1\": \"a3e918712d4812f5bc8a01e38e6dc1d4b5dc39ccb5e26eeaf5c88b85d968c800\", \n \"d1\": \"e10bfca42e320f6b03c3065856b58908555a266b7308321ba94e80aab0956b0c\", \n \"L\": [ \"e2f1c8c5da991672dea0c43de4c27d232f50c35e3fc836ac8eec7d6466d8af2c\", \"e9277a92aca9f4d4d4230d1dfbb042871dc92498339a6225d6ac016e0e9dde85\", \"2364c31878a19a2820bd196c1f837b398a56152ae01928f9f2fb80aae7301795\", \"da93006123b6cea54f34f1232a9f1ddafa340764a5b0b305af112c72c76a6ed5\", \"d5513739fbbc56bc027b4251b23a8c7727a2abf5b3e9a803aa3d144022d129fe\", \"de2abec4f787d574df639bd48d0de0d1e981d88de680a7d40033b0f93b51ed6e\", \"60b8199c2adbc7de0b8f76802d8e74d9e46d2145a61232920571bc1d23fce134\", \"c02c268b523f5d5d31876d2259b2292597c15937c3057e5c4350391327429428\", \"ac6069bdd5db4515113d046f8058506b6e04399016157964b7c7dd41b929e9fb\"\n ], \n \"R\": [ \"997f51fa6db7903465b3cab1093ad38a783b67542e75bde42bbf5c07e415c242\", \"41017f2365a3e05b1bb8eab7b4ef681d3208a98715fd84e60be86931800276ce\", \"667812e9602afa5baacb0caa1ef9fc37cbac120918564ca04d73ce130ca42c28\", \"577d504516e5f44b843a76b2c1558d142e64ce4a55c34bf197a0251022a94e97\", \"3f59a4352fe5e086e79fc90319e29834ab75bb24c4438c383f19ff77463d0f32\", \"2566c1b5c64f1dd5ab8472ae4e8b4ac20ab110f5281a848ceca73330fc45d336\", \"6b7bd1accb139fec1ed99f15d11ef3ec2e7207a5e7bdafa136a85ccb0a55c3e7\", \"7c86a70c2496fbcd0373c8aac6067731183cf211bbefbc7d71269d6ba62c98aa\", \"791613cbd267b4424763141075bf53d6c4472d3f7685f73f726b16caff0b9018\"\n ]\n }\n ], \n \"CLSAGs\": [ {\n \"s\": [ 
\"3a71c42ca6a27bf6d818abea8c67eb90ce4aca1f9efcb43215214336da99b407\", \"4faba4a99c9cfb429e70759024f3d5ce150e9e97e77c4f70501efc575c485e0a\", \"ea5ed0f246930d73be9d8662f41a408323beacd53135df2eeea93722a00bfc02\", \"6f5827dccdb05f8e9fedbfc7632b36324d891eb8220978da3ad6e4ad1f3e3507\", \"f4b7b8f4914326b28fda56141b2be6350c9c8499afbe66da9417d03598c40f05\", \"ec043be6b64b2aa2a0a5da6ab9825d6ebc03578ef5e7370ea63d051cb9f87e01\", \"933cfe48a5b22c49d6aca2cf869f6e98bb89c51c6fc9a5cea61a03920214aa0a\", \"014a19d0f3fe97670e3990021dae0fdbb5ed4979b1e2144ec3d1ba4b68c39205\", \"a86c3350cf2315a25c4ddb21d24ff2a3bdd5e0c53556e2c91792005978170008\", \"1bdc3fd3966568487382cdb556e8fce96995dc2fb2d598edd55abb423e1ab50b\", \"74fdd8568356d62c742394e7bd5e1869a8f28a8557525664b8b28ab14c3d1108\", \"e4bae38d05447408f71bdb233aab9cd9a740e242794b9c75760064fdc90c2e0b\", \"902b407f311b34d9a172731fe7aa10c9bd189071bdf6f24be900fd8711a08407\", \"052db276f4ec535ffb2341e486de7e4ef4b0bbd10de4409c80221a653455930b\", \"82edf3a40421c3c665bba621821945a05a7c6c7ca3946f0b673a7e8fa2023d0e\", \"2b05b2fc21ffbdb68b0ccb9e0aacb2623f1a843896e68eb7157f8d115a3d7e0e\"], \n \"c1\": \"02fd894c154ea56eb1b0d8f296325d3a71f581b868cbade1bbb50b73bf52fa08\", \n \"D\": \"83cf71eb20aae786f9b27637e3d19ae75ae5206c0a6f789241741bfee9ce0525\"\n }], \n \"pseudoOuts\": [ \"9fe40f2628445d7d741b152930c35ab1c40aba265817242be986971e83665677\"]\n }\n}", + "weight": 3312 + },{ + "blob_size": 1539, + "do_not_relay": false, + "double_spend_seen": false, + "fee": 492480000, + "id_hash": "a60834967cc6d22e61acbc298d5d2c725cbf5c8c492b999f3420da126f41b6c7", + "kept_by_block": false, + "last_failed_height": 0, + "last_failed_id_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "last_relayed_time": 1721261654, + "max_used_block_height": 3195144, + "max_used_block_id_hash": "464cb0e47663a64ee8eaf483c46d6584e9a7945a0c792b19cdbde426ec3a5034", + "receive_time": 1721261653, + "relayed": true, + "tx_blob": 
"020001020010dab5ae1feefaf606fea6c609aca1ec018fa0e70186de3df48307c5b72ab9a001dea302de3c9c17c2b701a923a801d302d2ed8513f48724933df6229a9fb6ededdcf5d0963280ee44fa9216ceebe7941f020003c9558a2daba528058e11b62fb334f0c1324096575db177d3676f2f3a7287f154640003cfc87315c4bbbfc8053d6f4921465da881c2675a531141d272c7b4e085829909c82c01af5114379f90bb68c84743bedf90bd63aa394ea61191a0c315c9816d8e0b9efd020901d1c905878ab04b850680cceaea016f7857e7ade8bd0ee83ef9696bc9b3823e5b43d81c8f49c1f4c6e6ccdb93f7d3d500bcc38801eba594e5377f8339c5e2baed73a25fdcb6162c8cac13ab20adf8fa0652db145499fff83ca114f5b5c39b01d0531831fce743ab9dcdd3571f8a93c433dfa17e9885741b50e34ed92e5d3b41b43bd7a99784d3e95385b94d578222b3bd7e8f0d981a318c424c98eaeffb9cab22da9b9ff6d666a72f2e56751796015b36634f455d2b1f99874b6b30707a20961c12819b752f60ec321436fe0d3bfd681b14048578b14eef8bbedad3f672fc010518a1c63af4ed08d8e7b875641eff37acf9fe46ca0a1b428ea685131eab8003a3cbe82bb87769ab6c5763eb468d04c985a650dde6db581ed3926f44f68bd9040796c94d7d0655ace603e046b12e0267718d575ac32b6fbfe0b911158198d2c5bc08c54b7f62ed326d8965c90e9bb29ad63fcb6d1d4ca5f849aba2eccb5a51ed1d28f2eceda481bd1f3a0229a9cce6b8816831ff5ac7d129f8f3088cdcfe356d73cd4b07a4453a016b3cd65bcb3ac7f85bed185044ffced69b4e111af67582c85551a1fee0e370eb1360611653b571f6dd35f1ab90b1af509dd630b88e6851322e65f1189e46bde8175c4add525e5975c8de17b718155ba857dcba3695543ef6e6a71d548c4940850707eeaf3769839ec636f3e0088d92bbdd5eb2e508529cfb4c078cfa9204c35863256579aefad74fbc7e411174375840bc3070c74939a0de834131dabafa47de92c03d38384d870f3b65f73e7e411ca4130de1f0a706399b64f63dc809524dcdcc76b3a1c0eb963384c39779125ec10c7eeccf9190200a76c4df88dedc7a6b7661ef39be599844c88f5320a1288db2f6395f73907851f966fe67c584ec77c89b15e597254b6550c460f4607c9aa7287577c6c1fc3c3b0b01f0f30818dce251455e18138c4509100cf7c1c79e9b0d332a57a0e3734b8bdaddfa0a3233560a441f2bff47b98de49b8c87e5401e7dfe844b7ad4e3b669394e44816e4cb057b6afce2a9fe3915084deae4d047e698b6753ec6da7e4b3a34babe73206a5990f57933939c242e27891786882631d5fd264e26d2853aba16336ae15070f00602edfb
fd9abf9702c7cf9607b52ad0a04d08518054c7f4cada448853092090164292412d8c4196fb4ddb842669ea9de50421b6f219cb05a0befe63f618f043ececbc47f1e3547e4e7abe935c972ed8f60258e1bb7b7b97a74109bfb5b6a03308939028b18f56c3128641e4bf7c7c5884fc05ee7ebdb76b85ad30cb9a1d90680dfdfecbfee9ea2e98bc50e3021f165dcf3890713572b0847f4767a244dc908c8931d54a79dbc34f35888485b977aa7d809b3513aec10d4a522175edfcc9e044a15f68a6e1f5b2095f9c7791984e794b75d3f9940b602b8984773443e4c760bf28dca4e93a036aa4be7771073d23aca7732aae7a364c628437064dc5e8dbf0dfb1fe2333a90b6950dc764e90840ebb74fb98803afe6ccd792ad637183747402c3218537f5b9416b9559b6cadd148c5a8bb52b4f92be7cf130ef729227acac092c13e6c8a9b5046688b81ded00f5ccfbd2f70bfb5aeec37de7d7ea4097feb5005590fab38475f3e37dcc9df3fa3a176d8f45e732acfd12e3f680bc4b511aa90e8c853f0d294f351f8a16a486dbd7bdbe582d48ff956a7c17d719220293d9b3088b24cbaa956ee96d662fe1f30b3d477c96dd3fab9902fcecf8692725440a380f5ffe1c8872ada9fcba8ad026cbb5e9d8324943cb5ebde77eefdcd5bb175c750a577032337113d6d1f45d488bfedbc3a25427633bb1873eb45a9a4ea726c33c0b297af95c01880e1f3bcb0ac86f4d7105e5c6cee032282fc50aa2d34c06b4c4bb", + "tx_json": "{\n \"version\": 2, \n \"unlock_time\": 0, \n \"vin\": [ {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 65772250, 14531950, 20026238, 3870892, 3788815, 1011462, 115188, 695237, 20537, 37342, 7774, 2972, 23490, 4521, 168, 339\n ], \n \"k_image\": \"d2ed8513f48724933df6229a9fb6ededdcf5d0963280ee44fa9216ceebe7941f\"\n }\n }\n ], \n \"vout\": [ {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"c9558a2daba528058e11b62fb334f0c1324096575db177d3676f2f3a7287f154\", \n \"view_tag\": \"64\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"cfc87315c4bbbfc8053d6f4921465da881c2675a531141d272c7b4e085829909\", \n \"view_tag\": \"c8\"\n }\n }\n }\n ], \n \"extra\": [ 1, 175, 81, 20, 55, 159, 144, 187, 104, 200, 71, 67, 190, 223, 144, 189, 99, 170, 57, 78, 166, 17, 145, 160, 195, 21, 201, 129, 109, 142, 11, 158, 253, 2, 9, 1, 209, 201, 5, 135, 138, 
176, 75, 133\n ], \n \"rct_signatures\": {\n \"type\": 6, \n \"txnFee\": 492480000, \n \"ecdhInfo\": [ {\n \"amount\": \"6f7857e7ade8bd0e\"\n }, {\n \"amount\": \"e83ef9696bc9b382\"\n }], \n \"outPk\": [ \"3e5b43d81c8f49c1f4c6e6ccdb93f7d3d500bcc38801eba594e5377f8339c5e2\", \"baed73a25fdcb6162c8cac13ab20adf8fa0652db145499fff83ca114f5b5c39b\"]\n }, \n \"rctsig_prunable\": {\n \"nbp\": 1, \n \"bpp\": [ {\n \"A\": \"d0531831fce743ab9dcdd3571f8a93c433dfa17e9885741b50e34ed92e5d3b41\", \n \"A1\": \"b43bd7a99784d3e95385b94d578222b3bd7e8f0d981a318c424c98eaeffb9cab\", \n \"B\": \"22da9b9ff6d666a72f2e56751796015b36634f455d2b1f99874b6b30707a2096\", \n \"r1\": \"1c12819b752f60ec321436fe0d3bfd681b14048578b14eef8bbedad3f672fc01\", \n \"s1\": \"0518a1c63af4ed08d8e7b875641eff37acf9fe46ca0a1b428ea685131eab8003\", \n \"d1\": \"a3cbe82bb87769ab6c5763eb468d04c985a650dde6db581ed3926f44f68bd904\", \n \"L\": [ \"96c94d7d0655ace603e046b12e0267718d575ac32b6fbfe0b911158198d2c5bc\", \"08c54b7f62ed326d8965c90e9bb29ad63fcb6d1d4ca5f849aba2eccb5a51ed1d\", \"28f2eceda481bd1f3a0229a9cce6b8816831ff5ac7d129f8f3088cdcfe356d73\", \"cd4b07a4453a016b3cd65bcb3ac7f85bed185044ffced69b4e111af67582c855\", \"51a1fee0e370eb1360611653b571f6dd35f1ab90b1af509dd630b88e6851322e\", \"65f1189e46bde8175c4add525e5975c8de17b718155ba857dcba3695543ef6e6\", \"a71d548c4940850707eeaf3769839ec636f3e0088d92bbdd5eb2e508529cfb4c\"\n ], \n \"R\": [ \"8cfa9204c35863256579aefad74fbc7e411174375840bc3070c74939a0de8341\", \"31dabafa47de92c03d38384d870f3b65f73e7e411ca4130de1f0a706399b64f6\", \"3dc809524dcdcc76b3a1c0eb963384c39779125ec10c7eeccf9190200a76c4df\", \"88dedc7a6b7661ef39be599844c88f5320a1288db2f6395f73907851f966fe67\", \"c584ec77c89b15e597254b6550c460f4607c9aa7287577c6c1fc3c3b0b01f0f3\", \"0818dce251455e18138c4509100cf7c1c79e9b0d332a57a0e3734b8bdaddfa0a\", \"3233560a441f2bff47b98de49b8c87e5401e7dfe844b7ad4e3b669394e44816e\"\n ]\n }\n ], \n \"CLSAGs\": [ {\n \"s\": [ 
\"4cb057b6afce2a9fe3915084deae4d047e698b6753ec6da7e4b3a34babe73206\", \"a5990f57933939c242e27891786882631d5fd264e26d2853aba16336ae15070f\", \"00602edfbfd9abf9702c7cf9607b52ad0a04d08518054c7f4cada44885309209\", \"0164292412d8c4196fb4ddb842669ea9de50421b6f219cb05a0befe63f618f04\", \"3ececbc47f1e3547e4e7abe935c972ed8f60258e1bb7b7b97a74109bfb5b6a03\", \"308939028b18f56c3128641e4bf7c7c5884fc05ee7ebdb76b85ad30cb9a1d906\", \"80dfdfecbfee9ea2e98bc50e3021f165dcf3890713572b0847f4767a244dc908\", \"c8931d54a79dbc34f35888485b977aa7d809b3513aec10d4a522175edfcc9e04\", \"4a15f68a6e1f5b2095f9c7791984e794b75d3f9940b602b8984773443e4c760b\", \"f28dca4e93a036aa4be7771073d23aca7732aae7a364c628437064dc5e8dbf0d\", \"fb1fe2333a90b6950dc764e90840ebb74fb98803afe6ccd792ad637183747402\", \"c3218537f5b9416b9559b6cadd148c5a8bb52b4f92be7cf130ef729227acac09\", \"2c13e6c8a9b5046688b81ded00f5ccfbd2f70bfb5aeec37de7d7ea4097feb500\", \"5590fab38475f3e37dcc9df3fa3a176d8f45e732acfd12e3f680bc4b511aa90e\", \"8c853f0d294f351f8a16a486dbd7bdbe582d48ff956a7c17d719220293d9b308\", \"8b24cbaa956ee96d662fe1f30b3d477c96dd3fab9902fcecf8692725440a380f\"], \n \"c1\": \"5ffe1c8872ada9fcba8ad026cbb5e9d8324943cb5ebde77eefdcd5bb175c750a\", \n \"D\": \"577032337113d6d1f45d488bfedbc3a25427633bb1873eb45a9a4ea726c33c0b\"\n }], \n \"pseudoOuts\": [ \"297af95c01880e1f3bcb0ac86f4d7105e5c6cee032282fc50aa2d34c06b4c4bb\"]\n }\n}", + "weight": 1539 + },{ + "blob_size": 1536, + "do_not_relay": false, + "double_spend_seen": false, + "fee": 491520000, + "id_hash": "aef60754dc1b2cd788faf23dd3c62afd3a0ac14e088cd6c8d22f1597860e47cd", + "kept_by_block": false, + "last_failed_height": 0, + "last_failed_id_hash": "0000000000000000000000000000000000000000000000000000000000000000", + "last_relayed_time": 1721261653, + "max_used_block_height": 3195160, + "max_used_block_id_hash": "2f7b8ca3dbd64cb33f428ece414b2b1cef405cfcd85fab1a70383490cc7ed603", + "receive_time": 1721261653, + "relayed": true, + "tx_blob": 
"02000102001081d9e02d98cd8606d18f0cbc9919ce9704e78b04bdeb03e28806af8601cec110c54dafc502a6118b1c9743c7052c479dbff819502441604a914af485db2f795b7f5bc0eab877d60a1419ee54980200031085022c023e67ccef3004d93b200be5df0cca89c5a17c51d76c3ee3c107d39190000332dd926cf0eac221cabeb5beee0cc9ef68a07a79748a69c3142377b5a8dbd6c7be2c019eb1fa0d8e9c4853c7db214bf6547ddd8b23bec77445b208e296fb26d0861a5b0209016ae500293b5f1167068080b0ea0113648ac5774104fe28d4ea6e7e28511a1c5c4189735a8c2e98bb12572283f3797ff1273d5dd44312e10cbb4cfdf34c3f7664dfc643db30f011129a53843d647e7cab9e03c2df68ef71b2040d59005f4501fb8885aaa4fc7f5fcae273f7a4612c629423864cb4d12c06044dad6629f40edfb2d6cb3c9dd78407302087c767c8bf17d507471a9cb9af54dc6528de99c8dd62ca69f07a623f4ca679eccc0fa03568e346cc061ff1624c4b73ee72a7a414c68164bc2abdf7f7798e6a0f13b2c3852371c2db35ebb00c7ba018a42b0c2c31fc06a748b257eafb46efc220a57a7767664f7744420bc3ee1c7c227aa711680dd301accf87381433838debd8e517ba86d06cb7475f3b8dfa301ee879112ab2e7a00807200f21ba8eeb86935deb899b3babb7a3ebb21b2f1dfd1bb08015ae4b8ff4f16356c7e807b6bf00f6178c752d92f9630508eeccf307f8efdc7dfae7592555a0824706000a2f329413ffe6ecfe68345053f441db1815286b951cbf83b4e7c327e27019fe81f3c534b297b9388438ffb1d5acd17514678a87719b843d64df591d1e478fdac7f3940088cfe0ea54a896fbcad933fb85c9e3f060db515cc8495f4a8939102799616733953f6bb959916ebdb8cfb8c958f62e787aa7b4a3bd45c311cd83be8617ee592413d8c76bf05922147cb08447e7b899344e9c7069cf710fe22a07744704b4b118e85e16e5dc3abc02e9e6fd45e95c73ec2412355907bc4a5d3d17849043695696eb46dbeb1389bc544c5ec11d5e35a9d9be1e5aa2884377c2f4410773a62507f87e7fe992e335377585bf63a7554e352066574e4e35fdf5d1c90a7ffa6b791e317f2c7c76a85532a7ae18a74c293e3ce9c853080fe23abc42d97fb8a8320d3586343706a56e8f293322aba82e9c159a41a3ceff344177dddc3df947cf9159c194d6b9c392dd76a5ddca3f1110ca3043f7b2e82ef1b55fbc87e8b3063bd71e497c90ae647f0c21719149341298ab1402e9bbee4b674bc9509070df1e5de5f3c400ef5230773a8c5b6fb1dc556bb25a8d0110b4c6f411785927930bcbc5b5a9057c4c6b55397a8000f4d8b6cc6159a4e642aa449512e56b405beb0ee74de267b38c713
5eaba1f4171e7fbe7fa79807eba65b20a78e896c94e47ae0519d587d550fbf22015c99d604c7f55019901ac5fcbd674d0d2fd6d752087ca056f89bd649fc33ddb75888ecc0327e66d0642683825f262c5cec006599bdf1f07b0f9af7553379e798af1e9644d988e703c96ae415e22d053d4c42fbb19c44d062eec4bf6ce7888a7438c2f1d5b94a415ee8ff3bd95045429378b8c2b9fb7c600656b5a6f94b1149793517bce19d0ddf2f5b30fe3a16bbebed91ee0a03c47a003778322b5d214b960acc4b9b2cdd1e55c4aed04199aa9a0b5132b2d3553c3fe087b9ffbbd7409e47528c075ee1edc51e6cedd5fe05998fddab5a3f25edb0ff2083fcf87e8e261df81a9cf178f5facd906821a543560d3f5d0bb07cf0402d69302463bc90bf04cce521b1b115ea4c57c6512695ade926f3be142001ee73e7f130bc9e506619c93bdae690697a0a642fb15921a1426de845c949799218995a96207ebd6b2904f2a881b82a065eac32ade92b8cf9c73939b858cad212976836a8108f15230cfc5131e6a379c236531ea611b1e8d5277add4f4d211cb6f9f6364cf0e8c56ab32609f43aae59b20ada5e35e217d64e44d122665073837ed50ef23c90d8132b7a8e3a732f424f3166075ca4082ba690961a894a7ed2e6f42d2e772010133381b48d999bbd4cb548c381550626e9e5b8e91c7ea1e78d01e390b5a9de6ad42feb5d5d5f1408202abac88b8c70056af4bde7b2b1a412d8feb4b0b85b88c61", + "tx_json": "{\n \"version\": 2, \n \"unlock_time\": 0, \n \"vin\": [ {\n \"key\": {\n \"amount\": 0, \n \"key_offsets\": [ 95956097, 12691096, 198609, 412860, 68558, 67047, 62909, 99426, 17199, 270542, 9925, 41647, 2214, 3595, 8599, 711\n ], \n \"k_image\": \"2c479dbff819502441604a914af485db2f795b7f5bc0eab877d60a1419ee5498\"\n }\n }\n ], \n \"vout\": [ {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"1085022c023e67ccef3004d93b200be5df0cca89c5a17c51d76c3ee3c107d391\", \n \"view_tag\": \"90\"\n }\n }\n }, {\n \"amount\": 0, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"32dd926cf0eac221cabeb5beee0cc9ef68a07a79748a69c3142377b5a8dbd6c7\", \n \"view_tag\": \"be\"\n }\n }\n }\n ], \n \"extra\": [ 1, 158, 177, 250, 13, 142, 156, 72, 83, 199, 219, 33, 75, 246, 84, 125, 221, 139, 35, 190, 199, 116, 69, 178, 8, 226, 150, 251, 38, 208, 134, 26, 91, 2, 9, 1, 106, 229, 0, 41, 59, 95, 17, 103\n ], 
\n \"rct_signatures\": {\n \"type\": 6, \n \"txnFee\": 491520000, \n \"ecdhInfo\": [ {\n \"amount\": \"13648ac5774104fe\"\n }, {\n \"amount\": \"28d4ea6e7e28511a\"\n }], \n \"outPk\": [ \"1c5c4189735a8c2e98bb12572283f3797ff1273d5dd44312e10cbb4cfdf34c3f\", \"7664dfc643db30f011129a53843d647e7cab9e03c2df68ef71b2040d59005f45\"]\n }, \n \"rctsig_prunable\": {\n \"nbp\": 1, \n \"bpp\": [ {\n \"A\": \"fb8885aaa4fc7f5fcae273f7a4612c629423864cb4d12c06044dad6629f40edf\", \n \"A1\": \"b2d6cb3c9dd78407302087c767c8bf17d507471a9cb9af54dc6528de99c8dd62\", \n \"B\": \"ca69f07a623f4ca679eccc0fa03568e346cc061ff1624c4b73ee72a7a414c681\", \n \"r1\": \"64bc2abdf7f7798e6a0f13b2c3852371c2db35ebb00c7ba018a42b0c2c31fc06\", \n \"s1\": \"a748b257eafb46efc220a57a7767664f7744420bc3ee1c7c227aa711680dd301\", \n \"d1\": \"accf87381433838debd8e517ba86d06cb7475f3b8dfa301ee879112ab2e7a008\", \n \"L\": [ \"200f21ba8eeb86935deb899b3babb7a3ebb21b2f1dfd1bb08015ae4b8ff4f163\", \"56c7e807b6bf00f6178c752d92f9630508eeccf307f8efdc7dfae7592555a082\", \"4706000a2f329413ffe6ecfe68345053f441db1815286b951cbf83b4e7c327e2\", \"7019fe81f3c534b297b9388438ffb1d5acd17514678a87719b843d64df591d1e\", \"478fdac7f3940088cfe0ea54a896fbcad933fb85c9e3f060db515cc8495f4a89\", \"39102799616733953f6bb959916ebdb8cfb8c958f62e787aa7b4a3bd45c311cd\", \"83be8617ee592413d8c76bf05922147cb08447e7b899344e9c7069cf710fe22a\"\n ], \n \"R\": [ \"744704b4b118e85e16e5dc3abc02e9e6fd45e95c73ec2412355907bc4a5d3d17\", \"849043695696eb46dbeb1389bc544c5ec11d5e35a9d9be1e5aa2884377c2f441\", \"0773a62507f87e7fe992e335377585bf63a7554e352066574e4e35fdf5d1c90a\", \"7ffa6b791e317f2c7c76a85532a7ae18a74c293e3ce9c853080fe23abc42d97f\", \"b8a8320d3586343706a56e8f293322aba82e9c159a41a3ceff344177dddc3df9\", \"47cf9159c194d6b9c392dd76a5ddca3f1110ca3043f7b2e82ef1b55fbc87e8b3\", \"063bd71e497c90ae647f0c21719149341298ab1402e9bbee4b674bc9509070df\"\n ]\n }\n ], \n \"CLSAGs\": [ {\n \"s\": [ \"1e5de5f3c400ef5230773a8c5b6fb1dc556bb25a8d0110b4c6f411785927930b\", 
\"cbc5b5a9057c4c6b55397a8000f4d8b6cc6159a4e642aa449512e56b405beb0e\", \"e74de267b38c7135eaba1f4171e7fbe7fa79807eba65b20a78e896c94e47ae05\", \"19d587d550fbf22015c99d604c7f55019901ac5fcbd674d0d2fd6d752087ca05\", \"6f89bd649fc33ddb75888ecc0327e66d0642683825f262c5cec006599bdf1f07\", \"b0f9af7553379e798af1e9644d988e703c96ae415e22d053d4c42fbb19c44d06\", \"2eec4bf6ce7888a7438c2f1d5b94a415ee8ff3bd95045429378b8c2b9fb7c600\", \"656b5a6f94b1149793517bce19d0ddf2f5b30fe3a16bbebed91ee0a03c47a003\", \"778322b5d214b960acc4b9b2cdd1e55c4aed04199aa9a0b5132b2d3553c3fe08\", \"7b9ffbbd7409e47528c075ee1edc51e6cedd5fe05998fddab5a3f25edb0ff208\", \"3fcf87e8e261df81a9cf178f5facd906821a543560d3f5d0bb07cf0402d69302\", \"463bc90bf04cce521b1b115ea4c57c6512695ade926f3be142001ee73e7f130b\", \"c9e506619c93bdae690697a0a642fb15921a1426de845c949799218995a96207\", \"ebd6b2904f2a881b82a065eac32ade92b8cf9c73939b858cad212976836a8108\", \"f15230cfc5131e6a379c236531ea611b1e8d5277add4f4d211cb6f9f6364cf0e\", \"8c56ab32609f43aae59b20ada5e35e217d64e44d122665073837ed50ef23c90d\"], \n \"c1\": \"8132b7a8e3a732f424f3166075ca4082ba690961a894a7ed2e6f42d2e7720101\", \n \"D\": \"33381b48d999bbd4cb548c381550626e9e5b8e91c7ea1e78d01e390b5a9de6ad\"\n }], \n \"pseudoOuts\": [ \"42feb5d5d5f1408202abac88b8c70056af4bde7b2b1a412d8feb4b0b85b88c61\"]\n }\n}", + "weight": 1536 + }], + "untrusted": false +}"#; +} + +define_request_and_response! 
{ + get_transaction_pool_stats (other), + GET_TRANSACTION_POOL_STATS: &str, + Request = +r#"{}"#; + Response = +r#"{ + "credits": 0, + "pool_stats": { + "bytes_max": 11843, + "bytes_med": 2219, + "bytes_min": 1528, + "bytes_total": 144192, + "fee_total": 7018100000, + "histo": [{ + "bytes": 11219, + "txs": 4 + },{ + "bytes": 9737, + "txs": 5 + },{ + "bytes": 8757, + "txs": 4 + },{ + "bytes": 14763, + "txs": 4 + },{ + "bytes": 15007, + "txs": 6 + },{ + "bytes": 15924, + "txs": 6 + },{ + "bytes": 17869, + "txs": 8 + },{ + "bytes": 10894, + "txs": 5 + },{ + "bytes": 38485, + "txs": 10 + },{ + "bytes": 1537, + "txs": 1 + }], + "histo_98pc": 186, + "num_10m": 0, + "num_double_spends": 0, + "num_failing": 0, + "num_not_relayed": 0, + "oldest": 1721261651, + "txs_total": 53 + }, + "status": "OK", + "top_hash": "", + "untrusted": false +}"#; +} + +define_request_and_response! { + stop_daemon (other), + STOP_DAEMON: &str, + Request = +r#"{}"#; + Response = +r#"{ + "status": "OK" +}"#; +} + +define_request_and_response! { + get_limit (other), + GET_LIMIT: &str, + Request = +r#"{}"#; + Response = +r#"{ + "limit_down": 1280000, + "limit_up": 1280000, + "status": "OK", + "untrusted": true +}"#; +} + +define_request_and_response! { + set_limit (other), + SET_LIMIT: &str, + Request = +r#"{ + "limit_down": 1024 +}"#; + Response = +r#"{ + "limit_down": 1024, + "limit_up": 128, + "status": "OK" + "untrusted": false +}"#; +} + +define_request_and_response! { + out_peers (other), + OUT_PEERS: &str, + Request = +r#"{ + "out_peers": 3232235535 +}"#; + Response = +r#"{ + "out_peers": 3232235535, + "status": "OK", + "untrusted": false +}"#; +} + +define_request_and_response! { + get_net_stats (other), + GET_NET_STATS: &str, + Request = +r#"{ + "in_peers": 3232235535 +}"#; + Response = +r#"{ + "in_peers": 3232235535, + "status": "OK", + "untrusted": false +}"#; +} + +define_request_and_response! 
{ + get_outs (other), + GET_OUTS: &str, + Request = +r#"{ + "outputs": [{ + "amount": 1, + "index": 0 + },{ + "amount": 1, + "index": 1 + }], + "get_txid": true +}"#; + Response = +r#"{ + "credits": 0, + "outs": [{ + "height": 51941, + "key": "08980d939ec297dd597119f498ad69fed9ca55e3a68f29f2782aae887ef0cf8e", + "mask": "1738eb7a677c6149228a2beaa21bea9e3370802d72a3eec790119580e02bd522", + "txid": "9d651903b80fb70b9935b72081cd967f543662149aed3839222511acd9100601", + "unlocked": true + },{ + "height": 51945, + "key": "454fe46c405be77625fa7e3389a04d3be392346983f27603561ac3a3a74f4a75", + "mask": "1738eb7a677c6149228a2beaa21bea9e3370802d72a3eec790119580e02bd522", + "txid": "230bff732dc5f225df14fff82aadd1bf11b3fb7ad3a03413c396a617e843f7d0", + "unlocked": true + }], + "status": "OK", + "top_hash": "", + "untrusted": false +}"#; +} + +define_request_and_response! { + update (other), + UPDATE: &str, + Request = +r#"{ + "command": "check" +}"#; + Response = +r#"{ + "auto_uri": "", + "hash": "", + "path": "", + "status": "OK", + "untrusted": false, + "update": false, + "user_uri": "", + "version": "" +}"#; +} + +define_request_and_response! { + pop_blocks (other), + POP_BLOCKS: &str, + Request = +r#"{ + "nblocks": 6 +}"#; + Response = +r#"{ + "height": 76482, + "status": "OK", + "untrusted": false +}"#; +} + +define_request_and_response! 
{ + UNDOCUMENTED_ENDPOINT (other), + GET_TRANSACTION_POOL_HASHES: &str, + Request = +r#"{}"#; + Response = +r#"{ + "credits": 0, + "status": "OK", + "top_hash": "", + "tx_hashes": ["aa928aed888acd6152c60194d50a4df29b0b851be6169acf11b6a8e304dd6c03","794345f321a98f3135151f3056c0fdf8188646a8dab27de971428acf3551dd11","1e9d2ae11f2168a228942077483e70940d34e8658c972bbc3e7f7693b90edf17","7375c928f261d00f07197775eb0bfa756e5f23319819152faa0b3c670fe54c1b","2e4d5f8c5a45498f37fb8b6ca4ebc1efa0c371c38c901c77e66b08c072287329","eee6d596cf855adfb10e1597d2018e3a61897ac467ef1d4a5406b8d20bfbd52f","59c574d7ba9bb4558470f74503c7518946a85ea22c60fccfbdec108ce7d8f236","0d57bec1e1075a9e1ac45cf3b3ced1ad95ccdf2a50ce360190111282a0178655","60d627b2369714a40009c07d6185ebe7fa4af324fdfa8d95a37a936eb878d062","661d7e728a901a8cb4cf851447d9cd5752462687ed0b776b605ba706f06bdc7d","b80e1f09442b00b3fffe6db5d263be6267c7586620afff8112d5a8775a6fc58e","974063906d1ddfa914baf85176b0f689d616d23f3d71ed4798458c8b4f9b9d8f","d2575ae152a180be4981a9d2fc009afcd073adaa5c6d8b022c540a62d6c905bb","3d78aa80ee50f506683bab9f02855eb10257a08adceda7cbfbdfc26b10f6b1bb","8b5bc125bdb73b708500f734501d55088c5ac381a0879e1141634eaa72b6a4da","11c06f4d2f00c912ca07313ed2ea5366f3cae914a762bed258731d3d9e3706df","b3644dc7c9a3a53465fe80ad3769e516edaaeb7835e16fdd493aac110d472ae1","ed2478ad793b923dbf652c8612c40799d764e5468897021234a14a37346bc6ee"], + "untrusted": false +}"#; +} + +define_request_and_response! 
{ + UNDOCUMENTED_ENDPOINT (other), + GET_PUBLIC_NODES: &str, + Request = +r#"{}"#; + Response = +r#"{ + "status": "OK", + "untrusted": false, + "white": [{ + "host": "70.52.75.3", + "last_seen": 1721246387, + "rpc_credits_per_hash": 0, + "rpc_port": 18081 + },{ + "host": "zbjkbsxc5munw3qusl7j2hpcmikhqocdf4pqhnhtpzw5nt5jrmofptid.onion:18083", + "last_seen": 1720186288, + "rpc_credits_per_hash": 0, + "rpc_port": 18089 + }] +}"#; +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} diff --git a/test-utils/src/rpc/mod.rs b/test-utils/src/rpc/mod.rs index 14c963a..0da6b48 100644 --- a/test-utils/src/rpc/mod.rs +++ b/test-utils/src/rpc/mod.rs @@ -1,25 +1,6 @@ -//! Monero RPC client. +//! Monero RPC data & client. //! -//! This module is a client for Monero RPC that maps the types -//! into the native types used by Cuprate found in `cuprate_types`. -//! -//! # Usage -//! ```rust,ignore -//! #[tokio::main] -//! async fn main() { -//! // Create RPC client. -//! let rpc = HttpRpcClient::new(None).await; -//! -//! // Collect 20 blocks. -//! let mut vec: Vec = vec![]; -//! for height in (3130269 - 20)..3130269 { -//! vec.push(rpc.get_verified_block_information(height).await); -//! } -//! } -//! ``` +//! This module has a `monerod` RPC [`client`] and some real request/response [`data`]. 
-mod client; -pub use client::HttpRpcClient; - -mod constants; -pub use constants::LOCALHOST_RPC_URL; +pub mod client; +pub mod data; diff --git a/typos.toml b/typos.toml index abab190..0317c40 100644 --- a/typos.toml +++ b/typos.toml @@ -17,4 +17,5 @@ extend-ignore-identifiers-re = [ extend-exclude = [ "/misc/gpg_keys/", "cryptonight/", + "/test-utils/src/rpc/data/json.rs", ] From 929d19c4508a84d886ece03009a6fcdc5edea5c2 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Thu, 25 Jul 2024 11:46:41 -0400 Subject: [PATCH 022/104] rpc: custom epee for misc/bin types (#229) * fixed-bytes: add `serde`, document feature flags * fixed-bytes: add derives * rpc: add `as _` syntax to macro * rpc: use `ByteArrayVec` and `ContainerAsBlob` for binary types * fixed-bytes: re-add derives * rpc-types: dedup default value within macro * readme: fixed bytes section * types: custom epee - `BlockCompleteEntry` * types: custom epee - `KeyImageSpentStatus` * types: custom epee - `PoolInfoExtent` * types: add `Status::Other(String)` variant * types: custom epee - `TxEntry`, add `read_epee_field` macro * bin: custom epee - `GetBlocks` * types: add `serde.rs` * misc: make `TxEntry` an `enum`, impl serde * misc: `unimplemented!()` for `TxEntry`'s epee * types: add `BlockCompleteEntry` * rpc: replace `BlockCompleteEntry` with `cuprate-types` * types: document `BlockCompleteEntry` * bin: fix `number_of_fields` for `GetBlocksResponse` * misc: add `Distribution` * distribution: add todo * misc fixes * readme: add `(De)serialization invariants` * distribution: compress variants * types: add `block_complete_entry.rs` * net: fix imports * p2p: fix imports * turn off default-features * p2p: fix imports * misc fixes * Update net/wire/Cargo.toml Co-authored-by: Boog900 * distribution: module doc * wire: re-export types * bin: use enum for `GetBlocksResponse` * misc: use lowercase for stringify * remove duplicated fields for custom epee * types: remove `should_write()` for custom epee * bin: split 
`GetBlocksResponse` variant fields into structs * misc: split `Distribution` variant fields into structs * small fixes * put all fields in `read_epee_field!` * distribution: (de)compress during epee/serde (de)serialization * distribution: leave (de)compression functions as `todo!()` --------- Co-authored-by: Boog900 --- Cargo.lock | 2 + rpc/types/Cargo.toml | 4 +- rpc/types/README.md | 16 + rpc/types/src/base.rs | 2 +- rpc/types/src/bin.rs | 329 +++++++++++++++++-- rpc/types/src/constants.rs | 3 - rpc/types/src/json.rs | 21 +- rpc/types/src/lib.rs | 7 +- rpc/types/src/macros.rs | 44 ++- rpc/types/src/misc/block_complete_entry.rs | 37 --- rpc/types/src/misc/distribution.rs | 309 +++++++++++++++++ rpc/types/src/misc/key_image_spent_status.rs | 71 +++- rpc/types/src/misc/misc.rs | 15 +- rpc/types/src/misc/mod.rs | 6 +- rpc/types/src/misc/pool_info_extent.rs | 73 +++- rpc/types/src/misc/status.rs | 41 +-- rpc/types/src/misc/tx_entry.rs | 161 ++++++--- rpc/types/src/other.rs | 10 +- rpc/types/src/serde.rs | 32 ++ types/README.md | 2 +- 20 files changed, 949 insertions(+), 236 deletions(-) delete mode 100644 rpc/types/src/misc/block_complete_entry.rs create mode 100644 rpc/types/src/misc/distribution.rs create mode 100644 rpc/types/src/serde.rs diff --git a/Cargo.lock b/Cargo.lock index 426ccc2..ac29662 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -764,8 +764,10 @@ version = "0.0.0" dependencies = [ "cuprate-epee-encoding", "cuprate-fixed-bytes", + "cuprate-types", "monero-serai", "paste", + "pretty_assertions", "serde", "serde_json", ] diff --git a/rpc/types/Cargo.toml b/rpc/types/Cargo.toml index 1176526..fcec453 100644 --- a/rpc/types/Cargo.toml +++ b/rpc/types/Cargo.toml @@ -16,10 +16,12 @@ epee = ["dep:cuprate-epee-encoding"] [dependencies] cuprate-epee-encoding = { path = "../../net/epee-encoding", optional = true } cuprate-fixed-bytes = { path = "../../net/fixed-bytes" } +cuprate-types = { path = "../../types" } monero-serai = { workspace = true } paste = { 
workspace = true } serde = { workspace = true, optional = true } [dev-dependencies] -serde_json = { workspace = true } +serde_json = { workspace = true } +pretty_assertions = { workspace = true } diff --git a/rpc/types/README.md b/rpc/types/README.md index eb8da01..566cca7 100644 --- a/rpc/types/README.md +++ b/rpc/types/README.md @@ -78,6 +78,22 @@ will be used instead of a more typical [`String`] for optimization reasons. --> +# (De)serialization invariants +Due to how types are defined in this library internally (all through a single macro), +most types implement both `serde` and `epee`. + +However, some of the types will panic with [`unimplemented`] +or will otherwise have undefined implementation in the incorrect context. + +In other words: +- The epee (de)serialization of [`json`] & [`other`] types should **not** be relied upon +- The JSON (de)serialization of [`bin`] types should **not** be relied upon + +The invariants that can be relied upon: +- Types in [`json`] & [`other`] will implement `serde` correctly +- Types in [`bin`] will implement `epee` correctly +- Misc types will implement `serde/epee` correctly as needed + # Feature flags List of feature flags for `cuprate-rpc-types`. diff --git a/rpc/types/src/base.rs b/rpc/types/src/base.rs index f13ac40..4990cdd 100644 --- a/rpc/types/src/base.rs +++ b/rpc/types/src/base.rs @@ -46,7 +46,7 @@ epee_object! { //---------------------------------------------------------------------------------------------------- Responses #[doc = monero_definition_link!(cc73fe71162d564ffda8e549b79a350bca53c454, "rpc/core_rpc_server_commands_defs.h", 101..=112)] /// The most common base for responses. -#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct ResponseBase { /// General RPC error code. [`Status::Ok`] means everything looks good. 
diff --git a/rpc/types/src/bin.rs b/rpc/types/src/bin.rs index 3dcfb96..c801c69 100644 --- a/rpc/types/src/bin.rs +++ b/rpc/types/src/bin.rs @@ -5,51 +5,32 @@ //---------------------------------------------------------------------------------------------------- Import use cuprate_fixed_bytes::ByteArrayVec; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + #[cfg(feature = "epee")] -use cuprate_epee_encoding::container_as_blob::ContainerAsBlob; +use cuprate_epee_encoding::{ + container_as_blob::ContainerAsBlob, + epee_object, error, + macros::bytes::{Buf, BufMut}, + read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, EpeeValue, +}; + +use cuprate_types::BlockCompleteEntry; use crate::{ base::{AccessResponseBase, ResponseBase}, defaults::{default_false, default_height, default_string, default_vec, default_zero}, free::{is_one, is_zero}, - macros::define_request_and_response, + macros::{define_request, define_request_and_response, define_request_and_response_doc}, misc::{ - AuxPow, BlockCompleteEntry, BlockHeader, BlockOutputIndices, ChainInfo, ConnectionInfo, - GetBan, GetOutputsOut, HardforkEntry, HistogramEntry, OutKeyBin, OutputDistributionData, - Peer, PoolTxInfo, SetBan, Span, Status, TxBacklogEntry, + AuxPow, BlockHeader, BlockOutputIndices, ChainInfo, ConnectionInfo, GetBan, GetOutputsOut, + HardforkEntry, HistogramEntry, OutKeyBin, OutputDistributionData, Peer, PoolInfoExtent, + PoolTxInfo, SetBan, Span, Status, TxBacklogEntry, }, }; -//---------------------------------------------------------------------------------------------------- TODO -define_request_and_response! { - get_blocksbin, - cc73fe71162d564ffda8e549b79a350bca53c454 => - core_rpc_server_commands_defs.h => 162..=262, - GetBlocks, - Request { - requested_info: u8 = default_zero(), "default_zero", - // FIXME: This is a `std::list` in `monerod` because...? 
- block_ids: ByteArrayVec<32>, - start_height: u64, - prune: bool, - no_miner_tx: bool = default_false(), "default_false", - pool_info_since: u64 = default_zero(), "default_zero", - }, - // TODO: this has custom epee (de)serialization. - // - ResponseBase { - blocks: Vec, - start_height: u64, - current_height: u64, - output_indices: Vec, - daemon_time: u64, - pool_info_extent: u8, - added_pool_txs: Vec, - remaining_added_pool_txids: Vec<[u8; 32]>, - removed_pool_txids: Vec<[u8; 32]>, - } -} - +//---------------------------------------------------------------------------------------------------- Definitions define_request_and_response! { get_blocks_by_heightbin, cc73fe71162d564ffda8e549b79a350bca53c454 => @@ -134,6 +115,284 @@ define_request_and_response! { } } +//---------------------------------------------------------------------------------------------------- GetBlocks +define_request! { + #[doc = define_request_and_response_doc!( + "response" => GetBlocksResponse, + get_blocksbin, + cc73fe71162d564ffda8e549b79a350bca53c454, + core_rpc_server_commands_defs, h, 162, 262, + )] + GetBlocksRequest { + requested_info: u8 = default_zero::(), "default_zero", + // FIXME: This is a `std::list` in `monerod` because...? + block_ids: ByteArrayVec<32>, + start_height: u64, + prune: bool, + no_miner_tx: bool = default_false(), "default_false", + pool_info_since: u64 = default_zero::(), "default_zero", + } +} + +#[doc = define_request_and_response_doc!( + "request" => GetBlocksRequest, + get_blocksbin, + cc73fe71162d564ffda8e549b79a350bca53c454, + core_rpc_server_commands_defs, h, 162, 262, +)] +/// +/// This response's variant depends upon [`PoolInfoExtent`]. +#[allow(dead_code, missing_docs)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum GetBlocksResponse { + /// Will always serialize a [`PoolInfoExtent::None`] field. 
+ PoolInfoNone(GetBlocksResponsePoolInfoNone), + /// Will always serialize a [`PoolInfoExtent::Incremental`] field. + PoolInfoIncremental(GetBlocksResponsePoolInfoIncremental), + /// Will always serialize a [`PoolInfoExtent::Full`] field. + PoolInfoFull(GetBlocksResponsePoolInfoFull), +} + +impl Default for GetBlocksResponse { + fn default() -> Self { + Self::PoolInfoNone(GetBlocksResponsePoolInfoNone::default()) + } +} + +/// Data within [`GetBlocksResponse::PoolInfoNone`]. +#[allow(dead_code, missing_docs)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct GetBlocksResponsePoolInfoNone { + pub status: Status, + pub untrusted: bool, + pub blocks: Vec, + pub start_height: u64, + pub current_height: u64, + pub output_indices: Vec, + pub daemon_time: u64, +} + +#[cfg(feature = "epee")] +epee_object! { + GetBlocksResponsePoolInfoNone, + status: Status, + untrusted: bool, + blocks: Vec, + start_height: u64, + current_height: u64, + output_indices: Vec, + daemon_time: u64, +} + +/// Data within [`GetBlocksResponse::PoolInfoIncremental`]. +#[allow(dead_code, missing_docs)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct GetBlocksResponsePoolInfoIncremental { + pub status: Status, + pub untrusted: bool, + pub blocks: Vec, + pub start_height: u64, + pub current_height: u64, + pub output_indices: Vec, + pub daemon_time: u64, + pub added_pool_txs: Vec, + pub remaining_added_pool_txids: ByteArrayVec<32>, + pub removed_pool_txids: ByteArrayVec<32>, +} + +#[cfg(feature = "epee")] +epee_object! 
{ + GetBlocksResponsePoolInfoIncremental, + status: Status, + untrusted: bool, + blocks: Vec, + start_height: u64, + current_height: u64, + output_indices: Vec, + daemon_time: u64, + added_pool_txs: Vec, + remaining_added_pool_txids: ByteArrayVec<32>, + removed_pool_txids: ByteArrayVec<32>, +} + +/// Data within [`GetBlocksResponse::PoolInfoFull`]. +#[allow(dead_code, missing_docs)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct GetBlocksResponsePoolInfoFull { + pub status: Status, + pub untrusted: bool, + pub blocks: Vec, + pub start_height: u64, + pub current_height: u64, + pub output_indices: Vec, + pub daemon_time: u64, + pub added_pool_txs: Vec, + pub remaining_added_pool_txids: ByteArrayVec<32>, +} + +#[cfg(feature = "epee")] +epee_object! { + GetBlocksResponsePoolInfoFull, + status: Status, + untrusted: bool, + blocks: Vec, + start_height: u64, + current_height: u64, + output_indices: Vec, + daemon_time: u64, + added_pool_txs: Vec, + remaining_added_pool_txids: ByteArrayVec<32>, +} + +#[cfg(feature = "epee")] +/// [`EpeeObjectBuilder`] for [`GetBlocksResponse`]. +/// +/// Not for public usage. +#[allow(dead_code, missing_docs)] +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct __GetBlocksResponseEpeeBuilder { + pub status: Option, + pub untrusted: Option, + pub blocks: Option>, + pub start_height: Option, + pub current_height: Option, + pub output_indices: Option>, + pub daemon_time: Option, + pub pool_info_extent: Option, + pub added_pool_txs: Option>, + pub remaining_added_pool_txids: Option>, + pub removed_pool_txids: Option>, +} + +#[cfg(feature = "epee")] +impl EpeeObjectBuilder for __GetBlocksResponseEpeeBuilder { + fn add_field(&mut self, name: &str, r: &mut B) -> error::Result { + macro_rules! 
read_epee_field { + ($($field:ident),*) => { + match name { + $( + stringify!($field) => { self.$field = Some(read_epee_value(r)?); }, + )* + _ => return Ok(false), + } + }; + } + + read_epee_field! { + status, + untrusted, + blocks, + start_height, + current_height, + output_indices, + daemon_time, + pool_info_extent, + added_pool_txs, + remaining_added_pool_txids, + removed_pool_txids + } + + Ok(true) + } + + fn finish(self) -> error::Result { + const ELSE: error::Error = error::Error::Format("Required field was not found!"); + + let status = self.status.ok_or(ELSE)?; + let untrusted = self.untrusted.ok_or(ELSE)?; + let blocks = self.blocks.ok_or(ELSE)?; + let start_height = self.start_height.ok_or(ELSE)?; + let current_height = self.current_height.ok_or(ELSE)?; + let output_indices = self.output_indices.ok_or(ELSE)?; + let daemon_time = self.daemon_time.ok_or(ELSE)?; + let pool_info_extent = self.pool_info_extent.ok_or(ELSE)?; + + let this = match pool_info_extent { + PoolInfoExtent::None => { + GetBlocksResponse::PoolInfoNone(GetBlocksResponsePoolInfoNone { + status, + untrusted, + blocks, + start_height, + current_height, + output_indices, + daemon_time, + }) + } + PoolInfoExtent::Incremental => { + GetBlocksResponse::PoolInfoIncremental(GetBlocksResponsePoolInfoIncremental { + status, + untrusted, + blocks, + start_height, + current_height, + output_indices, + daemon_time, + added_pool_txs: self.added_pool_txs.ok_or(ELSE)?, + remaining_added_pool_txids: self.remaining_added_pool_txids.ok_or(ELSE)?, + removed_pool_txids: self.removed_pool_txids.ok_or(ELSE)?, + }) + } + PoolInfoExtent::Full => { + GetBlocksResponse::PoolInfoFull(GetBlocksResponsePoolInfoFull { + status, + untrusted, + blocks, + start_height, + current_height, + output_indices, + daemon_time, + added_pool_txs: self.added_pool_txs.ok_or(ELSE)?, + remaining_added_pool_txids: self.remaining_added_pool_txids.ok_or(ELSE)?, + }) + } + }; + + Ok(this) + } +} + +#[cfg(feature = "epee")] 
+#[allow(clippy::cognitive_complexity)] +impl EpeeObject for GetBlocksResponse { + type Builder = __GetBlocksResponseEpeeBuilder; + + fn number_of_fields(&self) -> u64 { + // [`PoolInfoExtent`] + inner struct fields. + let inner_fields = match self { + Self::PoolInfoNone(s) => s.number_of_fields(), + Self::PoolInfoIncremental(s) => s.number_of_fields(), + Self::PoolInfoFull(s) => s.number_of_fields(), + }; + + 1 + inner_fields + } + + fn write_fields(self, w: &mut B) -> error::Result<()> { + match self { + Self::PoolInfoNone(s) => { + s.write_fields(w)?; + write_field(PoolInfoExtent::None.to_u8(), "pool_info_extent", w)?; + } + Self::PoolInfoIncremental(s) => { + s.write_fields(w)?; + write_field(PoolInfoExtent::Incremental.to_u8(), "pool_info_extent", w)?; + } + Self::PoolInfoFull(s) => { + s.write_fields(w)?; + write_field(PoolInfoExtent::Full.to_u8(), "pool_info_extent", w)?; + } + } + + Ok(()) + } +} + //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test { diff --git a/rpc/types/src/constants.rs b/rpc/types/src/constants.rs index e580283..8c6120b 100644 --- a/rpc/types/src/constants.rs +++ b/rpc/types/src/constants.rs @@ -36,9 +36,6 @@ pub const CORE_RPC_STATUS_NOT_MINING: &str = "NOT MINING"; #[doc = monero_definition_link!(cc73fe71162d564ffda8e549b79a350bca53c454, "/rpc/core_rpc_server_commands_defs.h", 81)] pub const CORE_RPC_STATUS_PAYMENT_REQUIRED: &str = "PAYMENT REQUIRED"; -/// Custom `CORE_RPC_STATUS` for usage in Cuprate. -pub const CORE_RPC_STATUS_UNKNOWN: &str = "UNKNOWN"; - //---------------------------------------------------------------------------------------------------- Versions #[doc = monero_definition_link!(cc73fe71162d564ffda8e549b79a350bca53c454, "/rpc/core_rpc_server_commands_defs.h", 90)] /// RPC major version. 
diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index 2e7aa82..f4bca99 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -9,12 +9,12 @@ use crate::{ free::{is_one, is_zero}, macros::define_request_and_response, misc::{ - AuxPow, BlockHeader, ChainInfo, ConnectionInfo, GetBan, HardforkEntry, HistogramEntry, - OutputDistributionData, SetBan, Span, Status, SyncInfoPeer, TxBacklogEntry, + AuxPow, BlockHeader, ChainInfo, ConnectionInfo, Distribution, GetBan, HardforkEntry, + HistogramEntry, OutputDistributionData, SetBan, Span, Status, SyncInfoPeer, TxBacklogEntry, }, }; -//---------------------------------------------------------------------------------------------------- Struct definitions +//---------------------------------------------------------------------------------------------------- Definitions // This generates 2 structs: // // - `GetBlockTemplateRequest` @@ -131,7 +131,6 @@ define_request_and_response! { // type alias to `()` instead of a `struct`. Request {}, - #[derive(Copy)] ResponseBase { count: u64, } @@ -292,7 +291,7 @@ define_request_and_response! { AccessResponseBase { blob: String, block_header: BlockHeader, - json: String, // TODO: this should be defined in a struct, it has many fields. + json: String, // FIXME: this should be defined in a struct, it has many fields. miner_tx_hash: String, tx_hashes: Vec, } @@ -409,7 +408,6 @@ define_request_and_response! { Request { address: String, }, - #[derive(Copy)] Response { banned: bool, seconds: u32, @@ -425,7 +423,6 @@ define_request_and_response! { Request { txids: Vec = default_vec::(), "default_vec", }, - #[derive(Copy)] #[cfg_attr(feature = "serde", serde(transparent))] #[repr(transparent)] Response { @@ -479,9 +476,9 @@ define_request_and_response! 
{ version: u32, release: bool, #[serde(skip_serializing_if = "is_zero")] - current_height: u64 = default_zero(), "default_zero", + current_height: u64 = default_zero::(), "default_zero", #[serde(skip_serializing_if = "is_zero")] - target_height: u64 = default_zero(), "default_zero", + target_height: u64 = default_zero::(), "default_zero", #[serde(skip_serializing_if = "Vec::is_empty")] hard_forks: Vec = default_vec(), "default_vec", } @@ -520,7 +517,6 @@ define_request_and_response! { Request { txids: Vec, }, - #[derive(Copy)] #[cfg_attr(feature = "serde", serde(transparent))] #[repr(transparent)] Response { @@ -574,10 +570,8 @@ define_request_and_response! { from_height: u64, to_height: u64, }, - /// TODO: this request has custom serde: - /// AccessResponseBase { - distributions: Vec, + distributions: Vec, } } @@ -607,7 +601,6 @@ define_request_and_response! { Request { check: bool = default_false(), "default_false", }, - #[derive(Copy)] ResponseBase { pruned: bool, pruning_seed: u32, diff --git a/rpc/types/src/lib.rs b/rpc/types/src/lib.rs index 45cca69..d0d1e00 100644 --- a/rpc/types/src/lib.rs +++ b/rpc/types/src/lib.rs @@ -113,6 +113,9 @@ mod defaults; mod free; mod macros; +#[cfg(feature = "serde")] +mod serde; + pub mod base; pub mod bin; pub mod json; @@ -121,6 +124,6 @@ pub mod other; pub use constants::{ CORE_RPC_STATUS_BUSY, CORE_RPC_STATUS_NOT_MINING, CORE_RPC_STATUS_OK, - CORE_RPC_STATUS_PAYMENT_REQUIRED, CORE_RPC_STATUS_UNKNOWN, CORE_RPC_VERSION, - CORE_RPC_VERSION_MAJOR, CORE_RPC_VERSION_MINOR, + CORE_RPC_STATUS_PAYMENT_REQUIRED, CORE_RPC_VERSION, CORE_RPC_VERSION_MAJOR, + CORE_RPC_VERSION_MINOR, }; diff --git a/rpc/types/src/macros.rs b/rpc/types/src/macros.rs index e130138..fa0d518 100644 --- a/rpc/types/src/macros.rs +++ b/rpc/types/src/macros.rs @@ -16,11 +16,11 @@ /// /// # Macro internals /// This macro uses: -/// - [`__define_request`] -/// - [`__define_response`] -/// - [`__define_request_and_response_doc`] +/// - [`define_request`] +/// - 
[`define_response`] +/// - [`define_request_and_response_doc`] /// -/// # `__define_request` +/// # `define_request` /// This macro has 2 branches. If the caller provides /// `Request {}`, i.e. no fields, it will generate: /// ``` @@ -34,7 +34,7 @@ /// means they are not compatible and it makes it cumbersome for end-users. /// Really, they semantically are empty types, so `()` is used. /// -/// # `__define_response` +/// # `define_response` /// This macro has 2 branches. If the caller provides `Response` /// it will generate a normal struct with no additional fields. /// @@ -86,8 +86,8 @@ macro_rules! define_request_and_response { )* } ) => { paste::paste! { - $crate::macros::__define_request! { - #[doc = $crate::macros::__define_request_and_response_doc!( + $crate::macros::define_request! { + #[doc = $crate::macros::define_request_and_response_doc!( "response" => [<$type_name Response>], $monero_daemon_rpc_doc_link, $monero_code_commit, @@ -110,12 +110,12 @@ macro_rules! define_request_and_response { } } - $crate::macros::__define_response! { + $crate::macros::define_response! { #[allow(dead_code)] #[allow(missing_docs)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] - #[doc = $crate::macros::__define_request_and_response_doc!( + #[doc = $crate::macros::define_request_and_response_doc!( "request" => [<$type_name Request>], $monero_daemon_rpc_doc_link, $monero_code_commit, @@ -145,9 +145,7 @@ pub(crate) use define_request_and_response; /// Define a request type. /// /// This is only used in [`define_request_and_response`], see it for docs. -/// -/// `__` is used to notate that this shouldn't be called directly. -macro_rules! __define_request { +macro_rules! define_request { //------------------------------------------------------------------------------ // This branch will generate a type alias to `()` if only given `{}` as input. 
( @@ -206,15 +204,13 @@ macro_rules! __define_request { } }; } -pub(crate) use __define_request; +pub(crate) use define_request; //---------------------------------------------------------------------------------------------------- define_response /// Define a response type. /// -/// This is only used in [`define_request_and_response`], see it for docs. -/// -/// `__` is used to notate that this shouldn't be called directly. -macro_rules! __define_response { +/// This is used in [`define_request_and_response`], see it for docs. +macro_rules! define_response { //------------------------------------------------------------------------------ // This version of the macro expects the literal ident // `Response` => $response_type_name. @@ -228,7 +224,7 @@ macro_rules! __define_response { // The response type. Response => $t:ident { // And any fields. - // See [`__define_request`] for docs, this does the same thing. + // See [`define_request`] for docs, this does the same thing. $( $( #[$field_attr:meta] )* $field:ident: $field_type:ty @@ -265,7 +261,7 @@ macro_rules! __define_response { // The response base type => actual name of the struct $base:ty => $t:ident { // And any fields. - // See [`__define_request`] for docs, this does the same thing. + // See [`define_request`] for docs, this does the same thing. $( $( #[$field_attr:meta] )* $field:ident: $field_type:ty @@ -298,16 +294,14 @@ macro_rules! __define_response { } }; } -pub(crate) use __define_response; +pub(crate) use define_response; //---------------------------------------------------------------------------------------------------- define_request_and_response_doc /// Generate documentation for the types generated -/// by the [`__define_request_and_response`] macro. +/// by the [`define_request_and_response`] macro. /// /// See it for more info on inputs. -/// -/// `__` is used to notate that this shouldn't be called directly. -macro_rules! __define_request_and_response_doc { +macro_rules! 
define_request_and_response_doc { ( // This labels the last `[request]` or `[response]` // hyperlink in documentation. Input is either: @@ -351,7 +345,7 @@ macro_rules! __define_request_and_response_doc { ) }; } -pub(crate) use __define_request_and_response_doc; +pub(crate) use define_request_and_response_doc; //---------------------------------------------------------------------------------------------------- Macro /// Output a string link to `monerod` source code. diff --git a/rpc/types/src/misc/block_complete_entry.rs b/rpc/types/src/misc/block_complete_entry.rs deleted file mode 100644 index ca791b0..0000000 --- a/rpc/types/src/misc/block_complete_entry.rs +++ /dev/null @@ -1,37 +0,0 @@ -//! TODO - -//---------------------------------------------------------------------------------------------------- Use -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -#[cfg(feature = "epee")] -use cuprate_epee_encoding::epee_object; - -use crate::misc::TxBlobEntry; - -//---------------------------------------------------------------------------------------------------- BlockCompleteEntry -#[doc = crate::macros::monero_definition_link!( - cc73fe71162d564ffda8e549b79a350bca53c454, - "rpc/core_rpc_server_commands_defs.h", - 210..=221 -)] -/// Used in [`crate::bin::GetBlocksResponse`]. -#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct BlockCompleteEntry { - pub pruned: bool, - pub block: String, - pub block_weight: u64, - pub txs: Vec, -} - -// TODO: custom epee -// -#[cfg(feature = "epee")] -epee_object! { - BlockCompleteEntry, - pruned: bool, - block: String, - block_weight: u64, - txs: Vec, -} diff --git a/rpc/types/src/misc/distribution.rs b/rpc/types/src/misc/distribution.rs new file mode 100644 index 0000000..1a488d4 --- /dev/null +++ b/rpc/types/src/misc/distribution.rs @@ -0,0 +1,309 @@ +//! 
Output distributions for [`crate::json::GetOutputDistributionResponse`]. + +//---------------------------------------------------------------------------------------------------- Use +use std::mem::size_of; + +#[cfg(feature = "serde")] +use serde::{ser::SerializeStruct, Deserialize, Serialize}; + +#[cfg(feature = "epee")] +use cuprate_epee_encoding::{ + epee_object, error, + macros::bytes::{Buf, BufMut}, + read_epee_value, read_varint, write_field, write_varint, EpeeObject, EpeeObjectBuilder, + EpeeValue, Marker, +}; + +//---------------------------------------------------------------------------------------------------- Free +/// TODO: . +/// +/// Used for [`Distribution::CompressedBinary::distribution`]. +#[doc = crate::macros::monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 45..=55 +)] +#[cfg(feature = "epee")] +fn compress_integer_array(array: &[u64]) -> error::Result> { + todo!() +} + +/// TODO: . +/// +/// Used for [`Distribution::CompressedBinary::distribution`]. +#[doc = crate::macros::monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 57..=72 +)] +fn decompress_integer_array(array: &[u8]) -> Vec { + todo!() +} + +//---------------------------------------------------------------------------------------------------- Distribution +#[doc = crate::macros::monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 2468..=2508 +)] +/// Used in [`crate::json::GetOutputDistributionResponse`]. +/// +/// # Internals +/// This type's (de)serialization depends on `monerod`'s (de)serialization. 
+/// +/// During serialization: +/// [`Self::Uncompressed`] will emit: +/// - `compress: false` +/// +/// [`Self::CompressedBinary`] will emit: +/// - `binary: true` +/// - `compress: true` +/// +/// Upon deserialization, the presence of a `compressed_data` +/// field signifies that the [`Self::CompressedBinary`] should +/// be selected. +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", serde(untagged))] +pub enum Distribution { + /// Distribution data will be (de)serialized as either JSON or binary (uncompressed). + Uncompressed(DistributionUncompressed), + /// Distribution data will be (de)serialized as compressed binary. + CompressedBinary(DistributionCompressedBinary), +} + +impl Default for Distribution { + fn default() -> Self { + Self::Uncompressed(DistributionUncompressed::default()) + } +} + +/// Data within [`Distribution::Uncompressed`]. +#[allow(dead_code, missing_docs)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct DistributionUncompressed { + pub start_height: u64, + pub base: u64, + /// TODO: this is a binary JSON string if `binary == true`. + pub distribution: Vec, + pub amount: u64, + pub binary: bool, +} + +#[cfg(feature = "epee")] +epee_object! { + DistributionUncompressed, + start_height: u64, + base: u64, + distribution: Vec, + amount: u64, + binary: bool, +} + +/// Data within [`Distribution::CompressedBinary`]. 
+#[allow(dead_code, missing_docs)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct DistributionCompressedBinary { + pub start_height: u64, + pub base: u64, + #[cfg_attr( + feature = "serde", + serde(serialize_with = "serialize_distribution_as_compressed_data") + )] + #[cfg_attr( + feature = "serde", + serde(deserialize_with = "deserialize_compressed_data_as_distribution") + )] + #[cfg_attr(feature = "serde", serde(rename = "compressed_data"))] + pub distribution: Vec, + pub amount: u64, +} + +#[cfg(feature = "epee")] +epee_object! { + DistributionCompressedBinary, + start_height: u64, + base: u64, + distribution: Vec, + amount: u64, +} + +/// Serializer function for [`DistributionCompressedBinary::distribution`]. +/// +/// 1. Compresses the distribution array +/// 2. Serializes the compressed data +#[cfg(feature = "serde")] +#[allow(clippy::ptr_arg)] +fn serialize_distribution_as_compressed_data(v: &Vec, s: S) -> Result +where + S: serde::Serializer, +{ + match compress_integer_array(v) { + Ok(compressed_data) => compressed_data.serialize(s), + Err(_) => Err(serde::ser::Error::custom( + "error compressing distribution array", + )), + } +} + +/// Deserializer function for [`DistributionCompressedBinary::distribution`]. +/// +/// 1. Deserializes as `compressed_data` field. +/// 2. Decompresses and returns the data +#[cfg(feature = "serde")] +fn deserialize_compressed_data_as_distribution<'de, D>(d: D) -> Result, D::Error> +where + D: serde::Deserializer<'de>, +{ + Vec::::deserialize(d).map(|v| decompress_integer_array(&v)) +} + +//---------------------------------------------------------------------------------------------------- Epee +#[cfg(feature = "epee")] +/// [`EpeeObjectBuilder`] for [`Distribution`]. +/// +/// Not for public usage. 
+#[allow(dead_code, missing_docs)] +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct __DistributionEpeeBuilder { + pub start_height: Option, + pub base: Option, + pub distribution: Option>, + pub amount: Option, + pub compressed_data: Option>, + pub binary: Option, + pub compress: Option, +} + +#[cfg(feature = "epee")] +impl EpeeObjectBuilder for __DistributionEpeeBuilder { + fn add_field(&mut self, name: &str, r: &mut B) -> error::Result { + macro_rules! read_epee_field { + ($($field:ident),*) => { + match name { + $( + stringify!($field) => { self.$field = Some(read_epee_value(r)?); }, + )* + _ => return Ok(false), + } + }; + } + + read_epee_field! { + start_height, + base, + amount, + binary, + compress, + compressed_data, + distribution + } + + Ok(true) + } + + fn finish(self) -> error::Result { + const ELSE: error::Error = error::Error::Format("Required field was not found!"); + + let start_height = self.start_height.ok_or(ELSE)?; + let base = self.base.ok_or(ELSE)?; + let amount = self.amount.ok_or(ELSE)?; + + let distribution = if let Some(compressed_data) = self.compressed_data { + let distribution = decompress_integer_array(&compressed_data); + Distribution::CompressedBinary(DistributionCompressedBinary { + start_height, + base, + distribution, + amount, + }) + } else if let Some(distribution) = self.distribution { + Distribution::Uncompressed(DistributionUncompressed { + binary: self.binary.ok_or(ELSE)?, + distribution, + start_height, + base, + amount, + }) + } else { + return Err(ELSE); + }; + + Ok(distribution) + } +} + +#[cfg(feature = "epee")] +impl EpeeObject for Distribution { + type Builder = __DistributionEpeeBuilder; + + fn number_of_fields(&self) -> u64 { + match self { + // Inner struct fields + `compress`. + Self::Uncompressed(s) => s.number_of_fields() + 1, + // Inner struct fields + `compress` + `binary`. 
+ Self::CompressedBinary(s) => s.number_of_fields() + 2, + } + } + + fn write_fields(self, w: &mut B) -> error::Result<()> { + match self { + Self::Uncompressed(s) => { + s.write_fields(w)?; + write_field(false, "compress", w)?; + } + + Self::CompressedBinary(DistributionCompressedBinary { + start_height, + base, + distribution, + amount, + }) => { + let compressed_data = compress_integer_array(&distribution)?; + + start_height.write(w)?; + base.write(w)?; + compressed_data.write(w)?; + amount.write(w)?; + + write_field(true, "binary", w)?; + write_field(true, "compress", w)?; + } + } + + Ok(()) + } +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod tests { + use pretty_assertions::assert_eq; + + use super::*; + + // TODO: re-enable tests after (de)compression functions are implemented. + + // /// Tests that [`compress_integer_array`] outputs as expected. + // #[test] + // fn compress() { + // let varints = &[16_384, 16_383, 16_382, 16_381]; + // let bytes = compress_integer_array(varints).unwrap(); + + // let expected = [2, 0, 1, 0, 253, 255, 249, 255, 245, 255]; + // assert_eq!(expected, *bytes); + // } + + // /// Tests that [`decompress_integer_array`] outputs as expected. 
+ // #[test] + // fn decompress() { + // let bytes = &[2, 0, 1, 0, 253, 255, 249, 255, 245, 255]; + // let varints = decompress_integer_array(bytes); + + // let expected = vec![16_384, 16_383, 16_382, 16_381]; + // assert_eq!(expected, varints); + // } +} diff --git a/rpc/types/src/misc/key_image_spent_status.rs b/rpc/types/src/misc/key_image_spent_status.rs index d075e64..4b2eb53 100644 --- a/rpc/types/src/misc/key_image_spent_status.rs +++ b/rpc/types/src/misc/key_image_spent_status.rs @@ -6,6 +6,7 @@ use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] use cuprate_epee_encoding::{ + error, macros::bytes::{Buf, BufMut}, EpeeValue, Marker, }; @@ -17,7 +18,7 @@ use cuprate_epee_encoding::{ 456..=460 )] /// Used in [`crate::other::IsKeyImageSpentResponse`]. -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[repr(u8)] pub enum KeyImageSpentStatus { @@ -26,23 +27,59 @@ pub enum KeyImageSpentStatus { SpentInPool = 2, } -#[cfg(feature = "epee")] -impl EpeeValue for KeyImageSpentStatus { - const MARKER: Marker = ::MARKER; - - fn read(r: &mut B, marker: &Marker) -> cuprate_epee_encoding::Result { - todo!() +impl KeyImageSpentStatus { + /// Convert [`Self`] to a [`u8`]. + /// + /// ```rust + /// use cuprate_rpc_types::misc::KeyImageSpentStatus as K; + /// + /// assert_eq!(K::Unspent.to_u8(), 0); + /// assert_eq!(K::SpentInBlockchain.to_u8(), 1); + /// assert_eq!(K::SpentInPool.to_u8(), 2); + /// ``` + pub const fn to_u8(self) -> u8 { + match self { + Self::Unspent => 0, + Self::SpentInBlockchain => 1, + Self::SpentInPool => 2, + } } - fn should_write(&self) -> bool { - todo!() - } - - fn epee_default_value() -> Option { - todo!() - } - - fn write(self, w: &mut B) -> cuprate_epee_encoding::Result<()> { - todo!() + /// Convert a [`u8`] to a [`Self`]. + /// + /// # Errors + /// This returns [`None`] if `u > 2`. 
+ /// + /// ```rust + /// use cuprate_rpc_types::misc::KeyImageSpentStatus as K; + /// + /// assert_eq!(K::from_u8(0), Some(K::Unspent)); + /// assert_eq!(K::from_u8(1), Some(K::SpentInBlockchain)); + /// assert_eq!(K::from_u8(2), Some(K::SpentInPool)); + /// assert_eq!(K::from_u8(3), None); + /// ``` + pub const fn from_u8(u: u8) -> Option { + Some(match u { + 0 => Self::Unspent, + 1 => Self::SpentInBlockchain, + 2 => Self::SpentInPool, + _ => return None, + }) + } +} + +#[cfg(feature = "epee")] +impl EpeeValue for KeyImageSpentStatus { + const MARKER: Marker = u8::MARKER; + + fn read(r: &mut B, marker: &Marker) -> error::Result { + let u = u8::read(r, marker)?; + Self::from_u8(u).ok_or(error::Error::Format("u8 was greater than 2")) + } + + fn write(self, w: &mut B) -> error::Result<()> { + let u = self.to_u8(); + u8::write(u, w)?; + Ok(()) } } diff --git a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs index 31719a3..4643ecc 100644 --- a/rpc/types/src/misc/misc.rs +++ b/rpc/types/src/misc/misc.rs @@ -20,7 +20,7 @@ use cuprate_epee_encoding::{ use crate::{ constants::{ CORE_RPC_STATUS_BUSY, CORE_RPC_STATUS_NOT_MINING, CORE_RPC_STATUS_OK, - CORE_RPC_STATUS_PAYMENT_REQUIRED, CORE_RPC_STATUS_UNKNOWN, + CORE_RPC_STATUS_PAYMENT_REQUIRED, }, defaults::default_zero, macros::monero_definition_link, @@ -352,19 +352,6 @@ define_struct_and_impl_epee! { } } -define_struct_and_impl_epee! { - #[doc = monero_definition_link!( - cc73fe71162d564ffda8e549b79a350bca53c454, - "cryptonote_protocol/cryptonote_protocol_defs.h", - 121..=131 - )] - /// Used in [`crate::bin::GetBlocksResponse`]. - TxBlobEntry { - blob: String, - prunable_hash: [u8; 32], - } -} - define_struct_and_impl_epee! 
{ #[doc = monero_definition_link!( cc73fe71162d564ffda8e549b79a350bca53c454, diff --git a/rpc/types/src/misc/mod.rs b/rpc/types/src/misc/mod.rs index 31dba35..bd6454d 100644 --- a/rpc/types/src/misc/mod.rs +++ b/rpc/types/src/misc/mod.rs @@ -13,7 +13,7 @@ //---------------------------------------------------------------------------------------------------- Mod mod binary_string; -mod block_complete_entry; +mod distribution; mod key_image_spent_status; mod misc; mod pool_info_extent; @@ -21,13 +21,13 @@ mod status; mod tx_entry; pub use binary_string::BinaryString; -pub use block_complete_entry::BlockCompleteEntry; +pub use distribution::{Distribution, DistributionCompressedBinary, DistributionUncompressed}; pub use key_image_spent_status::KeyImageSpentStatus; pub use misc::{ AuxPow, BlockHeader, BlockOutputIndices, ChainInfo, ConnectionInfo, GetBan, GetMinerDataTxBacklogEntry, GetOutputsOut, HardforkEntry, HistogramEntry, OutKey, OutKeyBin, OutputDistributionData, Peer, PoolTxInfo, PublicNode, SetBan, Span, SpentKeyImageInfo, - SyncInfoPeer, TxBacklogEntry, TxBlobEntry, TxInfo, TxOutputIndices, TxpoolHisto, TxpoolStats, + SyncInfoPeer, TxBacklogEntry, TxInfo, TxOutputIndices, TxpoolHisto, TxpoolStats, }; pub use pool_info_extent::PoolInfoExtent; pub use status::Status; diff --git a/rpc/types/src/misc/pool_info_extent.rs b/rpc/types/src/misc/pool_info_extent.rs index 09b6c96..6372cd6 100644 --- a/rpc/types/src/misc/pool_info_extent.rs +++ b/rpc/types/src/misc/pool_info_extent.rs @@ -6,6 +6,7 @@ use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] use cuprate_epee_encoding::{ + error, macros::bytes::{Buf, BufMut}, EpeeValue, Marker, }; @@ -17,33 +18,69 @@ use cuprate_epee_encoding::{ 223..=228 )] /// Used in [`crate::bin::GetBlocksResponse`]. 
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[repr(u8)] pub enum PoolInfoExtent { + #[default] None = 0, Incremental = 1, Full = 2, } -// -#[cfg(feature = "epee")] -impl EpeeValue for PoolInfoExtent { - const MARKER: Marker = ::MARKER; - - fn read(r: &mut B, marker: &Marker) -> cuprate_epee_encoding::Result { - todo!() +impl PoolInfoExtent { + /// Convert [`Self`] to a [`u8`]. + /// + /// ```rust + /// use cuprate_rpc_types::misc::PoolInfoExtent as P; + /// + /// assert_eq!(P::None.to_u8(), 0); + /// assert_eq!(P::Incremental.to_u8(), 1); + /// assert_eq!(P::Full.to_u8(), 2); + /// ``` + pub const fn to_u8(self) -> u8 { + match self { + Self::None => 0, + Self::Incremental => 1, + Self::Full => 2, + } } - fn should_write(&self) -> bool { - todo!() - } - - fn epee_default_value() -> Option { - todo!() - } - - fn write(self, w: &mut B) -> cuprate_epee_encoding::Result<()> { - todo!() + /// Convert a [`u8`] to a [`Self`]. + /// + /// # Errors + /// This returns [`None`] if `u > 2`. 
+ /// + /// ```rust + /// use cuprate_rpc_types::misc::PoolInfoExtent as P; + /// + /// assert_eq!(P::from_u8(0), Some(P::None)); + /// assert_eq!(P::from_u8(1), Some(P::Incremental)); + /// assert_eq!(P::from_u8(2), Some(P::Full)); + /// assert_eq!(P::from_u8(3), None); + /// ``` + pub const fn from_u8(u: u8) -> Option { + Some(match u { + 0 => Self::None, + 1 => Self::Incremental, + 2 => Self::Full, + _ => return None, + }) + } +} + +#[cfg(feature = "epee")] +impl EpeeValue for PoolInfoExtent { + const MARKER: Marker = ::MARKER; + + fn read(r: &mut B, marker: &Marker) -> error::Result { + let u = u8::read(r, marker)?; + Self::from_u8(u).ok_or(error::Error::Format("u8 was greater than 2")) + } + + fn write(self, w: &mut B) -> error::Result<()> { + let u = self.to_u8(); + u8::write(u, w)?; + Ok(()) } } diff --git a/rpc/types/src/misc/status.rs b/rpc/types/src/misc/status.rs index f2dff1a..79725cf 100644 --- a/rpc/types/src/misc/status.rs +++ b/rpc/types/src/misc/status.rs @@ -14,7 +14,7 @@ use cuprate_epee_encoding::{ use crate::constants::{ CORE_RPC_STATUS_BUSY, CORE_RPC_STATUS_NOT_MINING, CORE_RPC_STATUS_OK, - CORE_RPC_STATUS_PAYMENT_REQUIRED, CORE_RPC_STATUS_UNKNOWN, + CORE_RPC_STATUS_PAYMENT_REQUIRED, }; //---------------------------------------------------------------------------------------------------- Status @@ -33,37 +33,37 @@ use crate::constants::{ /// use cuprate_rpc_types::{ /// misc::Status, /// CORE_RPC_STATUS_BUSY, CORE_RPC_STATUS_NOT_MINING, CORE_RPC_STATUS_OK, -/// CORE_RPC_STATUS_PAYMENT_REQUIRED, CORE_RPC_STATUS_UNKNOWN +/// CORE_RPC_STATUS_PAYMENT_REQUIRED /// }; /// use serde_json::to_string; /// -/// let unknown = Status::Unknown; +/// let other = Status::Other("OTHER".into()); /// /// assert_eq!(to_string(&Status::Ok).unwrap(), r#""OK""#); /// assert_eq!(to_string(&Status::Busy).unwrap(), r#""BUSY""#); /// assert_eq!(to_string(&Status::NotMining).unwrap(), r#""NOT MINING""#); /// assert_eq!(to_string(&Status::PaymentRequired).unwrap(), 
r#""PAYMENT REQUIRED""#); -/// assert_eq!(to_string(&unknown).unwrap(), r#""UNKNOWN""#); +/// assert_eq!(to_string(&other).unwrap(), r#""OTHER""#); /// /// assert_eq!(Status::Ok.as_ref(), CORE_RPC_STATUS_OK); /// assert_eq!(Status::Busy.as_ref(), CORE_RPC_STATUS_BUSY); /// assert_eq!(Status::NotMining.as_ref(), CORE_RPC_STATUS_NOT_MINING); /// assert_eq!(Status::PaymentRequired.as_ref(), CORE_RPC_STATUS_PAYMENT_REQUIRED); -/// assert_eq!(unknown.as_ref(), CORE_RPC_STATUS_UNKNOWN); +/// assert_eq!(other.as_ref(), "OTHER"); /// /// assert_eq!(format!("{}", Status::Ok), CORE_RPC_STATUS_OK); /// assert_eq!(format!("{}", Status::Busy), CORE_RPC_STATUS_BUSY); /// assert_eq!(format!("{}", Status::NotMining), CORE_RPC_STATUS_NOT_MINING); /// assert_eq!(format!("{}", Status::PaymentRequired), CORE_RPC_STATUS_PAYMENT_REQUIRED); -/// assert_eq!(format!("{}", unknown), CORE_RPC_STATUS_UNKNOWN); +/// assert_eq!(format!("{}", other), "OTHER"); /// /// assert_eq!(format!("{:?}", Status::Ok), "Ok"); /// assert_eq!(format!("{:?}", Status::Busy), "Busy"); /// assert_eq!(format!("{:?}", Status::NotMining), "NotMining"); /// assert_eq!(format!("{:?}", Status::PaymentRequired), "PaymentRequired"); -/// assert_eq!(format!("{:?}", unknown), "Unknown"); +/// assert_eq!(format!("{:?}", other), r#"Other("OTHER")"#); /// ``` -#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum Status { // FIXME: @@ -86,17 +86,12 @@ pub enum Status { #[cfg_attr(feature = "serde", serde(rename = "PAYMENT REQUIRED"))] PaymentRequired, - /// Some unknown other string; [`CORE_RPC_STATUS_UNKNOWN`]. + /// Some unknown other string. /// - /// This exists to act as a catch-all if `monerod` adds - /// a string and a Cuprate node hasn't updated yet. 
- /// - /// The reason this isn't `Unknown(String)` is because that - /// disallows [`Status`] to be [`Copy`], and thus other types - /// that contain it. - #[cfg_attr(feature = "serde", serde(other))] - #[cfg_attr(feature = "serde", serde(rename = "UNKNOWN"))] - Unknown, + /// This exists to act as a catch-all for all of + /// `monerod`'s other strings it puts in the `status` field. + #[cfg_attr(feature = "serde", serde(rename = "OTHER"), serde(untagged))] + Other(String), } impl From for Status { @@ -106,7 +101,7 @@ impl From for Status { CORE_RPC_STATUS_BUSY => Self::Busy, CORE_RPC_STATUS_NOT_MINING => Self::NotMining, CORE_RPC_STATUS_PAYMENT_REQUIRED => Self::PaymentRequired, - _ => Self::Unknown, + _ => Self::Other(s), } } } @@ -118,7 +113,7 @@ impl AsRef for Status { Self::Busy => CORE_RPC_STATUS_BUSY, Self::NotMining => CORE_RPC_STATUS_NOT_MINING, Self::PaymentRequired => CORE_RPC_STATUS_PAYMENT_REQUIRED, - Self::Unknown => CORE_RPC_STATUS_UNKNOWN, + Self::Other(s) => s, } } } @@ -150,7 +145,7 @@ impl EpeeValue for Status { fn epee_default_value() -> Option { // - Some(Self::Unknown) + Some(Self::Other(String::new())) } fn write(self, w: &mut B) -> cuprate_epee_encoding::Result<()> { @@ -172,11 +167,11 @@ mod test { Status::Busy, Status::NotMining, Status::PaymentRequired, - Status::Unknown, + Status::Other(String::new()), ] { let mut buf = vec![]; - ::write(status, &mut buf).unwrap(); + ::write(status.clone(), &mut buf).unwrap(); let status2 = ::read(&mut buf.as_slice(), &::MARKER) .unwrap(); diff --git a/rpc/types/src/misc/tx_entry.rs b/rpc/types/src/misc/tx_entry.rs index 70fbdff..e643076 100644 --- a/rpc/types/src/misc/tx_entry.rs +++ b/rpc/types/src/misc/tx_entry.rs @@ -2,13 +2,15 @@ //---------------------------------------------------------------------------------------------------- Use #[cfg(feature = "serde")] +use crate::serde::{serde_false, serde_true}; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] use 
cuprate_epee_encoding::{ - epee_object, + epee_object, error, macros::bytes::{Buf, BufMut}, - EpeeValue, Marker, + read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, EpeeValue, Marker, }; //---------------------------------------------------------------------------------------------------- TxEntry @@ -18,42 +20,127 @@ use cuprate_epee_encoding::{ 389..=428 )] /// Used in [`crate::other::GetTransactionsResponse`]. -#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +/// +/// # Epee +/// This type is only used in a JSON endpoint, so the +/// epee implementation on this type only panics. +/// +/// It is only implemented to satisfy the RPC type generator +/// macro, which requires all objects to be serde + epee. +/// +/// # Example +/// ```rust +/// use cuprate_rpc_types::misc::*; +/// use serde_json::{json, from_value}; +/// +/// let json = json!({ +/// "as_hex": String::default(), +/// "as_json": String::default(), +/// "block_height": u64::default(), +/// "block_timestamp": u64::default(), +/// "confirmations": u64::default(), +/// "double_spend_seen": bool::default(), +/// "output_indices": Vec::::default(), +/// "prunable_as_hex": String::default(), +/// "prunable_hash": String::default(), +/// "pruned_as_hex": String::default(), +/// "tx_hash": String::default(), +/// "in_pool": bool::default(), +/// }); +/// let tx_entry = from_value::(json).unwrap(); +/// assert!(matches!(tx_entry, TxEntry::InPool { .. })); +/// +/// let json = json!({ +/// "as_hex": String::default(), +/// "as_json": String::default(), +/// "double_spend_seen": bool::default(), +/// "prunable_as_hex": String::default(), +/// "prunable_hash": String::default(), +/// "pruned_as_hex": String::default(), +/// "received_timestamp": u64::default(), +/// "relayed": bool::default(), +/// "tx_hash": String::default(), +/// "in_pool": bool::default(), +/// }); +/// let tx_entry = from_value::(json).unwrap(); +/// assert!(matches!(tx_entry, TxEntry::NotInPool { .. 
})); +/// ``` +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct TxEntry { - pub as_hex: String, - pub as_json: String, - pub block_height: u64, - pub block_timestamp: u64, - pub confirmations: u64, - pub double_spend_seen: bool, - pub in_pool: bool, - pub output_indices: Vec, - pub prunable_as_hex: String, - pub prunable_hash: String, - pub pruned_as_hex: String, - pub received_timestamp: u64, - pub relayed: bool, - pub tx_hash: String, +#[cfg_attr(feature = "serde", serde(untagged))] +pub enum TxEntry { + /// This entry exists in the transaction pool. + InPool { + as_hex: String, + as_json: String, + block_height: u64, + block_timestamp: u64, + confirmations: u64, + double_spend_seen: bool, + output_indices: Vec, + prunable_as_hex: String, + prunable_hash: String, + pruned_as_hex: String, + tx_hash: String, + #[cfg_attr(feature = "serde", serde(serialize_with = "serde_true"))] + /// Will always be serialized as `true`. + in_pool: bool, + }, + /// This entry _does not_ exist in the transaction pool. + NotInPool { + as_hex: String, + as_json: String, + double_spend_seen: bool, + prunable_as_hex: String, + prunable_hash: String, + pruned_as_hex: String, + received_timestamp: u64, + relayed: bool, + tx_hash: String, + #[cfg_attr(feature = "serde", serde(serialize_with = "serde_false"))] + /// Will always be serialized as `false`. + in_pool: bool, + }, } -// TODO: custom epee -// -#[cfg(feature = "epee")] -epee_object! 
{ - TxEntry, - as_hex: String, - as_json: String, // TODO: should be its own struct - block_height: u64, - block_timestamp: u64, - confirmations: u64, - double_spend_seen: bool, - in_pool: bool, - output_indices: Vec, - prunable_as_hex: String, - prunable_hash: String, - pruned_as_hex: String, - received_timestamp: u64, - relayed: bool, - tx_hash: String, +impl Default for TxEntry { + fn default() -> Self { + Self::NotInPool { + as_hex: String::default(), + as_json: String::default(), + double_spend_seen: bool::default(), + prunable_as_hex: String::default(), + prunable_hash: String::default(), + pruned_as_hex: String::default(), + received_timestamp: u64::default(), + relayed: bool::default(), + tx_hash: String::default(), + in_pool: false, + } + } +} + +//---------------------------------------------------------------------------------------------------- Epee +#[cfg(feature = "epee")] +impl EpeeObjectBuilder for () { + fn add_field(&mut self, name: &str, r: &mut B) -> error::Result { + unreachable!() + } + + fn finish(self) -> error::Result { + unreachable!() + } +} + +#[cfg(feature = "epee")] +impl EpeeObject for TxEntry { + type Builder = (); + + fn number_of_fields(&self) -> u64 { + unreachable!() + } + + fn write_fields(self, w: &mut B) -> error::Result<()> { + unreachable!() + } } diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index 5ad2caa..41530cb 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -8,12 +8,12 @@ use crate::{ defaults::{default_false, default_string, default_true}, macros::define_request_and_response, misc::{ - GetOutputsOut, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status, TxEntry, TxInfo, - TxpoolStats, + GetOutputsOut, KeyImageSpentStatus, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status, + TxEntry, TxInfo, TxpoolStats, }, }; -//---------------------------------------------------------------------------------------------------- TODO 
+//---------------------------------------------------------------------------------------------------- Definitions define_request_and_response! { get_height, cc73fe71162d564ffda8e549b79a350bca53c454 => @@ -68,7 +68,8 @@ define_request_and_response! { key_images: Vec, }, AccessResponseBase { - spent_status: Vec, // TODO: should be `KeyImageSpentStatus`. + /// FIXME: These are [`KeyImageSpentStatus`] in [`u8`] form. + spent_status: Vec, } } @@ -219,7 +220,6 @@ define_request_and_response! { password: String, proxy: String, }, - #[derive(Copy)] Response { status: Status, } diff --git a/rpc/types/src/serde.rs b/rpc/types/src/serde.rs new file mode 100644 index 0000000..70885e0 --- /dev/null +++ b/rpc/types/src/serde.rs @@ -0,0 +1,32 @@ +//! Custom (de)serialization functions for serde. + +//---------------------------------------------------------------------------------------------------- Lints +#![allow(clippy::trivially_copy_pass_by_ref)] // serde fn signature + +//---------------------------------------------------------------------------------------------------- Import +use serde::Serializer; + +//---------------------------------------------------------------------------------------------------- Free functions +/// Always serializes `true`. +#[inline] +pub(crate) fn serde_true(_: &bool, serializer: S) -> Result +where + S: Serializer, +{ + serializer.serialize_bool(true) +} + +/// Always serializes `false`. +#[inline] +pub(crate) fn serde_false(_: &bool, serializer: S) -> Result +where + S: Serializer, +{ + serializer.serialize_bool(false) +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + use super::*; +} diff --git a/types/README.md b/types/README.md index 4023e9f..876931f 100644 --- a/types/README.md +++ b/types/README.md @@ -8,4 +8,4 @@ This crate is a kitchen-sink for data types that are shared across Cuprate. 
|--------------|-----------| | `blockchain` | Enables the `blockchain` module, containing the blockchain database request/response types | `serde` | Enables `serde` on types where applicable -| `epee` | Enables `cuprate-epee-encoding` on types where applicable \ No newline at end of file +| `epee` | Enables `cuprate-epee-encoding` on types where applicable From 7416164b198ecbfda88e7a37ef381cc1a6011ac2 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Thu, 25 Jul 2024 17:47:39 -0400 Subject: [PATCH 023/104] workspace: fix lint error (#234) * cargo.toml: split `keyword_idents` lint * dandelion-tower: fix doc * fix doc/clippy --- Cargo.toml | 3 ++- helper/src/time.rs | 2 +- p2p/dandelion-tower/src/pool.rs | 2 +- p2p/p2p/src/block_downloader.rs | 2 +- p2p/p2p/src/block_downloader/block_queue.rs | 1 + 5 files changed, 6 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9b090ba..22a1585 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -275,7 +275,8 @@ clone_on_ref_ptr = "deny" # Cold absolute_paths_not_starting_with_crate = "deny" explicit_outlives_requirements = "deny" -keyword_idents = "deny" +keyword_idents_2018 = "deny" +keyword_idents_2024 = "deny" missing_abi = "deny" non_ascii_idents = "deny" non_local_definitions = "deny" diff --git a/helper/src/time.rs b/helper/src/time.rs index 7bc155f..28aff7f 100644 --- a/helper/src/time.rs +++ b/helper/src/time.rs @@ -55,7 +55,7 @@ pub const fn unix_clock(seconds_after_unix_epoch: u64) -> u32 { /// - The seconds returned is guaranteed to be `0..=59` /// - The minutes returned is guaranteed to be `0..=59` /// - The hours returned can be over `23`, as this is not a clock function, -/// see [`secs_to_clock`] for clock-like behavior that wraps around on `24` +/// see [`secs_to_clock`] for clock-like behavior that wraps around on `24` /// /// ```rust /// # use cuprate_helper::time::*; diff --git a/p2p/dandelion-tower/src/pool.rs b/p2p/dandelion-tower/src/pool.rs index 68f7945..5f4f734 100644 --- 
a/p2p/dandelion-tower/src/pool.rs +++ b/p2p/dandelion-tower/src/pool.rs @@ -51,7 +51,7 @@ use crate::{ /// /// - `buffer_size` is the size of the channel's buffer between the [`DandelionPoolService`] and [`DandelionPool`]. /// - `dandelion_router` is the router service, kept generic instead of [`DandelionRouter`](crate::DandelionRouter) to allow -/// user to customise routing functionality. +/// user to customise routing functionality. /// - `backing_pool` is the backing transaction storage service /// - `config` is [`DandelionConfig`]. pub fn start_dandelion_pool( diff --git a/p2p/p2p/src/block_downloader.rs b/p2p/p2p/src/block_downloader.rs index 81640e9..5f53054 100644 --- a/p2p/p2p/src/block_downloader.rs +++ b/p2p/p2p/src/block_downloader.rs @@ -194,7 +194,7 @@ where /// - download the next batch of blocks /// - request the next chain entry /// - download an already requested batch of blocks (this might happen due to an error in the previous request -/// or because the queue of ready blocks is too large, so we need the oldest block to clear it). +/// or because the queue of ready blocks is too large, so we need the oldest block to clear it). struct BlockDownloader { /// The client pool. client_pool: Arc>, diff --git a/p2p/p2p/src/block_downloader/block_queue.rs b/p2p/p2p/src/block_downloader/block_queue.rs index d846c22..708eb3e 100644 --- a/p2p/p2p/src/block_downloader/block_queue.rs +++ b/p2p/p2p/src/block_downloader/block_queue.rs @@ -140,6 +140,7 @@ mod tests { proptest! 
{ #[test] + #[allow(clippy::mutable_key_type)] fn block_queue_returns_items_in_order(batches in vec(ready_batch_strategy(), 0..10_000)) { block_on(async move { let (buffer_tx, mut buffer_rx) = cuprate_async_buffer::new_buffer(usize::MAX); From a2bca1b889051d3312c252803f8917f72c629e30 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Sun, 28 Jul 2024 16:43:16 -0400 Subject: [PATCH 024/104] rpc: add JSON doc-tests (#232) * fixed-bytes: add `serde`, document feature flags * fixed-bytes: add derives * rpc: add `as _` syntax to macro * rpc: use `ByteArrayVec` and `ContainerAsBlob` for binary types * fixed-bytes: re-add derives * rpc-types: dedup default value within macro * readme: fixed bytes section * types: custom epee - `BlockCompleteEntry` * types: custom epee - `KeyImageSpentStatus` * types: custom epee - `PoolInfoExtent` * types: add `Status::Other(String)` variant * types: custom epee - `TxEntry`, add `read_epee_field` macro * bin: custom epee - `GetBlocks` * types: add `serde.rs` * misc: make `TxEntry` an `enum`, impl serde * misc: `unimplemented!()` for `TxEntry`'s epee * types: add `BlockCompleteEntry` * rpc: replace `BlockCompleteEntry` with `cuprate-types` * types: document `BlockCompleteEntry` * bin: fix `number_of_fields` for `GetBlocksResponse` * misc: add `Distribution` * distribution: add todo * misc fixes * readme: add `(De)serialization invariants` * distribution: compress variants * types: add `block_complete_entry.rs` * net: fix imports * p2p: fix imports * turn off default-features * p2p: fix imports * misc fixes * Update net/wire/Cargo.toml Co-authored-by: Boog900 * distribution: module doc * wire: re-export types * test-utils: add `crate::rpc::types` module * test-utils: conditional json doc-tests * bin: use enum for `GetBlocksResponse` * misc: use lowercase for stringify * json: add test data, fix macro doc tests * json: add all data * other: add all data * bin: add skeleton * docs * move type to correct file * remove duplicated fields for 
custom epee * rpc: `client/{client,constants}.rs` -> `client.rs` * lib.rs: remove `clippy::module_inception` * macros: add json doc test macro * json: add some tests * json: add doc-test for all types * add all other JSON doc-tests * move doc-test macros to files * base: add doc-tests * json: TODO distribution test --------- Co-authored-by: Boog900 --- Cargo.lock | 2 + rpc/types/Cargo.toml | 5 +- rpc/types/src/base.rs | 126 ++++ rpc/types/src/defaults.rs | 6 + rpc/types/src/json.rs | 990 +++++++++++++++++++++++++++++-- rpc/types/src/misc/misc.rs | 10 +- rpc/types/src/other.rs | 586 +++++++++++++++++- test-utils/src/rpc/data/json.rs | 10 +- test-utils/src/rpc/data/other.rs | 43 +- typos.toml | 1 + 10 files changed, 1677 insertions(+), 102 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ac29662..55f17a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -764,6 +764,8 @@ version = "0.0.0" dependencies = [ "cuprate-epee-encoding", "cuprate-fixed-bytes", + "cuprate-json-rpc", + "cuprate-test-utils", "cuprate-types", "monero-serai", "paste", diff --git a/rpc/types/Cargo.toml b/rpc/types/Cargo.toml index fcec453..9c99681 100644 --- a/rpc/types/Cargo.toml +++ b/rpc/types/Cargo.toml @@ -23,5 +23,8 @@ paste = { workspace = true } serde = { workspace = true, optional = true } [dev-dependencies] +cuprate-test-utils = { path = "../../test-utils" } +cuprate-json-rpc = { path = "../json-rpc" } + serde_json = { workspace = true } -pretty_assertions = { workspace = true } +pretty_assertions = { workspace = true } \ No newline at end of file diff --git a/rpc/types/src/base.rs b/rpc/types/src/base.rs index 4990cdd..c131e41 100644 --- a/rpc/types/src/base.rs +++ b/rpc/types/src/base.rs @@ -57,6 +57,64 @@ pub struct ResponseBase { pub untrusted: bool, } +impl ResponseBase { + /// `const` version of [`Default::default`]. 
+ /// + /// ```rust + /// use cuprate_rpc_types::{misc::*, base::*}; + /// + /// let new = ResponseBase::new(); + /// assert_eq!(new, ResponseBase { + /// status: Status::Ok, + /// untrusted: false, + /// }); + /// ``` + pub const fn new() -> Self { + Self { + status: Status::Ok, + untrusted: false, + } + } + + /// Returns OK and trusted [`Self`]. + /// + /// This is the most common version of [`Self`]. + /// + /// ```rust + /// use cuprate_rpc_types::{misc::*, base::*}; + /// + /// let ok = ResponseBase::ok(); + /// assert_eq!(ok, ResponseBase { + /// status: Status::Ok, + /// untrusted: false, + /// }); + /// ``` + pub const fn ok() -> Self { + Self { + status: Status::Ok, + untrusted: false, + } + } + + /// Same as [`Self::ok`] but with [`Self::untrusted`] set to `true`. + /// + /// ```rust + /// use cuprate_rpc_types::{misc::*, base::*}; + /// + /// let ok_untrusted = ResponseBase::ok_untrusted(); + /// assert_eq!(ok_untrusted, ResponseBase { + /// status: Status::Ok, + /// untrusted: true, + /// }); + /// ``` + pub const fn ok_untrusted() -> Self { + Self { + status: Status::Ok, + untrusted: true, + } + } +} + #[cfg(feature = "epee")] epee_object! { ResponseBase, @@ -80,6 +138,74 @@ pub struct AccessResponseBase { pub top_hash: String, } +impl AccessResponseBase { + /// Creates a new [`Self`] with default values. + /// + /// Since RPC payment is semi-deprecated, [`Self::credits`] + /// and [`Self::top_hash`] will always be set to the default + /// values. + /// + /// ```rust + /// use cuprate_rpc_types::{misc::*, base::*}; + /// + /// let new = AccessResponseBase::new(ResponseBase::ok()); + /// assert_eq!(new, AccessResponseBase { + /// response_base: ResponseBase::ok(), + /// credits: 0, + /// top_hash: "".into(), + /// }); + /// ``` + pub const fn new(response_base: ResponseBase) -> Self { + Self { + response_base, + credits: 0, + top_hash: String::new(), + } + } + + /// Returns OK and trusted [`Self`]. 
+ /// + /// This is the most common version of [`Self`]. + /// + /// ```rust + /// use cuprate_rpc_types::{misc::*, base::*}; + /// + /// let ok = AccessResponseBase::ok(); + /// assert_eq!(ok, AccessResponseBase { + /// response_base: ResponseBase::ok(), + /// credits: 0, + /// top_hash: "".into(), + /// }); + /// ``` + pub const fn ok() -> Self { + Self { + response_base: ResponseBase::ok(), + credits: 0, + top_hash: String::new(), + } + } + + /// Same as [`Self::ok`] but with `untrusted` set to `true`. + /// + /// ```rust + /// use cuprate_rpc_types::{misc::*, base::*}; + /// + /// let ok_untrusted = AccessResponseBase::ok_untrusted(); + /// assert_eq!(ok_untrusted, AccessResponseBase { + /// response_base: ResponseBase::ok_untrusted(), + /// credits: 0, + /// top_hash: "".into(), + /// }); + /// ``` + pub const fn ok_untrusted() -> Self { + Self { + response_base: ResponseBase::ok_untrusted(), + credits: 0, + top_hash: String::new(), + } + } +} + #[cfg(feature = "epee")] epee_object! { AccessResponseBase, diff --git a/rpc/types/src/defaults.rs b/rpc/types/src/defaults.rs index 9366a26..6addd0a 100644 --- a/rpc/types/src/defaults.rs +++ b/rpc/types/src/defaults.rs @@ -53,6 +53,12 @@ pub(crate) fn default_zero>() -> T { T::from(0) } +/// Default `1` value used in request/response types. 
+#[inline] +pub(crate) fn default_one>() -> T { + T::from(1) +} + //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test { diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index f4bca99..dd2e648 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -5,15 +5,78 @@ //---------------------------------------------------------------------------------------------------- Import use crate::{ base::{AccessResponseBase, ResponseBase}, - defaults::{default_false, default_height, default_string, default_vec, default_zero}, + defaults::{ + default_false, default_height, default_one, default_string, default_true, default_vec, + default_zero, + }, free::{is_one, is_zero}, macros::define_request_and_response, misc::{ - AuxPow, BlockHeader, ChainInfo, ConnectionInfo, Distribution, GetBan, HardforkEntry, - HistogramEntry, OutputDistributionData, SetBan, Span, Status, SyncInfoPeer, TxBacklogEntry, + AuxPow, BlockHeader, ChainInfo, ConnectionInfo, Distribution, GetBan, + GetMinerDataTxBacklogEntry, HardforkEntry, HistogramEntry, OutputDistributionData, SetBan, + Span, Status, SyncInfoPeer, TxBacklogEntry, }, }; +//---------------------------------------------------------------------------------------------------- Macro +/// Adds a (de)serialization doc-test to a type in `json.rs`. +/// +/// It expects a const string from `cuprate_test_utils::rpc::data` +/// and the expected value it should (de)serialize into/from. +/// +/// It tests that the provided const JSON string can properly +/// (de)serialize into the expected value. +/// +/// See below for example usage. This macro is only used in this file. +macro_rules! serde_doc_test { + ( + // `const` string from `cuprate_test_utils::rpc::data` + // v + $cuprate_test_utils_rpc_const:ident => $expected:expr + // ^ + // Expected value as an expression + ) => { + paste::paste! 
{ + concat!( + "```rust\n", + "use cuprate_test_utils::rpc::data::json::*;\n", + "use cuprate_rpc_types::{misc::*, base::*, json::*};\n", + "use serde_json::{Value, from_str, from_value};\n", + "\n", + "// The expected data.\n", + "let expected = ", + stringify!($expected), + ";\n", + "\n", + "// Assert it can be turned into a JSON value.\n", + "let value = from_str::(", + stringify!($cuprate_test_utils_rpc_const), + ").unwrap();\n", + "let Value::Object(map) = value else {\n", + " panic!();\n", + "};\n", + "\n", + "// If a request...\n", + "if let Some(params) = map.get(\"params\") {\n", + " let response = from_value::<", + stringify!([<$cuprate_test_utils_rpc_const:camel>]), + ">(params.clone()).unwrap();\n", + " assert_eq!(response, expected);\n", + " return;\n", + "}\n", + "\n", + "// Else, if a response...\n", + "let result = map.get(\"result\").unwrap().clone();\n", + "let response = from_value::<", + stringify!([<$cuprate_test_utils_rpc_const:camel>]), + ">(result.clone()).unwrap();\n", + "assert_eq!(response, expected);\n", + "```\n", + ) + } + }; +} + //---------------------------------------------------------------------------------------------------- Definitions // This generates 2 structs: // @@ -41,7 +104,22 @@ define_request_and_response! { // // If there are any additional attributes (`/// docs` or `#[derive]`s) // for the struct, they go here, e.g.: - // #[derive(Copy)] + // + #[doc = serde_doc_test!( + // ^ This is a macro that adds a doc-test to this type. + // It is optional but it is added to nearly all types. + // The syntax is: + // `$const` => `$expected` + // where `$const` is a `const` string from + // `cuprate_test_utils::rpc::data` and `$expected` is an + // actual expression that the string _should_ (de)serialize into/from. 
+ GET_BLOCK_TEMPLATE_REQUEST => GetBlockTemplateRequest { + extra_nonce: String::default(), + prev_block: String::default(), + reserve_size: 60, + wallet_address: "44GBHzv6ZyQdJkjqZje6KLZ3xSyN1hBSFAnLP6EAqJtCRVzMzZmeXTC2AHKDS9aEDTRKmo6a6o9r9j86pYfhCWDkKjbtcns".into(), + } + )] Request { // Within the `{}` is an infinite matching pattern of: // ``` @@ -66,17 +144,16 @@ define_request_and_response! { // // This is a HACK since `serde`'s default attribute only takes in // string literals and macros (stringify) within attributes do not work. - extra_nonce: String /* = default_expression, "default_literal" */, + extra_nonce: String = default_string(), "default_string", + prev_block: String = default_string(), "default_string", // Another optional expression: // This indicates to the macro to (de)serialize // this field as another type in epee. // // See `cuprate_epee_encoding::epee_object` for info. - prev_block: String /* as Type */, + reserve_size: u64 /* as Type */, - // Regular fields. - reserve_size: u64, wallet_address: String, }, @@ -92,6 +169,23 @@ define_request_and_response! { // "Flatten" means the field(s) of a struct gets inlined // directly into the struct during (de)serialization, see: // . 
+ #[doc = serde_doc_test!( + GET_BLOCK_TEMPLATE_RESPONSE => GetBlockTemplateResponse { + base: ResponseBase::ok(), + blockhashing_blob: "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a00000000e0c20372be23d356347091025c5b5e8f2abf83ab618378565cce2b703491523401".into(), + blocktemplate_blob: "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a0000000002c681c30101ff8a81c3010180e0a596bb11033b7eedf47baf878f3490cb20b696079c34bd017fe59b0d070e74d73ffabc4bb0e05f011decb630f3148d0163b3bd39690dde4078e4cfb69fecf020d6278a27bad10c58023c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(), + difficulty_top64: 0, + difficulty: 283305047039, + expected_reward: 600000000000, + height: 3195018, + next_seed_hash: "".into(), + prev_hash: "9d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a".into(), + reserved_offset: 131, + seed_hash: "e2aa0b7b55042cd48b02e395d78fa66a29815ccc1584e38db2d1f0e8485cd44f".into(), + seed_height: 3194880, + wide_difficulty: "0x41f64bf3ff".into(), + } + )] ResponseBase { // This is using [`crate::base::ResponseBase`], // so the type we generate will contain this field: @@ -131,6 +225,12 @@ define_request_and_response! { // type alias to `()` instead of a `struct`. Request {}, + #[doc = serde_doc_test!( + GET_BLOCK_COUNT_RESPONSE => GetBlockCountResponse { + base: ResponseBase::ok(), + count: 3195019, + } + )] ResponseBase { count: u64, } @@ -140,15 +240,14 @@ define_request_and_response! 
{ on_get_block_hash, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 935..=939, + OnGetBlockHash, - /// ```rust - /// use serde_json::*; - /// use cuprate_rpc_types::json::*; - /// - /// let x = OnGetBlockHashRequest { block_height: [3] }; - /// let x = to_string(&x).unwrap(); - /// assert_eq!(x, "[3]"); - /// ``` + + #[doc = serde_doc_test!( + ON_GET_BLOCK_HASH_REQUEST => OnGetBlockHashRequest { + block_height: [912345], + } + )] #[cfg_attr(feature = "serde", serde(transparent))] #[repr(transparent)] #[derive(Copy)] @@ -157,14 +256,12 @@ define_request_and_response! { // it must be a 1 length array or else it will error. block_height: [u64; 1], }, - /// ```rust - /// use serde_json::*; - /// use cuprate_rpc_types::json::*; - /// - /// let x = OnGetBlockHashResponse { block_hash: String::from("asdf") }; - /// let x = to_string(&x).unwrap(); - /// assert_eq!(x, "\"asdf\""); - /// ``` + + #[doc = serde_doc_test!( + ON_GET_BLOCK_HASH_RESPONSE => OnGetBlockHashResponse { + block_hash: "e22cf75f39ae720e8b71b3d120a5ac03f0db50bba6379e2850975b4859190bc6".into(), + } + )] #[cfg_attr(feature = "serde", serde(transparent))] #[repr(transparent)] Response { @@ -176,15 +273,14 @@ define_request_and_response! 
{ submit_block, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1114..=1128, + SubmitBlock, - /// ```rust - /// use serde_json::*; - /// use cuprate_rpc_types::json::*; - /// - /// let x = SubmitBlockRequest { block_blob: ["a".into()] }; - /// let x = to_string(&x).unwrap(); - /// assert_eq!(x, r#"["a"]"#); - /// ``` + + #[doc = serde_doc_test!( + SUBMIT_BLOCK_REQUEST => SubmitBlockRequest { + block_blob: ["0707e6bdfedc053771512f1bc27c62731ae9e8f2443db64ce742f4e57f5cf8d393de28551e441a0000000002fb830a01ffbf830a018cfe88bee283060274c0aae2ef5730e680308d9c00b6da59187ad0352efe3c71d36eeeb28782f29f2501bd56b952c3ddc3e350c2631d3a5086cac172c56893831228b17de296ff4669de020200000000".into()], + } + )] #[cfg_attr(feature = "serde", serde(transparent))] #[repr(transparent)] Request { @@ -192,6 +288,8 @@ define_request_and_response! { // it must be a 1 length array or else it will error. block_blob: [String; 1], }, + + // FIXME: `cuprate_test_utils` only has an `error` response for this. ResponseBase { block_id: String, } @@ -201,13 +299,31 @@ define_request_and_response! 
{ generateblocks, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1130..=1161, + GenerateBlocks, + + #[doc = serde_doc_test!( + GENERATE_BLOCKS_REQUEST => GenerateBlocksRequest { + amount_of_blocks: 1, + prev_block: String::default(), + wallet_address: "44AFFq5kSiGBoZ4NMDwYtN18obc8AemS33DBLWs3H7otXft3XjrpDtQGv7SqSsaBYBb98uNbr2VBBEt7f2wfn3RVGQBEP3A".into(), + starting_nonce: 0 + } + )] Request { amount_of_blocks: u64, - prev_block: String, + prev_block: String = default_string(), "default_string", starting_nonce: u32, wallet_address: String, }, + + #[doc = serde_doc_test!( + GENERATE_BLOCKS_RESPONSE => GenerateBlocksResponse { + base: ResponseBase::ok(), + blocks: vec!["49b712db7760e3728586f8434ee8bc8d7b3d410dac6bb6e98bf5845c83b917e4".into()], + height: 9783, + } + )] ResponseBase { blocks: Vec, height: u64, @@ -218,11 +334,43 @@ define_request_and_response! { get_last_block_header, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1214..=1238, + GetLastBlockHeader, + #[derive(Copy)] Request { fill_pow_hash: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + GET_LAST_BLOCK_HEADER_RESPONSE => GetLastBlockHeaderResponse { + base: AccessResponseBase::ok(), + block_header: BlockHeader { + block_size: 200419, + block_weight: 200419, + cumulative_difficulty: 366125734645190820, + cumulative_difficulty_top64: 0, + depth: 0, + difficulty: 282052561854, + difficulty_top64: 0, + hash: "57238217820195ac4c08637a144a885491da167899cf1d20e8e7ce0ae0a3434e".into(), + height: 3195020, + long_term_weight: 200419, + major_version: 16, + miner_tx_hash: "7a42667237d4f79891bb407c49c712a9299fb87fce799833a7b633a3a9377dbd".into(), + minor_version: 16, + nonce: 1885649739, + num_txes: 37, + orphan_status: false, + pow_hash: "".into(), + prev_hash: "22c72248ae9c5a2863c94735d710a3525c499f70707d1c2f395169bc5c8a0da3".into(), + reward: 615702960000, + timestamp: 1721245548, + wide_cumulative_difficulty: 
"0x514bd6a74a7d0a4".into(), + wide_difficulty: "0x41aba48bbe".into() + } + } + )] AccessResponseBase { block_header: BlockHeader, } @@ -233,14 +381,52 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1240..=1269, GetBlockHeaderByHash, + #[doc = serde_doc_test!( + GET_BLOCK_HEADER_BY_HASH_REQUEST => GetBlockHeaderByHashRequest { + hash: "e22cf75f39ae720e8b71b3d120a5ac03f0db50bba6379e2850975b4859190bc6".into(), + hashes: vec![], + fill_pow_hash: false, + } + )] Request { hash: String, - hashes: Vec, + hashes: Vec = default_vec::(), "default_vec", fill_pow_hash: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + GET_BLOCK_HEADER_BY_HASH_RESPONSE => GetBlockHeaderByHashResponse { + base: AccessResponseBase::ok(), + block_headers: vec![], + block_header: BlockHeader { + block_size: 210, + block_weight: 210, + cumulative_difficulty: 754734824984346, + cumulative_difficulty_top64: 0, + depth: 2282676, + difficulty: 815625611, + difficulty_top64: 0, + hash: "e22cf75f39ae720e8b71b3d120a5ac03f0db50bba6379e2850975b4859190bc6".into(), + height: 912345, + long_term_weight: 210, + major_version: 1, + miner_tx_hash: "c7da3965f25c19b8eb7dd8db48dcd4e7c885e2491db77e289f0609bf8e08ec30".into(), + minor_version: 2, + nonce: 1646, + num_txes: 0, + orphan_status: false, + pow_hash: "".into(), + prev_hash: "b61c58b2e0be53fad5ef9d9731a55e8a81d972b8d90ed07c04fd37ca6403ff78".into(), + reward: 7388968946286, + timestamp: 1452793716, + wide_cumulative_difficulty: "0x2ae6d65248f1a".into(), + wide_difficulty: "0x309d758b".into() + }, + } + )] AccessResponseBase { block_header: BlockHeader, - block_headers: Vec, + block_headers: Vec = default_vec::(), "default_vec", } } @@ -248,12 +434,50 @@ define_request_and_response! 
{ get_block_header_by_height, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1271..=1296, + GetBlockHeaderByHeight, + #[derive(Copy)] + #[doc = serde_doc_test!( + GET_BLOCK_HEADER_BY_HEIGHT_REQUEST => GetBlockHeaderByHeightRequest { + height: 912345, + fill_pow_hash: false, + } + )] Request { height: u64, fill_pow_hash: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + GET_BLOCK_HEADER_BY_HEIGHT_RESPONSE => GetBlockHeaderByHeightResponse { + base: AccessResponseBase::ok(), + block_header: BlockHeader { + block_size: 210, + block_weight: 210, + cumulative_difficulty: 754734824984346, + cumulative_difficulty_top64: 0, + depth: 2282677, + difficulty: 815625611, + difficulty_top64: 0, + hash: "e22cf75f39ae720e8b71b3d120a5ac03f0db50bba6379e2850975b4859190bc6".into(), + height: 912345, + long_term_weight: 210, + major_version: 1, + miner_tx_hash: "c7da3965f25c19b8eb7dd8db48dcd4e7c885e2491db77e289f0609bf8e08ec30".into(), + minor_version: 2, + nonce: 1646, + num_txes: 0, + orphan_status: false, + pow_hash: "".into(), + prev_hash: "b61c58b2e0be53fad5ef9d9731a55e8a81d972b8d90ed07c04fd37ca6403ff78".into(), + reward: 7388968946286, + timestamp: 1452793716, + wide_cumulative_difficulty: "0x2ae6d65248f1a".into(), + wide_difficulty: "0x309d758b".into() + }, + } + )] AccessResponseBase { block_header: BlockHeader, } @@ -263,13 +487,78 @@ define_request_and_response! 
{ get_block_headers_range, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1756..=1783, + GetBlockHeadersRange, + #[derive(Copy)] + #[doc = serde_doc_test!( + GET_BLOCK_HEADERS_RANGE_REQUEST => GetBlockHeadersRangeRequest { + start_height: 1545999, + end_height: 1546000, + fill_pow_hash: false, + } + )] Request { start_height: u64, end_height: u64, fill_pow_hash: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + GET_BLOCK_HEADERS_RANGE_RESPONSE => GetBlockHeadersRangeResponse { + base: AccessResponseBase::ok(), + headers: vec![ + BlockHeader { + block_size: 301413, + block_weight: 301413, + cumulative_difficulty: 13185267971483472, + cumulative_difficulty_top64: 0, + depth: 1649024, + difficulty: 134636057921, + difficulty_top64: 0, + hash: "86d1d20a40cefcf3dd410ff6967e0491613b77bf73ea8f1bf2e335cf9cf7d57a".into(), + height: 1545999, + long_term_weight: 301413, + major_version: 6, + miner_tx_hash: "9909c6f8a5267f043c3b2b079fb4eacc49ef9c1dee1c028eeb1a259b95e6e1d9".into(), + minor_version: 6, + nonce: 3246403956, + num_txes: 20, + orphan_status: false, + pow_hash: "".into(), + prev_hash: "0ef6e948f77b8f8806621003f5de24b1bcbea150bc0e376835aea099674a5db5".into(), + reward: 5025593029981, + timestamp: 1523002893, + wide_cumulative_difficulty: "0x2ed7ee6db56750".into(), + wide_difficulty: "0x1f58ef3541".into() + }, + BlockHeader { + block_size: 13322, + block_weight: 13322, + cumulative_difficulty: 13185402687569710, + cumulative_difficulty_top64: 0, + depth: 1649023, + difficulty: 134716086238, + difficulty_top64: 0, + hash: "b408bf4cfcd7de13e7e370c84b8314c85b24f0ba4093ca1d6eeb30b35e34e91a".into(), + height: 1546000, + long_term_weight: 13322, + major_version: 7, + miner_tx_hash: "7f749c7c64acb35ef427c7454c45e6688781fbead9bbf222cb12ad1a96a4e8f6".into(), + minor_version: 7, + nonce: 3737164176, + num_txes: 1, + orphan_status: false, + pow_hash: "".into(), + prev_hash: 
"86d1d20a40cefcf3dd410ff6967e0491613b77bf73ea8f1bf2e335cf9cf7d57a".into(), + reward: 4851952181070, + timestamp: 1523002931, + wide_cumulative_difficulty: "0x2ed80dcb69bf2e".into(), + wide_difficulty: "0x1f5db457de".into() + } + ], + } + )] AccessResponseBase { headers: Vec, } @@ -280,6 +569,14 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1298..=1313, GetBlock, + + #[doc = serde_doc_test!( + GET_BLOCK_REQUEST => GetBlockRequest { + height: 2751506, + hash: String::default(), + fill_pow_hash: false, + } + )] Request { // `monerod` has both `hash` and `height` fields. // In the RPC handler, if `hash.is_empty()`, it will use it, else, it uses `height`. @@ -288,12 +585,46 @@ define_request_and_response! { height: u64 = default_height(), "default_height", fill_pow_hash: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + GET_BLOCK_RESPONSE => GetBlockResponse { + base: AccessResponseBase::ok(), + blob: "1010c58bab9b06b27bdecfc6cd0a46172d136c08831cf67660377ba992332363228b1b722781e7807e07f502cef8a70101ff92f8a7010180e0a596bb1103d7cbf826b665d7a532c316982dc8dbc24f285cbc18bbcc27c7164cd9b3277a85d034019f629d8b36bd16a2bfce3ea80c31dc4d8762c67165aec21845494e32b7582fe00211000000297a787a000000000000000000000000".into(), + block_header: BlockHeader { + block_size: 106, + block_weight: 106, + cumulative_difficulty: 236046001376524168, + cumulative_difficulty_top64: 0, + depth: 443517, + difficulty: 313732272488, + difficulty_top64: 0, + hash: "43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428".into(), + height: 2751506, + long_term_weight: 176470, + major_version: 16, + miner_tx_hash: "e49b854c5f339d7410a77f2a137281d8042a0ffc7ef9ab24cd670b67139b24cd".into(), + minor_version: 16, + nonce: 4110909056, + num_txes: 0, + orphan_status: false, + pow_hash: "".into(), + prev_hash: "b27bdecfc6cd0a46172d136c08831cf67660377ba992332363228b1b722781e7".into(), + reward: 600000000000, + 
timestamp: 1667941829, + wide_cumulative_difficulty: "0x3469a966eb2f788".into(), + wide_difficulty: "0x490be69168".into() + }, + json: "{\n \"major_version\": 16, \n \"minor_version\": 16, \n \"timestamp\": 1667941829, \n \"prev_id\": \"b27bdecfc6cd0a46172d136c08831cf67660377ba992332363228b1b722781e7\", \n \"nonce\": 4110909056, \n \"miner_tx\": {\n \"version\": 2, \n \"unlock_time\": 2751566, \n \"vin\": [ {\n \"gen\": {\n \"height\": 2751506\n }\n }\n ], \n \"vout\": [ {\n \"amount\": 600000000000, \n \"target\": {\n \"tagged_key\": {\n \"key\": \"d7cbf826b665d7a532c316982dc8dbc24f285cbc18bbcc27c7164cd9b3277a85\", \n \"view_tag\": \"d0\"\n }\n }\n }\n ], \n \"extra\": [ 1, 159, 98, 157, 139, 54, 189, 22, 162, 191, 206, 62, 168, 12, 49, 220, 77, 135, 98, 198, 113, 101, 174, 194, 24, 69, 73, 78, 50, 183, 88, 47, 224, 2, 17, 0, 0, 0, 41, 122, 120, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n ], \n \"rct_signatures\": {\n \"type\": 0\n }\n }, \n \"tx_hashes\": [ ]\n}".into(), + miner_tx_hash: "e49b854c5f339d7410a77f2a137281d8042a0ffc7ef9ab24cd670b67139b24cd".into(), + tx_hashes: vec![], + } + )] AccessResponseBase { blob: String, block_header: BlockHeader, json: String, // FIXME: this should be defined in a struct, it has many fields. miner_tx_hash: String, - tx_hashes: Vec, + tx_hashes: Vec = default_vec::(), "default_vec", } } @@ -301,8 +632,72 @@ define_request_and_response! 
{ get_connections, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1734..=1754, + GetConnections, + Request {}, + + #[doc = serde_doc_test!( + GET_CONNECTIONS_RESPONSE => GetConnectionsResponse { + base: ResponseBase::ok(), + connections: vec![ + ConnectionInfo { + address: "3evk3kezfjg44ma6tvesy7rbxwwpgpympj45xar5fo4qajrsmkoaqdqd.onion:18083".into(), + address_type: 4, + avg_download: 0, + avg_upload: 0, + connection_id: "22ef856d0f1d44cc95e84fecfd065fe2".into(), + current_download: 0, + current_upload: 0, + height: 3195026, + host: "3evk3kezfjg44ma6tvesy7rbxwwpgpympj45xar5fo4qajrsmkoaqdqd.onion".into(), + incoming: false, + ip: "".into(), + live_time: 76651, + local_ip: false, + localhost: false, + peer_id: "0000000000000001".into(), + port: "".into(), + pruning_seed: 0, + recv_count: 240328, + recv_idle_time: 34, + rpc_credits_per_hash: 0, + rpc_port: 0, + send_count: 3406572, + send_idle_time: 30, + state: "normal".into(), + support_flags: 0 + }, + ConnectionInfo { + address: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion:18083".into(), + address_type: 4, + avg_download: 0, + avg_upload: 0, + connection_id: "c7734e15936f485a86d2b0534f87e499".into(), + current_download: 0, + current_upload: 0, + height: 3195024, + host: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion".into(), + incoming: false, + ip: "".into(), + live_time: 76755, + local_ip: false, + localhost: false, + peer_id: "0000000000000001".into(), + port: "".into(), + pruning_seed: 389, + recv_count: 237657, + recv_idle_time: 120, + rpc_credits_per_hash: 0, + rpc_port: 0, + send_count: 3370566, + send_idle_time: 120, + state: "normal".into(), + support_flags: 0 + } + ], + } + )] ResponseBase { // FIXME: This is a `std::list` in `monerod` because...? connections: Vec, @@ -315,6 +710,51 @@ define_request_and_response! 
{ core_rpc_server_commands_defs.h => 693..=789, GetInfo, Request {}, + + #[doc = serde_doc_test!( + GET_INFO_RESPONSE => GetInfoResponse { + base: AccessResponseBase::ok(), + adjusted_time: 1721245289, + alt_blocks_count: 16, + block_size_limit: 600000, + block_size_median: 300000, + block_weight_limit: 600000, + block_weight_median: 300000, + bootstrap_daemon_address: "".into(), + busy_syncing: false, + cumulative_difficulty: 366127702242611947, + cumulative_difficulty_top64: 0, + database_size: 235169075200, + difficulty: 280716748706, + difficulty_top64: 0, + free_space: 30521749504, + grey_peerlist_size: 4996, + height: 3195028, + height_without_bootstrap: 3195028, + incoming_connections_count: 62, + mainnet: true, + nettype: "mainnet".into(), + offline: false, + outgoing_connections_count: 1143, + restricted: false, + rpc_connections_count: 1, + stagenet: false, + start_time: 1720462427, + synchronized: true, + target: 120, + target_height: 0, + testnet: false, + top_block_hash: "bdf06d18ed1931a8ee62654e9b6478cc459bc7072628b8e36f4524d339552946".into(), + tx_count: 43205750, + tx_pool_size: 12, + update_available: false, + version: "0.18.3.3-release".into(), + was_bootstrap_ever_used: false, + white_peerlist_size: 1000, + wide_cumulative_difficulty: "0x514bf349299d2eb".into(), + wide_difficulty: "0x415c05a7a2".into() + } + )] AccessResponseBase { adjusted_time: u64, alt_blocks_count: u64, @@ -364,6 +804,20 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 1958..=1995, HardForkInfo, Request {}, + + #[doc = serde_doc_test!( + HARD_FORK_INFO_RESPONSE => HardForkInfoResponse { + base: AccessResponseBase::ok(), + earliest_height: 2689608, + enabled: true, + state: 0, + threshold: 0, + version: 16, + votes: 10080, + voting: 16, + window: 10080 + } + )] AccessResponseBase { earliest_height: u64, enabled: bool, @@ -381,9 +835,26 @@ define_request_and_response! 
{ cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2032..=2067, SetBans, + + #[doc = serde_doc_test!( + SET_BANS_REQUEST => SetBansRequest { + bans: vec![ SetBan { + host: "192.168.1.51".into(), + ip: 0, + ban: true, + seconds: 30 + }] + } + )] Request { bans: Vec, }, + + #[doc = serde_doc_test!( + SET_BANS_RESPONSE => SetBansResponse { + base: ResponseBase::ok(), + } + )] ResponseBase {} } @@ -393,6 +864,24 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 1997..=2030, GetBans, Request {}, + + #[doc = serde_doc_test!( + GET_BANS_RESPONSE => GetBansResponse { + base: ResponseBase::ok(), + bans: vec![ + GetBan { + host: "104.248.206.131".into(), + ip: 2211379304, + seconds: 689754 + }, + GetBan { + host: "209.222.252.0/24".into(), + ip: 0, + seconds: 689754 + } + ] + } + )] ResponseBase { bans: Vec, } @@ -403,11 +892,23 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2069..=2094, Banned, - #[cfg_attr(feature = "serde", serde(transparent))] - #[repr(transparent)] + + #[doc = serde_doc_test!( + BANNED_REQUEST => BannedRequest { + address: "95.216.203.255".into(), + } + )] Request { address: String, }, + + #[doc = serde_doc_test!( + BANNED_RESPONSE => BannedResponse { + banned: true, + seconds: 689655, + status: Status::Ok, + } + )] Response { banned: bool, seconds: u32, @@ -420,10 +921,21 @@ define_request_and_response! 
{ cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2096..=2116, FlushTransactionPool, + + #[doc = serde_doc_test!( + FLUSH_TRANSACTION_POOL_REQUEST => FlushTransactionPoolRequest { + txids: vec!["dc16fa8eaffe1484ca9014ea050e13131d3acf23b419f33bb4cc0b32b6c49308".into()], + } + )] Request { txids: Vec = default_vec::(), "default_vec", }, - #[cfg_attr(feature = "serde", serde(transparent))] + + #[doc = serde_doc_test!( + FLUSH_TRANSACTION_POOL_RESPONSE => FlushTransactionPoolResponse { + status: Status::Ok, + } + )] #[repr(transparent)] Response { status: Status, @@ -435,13 +947,35 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2118..=2168, GetOutputHistogram, + + #[doc = serde_doc_test!( + GET_OUTPUT_HISTOGRAM_REQUEST => GetOutputHistogramRequest { + amounts: vec![20000000000], + min_count: 0, + max_count: 0, + unlocked: false, + recent_cutoff: 0, + } + )] Request { amounts: Vec, - min_count: u64, - max_count: u64, - unlocked: bool, - recent_cutoff: u64, + min_count: u64 = default_zero::(), "default_zero", + max_count: u64 = default_zero::(), "default_zero", + unlocked: bool = default_false(), "default_false", + recent_cutoff: u64 = default_zero::(), "default_zero", }, + + #[doc = serde_doc_test!( + GET_OUTPUT_HISTOGRAM_RESPONSE => GetOutputHistogramResponse { + base: AccessResponseBase::ok(), + histogram: vec![HistogramEntry { + amount: 20000000000, + recent_instances: 0, + total_instances: 381490, + unlocked_instances: 0 + }] + } + )] AccessResponseBase { histogram: Vec, } @@ -451,11 +985,31 @@ define_request_and_response! 
{ get_coinbase_tx_sum, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2213..=2248, + GetCoinbaseTxSum, + + #[doc = serde_doc_test!( + GET_COINBASE_TX_SUM_REQUEST => GetCoinbaseTxSumRequest { + height: 1563078, + count: 2 + } + )] Request { height: u64, count: u64, }, + + #[doc = serde_doc_test!( + GET_COINBASE_TX_SUM_RESPONSE => GetCoinbaseTxSumResponse { + base: AccessResponseBase::ok(), + emission_amount: 9387854817320, + emission_amount_top64: 0, + fee_amount: 83981380000, + fee_amount_top64: 0, + wide_emission_amount: "0x889c7c06828".into(), + wide_fee_amount: "0x138dae29a0".into() + } + )] AccessResponseBase { emission_amount: u64, emission_amount_top64: u64, @@ -470,16 +1024,90 @@ define_request_and_response! { get_version, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2170..=2211, + GetVersion, Request {}, + + #[doc = serde_doc_test!( + GET_VERSION_RESPONSE => GetVersionResponse { + base: ResponseBase::ok(), + current_height: 3195051, + hard_forks: vec![ + HardforkEntry { + height: 1, + hf_version: 1 + }, + HardforkEntry { + height: 1009827, + hf_version: 2 + }, + HardforkEntry { + height: 1141317, + hf_version: 3 + }, + HardforkEntry { + height: 1220516, + hf_version: 4 + }, + HardforkEntry { + height: 1288616, + hf_version: 5 + }, + HardforkEntry { + height: 1400000, + hf_version: 6 + }, + HardforkEntry { + height: 1546000, + hf_version: 7 + }, + HardforkEntry { + height: 1685555, + hf_version: 8 + }, + HardforkEntry { + height: 1686275, + hf_version: 9 + }, + HardforkEntry { + height: 1788000, + hf_version: 10 + }, + HardforkEntry { + height: 1788720, + hf_version: 11 + }, + HardforkEntry { + height: 1978433, + hf_version: 12 + }, + HardforkEntry { + height: 2210000, + hf_version: 13 + }, + HardforkEntry { + height: 2210720, + hf_version: 14 + }, + HardforkEntry { + height: 2688888, + hf_version: 15 + }, + HardforkEntry { + height: 2689608, + hf_version: 16 + } + ], + release: true, + 
version: 196621, + target_height: 0, + } + )] ResponseBase { version: u32, release: bool, - #[serde(skip_serializing_if = "is_zero")] current_height: u64 = default_zero::(), "default_zero", - #[serde(skip_serializing_if = "is_zero")] target_height: u64 = default_zero::(), "default_zero", - #[serde(skip_serializing_if = "Vec::is_empty")] hard_forks: Vec = default_vec(), "default_vec", } } @@ -490,11 +1118,19 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 2250..=2277, GetFeeEstimate, Request {}, + + #[doc = serde_doc_test!( + GET_FEE_ESTIMATE_RESPONSE => GetFeeEstimateResponse { + base: AccessResponseBase::ok(), + fee: 20000, + fees: vec![20000,80000,320000,4000000], + quantization_mask: 10000, + } + )] AccessResponseBase { fee: u64, fees: Vec, - #[serde(skip_serializing_if = "is_one")] - quantization_mask: u64, + quantization_mask: u64 = default_one::(), "default_one", } } @@ -504,6 +1140,34 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 2279..=2310, GetAlternateChains, Request {}, + + #[doc = serde_doc_test!( + GET_ALTERNATE_CHAINS_RESPONSE => GetAlternateChainsResponse { + base: ResponseBase::ok(), + chains: vec![ + ChainInfo { + block_hash: "4826c7d45d7cf4f02985b5c405b0e5d7f92c8d25e015492ce19aa3b209295dce".into(), + block_hashes: vec!["4826c7d45d7cf4f02985b5c405b0e5d7f92c8d25e015492ce19aa3b209295dce".into()], + difficulty: 357404825113208373, + difficulty_top64: 0, + height: 3167471, + length: 1, + main_chain_parent_block: "69b5075ea627d6ba06b1c30b7e023884eeaef5282cf58ec847dab838ddbcdd86".into(), + wide_difficulty: "0x4f5c1cb79e22635".into(), + }, + ChainInfo { + block_hash: "33ee476f5a1c5b9d889274cbbe171f5e0112df7ed69021918042525485deb401".into(), + block_hashes: vec!["33ee476f5a1c5b9d889274cbbe171f5e0112df7ed69021918042525485deb401".into()], + difficulty: 354736121711617293, + difficulty_top64: 0, + height: 3157465, + length: 1, + main_chain_parent_block: 
"fd522fcc4cefe5c8c0e5c5600981b3151772c285df3a4e38e5c4011cf466d2cb".into(), + wide_difficulty: "0x4ec469f8b9ee50d".into(), + } + ], + } + )] ResponseBase { chains: Vec, } @@ -513,11 +1177,23 @@ define_request_and_response! { relay_tx, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2361..=2381, + RelayTx, + + #[doc = serde_doc_test!( + RELAY_TX_REQUEST => RelayTxRequest { + txids: vec!["9fd75c429cbe52da9a52f2ffc5fbd107fe7fd2099c0d8de274dc8a67e0c98613".into()] + } + )] Request { txids: Vec, }, - #[cfg_attr(feature = "serde", serde(transparent))] + + #[doc = serde_doc_test!( + RELAY_TX_RESPONSE => RelayTxResponse { + status: Status::Ok, + } + )] #[repr(transparent)] Response { status: Status, @@ -528,16 +1204,88 @@ define_request_and_response! { sync_info, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2383..=2443, + SyncInfo, Request {}, + + #[doc = serde_doc_test!( + SYNC_INFO_RESPONSE => SyncInfoResponse { + base: AccessResponseBase::ok(), + height: 3195157, + next_needed_pruning_seed: 0, + overview: "[]".into(), + spans: vec![], + peers: vec![ + SyncInfoPeer { + info: ConnectionInfo { + address: "142.93.128.65:44986".into(), + address_type: 1, + avg_download: 1, + avg_upload: 1, + connection_id: "a5803c4c2dac49e7b201dccdef54c862".into(), + current_download: 2, + current_upload: 1, + height: 3195157, + host: "142.93.128.65".into(), + incoming: true, + ip: "142.93.128.65".into(), + live_time: 18, + local_ip: false, + localhost: false, + peer_id: "6830e9764d3e5687".into(), + port: "44986".into(), + pruning_seed: 0, + recv_count: 20340, + recv_idle_time: 0, + rpc_credits_per_hash: 0, + rpc_port: 18089, + send_count: 32235, + send_idle_time: 6, + state: "normal".into(), + support_flags: 1 + } + }, + SyncInfoPeer { + info: ConnectionInfo { + address: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion:18083".into(), + address_type: 4, + avg_download: 0, + avg_upload: 0, + connection_id: 
"277f7c821bc546878c8bd29977e780f5".into(), + current_download: 0, + current_upload: 0, + height: 3195157, + host: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion".into(), + incoming: false, + ip: "".into(), + live_time: 2246, + local_ip: false, + localhost: false, + peer_id: "0000000000000001".into(), + port: "".into(), + pruning_seed: 389, + recv_count: 65164, + recv_idle_time: 15, + rpc_credits_per_hash: 0, + rpc_port: 0, + send_count: 99120, + send_idle_time: 15, + state: "normal".into(), + support_flags: 0 + } + } + ], + target_height: 0, + } + )] AccessResponseBase { height: u64, next_needed_pruning_seed: u32, overview: String, // FIXME: This is a `std::list` in `monerod` because...? - peers: Vec, + peers: Vec = default_vec::(), "default_vec", // FIXME: This is a `std::list` in `monerod` because...? - spans: Vec, + spans: Vec = default_vec::(), "default_vec", target_height: u64, } } @@ -548,6 +1296,14 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 1637..=1664, GetTransactionPoolBacklog, Request {}, + + // TODO: enable test after binary string impl. + // #[doc = serde_doc_test!( + // GET_TRANSACTION_POOL_BACKLOG_RESPONSE => GetTransactionPoolBacklogResponse { + // base: ResponseBase::ok(), + // backlog: "...Binary...".into(), + // } + // )] ResponseBase { // TODO: this is a [`BinaryString`]. backlog: Vec, @@ -558,18 +1314,44 @@ define_request_and_response! { get_output_distribution, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2445..=2520, + /// This type is also used in the (undocumented) /// [`/get_output_distribution.bin`](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server.h#L138) /// binary endpoint. 
GetOutputDistribution, + + #[doc = serde_doc_test!( + GET_OUTPUT_DISTRIBUTION_REQUEST => GetOutputDistributionRequest { + amounts: vec![628780000], + from_height: 1462078, + binary: true, + compress: false, + cumulative: false, + to_height: 0, + } + )] Request { amounts: Vec, - binary: bool, - compress: bool, - cumulative: bool, - from_height: u64, - to_height: u64, + binary: bool = default_true(), "default_true", + compress: bool = default_false(), "default_false", + cumulative: bool = default_false(), "default_false", + from_height: u64 = default_zero::(), "default_zero", + to_height: u64 = default_zero::(), "default_zero", }, + + // TODO: enable test after binary string impl. + // #[doc = serde_doc_test!( + // GET_OUTPUT_DISTRIBUTION_RESPONSE => GetOutputDistributionResponse { + // base: AccessResponseBase::ok(), + // distributions: vec![Distribution::Uncompressed(DistributionUncompressed { + // start_height: 1462078, + // base: 0, + // distribution: vec![], + // amount: 2628780000, + // binary: true, + // })], + // } + // )] AccessResponseBase { distributions: Vec, } @@ -581,6 +1363,31 @@ define_request_and_response! 
{ core_rpc_server_commands_defs.h => 996..=1044, GetMinerData, Request {}, + + #[doc = serde_doc_test!( + GET_MINER_DATA_RESPONSE => GetMinerDataResponse { + base: ResponseBase::ok(), + already_generated_coins: 18186022843595960691, + difficulty: "0x48afae42de".into(), + height: 2731375, + major_version: 16, + median_weight: 300000, + prev_id: "78d50c5894d187c4946d54410990ca59a75017628174a9e8c7055fa4ca5c7c6d".into(), + seed_hash: "a6b869d50eca3a43ec26fe4c369859cf36ae37ce6ecb76457d31ffeb8a6ca8a6".into(), + tx_backlog: vec![ + GetMinerDataTxBacklogEntry { + fee: 30700000, + id: "9868490d6bb9207fdd9cf17ca1f6c791b92ca97de0365855ea5c089f67c22208".into(), + weight: 1535 + }, + GetMinerDataTxBacklogEntry { + fee: 44280000, + id: "b6000b02bbec71e18ad704bcae09fb6e5ae86d897ced14a718753e76e86c0a0a".into(), + weight: 2214 + }, + ], + } + )] ResponseBase { major_version: u8, height: u64, @@ -589,6 +1396,7 @@ define_request_and_response! { difficulty: String, median_weight: u64, already_generated_coins: u64, + tx_backlog: Vec, } } @@ -596,11 +1404,26 @@ define_request_and_response! { prune_blockchain, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2747..=2772, + PruneBlockchain, + #[derive(Copy)] + #[doc = serde_doc_test!( + PRUNE_BLOCKCHAIN_REQUEST => PruneBlockchainRequest { + check: true + } + )] Request { check: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + PRUNE_BLOCKCHAIN_RESPONSE => PruneBlockchainResponse { + base: ResponseBase::ok(), + pruned: true, + pruning_seed: 387, + } + )] ResponseBase { pruned: bool, pruning_seed: u32, @@ -611,13 +1434,29 @@ define_request_and_response! 
{ calc_pow, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1046..=1066, + CalcPow, + + #[doc = serde_doc_test!( + CALC_POW_REQUEST => CalcPowRequest { + major_version: 14, + height: 2286447, + block_blob: "0e0ed286da8006ecdc1aab3033cf1716c52f13f9d8ae0051615a2453643de94643b550d543becd0000000002abc78b0101ffefc68b0101fcfcf0d4b422025014bb4a1eade6622fd781cb1063381cad396efa69719b41aa28b4fce8c7ad4b5f019ce1dc670456b24a5e03c2d9058a2df10fec779e2579753b1847b74ee644f16b023c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000051399a1bc46a846474f5b33db24eae173a26393b976054ee14f9feefe99925233802867097564c9db7a36af5bb5ed33ab46e63092bd8d32cef121608c3258edd55562812e21cc7e3ac73045745a72f7d74581d9a0849d6f30e8b2923171253e864f4e9ddea3acb5bc755f1c4a878130a70c26297540bc0b7a57affb6b35c1f03d8dbd54ece8457531f8cba15bb74516779c01193e212050423020e45aa2c15dcb".into(), + seed_hash: "d432f499205150873b2572b5f033c9c6e4b7c6f3394bd2dd93822cd7085e7307".into(), + } + )] Request { major_version: u8, height: u64, block_blob: String, seed_hash: String, }, + + #[doc = serde_doc_test!( + CALC_POW_RESPONSE => CalcPowResponse { + pow_hash: "d0402d6834e26fb94a9ce38c6424d27d2069896a9b8b1ce685d79936bca6e0a8".into(), + } + )] #[cfg_attr(feature = "serde", serde(transparent))] #[repr(transparent)] Response { @@ -629,12 +1468,26 @@ define_request_and_response! { flush_cache, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2774..=2796, + FlushCache, + #[derive(Copy)] + #[doc = serde_doc_test!( + FLUSH_CACHE_REQUEST => FlushCacheRequest { + bad_txs: true, + bad_blocks: true + } + )] Request { bad_txs: bool = default_false(), "default_false", bad_blocks: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + FLUSH_CACHE_RESPONSE => FlushCacheResponse { + base: ResponseBase::ok(), + } + )] ResponseBase {} } @@ -642,11 +1495,36 @@ define_request_and_response! 
{ add_aux_pow, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1068..=1112, + AddAuxPow, + + #[doc = serde_doc_test!( + ADD_AUX_POW_REQUEST => AddAuxPowRequest { + blocktemplate_blob: "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a0000000002c681c30101ff8a81c3010180e0a596bb11033b7eedf47baf878f3490cb20b696079c34bd017fe59b0d070e74d73ffabc4bb0e05f011decb630f3148d0163b3bd39690dde4078e4cfb69fecf020d6278a27bad10c58023c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(), + aux_pow: vec![AuxPow { + id: "3200b4ea97c3b2081cd4190b58e49572b2319fed00d030ad51809dff06b5d8c8".into(), + hash: "7b35762de164b20885e15dbe656b1138db06bb402fa1796f5765a23933d8859a".into() + }] + } + )] Request { blocktemplate_blob: String, aux_pow: Vec, }, + + #[doc = serde_doc_test!( + ADD_AUX_POW_RESPONSE => AddAuxPowResponse { + base: ResponseBase::ok(), + aux_pow: vec![AuxPow { + hash: "7b35762de164b20885e15dbe656b1138db06bb402fa1796f5765a23933d8859a".into(), + id: "3200b4ea97c3b2081cd4190b58e49572b2319fed00d030ad51809dff06b5d8c8".into(), + }], + blockhashing_blob: "1010ee97e2a106e9f8ebe8887e5b609949ac8ea6143e560ed13552b110cb009b21f0cfca1eaccf00000000b2685c1283a646bc9020c758daa443be145b7370ce5a6efacb3e614117032e2c22".into(), + blocktemplate_blob: "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a0000000002c681c30101ff8a81c3010180e0a596bb11033b7eedf47baf878f3490cb20b696079c34bd017fe59b0d070e74d73ffabc4bb0e05f011decb630f3148d0163b3bd39690dde4078e4cfb69fecf020d6278a27bad10c58023c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(), + merkle_root: "7b35762de164b20885e15dbe656b1138db06bb402fa1796f5765a23933d8859a".into(), + merkle_tree_depth: 0, + } + )] ResponseBase { blocktemplate_blob: String, blockhashing_blob: String, diff --git 
a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs index 4643ecc..2b31cab 100644 --- a/rpc/types/src/misc/misc.rs +++ b/rpc/types/src/misc/misc.rs @@ -22,7 +22,7 @@ use crate::{ CORE_RPC_STATUS_BUSY, CORE_RPC_STATUS_NOT_MINING, CORE_RPC_STATUS_OK, CORE_RPC_STATUS_PAYMENT_REQUIRED, }, - defaults::default_zero, + defaults::{default_string, default_zero}, macros::monero_definition_link, }; @@ -51,9 +51,9 @@ macro_rules! define_struct_and_impl_epee { )* } ) => { - $( #[$struct_attr] )* #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] + $( #[$struct_attr] )* pub struct $struct_name { $( $( #[$field_attr] )* @@ -142,7 +142,9 @@ define_struct_and_impl_epee! { rpc_port: u16, send_count: u64, send_idle_time: u64, - ssl: bool, + // Exists in the original definition, but isn't + // used or (de)serialized for RPC purposes. + // ssl: bool, state: String, support_flags: u32, } @@ -156,7 +158,9 @@ define_struct_and_impl_epee! { )] /// Used in [`crate::json::SetBansRequest`]. 
SetBan { + #[cfg_attr(feature = "serde", serde(default = "default_string"))] host: String, + #[cfg_attr(feature = "serde", serde(default = "default_zero"))] ip: u32, ban: bool, seconds: u32, diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index 41530cb..c140777 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -5,7 +5,7 @@ //---------------------------------------------------------------------------------------------------- Import use crate::{ base::{AccessResponseBase, ResponseBase}, - defaults::{default_false, default_string, default_true}, + defaults::{default_false, default_string, default_true, default_vec, default_zero}, macros::define_request_and_response, misc::{ GetOutputsOut, KeyImageSpentStatus, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status, @@ -13,6 +13,81 @@ use crate::{ }, }; +//---------------------------------------------------------------------------------------------------- Macro +/// Adds a (de)serialization doc-test to a type in `other.rs`. +/// +/// It expects a const string from `cuprate_test_utils::rpc::data` +/// and the expected value it should (de)serialize into/from. +/// +/// It tests that the provided const JSON string can properly +/// (de)serialize into the expected value. +/// +/// See below for example usage. This macro is only used in this file. +macro_rules! serde_doc_test { + // This branch _only_ tests that the type can be deserialize + // from the string, not that any value is correct. + // + // Practically, this is used for structs that have + // many values that are complicated to test, e.g. `GET_TRANSACTIONS_RESPONSE`. + // + // HACK: + // The type itself doesn't need to be specified because it happens + // to just be the `CamelCase` version of the provided const. + ( + // `const` string from `cuprate_test_utils::rpc::data`. + $cuprate_test_utils_rpc_const:ident + ) => { + paste::paste! 
{ + concat!( + "```rust\n", + "use cuprate_test_utils::rpc::data::other::*;\n", + "use cuprate_rpc_types::{misc::*, base::*, other::*};\n", + "use serde_json::{Value, from_str, from_value};\n", + "\n", + "let string = from_str::<", + stringify!([<$cuprate_test_utils_rpc_const:camel>]), + ">(", + stringify!($cuprate_test_utils_rpc_const), + ").unwrap();\n", + "```\n", + ) + } + }; + + // This branch tests that the type can be deserialize + // from the string AND that values are correct. + ( + // `const` string from `cuprate_test_utils::rpc::data` + // v + $cuprate_test_utils_rpc_const:ident => $expected:expr + // ^ + // Expected value as an expression + ) => { + paste::paste! { + concat!( + "```rust\n", + "use cuprate_test_utils::rpc::data::other::*;\n", + "use cuprate_rpc_types::{misc::*, base::*, other::*};\n", + "use serde_json::{Value, from_str, from_value};\n", + "\n", + "// The expected data.\n", + "let expected = ", + stringify!($expected), + ";\n", + "\n", + "let string = from_str::<", + stringify!([<$cuprate_test_utils_rpc_const:camel>]), + ">(", + stringify!($cuprate_test_utils_rpc_const), + ").unwrap();\n", + "\n", + "assert_eq!(string, expected);\n", + "```\n", + ) + } + }; +} + //---------------------------------------------------------------------------------------------------- Definitions define_request_and_response! { get_height, @@ -20,6 +95,14 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 138..=160, GetHeight, Request {}, + + #[doc = serde_doc_test!( + GET_HEIGHT_RESPONSE => GetHeightResponse { + base: ResponseBase::ok(), + hash: "68bb1a1cff8e2a44c3221e8e1aff80bc6ca45d06fa8eff4d2a3a7ac31d4efe3f".into(), + height: 3195160, + } + )] ResponseBase { hash: String, height: u64, @@ -31,6 +114,15 @@ define_request_and_response! 
{ cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 370..=451, GetTransactions, + + #[doc = serde_doc_test!( + GET_TRANSACTIONS_REQUEST => GetTransactionsRequest { + txs_hashes: vec!["d6e48158472848e6687173a91ae6eebfa3e1d778e65252ee99d7515d63090408".into()], + decode_as_json: false, + prune: false, + split: false, + } + )] Request { txs_hashes: Vec, // FIXME: this is documented as optional but it isn't serialized as an optional @@ -40,11 +132,13 @@ define_request_and_response! { prune: bool = default_false(), "default_false", split: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!(GET_TRANSACTIONS_RESPONSE)] AccessResponseBase { - txs_as_hex: Vec, - txs_as_json: Vec, - missed_tx: Vec, - txs: Vec, + txs_as_hex: Vec = default_vec::(), "default_vec", + txs_as_json: Vec = default_vec::(), "default_vec", + missed_tx: Vec = default_vec::(), "default_vec", + txs: Vec = default_vec::(), "default_vec", } } @@ -54,6 +148,13 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 288..=308, GetAltBlocksHashes, Request {}, + + #[doc = serde_doc_test!( + GET_ALT_BLOCKS_HASHES_RESPONSE => GetAltBlocksHashesResponse { + base: AccessResponseBase::ok(), + blks_hashes: vec!["8ee10db35b1baf943f201b303890a29e7d45437bd76c2bd4df0d2f2ee34be109".into()], + } + )] AccessResponseBase { blks_hashes: Vec, } @@ -63,10 +164,27 @@ define_request_and_response! 
{ is_key_image_spent, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 454..=484, + IsKeyImageSpent, + + #[doc = serde_doc_test!( + IS_KEY_IMAGE_SPENT_REQUEST => IsKeyImageSpentRequest { + key_images: vec![ + "8d1bd8181bf7d857bdb281e0153d84cd55a3fcaa57c3e570f4a49f935850b5e3".into(), + "7319134bfc50668251f5b899c66b005805ee255c136f0e1cecbb0f3a912e09d4".into() + ] + } + )] Request { key_images: Vec, }, + + #[doc = serde_doc_test!( + IS_KEY_IMAGE_SPENT_RESPONSE => IsKeyImageSpentResponse { + base: AccessResponseBase::ok(), + spent_status: vec![1, 1], + } + )] AccessResponseBase { /// FIXME: These are [`KeyImageSpentStatus`] in [`u8`] form. spent_status: Vec, @@ -77,19 +195,54 @@ define_request_and_response! { send_raw_transaction, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 370..=451, + SendRawTransaction, + + #[doc = serde_doc_test!( + SEND_RAW_TRANSACTION_REQUEST => SendRawTransactionRequest { + tx_as_hex: "dc16fa8eaffe1484ca9014ea050e13131d3acf23b419f33bb4cc0b32b6c49308".into(), + do_not_relay: false, + do_sanity_checks: true, + } + )] Request { tx_as_hex: String, do_not_relay: bool = default_false(), "default_false", do_sanity_checks: bool = default_true(), "default_true", }, + + #[doc = serde_doc_test!( + SEND_RAW_TRANSACTION_RESPONSE => SendRawTransactionResponse { + base: AccessResponseBase { + response_base: ResponseBase { + status: Status::Other("Failed".into()), + untrusted: false, + }, + credits: 0, + top_hash: "".into(), + }, + double_spend: false, + fee_too_low: false, + invalid_input: false, + invalid_output: false, + low_mixin: false, + not_relayed: false, + overspend: false, + reason: "".into(), + sanity_check_failed: false, + too_big: false, + too_few_outputs: false, + tx_extra_too_big: false, + nonzero_unlock_time: false, + } + )] AccessResponseBase { double_spend: bool, fee_too_low: bool, invalid_input: bool, invalid_output: bool, low_mixin: bool, - nonzero_unlock_time: bool, + 
nonzero_unlock_time: bool = default_false(), "default_false", not_relayed: bool, overspend: bool, reason: String, @@ -104,13 +257,29 @@ define_request_and_response! { start_mining, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 665..=691, + StartMining, + + #[doc = serde_doc_test!( + START_MINING_REQUEST => StartMiningRequest { + do_background_mining: false, + ignore_battery: true, + miner_address: "47xu3gQpF569au9C2ajo5SSMrWji6xnoE5vhr94EzFRaKAGw6hEGFXYAwVADKuRpzsjiU1PtmaVgcjUJF89ghGPhUXkndHc".into(), + threads_count: 1 + } + )] Request { miner_address: String, threads_count: u64, do_background_mining: bool, ignore_battery: bool, }, + + #[doc = serde_doc_test!( + START_MINING_RESPONSE => StartMiningResponse { + base: ResponseBase::ok(), + } + )] ResponseBase {} } @@ -120,6 +289,12 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 825..=843, StopMining, Request {}, + + #[doc = serde_doc_test!( + STOP_MINING_RESPONSE => StopMiningResponse { + base: ResponseBase::ok(), + } + )] ResponseBase {} } @@ -129,6 +304,27 @@ define_request_and_response! { core_rpc_server_commands_defs.h => 846..=895, MiningStatus, Request {}, + + #[doc = serde_doc_test!( + MINING_STATUS_RESPONSE => MiningStatusResponse { + base: ResponseBase::ok(), + active: false, + address: "".into(), + bg_idle_threshold: 0, + bg_ignore_battery: false, + bg_min_idle_seconds: 0, + bg_target: 0, + block_reward: 0, + block_target: 120, + difficulty: 292022797663, + difficulty_top64: 0, + is_background_mining_enabled: false, + pow_algorithm: "RandomX".into(), + speed: 0, + threads_count: 0, + wide_difficulty: "0x43fdea455f".into(), + } + )] ResponseBase { active: bool, address: String, @@ -154,6 +350,12 @@ define_request_and_response! 
{ core_rpc_server_commands_defs.h => 898..=916, SaveBc, Request {}, + + #[doc = serde_doc_test!( + SAVE_BC_RESPONSE => SaveBcResponse { + base: ResponseBase::ok(), + } + )] ResponseBase {} } @@ -161,11 +363,79 @@ define_request_and_response! { get_peer_list, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1369..=1417, + GetPeerList, + + #[doc = serde_doc_test!( + GET_PEER_LIST_REQUEST => GetPeerListRequest { + public_only: true, + include_blocked: false, + } + )] Request { public_only: bool = default_true(), "default_true", include_blocked: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + GET_PEER_LIST_RESPONSE => GetPeerListResponse { + base: ResponseBase::ok(), + gray_list: vec![ + Peer { + host: "161.97.193.0".into(), + id: 18269586253849566614, + ip: 12673441, + last_seen: 0, + port: 18080, + rpc_port: 0, + rpc_credits_per_hash: 0, + pruning_seed: 0, + }, + Peer { + host: "193.142.4.2".into(), + id: 10865563782170056467, + ip: 33853121, + last_seen: 0, + port: 18085, + pruning_seed: 387, + rpc_port: 19085, + rpc_credits_per_hash: 0, + } + ], + white_list: vec![ + Peer { + host: "78.27.98.0".into(), + id: 11368279936682035606, + ip: 6429518, + last_seen: 1721246387, + port: 18080, + pruning_seed: 384, + rpc_port: 0, + rpc_credits_per_hash: 0, + }, + Peer { + host: "67.4.163.2".into(), + id: 16545113262826842499, + ip: 44237891, + last_seen: 1721246387, + port: 18080, + rpc_port: 0, + rpc_credits_per_hash: 0, + pruning_seed: 0, + }, + Peer { + host: "70.52.75.3".into(), + id: 3863337548778177169, + ip: 55260230, + last_seen: 1721246387, + port: 18080, + rpc_port: 18081, + rpc_credits_per_hash: 0, + pruning_seed: 0, + } + ] + } + )] ResponseBase { white_list: Vec, gray_list: Vec, @@ -177,10 +447,22 @@ define_request_and_response! 
{ cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1450..=1470, SetLogHashRate, + #[derive(Copy)] + #[doc = serde_doc_test!( + SET_LOG_HASH_RATE_REQUEST => SetLogHashRateRequest { + visible: true, + } + )] Request { - visible: bool, + visible: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + SET_LOG_HASH_RATE_RESPONSE => SetLogHashRateResponse { + base: ResponseBase::ok(), + } + )] ResponseBase {} } @@ -188,11 +470,24 @@ define_request_and_response! { set_log_level, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1450..=1470, + SetLogLevel, + #[derive(Copy)] + #[doc = serde_doc_test!( + SET_LOG_LEVEL_REQUEST => SetLogLevelRequest { + level: 1 + } + )] Request { level: u8, }, + + #[doc = serde_doc_test!( + SET_LOG_LEVEL_RESPONSE => SetLogLevelResponse { + base: ResponseBase::ok(), + } + )] ResponseBase {} } @@ -200,10 +495,24 @@ define_request_and_response! { set_log_categories, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1494..=1517, + SetLogCategories, + + #[doc = serde_doc_test!( + SET_LOG_CATEGORIES_REQUEST => SetLogCategoriesRequest { + categories: "*:INFO".into(), + } + )] Request { categories: String = default_string(), "default_string", }, + + #[doc = serde_doc_test!( + SET_LOG_CATEGORIES_RESPONSE => SetLogCategoriesResponse { + base: ResponseBase::ok(), + categories: "*:INFO".into(), + } + )] ResponseBase { categories: String, } @@ -213,13 +522,29 @@ define_request_and_response! 
{ set_bootstrap_daemon, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1785..=1812, + SetBootstrapDaemon, + + #[doc = serde_doc_test!( + SET_BOOTSTRAP_DAEMON_REQUEST => SetBootstrapDaemonRequest { + address: "http://getmonero.org:18081".into(), + username: String::new(), + password: String::new(), + proxy: String::new(), + } + )] Request { address: String, - username: String, - password: String, - proxy: String, + username: String = default_string(), "default_string", + password: String = default_string(), "default_string", + proxy: String = default_string(), "default_string", }, + + #[doc = serde_doc_test!( + SET_BOOTSTRAP_DAEMON_RESPONSE => SetBootstrapDaemonResponse { + status: Status::Ok, + } + )] Response { status: Status, } @@ -229,8 +554,11 @@ define_request_and_response! { get_transaction_pool, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1569..=1591, + GetTransactionPool, Request {}, + + #[doc = serde_doc_test!(GET_TRANSACTION_POOL_RESPONSE)] AccessResponseBase { transactions: Vec, spent_key_images: Vec, @@ -241,8 +569,41 @@ define_request_and_response! 
{ get_transaction_pool_stats, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1712..=1732, + GetTransactionPoolStats, Request {}, + + #[doc = serde_doc_test!( + GET_TRANSACTION_POOL_STATS_RESPONSE => GetTransactionPoolStatsResponse { + base: AccessResponseBase::ok(), + pool_stats: TxpoolStats { + bytes_max: 11843, + bytes_med: 2219, + bytes_min: 1528, + bytes_total: 144192, + fee_total: 7018100000, + histo: vec![ + TxpoolHisto { bytes: 11219, txs: 4 }, + TxpoolHisto { bytes: 9737, txs: 5 }, + TxpoolHisto { bytes: 8757, txs: 4 }, + TxpoolHisto { bytes: 14763, txs: 4 }, + TxpoolHisto { bytes: 15007, txs: 6 }, + TxpoolHisto { bytes: 15924, txs: 6 }, + TxpoolHisto { bytes: 17869, txs: 8 }, + TxpoolHisto { bytes: 10894, txs: 5 }, + TxpoolHisto { bytes: 38485, txs: 10 }, + TxpoolHisto { bytes: 1537, txs: 1 }, + ], + histo_98pc: 186, + num_10m: 0, + num_double_spends: 0, + num_failing: 0, + num_not_relayed: 0, + oldest: 1721261651, + txs_total: 53 + } + } + )] AccessResponseBase { pool_stats: TxpoolStats, } @@ -252,9 +613,16 @@ define_request_and_response! { stop_daemon, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1814..=1831, + StopDaemon, Request {}, - ResponseBase { + + #[doc = serde_doc_test!( + STOP_DAEMON_RESPONSE => StopDaemonResponse { + status: Status::Ok, + } + )] + Response { status: Status, } } @@ -263,8 +631,17 @@ define_request_and_response! { get_limit, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1852..=1874, + GetLimit, Request {}, + + #[doc = serde_doc_test!( + GET_LIMIT_RESPONSE => GetLimitResponse { + base: ResponseBase::ok(), + limit_down: 1280000, + limit_up: 1280000, + } + )] ResponseBase { limit_down: u64, limit_up: u64, @@ -275,11 +652,27 @@ define_request_and_response! 
{ set_limit, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1876..=1903, + SetLimit, + #[doc = serde_doc_test!( + SET_LIMIT_REQUEST => SetLimitRequest { + limit_down: 1024, + limit_up: 0, + } + )] Request { - limit_down: i64, - limit_up: i64, + // FIXME: These may need to be `Option`. + limit_down: i64 = default_zero::(), "default_zero", + limit_up: i64 = default_zero::(), "default_zero", }, + + #[doc = serde_doc_test!( + SET_LIMIT_RESPONSE => SetLimitResponse { + base: ResponseBase::ok(), + limit_down: 1024, + limit_up: 128, + } + )] ResponseBase { limit_down: i64, limit_up: i64, @@ -290,11 +683,26 @@ define_request_and_response! { out_peers, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1876..=1903, + OutPeers, + + #[doc = serde_doc_test!( + OUT_PEERS_REQUEST => OutPeersRequest { + out_peers: 3232235535, + set: true, + } + )] Request { set: bool = default_true(), "default_true", out_peers: u32, }, + + #[doc = serde_doc_test!( + OUT_PEERS_RESPONSE => OutPeersResponse { + base: ResponseBase::ok(), + out_peers: 3232235535, + } + )] ResponseBase { out_peers: u32, } @@ -304,8 +712,20 @@ define_request_and_response! { get_net_stats, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 793..=822, + GetNetStats, Request {}, + + #[doc = serde_doc_test!( + GET_NET_STATS_RESPONSE => GetNetStatsResponse { + base: ResponseBase::ok(), + start_time: 1721251858, + total_bytes_in: 16283817214, + total_bytes_out: 34225244079, + total_packets_in: 5981922, + total_packets_out: 3627107, + } + )] ResponseBase { start_time: u64, total_packets_in: u64, @@ -319,11 +739,43 @@ define_request_and_response! 
{ get_outs, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 567..=609, + GetOuts, + #[doc = serde_doc_test!( + GET_OUTS_REQUEST => GetOutsRequest { + outputs: vec![ + GetOutputsOut { amount: 1, index: 0 }, + GetOutputsOut { amount: 1, index: 1 }, + ], + get_txid: true + } + )] Request { outputs: Vec, get_txid: bool, }, + + #[doc = serde_doc_test!( + GET_OUTS_RESPONSE => GetOutsResponse { + base: ResponseBase::ok(), + outs: vec![ + OutKey { + height: 51941, + key: "08980d939ec297dd597119f498ad69fed9ca55e3a68f29f2782aae887ef0cf8e".into(), + mask: "1738eb7a677c6149228a2beaa21bea9e3370802d72a3eec790119580e02bd522".into(), + txid: "9d651903b80fb70b9935b72081cd967f543662149aed3839222511acd9100601".into(), + unlocked: true + }, + OutKey { + height: 51945, + key: "454fe46c405be77625fa7e3389a04d3be392346983f27603561ac3a3a74f4a75".into(), + mask: "1738eb7a677c6149228a2beaa21bea9e3370802d72a3eec790119580e02bd522".into(), + txid: "230bff732dc5f225df14fff82aadd1bf11b3fb7ad3a03413c396a617e843f7d0".into(), + unlocked: true + }, + ] + } + )] ResponseBase { outs: Vec, } @@ -333,11 +785,31 @@ define_request_and_response! { update, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2324..=2359, + Update, + + #[doc = serde_doc_test!( + UPDATE_REQUEST => UpdateRequest { + command: "check".into(), + path: "".into(), + } + )] Request { command: String, path: String = default_string(), "default_string", }, + + #[doc = serde_doc_test!( + UPDATE_RESPONSE => UpdateResponse { + base: ResponseBase::ok(), + auto_uri: "".into(), + hash: "".into(), + path: "".into(), + update: false, + user_uri: "".into(), + version: "".into(), + } + )] ResponseBase { auto_uri: String, hash: String, @@ -352,35 +824,62 @@ define_request_and_response! 
{ pop_blocks, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2722..=2745, + PopBlocks, + + #[doc = serde_doc_test!( + POP_BLOCKS_REQUEST => PopBlocksRequest { + nblocks: 6 + } + )] Request { nblocks: u64, }, + + #[doc = serde_doc_test!( + POP_BLOCKS_RESPONSE => PopBlocksResponse { + base: ResponseBase::ok(), + height: 76482, + } + )] ResponseBase { height: u64, } } -define_request_and_response! { - UNDOCUMENTED_ENDPOINT, - cc73fe71162d564ffda8e549b79a350bca53c454 => - core_rpc_server_commands_defs.h => 2798..=2823, - GetTxIdsLoose, - Request { - txid_template: String, - num_matching_bits: u32, - }, - ResponseBase { - txids: Vec, - } -} - define_request_and_response! { UNDOCUMENTED_ENDPOINT, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1615..=1635, + GetTransactionPoolHashes, Request {}, + + #[doc = serde_doc_test!( + GET_TRANSACTION_POOL_HASHES_RESPONSE => GetTransactionPoolHashesResponse { + base: ResponseBase::ok(), + tx_hashes: vec![ + "aa928aed888acd6152c60194d50a4df29b0b851be6169acf11b6a8e304dd6c03".into(), + "794345f321a98f3135151f3056c0fdf8188646a8dab27de971428acf3551dd11".into(), + "1e9d2ae11f2168a228942077483e70940d34e8658c972bbc3e7f7693b90edf17".into(), + "7375c928f261d00f07197775eb0bfa756e5f23319819152faa0b3c670fe54c1b".into(), + "2e4d5f8c5a45498f37fb8b6ca4ebc1efa0c371c38c901c77e66b08c072287329".into(), + "eee6d596cf855adfb10e1597d2018e3a61897ac467ef1d4a5406b8d20bfbd52f".into(), + "59c574d7ba9bb4558470f74503c7518946a85ea22c60fccfbdec108ce7d8f236".into(), + "0d57bec1e1075a9e1ac45cf3b3ced1ad95ccdf2a50ce360190111282a0178655".into(), + "60d627b2369714a40009c07d6185ebe7fa4af324fdfa8d95a37a936eb878d062".into(), + "661d7e728a901a8cb4cf851447d9cd5752462687ed0b776b605ba706f06bdc7d".into(), + "b80e1f09442b00b3fffe6db5d263be6267c7586620afff8112d5a8775a6fc58e".into(), + "974063906d1ddfa914baf85176b0f689d616d23f3d71ed4798458c8b4f9b9d8f".into(), + 
"d2575ae152a180be4981a9d2fc009afcd073adaa5c6d8b022c540a62d6c905bb".into(), + "3d78aa80ee50f506683bab9f02855eb10257a08adceda7cbfbdfc26b10f6b1bb".into(), + "8b5bc125bdb73b708500f734501d55088c5ac381a0879e1141634eaa72b6a4da".into(), + "11c06f4d2f00c912ca07313ed2ea5366f3cae914a762bed258731d3d9e3706df".into(), + "b3644dc7c9a3a53465fe80ad3769e516edaaeb7835e16fdd493aac110d472ae1".into(), + "ed2478ad793b923dbf652c8612c40799d764e5468897021234a14a37346bc6ee".into() + ], + } + )] ResponseBase { tx_hashes: Vec, } @@ -391,14 +890,43 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1419..=1448, GetPublicNodes, + + #[doc = serde_doc_test!( + GET_PUBLIC_NODES_REQUEST => GetPublicNodesRequest { + gray: false, + white: true, + include_blocked: false, + } + )] Request { gray: bool = default_false(), "default_false", white: bool = default_true(), "default_true", include_blocked: bool = default_false(), "default_false", }, + + #[doc = serde_doc_test!( + GET_PUBLIC_NODES_RESPONSE => GetPublicNodesResponse { + base: ResponseBase::ok(), + gray: vec![], + white: vec![ + PublicNode { + host: "70.52.75.3".into(), + last_seen: 1721246387, + rpc_credits_per_hash: 0, + rpc_port: 18081, + }, + PublicNode { + host: "zbjkbsxc5munw3qusl7j2hpcmikhqocdf4pqhnhtpzw5nt5jrmofptid.onion:18083".into(), + last_seen: 1720186288, + rpc_credits_per_hash: 0, + rpc_port: 18089, + } + ] + } + )] ResponseBase { - gray: Vec, - white: Vec, + gray: Vec = default_vec::(), "default_vec", + white: Vec = default_vec::(), "default_vec", } } diff --git a/test-utils/src/rpc/data/json.rs b/test-utils/src/rpc/data/json.rs index 2463e45..a05af67 100644 --- a/test-utils/src/rpc/data/json.rs +++ b/test-utils/src/rpc/data/json.rs @@ -771,7 +771,7 @@ r#"{ "id": "0", "method": "get_output_histogram", "params": { - "amounts": ["20000000000"] + "amounts": [20000000000] } }"#; Response = @@ -1106,13 +1106,17 @@ r#"{ "id": "0", "jsonrpc": "2.0", "result": { + "credits": 0, 
"distributions": [{ "amount": 2628780000, "base": 0, "distribution": "", - "start_height": 1462078 + "start_height": 1462078, + "binary": false }], - "status": "OK" + "status": "OK", + "top_hash": "", + "untrusted": false } }"#; } diff --git a/test-utils/src/rpc/data/other.rs b/test-utils/src/rpc/data/other.rs index 2559bbe..80a48ab 100644 --- a/test-utils/src/rpc/data/other.rs +++ b/test-utils/src/rpc/data/other.rs @@ -234,11 +234,13 @@ define_request_and_response! { set_log_hash_rate (other), SET_LOG_HASH_RATE: &str, Request = -r#"{}"#; +r#"{ + "visible": true +}"#; Response = r#" { - "status": "OK" + "status": "OK", "untrusted": false }"#; } @@ -252,7 +254,7 @@ r#"{ }"#; Response = r#"{ - "status": "OK" + "status": "OK", "untrusted": false }"#; } @@ -673,7 +675,7 @@ r#"{ "limit_down": 1280000, "limit_up": 1280000, "status": "OK", - "untrusted": true + "untrusted": false }"#; } @@ -688,7 +690,7 @@ r#"{ r#"{ "limit_down": 1024, "limit_up": 128, - "status": "OK" + "status": "OK", "untrusted": false }"#; } @@ -712,13 +714,15 @@ define_request_and_response! 
{ get_net_stats (other), GET_NET_STATS: &str, Request = -r#"{ - "in_peers": 3232235535 -}"#; +r#"{}"#; Response = r#"{ - "in_peers": 3232235535, + "start_time": 1721251858, "status": "OK", + "total_bytes_in": 16283817214, + "total_bytes_out": 34225244079, + "total_packets_in": 5981922, + "total_packets_out": 3627107, "untrusted": false }"#; } @@ -804,7 +808,26 @@ r#"{ "credits": 0, "status": "OK", "top_hash": "", - "tx_hashes": ["aa928aed888acd6152c60194d50a4df29b0b851be6169acf11b6a8e304dd6c03","794345f321a98f3135151f3056c0fdf8188646a8dab27de971428acf3551dd11","1e9d2ae11f2168a228942077483e70940d34e8658c972bbc3e7f7693b90edf17","7375c928f261d00f07197775eb0bfa756e5f23319819152faa0b3c670fe54c1b","2e4d5f8c5a45498f37fb8b6ca4ebc1efa0c371c38c901c77e66b08c072287329","eee6d596cf855adfb10e1597d2018e3a61897ac467ef1d4a5406b8d20bfbd52f","59c574d7ba9bb4558470f74503c7518946a85ea22c60fccfbdec108ce7d8f236","0d57bec1e1075a9e1ac45cf3b3ced1ad95ccdf2a50ce360190111282a0178655","60d627b2369714a40009c07d6185ebe7fa4af324fdfa8d95a37a936eb878d062","661d7e728a901a8cb4cf851447d9cd5752462687ed0b776b605ba706f06bdc7d","b80e1f09442b00b3fffe6db5d263be6267c7586620afff8112d5a8775a6fc58e","974063906d1ddfa914baf85176b0f689d616d23f3d71ed4798458c8b4f9b9d8f","d2575ae152a180be4981a9d2fc009afcd073adaa5c6d8b022c540a62d6c905bb","3d78aa80ee50f506683bab9f02855eb10257a08adceda7cbfbdfc26b10f6b1bb","8b5bc125bdb73b708500f734501d55088c5ac381a0879e1141634eaa72b6a4da","11c06f4d2f00c912ca07313ed2ea5366f3cae914a762bed258731d3d9e3706df","b3644dc7c9a3a53465fe80ad3769e516edaaeb7835e16fdd493aac110d472ae1","ed2478ad793b923dbf652c8612c40799d764e5468897021234a14a37346bc6ee"], + "tx_hashes": [ + "aa928aed888acd6152c60194d50a4df29b0b851be6169acf11b6a8e304dd6c03", + "794345f321a98f3135151f3056c0fdf8188646a8dab27de971428acf3551dd11", + "1e9d2ae11f2168a228942077483e70940d34e8658c972bbc3e7f7693b90edf17", + "7375c928f261d00f07197775eb0bfa756e5f23319819152faa0b3c670fe54c1b", + 
"2e4d5f8c5a45498f37fb8b6ca4ebc1efa0c371c38c901c77e66b08c072287329", + "eee6d596cf855adfb10e1597d2018e3a61897ac467ef1d4a5406b8d20bfbd52f", + "59c574d7ba9bb4558470f74503c7518946a85ea22c60fccfbdec108ce7d8f236", + "0d57bec1e1075a9e1ac45cf3b3ced1ad95ccdf2a50ce360190111282a0178655", + "60d627b2369714a40009c07d6185ebe7fa4af324fdfa8d95a37a936eb878d062", + "661d7e728a901a8cb4cf851447d9cd5752462687ed0b776b605ba706f06bdc7d", + "b80e1f09442b00b3fffe6db5d263be6267c7586620afff8112d5a8775a6fc58e", + "974063906d1ddfa914baf85176b0f689d616d23f3d71ed4798458c8b4f9b9d8f", + "d2575ae152a180be4981a9d2fc009afcd073adaa5c6d8b022c540a62d6c905bb", + "3d78aa80ee50f506683bab9f02855eb10257a08adceda7cbfbdfc26b10f6b1bb", + "8b5bc125bdb73b708500f734501d55088c5ac381a0879e1141634eaa72b6a4da", + "11c06f4d2f00c912ca07313ed2ea5366f3cae914a762bed258731d3d9e3706df", + "b3644dc7c9a3a53465fe80ad3769e516edaaeb7835e16fdd493aac110d472ae1", + "ed2478ad793b923dbf652c8612c40799d764e5468897021234a14a37346bc6ee" + ], "untrusted": false }"#; } diff --git a/typos.toml b/typos.toml index 0317c40..fbd66d0 100644 --- a/typos.toml +++ b/typos.toml @@ -18,4 +18,5 @@ extend-exclude = [ "/misc/gpg_keys/", "cryptonight/", "/test-utils/src/rpc/data/json.rs", + "rpc/types/src/json.rs", ] From b44c6b045be5ece83a539f06be69f0f8622e3f51 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Mon, 29 Jul 2024 00:13:08 +0000 Subject: [PATCH 025/104] Consensus: add alt-chain handling (#214) * add `pop_blocks` to the difficulty cache * add a rolling median struct * use RollingMedian in weight cache * add pop_blocks to weight cache * add alt context cache * add getting alt RX vms * rework alt cache * add alt block verify function * keep alt caches around * add alt checked alt blocks to the cache * check the alt blocks timestamp * add docs + cleanup code * add popping blocks from the context cache * finish popping blocks + fix tests * fix doc * add a test popping blocks from HF cache * add a request to clear alt caches * add back lint * Apply 
suggestions from code review Co-authored-by: hinto-janai * review fixes * small changes * change panic doc --------- Co-authored-by: hinto-janai --- Cargo.lock | 2 + consensus/Cargo.toml | 2 + consensus/fast-sync/src/create.rs | 7 +- consensus/rules/src/blocks.rs | 4 +- consensus/rules/src/hard_forks.rs | 28 +- consensus/src/block.rs | 259 +++--------------- consensus/src/block/alt_block.rs | 304 ++++++++++++++++++++++ consensus/src/block/batch_prepare.rs | 207 +++++++++++++++ consensus/src/block/free.rs | 32 +++ consensus/src/context.rs | 88 ++++++- consensus/src/context/alt_chains.rs | 215 +++++++++++++++ consensus/src/context/difficulty.rs | 86 +++++- consensus/src/context/hardforks.rs | 58 ++++- consensus/src/context/rx_vms.rs | 52 +++- consensus/src/context/task.rs | 150 +++++++++-- consensus/src/context/weight.rs | 186 ++++++------- consensus/src/tests/context/difficulty.rs | 77 +++++- consensus/src/tests/context/hardforks.rs | 43 +++ consensus/src/tests/context/weight.rs | 81 +++++- consensus/src/tests/mock_db.rs | 12 +- helper/src/num.rs | 6 + helper/src/num/rolling_median.rs | 150 +++++++++++ storage/blockchain/src/service/mod.rs | 6 +- storage/blockchain/src/service/read.rs | 49 ++-- storage/blockchain/src/service/tests.rs | 32 ++- types/src/blockchain.rs | 24 +- types/src/lib.rs | 3 +- types/src/types.rs | 50 +++- 28 files changed, 1801 insertions(+), 412 deletions(-) create mode 100644 consensus/src/block/alt_block.rs create mode 100644 consensus/src/block/batch_prepare.rs create mode 100644 consensus/src/block/free.rs create mode 100644 consensus/src/context/alt_chains.rs create mode 100644 helper/src/num/rolling_median.rs diff --git a/Cargo.lock b/Cargo.lock index 55f17a1..32a5cbd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -535,11 +535,13 @@ dependencies = [ "multiexp", "proptest", "proptest-derive", + "rand", "randomx-rs", "rayon", "thiserror", "thread_local", "tokio", + "tokio-test", "tokio-util", "tower", "tracing", diff --git 
a/consensus/Cargo.toml b/consensus/Cargo.toml index 624eb63..521b98c 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -29,6 +29,7 @@ tokio = { workspace = true, features = ["rt"] } tokio-util = { workspace = true } hex = { workspace = true } +rand = { workspace = true } [dev-dependencies] cuprate-test-utils = { path = "../test-utils" } @@ -37,5 +38,6 @@ cuprate-consensus-rules = {path = "./rules", features = ["proptest"]} hex-literal = { workspace = true } tokio = { workspace = true, features = ["rt-multi-thread", "macros"]} +tokio-test = { workspace = true } proptest = { workspace = true } proptest-derive = { workspace = true } \ No newline at end of file diff --git a/consensus/fast-sync/src/create.rs b/consensus/fast-sync/src/create.rs index dc2311f..adae100 100644 --- a/consensus/fast-sync/src/create.rs +++ b/consensus/fast-sync/src/create.rs @@ -6,7 +6,10 @@ use tower::{Service, ServiceExt}; use cuprate_blockchain::{ config::ConfigBuilder, cuprate_database::RuntimeError, service::DatabaseReadHandle, }; -use cuprate_types::blockchain::{BCReadRequest, BCResponse}; +use cuprate_types::{ + blockchain::{BCReadRequest, BCResponse}, + Chain, +}; use cuprate_fast_sync::{hash_of_hashes, BlockId, HashOfHashes}; @@ -19,7 +22,7 @@ async fn read_batch( let mut block_ids = Vec::::with_capacity(BATCH_SIZE as usize); for height in height_from..(height_from + BATCH_SIZE) { - let request = BCReadRequest::BlockHash(height); + let request = BCReadRequest::BlockHash(height, Chain::Main); let response_channel = handle.ready().await?.call(request); let response = response_channel.await?; diff --git a/consensus/rules/src/blocks.rs b/consensus/rules/src/blocks.rs index cb0e3e4..ecd6a11 100644 --- a/consensus/rules/src/blocks.rs +++ b/consensus/rules/src/blocks.rs @@ -148,7 +148,7 @@ fn block_size_sanity_check( /// Sanity check on the block weight. 
/// /// ref: -fn check_block_weight( +pub fn check_block_weight( block_weight: usize, median_for_block_reward: usize, ) -> Result<(), BlockError> { @@ -184,7 +184,7 @@ fn check_prev_id(block: &Block, top_hash: &[u8; 32]) -> Result<(), BlockError> { /// Checks the blocks timestamp is in the valid range. /// /// ref: -fn check_timestamp(block: &Block, median_timestamp: u64) -> Result<(), BlockError> { +pub fn check_timestamp(block: &Block, median_timestamp: u64) -> Result<(), BlockError> { if block.header.timestamp < median_timestamp || block.header.timestamp > current_unix_timestamp() + BLOCK_FUTURE_TIME_LIMIT { diff --git a/consensus/rules/src/hard_forks.rs b/consensus/rules/src/hard_forks.rs index b34b93d..016a51f 100644 --- a/consensus/rules/src/hard_forks.rs +++ b/consensus/rules/src/hard_forks.rs @@ -38,7 +38,7 @@ pub enum HardForkError { } /// Information about a given hard-fork. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct HFInfo { height: u64, threshold: u64, @@ -50,7 +50,7 @@ impl HFInfo { } /// Information about every hard-fork Monero has had. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct HFsInfo([HFInfo; NUMB_OF_HARD_FORKS]); impl HFsInfo { @@ -243,7 +243,7 @@ impl HardFork { } /// A struct holding the current voting state of the blockchain. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Eq, PartialEq)] pub struct HFVotes { votes: [u64; NUMB_OF_HARD_FORKS], vote_list: VecDeque, @@ -293,6 +293,28 @@ impl HFVotes { } } + /// Pop a number of blocks from the top of the cache and push some values into the front of the cache, + /// i.e. the oldest blocks. + /// + /// `old_block_votes` should contain the HFs below the window that now will be in the window after popping + /// blocks from the top. + /// + /// # Panics + /// + /// This will panic if `old_block_votes` contains more HFs than `numb_blocks`. 
+ pub fn reverse_blocks(&mut self, numb_blocks: usize, old_block_votes: Self) { + assert!(old_block_votes.vote_list.len() <= numb_blocks); + + for hf in self.vote_list.drain(self.vote_list.len() - numb_blocks..) { + self.votes[hf as usize - 1] -= 1; + } + + for old_vote in old_block_votes.vote_list.into_iter().rev() { + self.vote_list.push_front(old_vote); + self.votes[old_vote as usize - 1] += 1; + } + } + /// Returns the total votes for a hard-fork. /// /// ref: diff --git a/consensus/src/block.rs b/consensus/src/block.rs index d3d0672..1b36eb9 100644 --- a/consensus/src/block.rs +++ b/consensus/src/block.rs @@ -12,31 +12,35 @@ use monero_serai::{ block::Block, transaction::{Input, Transaction}, }; -use rayon::prelude::*; use tower::{Service, ServiceExt}; -use tracing::instrument; + +use cuprate_helper::asynch::rayon_spawn_async; +use cuprate_types::{ + AltBlockInformation, VerifiedBlockInformation, VerifiedTransactionInformation, +}; use cuprate_consensus_rules::{ blocks::{ - calculate_pow_hash, check_block, check_block_pow, is_randomx_seed_height, - randomx_seed_height, BlockError, RandomX, + calculate_pow_hash, check_block, check_block_pow, randomx_seed_height, BlockError, RandomX, }, - hard_forks::HardForkError, miner_tx::MinerTxError, ConsensusError, HardFork, }; -use cuprate_helper::asynch::rayon_spawn_async; -use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; use crate::{ - context::{ - rx_vms::RandomXVM, BlockChainContextRequest, BlockChainContextResponse, - RawBlockChainContext, - }, + context::{BlockChainContextRequest, BlockChainContextResponse, RawBlockChainContext}, transactions::{TransactionVerificationData, VerifyTxRequest, VerifyTxResponse}, Database, ExtendedConsensusError, }; +mod alt_block; +mod batch_prepare; +mod free; + +use alt_block::sanity_check_alt_block; +use batch_prepare::batch_prepare_main_chain_block; +use free::pull_ordered_transactions; + /// A pre-prepared block with all data needed to verify it, 
except the block's proof of work. #[derive(Debug)] pub struct PreparedBlockExPow { @@ -124,7 +128,7 @@ impl PreparedBlock { let (hf_version, hf_vote) = HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?; - let Some(Input::Gen(height)) = block.miner_tx.prefix.inputs.first() else { + let [Input::Gen(height)] = &block.miner_tx.prefix.inputs[..] else { Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputNotOfTypeGen, )))? @@ -191,6 +195,7 @@ pub enum VerifyBlockRequest { /// The already prepared block. block: PreparedBlock, /// The full list of transactions for this block, in the order given in `block`. + // TODO: Remove the Arc here txs: Vec>, }, /// Batch prepares a list of blocks and transactions for verification. @@ -198,6 +203,16 @@ pub enum VerifyBlockRequest { /// The list of blocks and their transactions (not necessarily in the order given in the block). blocks: Vec<(Block, Vec)>, }, + /// A request to sanity check an alt block, also returning the cumulative difficulty of the alt chain. + /// + /// Unlike requests to verify main chain blocks, you do not need to add the returned block to the context + /// service, you will still have to add it to the database though. + AltChain { + /// The alt block to sanity check. + block: Block, + /// The alt transactions. + prepared_txs: HashMap<[u8; 32], TransactionVerificationData>, + }, } /// A response from a verify block request. @@ -205,6 +220,8 @@ pub enum VerifyBlockRequest { pub enum VerifyBlockResponse { /// This block is valid. MainChain(VerifiedBlockInformation), + /// The sanity checked alt block. + AltChain(AltBlockInformation), /// A list of prepared blocks for verification, you should call [`VerifyBlockRequest::MainChainPrepped`] on each of the returned /// blocks to fully verify them. 
MainChainBatchPrepped(Vec<(PreparedBlock, Vec>)>), @@ -296,206 +313,20 @@ where verify_prepped_main_chain_block(block, txs, context_svc, tx_verifier_svc, None) .await } + VerifyBlockRequest::AltChain { + block, + prepared_txs, + } => sanity_check_alt_block(block, prepared_txs, context_svc).await, } } .boxed() } } -/// Batch prepares a list of blocks for verification. -#[instrument(level = "debug", name = "batch_prep_blocks", skip_all, fields(amt = blocks.len()))] -async fn batch_prepare_main_chain_block( - blocks: Vec<(Block, Vec)>, - mut context_svc: C, -) -> Result -where - C: Service< - BlockChainContextRequest, - Response = BlockChainContextResponse, - Error = tower::BoxError, - > + Send - + 'static, - C::Future: Send + 'static, -{ - let (blocks, txs): (Vec<_>, Vec<_>) = blocks.into_iter().unzip(); - - tracing::debug!("Calculating block hashes."); - let blocks: Vec = rayon_spawn_async(|| { - blocks - .into_iter() - .map(PreparedBlockExPow::new) - .collect::, _>>() - }) - .await?; - - let Some(last_block) = blocks.last() else { - return Err(ExtendedConsensusError::NoBlocksToVerify); - }; - - // hard-forks cannot be reversed, so the last block will contain the highest hard fork (provided the - // batch is valid). - let top_hf_in_batch = last_block.hf_version; - - // A Vec of (timestamp, HF) for each block to calculate the expected difficulty for each block. - let mut timestamps_hfs = Vec::with_capacity(blocks.len()); - let mut new_rx_vm = None; - - tracing::debug!("Checking blocks follow each other."); - - // For every block make sure they have the correct height and previous ID - for window in blocks.windows(2) { - let block_0 = &window[0]; - let block_1 = &window[1]; - - // Make sure no blocks in the batch have a higher hard fork than the last block. 
- if block_0.hf_version > top_hf_in_batch { - Err(ConsensusError::Block(BlockError::HardForkError( - HardForkError::VersionIncorrect, - )))?; - } - - if block_0.block_hash != block_1.block.header.previous - || block_0.height != block_1.height - 1 - { - tracing::debug!("Blocks do not follow each other, verification failed."); - Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?; - } - - // Cache any potential RX VM seeds as we may need them for future blocks in the batch. - if is_randomx_seed_height(block_0.height) && top_hf_in_batch >= HardFork::V12 { - new_rx_vm = Some((block_0.height, block_0.block_hash)); - } - - timestamps_hfs.push((block_0.block.header.timestamp, block_0.hf_version)) - } - - // Get the current blockchain context. - let BlockChainContextResponse::Context(checked_context) = context_svc - .ready() - .await? - .call(BlockChainContextRequest::GetContext) - .await - .map_err(Into::::into)? - else { - panic!("Context service returned wrong response!"); - }; - - // Calculate the expected difficulties for each block in the batch. - let BlockChainContextResponse::BatchDifficulties(difficulties) = context_svc - .ready() - .await? - .call(BlockChainContextRequest::BatchGetDifficulties( - timestamps_hfs, - )) - .await - .map_err(Into::::into)? - else { - panic!("Context service returned wrong response!"); - }; - - let context = checked_context.unchecked_blockchain_context().clone(); - - // Make sure the blocks follow the main chain. 
- - if context.chain_height != blocks[0].height { - tracing::debug!("Blocks do not follow main chain, verification failed."); - - Err(ConsensusError::Block(BlockError::MinerTxError( - MinerTxError::InputsHeightIncorrect, - )))?; - } - - if context.top_hash != blocks[0].block.header.previous { - tracing::debug!("Blocks do not follow main chain, verification failed."); - - Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?; - } - - let mut rx_vms = if top_hf_in_batch < HardFork::V12 { - HashMap::new() - } else { - let BlockChainContextResponse::RxVms(rx_vms) = context_svc - .ready() - .await? - .call(BlockChainContextRequest::GetCurrentRxVm) - .await? - else { - panic!("Blockchain context service returned wrong response!"); - }; - - rx_vms - }; - - // If we have a RX seed in the batch calculate it. - if let Some((new_vm_height, new_vm_seed)) = new_rx_vm { - tracing::debug!("New randomX seed in batch, initialising VM"); - - let new_vm = rayon_spawn_async(move || { - Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!")) - }) - .await; - - context_svc - .oneshot(BlockChainContextRequest::NewRXVM(( - new_vm_seed, - new_vm.clone(), - ))) - .await - .map_err(Into::::into)?; - - rx_vms.insert(new_vm_height, new_vm); - } - - tracing::debug!("Calculating PoW and prepping transaction"); - - let blocks = rayon_spawn_async(move || { - blocks - .into_par_iter() - .zip(difficulties) - .zip(txs) - .map(|((block, difficultly), txs)| { - // Calculate the PoW for the block. - let height = block.height; - let block = PreparedBlock::new_prepped( - block, - rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref), - )?; - - // Check the PoW - check_block_pow(&block.pow_hash, difficultly).map_err(ConsensusError::Block)?; - - // Now setup the txs. 
- let mut txs = txs - .into_par_iter() - .map(|tx| { - let tx = TransactionVerificationData::new(tx)?; - Ok::<_, ConsensusError>((tx.tx_hash, tx)) - }) - .collect::, _>>()?; - - // Order the txs correctly. - let mut ordered_txs = Vec::with_capacity(txs.len()); - - for tx_hash in &block.block.txs { - let tx = txs - .remove(tx_hash) - .ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?; - ordered_txs.push(Arc::new(tx)); - } - - Ok((block, ordered_txs)) - }) - .collect::, ExtendedConsensusError>>() - }) - .await?; - - Ok(VerifyBlockResponse::MainChainBatchPrepped(blocks)) -} - /// Verifies a prepared block. async fn verify_main_chain_block( block: Block, - mut txs: HashMap<[u8; 32], TransactionVerificationData>, + txs: HashMap<[u8; 32], TransactionVerificationData>, mut context_svc: C, tx_verifier_svc: TxV, ) -> Result @@ -557,20 +388,11 @@ where .map_err(ConsensusError::Block)?; // Check that the txs included are what we need and that there are not any extra. - - let mut ordered_txs = Vec::with_capacity(txs.len()); - - tracing::debug!("Ordering transactions for block."); - - if !prepped_block.block.txs.is_empty() { - for tx_hash in &prepped_block.block.txs { - let tx = txs - .remove(tx_hash) - .ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?; - ordered_txs.push(Arc::new(tx)); - } - drop(txs); - } + // TODO: Remove the Arc here + let ordered_txs = pull_ordered_transactions(&prepped_block.block, txs)? + .into_iter() + .map(Arc::new) + .collect(); verify_prepped_main_chain_block( prepped_block, @@ -604,8 +426,7 @@ where } else { let BlockChainContextResponse::Context(checked_context) = context_svc .oneshot(BlockChainContextRequest::GetContext) - .await - .map_err(Into::::into)? + .await? else { panic!("Context service returned wrong response!"); }; diff --git a/consensus/src/block/alt_block.rs b/consensus/src/block/alt_block.rs new file mode 100644 index 0000000..cf6f213 --- /dev/null +++ b/consensus/src/block/alt_block.rs @@ -0,0 +1,304 @@ +//! 
Alt Blocks +//! +//! Alt blocks are sanity checked by [`sanity_check_alt_block`], that function will also compute the cumulative +//! difficulty of the alt chain so callers will know if they should re-org to the alt chain. +use std::{collections::HashMap, sync::Arc}; + +use monero_serai::{block::Block, transaction::Input}; +use tower::{Service, ServiceExt}; + +use cuprate_consensus_rules::{ + blocks::{ + check_block_pow, check_block_weight, check_timestamp, randomx_seed_height, BlockError, + }, + miner_tx::MinerTxError, + ConsensusError, +}; +use cuprate_helper::asynch::rayon_spawn_async; +use cuprate_types::{AltBlockInformation, Chain, ChainId, VerifiedTransactionInformation}; + +use crate::{ + block::{free::pull_ordered_transactions, PreparedBlock}, + context::{ + difficulty::DifficultyCache, + rx_vms::RandomXVM, + weight::{self, BlockWeightsCache}, + AltChainContextCache, AltChainRequestToken, BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW, + }, + transactions::TransactionVerificationData, + BlockChainContextRequest, BlockChainContextResponse, ExtendedConsensusError, + VerifyBlockResponse, +}; + +/// This function sanity checks an alt-block. +/// +/// Returns [`AltBlockInformation`], which contains the cumulative difficulty of the alt chain. +/// +/// This function only checks the block's PoW and its weight. +pub async fn sanity_check_alt_block( + block: Block, + txs: HashMap<[u8; 32], TransactionVerificationData>, + mut context_svc: C, +) -> Result +where + C: Service< + BlockChainContextRequest, + Response = BlockChainContextResponse, + Error = tower::BoxError, + > + Send + + 'static, + C::Future: Send + 'static, +{ + // Fetch the alt-chains context cache. + let BlockChainContextResponse::AltChainContextCache(mut alt_context_cache) = context_svc + .ready() + .await? + .call(BlockChainContextRequest::AltChainContextCache { + prev_id: block.header.previous, + _token: AltChainRequestToken, + }) + .await? 
+ else { + panic!("Context service returned wrong response!"); + }; + + // Check if the block's miner input is formed correctly. + let [Input::Gen(height)] = &block.miner_tx.prefix.inputs[..] else { + Err(ConsensusError::Block(BlockError::MinerTxError( + MinerTxError::InputNotOfTypeGen, + )))? + }; + + if *height != alt_context_cache.chain_height { + Err(ConsensusError::Block(BlockError::MinerTxError( + MinerTxError::InputsHeightIncorrect, + )))? + } + + // prep the alt block. + let prepped_block = { + let rx_vm = alt_rx_vm( + alt_context_cache.chain_height, + block.header.major_version, + alt_context_cache.parent_chain, + &mut alt_context_cache, + &mut context_svc, + ) + .await?; + + rayon_spawn_async(move || PreparedBlock::new(block, rx_vm.as_deref())).await? + }; + + // get the difficulty cache for this alt chain. + let difficulty_cache = alt_difficulty_cache( + prepped_block.block.header.previous, + &mut alt_context_cache, + &mut context_svc, + ) + .await?; + + // Check the alt block timestamp is in the correct range. + if let Some(median_timestamp) = + difficulty_cache.median_timestamp(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW.try_into().unwrap()) + { + check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)? + }; + + let next_difficulty = difficulty_cache.next_difficulty(&prepped_block.hf_version); + // make sure the block's PoW is valid for this difficulty. + check_block_pow(&prepped_block.pow_hash, next_difficulty).map_err(ConsensusError::Block)?; + + let cumulative_difficulty = difficulty_cache.cumulative_difficulty() + next_difficulty; + + let ordered_txs = pull_ordered_transactions(&prepped_block.block, txs)?; + + let block_weight = + prepped_block.miner_tx_weight + ordered_txs.iter().map(|tx| tx.tx_weight).sum::(); + + let alt_weight_cache = alt_weight_cache( + prepped_block.block.header.previous, + &mut alt_context_cache, + &mut context_svc, + ) + .await?; + + // Check the block weight is below the limit. 
+ check_block_weight( + block_weight, + alt_weight_cache.median_for_block_reward(&prepped_block.hf_version), + ) + .map_err(ConsensusError::Block)?; + + let long_term_weight = weight::calculate_block_long_term_weight( + &prepped_block.hf_version, + block_weight, + alt_weight_cache.median_long_term_weight(), + ); + + // Get the chainID or generate a new one if this is the first alt block in this alt chain. + let chain_id = *alt_context_cache + .chain_id + .get_or_insert_with(|| ChainId(rand::random())); + + // Create the alt block info. + let block_info = AltBlockInformation { + block_hash: prepped_block.block_hash, + block: prepped_block.block, + block_blob: prepped_block.block_blob, + txs: ordered_txs + .into_iter() + .map(|tx| VerifiedTransactionInformation { + tx_blob: tx.tx_blob, + tx_weight: tx.tx_weight, + fee: tx.fee, + tx_hash: tx.tx_hash, + tx: tx.tx, + }) + .collect(), + pow_hash: prepped_block.pow_hash, + weight: block_weight, + height: alt_context_cache.chain_height, + long_term_weight, + cumulative_difficulty, + chain_id, + }; + + // Add this block to the cache. + alt_context_cache.add_new_block( + block_info.height, + block_info.block_hash, + block_info.weight, + block_info.long_term_weight, + block_info.block.header.timestamp, + ); + + // Add this alt cache back to the context service. + context_svc + .oneshot(BlockChainContextRequest::AddAltChainContextCache { + prev_id: block_info.block.header.previous, + cache: alt_context_cache, + _token: AltChainRequestToken, + }) + .await?; + + Ok(VerifyBlockResponse::AltChain(block_info)) +} + +/// Retrieves the alt RX VM for the chosen block height. +/// +/// If the `hf` is less than 12 (the height RX activates), then [`None`] is returned. 
+async fn alt_rx_vm( + block_height: u64, + hf: u8, + parent_chain: Chain, + alt_chain_context: &mut AltChainContextCache, + context_svc: C, +) -> Result>, ExtendedConsensusError> +where + C: Service< + BlockChainContextRequest, + Response = BlockChainContextResponse, + Error = tower::BoxError, + > + Send, + C::Future: Send + 'static, +{ + if hf < 12 { + return Ok(None); + } + + let seed_height = randomx_seed_height(block_height); + + let cached_vm = match alt_chain_context.cached_rx_vm.take() { + // If the VM is cached and the height is the height we need, we can use this VM. + Some((cached_seed_height, vm)) if seed_height == cached_seed_height => { + (cached_seed_height, vm) + } + // Otherwise we need to make a new VM. + _ => { + let BlockChainContextResponse::AltChainRxVM(vm) = context_svc + .oneshot(BlockChainContextRequest::AltChainRxVM { + height: block_height, + chain: parent_chain, + _token: AltChainRequestToken, + }) + .await? + else { + panic!("Context service returned wrong response!"); + }; + + (seed_height, vm) + } + }; + + Ok(Some( + alt_chain_context.cached_rx_vm.insert(cached_vm).1.clone(), + )) +} + +/// Returns the [`DifficultyCache`] for the alt chain. +async fn alt_difficulty_cache( + prev_id: [u8; 32], + alt_chain_context: &mut AltChainContextCache, + context_svc: C, +) -> Result<&mut DifficultyCache, ExtendedConsensusError> +where + C: Service< + BlockChainContextRequest, + Response = BlockChainContextResponse, + Error = tower::BoxError, + > + Send, + C::Future: Send + 'static, +{ + // First look to see if the difficulty cache for this alt chain is already cached. + match &mut alt_chain_context.difficulty_cache { + Some(cache) => Ok(cache), + // Otherwise make a new one. + difficulty_cache => { + let BlockChainContextResponse::AltChainDifficultyCache(cache) = context_svc + .oneshot(BlockChainContextRequest::AltChainDifficultyCache { + prev_id, + _token: AltChainRequestToken, + }) + .await? 
+ else { + panic!("Context service returned wrong response!"); + }; + + Ok(difficulty_cache.insert(cache)) + } + } +} + +/// Returns the [`BlockWeightsCache`] for the alt chain. +async fn alt_weight_cache( + prev_id: [u8; 32], + alt_chain_context: &mut AltChainContextCache, + context_svc: C, +) -> Result<&mut BlockWeightsCache, ExtendedConsensusError> +where + C: Service< + BlockChainContextRequest, + Response = BlockChainContextResponse, + Error = tower::BoxError, + > + Send, + C::Future: Send + 'static, +{ + // First look to see if the weight cache for this alt chain is already cached. + match &mut alt_chain_context.weight_cache { + Some(cache) => Ok(cache), + // Otherwise make a new one. + weight_cache => { + let BlockChainContextResponse::AltChainWeightCache(cache) = context_svc + .oneshot(BlockChainContextRequest::AltChainWeightCache { + prev_id, + _token: AltChainRequestToken, + }) + .await? + else { + panic!("Context service returned wrong response!"); + }; + + Ok(weight_cache.insert(cache)) + } + } +} diff --git a/consensus/src/block/batch_prepare.rs b/consensus/src/block/batch_prepare.rs new file mode 100644 index 0000000..64d1ccb --- /dev/null +++ b/consensus/src/block/batch_prepare.rs @@ -0,0 +1,207 @@ +use std::{collections::HashMap, sync::Arc}; + +use monero_serai::{block::Block, transaction::Transaction}; +use rayon::prelude::*; +use tower::{Service, ServiceExt}; +use tracing::instrument; + +use cuprate_consensus_rules::{ + blocks::{check_block_pow, is_randomx_seed_height, randomx_seed_height, BlockError}, + hard_forks::HardForkError, + miner_tx::MinerTxError, + ConsensusError, HardFork, +}; +use cuprate_helper::asynch::rayon_spawn_async; + +use crate::{ + block::{free::pull_ordered_transactions, PreparedBlock, PreparedBlockExPow}, + context::rx_vms::RandomXVM, + transactions::TransactionVerificationData, + BlockChainContextRequest, BlockChainContextResponse, ExtendedConsensusError, + VerifyBlockResponse, +}; + +/// Batch prepares a list of blocks for 
verification. +#[instrument(level = "debug", name = "batch_prep_blocks", skip_all, fields(amt = blocks.len()))] +pub(crate) async fn batch_prepare_main_chain_block( + blocks: Vec<(Block, Vec)>, + mut context_svc: C, +) -> Result +where + C: Service< + BlockChainContextRequest, + Response = BlockChainContextResponse, + Error = tower::BoxError, + > + Send + + 'static, + C::Future: Send + 'static, +{ + let (blocks, txs): (Vec<_>, Vec<_>) = blocks.into_iter().unzip(); + + tracing::debug!("Calculating block hashes."); + let blocks: Vec = rayon_spawn_async(|| { + blocks + .into_iter() + .map(PreparedBlockExPow::new) + .collect::, _>>() + }) + .await?; + + let Some(last_block) = blocks.last() else { + return Err(ExtendedConsensusError::NoBlocksToVerify); + }; + + // hard-forks cannot be reversed, so the last block will contain the highest hard fork (provided the + // batch is valid). + let top_hf_in_batch = last_block.hf_version; + + // A Vec of (timestamp, HF) for each block to calculate the expected difficulty for each block. + let mut timestamps_hfs = Vec::with_capacity(blocks.len()); + let mut new_rx_vm = None; + + tracing::debug!("Checking blocks follow each other."); + + // For every block make sure they have the correct height and previous ID + for window in blocks.windows(2) { + let block_0 = &window[0]; + let block_1 = &window[1]; + + // Make sure no blocks in the batch have a higher hard fork than the last block. + if block_0.hf_version > top_hf_in_batch { + Err(ConsensusError::Block(BlockError::HardForkError( + HardForkError::VersionIncorrect, + )))?; + } + + if block_0.block_hash != block_1.block.header.previous + || block_0.height != block_1.height - 1 + { + tracing::debug!("Blocks do not follow each other, verification failed."); + Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?; + } + + // Cache any potential RX VM seeds as we may need them for future blocks in the batch. 
+ if is_randomx_seed_height(block_0.height) && top_hf_in_batch >= HardFork::V12 { + new_rx_vm = Some((block_0.height, block_0.block_hash)); + } + + timestamps_hfs.push((block_0.block.header.timestamp, block_0.hf_version)) + } + + // Get the current blockchain context. + let BlockChainContextResponse::Context(checked_context) = context_svc + .ready() + .await? + .call(BlockChainContextRequest::GetContext) + .await? + else { + panic!("Context service returned wrong response!"); + }; + + // Calculate the expected difficulties for each block in the batch. + let BlockChainContextResponse::BatchDifficulties(difficulties) = context_svc + .ready() + .await? + .call(BlockChainContextRequest::BatchGetDifficulties( + timestamps_hfs, + )) + .await? + else { + panic!("Context service returned wrong response!"); + }; + + let context = checked_context.unchecked_blockchain_context().clone(); + + // Make sure the blocks follow the main chain. + + if context.chain_height != blocks[0].height { + tracing::debug!("Blocks do not follow main chain, verification failed."); + + Err(ConsensusError::Block(BlockError::MinerTxError( + MinerTxError::InputsHeightIncorrect, + )))?; + } + + if context.top_hash != blocks[0].block.header.previous { + tracing::debug!("Blocks do not follow main chain, verification failed."); + + Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?; + } + + let mut rx_vms = if top_hf_in_batch < HardFork::V12 { + HashMap::new() + } else { + let BlockChainContextResponse::RxVms(rx_vms) = context_svc + .ready() + .await? + .call(BlockChainContextRequest::GetCurrentRxVm) + .await? + else { + panic!("Blockchain context service returned wrong response!"); + }; + + rx_vms + }; + + // If we have a RX seed in the batch calculate it. 
+ if let Some((new_vm_height, new_vm_seed)) = new_rx_vm { + tracing::debug!("New randomX seed in batch, initialising VM"); + + let new_vm = rayon_spawn_async(move || { + Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!")) + }) + .await; + + // Give the new VM to the context service, so it can cache it. + context_svc + .oneshot(BlockChainContextRequest::NewRXVM(( + new_vm_seed, + new_vm.clone(), + ))) + .await?; + + rx_vms.insert(new_vm_height, new_vm); + } + + tracing::debug!("Calculating PoW and prepping transaction"); + + let blocks = rayon_spawn_async(move || { + blocks + .into_par_iter() + .zip(difficulties) + .zip(txs) + .map(|((block, difficultly), txs)| { + // Calculate the PoW for the block. + let height = block.height; + let block = PreparedBlock::new_prepped( + block, + rx_vms.get(&randomx_seed_height(height)).map(AsRef::as_ref), + )?; + + // Check the PoW + check_block_pow(&block.pow_hash, difficultly).map_err(ConsensusError::Block)?; + + // Now setup the txs. + let txs = txs + .into_par_iter() + .map(|tx| { + let tx = TransactionVerificationData::new(tx)?; + Ok::<_, ConsensusError>((tx.tx_hash, tx)) + }) + .collect::, _>>()?; + + // Order the txs correctly. + // TODO: Remove the Arc here + let ordered_txs = pull_ordered_transactions(&block.block, txs)? + .into_iter() + .map(Arc::new) + .collect(); + + Ok((block, ordered_txs)) + }) + .collect::, ExtendedConsensusError>>() + }) + .await?; + + Ok(VerifyBlockResponse::MainChainBatchPrepped(blocks)) +} diff --git a/consensus/src/block/free.rs b/consensus/src/block/free.rs new file mode 100644 index 0000000..8a61e80 --- /dev/null +++ b/consensus/src/block/free.rs @@ -0,0 +1,32 @@ +//! Free functions for block verification +use std::collections::HashMap; + +use monero_serai::block::Block; + +use crate::{transactions::TransactionVerificationData, ExtendedConsensusError}; + +/// Returns a list of transactions, pulled from `txs` in the order they are in the [`Block`]. 
+/// +/// Will error if a tx need is not in `txs` or if `txs` contain more txs than needed. +pub(crate) fn pull_ordered_transactions( + block: &Block, + mut txs: HashMap<[u8; 32], TransactionVerificationData>, +) -> Result, ExtendedConsensusError> { + if block.txs.len() != txs.len() { + return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect); + } + + let mut ordered_txs = Vec::with_capacity(txs.len()); + + if !block.txs.is_empty() { + for tx_hash in &block.txs { + let tx = txs + .remove(tx_hash) + .ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?; + ordered_txs.push(tx); + } + drop(txs); + } + + Ok(ordered_txs) +} diff --git a/consensus/src/context.rs b/consensus/src/context.rs index 0752b8b..fffbe90 100644 --- a/consensus/src/context.rs +++ b/consensus/src/context.rs @@ -27,16 +27,22 @@ pub(crate) mod hardforks; pub(crate) mod rx_vms; pub(crate) mod weight; +mod alt_chains; mod task; mod tokens; +use cuprate_types::Chain; +use difficulty::DifficultyCache; +use rx_vms::RandomXVM; +use weight::BlockWeightsCache; + +pub(crate) use alt_chains::{sealed::AltChainRequestToken, AltChainContextCache}; pub use difficulty::DifficultyCacheConfig; pub use hardforks::HardForkConfig; -use rx_vms::RandomXVM; pub use tokens::*; pub use weight::BlockWeightsCacheConfig; -const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: u64 = 60; +pub(crate) const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: u64 = 60; /// Config for the context service. pub struct ContextConfig { @@ -233,6 +239,74 @@ pub enum BlockChainContextRequest { NewRXVM(([u8; 32], Arc)), /// A request to add a new block to the cache. Update(NewBlockData), + /// Pop blocks from the cache to the specified height. + PopBlocks { + /// The number of blocks to pop from the top of the chain. + /// + /// # Panics + /// + /// This will panic if the number of blocks will pop the genesis block. + numb_blocks: u64, + }, + /// Clear the alt chain context caches. 
+ ClearAltCache, + //----------------------------------------------------------------------------------------------------------- AltChainRequests + /// A request for an alt chain context cache. + /// + /// This variant is private and is not callable from outside this crate, the block verifier service will + /// handle getting the alt cache. + AltChainContextCache { + /// The previous block field in a [`BlockHeader`](monero_serai::block::BlockHeader). + prev_id: [u8; 32], + /// An internal token to prevent external crates calling this request. + _token: AltChainRequestToken, + }, + /// A request for a difficulty cache of an alternative chin. + /// + /// This variant is private and is not callable from outside this crate, the block verifier service will + /// handle getting the difficulty cache of an alt chain. + AltChainDifficultyCache { + /// The previous block field in a [`BlockHeader`](monero_serai::block::BlockHeader). + prev_id: [u8; 32], + /// An internal token to prevent external crates calling this request. + _token: AltChainRequestToken, + }, + /// A request for a block weight cache of an alternative chin. + /// + /// This variant is private and is not callable from outside this crate, the block verifier service will + /// handle getting the weight cache of an alt chain. + AltChainWeightCache { + /// The previous block field in a [`BlockHeader`](monero_serai::block::BlockHeader). + prev_id: [u8; 32], + /// An internal token to prevent external crates calling this request. + _token: AltChainRequestToken, + }, + /// A request for a RX VM for an alternative chin. + /// + /// Response variant: [`BlockChainContextResponse::AltChainRxVM`]. + /// + /// This variant is private and is not callable from outside this crate, the block verifier service will + /// handle getting the randomX VM of an alt chain. + AltChainRxVM { + /// The height the RandomX VM is needed for. + height: u64, + /// The chain to look in for the seed. 
+ chain: Chain, + /// An internal token to prevent external crates calling this request. + _token: AltChainRequestToken, + }, + /// A request to add an alt chain context cache to the context cache. + /// + /// This variant is private and is not callable from outside this crate, the block verifier service will + /// handle returning the alt cache to the context service. + AddAltChainContextCache { + /// The previous block field in a [`BlockHeader`](monero_serai::block::BlockHeader). + prev_id: [u8; 32], + /// The cache. + cache: Box, + /// An internal token to prevent external crates calling this request. + _token: AltChainRequestToken, + }, } pub enum BlockChainContextResponse { @@ -242,7 +316,15 @@ pub enum BlockChainContextResponse { RxVms(HashMap>), /// A list of difficulties. BatchDifficulties(Vec), - /// Ok response. + /// An alt chain context cache. + AltChainContextCache(Box), + /// A difficulty cache for an alt chain. + AltChainDifficultyCache(DifficultyCache), + /// A randomX VM for an alt chain. + AltChainRxVM(Arc), + /// A weight cache for an alt chain + AltChainWeightCache(BlockWeightsCache), + /// A generic Ok response. Ok, } diff --git a/consensus/src/context/alt_chains.rs b/consensus/src/context/alt_chains.rs new file mode 100644 index 0000000..71af8a1 --- /dev/null +++ b/consensus/src/context/alt_chains.rs @@ -0,0 +1,215 @@ +use std::{collections::HashMap, sync::Arc}; + +use tower::ServiceExt; + +use cuprate_consensus_rules::{blocks::BlockError, ConsensusError}; +use cuprate_types::{ + blockchain::{BCReadRequest, BCResponse}, + Chain, ChainId, +}; + +use crate::{ + ExtendedConsensusError, + __private::Database, + context::{difficulty::DifficultyCache, rx_vms::RandomXVM, weight::BlockWeightsCache}, +}; + +pub(crate) mod sealed { + /// A token that should be hard to create from outside this crate. 
+ ///
+ /// It is currently possible to safely create this from outside this crate, **DO NOT** rely on this
+ /// as it will be broken once we find a way to completely seal this.
+ #[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
+ pub struct AltChainRequestToken;
+ }
+
+ /// The context cache of an alternative chain.
+ #[derive(Debug, Clone)]
+ pub struct AltChainContextCache {
+ /// The alt chain weight cache, [`None`] if it has not been built yet.
+ pub weight_cache: Option,
+ /// The alt chain difficulty cache, [`None`] if it has not been built yet.
+ pub difficulty_cache: Option,
+
+ /// A cached RX VM.
+ pub cached_rx_vm: Option<(u64, Arc)>,
+
+ /// The chain height of the alt chain.
+ pub chain_height: u64,
+ /// The top hash of the alt chain.
+ pub top_hash: [u8; 32],
+ /// The [`ChainId`] of the alt chain.
+ pub chain_id: Option,
+ /// The parent [`Chain`] of this alt chain.
+ pub parent_chain: Chain,
+ }
+
+ impl AltChainContextCache {
+ /// Add a new block to the cache.
+ pub fn add_new_block(
+ &mut self,
+ height: u64,
+ block_hash: [u8; 32],
+ block_weight: usize,
+ long_term_block_weight: usize,
+ timestamp: u64,
+ ) {
+ if let Some(difficulty_cache) = &mut self.difficulty_cache {
+ difficulty_cache.new_block(height, timestamp, difficulty_cache.cumulative_difficulty());
+ }
+
+ if let Some(weight_cache) = &mut self.weight_cache {
+ weight_cache.new_block(height, block_weight, long_term_block_weight);
+ }
+
+ self.chain_height += 1;
+ self.top_hash = block_hash;
+ }
+ }
+
+ /// A map of top IDs to alt chains.
+ pub struct AltChainMap {
+ alt_cache_map: HashMap<[u8; 32], Box>,
+ }
+
+ impl AltChainMap {
+ pub fn new() -> Self {
+ Self {
+ alt_cache_map: HashMap::new(),
+ }
+ }
+
+ pub fn clear(&mut self) {
+ self.alt_cache_map.clear();
+ }
+
+ /// Add an alt chain cache to the map.
+ pub fn add_alt_cache(&mut self, prev_id: [u8; 32], alt_cache: Box) { + self.alt_cache_map.insert(prev_id, alt_cache); + } + + /// Attempts to take an [`AltChainContextCache`] from the map, returning [`None`] if no cache is + /// present. + pub async fn get_alt_chain_context( + &mut self, + prev_id: [u8; 32], + database: D, + ) -> Result, ExtendedConsensusError> { + if let Some(cache) = self.alt_cache_map.remove(&prev_id) { + return Ok(cache); + } + + // find the block with hash == prev_id. + let BCResponse::FindBlock(res) = + database.oneshot(BCReadRequest::FindBlock(prev_id)).await? + else { + panic!("Database returned wrong response"); + }; + + let Some((parent_chain, top_height)) = res else { + // Couldn't find prev_id + Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))? + }; + + Ok(Box::new(AltChainContextCache { + weight_cache: None, + difficulty_cache: None, + cached_rx_vm: None, + chain_height: top_height, + top_hash: prev_id, + chain_id: None, + parent_chain, + })) + } +} + +/// Builds a [`DifficultyCache`] for an alt chain. +pub async fn get_alt_chain_difficulty_cache( + prev_id: [u8; 32], + main_chain_difficulty_cache: &DifficultyCache, + mut database: D, +) -> Result { + // find the block with hash == prev_id. + let BCResponse::FindBlock(res) = database + .ready() + .await? + .call(BCReadRequest::FindBlock(prev_id)) + .await? + else { + panic!("Database returned wrong response"); + }; + + let Some((chain, top_height)) = res else { + // Can't find prev_id + Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))? + }; + + Ok(match chain { + Chain::Main => { + // prev_id is in main chain, we can use the fast path and clone the main chain cache. + let mut difficulty_cache = main_chain_difficulty_cache.clone(); + difficulty_cache + .pop_blocks_main_chain( + difficulty_cache.last_accounted_height - top_height, + database, + ) + .await?; + + difficulty_cache + } + Chain::Alt(_) => { + // prev_id is in an alt chain, completely rebuild the cache. 
+ DifficultyCache::init_from_chain_height( + top_height + 1, + main_chain_difficulty_cache.config, + database, + chain, + ) + .await? + } + }) +} + +/// Builds a [`BlockWeightsCache`] for an alt chain. +pub async fn get_alt_chain_weight_cache( + prev_id: [u8; 32], + main_chain_weight_cache: &BlockWeightsCache, + mut database: D, +) -> Result { + // find the block with hash == prev_id. + let BCResponse::FindBlock(res) = database + .ready() + .await? + .call(BCReadRequest::FindBlock(prev_id)) + .await? + else { + panic!("Database returned wrong response"); + }; + + let Some((chain, top_height)) = res else { + // Can't find prev_id + Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))? + }; + + Ok(match chain { + Chain::Main => { + // prev_id is in main chain, we can use the fast path and clone the main chain cache. + let mut weight_cache = main_chain_weight_cache.clone(); + weight_cache + .pop_blocks_main_chain(weight_cache.tip_height - top_height, database) + .await?; + + weight_cache + } + Chain::Alt(_) => { + // prev_id is in an alt chain, completely rebuild the cache. + BlockWeightsCache::init_from_chain_height( + top_height + 1, + main_chain_weight_cache.config, + database, + chain, + ) + .await? + } + }) +} diff --git a/consensus/src/context/difficulty.rs b/consensus/src/context/difficulty.rs index 9c8321f..b025dfc 100644 --- a/consensus/src/context/difficulty.rs +++ b/consensus/src/context/difficulty.rs @@ -12,7 +12,10 @@ use tower::ServiceExt; use tracing::instrument; use cuprate_helper::num::median; -use cuprate_types::blockchain::{BCReadRequest, BCResponse}; +use cuprate_types::{ + blockchain::{BCReadRequest, BCResponse}, + Chain, +}; use crate::{Database, ExtendedConsensusError, HardFork}; @@ -28,7 +31,7 @@ const DIFFICULTY_LAG: usize = 15; /// Configuration for the difficulty cache. 
/// -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct DifficultyCacheConfig { pub(crate) window: usize, pub(crate) cut: usize, @@ -68,7 +71,7 @@ impl DifficultyCacheConfig { /// This struct is able to calculate difficulties from blockchain information. /// #[derive(Debug, Clone, Eq, PartialEq)] -pub(crate) struct DifficultyCache { +pub struct DifficultyCache { /// The list of timestamps in the window. /// len <= [`DIFFICULTY_BLOCKS_COUNT`] pub(crate) timestamps: VecDeque, @@ -87,6 +90,7 @@ impl DifficultyCache { chain_height: u64, config: DifficultyCacheConfig, database: D, + chain: Chain, ) -> Result { tracing::info!("Initializing difficulty cache this may take a while."); @@ -98,7 +102,9 @@ impl DifficultyCache { } let (timestamps, cumulative_difficulties) = - get_blocks_in_pow_info(database.clone(), block_start..chain_height).await?; + get_blocks_in_pow_info(database.clone(), block_start..chain_height, chain).await?; + + debug_assert_eq!(timestamps.len() as u64, chain_height - block_start); tracing::info!( "Current chain height: {}, accounting for {} blocks timestamps", @@ -116,6 +122,70 @@ impl DifficultyCache { Ok(diff) } + /// Pop some blocks from the top of the cache. + /// + /// The cache will be returned to the state it would have been in `numb_blocks` ago. + /// + /// # Invariant + /// + /// This _must_ only be used on a main-chain cache. + #[instrument(name = "pop_blocks_diff_cache", skip_all, fields(numb_blocks = numb_blocks))] + pub async fn pop_blocks_main_chain( + &mut self, + numb_blocks: u64, + database: D, + ) -> Result<(), ExtendedConsensusError> { + let Some(retained_blocks) = self + .timestamps + .len() + .checked_sub(usize::try_from(numb_blocks).unwrap()) + else { + // More blocks to pop than we have in the cache, so just restart a new cache. 
+ *self = Self::init_from_chain_height( + self.last_accounted_height - numb_blocks + 1, + self.config, + database, + Chain::Main, + ) + .await?; + + return Ok(()); + }; + + let current_chain_height = self.last_accounted_height + 1; + + let mut new_start_height = current_chain_height + .saturating_sub(self.config.total_block_count()) + .saturating_sub(numb_blocks); + + // skip the genesis block. + if new_start_height == 0 { + new_start_height = 1; + } + + let (mut timestamps, mut cumulative_difficulties) = get_blocks_in_pow_info( + database, + new_start_height + // current_chain_height - self.timestamps.len() blocks are already in the cache. + ..(current_chain_height - u64::try_from(self.timestamps.len()).unwrap()), + Chain::Main, + ) + .await?; + + self.timestamps.drain(retained_blocks..); + self.cumulative_difficulties.drain(retained_blocks..); + timestamps.append(&mut self.timestamps); + cumulative_difficulties.append(&mut self.cumulative_difficulties); + + self.timestamps = timestamps; + self.cumulative_difficulties = cumulative_difficulties; + self.last_accounted_height -= numb_blocks; + + assert_eq!(self.timestamps.len(), self.cumulative_difficulties.len()); + + Ok(()) + } + /// Add a new block to the difficulty cache. pub fn new_block(&mut self, height: u64, timestamp: u64, cumulative_difficulty: u128) { assert_eq!(self.last_accounted_height + 1, height); @@ -200,7 +270,7 @@ impl DifficultyCache { if self.last_accounted_height + 1 == u64::try_from(numb_blocks).unwrap() { // if the chain height is equal to `numb_blocks` add the genesis block. // otherwise if the chain height is less than `numb_blocks` None is returned - // and if its more than it would be excluded from calculations. + // and if it's more it would be excluded from calculations. let mut timestamps = self.timestamps.clone(); // all genesis blocks have a timestamp of 0. 
// https://cuprate.github.io/monero-book/consensus_rules/genesis_block.html @@ -299,11 +369,15 @@ fn get_window_start_and_end( async fn get_blocks_in_pow_info( database: D, block_heights: Range, + chain: Chain, ) -> Result<(VecDeque, VecDeque), ExtendedConsensusError> { tracing::info!("Getting blocks timestamps"); let BCResponse::BlockExtendedHeaderInRange(ext_header) = database - .oneshot(BCReadRequest::BlockExtendedHeaderInRange(block_heights)) + .oneshot(BCReadRequest::BlockExtendedHeaderInRange( + block_heights, + chain, + )) .await? else { panic!("Database sent incorrect response"); diff --git a/consensus/src/context/hardforks.rs b/consensus/src/context/hardforks.rs index 92182c7..2243350 100644 --- a/consensus/src/context/hardforks.rs +++ b/consensus/src/context/hardforks.rs @@ -4,7 +4,10 @@ use tower::ServiceExt; use tracing::instrument; use cuprate_consensus_rules::{HFVotes, HFsInfo, HardFork}; -use cuprate_types::blockchain::{BCReadRequest, BCResponse}; +use cuprate_types::{ + blockchain::{BCReadRequest, BCResponse}, + Chain, +}; use crate::{Database, ExtendedConsensusError}; @@ -15,7 +18,7 @@ const DEFAULT_WINDOW_SIZE: u64 = 10080; // supermajority window check length - a /// Configuration for hard-forks. /// -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct HardForkConfig { /// The network we are on. pub(crate) info: HFsInfo, @@ -50,7 +53,7 @@ impl HardForkConfig { } /// A struct that keeps track of the current hard-fork and current votes. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Eq, PartialEq)] pub struct HardForkState { /// The current active hard-fork. pub(crate) current_hardfork: HardFork, @@ -117,6 +120,50 @@ impl HardForkState { Ok(hfs) } + /// Pop some blocks from the top of the cache. + /// + /// The cache will be returned to the state it would have been in `numb_blocks` ago. + /// + /// # Invariant + /// + /// This _must_ only be used on a main-chain cache. 
+ pub async fn pop_blocks_main_chain( + &mut self, + numb_blocks: u64, + database: D, + ) -> Result<(), ExtendedConsensusError> { + let Some(retained_blocks) = self.votes.total_votes().checked_sub(self.config.window) else { + *self = Self::init_from_chain_height( + self.last_height + 1 - numb_blocks, + self.config, + database, + ) + .await?; + + return Ok(()); + }; + + let current_chain_height = self.last_height + 1; + + let oldest_votes = get_votes_in_range( + database, + current_chain_height + .saturating_sub(self.config.window) + .saturating_sub(numb_blocks) + ..current_chain_height + .saturating_sub(numb_blocks) + .saturating_sub(retained_blocks), + usize::try_from(numb_blocks).unwrap(), + ) + .await?; + + self.votes + .reverse_blocks(usize::try_from(numb_blocks).unwrap(), oldest_votes); + self.last_height -= numb_blocks; + + Ok(()) + } + /// Add a new block to the cache. pub fn new_block(&mut self, vote: HardFork, height: u64) { // We don't _need_ to take in `height` but it's for safety, so we don't silently loose track @@ -168,7 +215,10 @@ async fn get_votes_in_range( let mut votes = HFVotes::new(window_size); let BCResponse::BlockExtendedHeaderInRange(vote_list) = database - .oneshot(BCReadRequest::BlockExtendedHeaderInRange(block_heights)) + .oneshot(BCReadRequest::BlockExtendedHeaderInRange( + block_heights, + Chain::Main, + )) .await? 
else { panic!("Database sent incorrect response!"); diff --git a/consensus/src/context/rx_vms.rs b/consensus/src/context/rx_vms.rs index 08ecb95..3154648 100644 --- a/consensus/src/context/rx_vms.rs +++ b/consensus/src/context/rx_vms.rs @@ -15,12 +15,16 @@ use thread_local::ThreadLocal; use tower::ServiceExt; use tracing::instrument; +use cuprate_consensus_rules::blocks::randomx_seed_height; use cuprate_consensus_rules::{ blocks::{is_randomx_seed_height, RandomX, RX_SEEDHASH_EPOCH_BLOCKS}, HardFork, }; use cuprate_helper::asynch::rayon_spawn_async; -use cuprate_types::blockchain::{BCReadRequest, BCResponse}; +use cuprate_types::{ + blockchain::{BCReadRequest, BCResponse}, + Chain, +}; use crate::{Database, ExtendedConsensusError}; @@ -124,7 +128,39 @@ impl RandomXVMCache { self.cached_vm.replace(vm); } - /// Get the RandomX VMs. + /// Creates a RX VM for an alt chain, looking at the main chain RX VMs to see if we can use one + /// of them first. + pub async fn get_alt_vm( + &mut self, + height: u64, + chain: Chain, + database: D, + ) -> Result, ExtendedConsensusError> { + let seed_height = randomx_seed_height(height); + + let BCResponse::BlockHash(seed_hash) = database + .oneshot(BCReadRequest::BlockHash(seed_height, chain)) + .await? + else { + panic!("Database returned wrong response!"); + }; + + for (vm_main_chain_height, vm_seed_hash) in &self.seeds { + if vm_seed_hash == &seed_hash { + let Some(vm) = self.vms.get(vm_main_chain_height) else { + break; + }; + + return Ok(vm.clone()); + } + } + + let alt_vm = rayon_spawn_async(move || Arc::new(RandomXVM::new(&seed_hash).unwrap())).await; + + Ok(alt_vm) + } + + /// Get the main-chain RandomX VMs. pub async fn get_vms(&mut self) -> HashMap> { match self.seeds.len().checked_sub(self.vms.len()) { // No difference in the amount of seeds to VMs. @@ -176,6 +212,12 @@ impl RandomXVMCache { self.vms.clone() } + /// Removes all the RandomX VMs above the `new_height`. 
+ pub fn pop_blocks_main_chain(&mut self, new_height: u64) { + self.seeds.retain(|(height, _)| *height < new_height); + self.vms.retain(|height, _| *height < new_height); + } + /// Add a new block to the VM cache. /// /// hash is the block hash not the blocks PoW hash. @@ -231,8 +273,10 @@ async fn get_block_hashes( for height in heights { let db = database.clone(); fut.push_back(async move { - let BCResponse::BlockHash(hash) = - db.clone().oneshot(BCReadRequest::BlockHash(height)).await? + let BCResponse::BlockHash(hash) = db + .clone() + .oneshot(BCReadRequest::BlockHash(height, Chain::Main)) + .await? else { panic!("Database sent incorrect response!"); }; diff --git a/consensus/src/context/task.rs b/consensus/src/context/task.rs index 108922d..1fa68a2 100644 --- a/consensus/src/context/task.rs +++ b/consensus/src/context/task.rs @@ -9,14 +9,20 @@ use tower::ServiceExt; use tracing::Instrument; use cuprate_consensus_rules::blocks::ContextToVerifyBlock; -use cuprate_types::blockchain::{BCReadRequest, BCResponse}; - -use super::{ - difficulty, hardforks, rx_vms, weight, BlockChainContext, BlockChainContextRequest, - BlockChainContextResponse, ContextConfig, RawBlockChainContext, ValidityToken, - BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW, +use cuprate_types::{ + blockchain::{BCReadRequest, BCResponse}, + Chain, +}; + +use crate::{ + context::{ + alt_chains::{get_alt_chain_difficulty_cache, get_alt_chain_weight_cache, AltChainMap}, + difficulty, hardforks, rx_vms, weight, BlockChainContext, BlockChainContextRequest, + BlockChainContextResponse, ContextConfig, RawBlockChainContext, ValidityToken, + BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW, + }, + Database, ExtendedConsensusError, }; -use crate::{Database, ExtendedConsensusError}; /// A request from the context service to the context task. pub(super) struct ContextTaskRequest { @@ -29,7 +35,7 @@ pub(super) struct ContextTaskRequest { } /// The Context task that keeps the blockchain context and handles requests. 
-pub struct ContextTask { +pub struct ContextTask { /// A token used to invalidate previous contexts when a new /// block is added to the chain. current_validity_token: ValidityToken, @@ -43,25 +49,25 @@ pub struct ContextTask { /// The hard-fork state cache. hardfork_state: hardforks::HardForkState, + alt_chain_cache_map: AltChainMap, + /// The current chain height. chain_height: u64, /// The top block hash. top_block_hash: [u8; 32], /// The total amount of coins generated. already_generated_coins: u64, + + database: D, } -impl ContextTask { +impl ContextTask { /// Initialize the [`ContextTask`], this will need to pull a lot of data from the database so may take a /// while to complete. - pub async fn init_context( + pub async fn init_context( cfg: ContextConfig, mut database: D, - ) -> Result - where - D: Database + Clone + Send + Sync + 'static, - D::Future: Send + 'static, - { + ) -> Result { let ContextConfig { difficulty_cfg, weights_config, @@ -82,7 +88,7 @@ impl ContextTask { let BCResponse::GeneratedCoins(already_generated_coins) = database .ready() .await? - .call(BCReadRequest::GeneratedCoins) + .call(BCReadRequest::GeneratedCoins(chain_height - 1)) .await? else { panic!("Database sent incorrect response!"); @@ -95,14 +101,24 @@ impl ContextTask { let db = database.clone(); let difficulty_cache_handle = tokio::spawn(async move { - difficulty::DifficultyCache::init_from_chain_height(chain_height, difficulty_cfg, db) - .await + difficulty::DifficultyCache::init_from_chain_height( + chain_height, + difficulty_cfg, + db, + Chain::Main, + ) + .await }); let db = database.clone(); let weight_cache_handle = tokio::spawn(async move { - weight::BlockWeightsCache::init_from_chain_height(chain_height, weights_config, db) - .await + weight::BlockWeightsCache::init_from_chain_height( + chain_height, + weights_config, + db, + Chain::Main, + ) + .await }); // Wait for the hardfork state to finish first as we need it to start the randomX VM cache. 
@@ -120,9 +136,11 @@ impl ContextTask { weight_cache: weight_cache_handle.await.unwrap()?, rx_vm_cache: rx_seed_handle.await.unwrap()?, hardfork_state, + alt_chain_cache_map: AltChainMap::new(), chain_height, already_generated_coins, top_block_hash, + database, }; Ok(context_svc) @@ -211,6 +229,98 @@ impl ContextTask { BlockChainContextResponse::Ok } + BlockChainContextRequest::PopBlocks { numb_blocks } => { + assert!(numb_blocks < self.chain_height); + + self.difficulty_cache + .pop_blocks_main_chain(numb_blocks, self.database.clone()) + .await?; + self.weight_cache + .pop_blocks_main_chain(numb_blocks, self.database.clone()) + .await?; + self.rx_vm_cache + .pop_blocks_main_chain(self.chain_height - numb_blocks - 1); + self.hardfork_state + .pop_blocks_main_chain(numb_blocks, self.database.clone()) + .await?; + + self.alt_chain_cache_map.clear(); + + self.chain_height -= numb_blocks; + + let BCResponse::GeneratedCoins(already_generated_coins) = self + .database + .ready() + .await? + .call(BCReadRequest::GeneratedCoins(self.chain_height - 1)) + .await? + else { + panic!("Database sent incorrect response!"); + }; + + let BCResponse::BlockHash(top_block_hash) = self + .database + .ready() + .await? + .call(BCReadRequest::BlockHash(self.chain_height - 1, Chain::Main)) + .await? 
+ else { + panic!("Database returned incorrect response!"); + }; + + self.already_generated_coins = already_generated_coins; + self.top_block_hash = top_block_hash; + + std::mem::replace(&mut self.current_validity_token, ValidityToken::new()) + .set_data_invalid(); + + BlockChainContextResponse::Ok + } + BlockChainContextRequest::ClearAltCache => { + self.alt_chain_cache_map.clear(); + + BlockChainContextResponse::Ok + } + BlockChainContextRequest::AltChainContextCache { prev_id, _token } => { + BlockChainContextResponse::AltChainContextCache( + self.alt_chain_cache_map + .get_alt_chain_context(prev_id, &mut self.database) + .await?, + ) + } + BlockChainContextRequest::AltChainDifficultyCache { prev_id, _token } => { + BlockChainContextResponse::AltChainDifficultyCache( + get_alt_chain_difficulty_cache( + prev_id, + &self.difficulty_cache, + self.database.clone(), + ) + .await?, + ) + } + BlockChainContextRequest::AltChainWeightCache { prev_id, _token } => { + BlockChainContextResponse::AltChainWeightCache( + get_alt_chain_weight_cache(prev_id, &self.weight_cache, self.database.clone()) + .await?, + ) + } + BlockChainContextRequest::AltChainRxVM { + height, + chain, + _token, + } => BlockChainContextResponse::AltChainRxVM( + self.rx_vm_cache + .get_alt_vm(height, chain, &mut self.database) + .await?, + ), + BlockChainContextRequest::AddAltChainContextCache { + prev_id, + cache, + _token, + } => { + self.alt_chain_cache_map.add_alt_cache(prev_id, cache); + BlockChainContextResponse::Ok + } }) } diff --git a/consensus/src/context/weight.rs b/consensus/src/context/weight.rs index 2511c59..1084086 100644 --- a/consensus/src/context/weight.rs +++ b/consensus/src/context/weight.rs @@ -8,17 +8,18 @@ //! 
use std::{ cmp::{max, min}, - collections::VecDeque, ops::Range, }; -use rayon::prelude::*; use tower::ServiceExt; use tracing::instrument; use cuprate_consensus_rules::blocks::{penalty_free_zone, PENALTY_FREE_ZONE_5}; -use cuprate_helper::{asynch::rayon_spawn_async, num::median}; -use cuprate_types::blockchain::{BCReadRequest, BCResponse}; +use cuprate_helper::{asynch::rayon_spawn_async, num::RollingMedian}; +use cuprate_types::{ + blockchain::{BCReadRequest, BCResponse}, + Chain, +}; use crate::{Database, ExtendedConsensusError, HardFork}; @@ -29,7 +30,7 @@ const LONG_TERM_WINDOW: u64 = 100000; /// Configuration for the block weight cache. /// -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct BlockWeightsCacheConfig { short_term_window: u64, long_term_window: u64, @@ -58,25 +59,17 @@ impl BlockWeightsCacheConfig { /// /// These calculations require a lot of data from the database so by caching /// this data it reduces the load on the database. -#[derive(Clone)] +#[derive(Debug, Clone, Eq, PartialEq)] pub struct BlockWeightsCache { /// The short term block weights. - short_term_block_weights: VecDeque, + short_term_block_weights: RollingMedian, /// The long term block weights. - long_term_weights: VecDeque, - - /// The short term block weights sorted so we don't have to sort them every time we need - /// the median. - cached_sorted_long_term_weights: Vec, - /// The long term block weights sorted so we don't have to sort them every time we need - /// the median. - cached_sorted_short_term_weights: Vec, + long_term_weights: RollingMedian, /// The height of the top block. - tip_height: u64, + pub(crate) tip_height: u64, - /// The block weight config. 
- config: BlockWeightsCacheConfig, + pub(crate) config: BlockWeightsCacheConfig, } impl BlockWeightsCache { @@ -86,45 +79,109 @@ impl BlockWeightsCache { chain_height: u64, config: BlockWeightsCacheConfig, database: D, + chain: Chain, ) -> Result { tracing::info!("Initializing weight cache this may take a while."); let long_term_weights = get_long_term_weight_in_range( chain_height.saturating_sub(config.long_term_window)..chain_height, database.clone(), + chain, ) .await?; let short_term_block_weights = get_blocks_weight_in_range( chain_height.saturating_sub(config.short_term_window)..chain_height, database, + chain, ) .await?; tracing::info!("Initialized block weight cache, chain-height: {:?}, long term weights length: {:?}, short term weights length: {:?}", chain_height, long_term_weights.len(), short_term_block_weights.len()); - let mut cloned_short_term_weights = short_term_block_weights.clone(); - let mut cloned_long_term_weights = long_term_weights.clone(); Ok(BlockWeightsCache { - short_term_block_weights: short_term_block_weights.into(), - long_term_weights: long_term_weights.into(), - - cached_sorted_long_term_weights: rayon_spawn_async(|| { - cloned_long_term_weights.par_sort_unstable(); - cloned_long_term_weights + short_term_block_weights: rayon_spawn_async(move || { + RollingMedian::from_vec( + short_term_block_weights, + usize::try_from(config.short_term_window).unwrap(), + ) }) .await, - cached_sorted_short_term_weights: rayon_spawn_async(|| { - cloned_short_term_weights.par_sort_unstable(); - cloned_short_term_weights + long_term_weights: rayon_spawn_async(move || { + RollingMedian::from_vec( + long_term_weights, + usize::try_from(config.long_term_window).unwrap(), + ) }) .await, - tip_height: chain_height - 1, config, }) } + /// Pop some blocks from the top of the cache. + /// + /// The cache will be returned to the state it would have been in `numb_blocks` ago. 
+ #[instrument(name = "pop_blocks_weight_cache", skip_all, fields(numb_blocks = numb_blocks))]
+ pub async fn pop_blocks_main_chain(
+ &mut self,
+ numb_blocks: u64,
+ database: D,
+ ) -> Result<(), ExtendedConsensusError> {
+ if self.long_term_weights.window_len() <= usize::try_from(numb_blocks).unwrap() {
+ // More blocks to pop than we have in the cache, so just restart a new cache.
+ *self = Self::init_from_chain_height(
+ self.tip_height - numb_blocks + 1,
+ self.config,
+ database,
+ Chain::Main,
+ )
+ .await?;
+
+ return Ok(());
+ }
+
+ let chain_height = self.tip_height + 1;
+
+ let new_long_term_start_height = chain_height
+ .saturating_sub(self.config.long_term_window)
+ .saturating_sub(numb_blocks);
+
+ let old_long_term_weights = get_long_term_weight_in_range(
+ new_long_term_start_height
+ // current_chain_height - self.long_term_weights.len() blocks are already in the cache.
+ ..(chain_height - u64::try_from(self.long_term_weights.window_len()).unwrap()),
+ database.clone(),
+ Chain::Main,
+ )
+ .await?;
+
+ let new_short_term_start_height = chain_height
+ .saturating_sub(self.config.short_term_window)
+ .saturating_sub(numb_blocks);
+
+ let old_short_term_weights = get_blocks_weight_in_range(
+ new_short_term_start_height
+ // current_chain_height - self.short_term_block_weights.len() blocks are already in the cache.
+ ..(chain_height - u64::try_from(self.short_term_block_weights.window_len()).unwrap()),
+ database,
+ Chain::Main
+ )
+ .await?;
+
+ for _ in 0..numb_blocks {
+ self.short_term_block_weights.pop_back();
+ self.long_term_weights.pop_back();
+ }
+
+ self.long_term_weights.append_front(old_long_term_weights);
+ self.short_term_block_weights
+ .append_front(old_short_term_weights);
+ self.tip_height -= numb_blocks;
+
+ Ok(())
+ }
+
+ /// Add a new block to the cache.
/// /// The block_height **MUST** be one more than the last height the cache has @@ -139,74 +196,19 @@ impl BlockWeightsCache { long_term_weight ); - // add the new block to the `long_term_weights` list and the sorted `cached_sorted_long_term_weights` list. - self.long_term_weights.push_back(long_term_weight); - match self - .cached_sorted_long_term_weights - .binary_search(&long_term_weight) - { - Ok(idx) | Err(idx) => self - .cached_sorted_long_term_weights - .insert(idx, long_term_weight), - } + self.long_term_weights.push(long_term_weight); - // If the list now has too many entries remove the oldest. - if u64::try_from(self.long_term_weights.len()).unwrap() > self.config.long_term_window { - let val = self - .long_term_weights - .pop_front() - .expect("long term window can't be negative"); - - match self.cached_sorted_long_term_weights.binary_search(&val) { - Ok(idx) => self.cached_sorted_long_term_weights.remove(idx), - Err(_) => panic!("Long term cache has incorrect values!"), - }; - } - - // add the block to the short_term_block_weights and the sorted cached_sorted_short_term_weights list. - self.short_term_block_weights.push_back(block_weight); - match self - .cached_sorted_short_term_weights - .binary_search(&block_weight) - { - Ok(idx) | Err(idx) => self - .cached_sorted_short_term_weights - .insert(idx, block_weight), - } - - // If there are now too many entries remove the oldest. 
- if u64::try_from(self.short_term_block_weights.len()).unwrap() - > self.config.short_term_window - { - let val = self - .short_term_block_weights - .pop_front() - .expect("short term window can't be negative"); - - match self.cached_sorted_short_term_weights.binary_search(&val) { - Ok(idx) => self.cached_sorted_short_term_weights.remove(idx), - Err(_) => panic!("Short term cache has incorrect values"), - }; - } - - debug_assert_eq!( - self.cached_sorted_long_term_weights.len(), - self.long_term_weights.len() - ); - debug_assert_eq!( - self.cached_sorted_short_term_weights.len(), - self.short_term_block_weights.len() - ); + self.short_term_block_weights.push(block_weight); } /// Returns the median long term weight over the last [`LONG_TERM_WINDOW`] blocks, or custom amount of blocks in the config. pub fn median_long_term_weight(&self) -> usize { - median(&self.cached_sorted_long_term_weights) + self.long_term_weights.median() } /// Returns the median weight over the last [`SHORT_TERM_WINDOW`] blocks, or custom amount of blocks in the config. pub fn median_short_term_weight(&self) -> usize { - median(&self.cached_sorted_short_term_weights) + self.short_term_block_weights.median() } /// Returns the effective median weight, used for block reward calculations and to calculate @@ -290,11 +292,12 @@ pub fn calculate_block_long_term_weight( async fn get_blocks_weight_in_range( range: Range, database: D, + chain: Chain, ) -> Result, ExtendedConsensusError> { tracing::info!("getting block weights."); let BCResponse::BlockExtendedHeaderInRange(ext_headers) = database - .oneshot(BCReadRequest::BlockExtendedHeaderInRange(range)) + .oneshot(BCReadRequest::BlockExtendedHeaderInRange(range, chain)) .await? 
else { panic!("Database sent incorrect response!") @@ -311,11 +314,12 @@ async fn get_blocks_weight_in_range( async fn get_long_term_weight_in_range( range: Range, database: D, + chain: Chain, ) -> Result, ExtendedConsensusError> { tracing::info!("getting block long term weights."); let BCResponse::BlockExtendedHeaderInRange(ext_headers) = database - .oneshot(BCReadRequest::BlockExtendedHeaderInRange(range)) + .oneshot(BCReadRequest::BlockExtendedHeaderInRange(range, chain)) .await? else { panic!("Database sent incorrect response!") diff --git a/consensus/src/tests/context/difficulty.rs b/consensus/src/tests/context/difficulty.rs index c9886f3..b59f62e 100644 --- a/consensus/src/tests/context/difficulty.rs +++ b/consensus/src/tests/context/difficulty.rs @@ -1,15 +1,15 @@ use std::collections::VecDeque; -use proptest::collection::size_range; +use proptest::collection::{size_range, vec}; use proptest::{prelude::*, prop_assert_eq, prop_compose, proptest}; -use cuprate_helper::num::median; - use crate::{ context::difficulty::*, tests::{context::data::DIF_3000000_3002000, mock_db::*}, HardFork, }; +use cuprate_helper::num::median; +use cuprate_types::Chain; const TEST_WINDOW: usize = 72; const TEST_CUT: usize = 6; @@ -26,9 +26,13 @@ async fn first_3_blocks_fixed_difficulty() -> Result<(), tower::BoxError> { let genesis = DummyBlockExtendedHeader::default().with_difficulty_info(0, 1); db_builder.add_block(genesis); - let mut difficulty_cache = - DifficultyCache::init_from_chain_height(1, TEST_DIFFICULTY_CONFIG, db_builder.finish(None)) - .await?; + let mut difficulty_cache = DifficultyCache::init_from_chain_height( + 1, + TEST_DIFFICULTY_CONFIG, + db_builder.finish(None), + Chain::Main, + ) + .await?; for height in 1..3 { assert_eq!(difficulty_cache.next_difficulty(&HardFork::V1), 1); @@ -42,9 +46,13 @@ async fn genesis_block_skipped() -> Result<(), tower::BoxError> { let mut db_builder = DummyDatabaseBuilder::default(); let genesis = 
DummyBlockExtendedHeader::default().with_difficulty_info(0, 1); db_builder.add_block(genesis); - let diff_cache = - DifficultyCache::init_from_chain_height(1, TEST_DIFFICULTY_CONFIG, db_builder.finish(None)) - .await?; + let diff_cache = DifficultyCache::init_from_chain_height( + 1, + TEST_DIFFICULTY_CONFIG, + db_builder.finish(None), + Chain::Main, + ) + .await?; assert!(diff_cache.cumulative_difficulties.is_empty()); assert!(diff_cache.timestamps.is_empty()); Ok(()) @@ -66,8 +74,9 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> { let mut diff_cache = DifficultyCache::init_from_chain_height( 3_000_720, - cfg.clone(), + cfg, db_builder.finish(Some(3_000_720)), + Chain::Main, ) .await?; @@ -208,4 +217,52 @@ proptest! { } } + + #[test] + fn pop_blocks_below_total_blocks( + mut database in arb_dummy_database(20), + new_blocks in vec(any::<(u64, u128)>(), 0..500) + ) { + tokio_test::block_on(async move { + let old_cache = DifficultyCache::init_from_chain_height(19, TEST_DIFFICULTY_CONFIG, database.clone(), Chain::Main).await.unwrap(); + + let blocks_to_pop = new_blocks.len(); + + let mut new_cache = old_cache.clone(); + for (timestamp, cumulative_difficulty) in new_blocks.into_iter() { + database.add_block(DummyBlockExtendedHeader::default().with_difficulty_info(timestamp, cumulative_difficulty)); + new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty); + } + + new_cache.pop_blocks_main_chain(blocks_to_pop as u64, database).await?; + + prop_assert_eq!(new_cache, old_cache); + + Ok::<_, TestCaseError>(()) + })?; + } + + #[test] + fn pop_blocks_above_total_blocks( + mut database in arb_dummy_database(2000), + new_blocks in vec(any::<(u64, u128)>(), 0..5_000) + ) { + tokio_test::block_on(async move { + let old_cache = DifficultyCache::init_from_chain_height(1999, TEST_DIFFICULTY_CONFIG, database.clone(), Chain::Main).await.unwrap(); + + let blocks_to_pop = new_blocks.len(); + + let mut new_cache = 
old_cache.clone(); + for (timestamp, cumulative_difficulty) in new_blocks.into_iter() { + database.add_block(DummyBlockExtendedHeader::default().with_difficulty_info(timestamp, cumulative_difficulty)); + new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty); + } + + new_cache.pop_blocks_main_chain(blocks_to_pop as u64, database).await?; + + prop_assert_eq!(new_cache, old_cache); + + Ok::<_, TestCaseError>(()) + })?; + } } diff --git a/consensus/src/tests/context/hardforks.rs b/consensus/src/tests/context/hardforks.rs index f6f0f23..d003b3c 100644 --- a/consensus/src/tests/context/hardforks.rs +++ b/consensus/src/tests/context/hardforks.rs @@ -1,3 +1,5 @@ +use proptest::{collection::vec, prelude::*}; + use cuprate_consensus_rules::hard_forks::{HFInfo, HFsInfo, HardFork, NUMB_OF_HARD_FORKS}; use crate::{ @@ -82,3 +84,44 @@ async fn hf_v15_v16_correct() { assert_eq!(state.current_hardfork, HardFork::V16); } + +proptest! { + fn pop_blocks( + hfs in vec(any::(), 0..100), + extra_hfs in vec(any::(), 0..100) + ) { + tokio_test::block_on(async move { + let numb_hfs = hfs.len() as u64; + let numb_pop_blocks = extra_hfs.len() as u64; + + let mut db_builder = DummyDatabaseBuilder::default(); + + for hf in hfs { + db_builder.add_block( + DummyBlockExtendedHeader::default().with_hard_fork_info(hf, hf), + ); + } + + let db = db_builder.finish(Some(numb_hfs as usize)); + + let mut state = HardForkState::init_from_chain_height( + numb_hfs, + TEST_HARD_FORK_CONFIG, + db.clone(), + ) + .await?; + + let state_clone = state.clone(); + + for (i, hf) in extra_hfs.into_iter().enumerate() { + state.new_block(hf, state.last_height + u64::try_from(i).unwrap() + 1); + } + + state.pop_blocks_main_chain(numb_pop_blocks, db).await?; + + prop_assert_eq!(state_clone, state); + + Ok::<(), TestCaseError>(()) + })?; + } +} diff --git a/consensus/src/tests/context/weight.rs b/consensus/src/tests/context/weight.rs index 902d446..83c8bb9 100644 --- 
a/consensus/src/tests/context/weight.rs +++ b/consensus/src/tests/context/weight.rs @@ -6,6 +6,7 @@ use crate::{ tests::{context::data::BW_2850000_3050000, mock_db::*}, HardFork, }; +use cuprate_types::Chain; pub const TEST_WEIGHT_CONFIG: BlockWeightsCacheConfig = BlockWeightsCacheConfig::new(100, 5000); @@ -21,6 +22,7 @@ async fn blocks_out_of_window_not_counted() -> Result<(), tower::BoxError> { 5000, TEST_WEIGHT_CONFIG, db_builder.finish(None), + Chain::Main, ) .await?; assert_eq!(weight_cache.median_long_term_weight(), 2500); @@ -37,6 +39,74 @@ async fn blocks_out_of_window_not_counted() -> Result<(), tower::BoxError> { Ok(()) } +#[tokio::test] +async fn pop_blocks_greater_than_window() -> Result<(), tower::BoxError> { + let mut db_builder = DummyDatabaseBuilder::default(); + for weight in 1..=5000 { + let block = DummyBlockExtendedHeader::default().with_weight_into(weight, weight); + db_builder.add_block(block); + } + + let database = db_builder.finish(None); + + let mut weight_cache = BlockWeightsCache::init_from_chain_height( + 5000, + TEST_WEIGHT_CONFIG, + database.clone(), + Chain::Main, + ) + .await?; + + let old_cache = weight_cache.clone(); + + weight_cache.new_block(5000, 0, 0); + weight_cache.new_block(5001, 0, 0); + weight_cache.new_block(5002, 0, 0); + + weight_cache + .pop_blocks_main_chain(3, database) + .await + .unwrap(); + + assert_eq!(weight_cache, old_cache); + + Ok(()) +} + +#[tokio::test] +async fn pop_blocks_less_than_window() -> Result<(), tower::BoxError> { + let mut db_builder = DummyDatabaseBuilder::default(); + for weight in 1..=500 { + let block = DummyBlockExtendedHeader::default().with_weight_into(weight, weight); + db_builder.add_block(block); + } + + let database = db_builder.finish(None); + + let mut weight_cache = BlockWeightsCache::init_from_chain_height( + 500, + TEST_WEIGHT_CONFIG, + database.clone(), + Chain::Main, + ) + .await?; + + let old_cache = weight_cache.clone(); + + weight_cache.new_block(500, 0, 0); + 
weight_cache.new_block(501, 0, 0); + weight_cache.new_block(502, 0, 0); + + weight_cache + .pop_blocks_main_chain(3, database) + .await + .unwrap(); + + assert_eq!(weight_cache, old_cache); + + Ok(()) +} + #[tokio::test] async fn weight_cache_calculates_correct_median() -> Result<(), tower::BoxError> { let mut db_builder = DummyDatabaseBuilder::default(); @@ -44,9 +114,13 @@ async fn weight_cache_calculates_correct_median() -> Result<(), tower::BoxError> let block = DummyBlockExtendedHeader::default().with_weight_into(0, 0); db_builder.add_block(block); - let mut weight_cache = - BlockWeightsCache::init_from_chain_height(1, TEST_WEIGHT_CONFIG, db_builder.finish(None)) - .await?; + let mut weight_cache = BlockWeightsCache::init_from_chain_height( + 1, + TEST_WEIGHT_CONFIG, + db_builder.finish(None), + Chain::Main, + ) + .await?; for height in 1..=100 { weight_cache.new_block(height as u64, height, height); @@ -76,6 +150,7 @@ async fn calc_bw_ltw_2850000_3050000() { 2950000, TEST_WEIGHT_CONFIG, db_builder.finish(Some(2950000)), + Chain::Main, ) .await .unwrap(); diff --git a/consensus/src/tests/mock_db.rs b/consensus/src/tests/mock_db.rs index d1c6255..c4fd75d 100644 --- a/consensus/src/tests/mock_db.rs +++ b/consensus/src/tests/mock_db.rs @@ -127,6 +127,12 @@ pub struct DummyDatabase { dummy_height: Option, } +impl DummyDatabase { + pub fn add_block(&mut self, block: DummyBlockExtendedHeader) { + self.blocks.write().unwrap().push(block) + } +} + impl Service for DummyDatabase { type Response = BCResponse; type Error = BoxError; @@ -161,12 +167,12 @@ impl Service for DummyDatabase { .ok_or("block not in database!")?, ) } - BCReadRequest::BlockHash(id) => { + BCReadRequest::BlockHash(id, _) => { let mut hash = [0; 32]; hash[0..8].copy_from_slice(&id.to_le_bytes()); BCResponse::BlockHash(hash) } - BCReadRequest::BlockExtendedHeaderInRange(range) => { + BCReadRequest::BlockExtendedHeaderInRange(range, _) => { let mut end = usize::try_from(range.end).unwrap(); let mut 
start = usize::try_from(range.start).unwrap(); @@ -200,7 +206,7 @@ impl Service for DummyDatabase { BCResponse::ChainHeight(height, top_hash) } - BCReadRequest::GeneratedCoins => BCResponse::GeneratedCoins(0), + BCReadRequest::GeneratedCoins(_) => BCResponse::GeneratedCoins(0), _ => unimplemented!("the context svc should not need these requests!"), }) } diff --git a/helper/src/num.rs b/helper/src/num.rs index cc1feb1..f90357e 100644 --- a/helper/src/num.rs +++ b/helper/src/num.rs @@ -8,6 +8,9 @@ use core::{ ops::{Add, Div, Mul, Sub}, }; +#[cfg(feature = "std")] +mod rolling_median; + //---------------------------------------------------------------------------------------------------- Types // INVARIANT: must be private. // Protects against outside-crate implementations. @@ -15,6 +18,9 @@ mod private { pub trait Sealed: Copy + PartialOrd + core::fmt::Display {} } +#[cfg(feature = "std")] +pub use rolling_median::RollingMedian; + /// Non-floating point numbers /// /// This trait is sealed and is only implemented on: diff --git a/helper/src/num/rolling_median.rs b/helper/src/num/rolling_median.rs new file mode 100644 index 0000000..2babda2 --- /dev/null +++ b/helper/src/num/rolling_median.rs @@ -0,0 +1,150 @@ +use std::{ + collections::VecDeque, + ops::{Add, Div, Mul, Sub}, +}; + +use crate::num::median; + +/// A rolling median type. +/// +/// This keeps track of a window of items and allows calculating the [`RollingMedian::median`] of them. 
+/// +/// Example: +/// ```rust +/// # use cuprate_helper::num::RollingMedian; +/// let mut rolling_median = RollingMedian::new(2); +/// +/// rolling_median.push(1); +/// assert_eq!(rolling_median.median(), 1); +/// assert_eq!(rolling_median.window_len(), 1); +/// +/// rolling_median.push(3); +/// assert_eq!(rolling_median.median(), 2); +/// assert_eq!(rolling_median.window_len(), 2); +/// +/// rolling_median.push(5); +/// assert_eq!(rolling_median.median(), 4); +/// assert_eq!(rolling_median.window_len(), 2); +/// ``` +/// +// TODO: a more efficient structure is probably possible. +#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone)] +pub struct RollingMedian { + /// The window of items, in order of insertion. + window: VecDeque, + /// The window of items, sorted. + sorted_window: Vec, + + /// The target window length. + target_window: usize, +} + +impl RollingMedian +where + T: Ord + + PartialOrd + + Add + + Sub + + Div + + Mul + + Copy + + From, +{ + /// Creates a new [`RollingMedian`] with a certain target window length. + /// + /// `target_window` is the maximum amount of items to keep in the rolling window. + pub fn new(target_window: usize) -> Self { + Self { + window: VecDeque::with_capacity(target_window), + sorted_window: Vec::with_capacity(target_window), + target_window, + } + } + + /// Creates a new [`RollingMedian`] from a [`Vec`] with a certain target window length. + /// + /// `target_window` is the maximum amount of items to keep in the rolling window. + /// + /// # Panics + /// This function panics if `vec.len() > target_window`. + pub fn from_vec(vec: Vec, target_window: usize) -> Self { + assert!(vec.len() <= target_window); + + let mut sorted_window = vec.clone(); + sorted_window.sort_unstable(); + + Self { + window: vec.into(), + sorted_window, + target_window, + } + } + + /// Pops the front of the window, i.e. the oldest item. 
+ /// + /// This is often not needed as [`RollingMedian::push`] will handle popping old values when they fall + /// out of the window. + pub fn pop_front(&mut self) { + if let Some(item) = self.window.pop_front() { + match self.sorted_window.binary_search(&item) { + Ok(idx) => { + self.sorted_window.remove(idx); + } + Err(_) => panic!("Value expected to be in sorted_window was not there"), + } + } + } + + /// Pops the back of the window, i.e. the youngest item. + pub fn pop_back(&mut self) { + if let Some(item) = self.window.pop_back() { + match self.sorted_window.binary_search(&item) { + Ok(idx) => { + self.sorted_window.remove(idx); + } + Err(_) => panic!("Value expected to be in sorted_window was not there"), + } + } + } + + /// Push an item to the _back_ of the window. + /// + /// This will pop the oldest item in the window if the target length has been exceeded. + pub fn push(&mut self, item: T) { + if self.window.len() >= self.target_window { + self.pop_front(); + } + + self.window.push_back(item); + match self.sorted_window.binary_search(&item) { + Ok(idx) | Err(idx) => self.sorted_window.insert(idx, item), + } + } + + /// Append some values to the _front_ of the window. + /// + /// These new values will be the oldest items in the window. The order of the inputted items will be + /// kept, i.e. the first item in the [`Vec`] will be the oldest item in the queue. + pub fn append_front(&mut self, items: Vec) { + for item in items.into_iter().rev() { + self.window.push_front(item); + match self.sorted_window.binary_search(&item) { + Ok(idx) | Err(idx) => self.sorted_window.insert(idx, item), + } + + if self.window.len() > self.target_window { + self.pop_back(); + } + } + } + + /// Returns the number of items currently in the [`RollingMedian`]. + pub fn window_len(&self) -> usize { + self.window.len() + } + + /// Calculates the median of the values currently in the [`RollingMedian`]. 
+ pub fn median(&self) -> T { + median(&self.sorted_window) + } +} diff --git a/storage/blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs index 1d9d10b..bf2d8e7 100644 --- a/storage/blockchain/src/service/mod.rs +++ b/storage/blockchain/src/service/mod.rs @@ -63,7 +63,7 @@ //! use hex_literal::hex; //! use tower::{Service, ServiceExt}; //! -//! use cuprate_types::blockchain::{BCReadRequest, BCWriteRequest, BCResponse}; +//! use cuprate_types::{blockchain::{BCReadRequest, BCWriteRequest, BCResponse}, Chain}; //! use cuprate_test_utils::data::block_v16_tx0; //! //! use cuprate_blockchain::{ @@ -85,7 +85,7 @@ //! //! // Prepare a request to write block. //! let mut block = block_v16_tx0().clone(); -//! # block.height = 0 as u64; // must be 0th height or panic in `add_block()` +//! # block.height = 0_u64; // must be 0th height or panic in `add_block()` //! let request = BCWriteRequest::WriteBlock(block); //! //! // Send the request. @@ -100,7 +100,7 @@ //! //! // Now, let's try getting the block hash //! // of the block we just wrote. -//! let request = BCReadRequest::BlockHash(0); +//! let request = BCReadRequest::BlockHash(0, Chain::Main); //! let response_channel = read_handle.ready().await?.call(request); //! let response = response_channel.await?; //! 
assert_eq!( diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index 3f0b263..a5d51f1 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -17,7 +17,7 @@ use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError}; use cuprate_helper::{asynch::InfallibleOneshotReceiver, map::combine_low_high_bits_to_u128}; use cuprate_types::{ blockchain::{BCReadRequest, BCResponse}, - ExtendedBlockHeader, OutputOnChain, + Chain, ExtendedBlockHeader, OutputOnChain, }; use crate::{ @@ -206,11 +206,14 @@ fn map_request( let response = match request { R::BlockExtendedHeader(block) => block_extended_header(env, block), - R::BlockHash(block) => block_hash(env, block), + R::BlockHash(block, chain) => block_hash(env, block, chain), + R::FindBlock(_) => todo!("Add alt blocks to DB"), R::FilterUnknownHashes(hashes) => filter_unknown_hashes(env, hashes), - R::BlockExtendedHeaderInRange(range) => block_extended_header_in_range(env, range), + R::BlockExtendedHeaderInRange(range, chain) => { + block_extended_header_in_range(env, range, chain) + } R::ChainHeight => chain_height(env), - R::GeneratedCoins => generated_coins(env), + R::GeneratedCoins(height) => generated_coins(env, height), R::Outputs(map) => outputs(env, map), R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec), R::KeyImagesSpent(set) => key_images_spent(env, set), @@ -312,15 +315,18 @@ fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> Respon /// [`BCReadRequest::BlockHash`]. #[inline] -fn block_hash(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult { +fn block_hash(env: &ConcreteEnv, block_height: BlockHeight, chain: Chain) -> ResponseResult { // Single-threaded, no `ThreadLocal` required. 
let env_inner = env.env_inner(); let tx_ro = env_inner.tx_ro()?; let table_block_infos = env_inner.open_db_ro::(&tx_ro)?; - Ok(BCResponse::BlockHash( - get_block_info(&block_height, &table_block_infos)?.block_hash, - )) + let block_hash = match chain { + Chain::Main => get_block_info(&block_height, &table_block_infos)?.block_hash, + Chain::Alt(_) => todo!("Add alt blocks to DB"), + }; + + Ok(BCResponse::BlockHash(block_hash)) } /// [`BCReadRequest::FilterUnknownHashes`]. @@ -356,6 +362,7 @@ fn filter_unknown_hashes(env: &ConcreteEnv, mut hashes: HashSet) -> R fn block_extended_header_in_range( env: &ConcreteEnv, range: std::ops::Range, + chain: Chain, ) -> ResponseResult { // Prepare tx/tables in `ThreadLocal`. let env_inner = env.env_inner(); @@ -363,14 +370,17 @@ fn block_extended_header_in_range( let tables = thread_local(env); // Collect results using `rayon`. - let vec = range - .into_par_iter() - .map(|block_height| { - let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; - let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); - get_block_extended_header_from_height(&block_height, tables) - }) - .collect::, RuntimeError>>()?; + let vec = match chain { + Chain::Main => range + .into_par_iter() + .map(|block_height| { + let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; + let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); + get_block_extended_header_from_height(&block_height, tables) + }) + .collect::, RuntimeError>>()?, + Chain::Alt(_) => todo!("Add alt blocks to DB"), + }; Ok(BCResponse::BlockExtendedHeaderInRange(vec)) } @@ -393,17 +403,14 @@ fn chain_height(env: &ConcreteEnv) -> ResponseResult { /// [`BCReadRequest::GeneratedCoins`]. #[inline] -fn generated_coins(env: &ConcreteEnv) -> ResponseResult { +fn generated_coins(env: &ConcreteEnv, height: u64) -> ResponseResult { // Single-threaded, no `ThreadLocal` required. 
let env_inner = env.env_inner(); let tx_ro = env_inner.tx_ro()?; - let table_block_heights = env_inner.open_db_ro::(&tx_ro)?; let table_block_infos = env_inner.open_db_ro::(&tx_ro)?; - let top_height = top_block_height(&table_block_heights)?; - Ok(BCResponse::GeneratedCoins(cumulative_generated_coins( - &top_height, + &height, &table_block_infos, )?)) } diff --git a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs index 4f3fbe4..c00e32f 100644 --- a/storage/blockchain/src/service/tests.rs +++ b/storage/blockchain/src/service/tests.rs @@ -19,7 +19,7 @@ use cuprate_database::{ConcreteEnv, DatabaseIter, DatabaseRo, Env, EnvInner, Run use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3}; use cuprate_types::{ blockchain::{BCReadRequest, BCResponse, BCWriteRequest}, - OutputOnChain, VerifiedBlockInformation, + Chain, OutputOnChain, VerifiedBlockInformation, }; use crate::{ @@ -138,10 +138,15 @@ async fn test_template( Err(RuntimeError::KeyNotFound) }; + let test_chain_height = chain_height(tables.block_heights()).unwrap(); + let chain_height = { - let height = chain_height(tables.block_heights()).unwrap(); - let block_info = get_block_info(&height.saturating_sub(1), tables.block_infos()).unwrap(); - Ok(BCResponse::ChainHeight(height, block_info.block_hash)) + let block_info = + get_block_info(&test_chain_height.saturating_sub(1), tables.block_infos()).unwrap(); + Ok(BCResponse::ChainHeight( + test_chain_height, + block_info.block_hash, + )) }; let cumulative_generated_coins = Ok(BCResponse::GeneratedCoins(cumulative_generated_coins)); @@ -182,12 +187,21 @@ async fn test_template( BCReadRequest::BlockExtendedHeader(1), extended_block_header_1, ), - (BCReadRequest::BlockHash(0), block_hash_0), - (BCReadRequest::BlockHash(1), block_hash_1), - (BCReadRequest::BlockExtendedHeaderInRange(0..1), range_0_1), - (BCReadRequest::BlockExtendedHeaderInRange(0..2), range_0_2), + (BCReadRequest::BlockHash(0, Chain::Main), 
block_hash_0), + (BCReadRequest::BlockHash(1, Chain::Main), block_hash_1), + ( + BCReadRequest::BlockExtendedHeaderInRange(0..1, Chain::Main), + range_0_1, + ), + ( + BCReadRequest::BlockExtendedHeaderInRange(0..2, Chain::Main), + range_0_2, + ), (BCReadRequest::ChainHeight, chain_height), - (BCReadRequest::GeneratedCoins, cumulative_generated_coins), + ( + BCReadRequest::GeneratedCoins(test_chain_height), + cumulative_generated_coins, + ), (BCReadRequest::NumberOutputsWithAmount(num_req), num_resp), (BCReadRequest::KeyImagesSpent(ki_req), ki_resp), ] { diff --git a/types/src/blockchain.rs b/types/src/blockchain.rs index 4a280be..1ff06c2 100644 --- a/types/src/blockchain.rs +++ b/types/src/blockchain.rs @@ -9,7 +9,7 @@ use std::{ ops::Range, }; -use crate::types::{ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation}; +use crate::types::{Chain, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation}; //---------------------------------------------------------------------------------------------------- ReadRequest /// A read request to the blockchain database. @@ -29,8 +29,13 @@ pub enum BCReadRequest { /// Request a block's hash. /// - /// The input is the block's height. - BlockHash(u64), + /// The input is the block's height and the chain it is on. + BlockHash(u64, Chain), + + /// Request to check if we have a block and which [`Chain`] it is on. + /// + /// The input is the block's hash. + FindBlock([u8; 32]), /// Removes the block hashes that are not in the _main_ chain. /// @@ -40,15 +45,15 @@ pub enum BCReadRequest { /// Request a range of block extended headers. /// /// The input is a range of block heights. - BlockExtendedHeaderInRange(Range), + BlockExtendedHeaderInRange(Range, Chain), /// Request the current chain height. /// /// Note that this is not the top-block height. ChainHeight, - /// Request the total amount of generated coins (atomic units) so far. 
- GeneratedCoins, + /// Request the total amount of generated coins (atomic units) at this height. + GeneratedCoins(u64), /// Request data for multiple outputs. /// @@ -129,6 +134,11 @@ pub enum BCResponse { /// Inner value is the hash of the requested block. BlockHash([u8; 32]), + /// Response to [`BCReadRequest::FindBlock`]. + /// + /// Inner value is the chain and height of the block if found. + FindBlock(Option<(Chain, u64)>), + /// Response to [`BCReadRequest::FilterUnknownHashes`]. /// /// Inner value is the list of hashes that were in the main chain. @@ -146,7 +156,7 @@ pub enum BCResponse { /// Response to [`BCReadRequest::GeneratedCoins`]. /// - /// Inner value is the total amount of generated coins so far, in atomic units. + /// Inner value is the total amount of generated coins up to and including the chosen height, in atomic units. GeneratedCoins(u64), /// Response to [`BCReadRequest::Outputs`]. diff --git a/types/src/lib.rs b/types/src/lib.rs index 1cdb9d5..bcf6a45 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -84,7 +84,8 @@ mod types; pub use block_complete_entry::{BlockCompleteEntry, PrunedTxBlobEntry, TransactionBlobs}; pub use types::{ - ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation, VerifiedTransactionInformation, + AltBlockInformation, Chain, ChainId, ExtendedBlockHeader, OutputOnChain, + VerifiedBlockInformation, VerifiedTransactionInformation, }; //---------------------------------------------------------------------------------------------------- Feature-gated diff --git a/types/src/types.rs b/types/src/types.rs index 76ffd57..db31507 100644 --- a/types/src/types.rs +++ b/types/src/types.rs @@ -38,7 +38,8 @@ pub struct ExtendedBlockHeader { //---------------------------------------------------------------------------------------------------- VerifiedTransactionInformation /// Verified information of a transaction. /// -/// This represents a transaction in a valid block. 
+/// - If this is in a [`VerifiedBlockInformation`] this represents a valid transaction +/// - If this is in an [`AltBlockInformation`] this represents a potentially valid transaction #[derive(Clone, Debug, PartialEq, Eq)] pub struct VerifiedTransactionInformation { /// The transaction itself. @@ -91,6 +92,53 @@ pub struct VerifiedBlockInformation { pub cumulative_difficulty: u128, } +//---------------------------------------------------------------------------------------------------- ChainID +/// A unique ID for an alt chain. +/// +/// The inner value is meaningless. +#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)] +pub struct ChainId(pub u64); + +//---------------------------------------------------------------------------------------------------- Chain +/// An identifier for a chain. +#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)] +pub enum Chain { + /// The main chain. + Main, + /// An alt chain. + Alt(ChainId), +} + +//---------------------------------------------------------------------------------------------------- AltBlockInformation +/// A block on an alternative chain. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct AltBlockInformation { + /// The block itself. + pub block: Block, + /// The serialized byte form of [`Self::block`]. + /// + /// [`Block::serialize`]. + pub block_blob: Vec, + /// All the transactions in the block, excluding the [`Block::miner_tx`]. + pub txs: Vec, + /// The block's hash. + /// + /// [`Block::hash`]. + pub block_hash: [u8; 32], + /// The block's proof-of-work hash. + pub pow_hash: [u8; 32], + /// The block's height. + pub height: u64, + /// The adjusted block size, in bytes. + pub weight: usize, + /// The long term block weight, which is the weight factored in with previous block weights. + pub long_term_weight: usize, + /// The cumulative difficulty of all blocks up until and including this block. 
+ pub cumulative_difficulty: u128, + /// The [`ChainId`] of the chain this alt block is on. + pub chain_id: ChainId, +} + //---------------------------------------------------------------------------------------------------- OutputOnChain /// An already existing transaction output. #[derive(Clone, Copy, Debug, PartialEq, Eq)] From 86d1f408d8a8834a0e3ba7ba7d80a97f5448bbc2 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Tue, 30 Jul 2024 19:12:09 +0000 Subject: [PATCH 026/104] books: fix typo (#240) fix typo --- books/protocol/src/p2p_network/levin/admin.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/books/protocol/src/p2p_network/levin/admin.md b/books/protocol/src/p2p_network/levin/admin.md index 6f2b716..a718646 100644 --- a/books/protocol/src/p2p_network/levin/admin.md +++ b/books/protocol/src/p2p_network/levin/admin.md @@ -67,7 +67,7 @@ ID: `1007`[^support-flags] #### Request [^sf-req] { #support-flags-request } -No data is serialized for a ping request. +No data is serialized for a support flags request. 
#### Response [^sf-res] { #support-flags-response } From dced4ed7ecd6f5a93b0102f3bde6bf13ad5aff4d Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Tue, 30 Jul 2024 16:53:27 -0400 Subject: [PATCH 027/104] architecture-book: add `Crates` section (#239) * add `cuprate-crates.md` * fix links * Update books/architecture/src/appendix/crates.md Co-authored-by: Boog900 --------- Co-authored-by: Boog900 --- books/architecture/src/SUMMARY.md | 2 +- .../src/appendix/crate-documentation.md | 4 -- books/architecture/src/appendix/crates.md | 61 +++++++++++++++++++ books/architecture/src/intro.md | 4 -- 4 files changed, 62 insertions(+), 9 deletions(-) delete mode 100644 books/architecture/src/appendix/crate-documentation.md create mode 100644 books/architecture/src/appendix/crates.md delete mode 100644 books/architecture/src/intro.md diff --git a/books/architecture/src/SUMMARY.md b/books/architecture/src/SUMMARY.md index 74ecda6..3a8b351 100644 --- a/books/architecture/src/SUMMARY.md +++ b/books/architecture/src/SUMMARY.md @@ -117,8 +117,8 @@ --- - [⚪️ Appendix](appendix/intro.md) + - [🟢 Crates](appendix/crates.md) - [🔴 Contributing](appendix/contributing.md) - - [🔴 Crate documentation](appendix/crate-documentation.md) - [🔴 Build targets](appendix/build-targets.md) - [🔴 Protocol book](appendix/protocol-book.md) - [⚪️ User book](appendix/user-book.md) \ No newline at end of file diff --git a/books/architecture/src/appendix/crate-documentation.md b/books/architecture/src/appendix/crate-documentation.md deleted file mode 100644 index 0f4d96d..0000000 --- a/books/architecture/src/appendix/crate-documentation.md +++ /dev/null @@ -1,4 +0,0 @@ -# Crate documentation -```bash -cargo doc --package $CUPRATE_CRATE -``` \ No newline at end of file diff --git a/books/architecture/src/appendix/crates.md b/books/architecture/src/appendix/crates.md new file mode 100644 index 0000000..224e678 --- /dev/null +++ b/books/architecture/src/appendix/crates.md @@ -0,0 +1,61 @@ +# Crates +This is an 
index of all of Cuprate's in-house crates it uses and maintains. + +They are categorized into groups. + +Crate documentation for each crate can be found by clicking the crate name or by visiting . Documentation can also be built manually by running this at the root of the `cuprate` repository: +```bash +cargo doc --package $CRATE +``` +For example, this will generate and open `cuprate-blockchain` documentation: +```bash +cargo doc --open --package cuprate-blockchain +``` + +## Consensus +| Crate | In-tree path | Purpose | +|-------|--------------|---------| +| [`cuprate-consensus`](https://doc.cuprate.org/cuprate_consensus) | [`consensus/`](https://github.com/Cuprate/cuprate/tree/main/consensus) | TODO +| [`cuprate-consensus-rules`](https://doc.cuprate.org/cuprate_consensus_rules) | [`consensus/rules/`](https://github.com/Cuprate/cuprate/tree/main/consensus-rules) | TODO +| [`cuprate-fast-sync`](https://doc.cuprate.org/cuprate_fast_sync) | [`consensus/fast-sync/`](https://github.com/Cuprate/cuprate/tree/main/consensus/fast-sync) | Fast block synchronization + +## Networking +| Crate | In-tree path | Purpose | +|-------|--------------|---------| +| [`cuprate-epee-encoding`](https://doc.cuprate.org/cuprate_epee_encoding) | [`net/epee-encoding/`](https://github.com/Cuprate/cuprate/tree/main/net/epee-encoding) | Epee (de)serialization +| [`cuprate-fixed-bytes`](https://doc.cuprate.org/cuprate_fixed_bytes) | [`net/fixed-bytes/`](https://github.com/Cuprate/cuprate/tree/main/net/fixed-bytes) | Fixed byte containers backed by `byte::Byte` +| [`cuprate-levin`](https://doc.cuprate.org/cuprate_levin) | [`net/levin/`](https://github.com/Cuprate/cuprate/tree/main/net/levin) | Levin bucket protocol implementation +| [`cuprate-wire`](https://doc.cuprate.org/cuprate_wire) | [`net/wire/`](https://github.com/Cuprate/cuprate/tree/main/net/wire) | TODO + +## P2P +| Crate | In-tree path | Purpose | +|-------|--------------|---------| +| 
[`cuprate-address-book`](https://doc.cuprate.org/cuprate_address_book) | [`p2p/address-book/`](https://github.com/Cuprate/cuprate/tree/main/p2p/address-book) | TODO +| [`cuprate-async-buffer`](https://doc.cuprate.org/cuprate_async_buffer) | [`p2p/async-buffer/`](https://github.com/Cuprate/cuprate/tree/main/p2p/async-buffer) | A bounded SPSC, FIFO, asynchronous buffer that supports arbitrary weights for values +| [`cuprate-dandelion-tower`](https://doc.cuprate.org/cuprate_dandelion_tower) | [`p2p/dandelion-tower/`](https://github.com/Cuprate/cuprate/tree/main/p2p/dandelion-tower) | TODO +| [`cuprate-p2p`](https://doc.cuprate.org/cuprate_p2p) | [`p2p/p2p/`](https://github.com/Cuprate/cuprate/tree/main/p2p/p2p) | TODO +| [`cuprate-p2p-core`](https://doc.cuprate.org/cuprate_p2p_core) | [`p2p/p2p-core/`](https://github.com/Cuprate/cuprate/tree/main/p2p/p2p-core) | TODO + +## Storage +| Crate | In-tree path | Purpose | +|-------|--------------|---------| +| [`cuprate-blockchain`](https://doc.cuprate.org/cuprate_blockchain) | [`storage/blockchain/`](https://github.com/Cuprate/cuprate/tree/main/storage/blockchain) | Blockchain database built on-top of `cuprate-database` & `cuprate-database-service` +| [`cuprate-database`](https://doc.cuprate.org/cuprate_database) | [`storage/database/`](https://github.com/Cuprate/cuprate/tree/main/storage/database) | Pure database abstraction +| [`cuprate-database-service`](https://doc.cuprate.org/cuprate_database_service) | [`storage/database-service/`](https://github.com/Cuprate/cuprate/tree/main/storage/database-service) | `tower::Service` + thread-pool abstraction built on-top of `cuprate-database` +| [`cuprate-txpool`](https://doc.cuprate.org/cuprate_txpool) | [`storage/txpool/`](https://github.com/Cuprate/cuprate/tree/main/storage/txpool) | Transaction pool database built on-top of `cuprate-database` & `cuprate-database-service` + +## RPC +| Crate | In-tree path | Purpose | +|-------|--------------|---------| +| 
[`cuprate-json-rpc`](https://doc.cuprate.org/cuprate_json_rpc) | [`rpc/json-rpc/`](https://github.com/Cuprate/cuprate/tree/main/rpc/json-rpc) | JSON-RPC 2.0 implementation +| [`cuprate-rpc-types`](https://doc.cuprate.org/cuprate_rpc_types) | [`rpc/types/`](https://github.com/Cuprate/cuprate/tree/main/rpc/types) | Monero RPC types and traits +| [`cuprate-rpc-interface`](https://doc.cuprate.org/cuprate_rpc_interface) | [`rpc/interface/`](https://github.com/Cuprate/cuprate/tree/main/rpc/interface) | RPC interface & routing + +## 1-off crates +| Crate | In-tree path | Purpose | +|-------|--------------|---------| +| [`cuprate-cryptonight`](https://doc.cuprate.org/cuprate_cryptonight) | [`cryptonight/`](https://github.com/Cuprate/cuprate/tree/main/cryptonight) | CryptoNight hash functions +| [`cuprate-pruning`](https://doc.cuprate.org/cuprate_pruning) | [`pruning/`](https://github.com/Cuprate/cuprate/tree/main/pruning) | Monero pruning logic/types +| [`cuprate-helper`](https://doc.cuprate.org/cuprate_helper) | [`helper/`](https://github.com/Cuprate/cuprate/tree/main/helper) | Kitchen-sink helper crate for Cuprate +| [`cuprate-test-utils`](https://doc.cuprate.org/cuprate_test_utils) | [`test-utils/`](https://github.com/Cuprate/cuprate/tree/main/test-utils) | Testing utilities for Cuprate +| [`cuprate-types`](https://doc.cuprate.org/cuprate_types) | [`types/`](https://github.com/Cuprate/cuprate/tree/main/types) | Shared types across Cuprate diff --git a/books/architecture/src/intro.md b/books/architecture/src/intro.md deleted file mode 100644 index c708d61..0000000 --- a/books/architecture/src/intro.md +++ /dev/null @@ -1,4 +0,0 @@ -# Systems -Cuprate is made up of multiple distinct internal systems that work together. - -This section provides informal specifications and implementation details about each. 
\ No newline at end of file From bd375eae40acfad7c8d0205bb10afd0b78e424d2 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Thu, 1 Aug 2024 17:04:22 -0400 Subject: [PATCH 028/104] rpc-types: add traits and `enum` requests/responses (#241) apply diff --- rpc/types/README.md | 8 ++ rpc/types/src/bin.rs | 67 ++++++++++ rpc/types/src/json.rs | 257 ++++++++++++++++++++++++++++++++++---- rpc/types/src/lib.rs | 2 + rpc/types/src/macros.rs | 93 +++++++++++--- rpc/types/src/other.rs | 238 +++++++++++++++++++++++++++++++---- rpc/types/src/rpc_call.rs | 96 ++++++++++++++ 7 files changed, 700 insertions(+), 61 deletions(-) create mode 100644 rpc/types/src/rpc_call.rs diff --git a/rpc/types/README.md b/rpc/types/README.md index 566cca7..b5a4f65 100644 --- a/rpc/types/README.md +++ b/rpc/types/README.md @@ -7,6 +7,8 @@ This crate ports the types used in Monero's RPC interface, including: - Mixed types - Other commonly used RPC types +It also includes some traits for these types. + # Modules This crate's types are split in the following manner: @@ -94,6 +96,12 @@ The invariants that can be relied upon: - Types in [`bin`] will implement `epee` correctly - Misc types will implement `serde/epee` correctly as needed +# Requests and responses +For `enum`s that encapsulate all request/response types, see: +- [`crate::json::JsonRpcRequest`] & [`crate::json::JsonRpcResponse`] +- [`crate::bin::BinRequest`] & [`crate::bin::BinResponse`] +- [`crate::other::OtherRequest`] & [`crate::other::OtherResponse`] + # Feature flags List of feature flags for `cuprate-rpc-types`. 
diff --git a/rpc/types/src/bin.rs b/rpc/types/src/bin.rs index c801c69..278e535 100644 --- a/rpc/types/src/bin.rs +++ b/rpc/types/src/bin.rs @@ -28,6 +28,7 @@ use crate::{ HardforkEntry, HistogramEntry, OutKeyBin, OutputDistributionData, Peer, PoolInfoExtent, PoolTxInfo, SetBan, Span, Status, TxBacklogEntry, }, + rpc_call::{RpcCall, RpcCallValue}, }; //---------------------------------------------------------------------------------------------------- Definitions @@ -393,6 +394,72 @@ impl EpeeObject for GetBlocksResponse { } } +//---------------------------------------------------------------------------------------------------- Request +/// Binary requests. +/// +/// This enum contains all [`crate::bin`] requests. +/// +/// See also: [`BinResponse`]. +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +#[cfg_attr(feature = "serde", serde(untagged))] +#[allow(missing_docs)] +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum BinRequest { + GetBlocks(GetBlocksRequest), + GetBlocksByHeight(GetBlocksByHeightRequest), + GetHashes(GetHashesRequest), + GetOutputIndexes(GetOutputIndexesRequest), + GetOuts(GetOutsRequest), + GetTransactionPoolHashes(GetTransactionPoolHashesRequest), + GetOutputDistribution(crate::json::GetOutputDistributionRequest), +} + +impl RpcCallValue for BinRequest { + fn is_restricted(&self) -> bool { + match self { + Self::GetBlocks(x) => x.is_restricted(), + Self::GetBlocksByHeight(x) => x.is_restricted(), + Self::GetHashes(x) => x.is_restricted(), + Self::GetOutputIndexes(x) => x.is_restricted(), + Self::GetOuts(x) => x.is_restricted(), + Self::GetTransactionPoolHashes(x) => x.is_restricted(), + Self::GetOutputDistribution(x) => x.is_restricted(), + } + } + + fn is_empty(&self) -> bool { + match self { + Self::GetBlocks(x) => x.is_empty(), + Self::GetBlocksByHeight(x) => x.is_empty(), + Self::GetHashes(x) => x.is_empty(), + Self::GetOutputIndexes(x) => x.is_empty(), + Self::GetOuts(x) => x.is_empty(), + 
Self::GetTransactionPoolHashes(x) => x.is_empty(), + Self::GetOutputDistribution(x) => x.is_empty(), + } + } +} + +//---------------------------------------------------------------------------------------------------- Response +/// Binary responses. +/// +/// This enum contains all [`crate::bin`] responses. +/// +/// See also: [`BinRequest`]. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +#[cfg_attr(feature = "serde", serde(untagged))] +#[allow(missing_docs)] +pub enum BinResponse { + GetBlocks(GetBlocksResponse), + GetBlocksByHeight(GetBlocksByHeightResponse), + GetHashes(GetHashesResponse), + GetOutputIndexes(GetOutputIndexesResponse), + GetOuts(GetOutsResponse), + GetTransactionPoolHashes(GetTransactionPoolHashesResponse), + GetOutputDistribution(crate::json::GetOutputDistributionResponse), +} + //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test { diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index dd2e648..4971061 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -3,6 +3,9 @@ //! All types are originally defined in [`rpc/core_rpc_server_commands_defs.h`](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h). //---------------------------------------------------------------------------------------------------- Import +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + use crate::{ base::{AccessResponseBase, ResponseBase}, defaults::{ @@ -16,6 +19,7 @@ use crate::{ GetMinerDataTxBacklogEntry, HardforkEntry, HistogramEntry, OutputDistributionData, SetBan, Span, Status, SyncInfoPeer, TxBacklogEntry, }, + rpc_call::RpcCallValue, }; //---------------------------------------------------------------------------------------------------- Macro @@ -93,7 +97,17 @@ define_request_and_response! 
{ cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 943..=994, // The base type name. - GetBlockTemplate, + // + // After the type name, 2 optional idents are allowed: + // - `restricted` + // - `empty` + // + // These have to be within `()` and will affect the + // [`crate::RpcCall`] implementation on the request type. + // + // This type is not either restricted or empty so nothing is + // here, but the correct syntax is shown in a comment below: + GetBlockTemplate /* (restricted, empty) */, // The request type. // @@ -218,7 +232,7 @@ define_request_and_response! { get_block_count, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 919..=933, - GetBlockCount, + GetBlockCount (empty), // There are no request fields specified, // this will cause the macro to generate a @@ -300,7 +314,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1130..=1161, - GenerateBlocks, + GenerateBlocks (restricted), #[doc = serde_doc_test!( GENERATE_BLOCKS_REQUEST => GenerateBlocksRequest { @@ -633,7 +647,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1734..=1754, - GetConnections, + GetConnections (restricted, empty), Request {}, @@ -708,7 +722,7 @@ define_request_and_response! { get_info, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 693..=789, - GetInfo, + GetInfo (empty), Request {}, #[doc = serde_doc_test!( @@ -802,7 +816,7 @@ define_request_and_response! { hard_fork_info, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1958..=1995, - HardForkInfo, + HardForkInfo (empty), Request {}, #[doc = serde_doc_test!( @@ -834,7 +848,8 @@ define_request_and_response! 
{ set_bans, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2032..=2067, - SetBans, + + SetBans (restricted), #[doc = serde_doc_test!( SET_BANS_REQUEST => SetBansRequest { @@ -862,7 +877,7 @@ define_request_and_response! { get_bans, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1997..=2030, - GetBans, + GetBans (restricted, empty), Request {}, #[doc = serde_doc_test!( @@ -891,7 +906,8 @@ define_request_and_response! { banned, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2069..=2094, - Banned, + + Banned (restricted), #[doc = serde_doc_test!( BANNED_REQUEST => BannedRequest { @@ -920,7 +936,8 @@ define_request_and_response! { flush_txpool, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2096..=2116, - FlushTransactionPool, + + FlushTransactionPool (restricted), #[doc = serde_doc_test!( FLUSH_TRANSACTION_POOL_REQUEST => FlushTransactionPoolRequest { @@ -986,7 +1003,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2213..=2248, - GetCoinbaseTxSum, + GetCoinbaseTxSum (restricted), #[doc = serde_doc_test!( GET_COINBASE_TX_SUM_REQUEST => GetCoinbaseTxSumRequest { @@ -1025,7 +1042,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2170..=2211, - GetVersion, + GetVersion (empty), Request {}, #[doc = serde_doc_test!( @@ -1116,7 +1133,7 @@ define_request_and_response! { get_fee_estimate, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2250..=2277, - GetFeeEstimate, + GetFeeEstimate (empty), Request {}, #[doc = serde_doc_test!( @@ -1138,7 +1155,7 @@ define_request_and_response! 
{ get_alternate_chains, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2279..=2310, - GetAlternateChains, + GetAlternateChains (restricted, empty), Request {}, #[doc = serde_doc_test!( @@ -1178,7 +1195,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2361..=2381, - RelayTx, + RelayTx (restricted), #[doc = serde_doc_test!( RELAY_TX_REQUEST => RelayTxRequest { @@ -1205,7 +1222,8 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2383..=2443, - SyncInfo, + SyncInfo (restricted, empty), + Request {}, #[doc = serde_doc_test!( @@ -1294,7 +1312,7 @@ define_request_and_response! { get_txpool_backlog, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1637..=1664, - GetTransactionPoolBacklog, + GetTransactionPoolBacklog (empty), Request {}, // TODO: enable test after binary string impl. @@ -1361,7 +1379,7 @@ define_request_and_response! { get_miner_data, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 996..=1044, - GetMinerData, + GetMinerData (empty), Request {}, #[doc = serde_doc_test!( @@ -1405,7 +1423,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2747..=2772, - PruneBlockchain, + PruneBlockchain (restricted), #[derive(Copy)] #[doc = serde_doc_test!( @@ -1435,7 +1453,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1046..=1066, - CalcPow, + CalcPow (restricted), #[doc = serde_doc_test!( CALC_POW_REQUEST => CalcPowRequest { @@ -1469,7 +1487,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2774..=2796, - FlushCache, + FlushCache (restricted), #[derive(Copy)] #[doc = serde_doc_test!( @@ -1534,6 +1552,203 @@ define_request_and_response! 
{ } } +define_request_and_response! { + UNDOCUMENTED_METHOD, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 2798..=2823, + + GetTxIdsLoose, + + Request { + txid_template: String, + num_matching_bits: u32, + }, + ResponseBase { + txids: Vec<String>, + } +} + +//---------------------------------------------------------------------------------------------------- Request +/// JSON-RPC requests. +/// +/// This enum contains all [`crate::json`] requests. +/// +/// See also: [`JsonRpcResponse`]. +/// +/// TODO: document and test (de)serialization behavior after figuring out `method/params`. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +#[cfg_attr( + feature = "serde", + serde(rename_all = "snake_case", tag = "method", content = "params") +)] +#[allow(missing_docs)] +pub enum JsonRpcRequest { + GetBlockCount(GetBlockCountRequest), + OnGetBlockHash(OnGetBlockHashRequest), + SubmitBlock(SubmitBlockRequest), + GenerateBlocks(GenerateBlocksRequest), + GetLastBlockHeader(GetLastBlockHeaderRequest), + GetBlockHeaderByHash(GetBlockHeaderByHashRequest), + GetBlockHeaderByHeight(GetBlockHeaderByHeightRequest), + GetBlockHeadersRange(GetBlockHeadersRangeRequest), + GetBlock(GetBlockRequest), + GetConnections(GetConnectionsRequest), + GetInfo(GetInfoRequest), + HardForkInfo(HardForkInfoRequest), + SetBans(SetBansRequest), + GetBans(GetBansRequest), + Banned(BannedRequest), + FlushTransactionPool(FlushTransactionPoolRequest), + GetOutputHistogram(GetOutputHistogramRequest), + GetCoinbaseTxSum(GetCoinbaseTxSumRequest), + GetVersion(GetVersionRequest), + GetFeeEstimate(GetFeeEstimateRequest), + GetAlternateChains(GetAlternateChainsRequest), + RelayTx(RelayTxRequest), + SyncInfo(SyncInfoRequest), + GetTransactionPoolBacklog(GetTransactionPoolBacklogRequest), + GetMinerData(GetMinerDataRequest), + PruneBlockchain(PruneBlockchainRequest), + CalcPow(CalcPowRequest), + 
FlushCache(FlushCacheRequest), + AddAuxPow(AddAuxPowRequest), + GetTxIdsLoose(GetTxIdsLooseRequest), +} + +impl RpcCallValue for JsonRpcRequest { + fn is_restricted(&self) -> bool { + match self { + Self::GetBlockCount(x) => x.is_restricted(), + Self::OnGetBlockHash(x) => x.is_restricted(), + Self::SubmitBlock(x) => x.is_restricted(), + Self::GetLastBlockHeader(x) => x.is_restricted(), + Self::GetBlockHeaderByHash(x) => x.is_restricted(), + Self::GetBlockHeaderByHeight(x) => x.is_restricted(), + Self::GetBlockHeadersRange(x) => x.is_restricted(), + Self::GetBlock(x) => x.is_restricted(), + Self::GetInfo(x) => x.is_restricted(), + Self::HardForkInfo(x) => x.is_restricted(), + Self::GetOutputHistogram(x) => x.is_restricted(), + Self::GetVersion(x) => x.is_restricted(), + Self::GetFeeEstimate(x) => x.is_restricted(), + Self::GetTransactionPoolBacklog(x) => x.is_restricted(), + Self::GetMinerData(x) => x.is_restricted(), + Self::AddAuxPow(x) => x.is_restricted(), + Self::GetTxIdsLoose(x) => x.is_restricted(), + Self::GenerateBlocks(x) => x.is_restricted(), + Self::GetConnections(x) => x.is_restricted(), + Self::SetBans(x) => x.is_restricted(), + Self::GetBans(x) => x.is_restricted(), + Self::Banned(x) => x.is_restricted(), + Self::FlushTransactionPool(x) => x.is_restricted(), + Self::GetCoinbaseTxSum(x) => x.is_restricted(), + Self::GetAlternateChains(x) => x.is_restricted(), + Self::RelayTx(x) => x.is_restricted(), + Self::SyncInfo(x) => x.is_restricted(), + Self::PruneBlockchain(x) => x.is_restricted(), + Self::CalcPow(x) => x.is_restricted(), + Self::FlushCache(x) => x.is_restricted(), + } + } + + fn is_empty(&self) -> bool { + match self { + Self::GetBlockCount(x) => x.is_empty(), + Self::OnGetBlockHash(x) => x.is_empty(), + Self::SubmitBlock(x) => x.is_empty(), + Self::GetLastBlockHeader(x) => x.is_empty(), + Self::GetBlockHeaderByHash(x) => x.is_empty(), + Self::GetBlockHeaderByHeight(x) => x.is_empty(), + Self::GetBlockHeadersRange(x) => x.is_empty(), + 
Self::GetBlock(x) => x.is_empty(), + Self::GetInfo(x) => x.is_empty(), + Self::HardForkInfo(x) => x.is_empty(), + Self::GetOutputHistogram(x) => x.is_empty(), + Self::GetVersion(x) => x.is_empty(), + Self::GetFeeEstimate(x) => x.is_empty(), + Self::GetTransactionPoolBacklog(x) => x.is_empty(), + Self::GetMinerData(x) => x.is_empty(), + Self::AddAuxPow(x) => x.is_empty(), + Self::GetTxIdsLoose(x) => x.is_empty(), + Self::GenerateBlocks(x) => x.is_empty(), + Self::GetConnections(x) => x.is_empty(), + Self::SetBans(x) => x.is_empty(), + Self::GetBans(x) => x.is_empty(), + Self::Banned(x) => x.is_empty(), + Self::FlushTransactionPool(x) => x.is_empty(), + Self::GetCoinbaseTxSum(x) => x.is_empty(), + Self::GetAlternateChains(x) => x.is_empty(), + Self::RelayTx(x) => x.is_empty(), + Self::SyncInfo(x) => x.is_empty(), + Self::PruneBlockchain(x) => x.is_empty(), + Self::CalcPow(x) => x.is_empty(), + Self::FlushCache(x) => x.is_empty(), + } + } +} + +//---------------------------------------------------------------------------------------------------- Response +/// JSON-RPC responses. +/// +/// This enum contains all [`crate::json`] responses. +/// +/// See also: [`JsonRpcRequest`]. +/// +/// # (De)serialization +/// The `serde` implementation will (de)serialize from +/// the inner variant itself, e.g. [`JsonRpcRequest::Banned`] +/// has the same (de)serialization as [`BannedResponse`]. 
+/// +/// ```rust +/// use cuprate_rpc_types::{misc::*, json::*}; +/// +/// let response = JsonRpcResponse::Banned(BannedResponse { +/// banned: true, +/// seconds: 123, +/// status: Status::Ok, +/// }); +/// let json = serde_json::to_string(&response).unwrap(); +/// assert_eq!(json, r#"{"banned":true,"seconds":123,"status":"OK"}"#); +/// let response: JsonRpcResponse = serde_json::from_str(&json).unwrap(); +/// ``` +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +#[cfg_attr(feature = "serde", serde(untagged, rename_all = "snake_case"))] +#[allow(missing_docs)] +pub enum JsonRpcResponse { + GetBlockCount(GetBlockCountResponse), + OnGetBlockHash(OnGetBlockHashResponse), + SubmitBlock(SubmitBlockResponse), + GenerateBlocks(GenerateBlocksResponse), + GetLastBlockHeader(GetLastBlockHeaderResponse), + GetBlockHeaderByHash(GetBlockHeaderByHashResponse), + GetBlockHeaderByHeight(GetBlockHeaderByHeightResponse), + GetBlockHeadersRange(GetBlockHeadersRangeResponse), + GetBlock(GetBlockResponse), + GetConnections(GetConnectionsResponse), + GetInfo(GetInfoResponse), + HardForkInfo(HardForkInfoResponse), + SetBans(SetBansResponse), + GetBans(GetBansResponse), + Banned(BannedResponse), + FlushTransactionPool(FlushTransactionPoolResponse), + GetOutputHistogram(GetOutputHistogramResponse), + GetCoinbaseTxSum(GetCoinbaseTxSumResponse), + GetVersion(GetVersionResponse), + GetFeeEstimate(GetFeeEstimateResponse), + GetAlternateChains(GetAlternateChainsResponse), + RelayTx(RelayTxResponse), + SyncInfo(SyncInfoResponse), + GetTransactionPoolBacklog(GetTransactionPoolBacklogResponse), + GetMinerData(GetMinerDataResponse), + PruneBlockchain(PruneBlockchainResponse), + CalcPow(CalcPowResponse), + FlushCache(FlushCacheResponse), + AddAuxPow(AddAuxPowResponse), + GetTxIdsLoose(GetTxIdsLooseResponse), +} + //---------------------------------------------------------------------------------------------------- Tests 
#[cfg(test)] mod test { diff --git a/rpc/types/src/lib.rs b/rpc/types/src/lib.rs index d0d1e00..b48f22e 100644 --- a/rpc/types/src/lib.rs +++ b/rpc/types/src/lib.rs @@ -112,6 +112,7 @@ mod constants; mod defaults; mod free; mod macros; +mod rpc_call; #[cfg(feature = "serde")] mod serde; @@ -127,3 +128,4 @@ pub use constants::{ CORE_RPC_STATUS_PAYMENT_REQUIRED, CORE_RPC_VERSION, CORE_RPC_VERSION_MAJOR, CORE_RPC_VERSION_MINOR, }; +pub use rpc_call::{RpcCall, RpcCallValue}; diff --git a/rpc/types/src/macros.rs b/rpc/types/src/macros.rs index fa0d518..60ffa90 100644 --- a/rpc/types/src/macros.rs +++ b/rpc/types/src/macros.rs @@ -60,7 +60,14 @@ macro_rules! define_request_and_response { // Attributes added here will apply to _both_ // request and response types. $( #[$type_attr:meta] )* - $type_name:ident, + // After the type name, 2 optional idents are allowed: + // + // - `restricted` + // - `empty` + // + // These have to be within `()` and will affect the + // [`crate::RpcCall`] implementation on the request type. + $type_name:ident $(($restricted:ident $(, $empty:ident)?))?, // The request type (and any doc comments, derives, etc). $( #[$request_type_attr:meta] )* @@ -100,7 +107,7 @@ macro_rules! define_request_and_response { $( #[$type_attr] )* /// $( #[$request_type_attr] )* - [<$type_name Request>] { + [<$type_name Request>] $(($restricted $(, $empty)?))? { $( $( #[$request_field_attr] )* $request_field: $request_field_type @@ -141,6 +148,69 @@ macro_rules! define_request_and_response { } pub(crate) use define_request_and_response; +//---------------------------------------------------------------------------------------------------- impl_rpc_call +/// Implement [`crate::RpcCall`] and [`crate::RpcCallValue`] on request types. +/// +/// Input for this is: +/// `$REQUEST_TYPE restricted empty` +/// where `restricted` and `empty` are the idents themselves. +/// The implementation for [`crate::RpcCall`] will change +/// depending if they exist or not. +macro_rules! 
impl_rpc_call { + // Restricted and empty RPC calls. + ($t:ident, restricted, empty) => { + impl $crate::RpcCall for $t { + const IS_RESTRICTED: bool = true; + const IS_EMPTY: bool = true; + } + + impl From<()> for $t { + fn from(_: ()) -> Self { + Self {} + } + } + + impl From<$t> for () { + fn from(_: $t) -> Self {} + } + }; + + // Empty RPC calls. + ($t:ident, empty) => { + impl $crate::RpcCall for $t { + const IS_RESTRICTED: bool = false; + const IS_EMPTY: bool = true; + } + + impl From<()> for $t { + fn from(_: ()) -> Self { + Self {} + } + } + + impl From<$t> for () { + fn from(_: $t) -> Self {} + } + }; + + // Restricted RPC calls. + ($t:ident, restricted) => { + impl $crate::RpcCall for $t { + const IS_RESTRICTED: bool = true; + const IS_EMPTY: bool = false; + } + }; + + // Not restrict or empty RPC calls. + ($t:ident) => { + impl $crate::RpcCall for $t { + const IS_RESTRICTED: bool = false; + const IS_EMPTY: bool = false; + } + }; +} +pub(crate) use impl_rpc_call; + //---------------------------------------------------------------------------------------------------- define_request /// Define a request type. /// @@ -152,22 +222,7 @@ macro_rules! define_request { // Any doc comments, derives, etc. $( #[$attr:meta] )* // The response type. - $t:ident {} - ) => { - $( #[$attr] )* - /// - /// This request has no inputs. - pub type $t = (); - }; - - //------------------------------------------------------------------------------ - // This branch of the macro expects fields within the `{}`, - // and will generate a `struct` - ( - // Any doc comments, derives, etc. - $( #[$attr:meta] )* - // The response type. - $t:ident { + $t:ident $(($restricted:ident $(, $empty:ident)?))? { // And any fields. $( $( #[$field_attr:meta] )* // field attributes @@ -193,6 +248,8 @@ macro_rules! define_request { )* } + $crate::macros::impl_rpc_call!($t $(, $restricted $(, $empty)?)?); + #[cfg(feature = "epee")] ::cuprate_epee_encoding::epee_object! 
{ $t, diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index c140777..9457250 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -3,6 +3,9 @@ //! All types are originally defined in [`rpc/core_rpc_server_commands_defs.h`](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h). //---------------------------------------------------------------------------------------------------- Import +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + use crate::{ base::{AccessResponseBase, ResponseBase}, defaults::{default_false, default_string, default_true, default_vec, default_zero}, @@ -11,6 +14,8 @@ use crate::{ GetOutputsOut, KeyImageSpentStatus, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status, TxEntry, TxInfo, TxpoolStats, }, + rpc_call::RpcCall, + RpcCallValue, }; //---------------------------------------------------------------------------------------------------- Macro @@ -93,7 +98,7 @@ define_request_and_response! { get_height, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 138..=160, - GetHeight, + GetHeight (empty), Request {}, #[doc = serde_doc_test!( @@ -146,7 +151,7 @@ define_request_and_response! { get_alt_blocks_hashes, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 288..=308, - GetAltBlocksHashes, + GetAltBlocksHashes (empty), Request {}, #[doc = serde_doc_test!( @@ -258,7 +263,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 665..=691, - StartMining, + StartMining (restricted), #[doc = serde_doc_test!( START_MINING_REQUEST => StartMiningRequest { @@ -287,7 +292,7 @@ define_request_and_response! 
{ stop_mining, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 825..=843, - StopMining, + StopMining (restricted, empty), Request {}, #[doc = serde_doc_test!( @@ -302,7 +307,7 @@ define_request_and_response! { mining_status, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 846..=895, - MiningStatus, + MiningStatus (restricted), Request {}, #[doc = serde_doc_test!( @@ -348,7 +353,7 @@ define_request_and_response! { save_bc, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 898..=916, - SaveBc, + SaveBc (restricted), Request {}, #[doc = serde_doc_test!( @@ -364,7 +369,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1369..=1417, - GetPeerList, + GetPeerList (restricted), #[doc = serde_doc_test!( GET_PEER_LIST_REQUEST => GetPeerListRequest { @@ -446,7 +451,8 @@ define_request_and_response! { set_log_hash_rate, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1450..=1470, - SetLogHashRate, + + SetLogHashRate (restricted), #[derive(Copy)] #[doc = serde_doc_test!( @@ -471,7 +477,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1450..=1470, - SetLogLevel, + SetLogLevel (restricted), #[derive(Copy)] #[doc = serde_doc_test!( @@ -496,7 +502,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1494..=1517, - SetLogCategories, + SetLogCategories (restricted), #[doc = serde_doc_test!( SET_LOG_CATEGORIES_REQUEST => SetLogCategoriesRequest { @@ -523,7 +529,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1785..=1812, - SetBootstrapDaemon, + SetBootstrapDaemon (restricted), #[doc = serde_doc_test!( SET_BOOTSTRAP_DAEMON_REQUEST => SetBootstrapDaemonRequest { @@ -555,7 +561,7 @@ define_request_and_response! 
{ cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1569..=1591, - GetTransactionPool, + GetTransactionPool (empty), Request {}, #[doc = serde_doc_test!(GET_TRANSACTION_POOL_RESPONSE)] @@ -570,7 +576,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1712..=1732, - GetTransactionPoolStats, + GetTransactionPoolStats (empty), Request {}, #[doc = serde_doc_test!( @@ -614,7 +620,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1814..=1831, - StopDaemon, + StopDaemon (restricted, empty), Request {}, #[doc = serde_doc_test!( @@ -632,7 +638,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1852..=1874, - GetLimit, + GetLimit (empty), Request {}, #[doc = serde_doc_test!( @@ -653,7 +659,8 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1876..=1903, - SetLimit, + SetLimit (restricted), + #[doc = serde_doc_test!( SET_LIMIT_REQUEST => SetLimitRequest { limit_down: 1024, @@ -684,7 +691,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1876..=1903, - OutPeers, + OutPeers (restricted), #[doc = serde_doc_test!( OUT_PEERS_REQUEST => OutPeersRequest { @@ -708,12 +715,26 @@ define_request_and_response! { } } +define_request_and_response! { + in_peers, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 1932..=1956, + InPeers (restricted), + Request { + set: bool = default_true(), "default_true", + in_peers: u32, + }, + ResponseBase { + in_peers: u32, + } +} + define_request_and_response! 
{ get_net_stats, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 793..=822, - GetNetStats, + GetNetStats (restricted, empty), Request {}, #[doc = serde_doc_test!( @@ -786,7 +807,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2324..=2359, - Update, + Update (restricted), #[doc = serde_doc_test!( UPDATE_REQUEST => UpdateRequest { @@ -825,7 +846,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2722..=2745, - PopBlocks, + PopBlocks (restricted), #[doc = serde_doc_test!( POP_BLOCKS_REQUEST => PopBlocksRequest { @@ -852,7 +873,7 @@ define_request_and_response! { cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1615..=1635, - GetTransactionPoolHashes, + GetTransactionPoolHashes (empty), Request {}, #[doc = serde_doc_test!( @@ -889,7 +910,8 @@ define_request_and_response! { UNDOCUMENTED_ENDPOINT, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 1419..=1448, - GetPublicNodes, + + GetPublicNodes (restricted), #[doc = serde_doc_test!( GET_PUBLIC_NODES_REQUEST => GetPublicNodesRequest { @@ -930,6 +952,178 @@ define_request_and_response! { } } +//---------------------------------------------------------------------------------------------------- Request +/// Other JSON requests. +/// +/// This enum contains all [`crate::other`] requests. +/// +/// See also: [`OtherResponse`]. +/// +/// # (De)serialization +/// The `serde` implementation will (de)serialize from +/// the inner variant itself, e.g. [`OtherRequest::SetLogLevel`] +/// has the same (de)serialization as [`SetLogLevelRequest`]. 
+/// +/// ```rust +/// use cuprate_rpc_types::other::*; +/// +/// let request = OtherRequest::SetLogLevel(Default::default()); +/// let json = serde_json::to_string(&request).unwrap(); +/// assert_eq!(json, r#"{"level":0}"#); +/// ``` +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +#[cfg_attr(feature = "serde", serde(untagged))] +#[allow(missing_docs)] +pub enum OtherRequest { + GetHeight(GetHeightRequest), + GetTransactions(GetTransactionsRequest), + GetAltBlocksHashes(GetAltBlocksHashesRequest), + IsKeyImageSpent(IsKeyImageSpentRequest), + SendRawTransaction(SendRawTransactionRequest), + StartMining(StartMiningRequest), + StopMining(StopMiningRequest), + MiningStatus(MiningStatusRequest), + SaveBc(SaveBcRequest), + GetPeerList(GetPeerListRequest), + SetLogHashRate(SetLogHashRateRequest), + SetLogLevel(SetLogLevelRequest), + SetLogCategories(SetLogCategoriesRequest), + SetBootstrapDaemon(SetBootstrapDaemonRequest), + GetTransactionPool(GetTransactionPoolRequest), + GetTransactionPoolStats(GetTransactionPoolStatsRequest), + StopDaemon(StopDaemonRequest), + GetLimit(GetLimitRequest), + SetLimit(SetLimitRequest), + OutPeers(OutPeersRequest), + InPeers(InPeersRequest), + GetNetStats(GetNetStatsRequest), + GetOuts(GetOutsRequest), + Update(UpdateRequest), + PopBlocks(PopBlocksRequest), + GetTransactionPoolHashes(GetTransactionPoolHashesRequest), + GetPublicNodes(GetPublicNodesRequest), +} + +impl RpcCallValue for OtherRequest { + fn is_restricted(&self) -> bool { + match self { + Self::GetHeight(x) => x.is_restricted(), + Self::GetTransactions(x) => x.is_restricted(), + Self::GetAltBlocksHashes(x) => x.is_restricted(), + Self::IsKeyImageSpent(x) => x.is_restricted(), + Self::SendRawTransaction(x) => x.is_restricted(), + Self::StartMining(x) => x.is_restricted(), + Self::StopMining(x) => x.is_restricted(), + Self::MiningStatus(x) => x.is_restricted(), + Self::SaveBc(x) => x.is_restricted(), 
+ Self::GetPeerList(x) => x.is_restricted(), + Self::SetLogHashRate(x) => x.is_restricted(), + Self::SetLogLevel(x) => x.is_restricted(), + Self::SetLogCategories(x) => x.is_restricted(), + Self::SetBootstrapDaemon(x) => x.is_restricted(), + Self::GetTransactionPool(x) => x.is_restricted(), + Self::GetTransactionPoolStats(x) => x.is_restricted(), + Self::StopDaemon(x) => x.is_restricted(), + Self::GetLimit(x) => x.is_restricted(), + Self::SetLimit(x) => x.is_restricted(), + Self::OutPeers(x) => x.is_restricted(), + Self::InPeers(x) => x.is_restricted(), + Self::GetNetStats(x) => x.is_restricted(), + Self::GetOuts(x) => x.is_restricted(), + Self::Update(x) => x.is_restricted(), + Self::PopBlocks(x) => x.is_restricted(), + Self::GetTransactionPoolHashes(x) => x.is_restricted(), + Self::GetPublicNodes(x) => x.is_restricted(), + } + } + + fn is_empty(&self) -> bool { + match self { + Self::GetHeight(x) => x.is_empty(), + Self::GetTransactions(x) => x.is_empty(), + Self::GetAltBlocksHashes(x) => x.is_empty(), + Self::IsKeyImageSpent(x) => x.is_empty(), + Self::SendRawTransaction(x) => x.is_empty(), + Self::StartMining(x) => x.is_empty(), + Self::StopMining(x) => x.is_empty(), + Self::MiningStatus(x) => x.is_empty(), + Self::SaveBc(x) => x.is_empty(), + Self::GetPeerList(x) => x.is_empty(), + Self::SetLogHashRate(x) => x.is_empty(), + Self::SetLogLevel(x) => x.is_empty(), + Self::SetLogCategories(x) => x.is_empty(), + Self::SetBootstrapDaemon(x) => x.is_empty(), + Self::GetTransactionPool(x) => x.is_empty(), + Self::GetTransactionPoolStats(x) => x.is_empty(), + Self::StopDaemon(x) => x.is_empty(), + Self::GetLimit(x) => x.is_empty(), + Self::SetLimit(x) => x.is_empty(), + Self::OutPeers(x) => x.is_empty(), + Self::InPeers(x) => x.is_empty(), + Self::GetNetStats(x) => x.is_empty(), + Self::GetOuts(x) => x.is_empty(), + Self::Update(x) => x.is_empty(), + Self::PopBlocks(x) => x.is_empty(), + Self::GetTransactionPoolHashes(x) => x.is_empty(), + Self::GetPublicNodes(x) => 
x.is_empty(), + } + } +} + +//---------------------------------------------------------------------------------------------------- Response +/// Other JSON responses. +/// +/// This enum contains all [`crate::other`] responses. +/// +/// See also: [`OtherRequest`]. +/// +/// # (De)serialization +/// The `serde` implementation will (de)serialize from +/// the inner variant itself, e.g. [`OtherRequest::SetBootstrapDaemon`] +/// has the same (de)serialization as [`SetBootstrapDaemonResponse`]. +/// +/// ```rust +/// use cuprate_rpc_types::other::*; +/// +/// let response = OtherResponse::SetBootstrapDaemon(Default::default()); +/// let json = serde_json::to_string(&response).unwrap(); +/// assert_eq!(json, r#"{"status":"OK"}"#); +/// ``` +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +#[cfg_attr(feature = "serde", serde(untagged))] +#[allow(missing_docs)] +pub enum OtherResponse { + GetHeight(GetHeightResponse), + GetTransactions(GetTransactionsResponse), + GetAltBlocksHashes(GetAltBlocksHashesResponse), + IsKeyImageSpent(IsKeyImageSpentResponse), + SendRawTransaction(SendRawTransactionResponse), + StartMining(StartMiningResponse), + StopMining(StopMiningResponse), + MiningStatus(MiningStatusResponse), + SaveBc(SaveBcResponse), + GetPeerList(GetPeerListResponse), + SetLogHashRate(SetLogHashRateResponse), + SetLogLevel(SetLogLevelResponse), + SetLogCategories(SetLogCategoriesResponse), + SetBootstrapDaemon(SetBootstrapDaemonResponse), + GetTransactionPool(GetTransactionPoolResponse), + GetTransactionPoolStats(GetTransactionPoolStatsResponse), + StopDaemon(StopDaemonResponse), + GetLimit(GetLimitResponse), + SetLimit(SetLimitResponse), + OutPeers(OutPeersResponse), + InPeers(InPeersResponse), + GetNetStats(GetNetStatsResponse), + GetOuts(GetOutsResponse), + Update(UpdateResponse), + PopBlocks(PopBlocksResponse), + GetTransactionPoolHashes(GetTransactionPoolHashesResponse), + 
GetPublicNodes(GetPublicNodesResponse), +} + //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test { diff --git a/rpc/types/src/rpc_call.rs b/rpc/types/src/rpc_call.rs new file mode 100644 index 0000000..5fb742e --- /dev/null +++ b/rpc/types/src/rpc_call.rs @@ -0,0 +1,96 @@ +//! RPC call metadata. + +//---------------------------------------------------------------------------------------------------- Import + +//---------------------------------------------------------------------------------------------------- RpcCall +/// Metadata about an RPC call. +/// +/// This trait describes some metadata about RPC requests. +/// +/// It is implemented on all request types within: +/// - [`crate::json`] +/// - [`crate::other`] +/// - [`crate::bin`] +/// +/// See also [`RpcCallValue`] for a dynamic by-value version of this trait. +pub trait RpcCall { + /// Is `true` if this RPC method should + /// only be allowed on local servers. + /// + /// If this is `false`, it should be + /// okay to execute the method even on restricted + /// RPC servers. + /// + /// ```rust + /// use cuprate_rpc_types::{RpcCall, json::*}; + /// + /// // Allowed method, even on restricted RPC servers (18089). + /// assert!(!GetBlockCountRequest::IS_RESTRICTED); + /// + /// // Restricted methods, only allowed + /// // for unrestricted RPC servers (18081). + /// assert!(GetConnectionsRequest::IS_RESTRICTED); + /// ``` + const IS_RESTRICTED: bool; + + /// Is `true` if this RPC method has no inputs, i.e. it is a `struct` with no fields. + /// + /// ```rust + /// use cuprate_rpc_types::{RpcCall, json::*}; + /// + /// assert!(GetBlockCountRequest::IS_EMPTY); + /// assert!(!OnGetBlockHashRequest::IS_EMPTY); + /// ``` + const IS_EMPTY: bool; +} + +//---------------------------------------------------------------------------------------------------- RpcCallValue +/// By-value version of [`RpcCall`]. 
+/// +/// This trait is a mirror of [`RpcCall`], +/// except it takes `self` by value instead +/// of being a `const` property. +/// +/// This exists for `enum`s where requests must be dynamically +/// `match`ed like [`JsonRpcRequest`](crate::json::JsonRpcRequest). +/// +/// All types that implement [`RpcCall`] automatically implement [`RpcCallValue`]. +pub trait RpcCallValue { + /// Same as [`RpcCall::IS_RESTRICTED`]. + /// + /// ```rust + /// use cuprate_rpc_types::{RpcCallValue, json::*}; + /// + /// assert!(!GetBlockCountRequest::default().is_restricted()); + /// assert!(GetConnectionsRequest::default().is_restricted()); + /// ``` + fn is_restricted(&self) -> bool; + + /// Same as [`RpcCall::IS_EMPTY`]. + /// + /// ```rust + /// use cuprate_rpc_types::{RpcCallValue, json::*}; + /// + /// assert!(GetBlockCountRequest::default().is_empty()); + /// assert!(!OnGetBlockHashRequest::default().is_empty()); + /// ``` + fn is_empty(&self) -> bool; +} + +impl RpcCallValue for T { + #[inline] + fn is_restricted(&self) -> bool { + Self::IS_RESTRICTED + } + + #[inline] + fn is_empty(&self) -> bool { + Self::IS_EMPTY + } +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} From 1a178381dd0fd4d8436e86f5e03dbe475e91b24b Mon Sep 17 00:00:00 2001 From: Boog900 Date: Mon, 5 Aug 2024 20:47:30 +0000 Subject: [PATCH 029/104] Storage: split the DB service abstraction (#237) * split the DB service abstraction * fix ci * misc changes * Apply suggestions from code review Co-authored-by: hinto-janai * review fixes * Update storage/service/Cargo.toml Co-authored-by: hinto-janai * Update storage/service/Cargo.toml Co-authored-by: hinto-janai * fix clippy * fix doc * `bc` -> `blockchain` * doc fixes * Update storage/service/README.md Co-authored-by: hinto-janai * cargo fmt --------- Co-authored-by: hinto-janai --- Cargo.lock | 69 ++--- Cargo.toml | 2 +- 
consensus/fast-sync/src/create.rs | 12 +- consensus/src/context/alt_chains.rs | 15 +- consensus/src/context/difficulty.rs | 6 +- consensus/src/context/hardforks.rs | 10 +- consensus/src/context/rx_vms.rs | 10 +- consensus/src/context/task.rs | 21 +- consensus/src/context/weight.rs | 14 +- consensus/src/lib.rs | 21 +- consensus/src/tests/mock_db.rs | 26 +- consensus/src/transactions.rs | 12 +- consensus/src/transactions/contextual_data.rs | 14 +- consensus/tests/verify_correct_txs.rs | 14 +- storage/blockchain/Cargo.toml | 20 +- storage/blockchain/src/config/backend.rs | 31 --- storage/blockchain/src/config/config.rs | 3 +- storage/blockchain/src/config/mod.rs | 9 +- storage/blockchain/src/config/sync_mode.rs | 135 ---------- storage/blockchain/src/service/free.rs | 22 +- storage/blockchain/src/service/mod.rs | 38 +-- storage/blockchain/src/service/read.rs | 254 +++++------------- storage/blockchain/src/service/tests.rs | 74 ++--- storage/blockchain/src/service/types.rs | 30 +-- storage/blockchain/src/service/write.rs | 211 ++------------- storage/service/Cargo.toml | 22 ++ storage/service/README.md | 7 + storage/service/src/lib.rs | 8 + .../config => service/src}/reader_threads.rs | 39 ++- storage/service/src/service.rs | 5 + storage/service/src/service/read.rs | 95 +++++++ storage/service/src/service/write.rs | 178 ++++++++++++ types/src/blockchain.rs | 48 ++-- 33 files changed, 666 insertions(+), 809 deletions(-) delete mode 100644 storage/blockchain/src/config/backend.rs delete mode 100644 storage/blockchain/src/config/sync_mode.rs create mode 100644 storage/service/Cargo.toml create mode 100644 storage/service/README.md create mode 100644 storage/service/src/lib.rs rename storage/{blockchain/src/config => service/src}/reader_threads.rs (84%) create mode 100644 storage/service/src/service.rs create mode 100644 storage/service/src/service/read.rs create mode 100644 storage/service/src/service/write.rs diff --git a/Cargo.lock b/Cargo.lock index 32a5cbd..eaf5f99 
100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -29,12 +29,6 @@ dependencies = [ "zerocopy", ] -[[package]] -name = "allocator-api2" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" - [[package]] name = "android-tzdata" version = "0.1.1" @@ -471,7 +465,7 @@ dependencies = [ "cuprate-test-utils", "cuprate-wire", "futures", - "indexmap 2.2.6", + "indexmap", "rand", "thiserror", "tokio", @@ -496,14 +490,13 @@ version = "0.0.0" dependencies = [ "bitflags 2.5.0", "bytemuck", - "crossbeam", "cuprate-database", + "cuprate-database-service", "cuprate-helper", "cuprate-pruning", "cuprate-test-utils", "cuprate-types", "curve25519-dalek", - "futures", "hex", "hex-literal", "monero-serai", @@ -514,7 +507,6 @@ dependencies = [ "tempfile", "thread_local", "tokio", - "tokio-util", "tower", ] @@ -609,6 +601,19 @@ dependencies = [ "thiserror", ] +[[package]] +name = "cuprate-database-service" +version = "0.1.0" +dependencies = [ + "crossbeam", + "cuprate-database", + "cuprate-helper", + "futures", + "rayon", + "serde", + "tower", +] + [[package]] name = "cuprate-epee-encoding" version = "0.5.0" @@ -709,7 +714,7 @@ dependencies = [ "dashmap", "futures", "hex", - "indexmap 2.2.6", + "indexmap", "monero-serai", "pin-project", "proptest", @@ -882,7 +887,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if", - "hashbrown 0.14.5", + "hashbrown", "lock_api", "once_cell", "parking_lot_core", @@ -1174,12 +1179,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - [[package]] name = "hashbrown" version = "0.14.5" @@ -1187,17 +1186,6 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", - "allocator-api2", -] - -[[package]] -name = "hdrhistogram" -version = "7.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" -dependencies = [ - "byteorder", - "num-traits", ] [[package]] @@ -1521,16 +1509,6 @@ dependencies = [ "utf8_iter", ] -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", -] - [[package]] name = "indexmap" version = "2.2.6" @@ -1538,7 +1516,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown", ] [[package]] @@ -2464,7 +2442,7 @@ name = "std-shims" version = "0.1.1" source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1" dependencies = [ - "hashbrown 0.14.5", + "hashbrown", "spin", ] @@ -2669,10 +2647,7 @@ checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", - "futures-io", "futures-sink", - "futures-util", - "hashbrown 0.14.5", "pin-project-lite", "slab", "tokio", @@ -2691,7 +2666,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.6", + "indexmap", "toml_datetime", "winnow", ] @@ -2704,12 +2679,8 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", - "hdrhistogram", - "indexmap 1.9.3", "pin-project", 
"pin-project-lite", - "rand", - "slab", "tokio", "tokio-util", "tower-layer", diff --git a/Cargo.toml b/Cargo.toml index 22a1585..da82d9e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,6 +17,7 @@ members = [ "p2p/async-buffer", "p2p/address-book", "storage/blockchain", + "storage/service", "storage/txpool", "storage/database", "pruning", @@ -243,7 +244,6 @@ try_err = "deny" lossy_float_literal = "deny" let_underscore_must_use = "deny" iter_over_hash_type = "deny" -impl_trait_in_params = "deny" get_unwrap = "deny" error_impl_error = "deny" empty_structs_with_brackets = "deny" diff --git a/consensus/fast-sync/src/create.rs b/consensus/fast-sync/src/create.rs index adae100..8d4f9a6 100644 --- a/consensus/fast-sync/src/create.rs +++ b/consensus/fast-sync/src/create.rs @@ -4,10 +4,10 @@ use clap::Parser; use tower::{Service, ServiceExt}; use cuprate_blockchain::{ - config::ConfigBuilder, cuprate_database::RuntimeError, service::DatabaseReadHandle, + config::ConfigBuilder, cuprate_database::RuntimeError, service::BlockchainReadHandle, }; use cuprate_types::{ - blockchain::{BCReadRequest, BCResponse}, + blockchain::{BlockchainReadRequest, BlockchainResponse}, Chain, }; @@ -16,18 +16,18 @@ use cuprate_fast_sync::{hash_of_hashes, BlockId, HashOfHashes}; const BATCH_SIZE: u64 = 512; async fn read_batch( - handle: &mut DatabaseReadHandle, + handle: &mut BlockchainReadHandle, height_from: u64, ) -> Result, RuntimeError> { let mut block_ids = Vec::::with_capacity(BATCH_SIZE as usize); for height in height_from..(height_from + BATCH_SIZE) { - let request = BCReadRequest::BlockHash(height, Chain::Main); + let request = BlockchainReadRequest::BlockHash(height, Chain::Main); let response_channel = handle.ready().await?.call(request); let response = response_channel.await?; match response { - BCResponse::BlockHash(block_id) => block_ids.push(block_id), + BlockchainResponse::BlockHash(block_id) => block_ids.push(block_id), _ => unreachable!(), } } @@ -63,7 +63,7 @@ async fn main() { 
let config = ConfigBuilder::new().build(); - let (mut read_handle, _) = cuprate_blockchain::service::init(config).unwrap(); + let (mut read_handle, _, _) = cuprate_blockchain::service::init(config).unwrap(); let mut hashes_of_hashes = Vec::new(); diff --git a/consensus/src/context/alt_chains.rs b/consensus/src/context/alt_chains.rs index 71af8a1..f0c391d 100644 --- a/consensus/src/context/alt_chains.rs +++ b/consensus/src/context/alt_chains.rs @@ -4,7 +4,7 @@ use tower::ServiceExt; use cuprate_consensus_rules::{blocks::BlockError, ConsensusError}; use cuprate_types::{ - blockchain::{BCReadRequest, BCResponse}, + blockchain::{BlockchainReadRequest, BlockchainResponse}, Chain, ChainId, }; @@ -100,8 +100,9 @@ impl AltChainMap { } // find the block with hash == prev_id. - let BCResponse::FindBlock(res) = - database.oneshot(BCReadRequest::FindBlock(prev_id)).await? + let BlockchainResponse::FindBlock(res) = database + .oneshot(BlockchainReadRequest::FindBlock(prev_id)) + .await? else { panic!("Database returned wrong response"); }; @@ -130,10 +131,10 @@ pub async fn get_alt_chain_difficulty_cache( mut database: D, ) -> Result { // find the block with hash == prev_id. - let BCResponse::FindBlock(res) = database + let BlockchainResponse::FindBlock(res) = database .ready() .await? - .call(BCReadRequest::FindBlock(prev_id)) + .call(BlockchainReadRequest::FindBlock(prev_id)) .await? else { panic!("Database returned wrong response"); @@ -177,10 +178,10 @@ pub async fn get_alt_chain_weight_cache( mut database: D, ) -> Result { // find the block with hash == prev_id. - let BCResponse::FindBlock(res) = database + let BlockchainResponse::FindBlock(res) = database .ready() .await? - .call(BCReadRequest::FindBlock(prev_id)) + .call(BlockchainReadRequest::FindBlock(prev_id)) .await? 
else { panic!("Database returned wrong response"); diff --git a/consensus/src/context/difficulty.rs b/consensus/src/context/difficulty.rs index b025dfc..9ec0f1e 100644 --- a/consensus/src/context/difficulty.rs +++ b/consensus/src/context/difficulty.rs @@ -13,7 +13,7 @@ use tracing::instrument; use cuprate_helper::num::median; use cuprate_types::{ - blockchain::{BCReadRequest, BCResponse}, + blockchain::{BlockchainReadRequest, BlockchainResponse}, Chain, }; @@ -373,8 +373,8 @@ async fn get_blocks_in_pow_info( ) -> Result<(VecDeque, VecDeque), ExtendedConsensusError> { tracing::info!("Getting blocks timestamps"); - let BCResponse::BlockExtendedHeaderInRange(ext_header) = database - .oneshot(BCReadRequest::BlockExtendedHeaderInRange( + let BlockchainResponse::BlockExtendedHeaderInRange(ext_header) = database + .oneshot(BlockchainReadRequest::BlockExtendedHeaderInRange( block_heights, chain, )) diff --git a/consensus/src/context/hardforks.rs b/consensus/src/context/hardforks.rs index 2243350..7972a0e 100644 --- a/consensus/src/context/hardforks.rs +++ b/consensus/src/context/hardforks.rs @@ -5,7 +5,7 @@ use tracing::instrument; use cuprate_consensus_rules::{HFVotes, HFsInfo, HardFork}; use cuprate_types::{ - blockchain::{BCReadRequest, BCResponse}, + blockchain::{BlockchainReadRequest, BlockchainResponse}, Chain, }; @@ -90,10 +90,10 @@ impl HardForkState { debug_assert_eq!(votes.total_votes(), config.window) } - let BCResponse::BlockExtendedHeader(ext_header) = database + let BlockchainResponse::BlockExtendedHeader(ext_header) = database .ready() .await? - .call(BCReadRequest::BlockExtendedHeader(chain_height - 1)) + .call(BlockchainReadRequest::BlockExtendedHeader(chain_height - 1)) .await? 
else { panic!("Database sent incorrect response!"); @@ -214,8 +214,8 @@ async fn get_votes_in_range( ) -> Result { let mut votes = HFVotes::new(window_size); - let BCResponse::BlockExtendedHeaderInRange(vote_list) = database - .oneshot(BCReadRequest::BlockExtendedHeaderInRange( + let BlockchainResponse::BlockExtendedHeaderInRange(vote_list) = database + .oneshot(BlockchainReadRequest::BlockExtendedHeaderInRange( block_heights, Chain::Main, )) diff --git a/consensus/src/context/rx_vms.rs b/consensus/src/context/rx_vms.rs index 3154648..649146f 100644 --- a/consensus/src/context/rx_vms.rs +++ b/consensus/src/context/rx_vms.rs @@ -22,7 +22,7 @@ use cuprate_consensus_rules::{ }; use cuprate_helper::asynch::rayon_spawn_async; use cuprate_types::{ - blockchain::{BCReadRequest, BCResponse}, + blockchain::{BlockchainReadRequest, BlockchainResponse}, Chain, }; @@ -138,8 +138,8 @@ impl RandomXVMCache { ) -> Result, ExtendedConsensusError> { let seed_height = randomx_seed_height(height); - let BCResponse::BlockHash(seed_hash) = database - .oneshot(BCReadRequest::BlockHash(seed_height, chain)) + let BlockchainResponse::BlockHash(seed_hash) = database + .oneshot(BlockchainReadRequest::BlockHash(seed_height, chain)) .await? else { panic!("Database returned wrong response!"); @@ -273,9 +273,9 @@ async fn get_block_hashes( for height in heights { let db = database.clone(); fut.push_back(async move { - let BCResponse::BlockHash(hash) = db + let BlockchainResponse::BlockHash(hash) = db .clone() - .oneshot(BCReadRequest::BlockHash(height, Chain::Main)) + .oneshot(BlockchainReadRequest::BlockHash(height, Chain::Main)) .await? 
else { panic!("Database sent incorrect response!"); diff --git a/consensus/src/context/task.rs b/consensus/src/context/task.rs index 1fa68a2..79ddf4c 100644 --- a/consensus/src/context/task.rs +++ b/consensus/src/context/task.rs @@ -10,7 +10,7 @@ use tracing::Instrument; use cuprate_consensus_rules::blocks::ContextToVerifyBlock; use cuprate_types::{ - blockchain::{BCReadRequest, BCResponse}, + blockchain::{BlockchainReadRequest, BlockchainResponse}, Chain, }; @@ -76,19 +76,19 @@ impl ContextTask { tracing::debug!("Initialising blockchain context"); - let BCResponse::ChainHeight(chain_height, top_block_hash) = database + let BlockchainResponse::ChainHeight(chain_height, top_block_hash) = database .ready() .await? - .call(BCReadRequest::ChainHeight) + .call(BlockchainReadRequest::ChainHeight) .await? else { panic!("Database sent incorrect response!"); }; - let BCResponse::GeneratedCoins(already_generated_coins) = database + let BlockchainResponse::GeneratedCoins(already_generated_coins) = database .ready() .await? - .call(BCReadRequest::GeneratedCoins(chain_height - 1)) + .call(BlockchainReadRequest::GeneratedCoins(chain_height - 1)) .await? else { panic!("Database sent incorrect response!"); @@ -248,21 +248,24 @@ impl ContextTask { self.chain_height -= numb_blocks; - let BCResponse::GeneratedCoins(already_generated_coins) = self + let BlockchainResponse::GeneratedCoins(already_generated_coins) = self .database .ready() .await? - .call(BCReadRequest::GeneratedCoins(self.chain_height - 1)) + .call(BlockchainReadRequest::GeneratedCoins(self.chain_height - 1)) .await? else { panic!("Database sent incorrect response!"); }; - let BCResponse::BlockHash(top_block_hash) = self + let BlockchainResponse::BlockHash(top_block_hash) = self .database .ready() .await? - .call(BCReadRequest::BlockHash(self.chain_height - 1, Chain::Main)) + .call(BlockchainReadRequest::BlockHash( + self.chain_height - 1, + Chain::Main, + )) .await? 
else { panic!("Database returned incorrect response!"); diff --git a/consensus/src/context/weight.rs b/consensus/src/context/weight.rs index 1084086..7cd5454 100644 --- a/consensus/src/context/weight.rs +++ b/consensus/src/context/weight.rs @@ -17,7 +17,7 @@ use tracing::instrument; use cuprate_consensus_rules::blocks::{penalty_free_zone, PENALTY_FREE_ZONE_5}; use cuprate_helper::{asynch::rayon_spawn_async, num::RollingMedian}; use cuprate_types::{ - blockchain::{BCReadRequest, BCResponse}, + blockchain::{BlockchainReadRequest, BlockchainResponse}, Chain, }; @@ -296,8 +296,10 @@ async fn get_blocks_weight_in_range( ) -> Result, ExtendedConsensusError> { tracing::info!("getting block weights."); - let BCResponse::BlockExtendedHeaderInRange(ext_headers) = database - .oneshot(BCReadRequest::BlockExtendedHeaderInRange(range, chain)) + let BlockchainResponse::BlockExtendedHeaderInRange(ext_headers) = database + .oneshot(BlockchainReadRequest::BlockExtendedHeaderInRange( + range, chain, + )) .await? else { panic!("Database sent incorrect response!") @@ -318,8 +320,10 @@ async fn get_long_term_weight_in_range( ) -> Result, ExtendedConsensusError> { tracing::info!("getting block long term weights."); - let BCResponse::BlockExtendedHeaderInRange(ext_headers) = database - .oneshot(BCReadRequest::BlockExtendedHeaderInRange(range, chain)) + let BlockchainResponse::BlockExtendedHeaderInRange(ext_headers) = database + .oneshot(BlockchainReadRequest::BlockExtendedHeaderInRange( + range, chain, + )) .await? else { panic!("Database sent incorrect response!") diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 1edafdc..3b7f2ae 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -7,8 +7,8 @@ //! - [`TxVerifierService`] Which handles transaction verification. //! //! This crate is generic over the database which is implemented as a [`tower::Service`]. To -//! implement a database you need to have a service which accepts [`BCReadRequest`] and responds -//! 
with [`BCResponse`]. +//! implement a database you need to have a service which accepts [`BlockchainReadRequest`] and responds +//! with [`BlockchainResponse`]. //! use cuprate_consensus_rules::{ConsensusError, HardFork}; @@ -27,7 +27,7 @@ pub use context::{ pub use transactions::{TxVerifierService, VerifyTxRequest, VerifyTxResponse}; // re-export. -pub use cuprate_types::blockchain::{BCReadRequest, BCResponse}; +pub use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse}; /// An Error returned from one of the consensus services. #[derive(Debug, thiserror::Error)] @@ -83,7 +83,7 @@ use __private::Database; pub mod __private { use std::future::Future; - use cuprate_types::blockchain::{BCReadRequest, BCResponse}; + use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse}; /// A type alias trait used to represent a database, so we don't have to write [`tower::Service`] bounds /// everywhere. @@ -94,8 +94,8 @@ pub mod __private { /// ``` pub trait Database: tower::Service< - BCReadRequest, - Response = BCResponse, + BlockchainReadRequest, + Response = BlockchainResponse, Error = tower::BoxError, Future = Self::Future2, > @@ -103,8 +103,13 @@ pub mod __private { type Future2: Future> + Send + 'static; } - impl> - crate::Database for T + impl< + T: tower::Service< + BlockchainReadRequest, + Response = BlockchainResponse, + Error = tower::BoxError, + >, + > crate::Database for T where T::Future: Future> + Send + 'static, { diff --git a/consensus/src/tests/mock_db.rs b/consensus/src/tests/mock_db.rs index c4fd75d..a620003 100644 --- a/consensus/src/tests/mock_db.rs +++ b/consensus/src/tests/mock_db.rs @@ -16,7 +16,7 @@ use proptest_derive::Arbitrary; use tower::{BoxError, Service}; use cuprate_types::{ - blockchain::{BCReadRequest, BCResponse}, + blockchain::{BlockchainReadRequest, BlockchainResponse}, ExtendedBlockHeader, }; @@ -133,8 +133,8 @@ impl DummyDatabase { } } -impl Service for DummyDatabase { - type Response = BCResponse; 
+impl Service for DummyDatabase { + type Response = BlockchainResponse; type Error = BoxError; type Future = Pin> + Send + 'static>>; @@ -143,13 +143,13 @@ impl Service for DummyDatabase { Poll::Ready(Ok(())) } - fn call(&mut self, req: BCReadRequest) -> Self::Future { + fn call(&mut self, req: BlockchainReadRequest) -> Self::Future { let blocks = self.blocks.clone(); let dummy_height = self.dummy_height; async move { Ok(match req { - BCReadRequest::BlockExtendedHeader(id) => { + BlockchainReadRequest::BlockExtendedHeader(id) => { let mut id = usize::try_from(id).unwrap(); if let Some(dummy_height) = dummy_height { let block_len = blocks.read().unwrap().len(); @@ -157,7 +157,7 @@ impl Service for DummyDatabase { id -= dummy_height - block_len; } - BCResponse::BlockExtendedHeader( + BlockchainResponse::BlockExtendedHeader( blocks .read() .unwrap() @@ -167,12 +167,12 @@ impl Service for DummyDatabase { .ok_or("block not in database!")?, ) } - BCReadRequest::BlockHash(id, _) => { + BlockchainReadRequest::BlockHash(id, _) => { let mut hash = [0; 32]; hash[0..8].copy_from_slice(&id.to_le_bytes()); - BCResponse::BlockHash(hash) + BlockchainResponse::BlockHash(hash) } - BCReadRequest::BlockExtendedHeaderInRange(range, _) => { + BlockchainReadRequest::BlockExtendedHeaderInRange(range, _) => { let mut end = usize::try_from(range.end).unwrap(); let mut start = usize::try_from(range.start).unwrap(); @@ -183,7 +183,7 @@ impl Service for DummyDatabase { start -= dummy_height - block_len; } - BCResponse::BlockExtendedHeaderInRange( + BlockchainResponse::BlockExtendedHeaderInRange( blocks .read() .unwrap() @@ -195,7 +195,7 @@ impl Service for DummyDatabase { .collect(), ) } - BCReadRequest::ChainHeight => { + BlockchainReadRequest::ChainHeight => { let height: u64 = dummy_height .unwrap_or(blocks.read().unwrap().len()) .try_into() @@ -204,9 +204,9 @@ impl Service for DummyDatabase { let mut top_hash = [0; 32]; top_hash[0..8].copy_from_slice(&height.to_le_bytes()); - 
BCResponse::ChainHeight(height, top_hash) + BlockchainResponse::ChainHeight(height, top_hash) } - BCReadRequest::GeneratedCoins(_) => BCResponse::GeneratedCoins(0), + BlockchainReadRequest::GeneratedCoins(_) => BlockchainResponse::GeneratedCoins(0), _ => unimplemented!("the context svc should not need these requests!"), }) } diff --git a/consensus/src/transactions.rs b/consensus/src/transactions.rs index 417eb48..78104e9 100644 --- a/consensus/src/transactions.rs +++ b/consensus/src/transactions.rs @@ -28,7 +28,7 @@ use cuprate_consensus_rules::{ ConsensusError, HardFork, TxVersion, }; use cuprate_helper::asynch::rayon_spawn_async; -use cuprate_types::blockchain::{BCReadRequest, BCResponse}; +use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse}; use crate::{ batch_verifier::MultiThreadedBatchVerifier, @@ -308,10 +308,10 @@ where }) })?; - let BCResponse::KeyImagesSpent(kis_spent) = database + let BlockchainResponse::KeyImagesSpent(kis_spent) = database .ready() .await? - .call(BCReadRequest::KeyImagesSpent(spent_kis)) + .call(BlockchainReadRequest::KeyImagesSpent(spent_kis)) .await? else { panic!("Database sent incorrect response!"); @@ -340,10 +340,12 @@ where if !verified_at_block_hashes.is_empty() { tracing::trace!("Filtering block hashes not in the main chain."); - let BCResponse::FilterUnknownHashes(known_hashes) = database + let BlockchainResponse::FilterUnknownHashes(known_hashes) = database .ready() .await? - .call(BCReadRequest::FilterUnknownHashes(verified_at_block_hashes)) + .call(BlockchainReadRequest::FilterUnknownHashes( + verified_at_block_hashes, + )) .await? 
else { panic!("Database returned wrong response!"); diff --git a/consensus/src/transactions/contextual_data.rs b/consensus/src/transactions/contextual_data.rs index 95e5262..b17fbe0 100644 --- a/consensus/src/transactions/contextual_data.rs +++ b/consensus/src/transactions/contextual_data.rs @@ -27,7 +27,7 @@ use cuprate_consensus_rules::{ ConsensusError, HardFork, TxVersion, }; use cuprate_types::{ - blockchain::{BCReadRequest, BCResponse}, + blockchain::{BlockchainReadRequest, BlockchainResponse}, OutputOnChain, }; @@ -153,19 +153,19 @@ pub async fn batch_get_ring_member_info( .map_err(ConsensusError::Transaction)?; } - let BCResponse::Outputs(outputs) = database + let BlockchainResponse::Outputs(outputs) = database .ready() .await? - .call(BCReadRequest::Outputs(output_ids)) + .call(BlockchainReadRequest::Outputs(output_ids)) .await? else { panic!("Database sent incorrect response!") }; - let BCResponse::NumberOutputsWithAmount(outputs_with_amount) = database + let BlockchainResponse::NumberOutputsWithAmount(outputs_with_amount) = database .ready() .await? - .call(BCReadRequest::NumberOutputsWithAmount( + .call(BlockchainReadRequest::NumberOutputsWithAmount( outputs.keys().copied().collect(), )) .await? @@ -234,10 +234,10 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>( unique_input_amounts.len() ); - let BCResponse::NumberOutputsWithAmount(outputs_with_amount) = database + let BlockchainResponse::NumberOutputsWithAmount(outputs_with_amount) = database .ready() .await? - .call(BCReadRequest::NumberOutputsWithAmount( + .call(BlockchainReadRequest::NumberOutputsWithAmount( unique_input_amounts.into_iter().collect(), )) .await? 
diff --git a/consensus/tests/verify_correct_txs.rs b/consensus/tests/verify_correct_txs.rs index b71b52d..7afb370 100644 --- a/consensus/tests/verify_correct_txs.rs +++ b/consensus/tests/verify_correct_txs.rs @@ -12,7 +12,7 @@ use cuprate_consensus::{ TxVerifierService, VerifyTxRequest, VerifyTxResponse, __private::Database, }; use cuprate_types::{ - blockchain::{BCReadRequest, BCResponse}, + blockchain::{BlockchainReadRequest, BlockchainResponse}, OutputOnChain, }; @@ -23,12 +23,12 @@ use cuprate_test_utils::data::TX_E2D393; fn dummy_database(outputs: BTreeMap) -> impl Database + Clone { let outputs = Arc::new(outputs); - service_fn(move |req: BCReadRequest| { + service_fn(move |req: BlockchainReadRequest| { ready(Ok(match req { - BCReadRequest::NumberOutputsWithAmount(_) => { - BCResponse::NumberOutputsWithAmount(HashMap::new()) + BlockchainReadRequest::NumberOutputsWithAmount(_) => { + BlockchainResponse::NumberOutputsWithAmount(HashMap::new()) } - BCReadRequest::Outputs(outs) => { + BlockchainReadRequest::Outputs(outs) => { let idxs = outs.get(&0).unwrap(); let mut ret = HashMap::new(); @@ -40,9 +40,9 @@ fn dummy_database(outputs: BTreeMap) -> impl Database + Clon .collect::>(), ); - BCResponse::Outputs(ret) + BlockchainResponse::Outputs(ret) } - BCReadRequest::KeyImagesSpent(_) => BCResponse::KeyImagesSpent(false), + BlockchainReadRequest::KeyImagesSpent(_) => BlockchainResponse::KeyImagesSpent(false), _ => panic!("Database request not needed for this test"), })) }) diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index 79d0dc4..f45f1bc 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -15,15 +15,16 @@ default = ["heed", "service"] heed = ["cuprate-database/heed"] redb = ["cuprate-database/redb"] redb-memory = ["cuprate-database/redb-memory"] -service = ["dep:crossbeam", "dep:futures", "dep:tokio", "dep:tokio-util", "dep:tower", "dep:rayon"] +service = ["dep:thread_local", "dep:rayon"] [dependencies] # 
FIXME: # We only need the `thread` feature if `service` is enabled. # Figure out how to enable features of an already pulled in dependency conditionally. -cuprate-database = { path = "../database" } -cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] } -cuprate-types = { path = "../../types", features = ["blockchain"] } +cuprate-database = { path = "../database" } +cuprate-database-service = { path = "../service" } +cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] } +cuprate-types = { path = "../../types", features = ["blockchain"] } bitflags = { workspace = true, features = ["serde", "bytemuck"] } bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } @@ -33,19 +34,16 @@ monero-serai = { workspace = true, features = ["std"] } serde = { workspace = true, optional = true } # `service` feature. -crossbeam = { workspace = true, features = ["std"], optional = true } -futures = { workspace = true, optional = true } -tokio = { workspace = true, features = ["full"], optional = true } -tokio-util = { workspace = true, features = ["full"], optional = true } -tower = { workspace = true, features = ["full"], optional = true } -thread_local = { workspace = true } +tower = { workspace = true } +thread_local = { workspace = true, optional = true } rayon = { workspace = true, optional = true } [dev-dependencies] cuprate-helper = { path = "../../helper", features = ["thread"] } cuprate-test-utils = { path = "../../test-utils" } -tempfile = { version = "3.10.0" } +tokio = { workspace = true, features = ["full"] } +tempfile = { workspace = true } pretty_assertions = { workspace = true } proptest = { workspace = true } hex = { workspace = true } diff --git a/storage/blockchain/src/config/backend.rs b/storage/blockchain/src/config/backend.rs deleted file mode 100644 index ee72b3d..0000000 --- a/storage/blockchain/src/config/backend.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! 
SOMEDAY - -//---------------------------------------------------------------------------------------------------- Import -use std::{ - borrow::Cow, - num::NonZeroUsize, - path::{Path, PathBuf}, -}; - -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -use cuprate_helper::fs::cuprate_blockchain_dir; - -use crate::{ - config::{ReaderThreads, SyncMode}, - constants::DATABASE_DATA_FILENAME, - resize::ResizeAlgorithm, -}; - -//---------------------------------------------------------------------------------------------------- Backend -/// SOMEDAY: allow runtime hot-swappable backends. -#[derive(Copy, Clone, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum Backend { - #[default] - /// SOMEDAY - Heed, - /// SOMEDAY - Redb, -} diff --git a/storage/blockchain/src/config/config.rs b/storage/blockchain/src/config/config.rs index c58e292..5bfbf74 100644 --- a/storage/blockchain/src/config/config.rs +++ b/storage/blockchain/src/config/config.rs @@ -9,7 +9,8 @@ use serde::{Deserialize, Serialize}; use cuprate_database::{config::SyncMode, resize::ResizeAlgorithm}; use cuprate_helper::fs::cuprate_blockchain_dir; -use crate::config::ReaderThreads; +// re-exports +pub use cuprate_database_service::ReaderThreads; //---------------------------------------------------------------------------------------------------- ConfigBuilder /// Builder for [`Config`]. diff --git a/storage/blockchain/src/config/mod.rs b/storage/blockchain/src/config/mod.rs index 7ecc14c..555a6e6 100644 --- a/storage/blockchain/src/config/mod.rs +++ b/storage/blockchain/src/config/mod.rs @@ -34,14 +34,11 @@ //! .build(); //! //! // Start a database `service` using this configuration. -//! let (reader_handle, _) = cuprate_blockchain::service::init(config.clone())?; +//! let (_, _, env) = cuprate_blockchain::service::init(config.clone())?; //! // It's using the config we provided. -//! 
assert_eq!(reader_handle.env().config(), &config.db_config); +//! assert_eq!(env.config(), &config.db_config); //! # Ok(()) } //! ``` mod config; -pub use config::{Config, ConfigBuilder}; - -mod reader_threads; -pub use reader_threads::ReaderThreads; +pub use config::{Config, ConfigBuilder, ReaderThreads}; diff --git a/storage/blockchain/src/config/sync_mode.rs b/storage/blockchain/src/config/sync_mode.rs deleted file mode 100644 index 1d20339..0000000 --- a/storage/blockchain/src/config/sync_mode.rs +++ /dev/null @@ -1,135 +0,0 @@ -//! Database [`Env`](crate::Env) configuration. -//! -//! This module contains the main [`Config`]uration struct -//! for the database [`Env`](crate::Env)ironment, and data -//! structures related to any configuration setting. -//! -//! These configurations are processed at runtime, meaning -//! the `Env` can/will dynamically adjust its behavior -//! based on these values. - -//---------------------------------------------------------------------------------------------------- Import - -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -//---------------------------------------------------------------------------------------------------- SyncMode -/// Disk synchronization mode. -/// -/// This controls how/when the database syncs its data to disk. -/// -/// Regardless of the variant chosen, dropping [`Env`](crate::Env) -/// will always cause it to fully sync to disk. -/// -/// # Sync vs Async -/// All invariants except [`SyncMode::Async`] & [`SyncMode::Fast`] -/// are `synchronous`, as in the database will wait until the OS has -/// finished syncing all the data to disk before continuing. -/// -/// `SyncMode::Async` & `SyncMode::Fast` are `asynchronous`, meaning -/// the database will _NOT_ wait until the data is fully synced to disk -/// before continuing. 
Note that this doesn't mean the database itself -/// won't be synchronized between readers/writers, but rather that the -/// data _on disk_ may not be immediately synchronized after a write. -/// -/// Something like: -/// ```rust,ignore -/// db.put("key", value); -/// db.get("key"); -/// ``` -/// will be fine, most likely pulling from memory instead of disk. -/// -/// # SOMEDAY -/// Dynamic sync's are not yet supported. -/// -/// Only: -/// -/// - [`SyncMode::Safe`] -/// - [`SyncMode::Async`] -/// - [`SyncMode::Fast`] -/// -/// are supported, all other variants will panic on [`crate::Env::open`]. -#[derive(Copy, Clone, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum SyncMode { - /// Use [`SyncMode::Fast`] until fully synced, - /// then use [`SyncMode::Safe`]. - /// - // # SOMEDAY: how to implement this? - // ref: - // monerod-solution: - // cuprate-issue: - // - // We could: - // ```rust,ignore - // if current_db_block <= top_block.saturating_sub(N) { - // // don't sync() - // } else { - // // sync() - // } - // ``` - // where N is some threshold we pick that is _close_ enough - // to being synced where we want to start being safer. - // - // Essentially, when we are in a certain % range of being finished, - // switch to safe mode, until then, go fast. - FastThenSafe, - - #[default] - /// Fully sync to disk per transaction. - /// - /// Every database transaction commit will - /// fully sync all data to disk, _synchronously_, - /// so the database (writer) halts until synced. - /// - /// This is expected to be very slow. - /// - /// This matches: - /// - LMDB without any special sync flags - /// - [`redb::Durability::Immediate`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Immediate) - Safe, - - /// Asynchrously sync to disk per transaction. - /// - /// This is the same as [`SyncMode::Safe`], - /// but the syncs will be asynchronous, i.e. 
- /// each transaction commit will sync to disk, - /// but only eventually, not necessarily immediately. - /// - /// This matches: - /// - [`MDB_MAPASYNC`](http://www.lmdb.tech/doc/group__mdb__env.html#gab034ed0d8e5938090aef5ee0997f7e94) - /// - [`redb::Durability::Eventual`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.Eventual) - Async, - - /// Fully sync to disk after we cross this transaction threshold. - /// - /// After committing [`usize`] amount of database - /// transactions, it will be sync to disk. - /// - /// `0` behaves the same as [`SyncMode::Safe`], and a ridiculously large - /// number like `usize::MAX` is practically the same as [`SyncMode::Fast`]. - Threshold(usize), - - /// Only flush at database shutdown. - /// - /// This is the fastest, yet unsafest option. - /// - /// It will cause the database to never _actively_ sync, - /// letting the OS decide when to flush data to disk. - /// - /// This matches: - /// - [`MDB_NOSYNC`](http://www.lmdb.tech/doc/group__mdb__env.html#ga5791dd1adb09123f82dd1f331209e12e) + [`MDB_MAPASYNC`](http://www.lmdb.tech/doc/group__mdb__env.html#gab034ed0d8e5938090aef5ee0997f7e94) - /// - [`redb::Durability::None`](https://docs.rs/redb/1.5.0/redb/enum.Durability.html#variant.None) - /// - /// `monerod` reference: - /// - /// # Corruption - /// In the case of a system crash, the database - /// may become corrupted when using this option. - // - // FIXME: we could call this `unsafe` - // and use that terminology in the config file - // so users know exactly what they are getting - // themselves into. 
- Fast, -} diff --git a/storage/blockchain/src/service/free.rs b/storage/blockchain/src/service/free.rs index 3701f66..21fb05b 100644 --- a/storage/blockchain/src/service/free.rs +++ b/storage/blockchain/src/service/free.rs @@ -3,11 +3,12 @@ //---------------------------------------------------------------------------------------------------- Import use std::sync::Arc; -use cuprate_database::InitError; +use cuprate_database::{ConcreteEnv, InitError}; +use crate::service::{init_read_service, init_write_service}; use crate::{ config::Config, - service::{DatabaseReadHandle, DatabaseWriteHandle}, + service::types::{BlockchainReadHandle, BlockchainWriteHandle}, }; //---------------------------------------------------------------------------------------------------- Init @@ -20,17 +21,26 @@ use crate::{ /// /// # Errors /// This will forward the error if [`crate::open`] failed. -pub fn init(config: Config) -> Result<(DatabaseReadHandle, DatabaseWriteHandle), InitError> { +pub fn init( + config: Config, +) -> Result< + ( + BlockchainReadHandle, + BlockchainWriteHandle, + Arc, + ), + InitError, +> { let reader_threads = config.reader_threads; // Initialize the database itself. let db = Arc::new(crate::open(config)?); // Spawn the Reader thread pool and Writer. - let readers = DatabaseReadHandle::init(&db, reader_threads); - let writer = DatabaseWriteHandle::init(db); + let readers = init_read_service(db.clone(), reader_threads); + let writer = init_write_service(db.clone()); - Ok((readers, writer)) + Ok((readers, writer, db)) } //---------------------------------------------------------------------------------------------------- Compact history diff --git a/storage/blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs index bf2d8e7..993c52d 100644 --- a/storage/blockchain/src/service/mod.rs +++ b/storage/blockchain/src/service/mod.rs @@ -14,8 +14,8 @@ //! //! ## Handles //! The 2 handles to the database are: -//! - [`DatabaseReadHandle`] -//! 
- [`DatabaseWriteHandle`] +//! - [`BlockchainReadHandle`] +//! - [`BlockchainWriteHandle`] //! //! The 1st allows any caller to send [`ReadRequest`][req_r]s. //! @@ -33,8 +33,10 @@ //! //! ## Shutdown //! Upon the above handles being dropped, the corresponding thread(s) will automatically exit, i.e: -//! - The last [`DatabaseReadHandle`] is dropped => reader thread-pool exits -//! - The last [`DatabaseWriteHandle`] is dropped => writer thread exits +//! - The last [`BlockchainReadHandle`] is dropped => reader thread-pool exits +//! - The last [`BlockchainWriteHandle`] is dropped => writer thread exits +//! +//! TODO: update this when `ConcreteEnv` is removed //! //! Upon dropping the [`cuprate_database::ConcreteEnv`]: //! - All un-processed database transactions are completed @@ -50,11 +52,11 @@ //! This channel can be `.await`ed upon to (eventually) receive //! the corresponding `Response` to your `Request`. //! -//! [req_r]: cuprate_types::blockchain::BCReadRequest +//! [req_r]: cuprate_types::blockchain::BlockchainReadRequest //! -//! [req_w]: cuprate_types::blockchain::BCWriteRequest +//! [req_w]: cuprate_types::blockchain::BlockchainWriteRequest //! -//! [resp]: cuprate_types::blockchain::BCResponse +//! [resp]: cuprate_types::blockchain::BlockchainResponse //! //! # Example //! Simple usage of `service`. @@ -63,7 +65,7 @@ //! use hex_literal::hex; //! use tower::{Service, ServiceExt}; //! -//! use cuprate_types::{blockchain::{BCReadRequest, BCWriteRequest, BCResponse}, Chain}; +//! use cuprate_types::{blockchain::{BlockchainReadRequest, BlockchainWriteRequest, BlockchainResponse}, Chain}; //! use cuprate_test_utils::data::block_v16_tx0; //! //! use cuprate_blockchain::{ @@ -81,12 +83,12 @@ //! .build(); //! //! // Initialize the database thread-pool. -//! let (mut read_handle, mut write_handle) = cuprate_blockchain::service::init(config)?; +//! let (mut read_handle, mut write_handle, _) = cuprate_blockchain::service::init(config)?; //! //! 
// Prepare a request to write block. //! let mut block = block_v16_tx0().clone(); //! # block.height = 0_u64; // must be 0th height or panic in `add_block()` -//! let request = BCWriteRequest::WriteBlock(block); +//! let request = BlockchainWriteRequest::WriteBlock(block); //! //! // Send the request. //! // We receive back an `async` channel that will @@ -96,16 +98,16 @@ //! //! // Block write was OK. //! let response = response_channel.await?; -//! assert_eq!(response, BCResponse::WriteBlockOk); +//! assert_eq!(response, BlockchainResponse::WriteBlockOk); //! //! // Now, let's try getting the block hash //! // of the block we just wrote. -//! let request = BCReadRequest::BlockHash(0, Chain::Main); +//! let request = BlockchainReadRequest::BlockHash(0, Chain::Main); //! let response_channel = read_handle.ready().await?.call(request); //! let response = response_channel.await?; //! assert_eq!( //! response, -//! BCResponse::BlockHash( +//! BlockchainResponse::BlockHash( //! hex!("43bd1f2b6556dcafa413d8372974af59e4e8f37dbf74dc6b2a9b7212d0577428") //! ) //! ); @@ -118,17 +120,19 @@ //! # Ok(()) } //! ``` +// needed for docs +use tower as _; + mod read; -pub use read::DatabaseReadHandle; +pub use read::{init_read_service, init_read_service_with_pool}; mod write; -pub use write::DatabaseWriteHandle; +pub use write::init_write_service; mod free; pub use free::init; - -// Internal type aliases for `service`. 
mod types; +pub use types::{BlockchainReadHandle, BlockchainWriteHandle}; #[cfg(test)] mod tests; diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index a5d51f1..fbd9f89 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -4,24 +4,23 @@ use std::{ collections::{HashMap, HashSet}, sync::Arc, - task::{Context, Poll}, }; -use futures::{channel::oneshot, ready}; -use rayon::iter::{IntoParallelIterator, ParallelIterator}; +use rayon::{ + iter::{IntoParallelIterator, ParallelIterator}, + ThreadPool, +}; use thread_local::ThreadLocal; -use tokio::sync::{OwnedSemaphorePermit, Semaphore}; -use tokio_util::sync::PollSemaphore; use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError}; -use cuprate_helper::{asynch::InfallibleOneshotReceiver, map::combine_low_high_bits_to_u128}; +use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads}; +use cuprate_helper::map::combine_low_high_bits_to_u128; use cuprate_types::{ - blockchain::{BCReadRequest, BCResponse}, + blockchain::{BlockchainReadRequest, BlockchainResponse}, Chain, ExtendedBlockHeader, OutputOnChain, }; use crate::{ - config::ReaderThreads, ops::{ block::{ block_exists, get_block_extended_header_from_height, get_block_height, get_block_info, @@ -32,156 +31,38 @@ use crate::{ }, service::{ free::{compact_history_genesis_not_included, compact_history_index_to_height_offset}, - types::{ResponseReceiver, ResponseResult, ResponseSender}, + types::{BlockchainReadHandle, ResponseResult}, }, - tables::OpenTables, - tables::{BlockHeights, BlockInfos, Tables}, + tables::{BlockHeights, BlockInfos, OpenTables, Tables}, types::{Amount, AmountIndex, BlockHash, BlockHeight, KeyImage, PreRctOutputId}, }; -//---------------------------------------------------------------------------------------------------- DatabaseReadHandle -/// Read handle to the database. 
+//---------------------------------------------------------------------------------------------------- init_read_service +/// Initialize the [`BlockchainReadHandle`] thread-pool backed by [`rayon`]. /// -/// This is cheaply [`Clone`]able handle that -/// allows `async`hronously reading from the database. +/// This spawns `threads` amount of reader threads +/// attached to `env` and returns a handle to the pool. /// -/// Calling [`tower::Service::call`] with a [`DatabaseReadHandle`] & [`BCReadRequest`] -/// will return an `async`hronous channel that can be `.await`ed upon -/// to receive the corresponding [`BCResponse`]. -pub struct DatabaseReadHandle { - /// Handle to the custom `rayon` DB reader thread-pool. - /// - /// Requests are [`rayon::ThreadPool::spawn`]ed in this thread-pool, - /// and responses are returned via a channel we (the caller) provide. - pool: Arc, +/// Should be called _once_ per actual database. Calling this function more than once will create +/// multiple unnecessary rayon thread-pools. +#[cold] +#[inline(never)] // Only called once. +pub fn init_read_service(env: Arc, threads: ReaderThreads) -> BlockchainReadHandle { + init_read_service_with_pool(env, init_thread_pool(threads)) +} - /// Counting semaphore asynchronous permit for database access. - /// Each [`tower::Service::poll_ready`] will acquire a permit - /// before actually sending a request to the `rayon` DB threadpool. - semaphore: PollSemaphore, - - /// An owned permit. - /// This will be set to [`Some`] in `poll_ready()` when we successfully acquire - /// the permit, and will be [`Option::take()`]n after `tower::Service::call()` is called. - /// - /// The actual permit will be dropped _after_ the rayon DB thread has finished - /// the request, i.e., after [`map_request()`] finishes. - permit: Option, - - /// Access to the database. +/// Initialize the blockchain database read service, with a specific rayon thread-pool instead of +/// creating a new one. 
+/// +/// Should be called _once_ per actual database, although nothing bad will happen, cloning the [`BlockchainReadHandle`] +/// is the correct way to get multiple handles to the database. +#[cold] +#[inline(never)] // Only called once. +pub fn init_read_service_with_pool( env: Arc, -} - -// `OwnedSemaphorePermit` does not implement `Clone`, -// so manually clone all elements, while keeping `permit` -// `None` across clones. -impl Clone for DatabaseReadHandle { - fn clone(&self) -> Self { - Self { - pool: Arc::clone(&self.pool), - semaphore: self.semaphore.clone(), - permit: None, - env: Arc::clone(&self.env), - } - } -} - -impl DatabaseReadHandle { - /// Initialize the `DatabaseReader` thread-pool backed by `rayon`. - /// - /// This spawns `N` amount of `DatabaseReader`'s - /// attached to `env` and returns a handle to the pool. - /// - /// Should be called _once_ per actual database. - #[cold] - #[inline(never)] // Only called once. - pub(super) fn init(env: &Arc, reader_threads: ReaderThreads) -> Self { - // How many reader threads to spawn? - let reader_count = reader_threads.as_threads().get(); - - // Spawn `rayon` reader threadpool. - let pool = rayon::ThreadPoolBuilder::new() - .num_threads(reader_count) - .thread_name(|i| format!("cuprate_helper::service::read::DatabaseReader{i}")) - .build() - .unwrap(); - - // Create a semaphore with the same amount of - // permits as the amount of reader threads. - let semaphore = PollSemaphore::new(Arc::new(Semaphore::new(reader_count))); - - // Return a handle to the pool. - Self { - pool: Arc::new(pool), - semaphore, - permit: None, - env: Arc::clone(env), - } - } - - /// Access to the actual database environment. - /// - /// # ⚠️ Warning - /// This function gives you access to the actual - /// underlying database connected to by `self`. - /// - /// I.e. it allows you to read/write data _directly_ - /// instead of going through a request. 
- /// - /// Be warned that using the database directly - /// in this manner has not been tested. - #[inline] - pub const fn env(&self) -> &Arc { - &self.env - } -} - -impl tower::Service for DatabaseReadHandle { - type Response = BCResponse; - type Error = RuntimeError; - type Future = ResponseReceiver; - - #[inline] - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - // Check if we already have a permit. - if self.permit.is_some() { - return Poll::Ready(Ok(())); - } - - // Acquire a permit before returning `Ready`. - let permit = - ready!(self.semaphore.poll_acquire(cx)).expect("this semaphore is never closed"); - - self.permit = Some(permit); - Poll::Ready(Ok(())) - } - - #[inline] - fn call(&mut self, request: BCReadRequest) -> Self::Future { - let permit = self - .permit - .take() - .expect("poll_ready() should have acquire a permit before calling call()"); - - // Response channel we `.await` on. - let (response_sender, receiver) = oneshot::channel(); - - // Spawn the request in the rayon DB thread-pool. - // - // Note that this uses `self.pool` instead of `rayon::spawn` - // such that any `rayon` parallel code that runs within - // the passed closure uses the same `rayon` threadpool. - // - // INVARIANT: - // The below `DatabaseReader` function impl block relies on this behavior. - let env = Arc::clone(&self.env); - self.pool.spawn(move || { - let _permit: OwnedSemaphorePermit = permit; - map_request(&env, request, response_sender); - }); // drop(permit/env); - - InfallibleOneshotReceiver::from(receiver) - } + pool: Arc, +) -> BlockchainReadHandle { + DatabaseReadService::new(env, pool, map_request) } //---------------------------------------------------------------------------------------------------- Request Mapping @@ -194,17 +75,16 @@ impl tower::Service for DatabaseReadHandle { /// The basic structure is: /// 1. `Request` is mapped to a handler function /// 2. Handler function is called -/// 3. [`BCResponse`] is sent +/// 3. 
[`BlockchainResponse`] is returned fn map_request( - env: &ConcreteEnv, // Access to the database - request: BCReadRequest, // The request we must fulfill - response_sender: ResponseSender, // The channel we must send the response back to -) { - use BCReadRequest as R; + env: &ConcreteEnv, // Access to the database + request: BlockchainReadRequest, // The request we must fulfill +) -> ResponseResult { + use BlockchainReadRequest as R; /* SOMEDAY: pre-request handling, run some code for each request? */ - let response = match request { + match request { R::BlockExtendedHeader(block) => block_extended_header(env, block), R::BlockHash(block, chain) => block_hash(env, block, chain), R::FindBlock(_) => todo!("Add alt blocks to DB"), @@ -219,11 +99,6 @@ fn map_request( R::KeyImagesSpent(set) => key_images_spent(env, set), R::CompactChainHistory => compact_chain_history(env), R::FindFirstUnknown(block_ids) => find_first_unknown(env, &block_ids), - }; - - if let Err(e) = response_sender.send(response) { - // TODO: use tracing. - println!("database reader failed to send response: {e:?}"); } /* SOMEDAY: post-request handling, run some code for each request? */ @@ -300,7 +175,7 @@ macro_rules! get_tables { // TODO: The overhead of parallelism may be too much for every request, perfomace test to find optimal // amount of parallelism. -/// [`BCReadRequest::BlockExtendedHeader`]. +/// [`BlockchainReadRequest::BlockExtendedHeader`]. #[inline] fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult { // Single-threaded, no `ThreadLocal` required. @@ -308,12 +183,12 @@ fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> Respon let tx_ro = env_inner.tx_ro()?; let tables = env_inner.open_tables(&tx_ro)?; - Ok(BCResponse::BlockExtendedHeader( + Ok(BlockchainResponse::BlockExtendedHeader( get_block_extended_header_from_height(&block_height, &tables)?, )) } -/// [`BCReadRequest::BlockHash`]. 
+/// [`BlockchainReadRequest::BlockHash`]. #[inline] fn block_hash(env: &ConcreteEnv, block_height: BlockHeight, chain: Chain) -> ResponseResult { // Single-threaded, no `ThreadLocal` required. @@ -326,10 +201,10 @@ fn block_hash(env: &ConcreteEnv, block_height: BlockHeight, chain: Chain) -> Res Chain::Alt(_) => todo!("Add alt blocks to DB"), }; - Ok(BCResponse::BlockHash(block_hash)) + Ok(BlockchainResponse::BlockHash(block_hash)) } -/// [`BCReadRequest::FilterUnknownHashes`]. +/// [`BlockchainReadRequest::FilterUnknownHashes`]. #[inline] fn filter_unknown_hashes(env: &ConcreteEnv, mut hashes: HashSet) -> ResponseResult { // Single-threaded, no `ThreadLocal` required. @@ -353,11 +228,11 @@ fn filter_unknown_hashes(env: &ConcreteEnv, mut hashes: HashSet) -> R if let Some(e) = err { Err(e) } else { - Ok(BCResponse::FilterUnknownHashes(hashes)) + Ok(BlockchainResponse::FilterUnknownHashes(hashes)) } } -/// [`BCReadRequest::BlockExtendedHeaderInRange`]. +/// [`BlockchainReadRequest::BlockExtendedHeaderInRange`]. #[inline] fn block_extended_header_in_range( env: &ConcreteEnv, @@ -382,10 +257,10 @@ fn block_extended_header_in_range( Chain::Alt(_) => todo!("Add alt blocks to DB"), }; - Ok(BCResponse::BlockExtendedHeaderInRange(vec)) + Ok(BlockchainResponse::BlockExtendedHeaderInRange(vec)) } -/// [`BCReadRequest::ChainHeight`]. +/// [`BlockchainReadRequest::ChainHeight`]. #[inline] fn chain_height(env: &ConcreteEnv) -> ResponseResult { // Single-threaded, no `ThreadLocal` required. @@ -398,10 +273,10 @@ fn chain_height(env: &ConcreteEnv) -> ResponseResult { let block_hash = get_block_info(&chain_height.saturating_sub(1), &table_block_infos)?.block_hash; - Ok(BCResponse::ChainHeight(chain_height, block_hash)) + Ok(BlockchainResponse::ChainHeight(chain_height, block_hash)) } -/// [`BCReadRequest::GeneratedCoins`]. +/// [`BlockchainReadRequest::GeneratedCoins`]. 
#[inline] fn generated_coins(env: &ConcreteEnv, height: u64) -> ResponseResult { // Single-threaded, no `ThreadLocal` required. @@ -409,13 +284,12 @@ fn generated_coins(env: &ConcreteEnv, height: u64) -> ResponseResult { let tx_ro = env_inner.tx_ro()?; let table_block_infos = env_inner.open_db_ro::(&tx_ro)?; - Ok(BCResponse::GeneratedCoins(cumulative_generated_coins( - &height, - &table_block_infos, - )?)) + Ok(BlockchainResponse::GeneratedCoins( + cumulative_generated_coins(&height, &table_block_infos)?, + )) } -/// [`BCReadRequest::Outputs`]. +/// [`BlockchainReadRequest::Outputs`]. #[inline] fn outputs(env: &ConcreteEnv, outputs: HashMap>) -> ResponseResult { // Prepare tx/tables in `ThreadLocal`. @@ -453,10 +327,10 @@ fn outputs(env: &ConcreteEnv, outputs: HashMap>) -> }) .collect::>, RuntimeError>>()?; - Ok(BCResponse::Outputs(map)) + Ok(BlockchainResponse::Outputs(map)) } -/// [`BCReadRequest::NumberOutputsWithAmount`]. +/// [`BlockchainReadRequest::NumberOutputsWithAmount`]. #[inline] fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec) -> ResponseResult { // Prepare tx/tables in `ThreadLocal`. @@ -498,10 +372,10 @@ fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec) -> Respon }) .collect::, RuntimeError>>()?; - Ok(BCResponse::NumberOutputsWithAmount(map)) + Ok(BlockchainResponse::NumberOutputsWithAmount(map)) } -/// [`BCReadRequest::KeyImagesSpent`]. +/// [`BlockchainReadRequest::KeyImagesSpent`]. #[inline] fn key_images_spent(env: &ConcreteEnv, key_images: HashSet) -> ResponseResult { // Prepare tx/tables in `ThreadLocal`. @@ -532,13 +406,13 @@ fn key_images_spent(env: &ConcreteEnv, key_images: HashSet) -> Respons // Else, `Ok(false)` will continue the iterator. .find_any(|result| !matches!(result, Ok(false))) { - None | Some(Ok(false)) => Ok(BCResponse::KeyImagesSpent(false)), // Key image was NOT found. - Some(Ok(true)) => Ok(BCResponse::KeyImagesSpent(true)), // Key image was found. 
+ None | Some(Ok(false)) => Ok(BlockchainResponse::KeyImagesSpent(false)), // Key image was NOT found. + Some(Ok(true)) => Ok(BlockchainResponse::KeyImagesSpent(true)), // Key image was found. Some(Err(e)) => Err(e), // A database error occurred. } } -/// [`BCReadRequest::CompactChainHistory`] +/// [`BlockchainReadRequest::CompactChainHistory`] fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult { let env_inner = env.env_inner(); let tx_ro = env_inner.tx_ro()?; @@ -568,13 +442,13 @@ fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult { block_ids.push(get_block_info(&0, &table_block_infos)?.block_hash); } - Ok(BCResponse::CompactChainHistory { + Ok(BlockchainResponse::CompactChainHistory { cumulative_difficulty, block_ids, }) } -/// [`BCReadRequest::FindFirstUnknown`] +/// [`BlockchainReadRequest::FindFirstUnknown`] /// /// # Invariant /// `block_ids` must be sorted in chronological block order, or else @@ -606,12 +480,12 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes } Ok(if idx == block_ids.len() { - BCResponse::FindFirstUnknown(None) + BlockchainResponse::FindFirstUnknown(None) } else if idx == 0 { - BCResponse::FindFirstUnknown(Some((0, 0))) + BlockchainResponse::FindFirstUnknown(Some((0, 0))) } else { let last_known_height = get_block_height(&block_ids[idx - 1], &table_block_heights)?; - BCResponse::FindFirstUnknown(Some((idx, last_known_height + 1))) + BlockchainResponse::FindFirstUnknown(Some((idx, last_known_height + 1))) }) } diff --git a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs index c00e32f..72b60e2 100644 --- a/storage/blockchain/src/service/tests.rs +++ b/storage/blockchain/src/service/tests.rs @@ -18,7 +18,7 @@ use tower::{Service, ServiceExt}; use cuprate_database::{ConcreteEnv, DatabaseIter, DatabaseRo, Env, EnvInner, RuntimeError}; use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3}; use cuprate_types::{ - 
blockchain::{BCReadRequest, BCResponse, BCWriteRequest}, + blockchain::{BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest}, Chain, OutputOnChain, VerifiedBlockInformation, }; @@ -29,7 +29,7 @@ use crate::{ blockchain::chain_height, output::id_to_output_on_chain, }, - service::{init, DatabaseReadHandle, DatabaseWriteHandle}, + service::{init, BlockchainReadHandle, BlockchainWriteHandle}, tables::{OpenTables, Tables, TablesIter}, tests::AssertTableLen, types::{Amount, AmountIndex, PreRctOutputId}, @@ -38,8 +38,8 @@ use crate::{ //---------------------------------------------------------------------------------------------------- Helper functions /// Initialize the `service`. fn init_service() -> ( - DatabaseReadHandle, - DatabaseWriteHandle, + BlockchainReadHandle, + BlockchainWriteHandle, Arc, tempfile::TempDir, ) { @@ -48,8 +48,7 @@ fn init_service() -> ( .db_directory(Cow::Owned(tempdir.path().into())) .low_power() .build(); - let (reader, writer) = init(config).unwrap(); - let env = reader.env().clone(); + let (reader, writer, env) = init(config).unwrap(); (reader, writer, env, tempdir) } @@ -82,10 +81,10 @@ async fn test_template( block.height = i as u64; // Request a block to be written, assert it was written. - let request = BCWriteRequest::WriteBlock(block); + let request = BlockchainWriteRequest::WriteBlock(block); let response_channel = writer.call(request); let response = response_channel.await.unwrap(); - assert_eq!(response, BCResponse::WriteBlockOk); + assert_eq!(response, BlockchainResponse::WriteBlockOk); } //----------------------------------------------------------------------- Reset the transaction @@ -101,36 +100,36 @@ async fn test_template( // Next few lines are just for preparing the expected responses, // see further below for usage. 
- let extended_block_header_0 = Ok(BCResponse::BlockExtendedHeader( + let extended_block_header_0 = Ok(BlockchainResponse::BlockExtendedHeader( get_block_extended_header_from_height(&0, &tables).unwrap(), )); let extended_block_header_1 = if block_fns.len() > 1 { - Ok(BCResponse::BlockExtendedHeader( + Ok(BlockchainResponse::BlockExtendedHeader( get_block_extended_header_from_height(&1, &tables).unwrap(), )) } else { Err(RuntimeError::KeyNotFound) }; - let block_hash_0 = Ok(BCResponse::BlockHash( + let block_hash_0 = Ok(BlockchainResponse::BlockHash( get_block_info(&0, tables.block_infos()).unwrap().block_hash, )); let block_hash_1 = if block_fns.len() > 1 { - Ok(BCResponse::BlockHash( + Ok(BlockchainResponse::BlockHash( get_block_info(&1, tables.block_infos()).unwrap().block_hash, )) } else { Err(RuntimeError::KeyNotFound) }; - let range_0_1 = Ok(BCResponse::BlockExtendedHeaderInRange(vec![ + let range_0_1 = Ok(BlockchainResponse::BlockExtendedHeaderInRange(vec![ get_block_extended_header_from_height(&0, &tables).unwrap(), ])); let range_0_2 = if block_fns.len() >= 2 { - Ok(BCResponse::BlockExtendedHeaderInRange(vec![ + Ok(BlockchainResponse::BlockExtendedHeaderInRange(vec![ get_block_extended_header_from_height(&0, &tables).unwrap(), get_block_extended_header_from_height(&1, &tables).unwrap(), ])) @@ -143,13 +142,15 @@ async fn test_template( let chain_height = { let block_info = get_block_info(&test_chain_height.saturating_sub(1), tables.block_infos()).unwrap(); - Ok(BCResponse::ChainHeight( + Ok(BlockchainResponse::ChainHeight( test_chain_height, block_info.block_hash, )) }; - let cumulative_generated_coins = Ok(BCResponse::GeneratedCoins(cumulative_generated_coins)); + let cumulative_generated_coins = Ok(BlockchainResponse::GeneratedCoins( + cumulative_generated_coins, + )); let num_req = tables .outputs_iter() @@ -159,7 +160,7 @@ async fn test_template( .map(|key| key.amount) .collect::>(); - let num_resp = Ok(BCResponse::NumberOutputsWithAmount( + let 
num_resp = Ok(BlockchainResponse::NumberOutputsWithAmount( num_req .iter() .map(|amount| match tables.num_outputs().get(amount) { @@ -174,36 +175,45 @@ async fn test_template( // Contains a fake non-spent key-image. let ki_req = HashSet::from([[0; 32]]); - let ki_resp = Ok(BCResponse::KeyImagesSpent(false)); + let ki_resp = Ok(BlockchainResponse::KeyImagesSpent(false)); //----------------------------------------------------------------------- Assert expected response // Assert read requests lead to the expected responses. for (request, expected_response) in [ ( - BCReadRequest::BlockExtendedHeader(0), + BlockchainReadRequest::BlockExtendedHeader(0), extended_block_header_0, ), ( - BCReadRequest::BlockExtendedHeader(1), + BlockchainReadRequest::BlockExtendedHeader(1), extended_block_header_1, ), - (BCReadRequest::BlockHash(0, Chain::Main), block_hash_0), - (BCReadRequest::BlockHash(1, Chain::Main), block_hash_1), ( - BCReadRequest::BlockExtendedHeaderInRange(0..1, Chain::Main), + BlockchainReadRequest::BlockHash(0, Chain::Main), + block_hash_0, + ), + ( + BlockchainReadRequest::BlockHash(1, Chain::Main), + block_hash_1, + ), + ( + BlockchainReadRequest::BlockExtendedHeaderInRange(0..1, Chain::Main), range_0_1, ), ( - BCReadRequest::BlockExtendedHeaderInRange(0..2, Chain::Main), + BlockchainReadRequest::BlockExtendedHeaderInRange(0..2, Chain::Main), range_0_2, ), - (BCReadRequest::ChainHeight, chain_height), + (BlockchainReadRequest::ChainHeight, chain_height), ( - BCReadRequest::GeneratedCoins(test_chain_height), + BlockchainReadRequest::GeneratedCoins(test_chain_height), cumulative_generated_coins, ), - (BCReadRequest::NumberOutputsWithAmount(num_req), num_resp), - (BCReadRequest::KeyImagesSpent(ki_req), ki_resp), + ( + BlockchainReadRequest::NumberOutputsWithAmount(num_req), + num_resp, + ), + (BlockchainReadRequest::KeyImagesSpent(ki_req), ki_resp), ] { let response = reader.clone().oneshot(request).await; println!("response: {response:#?}, expected_response: 
{expected_response:#?}"); @@ -217,10 +227,10 @@ async fn test_template( // Assert each key image we inserted comes back as "spent". for key_image in tables.key_images_iter().keys().unwrap() { let key_image = key_image.unwrap(); - let request = BCReadRequest::KeyImagesSpent(HashSet::from([key_image])); + let request = BlockchainReadRequest::KeyImagesSpent(HashSet::from([key_image])); let response = reader.clone().oneshot(request).await; println!("response: {response:#?}, key_image: {key_image:#?}"); - assert_eq!(response.unwrap(), BCResponse::KeyImagesSpent(true)); + assert_eq!(response.unwrap(), BlockchainResponse::KeyImagesSpent(true)); } //----------------------------------------------------------------------- Output checks @@ -281,10 +291,10 @@ async fn test_template( .collect::>(); // Send a request for every output we inserted before. - let request = BCReadRequest::Outputs(map.clone()); + let request = BlockchainReadRequest::Outputs(map.clone()); let response = reader.clone().oneshot(request).await; println!("Response::Outputs response: {response:#?}"); - let Ok(BCResponse::Outputs(response)) = response else { + let Ok(BlockchainResponse::Outputs(response)) = response else { panic!("{response:#?}") }; diff --git a/storage/blockchain/src/service/types.rs b/storage/blockchain/src/service/types.rs index c6ee67e..9cd86e9 100644 --- a/storage/blockchain/src/service/types.rs +++ b/storage/blockchain/src/service/types.rs @@ -1,30 +1,20 @@ //! Database service type aliases. -//! -//! Only used internally for our `tower::Service` impls. 
//---------------------------------------------------------------------------------------------------- Use -use futures::channel::oneshot::Sender; - use cuprate_database::RuntimeError; -use cuprate_helper::asynch::InfallibleOneshotReceiver; -use cuprate_types::blockchain::BCResponse; +use cuprate_database_service::{DatabaseReadService, DatabaseWriteHandle}; +use cuprate_types::blockchain::{ + BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest, +}; //---------------------------------------------------------------------------------------------------- Types /// The actual type of the response. /// -/// Either our [`BCResponse`], or a database error occurred. -pub(super) type ResponseResult = Result; +/// Either our [`BlockchainResponse`], or a database error occurred. +pub(super) type ResponseResult = Result; -/// The `Receiver` channel that receives the read response. -/// -/// This is owned by the caller (the reader/writer thread) -/// who `.await`'s for the response. -/// -/// The channel itself should never fail, -/// but the actual database operation might. -pub(super) type ResponseReceiver = InfallibleOneshotReceiver; +/// The blockchain database write service. +pub type BlockchainWriteHandle = DatabaseWriteHandle; -/// The `Sender` channel for the response. -/// -/// The database reader/writer thread uses this to send the database result to the caller. -pub(super) type ResponseSender = Sender; +/// The blockchain database read service. +pub type BlockchainReadHandle = DatabaseReadService; diff --git a/storage/blockchain/src/service/write.rs b/storage/blockchain/src/service/write.rs index 041ae7b..816afc4 100644 --- a/storage/blockchain/src/service/write.rs +++ b/storage/blockchain/src/service/write.rs @@ -1,209 +1,34 @@ //! Database writer thread definitions and logic. 
//---------------------------------------------------------------------------------------------------- Import -use std::{ - sync::Arc, - task::{Context, Poll}, -}; - -use futures::channel::oneshot; +use std::sync::Arc; use cuprate_database::{ConcreteEnv, Env, EnvInner, RuntimeError, TxRw}; -use cuprate_helper::asynch::InfallibleOneshotReceiver; +use cuprate_database_service::DatabaseWriteHandle; use cuprate_types::{ - blockchain::{BCResponse, BCWriteRequest}, + blockchain::{BlockchainResponse, BlockchainWriteRequest}, VerifiedBlockInformation, }; use crate::{ - service::types::{ResponseReceiver, ResponseResult, ResponseSender}, + service::types::{BlockchainWriteHandle, ResponseResult}, tables::OpenTables, }; -//---------------------------------------------------------------------------------------------------- Constants -/// Name of the writer thread. -const WRITER_THREAD_NAME: &str = concat!(module_path!(), "::DatabaseWriter"); - -//---------------------------------------------------------------------------------------------------- DatabaseWriteHandle -/// Write handle to the database. -/// -/// This is handle that allows `async`hronously writing to the database, -/// it is not [`Clone`]able as there is only ever 1 place within Cuprate -/// that writes. -/// -/// Calling [`tower::Service::call`] with a [`DatabaseWriteHandle`] & [`BCWriteRequest`] -/// will return an `async`hronous channel that can be `.await`ed upon -/// to receive the corresponding [`BCResponse`]. -#[derive(Debug)] -pub struct DatabaseWriteHandle { - /// Sender channel to the database write thread-pool. - /// - /// We provide the response channel for the thread-pool. - pub(super) sender: crossbeam::channel::Sender<(BCWriteRequest, ResponseSender)>, +//---------------------------------------------------------------------------------------------------- init_write_service +/// Initialize the blockchain write service from a [`ConcreteEnv`]. 
+pub fn init_write_service(env: Arc) -> BlockchainWriteHandle { + DatabaseWriteHandle::init(env, handle_blockchain_request) } -impl DatabaseWriteHandle { - /// Initialize the single `DatabaseWriter` thread. - #[cold] - #[inline(never)] // Only called once. - pub(super) fn init(env: Arc) -> Self { - // Initialize `Request/Response` channels. - let (sender, receiver) = crossbeam::channel::unbounded(); - - // Spawn the writer. - std::thread::Builder::new() - .name(WRITER_THREAD_NAME.into()) - .spawn(move || { - let this = DatabaseWriter { receiver, env }; - DatabaseWriter::main(this); - }) - .unwrap(); - - Self { sender } - } -} - -impl tower::Service for DatabaseWriteHandle { - type Response = BCResponse; - type Error = RuntimeError; - type Future = ResponseReceiver; - - #[inline] - fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - #[inline] - fn call(&mut self, request: BCWriteRequest) -> Self::Future { - // Response channel we `.await` on. - let (response_sender, receiver) = oneshot::channel(); - - // Send the write request. - self.sender.send((request, response_sender)).unwrap(); - - InfallibleOneshotReceiver::from(receiver) - } -} - -//---------------------------------------------------------------------------------------------------- DatabaseWriter -/// The single database writer thread. -pub(super) struct DatabaseWriter { - /// Receiver side of the database request channel. - /// - /// Any caller can send some requests to this channel. - /// They send them alongside another `Response` channel, - /// which we will eventually send to. - receiver: crossbeam::channel::Receiver<(BCWriteRequest, ResponseSender)>, - - /// Access to the database. - env: Arc, -} - -impl Drop for DatabaseWriter { - fn drop(&mut self) { - // TODO: log the writer thread has exited? - } -} - -impl DatabaseWriter { - /// The `DatabaseWriter`'s main function. 
- /// - /// The writer just loops in this function, handling requests forever - /// until the request channel is dropped or a panic occurs. - #[cold] - #[inline(never)] // Only called once. - fn main(self) { - // 1. Hang on request channel - // 2. Map request to some database function - // 3. Execute that function, get the result - // 4. Return the result via channel - 'main: loop { - let Ok((request, response_sender)) = self.receiver.recv() else { - // If this receive errors, it means that the channel is empty - // and disconnected, meaning the other side (all senders) have - // been dropped. This means "shutdown", and we return here to - // exit the thread. - // - // Since the channel is empty, it means we've also processed - // all requests. Since it is disconnected, it means future - // ones cannot come in. - return; - }; - - /// How many times should we retry handling the request on resize errors? - /// - /// This is 1 on automatically resizing databases, meaning there is only 1 iteration. - const REQUEST_RETRY_LIMIT: usize = if ConcreteEnv::MANUAL_RESIZE { 3 } else { 1 }; - - // Map [`Request`]'s to specific database functions. - // - // Both will: - // 1. Map the request to a function - // 2. Call the function - // 3. (manual resize only) If resize is needed, resize and retry - // 4. (manual resize only) Redo step {1, 2} - // 5. Send the function's `Result` back to the requester - // - // FIXME: there's probably a more elegant way - // to represent this retry logic with recursive - // functions instead of a loop. - 'retry: for retry in 0..REQUEST_RETRY_LIMIT { - // FIXME: will there be more than 1 write request? - // this won't have to be an enum. - let response = match &request { - BCWriteRequest::WriteBlock(block) => write_block(&self.env, block), - }; - - // If the database needs to resize, do so. 
- if ConcreteEnv::MANUAL_RESIZE && matches!(response, Err(RuntimeError::ResizeNeeded)) - { - // If this is the last iteration of the outer `for` loop and we - // encounter a resize error _again_, it means something is wrong. - assert_ne!( - retry, REQUEST_RETRY_LIMIT, - "database resize failed maximum of {REQUEST_RETRY_LIMIT} times" - ); - - // Resize the map, and retry the request handling loop. - // - // FIXME: - // We could pass in custom resizes to account for - // batches, i.e., we're about to add ~5GB of data, - // add that much instead of the default 1GB. - // - let old = self.env.current_map_size(); - let new = self.env.resize_map(None); - - // TODO: use tracing. - println!("resizing database memory map, old: {old}B, new: {new}B"); - - // Try handling the request again. - continue 'retry; - } - - // Automatically resizing databases should not be returning a resize error. - #[cfg(debug_assertions)] - if !ConcreteEnv::MANUAL_RESIZE { - assert!( - !matches!(response, Err(RuntimeError::ResizeNeeded)), - "auto-resizing database returned a ResizeNeeded error" - ); - } - - // Send the response back, whether if it's an `Ok` or `Err`. - if let Err(e) = response_sender.send(response) { - // TODO: use tracing. - println!("database writer failed to send response: {e:?}"); - } - - continue 'main; - } - - // Above retry loop should either: - // - continue to the next ['main] loop or... - // - ...retry until panic - unreachable!(); - } +//---------------------------------------------------------------------------------------------------- handle_bc_request +/// Handle an incoming [`BlockchainWriteRequest`], returning a [`BlockchainResponse`]. 
+fn handle_blockchain_request( + env: &ConcreteEnv, + req: &BlockchainWriteRequest, +) -> Result { + match req { + BlockchainWriteRequest::WriteBlock(block) => write_block(env, block), } } @@ -216,7 +41,7 @@ impl DatabaseWriter { // Each function will return the [`Response`] that we // should send back to the caller in [`map_request()`]. -/// [`BCWriteRequest::WriteBlock`]. +/// [`BlockchainWriteRequest::WriteBlock`]. #[inline] fn write_block(env: &ConcreteEnv, block: &VerifiedBlockInformation) -> ResponseResult { let env_inner = env.env_inner(); @@ -230,7 +55,7 @@ fn write_block(env: &ConcreteEnv, block: &VerifiedBlockInformation) -> ResponseR match result { Ok(()) => { TxRw::commit(tx_rw)?; - Ok(BCResponse::WriteBlockOk) + Ok(BlockchainResponse::WriteBlockOk) } Err(e) => { // INVARIANT: ensure database atomicity by aborting diff --git a/storage/service/Cargo.toml b/storage/service/Cargo.toml new file mode 100644 index 0000000..ed46b35 --- /dev/null +++ b/storage/service/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "cuprate-database-service" +version = "0.1.0" +edition = "2021" +description = "Cuprate's database service abstraction" +license = "MIT" +authors = ["Boog900"] +repository = "https://github.com/Cuprate/cuprate/tree/main/storage/service" +keywords = ["cuprate", "service", "database"] + +[dependencies] +cuprate-database = { path = "../database" } +cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] } + +serde = { workspace = true, optional = true } +rayon = { workspace = true } +tower = { workspace = true } +futures = { workspace = true } +crossbeam = { workspace = true, features = ["std"] } + +[lints] +workspace = true diff --git a/storage/service/README.md b/storage/service/README.md new file mode 100644 index 0000000..32e743c --- /dev/null +++ b/storage/service/README.md @@ -0,0 +1,7 @@ +# Cuprate's `tower::Service` database abstraction. 
+ +This crate contains the building blocks for creating a [`tower::Service`] interface to [`cuprate_blockchain`](https://doc.cuprate.org/cuprate_blockchain). + +It is split into 2 `tower::Service`s: +1. A [read service](crate::DatabaseReadService) which is backed by a [`rayon::ThreadPool`] +1. A [write service](crate::DatabaseWriteHandle) which spawns a single thread to handle write requests diff --git a/storage/service/src/lib.rs b/storage/service/src/lib.rs new file mode 100644 index 0000000..51d896a --- /dev/null +++ b/storage/service/src/lib.rs @@ -0,0 +1,8 @@ +#![doc = include_str!("../README.md")] + +mod reader_threads; +mod service; + +pub use reader_threads::{init_thread_pool, ReaderThreads}; + +pub use service::{DatabaseReadService, DatabaseWriteHandle}; diff --git a/storage/blockchain/src/config/reader_threads.rs b/storage/service/src/reader_threads.rs similarity index 84% rename from storage/blockchain/src/config/reader_threads.rs rename to storage/service/src/reader_threads.rs index d4dd6ac..72f619a 100644 --- a/storage/blockchain/src/config/reader_threads.rs +++ b/storage/service/src/reader_threads.rs @@ -1,23 +1,36 @@ -//! Database [`Env`](crate::Env) configuration. +//! Reader thread-pool configuration and initiation. //! -//! This module contains the main [`Config`]uration struct -//! for the database [`Env`](crate::Env)ironment, and data -//! structures related to any configuration setting. +//! This module contains [`ReaderThreads`] which allow specifying the amount of +//! reader threads for the [`rayon::ThreadPool`]. //! -//! These configurations are processed at runtime, meaning -//! the `Env` can/will dynamically adjust its behavior -//! based on these values. +//! It also contains [`init_thread_pool`] which initiates the thread-pool. 
//---------------------------------------------------------------------------------------------------- Import -use std::num::NonZeroUsize; +use std::{num::NonZeroUsize, sync::Arc}; +use rayon::ThreadPool; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +//---------------------------------------------------------------------------------------------------- init_thread_pool +/// Initialize the reader thread-pool backed by `rayon`. +pub fn init_thread_pool(reader_threads: ReaderThreads) -> Arc { + // How many reader threads to spawn? + let reader_count = reader_threads.as_threads().get(); + + Arc::new( + rayon::ThreadPoolBuilder::new() + .num_threads(reader_count) + .thread_name(|i| format!("{}::DatabaseReader({i})", module_path!())) + .build() + .unwrap(), + ) +} + //---------------------------------------------------------------------------------------------------- ReaderThreads -/// Amount of database reader threads to spawn when using [`service`](crate::service). +/// Amount of database reader threads to spawn. /// -/// This controls how many reader thread `service`'s +/// This controls how many reader threads the [`DatabaseReadService`](crate::DatabaseReadService) /// thread-pool will spawn to receive and send requests/responses. /// /// # Invariant @@ -48,7 +61,7 @@ pub enum ReaderThreads { /// as such, it is equal to [`ReaderThreads::OnePerThread`]. /// /// ```rust - /// # use cuprate_blockchain::config::*; + /// # use cuprate_database_service::*; /// let reader_threads = ReaderThreads::from(0_usize); /// assert!(matches!(reader_threads, ReaderThreads::OnePerThread)); /// ``` @@ -80,7 +93,7 @@ pub enum ReaderThreads { /// non-zero, but not 1 thread, the minimum value 1 will be returned. 
/// /// ```rust - /// # use cuprate_blockchain::config::*; + /// # use cuprate_database_service::ReaderThreads; /// assert_eq!(ReaderThreads::Percent(0.000000001).as_threads().get(), 1); /// ``` Percent(f32), @@ -96,7 +109,7 @@ impl ReaderThreads { /// /// # Example /// ```rust - /// use cuprate_blockchain::config::ReaderThreads as R; + /// use cuprate_database_service::ReaderThreads as R; /// /// let total_threads: std::num::NonZeroUsize = /// cuprate_helper::thread::threads(); diff --git a/storage/service/src/service.rs b/storage/service/src/service.rs new file mode 100644 index 0000000..cd4957f --- /dev/null +++ b/storage/service/src/service.rs @@ -0,0 +1,5 @@ +mod read; +mod write; + +pub use read::DatabaseReadService; +pub use write::DatabaseWriteHandle; diff --git a/storage/service/src/service/read.rs b/storage/service/src/service/read.rs new file mode 100644 index 0000000..0ab6853 --- /dev/null +++ b/storage/service/src/service/read.rs @@ -0,0 +1,95 @@ +use std::{ + sync::Arc, + task::{Context, Poll}, +}; + +use futures::channel::oneshot; +use rayon::ThreadPool; +use tower::Service; + +use cuprate_database::{ConcreteEnv, RuntimeError}; +use cuprate_helper::asynch::InfallibleOneshotReceiver; + +/// The [`rayon::ThreadPool`] service. +/// +/// Uses an inner request handler and a rayon thread-pool to asynchronously handle requests. +/// +/// - `Req` is the request type +/// - `Res` is the response type +pub struct DatabaseReadService { + /// Handle to the custom `rayon` DB reader thread-pool. + /// + /// Requests are [`rayon::ThreadPool::spawn`]ed in this thread-pool, + /// and responses are returned via a channel we (the caller) provide. + pool: Arc, + + /// The function used to handle request. + inner_handler: Arc Result + Send + Sync + 'static>, +} + +// Deriving [`Clone`] means `Req` & `Res` need to be `Clone`, even if they aren't. 
+impl Clone for DatabaseReadService { + fn clone(&self) -> Self { + Self { + pool: Arc::clone(&self.pool), + inner_handler: Arc::clone(&self.inner_handler), + } + } +} + +impl DatabaseReadService +where + Req: Send + 'static, + Res: Send + 'static, +{ + /// Creates the [`DatabaseReadService`] with the provided backing thread-pool. + /// + /// Should be called _once_ per actual database, although nothing bad will happen, cloning the [`DatabaseReadService`] + /// is the correct way to get multiple handles to the database. + #[cold] + #[inline(never)] // Only called once. + pub fn new( + env: Arc, + pool: Arc, + req_handler: impl Fn(&ConcreteEnv, Req) -> Result + Send + Sync + 'static, + ) -> Self { + let inner_handler = Arc::new(move |req| req_handler(&env, req)); + + Self { + pool, + inner_handler, + } + } +} + +impl Service for DatabaseReadService +where + Req: Send + 'static, + Res: Send + 'static, +{ + type Response = Res; + type Error = RuntimeError; + type Future = InfallibleOneshotReceiver>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Req) -> Self::Future { + // Response channel we `.await` on. + let (response_sender, receiver) = oneshot::channel(); + + let handler = Arc::clone(&self.inner_handler); + + // Spawn the request in the rayon DB thread-pool. + // + // Note that this uses `self.pool` instead of `rayon::spawn` + // such that any `rayon` parallel code that runs within + // the passed closure uses the same `rayon` threadpool. 
+ self.pool.spawn(move || { + drop(response_sender.send(handler(req))); + }); + + InfallibleOneshotReceiver::from(receiver) + } +} diff --git a/storage/service/src/service/write.rs b/storage/service/src/service/write.rs new file mode 100644 index 0000000..f75d615 --- /dev/null +++ b/storage/service/src/service/write.rs @@ -0,0 +1,178 @@ +use std::{ + fmt::Debug, + sync::Arc, + task::{Context, Poll}, +}; + +use futures::channel::oneshot; + +use cuprate_database::{ConcreteEnv, Env, RuntimeError}; +use cuprate_helper::asynch::InfallibleOneshotReceiver; + +//---------------------------------------------------------------------------------------------------- Constants +/// Name of the writer thread. +const WRITER_THREAD_NAME: &str = concat!(module_path!(), "::DatabaseWriter"); + +//---------------------------------------------------------------------------------------------------- DatabaseWriteHandle +/// Write handle to the database. +/// +/// This is handle that allows `async`hronously writing to the database. +/// +/// Calling [`tower::Service::call`] with a [`DatabaseWriteHandle`] +/// will return an `async`hronous channel that can be `.await`ed upon +/// to receive the corresponding response. +#[derive(Debug)] +pub struct DatabaseWriteHandle { + /// Sender channel to the database write thread-pool. + /// + /// We provide the response channel for the thread-pool. + pub(super) sender: + crossbeam::channel::Sender<(Req, oneshot::Sender>)>, +} + +impl DatabaseWriteHandle +where + Req: Send + 'static, + Res: Debug + Send + 'static, +{ + /// Initialize the single `DatabaseWriter` thread. + #[cold] + #[inline(never)] // Only called once. + pub fn init( + env: Arc, + inner_handler: impl Fn(&ConcreteEnv, &Req) -> Result + Send + 'static, + ) -> Self { + // Initialize `Request/Response` channels. + let (sender, receiver) = crossbeam::channel::unbounded(); + + // Spawn the writer. 
+ std::thread::Builder::new() + .name(WRITER_THREAD_NAME.into()) + .spawn(move || database_writer(&env, &receiver, inner_handler)) + .unwrap(); + + Self { sender } + } +} + +impl tower::Service for DatabaseWriteHandle { + type Response = Res; + type Error = RuntimeError; + type Future = InfallibleOneshotReceiver>; + + #[inline] + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + #[inline] + fn call(&mut self, request: Req) -> Self::Future { + // Response channel we `.await` on. + let (response_sender, receiver) = oneshot::channel(); + + // Send the write request. + self.sender.send((request, response_sender)).unwrap(); + + InfallibleOneshotReceiver::from(receiver) + } +} + +//---------------------------------------------------------------------------------------------------- database_writer +/// The main function of the writer thread. +fn database_writer( + env: &ConcreteEnv, + receiver: &crossbeam::channel::Receiver<(Req, oneshot::Sender>)>, + inner_handler: impl Fn(&ConcreteEnv, &Req) -> Result, +) where + Req: Send + 'static, + Res: Debug + Send + 'static, +{ + // 1. Hang on request channel + // 2. Map request to some database function + // 3. Execute that function, get the result + // 4. Return the result via channel + 'main: loop { + let Ok((request, response_sender)) = receiver.recv() else { + // If this receive errors, it means that the channel is empty + // and disconnected, meaning the other side (all senders) have + // been dropped. This means "shutdown", and we return here to + // exit the thread. + // + // Since the channel is empty, it means we've also processed + // all requests. Since it is disconnected, it means future + // ones cannot come in. + return; + }; + + /// How many times should we retry handling the request on resize errors? + /// + /// This is 1 on automatically resizing databases, meaning there is only 1 iteration. 
+ const REQUEST_RETRY_LIMIT: usize = if ConcreteEnv::MANUAL_RESIZE { 3 } else { 1 }; + + // Map [`Request`]'s to specific database functions. + // + // Both will: + // 1. Map the request to a function + // 2. Call the function + // 3. (manual resize only) If resize is needed, resize and retry + // 4. (manual resize only) Redo step {1, 2} + // 5. Send the function's `Result` back to the requester + // + // FIXME: there's probably a more elegant way + // to represent this retry logic with recursive + // functions instead of a loop. + 'retry: for retry in 0..REQUEST_RETRY_LIMIT { + // FIXME: will there be more than 1 write request? + // this won't have to be an enum. + let response = inner_handler(env, &request); + + // If the database needs to resize, do so. + if ConcreteEnv::MANUAL_RESIZE && matches!(response, Err(RuntimeError::ResizeNeeded)) { + // If this is the last iteration of the outer `for` loop and we + // encounter a resize error _again_, it means something is wrong. + assert_ne!( + retry, REQUEST_RETRY_LIMIT, + "database resize failed maximum of {REQUEST_RETRY_LIMIT} times" + ); + + // Resize the map, and retry the request handling loop. + // + // FIXME: + // We could pass in custom resizes to account for + // batches, i.e., we're about to add ~5GB of data, + // add that much instead of the default 1GB. + // + let old = env.current_map_size(); + let new = env.resize_map(None); + + // TODO: use tracing. + println!("resizing database memory map, old: {old}B, new: {new}B"); + + // Try handling the request again. + continue 'retry; + } + + // Automatically resizing databases should not be returning a resize error. + #[cfg(debug_assertions)] + if !ConcreteEnv::MANUAL_RESIZE { + assert!( + !matches!(response, Err(RuntimeError::ResizeNeeded)), + "auto-resizing database returned a ResizeNeeded error" + ); + } + + // Send the response back, whether if it's an `Ok` or `Err`. + if let Err(e) = response_sender.send(response) { + // TODO: use tracing. 
+ println!("database writer failed to send response: {e:?}"); + } + + continue 'main; + } + + // Above retry loop should either: + // - continue to the next ['main] loop or... + // - ...retry until panic + unreachable!(); + } +} diff --git a/types/src/blockchain.rs b/types/src/blockchain.rs index 1ff06c2..f1a8a75 100644 --- a/types/src/blockchain.rs +++ b/types/src/blockchain.rs @@ -1,4 +1,4 @@ -//! Database [`BCReadRequest`]s, [`BCWriteRequest`]s, and [`BCResponse`]s. +//! Database [`BlockchainReadRequest`]s, [`BlockchainWriteRequest`]s, and [`BlockchainResponse`]s. //! //! Tests that assert particular requests lead to particular //! responses are also tested in Cuprate's blockchain database crate. @@ -14,14 +14,14 @@ use crate::types::{Chain, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInfor //---------------------------------------------------------------------------------------------------- ReadRequest /// A read request to the blockchain database. /// -/// This pairs with [`BCResponse`], where each variant here -/// matches in name with a [`BCResponse`] variant. For example, -/// the proper response for a [`BCReadRequest::BlockHash`] -/// would be a [`BCResponse::BlockHash`]. +/// This pairs with [`BlockchainResponse`], where each variant here +/// matches in name with a [`BlockchainResponse`] variant. For example, +/// the proper response for a [`BlockchainReadRequest::BlockHash`] +/// would be a [`BlockchainResponse::BlockHash`]. /// /// See `Response` for the expected responses per `Request`. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum BCReadRequest { +pub enum BlockchainReadRequest { /// Request a block's extended header. /// /// The input is the block's height. @@ -104,10 +104,10 @@ pub enum BCReadRequest { /// A write request to the blockchain database. /// /// There is currently only 1 write request to the database, -/// as such, the only valid [`BCResponse`] to this request is -/// the proper response for a [`BCResponse::WriteBlockOk`]. 
+/// as such, the only valid [`BlockchainResponse`] to this request is +/// the proper response for a [`BlockchainResponse::WriteBlockOk`]. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum BCWriteRequest { +pub enum BlockchainWriteRequest { /// Request that a block be written to the database. /// /// Input is an already verified block. @@ -119,60 +119,60 @@ pub enum BCWriteRequest { /// /// These are the data types returned when using sending a `Request`. /// -/// This pairs with [`BCReadRequest`] and [`BCWriteRequest`], +/// This pairs with [`BlockchainReadRequest`] and [`BlockchainWriteRequest`], /// see those two for more info. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum BCResponse { +pub enum BlockchainResponse { //------------------------------------------------------ Reads - /// Response to [`BCReadRequest::BlockExtendedHeader`]. + /// Response to [`BlockchainReadRequest::BlockExtendedHeader`]. /// /// Inner value is the extended headed of the requested block. BlockExtendedHeader(ExtendedBlockHeader), - /// Response to [`BCReadRequest::BlockHash`]. + /// Response to [`BlockchainReadRequest::BlockHash`]. /// /// Inner value is the hash of the requested block. BlockHash([u8; 32]), - /// Response to [`BCReadRequest::FindBlock`]. + /// Response to [`BlockchainReadRequest::FindBlock`]. /// /// Inner value is the chain and height of the block if found. FindBlock(Option<(Chain, u64)>), - /// Response to [`BCReadRequest::FilterUnknownHashes`]. + /// Response to [`BlockchainReadRequest::FilterUnknownHashes`]. /// /// Inner value is the list of hashes that were in the main chain. FilterUnknownHashes(HashSet<[u8; 32]>), - /// Response to [`BCReadRequest::BlockExtendedHeaderInRange`]. + /// Response to [`BlockchainReadRequest::BlockExtendedHeaderInRange`]. /// /// Inner value is the list of extended header(s) of the requested block(s). BlockExtendedHeaderInRange(Vec), - /// Response to [`BCReadRequest::ChainHeight`]. 
+ /// Response to [`BlockchainReadRequest::ChainHeight`]. /// /// Inner value is the chain height, and the top block's hash. ChainHeight(u64, [u8; 32]), - /// Response to [`BCReadRequest::GeneratedCoins`]. + /// Response to [`BlockchainReadRequest::GeneratedCoins`]. /// /// Inner value is the total amount of generated coins up to and including the chosen height, in atomic units. GeneratedCoins(u64), - /// Response to [`BCReadRequest::Outputs`]. + /// Response to [`BlockchainReadRequest::Outputs`]. /// /// Inner value is all the outputs requested, /// associated with their amount and amount index. Outputs(HashMap>), - /// Response to [`BCReadRequest::NumberOutputsWithAmount`]. + /// Response to [`BlockchainReadRequest::NumberOutputsWithAmount`]. /// /// Inner value is a `HashMap` of all the outputs requested where: /// - Key = output amount /// - Value = count of outputs with the same amount NumberOutputsWithAmount(HashMap), - /// Response to [`BCReadRequest::KeyImagesSpent`]. + /// Response to [`BlockchainReadRequest::KeyImagesSpent`]. /// /// The inner value is `true` if _any_ of the key images /// were spent (existed in the database already). @@ -180,7 +180,7 @@ pub enum BCResponse { /// The inner value is `false` if _none_ of the key images were spent. KeyImagesSpent(bool), - /// Response to [`BCReadRequest::CompactChainHistory`]. + /// Response to [`BlockchainReadRequest::CompactChainHistory`]. CompactChainHistory { /// A list of blocks IDs in our chain, starting with the most recent block, all the way to the genesis block. /// @@ -190,7 +190,7 @@ pub enum BCResponse { cumulative_difficulty: u128, }, - /// The response for [`BCReadRequest::FindFirstUnknown`]. + /// The response for [`BlockchainReadRequest::FindFirstUnknown`]. /// /// Contains the index of the first unknown block and its expected height. 
/// @@ -198,7 +198,7 @@ pub enum BCResponse { FindFirstUnknown(Option<(usize, u64)>), //------------------------------------------------------ Writes - /// Response to [`BCWriteRequest::WriteBlock`]. + /// Response to [`BlockchainWriteRequest::WriteBlock`]. /// /// This response indicates that the requested block has /// successfully been written to the database without error. From 27767690ca7d7c5b44172a857581f08c728a740d Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Mon, 5 Aug 2024 19:50:38 -0400 Subject: [PATCH 030/104] rpc: impl `cuprate-rpc-interface` (#233) * fixed-bytes: add `serde`, document feature flags * fixed-bytes: add derives * rpc: add `as _` syntax to macro * rpc: use `ByteArrayVec` and `ContainerAsBlob` for binary types * fixed-bytes: re-add derives * rpc-types: dedup default value within macro * readme: fixed bytes section * types: custom epee - `BlockCompleteEntry` * types: custom epee - `KeyImageSpentStatus` * types: custom epee - `PoolInfoExtent` * types: add `Status::Other(String)` variant * types: custom epee - `TxEntry`, add `read_epee_field` macro * bin: custom epee - `GetBlocks` * types: add `serde.rs` * misc: make `TxEntry` an `enum`, impl serde * misc: `unimplemented!()` for `TxEntry`'s epee * types: add `BlockCompleteEntry` * rpc: replace `BlockCompleteEntry` with `cuprate-types` * types: document `BlockCompleteEntry` * bin: fix `number_of_fields` for `GetBlocksResponse` * misc: add `Distribution` * distribution: add todo * misc fixes * readme: add `(De)serialization invariants` * distribution: compress variants * types: add `block_complete_entry.rs` * net: fix imports * p2p: fix imports * turn off default-features * p2p: fix imports * misc fixes * Update net/wire/Cargo.toml Co-authored-by: Boog900 * distribution: module doc * wire: re-export types * test-utils: add `crate::rpc::types` module * test-utils: conditional json doc-tests * bin: use enum for `GetBlocksResponse` * misc: use lowercase for stringify * json: add test data, fix 
macro doc tests * json: add all data * other: add all data * bin: add skeleton * docs * move type to correct file * remove duplicated fields for custom epee * rpc: `client/{client,constants}.rs` -> `client.rs` * lib.rs: remove `clippy::module_inception` * macros: add json doc test macro * json: add some tests * json: add doc-test for all types * add all other JSON doc-tests * move doc-test macros to files * base: add doc-tests * rpc: add `cuprate-rpc-interface` skeleton files * traits * json_rpc_method: add `.is_restricted()` * add route fn signatures * types: add rpc enums * interface: routes, types * interface: simplify routes * rewrite interface fns * types: remove `()` type alias, add `(restricted)` * types: add `other::InPeers` * interface: routes * types: fix `is_restricted()` * interface: reorder short-circuit bool * clean up traits/bounds * types: remove `axum` feature * interface: cleanup unused imports * interface: call handler in routes * json: TODO distribution test * interface: readme intro * combine `RpcHandler` + `RpcService`, add `RpcDummyHandler` * interface: readme docs + test * `IsRestricted` -> `RpcCall` * fix no input route problem * interface: `RpcHandlerDummy` docs * interface: crate docs * replace `create_router` with `RouterBuilder` * types: docs * types: doc `JsonRpc{Request,Response}` * types: readme docs * interface: doc `route/` * interface: fix `todo!()` * interface: allow customizing HTTP method on route functions * interface: fix tests * fix derives * Update rpc/interface/README.md Co-authored-by: Boog900 * Update rpc/interface/README.md Co-authored-by: Boog900 * interface: make `RpcHandler`'s `Future` generic * interface: add JSON-RPC notification todo * formatting * interface: use associated type bound for `RpcHandler`'s `Future` --------- Co-authored-by: Boog900 --- Cargo.lock | 196 ++++++++++++++++++++++++ rpc/interface/Cargo.toml | 17 +++ rpc/interface/README.md | 161 ++++++++++++++++++++ rpc/interface/src/lib.rs | 122 
+++++++++++++++ rpc/interface/src/route/bin.rs | 108 ++++++++++++++ rpc/interface/src/route/fallback.rs | 18 +++ rpc/interface/src/route/json_rpc.rs | 68 +++++++++ rpc/interface/src/route/mod.rs | 9 ++ rpc/interface/src/route/other.rs | 138 +++++++++++++++++ rpc/interface/src/router_builder.rs | 198 +++++++++++++++++++++++++ rpc/interface/src/rpc_error.rs | 34 +++++ rpc/interface/src/rpc_handler.rs | 57 +++++++ rpc/interface/src/rpc_handler_dummy.rs | 142 ++++++++++++++++++ rpc/interface/src/rpc_request.rs | 33 +++++ rpc/interface/src/rpc_response.rs | 33 +++++ 15 files changed, 1334 insertions(+) create mode 100644 rpc/interface/README.md create mode 100644 rpc/interface/src/route/bin.rs create mode 100644 rpc/interface/src/route/fallback.rs create mode 100644 rpc/interface/src/route/json_rpc.rs create mode 100644 rpc/interface/src/route/mod.rs create mode 100644 rpc/interface/src/route/other.rs create mode 100644 rpc/interface/src/router_builder.rs create mode 100644 rpc/interface/src/rpc_error.rs create mode 100644 rpc/interface/src/rpc_handler.rs create mode 100644 rpc/interface/src/rpc_handler_dummy.rs create mode 100644 rpc/interface/src/rpc_request.rs create mode 100644 rpc/interface/src/rpc_response.rs diff --git a/Cargo.lock b/Cargo.lock index eaf5f99..1f99810 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -94,12 +94,73 @@ dependencies = [ "syn 2.0.66", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "autocfg" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +[[package]] +name = "axum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +dependencies = [ + 
"async-trait", + "axum-core", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "tokio", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "backtrace" version = "0.3.73" @@ -374,6 +435,15 @@ dependencies = [ "libc", ] +[[package]] +name = "crc32fast" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +dependencies = [ + "cfg-if", +] + [[package]] name = "crossbeam" version = "0.8.4" @@ -764,6 +834,20 @@ dependencies = [ [[package]] name = "cuprate-rpc-interface" version = "0.0.0" +dependencies = [ + "axum", + "cuprate-epee-encoding", + "cuprate-helper", + "cuprate-json-rpc", + "cuprate-rpc-types", + "futures", + "paste", + "serde", + "serde_json", + "tokio", + "tower", + "ureq", +] [[package]] name = "cuprate-rpc-types" @@ -1030,6 +1114,16 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" +[[package]] +name = "flate2" +version = "1.0.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +dependencies = [ + "crc32fast", + "miniz_oxide", +] 
+ [[package]] name = "flexible-transcript" version = "0.3.2" @@ -1179,6 +1273,25 @@ dependencies = [ "subtle", ] +[[package]] +name = "h2" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap 2.2.6", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "hashbrown" version = "0.14.5" @@ -1299,6 +1412,12 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0e7a4dd27b9476dc40cb050d3632d3bba3a70ddbff012285f7f8559a1e7e545" +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + [[package]] name = "hyper" version = "1.3.1" @@ -1308,9 +1427,11 @@ dependencies = [ "bytes", "futures-channel", "futures-util", + "h2", "http", "http-body", "httparse", + "httpdate", "itoa", "pin-project-lite", "smallvec", @@ -1610,6 +1731,12 @@ version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "md-5" version = "0.10.6" @@ -1638,6 +1765,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + [[package]] name = "miniz_oxide" version = "0.7.3" @@ -2204,6 +2337,7 @@ version = "0.23.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" dependencies = [ + "log", "once_cell", "ring", "rustls-pki-types", @@ -2351,6 +2485,28 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +dependencies = [ + "itoa", + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + [[package]] name = "sha2" version = "0.10.8" @@ -2486,6 +2642,18 @@ dependencies = [ "syn 2.0.66", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" + [[package]] name = "synchronoise" version = "1.0.1" @@ -2706,6 +2874,7 @@ version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ + "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -2770,6 +2939,24 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +[[package]] +name = "ureq" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72139d247e5f97a3eff96229a7ae85ead5328a39efe76f8bf5a06313d505b6ea" +dependencies = [ + "base64", + "flate2", + "log", + "once_cell", + "rustls", + 
"rustls-pki-types", + "serde", + "serde_json", + "url", + "webpki-roots", +] + [[package]] name = "url" version = "2.5.1" @@ -2877,6 +3064,15 @@ version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +[[package]] +name = "webpki-roots" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "winapi" version = "0.3.9" diff --git a/rpc/interface/Cargo.toml b/rpc/interface/Cargo.toml index 47af5cd..a83c0f0 100644 --- a/rpc/interface/Cargo.toml +++ b/rpc/interface/Cargo.toml @@ -9,7 +9,24 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/rpc/cuprate-rpc-inte keywords = ["cuprate", "rpc", "interface"] [features] +default = ["dummy", "serde"] +dummy = [] [dependencies] +cuprate-epee-encoding = { path = "../../net/epee-encoding", default-features = false } +cuprate-json-rpc = { path = "../json-rpc", default-features = false } +cuprate-rpc-types = { path = "../types", features = ["serde", "epee"], default-features = false } +cuprate-helper = { path = "../../helper", features = ["asynch"], default-features = false } + +axum = { version = "0.7.5", features = ["json"], default-features = false } +serde = { workspace = true, optional = true } +serde_json = { workspace = true, features = ["std"] } +tower = { workspace = true } +paste = { workspace = true } +futures = { workspace = true } [dev-dependencies] +axum = { version = "0.7.5", features = ["json", "tokio", "http2"] } +serde_json = { workspace = true, features = ["std"] } +tokio = { workspace = true, features = ["full"] } +ureq = { version = "2.10.0", features = ["json"] } \ No newline at end of file diff --git a/rpc/interface/README.md b/rpc/interface/README.md new file mode 100644 index 0000000..3a63ac4 --- /dev/null +++ 
b/rpc/interface/README.md @@ -0,0 +1,161 @@ +# `cuprate-rpc-interface` +This crate provides Cuprate's RPC _interface_. + +This crate is _not_ a standalone RPC server, it is just the interface. + +```text + cuprate-rpc-interface provides these parts + │ │ +┌───────────────────────────┤ ├───────────────────┐ +▼ ▼ ▼ ▼ +CLIENT ─► ROUTE ─► REQUEST ─► HANDLER ─► RESPONSE ─► CLIENT + ▲ ▲ + └───┬───┘ + │ + You provide this part +``` + +Everything coming _in_ from a client is handled by this crate. + +This is where your [`RpcHandler`] turns this [`RpcRequest`] into a [`RpcResponse`]. + +You hand this `Response` back to `cuprate-rpc-interface` and it will take care of sending it back to the client. + +The main handler used by Cuprate is implemented in the `cuprate-rpc-handler` crate; +it implements the standard RPC handlers modeled after `monerod`. + +# Purpose +`cuprate-rpc-interface` is built on-top of [`axum`], +which is the crate _actually_ handling everything. + +This crate simply handles: +- Registering endpoint routes (e.g. `/get_block.bin`) +- Defining handler function signatures +- (De)serialization of requests/responses (JSON-RPC, binary, JSON) + +The actual server details are all handled by the [`axum`] and [`tower`] ecosystem. + +The proper usage of this crate is to: +1. Implement a [`RpcHandler`] +2. Use it with [`RouterBuilder`] to generate an + [`axum::Router`] with all Monero RPC routes set +3. Do whatever with it + +# The [`RpcHandler`] +This is your [`tower::Service`] that converts [`RpcRequest`]s into [`RpcResponse`]s, +i.e. the "inner handler". + +Said concretely, `RpcHandler` is a `tower::Service` where the associated types are from this crate: +- [`RpcRequest`] +- [`RpcResponse`] +- [`RpcError`] + +`RpcHandler`'s [`Future`](std::future::Future) is generic, _although_, +it must output `Result`. + +The `RpcHandler` must also hold some state that is required +for RPC server operation. 
+ +The only state currently needed is [`RpcHandler::restricted`], which determines if an RPC +server is restricted or not, and thus, if some endpoints/methods are allowed or not. + +# Unknown endpoint behavior +TODO: decide what this crate should return (per different endpoint) +when a request is received to an unknown endpoint, including HTTP stuff, e.g. status code. + +# Unknown JSON-RPC method behavior +TODO: decide what this crate returns when a `/json_rpc` +request is received with an unknown method, including HTTP stuff, e.g. status code. + +# Example +Example usage of this crate + starting an RPC server. + +This uses `RpcHandlerDummy` as the handler; it always responds with the +correct response type, but set to a default value regardless of the request. + +```rust +use std::sync::Arc; + +use tokio::{net::TcpListener, sync::Barrier}; + +use cuprate_json_rpc::{Request, Response, Id}; +use cuprate_rpc_types::{ + json::{JsonRpcRequest, JsonRpcResponse, GetBlockCountResponse}, + other::{OtherRequest, OtherResponse}, +}; +use cuprate_rpc_interface::{RouterBuilder, RpcHandlerDummy, RpcRequest}; + +// Send a `/get_height` request. This endpoint has no inputs. +async fn get_height(port: u16) -> OtherResponse { + let url = format!("http://127.0.0.1:{port}/get_height"); + ureq::get(&url) + .set("Content-Type", "application/json") + .call() + .unwrap() + .into_json() + .unwrap() +} + +// Send a JSON-RPC request with the `get_block_count` method. +// +// The returned [`String`] is JSON. +async fn get_block_count(port: u16) -> String { + let url = format!("http://127.0.0.1:{port}/json_rpc"); + let method = JsonRpcRequest::GetBlockCount(Default::default()); + let request = Request::new(method); + ureq::get(&url) + .set("Content-Type", "application/json") + .send_json(request) + .unwrap() + .into_string() + .unwrap() +} + +#[tokio::main] +async fn main() { + // Start a local RPC server. + let port = { + // Create the router. 
+ let state = RpcHandlerDummy { restricted: false }; + let router = RouterBuilder::new().all().build().with_state(state); + + // Start a server. + let listener = TcpListener::bind("127.0.0.1:0") + .await + .unwrap(); + let port = listener.local_addr().unwrap().port(); + + // Run the server with `axum`. + tokio::task::spawn(async move { + axum::serve(listener, router).await.unwrap(); + }); + + port + }; + + // Assert the response is the default. + let response = get_height(port).await; + let expected = OtherResponse::GetHeight(Default::default()); + assert_eq!(response, expected); + + // Assert the response JSON is correct. + let response = get_block_count(port).await; + let expected = r#"{"jsonrpc":"2.0","id":null,"result":{"status":"OK","untrusted":false,"count":0}}"#; + assert_eq!(response, expected); + + // Assert that (de)serialization works. + let expected = Response::ok(Id::Null, Default::default()); + let response: Response = serde_json::from_str(&response).unwrap(); + assert_eq!(response, expected); +} +``` + +# Feature flags +List of feature flags for `cuprate-rpc-interface`. + +All are enabled by default. + +| Feature flag | Does what | +|--------------|-----------| +| `serde` | Enables serde on applicable types +| `dummy` | Enables the `RpcHandlerDummy` type \ No newline at end of file diff --git a/rpc/interface/src/lib.rs b/rpc/interface/src/lib.rs index 8b13789..2656b07 100644 --- a/rpc/interface/src/lib.rs +++ b/rpc/interface/src/lib.rs @@ -1 +1,123 @@ +#![doc = include_str!("../README.md")] +#![cfg_attr(docsrs, feature(doc_cfg))] +//---------------------------------------------------------------------------------------------------- Lints +// Forbid lints. +// Our code, and code generated (e.g macros) cannot overrule these. +#![forbid( + // `unsafe` is allowed but it _must_ be + // commented with `SAFETY: reason`. + clippy::undocumented_unsafe_blocks, + // Never. 
+ unused_unsafe, + redundant_semicolons, + unused_allocation, + coherence_leak_check, + while_true, + + // Maybe can be put into `#[deny]`. + unconditional_recursion, + for_loops_over_fallibles, + unused_braces, + unused_labels, + keyword_idents, + non_ascii_idents, + variant_size_differences, + single_use_lifetimes, + + // Probably can be put into `#[deny]`. + future_incompatible, + let_underscore, + break_with_label_and_loop, + duplicate_macro_attributes, + exported_private_dependencies, + large_assignments, + overlapping_range_endpoints, + semicolon_in_expressions_from_macros, + noop_method_call, +)] +// Deny lints. +// Some of these are `#[allow]`'ed on a per-case basis. +#![deny( + clippy::all, + clippy::correctness, + clippy::suspicious, + clippy::style, + clippy::complexity, + clippy::perf, + clippy::pedantic, + clippy::nursery, + clippy::cargo, + unused_doc_comments, + unused_mut, + missing_docs, + deprecated, + unused_comparisons, + nonstandard_style, + unreachable_pub +)] +#![allow( + // FIXME: this lint affects crates outside of + // `database/` for some reason, allow for now. + clippy::cargo_common_metadata, + + // FIXME: adding `#[must_use]` onto everything + // might just be more annoying than useful... + // although it is sometimes nice. + clippy::must_use_candidate, + + // FIXME: good lint but too many false positives + // with our `Env` + `RwLock` setup. + clippy::significant_drop_tightening, + + // FIXME: good lint but is less clear in most cases. + clippy::items_after_statements, + + // TODO + rustdoc::bare_urls, + + clippy::module_name_repetitions, + clippy::module_inception, + clippy::redundant_pub_crate, + clippy::option_if_let_else, +)] +// Allow some lints when running in debug mode. +#![cfg_attr( + debug_assertions, + allow( + clippy::todo, + clippy::multiple_crate_versions, + unused_imports, + unused_variables + ) +)] +// Allow some lints in tests. 
+#![cfg_attr( + test, + allow( + clippy::cognitive_complexity, + clippy::needless_pass_by_value, + clippy::cast_possible_truncation, + clippy::too_many_lines + ) +)] +// TODO: remove me after finishing impl +#![allow(dead_code, unreachable_code, clippy::diverging_sub_expression)] + +//---------------------------------------------------------------------------------------------------- Mod +mod route; +mod router_builder; +mod rpc_error; +mod rpc_handler; +#[cfg(feature = "dummy")] +mod rpc_handler_dummy; +mod rpc_request; +mod rpc_response; + +pub use router_builder::RouterBuilder; +pub use rpc_error::RpcError; +pub use rpc_handler::RpcHandler; +#[cfg(feature = "dummy")] +pub use rpc_handler_dummy::RpcHandlerDummy; +pub use rpc_request::RpcRequest; +pub use rpc_response::RpcResponse; diff --git a/rpc/interface/src/route/bin.rs b/rpc/interface/src/route/bin.rs new file mode 100644 index 0000000..b17b98c --- /dev/null +++ b/rpc/interface/src/route/bin.rs @@ -0,0 +1,108 @@ +//! Binary route functions. + +//---------------------------------------------------------------------------------------------------- Import +use axum::{body::Bytes, extract::State, http::StatusCode}; +use tower::ServiceExt; + +use cuprate_epee_encoding::from_bytes; +use cuprate_rpc_types::bin::{BinRequest, BinResponse, GetTransactionPoolHashesRequest}; + +use crate::{rpc_handler::RpcHandler, rpc_request::RpcRequest, rpc_response::RpcResponse}; + +//---------------------------------------------------------------------------------------------------- Routes +/// This macro generates route functions that expect input. +/// +/// See below for usage. +macro_rules! generate_endpoints_with_input { + ($( + // Syntax: + // Function name => Expected input type + $endpoint:ident => $variant:ident + ),*) => { paste::paste! { + $( + /// TODO + #[allow(unused_mut)] + pub(crate) async fn $endpoint( + State(handler): State, + mut request: Bytes, + ) -> Result { + // Serialize into the request type. 
+ let request = BinRequest::$variant( + from_bytes(&mut request).map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)? + ); + + generate_endpoints_inner!($variant, handler, request) + } + )* + }}; +} + +/// This macro generates route functions that expect _no_ input. +/// +/// See below for usage. +macro_rules! generate_endpoints_with_no_input { + ($( + // Syntax: + // Function name => Expected input type (that is empty) + $endpoint:ident => $variant:ident + ),*) => { paste::paste! { + $( + /// TODO + #[allow(unused_mut)] + pub(crate) async fn $endpoint( + State(handler): State, + ) -> Result { + const REQUEST: BinRequest = BinRequest::$variant([<$variant Request>] {}); + generate_endpoints_inner!($variant, handler, REQUEST) + } + )* + }}; +} + +/// De-duplicated inner function body for: +/// - [`generate_endpoints_with_input`] +/// - [`generate_endpoints_with_no_input`] +macro_rules! generate_endpoints_inner { + ($variant:ident, $handler:ident, $request:expr) => { + paste::paste! { + { + // Send request. + let request = RpcRequest::Binary($request); + let channel = $handler.oneshot(request).await?; + + // Assert the response from the inner handler is correct. + let RpcResponse::Binary(response) = channel else { + panic!("RPC handler did not return a binary response"); + }; + let BinResponse::$variant(response) = response else { + panic!("RPC handler returned incorrect response"); + }; + + // Serialize to bytes and respond. + match cuprate_epee_encoding::to_bytes(response) { + Ok(bytes) => Ok(bytes.freeze()), + Err(e) => Err(StatusCode::INTERNAL_SERVER_ERROR), + } + } + } + }; +} + +generate_endpoints_with_input! { + get_blocks => GetBlocks, + get_blocks_by_height => GetBlocksByHeight, + get_hashes => GetHashes, + get_o_indexes => GetOutputIndexes, + get_outs => GetOuts, + get_output_distribution => GetOutputDistribution +} + +generate_endpoints_with_no_input! 
{ + get_transaction_pool_hashes => GetTransactionPoolHashes +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} diff --git a/rpc/interface/src/route/fallback.rs b/rpc/interface/src/route/fallback.rs new file mode 100644 index 0000000..9478901 --- /dev/null +++ b/rpc/interface/src/route/fallback.rs @@ -0,0 +1,18 @@ +//! Fallback route functions. + +//---------------------------------------------------------------------------------------------------- Import +use axum::http::StatusCode; + +//---------------------------------------------------------------------------------------------------- Routes +/// Fallback route function. +/// +/// This is used as the fallback endpoint in [`crate::RouterBuilder`]. +pub(crate) async fn fallback() -> StatusCode { + StatusCode::NOT_FOUND +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} diff --git a/rpc/interface/src/route/json_rpc.rs b/rpc/interface/src/route/json_rpc.rs new file mode 100644 index 0000000..bd35e43 --- /dev/null +++ b/rpc/interface/src/route/json_rpc.rs @@ -0,0 +1,68 @@ +//! JSON-RPC 2.0 endpoint route functions. + +//---------------------------------------------------------------------------------------------------- Import +use std::borrow::Cow; + +use axum::{extract::State, http::StatusCode, Json}; +use tower::ServiceExt; + +use cuprate_json_rpc::{ + error::{ErrorCode, ErrorObject}, + Id, +}; +use cuprate_rpc_types::{ + json::{JsonRpcRequest, JsonRpcResponse}, + RpcCallValue, +}; + +use crate::{rpc_handler::RpcHandler, rpc_request::RpcRequest, rpc_response::RpcResponse}; + +//---------------------------------------------------------------------------------------------------- Routes +/// The `/json_rpc` route function used in [`crate::RouterBuilder`]. 
+pub(crate) async fn json_rpc<H: RpcHandler>(
+    State(handler): State<H>,
+    Json(request): Json<cuprate_json_rpc::Request<JsonRpcRequest>>,
+) -> Result<Json<cuprate_json_rpc::Response<JsonRpcResponse>>, StatusCode> {
+    // TODO:
+    //
+    // JSON-RPC notifications (requests without `id`)
+    // must not be responded to, although, the request's side-effects
+    // must remain. How to do this considering this function will
+    // always return and cause `axum` to respond?
+
+    // Return early if this RPC server is restricted and
+    // the requested method is only for non-restricted RPC.
+    if request.body.is_restricted() && handler.restricted() {
+        let error_object = ErrorObject {
+            code: ErrorCode::ServerError(-1 /* TODO */),
+            message: Cow::Borrowed("Restricted. TODO: mimic monerod message"),
+            data: None,
+        };
+
+        // JSON-RPC 2.0 rule:
+        // If there was an error in detecting the `Request`'s ID,
+        // the `Response` must contain an `Id::Null`
+        let id = request.id.unwrap_or(Id::Null);
+
+        let response = cuprate_json_rpc::Response::err(id, error_object);
+
+        return Ok(Json(response));
+    }
+
+    // Send request.
+    let request = RpcRequest::JsonRpc(request);
+    let channel = handler.oneshot(request).await?;
+
+    // Assert the response from the inner handler is correct.
+    let RpcResponse::JsonRpc(response) = channel else {
+        panic!("RPC handler returned incorrect response");
+    };
+
+    Ok(Json(response))
+}
+
+//---------------------------------------------------------------------------------------------------- Tests
+#[cfg(test)]
+mod test {
+    // use super::*;
+}
diff --git a/rpc/interface/src/route/mod.rs b/rpc/interface/src/route/mod.rs
new file mode 100644
index 0000000..7ff9ab8
--- /dev/null
+++ b/rpc/interface/src/route/mod.rs
@@ -0,0 +1,9 @@
+//! Routing functions.
+//!
+//! These are the function signatures passed to
+//! [`crate::RouterBuilder`] when registering routes.
+ +pub(crate) mod bin; +pub(crate) mod fallback; +pub(crate) mod json_rpc; +pub(crate) mod other; diff --git a/rpc/interface/src/route/other.rs b/rpc/interface/src/route/other.rs new file mode 100644 index 0000000..ce778db --- /dev/null +++ b/rpc/interface/src/route/other.rs @@ -0,0 +1,138 @@ +//! Other JSON endpoint route functions. + +//---------------------------------------------------------------------------------------------------- Import +use axum::{extract::State, http::StatusCode, Json}; +use tower::ServiceExt; + +use cuprate_rpc_types::{ + other::{ + GetAltBlocksHashesRequest, GetAltBlocksHashesResponse, GetHeightRequest, GetHeightResponse, + GetLimitRequest, GetLimitResponse, GetNetStatsRequest, GetNetStatsResponse, GetOutsRequest, + GetOutsResponse, GetPeerListRequest, GetPeerListResponse, GetPublicNodesRequest, + GetPublicNodesResponse, GetTransactionPoolHashesRequest, GetTransactionPoolHashesResponse, + GetTransactionPoolRequest, GetTransactionPoolResponse, GetTransactionPoolStatsRequest, + GetTransactionPoolStatsResponse, GetTransactionsRequest, GetTransactionsResponse, + InPeersRequest, InPeersResponse, IsKeyImageSpentRequest, IsKeyImageSpentResponse, + MiningStatusRequest, MiningStatusResponse, OtherRequest, OtherResponse, OutPeersRequest, + OutPeersResponse, PopBlocksRequest, PopBlocksResponse, SaveBcRequest, SaveBcResponse, + SendRawTransactionRequest, SendRawTransactionResponse, SetBootstrapDaemonRequest, + SetBootstrapDaemonResponse, SetLimitRequest, SetLimitResponse, SetLogCategoriesRequest, + SetLogCategoriesResponse, SetLogHashRateRequest, SetLogHashRateResponse, + SetLogLevelRequest, SetLogLevelResponse, StartMiningRequest, StartMiningResponse, + StopDaemonRequest, StopDaemonResponse, StopMiningRequest, StopMiningResponse, + UpdateRequest, UpdateResponse, + }, + RpcCall, +}; + +use crate::{rpc_handler::RpcHandler, rpc_request::RpcRequest, rpc_response::RpcResponse}; + 
+//---------------------------------------------------------------------------------------------------- Routes +/// This macro generates route functions that expect input. +/// +/// See below for usage. +macro_rules! generate_endpoints_with_input { + ($( + // Syntax: + // Function name => Expected input type + $endpoint:ident => $variant:ident + ),*) => { paste::paste! { + $( + pub(crate) async fn $endpoint( + State(handler): State, + Json(request): Json<[<$variant Request>]>, + ) -> Result]>, StatusCode> { + generate_endpoints_inner!($variant, handler, request) + } + )* + }}; +} + +/// This macro generates route functions that expect _no_ input. +/// +/// See below for usage. +macro_rules! generate_endpoints_with_no_input { + ($( + // Syntax: + // Function name => Expected input type (that is empty) + $endpoint:ident => $variant:ident + ),*) => { paste::paste! { + $( + pub(crate) async fn $endpoint( + State(handler): State, + ) -> Result]>, StatusCode> { + generate_endpoints_inner!($variant, handler, [<$variant Request>] {}) + } + )* + }}; +} + +/// De-duplicated inner function body for: +/// - [`generate_endpoints_with_input`] +/// - [`generate_endpoints_with_no_input`] +macro_rules! generate_endpoints_inner { + ($variant:ident, $handler:ident, $request:expr) => { + paste::paste! { + { + // Check if restricted. + if [<$variant Request>]::IS_RESTRICTED && $handler.restricted() { + // TODO: mimic `monerod` behavior. + return Err(StatusCode::FORBIDDEN); + } + + // Send request. + let request = RpcRequest::Other(OtherRequest::$variant($request)); + let channel = $handler.oneshot(request).await?; + + // Assert the response from the inner handler is correct. + let RpcResponse::Other(response) = channel else { + panic!("RPC handler did not return a binary response"); + }; + let OtherResponse::$variant(response) = response else { + panic!("RPC handler returned incorrect response") + }; + + Ok(Json(response)) + } + } + }; +} + +generate_endpoints_with_input! 
{ + get_transactions => GetTransactions, + is_key_image_spent => IsKeyImageSpent, + send_raw_transaction => SendRawTransaction, + start_mining => StartMining, + get_peer_list => GetPeerList, + set_log_hash_rate => SetLogHashRate, + set_log_level => SetLogLevel, + set_log_categories => SetLogCategories, + set_bootstrap_daemon => SetBootstrapDaemon, + set_limit => SetLimit, + out_peers => OutPeers, + in_peers => InPeers, + get_outs => GetOuts, + update => Update, + pop_blocks => PopBlocks, + get_public_nodes => GetPublicNodes +} + +generate_endpoints_with_no_input! { + get_height => GetHeight, + get_alt_blocks_hashes => GetAltBlocksHashes, + stop_mining => StopMining, + mining_status => MiningStatus, + save_bc => SaveBc, + get_transaction_pool => GetTransactionPool, + get_transaction_pool_stats => GetTransactionPoolStats, + stop_daemon => StopDaemon, + get_limit => GetLimit, + get_net_stats => GetNetStats, + get_transaction_pool_hashes => GetTransactionPoolHashes +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} diff --git a/rpc/interface/src/router_builder.rs b/rpc/interface/src/router_builder.rs new file mode 100644 index 0000000..d370cf4 --- /dev/null +++ b/rpc/interface/src/router_builder.rs @@ -0,0 +1,198 @@ +//! Free functions. + +use std::marker::PhantomData; + +//---------------------------------------------------------------------------------------------------- Use +use axum::{ + routing::{method_routing::get, post}, + Router, +}; + +use crate::{ + route::{bin, fallback, json_rpc, other}, + rpc_handler::RpcHandler, +}; + +//---------------------------------------------------------------------------------------------------- RouterBuilder +/// Generate the `RouterBuilder` struct. +macro_rules! 
generate_router_builder { + ($( + // Syntax: + // $BUILDER_FUNCTION_NAME => + // $ACTUAL_ENDPOINT_STRING => + // $ENDPOINT_FUNCTION_MODULE::$ENDPOINT_FUNCTION => + // ($HTTP_METHOD(s)) + $endpoint_ident:ident => + $endpoint_string:literal => + $endpoint_module:ident::$endpoint_fn:ident => + ($($http_method:ident),*) + ),* $(,)?) => { + /// Builder for creating the RPC router. + /// + /// This builder allows you to selectively enable endpoints for the router, + /// and a [`fallback`](RouterBuilder::fallback) route. + /// + /// The [`default`](RouterBuilder::default) is to enable [`all`](RouterBuilder::all) routes. + /// + /// # Routes + /// Functions that enable routes are separated into 3 groups: + /// - `json_rpc` (enables all of JSON RPC 2.0) + /// - `other_` (e.g. [`other_get_height`](RouterBuilder::other_get_height)) + /// - `bin_` (e.g. [`bin_get_blocks`](RouterBuilder::bin_get_blocks)) + /// + /// For a list of all `monerod` routes, see + /// [here](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server.h#L97-L189), + /// or the source file of this type. + /// + /// # Aliases + /// Some routes have aliases, such as [`/get_height`](RouterBuilder::other_get_height) + /// and [`/getheight`](RouterBuilder::other_getheight). + /// + /// These both route to the same handler function, but they do not enable each other. + /// + /// If desired, you can enable `/get_height` but not `/getheight`. + /// + /// # Example + /// ```rust + /// use cuprate_rpc_interface::{RouterBuilder, RpcHandlerDummy}; + /// + /// // Create a router with _only_ `/json_rpc` enabled. + /// let only_json_rpc = RouterBuilder::::new() + /// .json_rpc() + /// .build(); + /// + /// // Create a router with: + /// // - `/get_outs.bin` enabled + /// // - A fallback enabled + /// let get_outs_bin_and_fallback = RouterBuilder::::new() + /// .bin_get_outs() + /// .fallback() + /// .build(); + /// + /// // Create a router with all endpoints enabled. 
+ /// let all = RouterBuilder::::new() + /// .all() + /// .build(); + /// ``` + #[allow(clippy::struct_excessive_bools)] + #[derive(Clone)] + pub struct RouterBuilder { + router: Router, + } + + impl RouterBuilder { + /// Create a new [`Self`]. + #[must_use] + pub fn new() -> Self { + Self { + router: Router::new(), + } + } + + /// Build [`Self`] into a [`Router`]. + /// + /// All endpoints enabled in [`RouterBuilder`] + /// will be enabled in this [`Router`]. + pub fn build(self) -> Router { + self.router + } + + /// Enable all endpoints, including [`Self::fallback`]. + #[must_use] + pub fn all(mut self) -> Self { + $( + self = self.$endpoint_ident(); + )* + + self.fallback() + } + + /// Enable the catch-all fallback route. + /// + /// Any unknown or disabled route will route here, e.g.: + /// - `get_info` + /// - `getinfo` + /// - `asdf` + #[must_use] + pub fn fallback(self) -> Self { + Self { + router: self.router.fallback(fallback::fallback), + } + } + + $( + #[doc = concat!( + "Enable the `", + $endpoint_string, + "` endpoint.", + )] + #[must_use] + pub fn $endpoint_ident(self) -> Self { + Self { + router: self.router.route( + $endpoint_string, + ::axum::routing::method_routing::MethodRouter::new() + $(.$http_method($endpoint_module::$endpoint_fn::))* + ), + } + } + )* + } + }; +} + +generate_router_builder! { + // JSON-RPC 2.0 route. + json_rpc => "/json_rpc" => json_rpc::json_rpc => (get, post), + + // Other JSON routes. 
+ other_get_height => "/get_height" => other::get_height => (get, post), + other_getheight => "/getheight" => other::get_height => (get, post), + other_get_transactions => "/get_transactions" => other::get_transactions => (get, post), + other_gettransactions => "/gettransactions" => other::get_transactions => (get, post), + other_get_alt_blocks_hashes => "/get_alt_blocks_hashes" => other::get_alt_blocks_hashes => (get, post), + other_is_key_image_spent => "/is_key_image_spent" => other::is_key_image_spent => (get, post), + other_send_raw_transaction => "/send_raw_transaction" => other::send_raw_transaction => (get, post), + other_sendrawtransaction => "/sendrawtransaction" => other::send_raw_transaction => (get, post), + other_start_mining => "/start_mining" => other::start_mining => (get, post), + other_stop_mining => "/stop_mining" => other::stop_mining => (get, post), + other_mining_status => "/mining_status" => other::mining_status => (get, post), + other_save_bc => "/save_bc" => other::save_bc => (get, post), + other_get_peer_list => "/get_peer_list" => other::get_peer_list => (get, post), + other_get_public_nodes => "/get_public_nodes" => other::get_public_nodes => (get, post), + other_set_log_hash_rate => "/set_log_hash_rate" => other::set_log_hash_rate => (get, post), + other_set_log_level => "/set_log_level" => other::set_log_level => (get, post), + other_set_log_categories => "/set_log_categories" => other::set_log_categories => (get, post), + other_get_transaction_pool => "/get_transaction_pool" => other::get_transaction_pool => (get, post), + other_get_transaction_pool_hashes => "/get_transaction_pool_hashes" => other::get_transaction_pool_hashes => (get, post), + other_get_transaction_pool_stats => "/get_transaction_pool_stats" => other::get_transaction_pool_stats => (get, post), + other_set_bootstrap_daemon => "/set_bootstrap_daemon" => other::set_bootstrap_daemon => (get, post), + other_stop_daemon => "/stop_daemon" => other::stop_daemon => (get, 
post), + other_get_net_stats => "/get_net_stats" => other::get_net_stats => (get, post), + other_get_limit => "/get_limit" => other::get_limit => (get, post), + other_set_limit => "/set_limit" => other::set_limit => (get, post), + other_out_peers => "/out_peers" => other::out_peers => (get, post), + other_in_peers => "/in_peers" => other::in_peers => (get, post), + other_get_outs => "/get_outs" => other::get_outs => (get, post), + other_update => "/update" => other::update => (get, post), + other_pop_blocks => "/pop_blocks" => other::pop_blocks => (get, post), + + // Binary routes. + bin_get_blocks => "/get_blocks.bin" => bin::get_blocks => (get, post), + bin_getblocks => "/getblocks.bin" => bin::get_blocks => (get, post), + bin_get_blocks_by_height => "/get_blocks_by_height.bin" => bin::get_blocks_by_height => (get, post), + bin_getblocks_by_height => "/getblocks_by_height.bin" => bin::get_blocks_by_height => (get, post), + bin_get_hashes => "/get_hashes.bin" => bin::get_hashes => (get, post), + bin_gethashes => "/gethashes.bin" => bin::get_hashes => (get, post), + bin_get_o_indexes => "/get_o_indexes.bin" => bin::get_o_indexes => (get, post), + bin_get_outs => "/get_outs.bin" => bin::get_outs => (get, post), + bin_get_transaction_pool_hashes => "/get_transaction_pool_hashes.bin" => bin::get_transaction_pool_hashes => (get, post), + bin_get_output_distribution => "/get_output_distribution.bin" => bin::get_output_distribution => (get, post), +} + +impl Default for RouterBuilder { + /// Uses [`Self::all`]. + fn default() -> Self { + Self::new().all() + } +} diff --git a/rpc/interface/src/rpc_error.rs b/rpc/interface/src/rpc_error.rs new file mode 100644 index 0000000..92b9cc1 --- /dev/null +++ b/rpc/interface/src/rpc_error.rs @@ -0,0 +1,34 @@ +//! RPC errors. 
+ +//---------------------------------------------------------------------------------------------------- Import +use axum::http::StatusCode; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +//---------------------------------------------------------------------------------------------------- RpcError +/// Possible errors during RPC operation. +/// +/// These are any errors that can happen _during_ a handler function. +/// I.e. if this error surfaces, it happened _after_ the request was +/// deserialized. +/// +/// This is the `Error` type required to be used in an [`RpcHandler`](crate::RpcHandler). +/// +/// TODO: This is empty as possible errors will be +/// enumerated when the handler functions are created. +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +pub enum RpcError {} + +impl From for StatusCode { + fn from(value: RpcError) -> Self { + // TODO + Self::INTERNAL_SERVER_ERROR + } +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} diff --git a/rpc/interface/src/rpc_handler.rs b/rpc/interface/src/rpc_handler.rs new file mode 100644 index 0000000..3d1c28d --- /dev/null +++ b/rpc/interface/src/rpc_handler.rs @@ -0,0 +1,57 @@ +//! RPC handler trait. 
+
+//---------------------------------------------------------------------------------------------------- Use
+use std::{future::Future, task::Poll};
+
+use axum::{http::StatusCode, response::IntoResponse};
+use futures::{channel::oneshot::channel, FutureExt};
+use tower::Service;
+
+use cuprate_helper::asynch::InfallibleOneshotReceiver;
+use cuprate_json_rpc::Id;
+use cuprate_rpc_types::json::JsonRpcRequest;
+
+use crate::{rpc_error::RpcError, rpc_request::RpcRequest, rpc_response::RpcResponse};
+
+//---------------------------------------------------------------------------------------------------- RpcHandler
+/// An RPC handler.
+///
+/// This trait represents a type that can turn [`RpcRequest`]s into [`RpcResponse`]s.
+///
+/// Implementors of this trait must be [`tower::Service`]s that use:
+/// - [`RpcRequest`] as the generic `Request` type
+/// - [`RpcResponse`] as the associated `Response` type
+/// - [`RpcError`] as the associated `Error` type
+/// - A generic [`Future`] that outputs `Result<RpcResponse, RpcError>`
+///
+/// See this crate's `RpcHandlerDummy` for an implementation example of this trait.
+///
+/// # Panics
+/// Your [`RpcHandler`] must reply to [`RpcRequest`]s with the correct
+/// [`RpcResponse`] or else this crate will panic during routing functions.
+///
+/// For example, an [`RpcRequest::Binary`] must be replied to with
+/// [`RpcResponse::Binary`]. If an [`RpcResponse::Other`] were returned instead,
+/// this crate would panic.
+pub trait RpcHandler:
+    Clone
+    + Send
+    + Sync
+    + 'static
+    + Service<
+        RpcRequest,
+        Response = RpcResponse,
+        Error = RpcError,
+        Future: Future<Output = Result<RpcResponse, RpcError>> + Send + Sync + 'static,
+    >
+{
+    /// Is this [`RpcHandler`] restricted?
+    ///
+    /// If this returns `true`, restricted methods and endpoints such as:
+    /// - `/json_rpc`'s `relay_tx` method
+    /// - The `/pop_blocks` endpoint
+    ///
+    /// will automatically be denied access when using the
+    /// [`axum::Router`] provided by [`RouterBuilder`](crate::RouterBuilder).
+    fn restricted(&self) -> bool;
+}
diff --git a/rpc/interface/src/rpc_handler_dummy.rs b/rpc/interface/src/rpc_handler_dummy.rs
new file mode 100644
index 0000000..97b7585
--- /dev/null
+++ b/rpc/interface/src/rpc_handler_dummy.rs
@@ -0,0 +1,142 @@
+//! Dummy implementation of [`RpcHandler`].
+
+//---------------------------------------------------------------------------------------------------- Use
+use std::task::Poll;
+
+use futures::{channel::oneshot::channel, FutureExt};
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};
+use tower::Service;
+
+use cuprate_helper::asynch::InfallibleOneshotReceiver;
+use cuprate_json_rpc::Id;
+use cuprate_rpc_types::json::JsonRpcRequest;
+
+use crate::{
+    rpc_error::RpcError, rpc_handler::RpcHandler, rpc_request::RpcRequest,
+    rpc_response::RpcResponse,
+};
+
+//---------------------------------------------------------------------------------------------------- RpcHandlerDummy
+/// An [`RpcHandler`] that always returns [`Default::default`].
+///
+/// This `struct` implements [`RpcHandler`], and always responds
+/// with the response `struct` set to [`Default::default`].
+///
+/// See the [`crate`] documentation for example usage.
+///
+/// This is mostly used for testing purposes and can
+/// be disabled by disabling the `dummy` feature flag.
+ pub restricted: bool, +} + +impl RpcHandler for RpcHandlerDummy { + fn restricted(&self) -> bool { + self.restricted + } +} + +impl Service for RpcHandlerDummy { + type Response = RpcResponse; + type Error = RpcError; + type Future = InfallibleOneshotReceiver>; + + fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: RpcRequest) -> Self::Future { + use cuprate_rpc_types::bin::BinRequest as BReq; + use cuprate_rpc_types::bin::BinResponse as BResp; + use cuprate_rpc_types::json::JsonRpcRequest as JReq; + use cuprate_rpc_types::json::JsonRpcResponse as JResp; + use cuprate_rpc_types::other::OtherRequest as OReq; + use cuprate_rpc_types::other::OtherResponse as OResp; + + #[rustfmt::skip] + #[allow(clippy::default_trait_access)] + let resp = match req { + RpcRequest::JsonRpc(j) => RpcResponse::JsonRpc(cuprate_json_rpc::Response::ok(Id::Null, match j.body { + JReq::GetBlockCount(_) => JResp::GetBlockCount(Default::default()), + JReq::OnGetBlockHash(_) => JResp::OnGetBlockHash(Default::default()), + JReq::SubmitBlock(_) => JResp::SubmitBlock(Default::default()), + JReq::GenerateBlocks(_) => JResp::GenerateBlocks(Default::default()), + JReq::GetLastBlockHeader(_) => JResp::GetLastBlockHeader(Default::default()), + JReq::GetBlockHeaderByHash(_) => JResp::GetBlockHeaderByHash(Default::default()), + JReq::GetBlockHeaderByHeight(_) => JResp::GetBlockHeaderByHeight(Default::default()), + JReq::GetBlockHeadersRange(_) => JResp::GetBlockHeadersRange(Default::default()), + JReq::GetBlock(_) => JResp::GetBlock(Default::default()), + JReq::GetConnections(_) => JResp::GetConnections(Default::default()), + JReq::GetInfo(_) => JResp::GetInfo(Default::default()), + JReq::HardForkInfo(_) => JResp::HardForkInfo(Default::default()), + JReq::SetBans(_) => JResp::SetBans(Default::default()), + JReq::GetBans(_) => JResp::GetBans(Default::default()), + JReq::Banned(_) => JResp::Banned(Default::default()), + 
JReq::FlushTransactionPool(_) => JResp::FlushTransactionPool(Default::default()), + JReq::GetOutputHistogram(_) => JResp::GetOutputHistogram(Default::default()), + JReq::GetCoinbaseTxSum(_) => JResp::GetCoinbaseTxSum(Default::default()), + JReq::GetVersion(_) => JResp::GetVersion(Default::default()), + JReq::GetFeeEstimate(_) => JResp::GetFeeEstimate(Default::default()), + JReq::GetAlternateChains(_) => JResp::GetAlternateChains(Default::default()), + JReq::RelayTx(_) => JResp::RelayTx(Default::default()), + JReq::SyncInfo(_) => JResp::SyncInfo(Default::default()), + JReq::GetTransactionPoolBacklog(_) => JResp::GetTransactionPoolBacklog(Default::default()), + JReq::GetMinerData(_) => JResp::GetMinerData(Default::default()), + JReq::PruneBlockchain(_) => JResp::PruneBlockchain(Default::default()), + JReq::CalcPow(_) => JResp::CalcPow(Default::default()), + JReq::FlushCache(_) => JResp::FlushCache(Default::default()), + JReq::AddAuxPow(_) => JResp::AddAuxPow(Default::default()), + JReq::GetTxIdsLoose(_) => JResp::GetTxIdsLoose(Default::default()), + })), + RpcRequest::Binary(b) => RpcResponse::Binary(match b { + BReq::GetBlocks(_) => BResp::GetBlocks(Default::default()), + BReq::GetBlocksByHeight(_) => BResp::GetBlocksByHeight(Default::default()), + BReq::GetHashes(_) => BResp::GetHashes(Default::default()), + BReq::GetOutputIndexes(_) => BResp::GetOutputIndexes(Default::default()), + BReq::GetOuts(_) => BResp::GetOuts(Default::default()), + BReq::GetTransactionPoolHashes(_) => BResp::GetTransactionPoolHashes(Default::default()), + BReq::GetOutputDistribution(_) => BResp::GetOutputDistribution(Default::default()), + }), + RpcRequest::Other(o) => RpcResponse::Other(match o { + OReq::GetHeight(_) => OResp::GetHeight(Default::default()), + OReq::GetTransactions(_) => OResp::GetTransactions(Default::default()), + OReq::GetAltBlocksHashes(_) => OResp::GetAltBlocksHashes(Default::default()), + OReq::IsKeyImageSpent(_) => OResp::IsKeyImageSpent(Default::default()), + 
OReq::SendRawTransaction(_) => OResp::SendRawTransaction(Default::default()), + OReq::StartMining(_) => OResp::StartMining(Default::default()), + OReq::StopMining(_) => OResp::StopMining(Default::default()), + OReq::MiningStatus(_) => OResp::MiningStatus(Default::default()), + OReq::SaveBc(_) => OResp::SaveBc(Default::default()), + OReq::GetPeerList(_) => OResp::GetPeerList(Default::default()), + OReq::SetLogHashRate(_) => OResp::SetLogHashRate(Default::default()), + OReq::SetLogLevel(_) => OResp::SetLogLevel(Default::default()), + OReq::SetLogCategories(_) => OResp::SetLogCategories(Default::default()), + OReq::SetBootstrapDaemon(_) => OResp::SetBootstrapDaemon(Default::default()), + OReq::GetTransactionPool(_) => OResp::GetTransactionPool(Default::default()), + OReq::GetTransactionPoolStats(_) => OResp::GetTransactionPoolStats(Default::default()), + OReq::StopDaemon(_) => OResp::StopDaemon(Default::default()), + OReq::GetLimit(_) => OResp::GetLimit(Default::default()), + OReq::SetLimit(_) => OResp::SetLimit(Default::default()), + OReq::OutPeers(_) => OResp::OutPeers(Default::default()), + OReq::InPeers(_) => OResp::InPeers(Default::default()), + OReq::GetNetStats(_) => OResp::GetNetStats(Default::default()), + OReq::GetOuts(_) => OResp::GetOuts(Default::default()), + OReq::Update(_) => OResp::Update(Default::default()), + OReq::PopBlocks(_) => OResp::PopBlocks(Default::default()), + OReq::GetTransactionPoolHashes(_) => OResp::GetTransactionPoolHashes(Default::default()), + OReq::GetPublicNodes(_) => OResp::GetPublicNodes(Default::default()), + }) + }; + + let (tx, rx) = channel(); + drop(tx.send(Ok(resp))); + InfallibleOneshotReceiver::from(rx) + } +} diff --git a/rpc/interface/src/rpc_request.rs b/rpc/interface/src/rpc_request.rs new file mode 100644 index 0000000..3b66a78 --- /dev/null +++ b/rpc/interface/src/rpc_request.rs @@ -0,0 +1,33 @@ +//! RPC requests. 
+ +//---------------------------------------------------------------------------------------------------- Import +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +use cuprate_rpc_types::{bin::BinRequest, json::JsonRpcRequest, other::OtherRequest}; + +//---------------------------------------------------------------------------------------------------- RpcRequest +/// All possible RPC requests. +/// +/// This enum encapsulates all possible RPC requests: +/// - JSON RPC 2.0 requests +/// - Binary requests +/// - Other JSON requests +/// +/// It is the `Request` type required to be used in an [`RpcHandler`](crate::RpcHandler). +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +pub enum RpcRequest { + /// JSON-RPC 2.0 requests. + JsonRpc(cuprate_json_rpc::Request), + /// Binary requests. + Binary(BinRequest), + /// Other JSON requests. + Other(OtherRequest), +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} diff --git a/rpc/interface/src/rpc_response.rs b/rpc/interface/src/rpc_response.rs new file mode 100644 index 0000000..7e8ecdb --- /dev/null +++ b/rpc/interface/src/rpc_response.rs @@ -0,0 +1,33 @@ +//! RPC responses. + +//---------------------------------------------------------------------------------------------------- Import +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +use cuprate_rpc_types::{bin::BinResponse, json::JsonRpcResponse, other::OtherResponse}; + +//---------------------------------------------------------------------------------------------------- RpcResponse +/// All possible RPC responses. +/// +/// This enum encapsulates all possible RPC responses: +/// - JSON RPC 2.0 responses +/// - Binary responses +/// - Other JSON responses +/// +/// It is the `Response` type required to be used in an [`RpcHandler`](crate::RpcHandler). 
+#[derive(Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +pub enum RpcResponse { + /// JSON RPC 2.0 responses. + JsonRpc(cuprate_json_rpc::Response), + /// Binary responses. + Binary(BinResponse), + /// Other JSON responses. + Other(OtherResponse), +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + // use super::*; +} From 8227c286049769e171cb466a15ac3e197a30bf52 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Tue, 6 Aug 2024 23:48:53 +0000 Subject: [PATCH 031/104] update monero-serai (#201) * update monero-serai * update monero-serai + change height to `usize` * fix merge * fix merge * fix doc * fix clippy take 2 * misc changes * move RPC imports to dev deps * handle miner txs when calculating fee * Update consensus/rules/src/blocks.rs Co-authored-by: hinto-janai * Update consensus/rules/src/transactions.rs Co-authored-by: hinto-janai * Update storage/blockchain/src/ops/tx.rs Co-authored-by: hinto-janai * Update test-utils/src/data/free.rs Co-authored-by: hinto-janai * fixes * fix clippy --------- Co-authored-by: hinto-janai --- Cargo.lock | 292 +++++++++--------- Cargo.toml | 16 +- consensus/Cargo.toml | 2 - consensus/fast-sync/src/create.rs | 10 +- consensus/fast-sync/src/fast_sync.rs | 12 +- consensus/rules/Cargo.toml | 2 - consensus/rules/src/batch_verifier.rs | 14 +- consensus/rules/src/blocks.rs | 21 +- consensus/rules/src/genesis.rs | 8 +- consensus/rules/src/hard_forks.rs | 24 +- consensus/rules/src/hard_forks/tests.rs | 10 +- consensus/rules/src/miner_tx.rs | 38 +-- consensus/rules/src/transactions.rs | 110 ++++--- .../rules/src/transactions/contextual_data.rs | 2 +- consensus/rules/src/transactions/ring_ct.rs | 84 ++--- consensus/rules/src/transactions/tests.rs | 35 +-- consensus/src/batch_verifier.rs | 25 +- consensus/src/block.rs | 26 +- consensus/src/block/alt_block.rs | 6 +- consensus/src/block/free.rs | 6 +- 
consensus/src/context.rs | 8 +- consensus/src/context/alt_chains.rs | 6 +- consensus/src/context/difficulty.rs | 59 ++-- consensus/src/context/hardforks.rs | 27 +- consensus/src/context/rx_vms.rs | 21 +- consensus/src/context/task.rs | 2 +- consensus/src/context/weight.rs | 42 ++- consensus/src/tests/context.rs | 8 +- consensus/src/tests/context/difficulty.rs | 17 +- consensus/src/tests/context/hardforks.rs | 12 +- consensus/src/tests/context/weight.rs | 6 +- consensus/src/tests/mock_db.rs | 11 +- consensus/src/transactions.rs | 37 +-- consensus/src/transactions/contextual_data.rs | 10 +- consensus/src/transactions/free.rs | 64 ++++ p2p/address-book/src/book.rs | 4 +- p2p/address-book/src/peer_list.rs | 2 +- p2p/p2p-core/src/services.rs | 8 +- p2p/p2p/src/block_downloader.rs | 23 +- p2p/p2p/src/block_downloader/block_queue.rs | 12 +- p2p/p2p/src/block_downloader/chain_tracker.rs | 15 +- .../src/block_downloader/download_batch.rs | 12 +- p2p/p2p/src/block_downloader/tests.rs | 31 +- p2p/p2p/src/connection_maintainer.rs | 2 +- p2p/p2p/src/sync_states.rs | 2 +- pruning/src/lib.rs | 68 ++-- storage/blockchain/src/ops/block.rs | 24 +- storage/blockchain/src/ops/blockchain.rs | 9 +- storage/blockchain/src/ops/output.rs | 8 +- storage/blockchain/src/ops/tx.rs | 108 +++---- storage/blockchain/src/service/free.rs | 12 +- storage/blockchain/src/service/mod.rs | 2 +- storage/blockchain/src/service/read.rs | 4 +- storage/blockchain/src/service/tests.rs | 2 +- storage/blockchain/src/types.rs | 2 +- test-utils/Cargo.toml | 36 ++- test-utils/src/data/constants.rs | 26 +- test-utils/src/data/free.rs | 45 ++- test-utils/src/data/mod.rs | 4 +- test-utils/src/rpc/client.rs | 29 +- types/Cargo.toml | 1 + types/src/blockchain.rs | 14 +- types/src/types.rs | 14 +- 63 files changed, 808 insertions(+), 784 deletions(-) create mode 100644 consensus/src/transactions/free.rs diff --git a/Cargo.lock b/Cargo.lock index 1f99810..c35deec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -50,17 +50,6 
@@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" -[[package]] -name = "async-lock" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" -dependencies = [ - "event-listener", - "event-listener-strategy", - "pin-project-lite", -] - [[package]] name = "async-stream" version = "0.3.5" @@ -176,28 +165,12 @@ dependencies = [ "rustc-demangle", ] -[[package]] -name = "base58-monero" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978e81a45367d2409ecd33369a45dda2e9a3ca516153ec194de1fbda4b9fb79d" -dependencies = [ - "thiserror", - "tiny-keccak", -] - [[package]] name = "base64" version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" -[[package]] -name = "base64ct" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" - [[package]] name = "bincode" version = "1.3.3" @@ -401,15 +374,6 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70" -[[package]] -name = "concurrent-queue" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" -dependencies = [ - "crossbeam-utils", -] - [[package]] name = "core-foundation" version = "0.9.4" @@ -500,12 +464,6 @@ version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" -[[package]] -name = "crunchy" -version = "0.2.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - [[package]] name = "crypto-bigint" version = "0.5.5" @@ -513,6 +471,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "subtle", + "zeroize", ] [[package]] @@ -589,12 +548,10 @@ dependencies = [ "cuprate-test-utils", "cuprate-types", "curve25519-dalek", - "dalek-ff-group", "futures", "hex", "hex-literal", "monero-serai", - "multiexp", "proptest", "proptest-derive", "rand", @@ -617,11 +574,9 @@ dependencies = [ "cuprate-cryptonight", "cuprate-helper", "curve25519-dalek", - "dalek-ff-group", "hex", "hex-literal", "monero-serai", - "multiexp", "proptest", "proptest-derive", "rand", @@ -879,7 +834,9 @@ dependencies = [ "futures", "hex", "hex-literal", + "monero-rpc", "monero-serai", + "monero-simple-request-rpc", "paste", "pretty_assertions", "serde", @@ -897,6 +854,7 @@ version = "0.0.0" name = "cuprate-types" version = "0.0.0" dependencies = [ + "borsh", "bytes", "cuprate-epee-encoding", "cuprate-fixed-bytes", @@ -951,7 +909,7 @@ dependencies = [ [[package]] name = "dalek-ff-group" version = "0.4.1" -source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "crypto-bigint", "curve25519-dalek", @@ -1070,27 +1028,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "event-listener" -version = "5.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.5.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" -dependencies = [ - "event-listener", - "pin-project-lite", -] - [[package]] name = "fastrand" version = "2.1.0" @@ -1127,7 +1064,7 @@ dependencies = [ [[package]] name = "flexible-transcript" version = "0.3.2" -source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "blake2", "digest", @@ -1285,7 +1222,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.2.6", + "indexmap", "slab", "tokio", "tokio-util", @@ -1363,15 +1300,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" -[[package]] -name = "hmac" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" -dependencies = [ - "digest", -] - [[package]] name = "http" version = "1.1.0" @@ -1791,63 +1719,163 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "monero-address" +version = "0.1.0" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "curve25519-dalek", + "monero-io", + "monero-primitives", + "std-shims", + "thiserror", + "zeroize", +] + +[[package]] +name = "monero-borromean" +version = "0.1.0" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "curve25519-dalek", + "monero-generators", + "monero-io", + "monero-primitives", + "std-shims", + "zeroize", +] + +[[package]] +name = "monero-bulletproofs" +version = "0.1.0" +source = 
"git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "curve25519-dalek", + "monero-generators", + "monero-io", + "monero-primitives", + "rand_core", + "std-shims", + "thiserror", + "zeroize", +] + +[[package]] +name = "monero-clsag" +version = "0.1.0" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "curve25519-dalek", + "dalek-ff-group", + "flexible-transcript", + "group", + "monero-generators", + "monero-io", + "monero-primitives", + "rand_chacha", + "rand_core", + "std-shims", + "subtle", + "thiserror", + "zeroize", +] + [[package]] name = "monero-generators" version = "0.4.0" -source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "curve25519-dalek", "dalek-ff-group", "group", + "monero-io", "sha3", "std-shims", "subtle", ] +[[package]] +name = "monero-io" +version = "0.1.0" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "curve25519-dalek", + "std-shims", +] + +[[package]] +name = "monero-mlsag" +version = "0.1.0" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "curve25519-dalek", + "monero-generators", + "monero-io", + "monero-primitives", + "std-shims", + "thiserror", + "zeroize", +] + +[[package]] +name = "monero-primitives" +version = "0.1.0" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "curve25519-dalek", + "monero-generators", + "monero-io", + "sha3", + "std-shims", + "zeroize", +] + +[[package]] +name = "monero-rpc" +version = "0.1.0" +source = 
"git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "async-trait", + "curve25519-dalek", + "hex", + "monero-address", + "monero-serai", + "serde", + "serde_json", + "std-shims", + "thiserror", + "zeroize", +] + [[package]] name = "monero-serai" version = "0.1.4-alpha" -source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ - "async-lock", - "async-trait", - "base58-monero", "curve25519-dalek", - "dalek-ff-group", - "digest_auth", - "flexible-transcript", - "group", - "hex", "hex-literal", + "monero-borromean", + "monero-bulletproofs", + "monero-clsag", "monero-generators", - "multiexp", - "pbkdf2", - "rand", - "rand_chacha", - "rand_core", - "rand_distr", - "serde", - "serde_json", - "sha3", - "simple-request", + "monero-io", + "monero-mlsag", + "monero-primitives", "std-shims", - "subtle", - "thiserror", - "tokio", "zeroize", ] [[package]] -name = "multiexp" -version = "0.4.0" -source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1" +name = "monero-simple-request-rpc" +version = "0.1.0" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ - "ff", - "group", - "rand_core", - "rustversion", - "std-shims", - "zeroize", + "async-trait", + "digest_auth", + "hex", + "monero-rpc", + "simple-request", + "tokio", ] [[package]] @@ -1907,12 +1935,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "parking" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" - [[package]] name = "parking_lot" version = "0.12.3" @@ -1936,35 +1958,12 @@ dependencies = [ "windows-targets 0.52.5", ] -[[package]] -name = 
"password-hash" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" -dependencies = [ - "base64ct", - "rand_core", - "subtle", -] - [[package]] name = "paste" version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" -[[package]] -name = "pbkdf2" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" -dependencies = [ - "digest", - "hmac", - "password-hash", - "sha2", -] - [[package]] name = "percent-encoding" version = "2.3.1" @@ -2540,7 +2539,7 @@ dependencies = [ [[package]] name = "simple-request" version = "0.1.0" -source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "http-body-util", "hyper", @@ -2596,7 +2595,7 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "std-shims" version = "0.1.1" -source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "hashbrown", "spin", @@ -2722,15 +2721,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - [[package]] name = "tinystr" version = "0.7.6" diff --git a/Cargo.toml b/Cargo.toml index da82d9e..9f0fa27 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,15 +58,13 @@ chrono = { version = "0.4.31", 
default-features = false } crypto-bigint = { version = "0.5.5", default-features = false } crossbeam = { version = "0.8.4", default-features = false } curve25519-dalek = { version = "4.1.3", default-features = false } -dalek-ff-group = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false } dashmap = { version = "5.5.3", default-features = false } dirs = { version = "5.0.1", default-features = false } futures = { version = "0.3.29", default-features = false } hex = { version = "0.4.3", default-features = false } hex-literal = { version = "0.4", default-features = false } indexmap = { version = "2.2.5", default-features = false } -monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false } -multiexp = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false } +monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce", default-features = false } paste = { version = "1.0.14", default-features = false } pin-project = { version = "1.1.3", default-features = false } randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false } @@ -86,11 +84,13 @@ tracing-subscriber = { version = "0.3.17", default-features = false } tracing = { version = "0.1.40", default-features = false } ## workspace.dev-dependencies -tempfile = { version = "3" } -pretty_assertions = { version = "1.4.0" } -proptest = { version = "1" } -proptest-derive = { version = "0.4.0" } -tokio-test = { version = "0.4.4" } +monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" } +monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" } +tempfile = { version = "3" } +pretty_assertions = { version = "1.4.0" } +proptest = { version = "1" } +proptest-derive = { version = "0.4.0" } +tokio-test = { version = "0.4.4" } ## TODO: ## Potential dependencies. 
diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index 521b98c..bd3994a 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -19,8 +19,6 @@ futures = { workspace = true, features = ["std", "async-await"] } randomx-rs = { workspace = true } monero-serai = { workspace = true, features = ["std"] } -multiexp = { workspace = true } -dalek-ff-group = { workspace = true } curve25519-dalek = { workspace = true } rayon = { workspace = true } diff --git a/consensus/fast-sync/src/create.rs b/consensus/fast-sync/src/create.rs index 8d4f9a6..0d6d03f 100644 --- a/consensus/fast-sync/src/create.rs +++ b/consensus/fast-sync/src/create.rs @@ -13,13 +13,13 @@ use cuprate_types::{ use cuprate_fast_sync::{hash_of_hashes, BlockId, HashOfHashes}; -const BATCH_SIZE: u64 = 512; +const BATCH_SIZE: usize = 512; async fn read_batch( handle: &mut BlockchainReadHandle, - height_from: u64, + height_from: usize, ) -> Result, RuntimeError> { - let mut block_ids = Vec::::with_capacity(BATCH_SIZE as usize); + let mut block_ids = Vec::::with_capacity(BATCH_SIZE); for height in height_from..(height_from + BATCH_SIZE) { let request = BlockchainReadRequest::BlockHash(height, Chain::Main); @@ -53,7 +53,7 @@ fn generate_hex(hashes: &[HashOfHashes]) -> String { #[command(version, about, long_about = None)] struct Args { #[arg(short, long)] - height: u64, + height: usize, } #[tokio::main] @@ -67,7 +67,7 @@ async fn main() { let mut hashes_of_hashes = Vec::new(); - let mut height = 0u64; + let mut height = 0_usize; while height < height_target { match read_batch(&mut read_handle, height).await { diff --git a/consensus/fast-sync/src/fast_sync.rs b/consensus/fast-sync/src/fast_sync.rs index a97040a..b42ae64 100644 --- a/consensus/fast-sync/src/fast_sync.rs +++ b/consensus/fast-sync/src/fast_sync.rs @@ -244,7 +244,7 @@ where let block_blob = block.serialize(); - let Some(Input::Gen(height)) = block.miner_tx.prefix.inputs.first() else { + let Some(Input::Gen(height)) = 
block.miner_transaction.prefix().inputs.first() else { return Err(FastSyncError::MinerTx(MinerTxError::InputNotOfTypeGen)); }; if *height != block_chain_ctx.chain_height { @@ -252,7 +252,7 @@ where } let mut verified_txs = Vec::with_capacity(txs.len()); - for tx in &block.txs { + for tx in &block.transactions { let tx = txs .remove(tx) .ok_or(FastSyncError::TxsIncludedWithBlockIncorrect)?; @@ -269,8 +269,8 @@ where let total_fees = verified_txs.iter().map(|tx| tx.fee).sum::(); let total_outputs = block - .miner_tx - .prefix + .miner_transaction + .prefix() .outputs .iter() .map(|output| output.amount.unwrap_or(0)) @@ -278,8 +278,8 @@ where let generated_coins = total_outputs - total_fees; - let weight = - block.miner_tx.weight() + verified_txs.iter().map(|tx| tx.tx_weight).sum::(); + let weight = block.miner_transaction.weight() + + verified_txs.iter().map(|tx| tx.tx_weight).sum::(); Ok(FastSyncResponse::ValidateBlock(VerifiedBlockInformation { block_blob, diff --git a/consensus/rules/Cargo.toml b/consensus/rules/Cargo.toml index fd86a61..311bcc9 100644 --- a/consensus/rules/Cargo.toml +++ b/consensus/rules/Cargo.toml @@ -15,8 +15,6 @@ cuprate-helper = { path = "../../helper", default-features = false, features = [ cuprate-cryptonight = {path = "../../cryptonight"} monero-serai = { workspace = true, features = ["std"] } -multiexp = { workspace = true, features = ["std", "batch"] } -dalek-ff-group = { workspace = true, features = ["std"] } curve25519-dalek = { workspace = true, features = ["alloc", "zeroize", "precomputed-tables"] } rand = { workspace = true, features = ["std", "std_rng"] } diff --git a/consensus/rules/src/batch_verifier.rs b/consensus/rules/src/batch_verifier.rs index c8d3f10..bce6eb9 100644 --- a/consensus/rules/src/batch_verifier.rs +++ b/consensus/rules/src/batch_verifier.rs @@ -1,4 +1,4 @@ -use multiexp::BatchVerifier as InternalBatchVerifier; +use monero_serai::ringct::bulletproofs::BatchVerifier as InternalBatchVerifier; /// This trait 
represents a batch verifier. /// @@ -12,18 +12,12 @@ pub trait BatchVerifier { /// # Panics /// This function may panic if `stmt` contains calls to `rayon`'s parallel iterators, e.g. `par_iter()`. // TODO: remove the panics by adding a generic API upstream. - fn queue_statement( - &mut self, - stmt: impl FnOnce(&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>) -> R, - ) -> R; + fn queue_statement(&mut self, stmt: impl FnOnce(&mut InternalBatchVerifier) -> R) -> R; } // impl this for a single threaded batch verifier. -impl BatchVerifier for &'_ mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint> { - fn queue_statement( - &mut self, - stmt: impl FnOnce(&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>) -> R, - ) -> R { +impl BatchVerifier for &'_ mut InternalBatchVerifier { + fn queue_statement(&mut self, stmt: impl FnOnce(&mut InternalBatchVerifier) -> R) -> R { stmt(self) } } diff --git a/consensus/rules/src/blocks.rs b/consensus/rules/src/blocks.rs index ecd6a11..c36f68b 100644 --- a/consensus/rules/src/blocks.rs +++ b/consensus/rules/src/blocks.rs @@ -21,8 +21,8 @@ pub const PENALTY_FREE_ZONE_1: usize = 20000; pub const PENALTY_FREE_ZONE_2: usize = 60000; pub const PENALTY_FREE_ZONE_5: usize = 300000; -pub const RX_SEEDHASH_EPOCH_BLOCKS: u64 = 2048; -pub const RX_SEEDHASH_EPOCH_LAG: u64 = 64; +pub const RX_SEEDHASH_EPOCH_BLOCKS: usize = 2048; +pub const RX_SEEDHASH_EPOCH_LAG: usize = 64; #[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)] pub enum BlockError { @@ -52,14 +52,14 @@ pub trait RandomX { } /// Returns if this height is a RandomX seed height. -pub fn is_randomx_seed_height(height: u64) -> bool { +pub fn is_randomx_seed_height(height: usize) -> bool { height % RX_SEEDHASH_EPOCH_BLOCKS == 0 } /// Returns the RandomX seed height for this block. 
/// /// ref: -pub fn randomx_seed_height(height: u64) -> u64 { +pub fn randomx_seed_height(height: usize) -> usize { if height <= RX_SEEDHASH_EPOCH_BLOCKS + RX_SEEDHASH_EPOCH_LAG { 0 } else { @@ -75,7 +75,7 @@ pub fn randomx_seed_height(height: u64) -> u64 { pub fn calculate_pow_hash( randomx_vm: Option<&R>, buf: &[u8], - height: u64, + height: usize, hf: &HardFork, ) -> Result<[u8; 32], BlockError> { if height == 202612 { @@ -89,7 +89,8 @@ pub fn calculate_pow_hash( } else if hf < &HardFork::V10 { cryptonight_hash_v2(buf) } else if hf < &HardFork::V12 { - cryptonight_hash_r(buf, height) + // FIXME: https://github.com/Cuprate/cuprate/issues/167. + cryptonight_hash_r(buf, height as u64) } else { randomx_vm .expect("RandomX VM needed from hf 12") @@ -220,7 +221,7 @@ pub struct ContextToVerifyBlock { /// Contains the median timestamp over the last 60 blocks, if there is less than 60 blocks this should be [`None`] pub median_block_timestamp: Option, /// The current chain height. - pub chain_height: u64, + pub chain_height: usize, /// The current hard-fork. 
pub current_hf: HardFork, /// ref: @@ -263,11 +264,11 @@ pub fn check_block( check_block_weight(block_weight, block_chain_ctx.median_weight_for_block_reward)?; block_size_sanity_check(block_blob_len, block_chain_ctx.effective_median_weight)?; - check_amount_txs(block.txs.len())?; - check_txs_unique(&block.txs)?; + check_amount_txs(block.transactions.len())?; + check_txs_unique(&block.transactions)?; let generated_coins = check_miner_tx( - &block.miner_tx, + &block.miner_transaction, total_fees, block_chain_ctx.chain_height, block_weight, diff --git a/consensus/rules/src/genesis.rs b/consensus/rules/src/genesis.rs index 73bc951..b796119 100644 --- a/consensus/rules/src/genesis.rs +++ b/consensus/rules/src/genesis.rs @@ -29,14 +29,14 @@ fn genesis_miner_tx(network: &Network) -> Transaction { pub fn generate_genesis_block(network: &Network) -> Block { Block { header: BlockHeader { - major_version: 1, - minor_version: 0, + hardfork_version: 1, + hardfork_signal: 0, timestamp: 0, previous: [0; 32], nonce: genesis_nonce(network), }, - miner_tx: genesis_miner_tx(network), - txs: vec![], + miner_transaction: genesis_miner_tx(network), + transactions: vec![], } } diff --git a/consensus/rules/src/hard_forks.rs b/consensus/rules/src/hard_forks.rs index 016a51f..6b98314 100644 --- a/consensus/rules/src/hard_forks.rs +++ b/consensus/rules/src/hard_forks.rs @@ -40,11 +40,11 @@ pub enum HardForkError { /// Information about a given hard-fork. 
#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct HFInfo { - height: u64, - threshold: u64, + height: usize, + threshold: usize, } impl HFInfo { - pub const fn new(height: u64, threshold: u64) -> HFInfo { + pub const fn new(height: usize, threshold: usize) -> HFInfo { HFInfo { height, threshold } } } @@ -202,8 +202,8 @@ impl HardFork { #[inline] pub fn from_block_header(header: &BlockHeader) -> Result<(HardFork, HardFork), HardForkError> { Ok(( - HardFork::from_version(header.major_version)?, - HardFork::from_vote(header.minor_version), + HardFork::from_version(header.hardfork_version)?, + HardFork::from_vote(header.hardfork_signal), )) } @@ -245,7 +245,7 @@ impl HardFork { /// A struct holding the current voting state of the blockchain. #[derive(Debug, Clone, Eq, PartialEq)] pub struct HFVotes { - votes: [u64; NUMB_OF_HARD_FORKS], + votes: [usize; NUMB_OF_HARD_FORKS], vote_list: VecDeque, window_size: usize, } @@ -318,13 +318,13 @@ impl HFVotes { /// Returns the total votes for a hard-fork. /// /// ref: - pub fn votes_for_hf(&self, hf: &HardFork) -> u64 { + pub fn votes_for_hf(&self, hf: &HardFork) -> usize { self.votes[*hf as usize - 1..].iter().sum() } /// Returns the total amount of votes being tracked - pub fn total_votes(&self) -> u64 { - self.votes.iter().sum() + pub fn total_votes(&self) -> usize { + self.vote_list.len() } /// Checks if a future hard fork should be activated, returning the next hard-fork that should be @@ -334,8 +334,8 @@ impl HFVotes { pub fn current_fork( &self, current_hf: &HardFork, - current_height: u64, - window: u64, + current_height: usize, + window: usize, hfs_info: &HFsInfo, ) -> HardFork { let mut current_hf = *current_hf; @@ -361,6 +361,6 @@ impl HFVotes { /// Returns the votes needed for a hard-fork. 
/// /// ref: -pub fn votes_needed(threshold: u64, window: u64) -> u64 { +pub fn votes_needed(threshold: usize, window: usize) -> usize { (threshold * window).div_ceil(100) } diff --git a/consensus/rules/src/hard_forks/tests.rs b/consensus/rules/src/hard_forks/tests.rs index 77ed751..00dd036 100644 --- a/consensus/rules/src/hard_forks/tests.rs +++ b/consensus/rules/src/hard_forks/tests.rs @@ -4,7 +4,7 @@ use proptest::{arbitrary::any, prop_assert_eq, prop_compose, proptest}; use crate::hard_forks::{HFVotes, HardFork, NUMB_OF_HARD_FORKS}; -const TEST_WINDOW_SIZE: u64 = 25; +const TEST_WINDOW_SIZE: usize = 25; #[test] fn target_block_time() { @@ -35,9 +35,9 @@ prop_compose! { fn arb_full_hf_votes() ( // we can't use HardFork as for some reason it overflows the stack, so we use u8. - votes in any::<[u8; TEST_WINDOW_SIZE as usize]>() + votes in any::<[u8; TEST_WINDOW_SIZE]>() ) -> HFVotes { - let mut vote_count = HFVotes::new(TEST_WINDOW_SIZE as usize); + let mut vote_count = HFVotes::new(TEST_WINDOW_SIZE); for vote in votes { vote_count.add_vote_for_hf(&HardFork::from_vote(vote % 17)); } @@ -48,9 +48,9 @@ prop_compose! { proptest! 
{ #[test] fn hf_vote_counter_total_correct(hf_votes in arb_full_hf_votes()) { - prop_assert_eq!(hf_votes.total_votes(), u64::try_from(hf_votes.vote_list.len()).unwrap()); + prop_assert_eq!(hf_votes.total_votes(), hf_votes.vote_list.len()); - let mut votes = [0_u64; NUMB_OF_HARD_FORKS]; + let mut votes = [0_usize; NUMB_OF_HARD_FORKS]; for vote in hf_votes.vote_list.iter() { // manually go through the list of votes tallying votes[*vote as usize - 1] += 1; diff --git a/consensus/rules/src/miner_tx.rs b/consensus/rules/src/miner_tx.rs index 90f1a7e..e4927e3 100644 --- a/consensus/rules/src/miner_tx.rs +++ b/consensus/rules/src/miner_tx.rs @@ -1,7 +1,4 @@ -use monero_serai::{ - ringct::RctType, - transaction::{Input, Output, Timelock, Transaction}, -}; +use monero_serai::transaction::{Input, Output, Timelock, Transaction}; use crate::{is_decomposed_amount, transactions::check_output_types, HardFork, TxVersion}; @@ -35,7 +32,7 @@ const MONEY_SUPPLY: u64 = u64::MAX; /// The minimum block reward per minute, "tail-emission" const MINIMUM_REWARD_PER_MIN: u64 = 3 * 10_u64.pow(11); /// The value which `lock_time` should be for a coinbase output. -const MINER_TX_TIME_LOCKED_BLOCKS: u64 = 60; +const MINER_TX_TIME_LOCKED_BLOCKS: usize = 60; /// Calculates the base block reward without taking away the penalty for expanding /// the block. @@ -88,7 +85,7 @@ fn check_miner_tx_version(tx_version: &TxVersion, hf: &HardFork) -> Result<(), M /// Checks the miner transactions inputs. /// /// ref: -fn check_inputs(inputs: &[Input], chain_height: u64) -> Result<(), MinerTxError> { +fn check_inputs(inputs: &[Input], chain_height: usize) -> Result<(), MinerTxError> { if inputs.len() != 1 { return Err(MinerTxError::IncorrectNumbOfInputs); } @@ -108,15 +105,15 @@ fn check_inputs(inputs: &[Input], chain_height: u64) -> Result<(), MinerTxError> /// Checks the miner transaction has a correct time lock. 
/// /// ref: -fn check_time_lock(time_lock: &Timelock, chain_height: u64) -> Result<(), MinerTxError> { +fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<(), MinerTxError> { match time_lock { - Timelock::Block(till_height) => { + &Timelock::Block(till_height) => { // Lock times above this amount are timestamps not blocks. // This is just for safety though and shouldn't actually be hit. - if till_height > &500_000_000 { + if till_height > 500_000_000 { Err(MinerTxError::InvalidLockTime)?; } - if u64::try_from(*till_height).unwrap() != chain_height + MINER_TX_TIME_LOCKED_BLOCKS { + if till_height != chain_height + MINER_TX_TIME_LOCKED_BLOCKS { Err(MinerTxError::InvalidLockTime) } else { Ok(()) @@ -182,28 +179,33 @@ fn check_total_output_amt( pub fn check_miner_tx( tx: &Transaction, total_fees: u64, - chain_height: u64, + chain_height: usize, block_weight: usize, median_bw: usize, already_generated_coins: u64, hf: &HardFork, ) -> Result { - let tx_version = TxVersion::from_raw(tx.prefix.version).ok_or(MinerTxError::VersionInvalid)?; + let tx_version = TxVersion::from_raw(tx.version()).ok_or(MinerTxError::VersionInvalid)?; check_miner_tx_version(&tx_version, hf)?; // ref: - if hf >= &HardFork::V12 && tx.rct_signatures.rct_type() != RctType::Null { - return Err(MinerTxError::RCTTypeNotNULL); + match tx { + Transaction::V1 { .. } => (), + Transaction::V2 { proofs, .. 
} => { + if hf >= &HardFork::V12 && proofs.is_some() { + return Err(MinerTxError::RCTTypeNotNULL); + } + } } - check_time_lock(&tx.prefix.timelock, chain_height)?; + check_time_lock(&tx.prefix().additional_timelock, chain_height)?; - check_inputs(&tx.prefix.inputs, chain_height)?; + check_inputs(&tx.prefix().inputs, chain_height)?; - check_output_types(&tx.prefix.outputs, hf).map_err(|_| MinerTxError::InvalidOutputType)?; + check_output_types(&tx.prefix().outputs, hf).map_err(|_| MinerTxError::InvalidOutputType)?; let reward = calculate_block_reward(block_weight, median_bw, already_generated_coins, hf); - let total_outs = sum_outputs(&tx.prefix.outputs, hf, &tx_version)?; + let total_outs = sum_outputs(&tx.prefix().outputs, hf, &tx_version)?; check_total_output_amt(total_outs, reward, total_fees, hf) } diff --git a/consensus/rules/src/transactions.rs b/consensus/rules/src/transactions.rs index 9169708..5a0676b 100644 --- a/consensus/rules/src/transactions.rs +++ b/consensus/rules/src/transactions.rs @@ -91,7 +91,7 @@ impl TxVersion { /// /// ref: /// && - pub fn from_raw(version: u64) -> Option { + pub fn from_raw(version: u8) -> Option { Some(match version { 1 => TxVersion::RingSignatures, 2 => TxVersion::RingCT, @@ -205,7 +205,7 @@ fn check_number_of_outputs( outputs: usize, hf: &HardFork, tx_version: &TxVersion, - rct_type: &RctType, + bp_or_bpp: bool, ) -> Result<(), TransactionError> { if tx_version == &TxVersion::RingSignatures { return Ok(()); @@ -215,18 +215,10 @@ fn check_number_of_outputs( return Err(TransactionError::InvalidNumberOfOutputs); } - match rct_type { - RctType::Bulletproofs - | RctType::BulletproofsCompactAmount - | RctType::Clsag - | RctType::BulletproofsPlus => { - if outputs <= MAX_BULLETPROOFS_OUTPUTS { - Ok(()) - } else { - Err(TransactionError::InvalidNumberOfOutputs) - } - } - _ => Ok(()), + if bp_or_bpp && outputs > MAX_BULLETPROOFS_OUTPUTS { + Err(TransactionError::InvalidNumberOfOutputs) + } else { + Ok(()) } } @@ -239,11 +231,11 @@ 
fn check_outputs_semantics( outputs: &[Output], hf: &HardFork, tx_version: &TxVersion, - rct_type: &RctType, + bp_or_bpp: bool, ) -> Result { check_output_types(outputs, hf)?; check_output_keys(outputs)?; - check_number_of_outputs(outputs.len(), hf, tx_version, rct_type)?; + check_number_of_outputs(outputs.len(), hf, tx_version, bp_or_bpp)?; sum_outputs(outputs, hf, tx_version) } @@ -255,14 +247,14 @@ fn check_outputs_semantics( /// pub fn output_unlocked( time_lock: &Timelock, - current_chain_height: u64, + current_chain_height: usize, current_time_lock_timestamp: u64, hf: &HardFork, ) -> bool { match *time_lock { Timelock::None => true, Timelock::Block(unlock_height) => { - check_block_time_lock(unlock_height.try_into().unwrap(), current_chain_height) + check_block_time_lock(unlock_height, current_chain_height) } Timelock::Time(unlock_time) => { check_timestamp_time_lock(unlock_time, current_time_lock_timestamp, hf) @@ -273,7 +265,7 @@ pub fn output_unlocked( /// Returns if a locked output, which uses a block height, can be spent. 
/// /// ref: -fn check_block_time_lock(unlock_height: u64, current_chain_height: u64) -> bool { +fn check_block_time_lock(unlock_height: usize, current_chain_height: usize) -> bool { // current_chain_height = 1 + top height unlock_height <= current_chain_height } @@ -297,7 +289,7 @@ fn check_timestamp_time_lock( /// fn check_all_time_locks( time_locks: &[Timelock], - current_chain_height: u64, + current_chain_height: usize, current_time_lock_timestamp: u64, hf: &HardFork, ) -> Result<(), TransactionError> { @@ -442,8 +434,8 @@ fn check_inputs_sorted(inputs: &[Input], hf: &HardFork) -> Result<(), Transactio /// /// ref: fn check_10_block_lock( - youngest_used_out_height: u64, - current_chain_height: u64, + youngest_used_out_height: usize, + current_chain_height: usize, hf: &HardFork, ) -> Result<(), TransactionError> { if hf >= &HardFork::V12 { @@ -510,7 +502,7 @@ fn check_inputs_semantics(inputs: &[Input], hf: &HardFork) -> Result Result<(), TransactionError> { // This rule is not contained in monero-core explicitly, but it is enforced by how Monero picks ring members. @@ -615,28 +607,41 @@ pub fn check_transaction_semantic( Err(TransactionError::TooBig)?; } - let tx_version = TxVersion::from_raw(tx.prefix.version) - .ok_or(TransactionError::TransactionVersionInvalid)?; + let tx_version = + TxVersion::from_raw(tx.version()).ok_or(TransactionError::TransactionVersionInvalid)?; - let outputs_sum = check_outputs_semantics( - &tx.prefix.outputs, - hf, - &tx_version, - &tx.rct_signatures.rct_type(), - )?; - let inputs_sum = check_inputs_semantics(&tx.prefix.inputs, hf)?; + let bp_or_bpp = match tx { + Transaction::V2 { + proofs: Some(proofs), + .. + } => match proofs.rct_type() { + RctType::AggregateMlsagBorromean | RctType::MlsagBorromean => false, + RctType::MlsagBulletproofs + | RctType::MlsagBulletproofsCompactAmount + | RctType::ClsagBulletproof + | RctType::ClsagBulletproofPlus => true, + }, + Transaction::V2 { proofs: None, .. } | Transaction::V1 { .. 
} => false, + }; - let fee = match tx_version { - TxVersion::RingSignatures => { + let outputs_sum = check_outputs_semantics(&tx.prefix().outputs, hf, &tx_version, bp_or_bpp)?; + let inputs_sum = check_inputs_semantics(&tx.prefix().inputs, hf)?; + + let fee = match tx { + Transaction::V1 { .. } => { if outputs_sum >= inputs_sum { Err(TransactionError::OutputsTooHigh)?; } inputs_sum - outputs_sum } - TxVersion::RingCT => { - ring_ct::ring_ct_semantic_checks(tx, tx_hash, verifier, hf)?; + Transaction::V2 { proofs, .. } => { + let proofs = proofs + .as_ref() + .ok_or(TransactionError::TransactionVersionInvalid)?; - tx.rct_signatures.base.fee + ring_ct::ring_ct_semantic_checks(proofs, tx_hash, verifier, hf)?; + + proofs.base.fee } }; @@ -654,15 +659,15 @@ pub fn check_transaction_semantic( pub fn check_transaction_contextual( tx: &Transaction, tx_ring_members_info: &TxRingMembersInfo, - current_chain_height: u64, + current_chain_height: usize, current_time_lock_timestamp: u64, hf: &HardFork, ) -> Result<(), TransactionError> { - let tx_version = TxVersion::from_raw(tx.prefix.version) - .ok_or(TransactionError::TransactionVersionInvalid)?; + let tx_version = + TxVersion::from_raw(tx.version()).ok_or(TransactionError::TransactionVersionInvalid)?; check_inputs_contextual( - &tx.prefix.inputs, + &tx.prefix().inputs, tx_ring_members_info, current_chain_height, hf, @@ -676,17 +681,22 @@ pub fn check_transaction_contextual( hf, )?; - match tx_version { - TxVersion::RingSignatures => ring_signatures::check_input_signatures( - &tx.prefix.inputs, - &tx.signatures, + match &tx { + Transaction::V1 { prefix, signatures } => ring_signatures::check_input_signatures( + &prefix.inputs, + signatures, &tx_ring_members_info.rings, - &tx.signature_hash(), + // This will only return None on v2 miner txs. 
+ &tx.signature_hash() + .ok_or(TransactionError::TransactionVersionInvalid)?, ), - TxVersion::RingCT => Ok(ring_ct::check_input_signatures( - &tx.signature_hash(), - &tx.prefix.inputs, - &tx.rct_signatures, + Transaction::V2 { prefix, proofs } => Ok(ring_ct::check_input_signatures( + &tx.signature_hash() + .ok_or(TransactionError::TransactionVersionInvalid)?, + &prefix.inputs, + proofs + .as_ref() + .ok_or(TransactionError::TransactionVersionInvalid)?, &tx_ring_members_info.rings, )?), } diff --git a/consensus/rules/src/transactions/contextual_data.rs b/consensus/rules/src/transactions/contextual_data.rs index 6af3ad3..282093d 100644 --- a/consensus/rules/src/transactions/contextual_data.rs +++ b/consensus/rules/src/transactions/contextual_data.rs @@ -70,7 +70,7 @@ pub struct TxRingMembersInfo { pub rings: Rings, /// Information on the structure of the decoys, must be [`None`] for txs before [`HardFork::V1`] pub decoy_info: Option, - pub youngest_used_out_height: u64, + pub youngest_used_out_height: usize, pub time_locked_outs: Vec, } diff --git a/consensus/rules/src/transactions/ring_ct.rs b/consensus/rules/src/transactions/ring_ct.rs index 38b56eb..62f71dd 100644 --- a/consensus/rules/src/transactions/ring_ct.rs +++ b/consensus/rules/src/transactions/ring_ct.rs @@ -1,13 +1,13 @@ use curve25519_dalek::{EdwardsPoint, Scalar}; use hex_literal::hex; use monero_serai::{ + generators::H, ringct::{ clsag::ClsagError, mlsag::{AggregateRingMatrixBuilder, MlsagError, RingMatrix}, - RctPrunable, RctSignatures, RctType, + RctProofs, RctPrunable, RctType, }, - transaction::{Input, Transaction}, - H, + transaction::Input, }; use rand::thread_rng; #[cfg(feature = "rayon")] @@ -48,12 +48,12 @@ fn check_rct_type(ty: &RctType, hf: HardFork, tx_hash: &[u8; 32]) -> Result<(), use RctType as T; match ty { - T::MlsagAggregate | T::MlsagIndividual if hf >= F::V4 && hf < F::V9 => Ok(()), - T::Bulletproofs if hf >= F::V8 && hf < F::V11 => Ok(()), - T::BulletproofsCompactAmount if hf >= 
F::V10 && hf < F::V14 => Ok(()), - T::BulletproofsCompactAmount if GRANDFATHERED_TRANSACTIONS.contains(tx_hash) => Ok(()), - T::Clsag if hf >= F::V13 && hf < F::V16 => Ok(()), - T::BulletproofsPlus if hf >= F::V15 => Ok(()), + T::AggregateMlsagBorromean | T::MlsagBorromean if hf >= F::V4 && hf < F::V9 => Ok(()), + T::MlsagBulletproofs if hf >= F::V8 && hf < F::V11 => Ok(()), + T::MlsagBulletproofsCompactAmount if hf >= F::V10 && hf < F::V14 => Ok(()), + T::MlsagBulletproofsCompactAmount if GRANDFATHERED_TRANSACTIONS.contains(tx_hash) => Ok(()), + T::ClsagBulletproof if hf >= F::V13 && hf < F::V16 => Ok(()), + T::ClsagBulletproofPlus if hf >= F::V15 => Ok(()), _ => Err(RingCTError::TypeNotAllowed), } } @@ -61,20 +61,22 @@ fn check_rct_type(ty: &RctType, hf: HardFork, tx_hash: &[u8; 32]) -> Result<(), /// Checks that the pseudo-outs sum to the same point as the output commitments. /// /// -fn simple_type_balances(rct_sig: &RctSignatures) -> Result<(), RingCTError> { - let pseudo_outs = if rct_sig.rct_type() == RctType::MlsagIndividual { +fn simple_type_balances(rct_sig: &RctProofs) -> Result<(), RingCTError> { + let pseudo_outs = if rct_sig.rct_type() == RctType::MlsagBorromean { &rct_sig.base.pseudo_outs } else { match &rct_sig.prunable { RctPrunable::Clsag { pseudo_outs, .. } + | RctPrunable::MlsagBulletproofsCompactAmount { pseudo_outs, .. } | RctPrunable::MlsagBulletproofs { pseudo_outs, .. } => pseudo_outs, - _ => panic!("RingCT type is not simple!"), + RctPrunable::MlsagBorromean { .. } => &rct_sig.base.pseudo_outs, + RctPrunable::AggregateMlsagBorromean { .. 
} => panic!("RingCT type is not simple!"), } }; let sum_inputs = pseudo_outs.iter().sum::(); - let sum_outputs = rct_sig.base.commitments.iter().sum::() - + Scalar::from(rct_sig.base.fee) * H(); + let sum_outputs = + rct_sig.base.commitments.iter().sum::() + Scalar::from(rct_sig.base.fee) * *H; if sum_inputs == sum_outputs { Ok(()) @@ -89,13 +91,12 @@ fn simple_type_balances(rct_sig: &RctSignatures) -> Result<(), RingCTError> { /// /// fn check_output_range_proofs( - rct_sig: &RctSignatures, + proofs: &RctProofs, mut verifier: impl BatchVerifier, ) -> Result<(), RingCTError> { - let commitments = &rct_sig.base.commitments; + let commitments = &proofs.base.commitments; - match &rct_sig.prunable { - RctPrunable::Null => Err(RingCTError::TypeNotAllowed)?, + match &proofs.prunable { RctPrunable::MlsagBorromean { borromean, .. } | RctPrunable::AggregateMlsagBorromean { borromean, .. } => try_par_iter(borromean) .zip(commitments) @@ -106,10 +107,11 @@ fn check_output_range_proofs( Err(RingCTError::BorromeanRangeInvalid) } }), - RctPrunable::MlsagBulletproofs { bulletproofs, .. } - | RctPrunable::Clsag { bulletproofs, .. } => { + RctPrunable::MlsagBulletproofs { bulletproof, .. } + | RctPrunable::MlsagBulletproofsCompactAmount { bulletproof, .. } + | RctPrunable::Clsag { bulletproof, .. 
} => { if verifier.queue_statement(|verifier| { - bulletproofs.batch_verify(&mut thread_rng(), verifier, (), commitments) + bulletproof.batch_verify(&mut thread_rng(), verifier, commitments) }) { Ok(()) } else { @@ -120,18 +122,18 @@ fn check_output_range_proofs( } pub(crate) fn ring_ct_semantic_checks( - tx: &Transaction, + proofs: &RctProofs, tx_hash: &[u8; 32], verifier: impl BatchVerifier, hf: &HardFork, ) -> Result<(), RingCTError> { - let rct_type = tx.rct_signatures.rct_type(); + let rct_type = proofs.rct_type(); check_rct_type(&rct_type, *hf, tx_hash)?; - check_output_range_proofs(&tx.rct_signatures, verifier)?; + check_output_range_proofs(proofs, verifier)?; - if rct_type != RctType::MlsagAggregate { - simple_type_balances(&tx.rct_signatures)?; + if rct_type != RctType::AggregateMlsagBorromean { + simple_type_balances(proofs)?; } Ok(()) @@ -144,7 +146,7 @@ pub(crate) fn ring_ct_semantic_checks( pub(crate) fn check_input_signatures( msg: &[u8; 32], inputs: &[Input], - rct_sig: &RctSignatures, + proofs: &RctProofs, rings: &Rings, ) -> Result<(), RingCTError> { let Rings::RingCT(rings) = rings else { @@ -155,15 +157,15 @@ pub(crate) fn check_input_signatures( Err(RingCTError::RingInvalid)?; } - let pseudo_outs = match &rct_sig.prunable { + let pseudo_outs = match &proofs.prunable { RctPrunable::MlsagBulletproofs { pseudo_outs, .. } + | RctPrunable::MlsagBulletproofsCompactAmount { pseudo_outs, .. } | RctPrunable::Clsag { pseudo_outs, .. } => pseudo_outs.as_slice(), - RctPrunable::MlsagBorromean { .. } => rct_sig.base.pseudo_outs.as_slice(), - RctPrunable::AggregateMlsagBorromean { .. } | RctPrunable::Null => &[], + RctPrunable::MlsagBorromean { .. } => proofs.base.pseudo_outs.as_slice(), + RctPrunable::AggregateMlsagBorromean { .. } => &[], }; - match &rct_sig.prunable { - RctPrunable::Null => Err(RingCTError::TypeNotAllowed)?, + match &proofs.prunable { RctPrunable::AggregateMlsagBorromean { mlsag, .. 
} => { let key_images = inputs .iter() @@ -176,11 +178,14 @@ pub(crate) fn check_input_signatures( .collect::>(); let mut matrix = - AggregateRingMatrixBuilder::new(&rct_sig.base.commitments, rct_sig.base.fee); + AggregateRingMatrixBuilder::new(&proofs.base.commitments, proofs.base.fee); + rings.iter().try_for_each(|ring| matrix.push_ring(ring))?; + Ok(mlsag.verify(msg, &matrix.build()?, &key_images)?) } RctPrunable::MlsagBorromean { mlsags, .. } + | RctPrunable::MlsagBulletproofsCompactAmount { mlsags, .. } | RctPrunable::MlsagBulletproofs { mlsags, .. } => try_par_iter(mlsags) .zip(pseudo_outs) .zip(inputs) @@ -216,18 +221,21 @@ mod tests { #[test] fn grandfathered_bulletproofs2() { - assert!( - check_rct_type(&RctType::BulletproofsCompactAmount, HardFork::V14, &[0; 32]).is_err() - ); + assert!(check_rct_type( + &RctType::MlsagBulletproofsCompactAmount, + HardFork::V14, + &[0; 32] + ) + .is_err()); assert!(check_rct_type( - &RctType::BulletproofsCompactAmount, + &RctType::MlsagBulletproofsCompactAmount, HardFork::V14, &GRANDFATHERED_TRANSACTIONS[0] ) .is_ok()); assert!(check_rct_type( - &RctType::BulletproofsCompactAmount, + &RctType::MlsagBulletproofsCompactAmount, HardFork::V14, &GRANDFATHERED_TRANSACTIONS[1] ) diff --git a/consensus/rules/src/transactions/tests.rs b/consensus/rules/src/transactions/tests.rs index 1d7591b..0bea08c 100644 --- a/consensus/rules/src/transactions/tests.rs +++ b/consensus/rules/src/transactions/tests.rs @@ -97,31 +97,6 @@ fn test_torsion_ki() { } } -/// Returns a strategy that resolves to a [`RctType`] that uses -/// BPs(+). -#[allow(unreachable_code)] -#[allow(clippy::diverging_sub_expression)] -fn bulletproof_rct_type() -> BoxedStrategy { - return prop_oneof![ - Just(RctType::Bulletproofs), - Just(RctType::BulletproofsCompactAmount), - Just(RctType::Clsag), - Just(RctType::BulletproofsPlus), - ] - .boxed(); - - // Here to make sure this is updated when needed. 
- match unreachable!() { - RctType::Null => {} - RctType::MlsagAggregate => {} - RctType::MlsagIndividual => {} - RctType::Bulletproofs => {} - RctType::BulletproofsCompactAmount => {} - RctType::Clsag => {} - RctType::BulletproofsPlus => {} - }; -} - prop_compose! { /// Returns a valid prime-order point. fn random_point()(bytes in any::<[u8; 32]>()) -> EdwardsPoint { @@ -240,13 +215,13 @@ proptest! { } #[test] - fn test_valid_number_of_outputs(valid_numb_outs in 2..17_usize, rct_type in bulletproof_rct_type()) { - prop_assert!(check_number_of_outputs(valid_numb_outs, &HardFork::V16, &TxVersion::RingCT, &rct_type).is_ok()); + fn test_valid_number_of_outputs(valid_numb_outs in 2..17_usize) { + prop_assert!(check_number_of_outputs(valid_numb_outs, &HardFork::V16, &TxVersion::RingCT, true).is_ok()); } #[test] - fn test_invalid_number_of_outputs(numb_outs in 17..usize::MAX, rct_type in bulletproof_rct_type()) { - prop_assert!(check_number_of_outputs(numb_outs, &HardFork::V16, &TxVersion::RingCT, &rct_type).is_err()); + fn test_invalid_number_of_outputs(numb_outs in 17..usize::MAX) { + prop_assert!(check_number_of_outputs(numb_outs, &HardFork::V16, &TxVersion::RingCT, true).is_err()); } #[test] @@ -256,7 +231,7 @@ proptest! 
{ } #[test] - fn test_block_unlock_time(height in 1..u64::MAX) { + fn test_block_unlock_time(height in 1..usize::MAX) { prop_assert!(check_block_time_lock(height, height)); prop_assert!(!check_block_time_lock(height, height - 1)); prop_assert!(check_block_time_lock(height, height+1)); diff --git a/consensus/src/batch_verifier.rs b/consensus/src/batch_verifier.rs index 44493a6..69018ac 100644 --- a/consensus/src/batch_verifier.rs +++ b/consensus/src/batch_verifier.rs @@ -1,12 +1,14 @@ use std::{cell::RefCell, ops::DerefMut}; -use multiexp::BatchVerifier as InternalBatchVerifier; +use monero_serai::ringct::bulletproofs::BatchVerifier as InternalBatchVerifier; use rayon::prelude::*; use thread_local::ThreadLocal; +use cuprate_consensus_rules::batch_verifier::BatchVerifier; + /// A multithreaded batch verifier. pub struct MultiThreadedBatchVerifier { - internal: ThreadLocal>>, + internal: ThreadLocal>, } impl MultiThreadedBatchVerifier { @@ -22,19 +24,22 @@ impl MultiThreadedBatchVerifier { .into_iter() .map(RefCell::into_inner) .par_bridge() - .find_any(|batch_verifier| !batch_verifier.verify_vartime()) - .is_none() + .try_for_each(|batch_verifier| { + if batch_verifier.verify() { + Ok(()) + } else { + Err(()) + } + }) + .is_ok() } } -impl cuprate_consensus_rules::batch_verifier::BatchVerifier for &'_ MultiThreadedBatchVerifier { - fn queue_statement( - &mut self, - stmt: impl FnOnce(&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>) -> R, - ) -> R { +impl BatchVerifier for &'_ MultiThreadedBatchVerifier { + fn queue_statement(&mut self, stmt: impl FnOnce(&mut InternalBatchVerifier) -> R) -> R { let mut verifier = self .internal - .get_or(|| RefCell::new(InternalBatchVerifier::new(32))) + .get_or(|| RefCell::new(InternalBatchVerifier::new())) .borrow_mut(); stmt(verifier.deref_mut()) diff --git a/consensus/src/block.rs b/consensus/src/block.rs index 1b36eb9..f5aac5e 100644 --- a/consensus/src/block.rs +++ b/consensus/src/block.rs @@ -57,7 +57,7 @@ pub 
struct PreparedBlockExPow { /// The block's hash. pub block_hash: [u8; 32], /// The height of the block. - pub height: u64, + pub height: usize, /// The weight of the block's miner transaction. pub miner_tx_weight: usize, @@ -74,7 +74,7 @@ impl PreparedBlockExPow { let (hf_version, hf_vote) = HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?; - let Some(Input::Gen(height)) = block.miner_tx.prefix.inputs.first() else { + let Some(Input::Gen(height)) = block.miner_transaction.prefix().inputs.first() else { Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputNotOfTypeGen, )))? @@ -88,7 +88,7 @@ impl PreparedBlockExPow { block_hash: block.hash(), height: *height, - miner_tx_weight: block.miner_tx.weight(), + miner_tx_weight: block.miner_transaction.weight(), block, }) } @@ -128,7 +128,7 @@ impl PreparedBlock { let (hf_version, hf_vote) = HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?; - let [Input::Gen(height)] = &block.miner_tx.prefix.inputs[..] else { + let [Input::Gen(height)] = &block.miner_transaction.prefix().inputs[..] else { Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputNotOfTypeGen, )))? 
@@ -142,12 +142,12 @@ impl PreparedBlock { block_hash: block.hash(), pow_hash: calculate_pow_hash( randomx_vm, - &block.serialize_hashable(), + &block.serialize_pow_hash(), *height, &hf_version, )?, - miner_tx_weight: block.miner_tx.weight(), + miner_tx_weight: block.miner_transaction.weight(), block, }) } @@ -172,12 +172,12 @@ impl PreparedBlock { block_hash: block.block_hash, pow_hash: calculate_pow_hash( randomx_vm, - &block.block.serialize_hashable(), + &block.block.serialize_pow_hash(), block.height, &block.hf_version, )?, - miner_tx_weight: block.block.miner_tx.weight(), + miner_tx_weight: block.block.miner_transaction.weight(), block: block.block, }) } @@ -359,8 +359,8 @@ where // Set up the block and just pass it to [`verify_prepped_main_chain_block`] - // We just use the raw `major_version` here, no need to turn it into a `HardFork`. - let rx_vms = if block.header.major_version < 12 { + // We just use the raw `hardfork_version` here, no need to turn it into a `HardFork`. + let rx_vms = if block.header.hardfork_version < 12 { HashMap::new() } else { let BlockChainContextResponse::RxVms(rx_vms) = context_svc @@ -443,12 +443,12 @@ where check_block_pow(&prepped_block.pow_hash, context.next_difficulty) .map_err(ConsensusError::Block)?; - if prepped_block.block.txs.len() != txs.len() { + if prepped_block.block.transactions.len() != txs.len() { return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect); } - if !prepped_block.block.txs.is_empty() { - for (expected_tx_hash, tx) in prepped_block.block.txs.iter().zip(txs.iter()) { + if !prepped_block.block.transactions.is_empty() { + for (expected_tx_hash, tx) in prepped_block.block.transactions.iter().zip(txs.iter()) { if expected_tx_hash != &tx.tx_hash { return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect); } diff --git a/consensus/src/block/alt_block.rs b/consensus/src/block/alt_block.rs index cf6f213..8944083 100644 --- a/consensus/src/block/alt_block.rs +++ b/consensus/src/block/alt_block.rs 
@@ -63,7 +63,7 @@ where }; // Check if the block's miner input is formed correctly. - let [Input::Gen(height)] = &block.miner_tx.prefix.inputs[..] else { + let [Input::Gen(height)] = &block.miner_transaction.prefix().inputs[..] else { Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputNotOfTypeGen, )))? @@ -79,7 +79,7 @@ where let prepped_block = { let rx_vm = alt_rx_vm( alt_context_cache.chain_height, - block.header.major_version, + block.header.hardfork_version, alt_context_cache.parent_chain, &mut alt_context_cache, &mut context_svc, @@ -188,7 +188,7 @@ where /// /// If the `hf` is less than 12 (the height RX activates), then [`None`] is returned. async fn alt_rx_vm( - block_height: u64, + block_height: usize, hf: u8, parent_chain: Chain, alt_chain_context: &mut AltChainContextCache, diff --git a/consensus/src/block/free.rs b/consensus/src/block/free.rs index 8a61e80..46698e5 100644 --- a/consensus/src/block/free.rs +++ b/consensus/src/block/free.rs @@ -12,14 +12,14 @@ pub(crate) fn pull_ordered_transactions( block: &Block, mut txs: HashMap<[u8; 32], TransactionVerificationData>, ) -> Result, ExtendedConsensusError> { - if block.txs.len() != txs.len() { + if block.transactions.len() != txs.len() { return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect); } let mut ordered_txs = Vec::with_capacity(txs.len()); - if !block.txs.is_empty() { - for tx_hash in &block.txs { + if !block.transactions.is_empty() { + for tx_hash in &block.transactions { let tx = txs .remove(tx_hash) .ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?; diff --git a/consensus/src/context.rs b/consensus/src/context.rs index fffbe90..26be75c 100644 --- a/consensus/src/context.rs +++ b/consensus/src/context.rs @@ -202,7 +202,7 @@ pub struct NewBlockData { /// The blocks hash. pub block_hash: [u8; 32], /// The blocks height. - pub height: u64, + pub height: usize, /// The blocks timestamp. pub timestamp: u64, /// The blocks weight. 
@@ -246,7 +246,7 @@ pub enum BlockChainContextRequest { /// # Panics /// /// This will panic if the number of blocks will pop the genesis block. - numb_blocks: u64, + numb_blocks: usize, }, /// Clear the alt chain context caches. ClearAltCache, @@ -289,7 +289,7 @@ pub enum BlockChainContextRequest { /// handle getting the randomX VM of an alt chain. AltChainRxVM { /// The height the RandomX VM is needed for. - height: u64, + height: usize, /// The chain to look in for the seed. chain: Chain, /// An internal token to prevent external crates calling this request. @@ -313,7 +313,7 @@ pub enum BlockChainContextResponse { /// Blockchain context response. Context(BlockChainContext), /// A map of seed height to RandomX VMs. - RxVms(HashMap>), + RxVms(HashMap>), /// A list of difficulties. BatchDifficulties(Vec), /// An alt chain context cache. diff --git a/consensus/src/context/alt_chains.rs b/consensus/src/context/alt_chains.rs index f0c391d..5586226 100644 --- a/consensus/src/context/alt_chains.rs +++ b/consensus/src/context/alt_chains.rs @@ -32,10 +32,10 @@ pub struct AltChainContextCache { pub difficulty_cache: Option, /// A cached RX VM. - pub cached_rx_vm: Option<(u64, Arc)>, + pub cached_rx_vm: Option<(usize, Arc)>, /// The chain height of the alt chain. - pub chain_height: u64, + pub chain_height: usize, /// The top hash of the alt chain. pub top_hash: [u8; 32], /// The [`ChainID`] of the alt chain. @@ -48,7 +48,7 @@ impl AltChainContextCache { /// Add a new block to the cache. 
pub fn add_new_block( &mut self, - height: u64, + height: usize, block_hash: [u8; 32], block_weight: usize, long_term_block_weight: usize, diff --git a/consensus/src/context/difficulty.rs b/consensus/src/context/difficulty.rs index 9ec0f1e..eb67cf5 100644 --- a/consensus/src/context/difficulty.rs +++ b/consensus/src/context/difficulty.rs @@ -48,8 +48,8 @@ impl DifficultyCacheConfig { } /// Returns the total amount of blocks we need to track to calculate difficulty - pub fn total_block_count(&self) -> u64 { - (self.window + self.lag).try_into().unwrap() + pub fn total_block_count(&self) -> usize { + self.window + self.lag } /// The amount of blocks we account for after removing the outliers. @@ -78,7 +78,7 @@ pub struct DifficultyCache { /// The current cumulative difficulty of the chain. pub(crate) cumulative_difficulties: VecDeque, /// The last height we accounted for. - pub(crate) last_accounted_height: u64, + pub(crate) last_accounted_height: usize, /// The config pub(crate) config: DifficultyCacheConfig, } @@ -87,7 +87,7 @@ impl DifficultyCache { /// Initialize the difficulty cache from the specified chain height. 
#[instrument(name = "init_difficulty_cache", level = "info", skip(database, config))] pub async fn init_from_chain_height( - chain_height: u64, + chain_height: usize, config: DifficultyCacheConfig, database: D, chain: Chain, @@ -104,7 +104,7 @@ impl DifficultyCache { let (timestamps, cumulative_difficulties) = get_blocks_in_pow_info(database.clone(), block_start..chain_height, chain).await?; - debug_assert_eq!(timestamps.len() as u64, chain_height - block_start); + debug_assert_eq!(timestamps.len(), chain_height - block_start); tracing::info!( "Current chain height: {}, accounting for {} blocks timestamps", @@ -132,14 +132,10 @@ impl DifficultyCache { #[instrument(name = "pop_blocks_diff_cache", skip_all, fields(numb_blocks = numb_blocks))] pub async fn pop_blocks_main_chain( &mut self, - numb_blocks: u64, + numb_blocks: usize, database: D, ) -> Result<(), ExtendedConsensusError> { - let Some(retained_blocks) = self - .timestamps - .len() - .checked_sub(usize::try_from(numb_blocks).unwrap()) - else { + let Some(retained_blocks) = self.timestamps.len().checked_sub(numb_blocks) else { // More blocks to pop than we have in the cache, so just restart a new cache. *self = Self::init_from_chain_height( self.last_accounted_height - numb_blocks + 1, @@ -167,7 +163,7 @@ impl DifficultyCache { database, new_start_height // current_chain_height - self.timestamps.len() blocks are already in the cache. - ..(current_chain_height - u64::try_from(self.timestamps.len()).unwrap()), + ..(current_chain_height - self.timestamps.len()), Chain::Main, ) .await?; @@ -187,7 +183,7 @@ impl DifficultyCache { } /// Add a new block to the difficulty cache. 
- pub fn new_block(&mut self, height: u64, timestamp: u64, cumulative_difficulty: u128) { + pub fn new_block(&mut self, height: usize, timestamp: u64, cumulative_difficulty: u128) { assert_eq!(self.last_accounted_height + 1, height); self.last_accounted_height += 1; @@ -199,7 +195,7 @@ impl DifficultyCache { self.cumulative_difficulties .push_back(cumulative_difficulty); - if u64::try_from(self.timestamps.len()).unwrap() > self.config.total_block_count() { + if self.timestamps.len() > self.config.total_block_count() { self.timestamps.pop_front(); self.cumulative_difficulties.pop_front(); } @@ -244,7 +240,7 @@ impl DifficultyCache { let last_cum_diff = cumulative_difficulties.back().copied().unwrap_or(1); cumulative_difficulties.push_back(last_cum_diff + *difficulties.last().unwrap()); - if u64::try_from(timestamps.len()).unwrap() > self.config.total_block_count() { + if timestamps.len() > self.config.total_block_count() { diff_info_popped.push(( timestamps.pop_front().unwrap(), cumulative_difficulties.pop_front().unwrap(), @@ -266,22 +262,21 @@ impl DifficultyCache { /// /// Will return [`None`] if there aren't enough blocks. pub fn median_timestamp(&self, numb_blocks: usize) -> Option { - let mut timestamps = - if self.last_accounted_height + 1 == u64::try_from(numb_blocks).unwrap() { - // if the chain height is equal to `numb_blocks` add the genesis block. - // otherwise if the chain height is less than `numb_blocks` None is returned - // and if it's more it would be excluded from calculations. - let mut timestamps = self.timestamps.clone(); - // all genesis blocks have a timestamp of 0. - // https://cuprate.github.io/monero-book/consensus_rules/genesis_block.html - timestamps.push_front(0); - timestamps.into() - } else { - self.timestamps - .range(self.timestamps.len().checked_sub(numb_blocks)?..) 
- .copied() - .collect::>() - }; + let mut timestamps = if self.last_accounted_height + 1 == numb_blocks { + // if the chain height is equal to `numb_blocks` add the genesis block. + // otherwise if the chain height is less than `numb_blocks` None is returned + // and if it's more it would be excluded from calculations. + let mut timestamps = self.timestamps.clone(); + // all genesis blocks have a timestamp of 0. + // https://cuprate.github.io/monero-book/consensus_rules/genesis_block.html + timestamps.push_front(0); + timestamps.into() + } else { + self.timestamps + .range(self.timestamps.len().checked_sub(numb_blocks)?..) + .copied() + .collect::>() + }; timestamps.sort_unstable(); debug_assert_eq!(timestamps.len(), numb_blocks); @@ -368,7 +363,7 @@ fn get_window_start_and_end( #[instrument(name = "get_blocks_timestamps", skip(database), level = "info")] async fn get_blocks_in_pow_info( database: D, - block_heights: Range, + block_heights: Range, chain: Chain, ) -> Result<(VecDeque, VecDeque), ExtendedConsensusError> { tracing::info!("Getting blocks timestamps"); diff --git a/consensus/src/context/hardforks.rs b/consensus/src/context/hardforks.rs index 7972a0e..057e1c3 100644 --- a/consensus/src/context/hardforks.rs +++ b/consensus/src/context/hardforks.rs @@ -14,7 +14,7 @@ use crate::{Database, ExtendedConsensusError}; /// The default amount of hard-fork votes to track to decide on activation of a hard-fork. /// /// ref: -const DEFAULT_WINDOW_SIZE: u64 = 10080; // supermajority window check length - a week +const DEFAULT_WINDOW_SIZE: usize = 10080; // supermajority window check length - a week /// Configuration for hard-forks. /// @@ -23,7 +23,7 @@ pub struct HardForkConfig { /// The network we are on. pub(crate) info: HFsInfo, /// The amount of votes we are taking into account to decide on a fork activation. 
- pub(crate) window: u64, + pub(crate) window: usize, } impl HardForkConfig { @@ -64,14 +64,14 @@ pub struct HardForkState { pub(crate) votes: HFVotes, /// The last block height accounted for. - pub(crate) last_height: u64, + pub(crate) last_height: usize, } impl HardForkState { /// Initialize the [`HardForkState`] from the specified chain height. #[instrument(name = "init_hardfork_state", skip(config, database), level = "info")] pub async fn init_from_chain_height( - chain_height: u64, + chain_height: usize, config: HardForkConfig, mut database: D, ) -> Result { @@ -79,12 +79,8 @@ impl HardForkState { let block_start = chain_height.saturating_sub(config.window); - let votes = get_votes_in_range( - database.clone(), - block_start..chain_height, - usize::try_from(config.window).unwrap(), - ) - .await?; + let votes = + get_votes_in_range(database.clone(), block_start..chain_height, config.window).await?; if chain_height > config.window { debug_assert_eq!(votes.total_votes(), config.window) @@ -129,7 +125,7 @@ impl HardForkState { /// This _must_ only be used on a main-chain cache. pub async fn pop_blocks_main_chain( &mut self, - numb_blocks: u64, + numb_blocks: usize, database: D, ) -> Result<(), ExtendedConsensusError> { let Some(retained_blocks) = self.votes.total_votes().checked_sub(self.config.window) else { @@ -153,19 +149,18 @@ impl HardForkState { ..current_chain_height .saturating_sub(numb_blocks) .saturating_sub(retained_blocks), - usize::try_from(numb_blocks).unwrap(), + numb_blocks, ) .await?; - self.votes - .reverse_blocks(usize::try_from(numb_blocks).unwrap(), oldest_votes); + self.votes.reverse_blocks(numb_blocks, oldest_votes); self.last_height -= numb_blocks; Ok(()) } /// Add a new block to the cache. - pub fn new_block(&mut self, vote: HardFork, height: u64) { + pub fn new_block(&mut self, vote: HardFork, height: usize) { // We don't _need_ to take in `height` but it's for safety, so we don't silently loose track // of blocks. 
assert_eq!(self.last_height + 1, height); @@ -209,7 +204,7 @@ impl HardForkState { #[instrument(name = "get_votes", skip(database))] async fn get_votes_in_range( database: D, - block_heights: Range, + block_heights: Range, window_size: usize, ) -> Result { let mut votes = HFVotes::new(window_size); diff --git a/consensus/src/context/rx_vms.rs b/consensus/src/context/rx_vms.rs index 649146f..01aa973 100644 --- a/consensus/src/context/rx_vms.rs +++ b/consensus/src/context/rx_vms.rs @@ -74,9 +74,9 @@ impl RandomX for RandomXVM { #[derive(Clone, Debug)] pub struct RandomXVMCache { /// The top [`RX_SEEDS_CACHED`] RX seeds. - pub(crate) seeds: VecDeque<(u64, [u8; 32])>, + pub(crate) seeds: VecDeque<(usize, [u8; 32])>, /// The VMs for `seeds` (if after hf 12, otherwise this will be empty). - pub(crate) vms: HashMap>, + pub(crate) vms: HashMap>, /// A single cached VM that was given to us from a part of Cuprate. pub(crate) cached_vm: Option<([u8; 32], Arc)>, @@ -85,7 +85,7 @@ pub struct RandomXVMCache { impl RandomXVMCache { #[instrument(name = "init_rx_vm_cache", level = "info", skip(database))] pub async fn init_from_chain_height( - chain_height: u64, + chain_height: usize, hf: &HardFork, database: D, ) -> Result { @@ -94,7 +94,8 @@ impl RandomXVMCache { tracing::debug!("last {RX_SEEDS_CACHED} randomX seed heights: {seed_heights:?}",); - let seeds: VecDeque<(u64, [u8; 32])> = seed_heights.into_iter().zip(seed_hashes).collect(); + let seeds: VecDeque<(usize, [u8; 32])> = + seed_heights.into_iter().zip(seed_hashes).collect(); let vms = if hf >= &HardFork::V12 { tracing::debug!("Creating RandomX VMs"); @@ -132,7 +133,7 @@ impl RandomXVMCache { /// of them first. pub async fn get_alt_vm( &mut self, - height: u64, + height: usize, chain: Chain, database: D, ) -> Result, ExtendedConsensusError> { @@ -161,7 +162,7 @@ impl RandomXVMCache { } /// Get the main-chain RandomX VMs. 
- pub async fn get_vms(&mut self) -> HashMap> { + pub async fn get_vms(&mut self) -> HashMap> { match self.seeds.len().checked_sub(self.vms.len()) { // No difference in the amount of seeds to VMs. Some(0) => (), @@ -213,7 +214,7 @@ impl RandomXVMCache { } /// Removes all the RandomX VMs above the `new_height`. - pub fn pop_blocks_main_chain(&mut self, new_height: u64) { + pub fn pop_blocks_main_chain(&mut self, new_height: usize) { self.seeds.retain(|(height, _)| *height < new_height); self.vms.retain(|height, _| *height < new_height); } @@ -221,7 +222,7 @@ impl RandomXVMCache { /// Add a new block to the VM cache. /// /// hash is the block hash not the blocks PoW hash. - pub fn new_block(&mut self, height: u64, hash: &[u8; 32]) { + pub fn new_block(&mut self, height: usize, hash: &[u8; 32]) { if is_randomx_seed_height(height) { tracing::debug!("Block {height} is a randomX seed height, adding it to the cache.",); @@ -242,7 +243,7 @@ impl RandomXVMCache { /// Get the last `amount` of RX seeds, the top height returned here will not necessarily be the RX VM for the top block /// in the chain as VMs include some lag before a seed activates. -pub(crate) fn get_last_rx_seed_heights(mut last_height: u64, mut amount: usize) -> Vec { +pub(crate) fn get_last_rx_seed_heights(mut last_height: usize, mut amount: usize) -> Vec { let mut seeds = Vec::with_capacity(amount); if is_randomx_seed_height(last_height) { seeds.push(last_height); @@ -265,7 +266,7 @@ pub(crate) fn get_last_rx_seed_heights(mut last_height: u64, mut amount: usize) /// Gets the block hashes for the heights specified. 
async fn get_block_hashes( - heights: Vec, + heights: Vec, database: D, ) -> Result, ExtendedConsensusError> { let mut fut = FuturesOrdered::new(); diff --git a/consensus/src/context/task.rs b/consensus/src/context/task.rs index 79ddf4c..8939446 100644 --- a/consensus/src/context/task.rs +++ b/consensus/src/context/task.rs @@ -52,7 +52,7 @@ pub struct ContextTask { alt_chain_cache_map: AltChainMap, /// The current chain height. - chain_height: u64, + chain_height: usize, /// The top block hash. top_block_hash: [u8; 32], /// The total amount of coins generated. diff --git a/consensus/src/context/weight.rs b/consensus/src/context/weight.rs index 7cd5454..4c89139 100644 --- a/consensus/src/context/weight.rs +++ b/consensus/src/context/weight.rs @@ -24,21 +24,21 @@ use cuprate_types::{ use crate::{Database, ExtendedConsensusError, HardFork}; /// The short term block weight window. -const SHORT_TERM_WINDOW: u64 = 100; +const SHORT_TERM_WINDOW: usize = 100; /// The long term block weight window. -const LONG_TERM_WINDOW: u64 = 100000; +const LONG_TERM_WINDOW: usize = 100000; /// Configuration for the block weight cache. /// #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct BlockWeightsCacheConfig { - short_term_window: u64, - long_term_window: u64, + short_term_window: usize, + long_term_window: usize, } impl BlockWeightsCacheConfig { /// Creates a new [`BlockWeightsCacheConfig`] - pub const fn new(short_term_window: u64, long_term_window: u64) -> BlockWeightsCacheConfig { + pub const fn new(short_term_window: usize, long_term_window: usize) -> BlockWeightsCacheConfig { BlockWeightsCacheConfig { short_term_window, long_term_window, @@ -67,7 +67,7 @@ pub struct BlockWeightsCache { long_term_weights: RollingMedian, /// The height of the top block. - pub(crate) tip_height: u64, + pub(crate) tip_height: usize, pub(crate) config: BlockWeightsCacheConfig, } @@ -76,7 +76,7 @@ impl BlockWeightsCache { /// Initialize the [`BlockWeightsCache`] at the the given chain height. 
#[instrument(name = "init_weight_cache", level = "info", skip(database, config))] pub async fn init_from_chain_height( - chain_height: u64, + chain_height: usize, config: BlockWeightsCacheConfig, database: D, chain: Chain, @@ -101,17 +101,11 @@ impl BlockWeightsCache { Ok(BlockWeightsCache { short_term_block_weights: rayon_spawn_async(move || { - RollingMedian::from_vec( - short_term_block_weights, - usize::try_from(config.short_term_window).unwrap(), - ) + RollingMedian::from_vec(short_term_block_weights, config.short_term_window) }) .await, long_term_weights: rayon_spawn_async(move || { - RollingMedian::from_vec( - long_term_weights, - usize::try_from(config.long_term_window).unwrap(), - ) + RollingMedian::from_vec(long_term_weights, config.long_term_window) }) .await, tip_height: chain_height - 1, @@ -125,10 +119,10 @@ impl BlockWeightsCache { #[instrument(name = "pop_blocks_weight_cache", skip_all, fields(numb_blocks = numb_blocks))] pub async fn pop_blocks_main_chain( &mut self, - numb_blocks: u64, + numb_blocks: usize, database: D, ) -> Result<(), ExtendedConsensusError> { - if self.long_term_weights.window_len() <= usize::try_from(numb_blocks).unwrap() { + if self.long_term_weights.window_len() <= numb_blocks { // More blocks to pop than we have in the cache, so just restart a new cache. *self = Self::init_from_chain_height( self.tip_height - numb_blocks + 1, @@ -150,7 +144,7 @@ impl BlockWeightsCache { let old_long_term_weights = get_long_term_weight_in_range( new_long_term_start_height // current_chain_height - self.long_term_weights.len() blocks are already in the cache. 
- ..(chain_height - u64::try_from(self.long_term_weights.window_len()).unwrap()), + ..(chain_height - self.long_term_weights.window_len()), database.clone(), Chain::Main, ) @@ -163,11 +157,11 @@ impl BlockWeightsCache { let old_short_term_weights = get_blocks_weight_in_range( new_short_term_start_height // current_chain_height - self.long_term_weights.len() blocks are already in the cache. - ..(chain_height - u64::try_from(self.short_term_block_weights.window_len()).unwrap()), + ..(chain_height - self.short_term_block_weights.window_len()), database, - Chain::Main + Chain::Main, ) - .await?; + .await?; for _ in 0..numb_blocks { self.short_term_block_weights.pop_back(); @@ -186,7 +180,7 @@ impl BlockWeightsCache { /// /// The block_height **MUST** be one more than the last height the cache has /// seen. - pub fn new_block(&mut self, block_height: u64, block_weight: usize, long_term_weight: usize) { + pub fn new_block(&mut self, block_height: usize, block_weight: usize, long_term_weight: usize) { assert_eq!(self.tip_height + 1, block_height); self.tip_height += 1; tracing::debug!( @@ -290,7 +284,7 @@ pub fn calculate_block_long_term_weight( /// Gets the block weights from the blocks with heights in the range provided. #[instrument(name = "get_block_weights", skip(database))] async fn get_blocks_weight_in_range( - range: Range, + range: Range, database: D, chain: Chain, ) -> Result, ExtendedConsensusError> { @@ -314,7 +308,7 @@ async fn get_blocks_weight_in_range( /// Gets the block long term weights from the blocks with heights in the range provided. 
#[instrument(name = "get_long_term_weights", skip(database), level = "info")] async fn get_long_term_weight_in_range( - range: Range, + range: Range, database: D, chain: Chain, ) -> Result, ExtendedConsensusError> { diff --git a/consensus/src/tests/context.rs b/consensus/src/tests/context.rs index 8c3841e..bbf7bb0 100644 --- a/consensus/src/tests/context.rs +++ b/consensus/src/tests/context.rs @@ -29,10 +29,10 @@ const TEST_CONTEXT_CONFIG: ContextConfig = ContextConfig { #[tokio::test] async fn context_invalidated_on_new_block() -> Result<(), tower::BoxError> { - const BLOCKCHAIN_HEIGHT: u64 = 6000; + const BLOCKCHAIN_HEIGHT: usize = 6000; let mut runner = TestRunner::default(); - let db = arb_dummy_database(BLOCKCHAIN_HEIGHT.try_into().unwrap()) + let db = arb_dummy_database(BLOCKCHAIN_HEIGHT) .new_tree(&mut runner) .unwrap() .current(); @@ -71,10 +71,10 @@ async fn context_invalidated_on_new_block() -> Result<(), tower::BoxError> { #[tokio::test] async fn context_height_correct() -> Result<(), tower::BoxError> { - const BLOCKCHAIN_HEIGHT: u64 = 6000; + const BLOCKCHAIN_HEIGHT: usize = 6000; let mut runner = TestRunner::default(); - let db = arb_dummy_database(BLOCKCHAIN_HEIGHT.try_into().unwrap()) + let db = arb_dummy_database(BLOCKCHAIN_HEIGHT) .new_tree(&mut runner) .unwrap() .current(); diff --git a/consensus/src/tests/context/difficulty.rs b/consensus/src/tests/context/difficulty.rs index b59f62e..a79ae9b 100644 --- a/consensus/src/tests/context/difficulty.rs +++ b/consensus/src/tests/context/difficulty.rs @@ -63,10 +63,7 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> { let cfg = DifficultyCacheConfig::main_net(); let mut db_builder = DummyDatabaseBuilder::default(); - for (cum_dif, timestamp) in DIF_3000000_3002000 - .iter() - .take(cfg.total_block_count() as usize) - { + for (cum_dif, timestamp) in DIF_3000000_3002000.iter().take(cfg.total_block_count()) { db_builder.add_block( 
DummyBlockExtendedHeader::default().with_difficulty_info(*timestamp, *cum_dif), ) @@ -82,14 +79,14 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> { for (i, diff_info) in DIF_3000000_3002000 .windows(2) - .skip(cfg.total_block_count() as usize - 1) + .skip(cfg.total_block_count() - 1) .enumerate() { let diff = diff_info[1].0 - diff_info[0].0; assert_eq!(diff_cache.next_difficulty(&HardFork::V16), diff); - diff_cache.new_block(3_000_720 + i as u64, diff_info[1].1, diff_info[1].0); + diff_cache.new_block(3_000_720 + i, diff_info[1].1, diff_info[1].0); } Ok(()) @@ -104,7 +101,7 @@ prop_compose! { let (timestamps, mut cumulative_difficulties): (Vec<_>, Vec<_>) = blocks.into_iter().unzip(); cumulative_difficulties.sort_unstable(); DifficultyCache { - last_accounted_height: timestamps.len().try_into().unwrap(), + last_accounted_height: timestamps.len(), config: TEST_DIFFICULTY_CONFIG, timestamps: timestamps.into(), // we generate cumulative_difficulties in range 0..u64::MAX as if the generated values are close to u128::MAX @@ -165,7 +162,7 @@ proptest! { let mut timestamps: VecDeque = timestamps.into(); let diff_cache = DifficultyCache { - last_accounted_height: (TEST_WINDOW -1).try_into().unwrap(), + last_accounted_height: TEST_WINDOW -1, config: TEST_DIFFICULTY_CONFIG, timestamps: timestamps.clone(), // we dont need cumulative_difficulties @@ -234,7 +231,7 @@ proptest! { new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty); } - new_cache.pop_blocks_main_chain(blocks_to_pop as u64, database).await?; + new_cache.pop_blocks_main_chain(blocks_to_pop, database).await?; prop_assert_eq!(new_cache, old_cache); @@ -258,7 +255,7 @@ proptest! 
{ new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty); } - new_cache.pop_blocks_main_chain(blocks_to_pop as u64, database).await?; + new_cache.pop_blocks_main_chain(blocks_to_pop, database).await?; prop_assert_eq!(new_cache, old_cache); diff --git a/consensus/src/tests/context/hardforks.rs b/consensus/src/tests/context/hardforks.rs index d003b3c..ffdff59 100644 --- a/consensus/src/tests/context/hardforks.rs +++ b/consensus/src/tests/context/hardforks.rs @@ -10,7 +10,7 @@ use crate::{ }, }; -const TEST_WINDOW_SIZE: u64 = 25; +const TEST_WINDOW_SIZE: usize = 25; const TEST_HFS: [HFInfo; NUMB_OF_HARD_FORKS] = [ HFInfo::new(0, 0), @@ -79,7 +79,7 @@ async fn hf_v15_v16_correct() { for (i, (_, vote)) in HFS_2688888_2689608.into_iter().enumerate() { assert_eq!(state.current_hardfork, HardFork::V15); - state.new_block(vote, (2688888 + i) as u64); + state.new_block(vote, 2688888 + i); } assert_eq!(state.current_hardfork, HardFork::V16); @@ -91,8 +91,8 @@ proptest! { extra_hfs in vec(any::(), 0..100) ) { tokio_test::block_on(async move { - let numb_hfs = hfs.len() as u64; - let numb_pop_blocks = extra_hfs.len() as u64; + let numb_hfs = hfs.len(); + let numb_pop_blocks = extra_hfs.len(); let mut db_builder = DummyDatabaseBuilder::default(); @@ -102,7 +102,7 @@ proptest! { ); } - let db = db_builder.finish(Some(numb_hfs as usize)); + let db = db_builder.finish(Some(numb_hfs )); let mut state = HardForkState::init_from_chain_height( numb_hfs, @@ -114,7 +114,7 @@ proptest! 
{ let state_clone = state.clone(); for (i, hf) in extra_hfs.into_iter().enumerate() { - state.new_block(hf, state.last_height + u64::try_from(i).unwrap() + 1); + state.new_block(hf, state.last_height + i + 1); } state.pop_blocks_main_chain(numb_pop_blocks, db).await?; diff --git a/consensus/src/tests/context/weight.rs b/consensus/src/tests/context/weight.rs index 83c8bb9..6706d97 100644 --- a/consensus/src/tests/context/weight.rs +++ b/consensus/src/tests/context/weight.rs @@ -123,14 +123,14 @@ async fn weight_cache_calculates_correct_median() -> Result<(), tower::BoxError> .await?; for height in 1..=100 { - weight_cache.new_block(height as u64, height, height); + weight_cache.new_block(height, height, height); assert_eq!(weight_cache.median_short_term_weight(), height / 2); assert_eq!(weight_cache.median_long_term_weight(), height / 2); } for height in 101..=5000 { - weight_cache.new_block(height as u64, height, height); + weight_cache.new_block(height, height, height); assert_eq!(weight_cache.median_long_term_weight(), height / 2); } @@ -162,7 +162,7 @@ async fn calc_bw_ltw_2850000_3050000() { weight_cache.median_long_term_weight(), ); assert_eq!(calc_ltw, *ltw); - weight_cache.new_block((2950000 + i) as u64, *weight, *ltw); + weight_cache.new_block(2950000 + i, *weight, *ltw); } } diff --git a/consensus/src/tests/mock_db.rs b/consensus/src/tests/mock_db.rs index a620003..b138378 100644 --- a/consensus/src/tests/mock_db.rs +++ b/consensus/src/tests/mock_db.rs @@ -150,7 +150,7 @@ impl Service for DummyDatabase { async move { Ok(match req { BlockchainReadRequest::BlockExtendedHeader(id) => { - let mut id = usize::try_from(id).unwrap(); + let mut id = id; if let Some(dummy_height) = dummy_height { let block_len = blocks.read().unwrap().len(); @@ -173,8 +173,8 @@ impl Service for DummyDatabase { BlockchainResponse::BlockHash(hash) } BlockchainReadRequest::BlockExtendedHeaderInRange(range, _) => { - let mut end = usize::try_from(range.end).unwrap(); - let mut start = 
usize::try_from(range.start).unwrap(); + let mut end = range.end; + let mut start = range.start; if let Some(dummy_height) = dummy_height { let block_len = blocks.read().unwrap().len(); @@ -196,10 +196,7 @@ impl Service for DummyDatabase { ) } BlockchainReadRequest::ChainHeight => { - let height: u64 = dummy_height - .unwrap_or(blocks.read().unwrap().len()) - .try_into() - .unwrap(); + let height = dummy_height.unwrap_or(blocks.read().unwrap().len()); let mut top_hash = [0; 32]; top_hash[0..8].copy_from_slice(&height.to_le_bytes()); diff --git a/consensus/src/transactions.rs b/consensus/src/transactions.rs index 78104e9..978407e 100644 --- a/consensus/src/transactions.rs +++ b/consensus/src/transactions.rs @@ -12,10 +12,7 @@ use std::{ }; use futures::FutureExt; -use monero_serai::{ - ringct::RctType, - transaction::{Input, Timelock, Transaction}, -}; +use monero_serai::transaction::{Input, Timelock, Transaction}; use rayon::prelude::*; use tower::{Service, ServiceExt}; use tracing::instrument; @@ -37,6 +34,7 @@ use crate::{ }; pub mod contextual_data; +mod free; /// A struct representing the type of validation that needs to be completed for this transaction. #[derive(Debug, Copy, Clone, Eq, PartialEq)] @@ -103,22 +101,17 @@ impl TransactionVerificationData { let tx_hash = tx.hash(); let tx_blob = tx.serialize(); - // the tx weight is only different from the blobs length for bp(+) txs. 
- let tx_weight = match tx.rct_signatures.rct_type() { - RctType::Bulletproofs - | RctType::BulletproofsCompactAmount - | RctType::Clsag - | RctType::BulletproofsPlus => tx.weight(), - _ => tx_blob.len(), - }; + let tx_weight = free::tx_weight(&tx, &tx_blob); + + let fee = free::tx_fee(&tx)?; Ok(TransactionVerificationData { tx_hash, tx_blob, tx_weight, - fee: tx.rct_signatures.base.fee, + fee, cached_verification_state: StdMutex::new(CachedVerificationState::NotVerified), - version: TxVersion::from_raw(tx.prefix.version) + version: TxVersion::from_raw(tx.version()) .ok_or(TransactionError::TransactionVersionInvalid)?, tx, }) @@ -133,7 +126,7 @@ pub enum VerifyTxRequest { // TODO: Can we use references to remove the Vec? wont play nicely with Service though txs: Vec>, /// The current chain height. - current_chain_height: u64, + current_chain_height: usize, /// The top block hash. top_hash: [u8; 32], /// The value for time to use to check time locked outputs. @@ -147,7 +140,7 @@ pub enum VerifyTxRequest { /// The transactions to verify. txs: Vec, /// The current chain height. - current_chain_height: u64, + current_chain_height: usize, /// The top block hash. top_hash: [u8; 32], /// The value for time to use to check time locked outputs. @@ -246,7 +239,7 @@ where async fn prep_and_verify_transactions( database: D, txs: Vec, - current_chain_height: u64, + current_chain_height: usize, top_hash: [u8; 32], time_for_time_lock: u64, hf: HardFork, @@ -281,7 +274,7 @@ where async fn verify_prepped_transactions( mut database: D, txs: &[Arc], - current_chain_height: u64, + current_chain_height: usize, top_hash: [u8; 32], time_for_time_lock: u64, hf: HardFork, @@ -296,7 +289,7 @@ where let mut spent_kis = HashSet::with_capacity(txs.len()); txs.iter().try_for_each(|tx| { - tx.tx.prefix.inputs.iter().try_for_each(|input| { + tx.tx.prefix().inputs.iter().try_for_each(|input| { if let Input::ToKey { key_image, .. 
} = input { if !spent_kis.insert(key_image.compress().0) { tracing::debug!("Duplicate key image found in batch."); @@ -382,7 +375,7 @@ fn transactions_needing_verification( txs: &[Arc], hashes_in_main_chain: HashSet<[u8; 32]>, current_hf: &HardFork, - current_chain_height: u64, + current_chain_height: usize, time_for_time_lock: u64, ) -> Result< ( @@ -473,7 +466,7 @@ where async fn verify_transactions( txs: Vec<(Arc, VerificationNeeded)>, - current_chain_height: u64, + current_chain_height: usize, top_hash: [u8; 32], current_time_lock_timestamp: u64, hf: HardFork, @@ -501,7 +494,7 @@ where &hf, &batch_verifier, )?; - // make sure monero-serai calculated the same fee. + // make sure we calculated the right fee. assert_eq!(fee, tx.fee); } diff --git a/consensus/src/transactions/contextual_data.rs b/consensus/src/transactions/contextual_data.rs index b17fbe0..82f9976 100644 --- a/consensus/src/transactions/contextual_data.rs +++ b/consensus/src/transactions/contextual_data.rs @@ -149,7 +149,7 @@ pub async fn batch_get_ring_member_info( let mut output_ids = HashMap::new(); for tx_v_data in txs_verification_data.clone() { - insert_ring_member_ids(&tx_v_data.tx.prefix.inputs, &mut output_ids) + insert_ring_member_ids(&tx_v_data.tx.prefix().inputs, &mut output_ids) .map_err(ConsensusError::Transaction)?; } @@ -179,14 +179,14 @@ pub async fn batch_get_ring_member_info( let ring_members_for_tx = get_ring_members_for_inputs( |amt, idx| outputs.get(&amt)?.get(&idx).copied(), - &tx_v_data.tx.prefix.inputs, + &tx_v_data.tx.prefix().inputs, ) .map_err(ConsensusError::Transaction)?; let decoy_info = if hf != &HardFork::V1 { // this data is only needed after hard-fork 1. 
            Some(
-                DecoyInfo::new(&tx_v_data.tx.prefix.inputs, numb_outputs, hf)
+                DecoyInfo::new(&tx_v_data.tx.prefix().inputs, numb_outputs, hf)
                    .map_err(ConsensusError::Transaction)?,
            )
        } else {
@@ -222,7 +222,7 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>(
     let unique_input_amounts = txs_verification_data
         .iter()
         .flat_map(|tx_info| {
-            tx_info.tx.prefix.inputs.iter().map(|input| match input {
+            tx_info.tx.prefix().inputs.iter().map(|input| match input {
                 Input::ToKey { amount, .. } => amount.unwrap_or(0),
                 _ => 0,
             })
@@ -247,7 +247,7 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>(
     Ok(txs_verification_data.iter().map(move |tx_v_data| {
         DecoyInfo::new(
-            &tx_v_data.tx.prefix.inputs,
+            &tx_v_data.tx.prefix().inputs,
             |amt| outputs_with_amount.get(&amt).copied().unwrap_or(0),
             &hf,
         )
diff --git a/consensus/src/transactions/free.rs b/consensus/src/transactions/free.rs
new file mode 100644
index 0000000..5ffd16e
--- /dev/null
+++ b/consensus/src/transactions/free.rs
@@ -0,0 +1,64 @@
+use monero_serai::{
+    ringct::{bulletproofs::Bulletproof, RctType},
+    transaction::{Input, Transaction},
+};
+
+use cuprate_consensus_rules::transactions::TransactionError;
+
+/// Calculates the weight of a [`Transaction`].
+///
+/// This is more efficient than [`Transaction::weight`] if you already have the transaction blob.
+pub fn tx_weight(tx: &Transaction, tx_blob: &[u8]) -> usize {
+    // the tx weight is only different from the blobs length for bp(+) txs.
+
+    match &tx {
+        Transaction::V1 { .. } | Transaction::V2 { proofs: None, .. } => tx_blob.len(),
+        Transaction::V2 {
+            proofs: Some(proofs),
+            ..
+ } => match proofs.rct_type() { + RctType::AggregateMlsagBorromean | RctType::MlsagBorromean => tx_blob.len(), + RctType::MlsagBulletproofs + | RctType::MlsagBulletproofsCompactAmount + | RctType::ClsagBulletproof => { + tx_blob.len() + + Bulletproof::calculate_bp_clawback(false, tx.prefix().outputs.len()).0 + } + RctType::ClsagBulletproofPlus => { + tx_blob.len() + + Bulletproof::calculate_bp_clawback(true, tx.prefix().outputs.len()).0 + } + }, + } +} + +/// Calculates the fee of the [`Transaction`]. +pub fn tx_fee(tx: &Transaction) -> Result { + let mut fee = 0_u64; + + match &tx { + Transaction::V1 { prefix, .. } => { + for input in &prefix.inputs { + if let Input::ToKey { amount, .. } = input { + fee = fee + .checked_add(amount.unwrap_or(0)) + .ok_or(TransactionError::InputsOverflow)?; + } + } + + for output in &prefix.outputs { + fee.checked_sub(output.amount.unwrap_or(0)) + .ok_or(TransactionError::OutputsTooHigh)?; + } + } + Transaction::V2 { proofs, .. } => { + fee = proofs + .as_ref() + .ok_or(TransactionError::TransactionVersionInvalid)? 
+ .base + .fee; + } + }; + + Ok(fee) +} diff --git a/p2p/address-book/src/book.rs b/p2p/address-book/src/book.rs index 4b5a1e7..2f0ce6d 100644 --- a/p2p/address-book/src/book.rs +++ b/p2p/address-book/src/book.rs @@ -260,7 +260,7 @@ impl AddressBook { fn take_random_white_peer( &mut self, - block_needed: Option, + block_needed: Option, ) -> Option> { tracing::debug!("Retrieving random white peer"); self.white_list @@ -269,7 +269,7 @@ impl AddressBook { fn take_random_gray_peer( &mut self, - block_needed: Option, + block_needed: Option, ) -> Option> { tracing::debug!("Retrieving random gray peer"); self.gray_list diff --git a/p2p/address-book/src/peer_list.rs b/p2p/address-book/src/peer_list.rs index e2a15d8..f0a905a 100644 --- a/p2p/address-book/src/peer_list.rs +++ b/p2p/address-book/src/peer_list.rs @@ -88,7 +88,7 @@ impl PeerList { pub fn take_random_peer( &mut self, r: &mut R, - block_needed: Option, + block_needed: Option, must_keep_peers: &HashSet, ) -> Option> { // Take a random peer and see if it's in the list of must_keep_peers, if it is try again. diff --git a/p2p/p2p-core/src/services.rs b/p2p/p2p-core/src/services.rs index b01bde0..6d66cfa 100644 --- a/p2p/p2p-core/src/services.rs +++ b/p2p/p2p-core/src/services.rs @@ -14,7 +14,7 @@ pub enum PeerSyncRequest { /// claim to have a higher cumulative difficulty. PeersToSyncFrom { current_cumulative_difficulty: u128, - block_needed: Option, + block_needed: Option, }, /// Add/update a peer's core sync data. IncomingCoreSyncData(InternalPeerID, ConnectionHandle, CoreSyncData), @@ -115,18 +115,18 @@ pub enum AddressBookRequest { /// Takes a random white peer from the peer list. If height is specified /// then the peer list should retrieve a peer that should have a full /// block at that height according to it's pruning seed - TakeRandomWhitePeer { height: Option }, + TakeRandomWhitePeer { height: Option }, /// Takes a random gray peer from the peer list. 
If height is specified /// then the peer list should retrieve a peer that should have a full /// block at that height according to it's pruning seed - TakeRandomGrayPeer { height: Option }, + TakeRandomGrayPeer { height: Option }, /// Takes a random peer from the peer list. If height is specified /// then the peer list should retrieve a peer that should have a full /// block at that height according to it's pruning seed. /// /// The address book will look in the white peer list first, then the gray /// one if no peer is found. - TakeRandomPeer { height: Option }, + TakeRandomPeer { height: Option }, /// Gets the specified number of white peers, or less if we don't have enough. GetWhitePeers(usize), /// Checks if the given peer is banned. diff --git a/p2p/p2p/src/block_downloader.rs b/p2p/p2p/src/block_downloader.rs index 5f53054..d295016 100644 --- a/p2p/p2p/src/block_downloader.rs +++ b/p2p/p2p/src/block_downloader.rs @@ -121,7 +121,7 @@ pub enum ChainSvcResponse { /// The response for [`ChainSvcRequest::FindFirstUnknown`]. /// /// Contains the index of the first unknown block and its expected height. - FindFirstUnknown(Option<(usize, u64)>), + FindFirstUnknown(Option<(usize, usize)>), /// The response for [`ChainSvcRequest::CumulativeDifficulty`]. /// /// The current cumulative difficulty of our chain. @@ -207,7 +207,7 @@ struct BlockDownloader { /// The amount of blocks to request in the next batch. amount_of_blocks_to_request: usize, /// The height at which [`Self::amount_of_blocks_to_request`] was updated. - amount_of_blocks_to_request_updated_at: u64, + amount_of_blocks_to_request_updated_at: usize, /// The amount of consecutive empty chain entries we received. /// @@ -225,12 +225,12 @@ struct BlockDownloader { /// The current inflight requests. /// /// This is a map of batch start heights to block IDs and related information of the batch. 
- inflight_requests: BTreeMap>, + inflight_requests: BTreeMap>, /// A queue of start heights from failed batches that should be retried. /// /// Wrapped in [`Reverse`] so we prioritize early batches. - failed_batches: BinaryHeap>, + failed_batches: BinaryHeap>, block_queue: BlockQueue, @@ -524,7 +524,7 @@ where /// Handles a response to a request to get blocks from a peer. async fn handle_download_batch_res( &mut self, - start_height: u64, + start_height: usize, res: Result<(ClientPoolDropGuard, BlockBatch), BlockDownloadError>, chain_tracker: &mut ChainTracker, pending_peers: &mut BTreeMap>>, @@ -692,18 +692,19 @@ where /// The return value from the block download tasks. struct BlockDownloadTaskResponse { /// The start height of the batch. - start_height: u64, + start_height: usize, /// A result containing the batch or an error. result: Result<(ClientPoolDropGuard, BlockBatch), BlockDownloadError>, } /// Returns if a peer has all the blocks in a range, according to its [`PruningSeed`]. -fn client_has_block_in_range(pruning_seed: &PruningSeed, start_height: u64, length: usize) -> bool { +fn client_has_block_in_range( + pruning_seed: &PruningSeed, + start_height: usize, + length: usize, +) -> bool { pruning_seed.has_full_block(start_height, CRYPTONOTE_MAX_BLOCK_HEIGHT) - && pruning_seed.has_full_block( - start_height + u64::try_from(length).unwrap(), - CRYPTONOTE_MAX_BLOCK_HEIGHT, - ) + && pruning_seed.has_full_block(start_height + length, CRYPTONOTE_MAX_BLOCK_HEIGHT) } /// Calculates the next amount of blocks to request in a batch. diff --git a/p2p/p2p/src/block_downloader/block_queue.rs b/p2p/p2p/src/block_downloader/block_queue.rs index 708eb3e..5a92f49 100644 --- a/p2p/p2p/src/block_downloader/block_queue.rs +++ b/p2p/p2p/src/block_downloader/block_queue.rs @@ -15,7 +15,7 @@ use super::{BlockBatch, BlockDownloadError}; #[derive(Debug, Clone)] pub struct ReadyQueueBatch { /// The start height of the batch. 
- pub start_height: u64, + pub start_height: usize, /// The batch of blocks. pub block_batch: BlockBatch, } @@ -64,7 +64,7 @@ impl BlockQueue { } /// Returns the oldest batch that has not been put in the [`async_buffer`] yet. - pub fn oldest_ready_batch(&self) -> Option { + pub fn oldest_ready_batch(&self) -> Option { self.ready_batches.peek().map(|batch| batch.start_height) } @@ -80,13 +80,13 @@ impl BlockQueue { pub async fn add_incoming_batch( &mut self, new_batch: ReadyQueueBatch, - oldest_in_flight_start_height: Option, + oldest_in_flight_start_height: Option, ) -> Result<(), BlockDownloadError> { self.ready_batches_size += new_batch.block_batch.size; self.ready_batches.push(new_batch); // The height to stop pushing batches into the buffer. - let height_to_stop_at = oldest_in_flight_start_height.unwrap_or(u64::MAX); + let height_to_stop_at = oldest_in_flight_start_height.unwrap_or(usize::MAX); while self .ready_batches @@ -124,14 +124,14 @@ mod tests { use super::*; prop_compose! { - fn ready_batch_strategy()(start_height in 0_u64..500_000_000) -> ReadyQueueBatch { + fn ready_batch_strategy()(start_height in 0_usize..500_000_000) -> ReadyQueueBatch { let (_, peer_handle) = HandleBuilder::new().build(); ReadyQueueBatch { start_height, block_batch: BlockBatch { blocks: vec![], - size: start_height as usize, + size: start_height, peer_handle, }, } diff --git a/p2p/p2p/src/block_downloader/chain_tracker.rs b/p2p/p2p/src/block_downloader/chain_tracker.rs index 786a0de..aacb163 100644 --- a/p2p/p2p/src/block_downloader/chain_tracker.rs +++ b/p2p/p2p/src/block_downloader/chain_tracker.rs @@ -26,7 +26,7 @@ pub struct BlocksToRetrieve { /// The hash of the last block before this batch. pub prev_id: [u8; 32], /// The expected height of the first block in [`BlocksToRetrieve::ids`]. - pub start_height: u64, + pub start_height: usize, /// The peer who told us about this batch. pub peer_who_told_us: InternalPeerID, /// The peer who told us about this batch's handle. 
@@ -54,7 +54,7 @@ pub struct ChainTracker { /// A list of [`ChainEntry`]s, in order. entries: VecDeque>, /// The height of the first block, in the first entry in [`Self::entries`]. - first_height: u64, + first_height: usize, /// The hash of the last block in the last entry. top_seen_hash: [u8; 32], /// The hash of the block one below [`Self::first_height`]. @@ -67,7 +67,7 @@ impl ChainTracker { /// Creates a new chain tracker. pub fn new( new_entry: ChainEntry, - first_height: u64, + first_height: usize, our_genesis: [u8; 32], previous_hash: [u8; 32], ) -> Self { @@ -96,14 +96,14 @@ impl ChainTracker { } /// Returns the height of the highest block we are tracking. - pub fn top_height(&self) -> u64 { + pub fn top_height(&self) -> usize { let top_block_idx = self .entries .iter() .map(|entry| entry.ids.len()) .sum::(); - self.first_height + u64::try_from(top_block_idx).unwrap() + self.first_height + top_block_idx } /// Returns the total number of queued batches for a certain `batch_size`. @@ -171,15 +171,12 @@ impl ChainTracker { // - index of the next pruned block for this seed let end_idx = min( min(entry.ids.len(), max_blocks), - usize::try_from( pruning_seed .get_next_pruned_block(self.first_height, CRYPTONOTE_MAX_BLOCK_HEIGHT) .expect("We use local values to calculate height which should be below the sanity limit") // Use a big value as a fallback if the seed does no pruning. .unwrap_or(CRYPTONOTE_MAX_BLOCK_HEIGHT) - self.first_height, - ) - .unwrap(), ); if end_idx == 0 { @@ -198,7 +195,7 @@ impl ChainTracker { failures: 0, }; - self.first_height += u64::try_from(end_idx).unwrap(); + self.first_height += end_idx; // TODO: improve ByteArrayVec API. 
self.previous_hash = blocks.ids[blocks.ids.len() - 1]; diff --git a/p2p/p2p/src/block_downloader/download_batch.rs b/p2p/p2p/src/block_downloader/download_batch.rs index fbf33b1..ea57ead 100644 --- a/p2p/p2p/src/block_downloader/download_batch.rs +++ b/p2p/p2p/src/block_downloader/download_batch.rs @@ -34,7 +34,7 @@ pub async fn download_batch_task( client: ClientPoolDropGuard, ids: ByteArrayVec<32>, previous_id: [u8; 32], - expected_start_height: u64, + expected_start_height: usize, _attempt: usize, ) -> BlockDownloadTaskResponse { BlockDownloadTaskResponse { @@ -51,7 +51,7 @@ async fn request_batch_from_peer( mut client: ClientPoolDropGuard, ids: ByteArrayVec<32>, previous_id: [u8; 32], - expected_start_height: u64, + expected_start_height: usize, ) -> Result<(ClientPoolDropGuard, BlockBatch), BlockDownloadError> { let request = PeerRequest::Protocol(ProtocolRequest::GetObjects(GetObjectsRequest { blocks: ids.clone(), @@ -105,7 +105,7 @@ async fn request_batch_from_peer( fn deserialize_batch( blocks_response: GetObjectsResponse, - expected_start_height: u64, + expected_start_height: usize, requested_ids: ByteArrayVec<32>, previous_id: [u8; 32], peer_handle: ConnectionHandle, @@ -115,7 +115,7 @@ fn deserialize_batch( .into_par_iter() .enumerate() .map(|(i, block_entry)| { - let expected_height = u64::try_from(i).unwrap() + expected_start_height; + let expected_height = i + expected_start_height; let mut size = block_entry.block.len(); @@ -125,7 +125,7 @@ fn deserialize_batch( let block_hash = block.hash(); // Check the block matches the one requested and the peer sent enough transactions. - if requested_ids[i] != block_hash || block.txs.len() != block_entry.txs.len() { + if requested_ids[i] != block_hash || block.transactions.len() != block_entry.txs.len() { return Err(BlockDownloadError::PeersResponseWasInvalid); } @@ -177,7 +177,7 @@ fn deserialize_batch( .collect::, _>>()?; // Make sure the transactions in the block were the ones the peer sent. 
- let mut expected_txs = block.txs.iter().collect::>(); + let mut expected_txs = block.transactions.iter().collect::>(); for tx in &txs { if !expected_txs.remove(&tx.hash()) { diff --git a/p2p/p2p/src/block_downloader/tests.rs b/p2p/p2p/src/block_downloader/tests.rs index f6ddbfc..86a9a46 100644 --- a/p2p/p2p/src/block_downloader/tests.rs +++ b/p2p/p2p/src/block_downloader/tests.rs @@ -11,7 +11,6 @@ use futures::{FutureExt, StreamExt}; use indexmap::IndexMap; use monero_serai::{ block::{Block, BlockHeader}, - ringct::{RctBase, RctPrunable, RctSignatures}, transaction::{Input, Timelock, Transaction, TransactionPrefix}, }; use proptest::{collection::vec, prelude::*}; @@ -90,30 +89,20 @@ proptest! { prop_compose! { /// Returns a strategy to generate a [`Transaction`] that is valid for the block downloader. - fn dummy_transaction_stragtegy(height: u64) + fn dummy_transaction_stragtegy(height: usize) ( extra in vec(any::(), 0..1_000), timelock in 1_usize..50_000_000, ) -> Transaction { - Transaction { + Transaction::V1 { prefix: TransactionPrefix { - version: 1, - timelock: Timelock::Block(timelock), + additional_timelock: Timelock::Block(timelock), inputs: vec![Input::Gen(height)], outputs: vec![], extra, }, signatures: vec![], - rct_signatures: RctSignatures { - base: RctBase { - fee: 0, - pseudo_outs: vec![], - encrypted_amounts: vec![], - commitments: vec![], - }, - prunable: RctPrunable::Null - }, } } } @@ -121,25 +110,25 @@ prop_compose! { prop_compose! { /// Returns a strategy to generate a [`Block`] that is valid for the block downloader. 
fn dummy_block_stragtegy( - height: u64, + height: usize, previous: [u8; 32], ) ( - miner_tx in dummy_transaction_stragtegy(height), + miner_transaction in dummy_transaction_stragtegy(height), txs in vec(dummy_transaction_stragtegy(height), 0..25) ) -> (Block, Vec) { ( Block { header: BlockHeader { - major_version: 0, - minor_version: 0, + hardfork_version: 0, + hardfork_signal: 0, timestamp: 0, previous, nonce: 0, }, - miner_tx, - txs: txs.iter().map(Transaction::hash).collect(), + miner_transaction, + transactions: txs.iter().map(Transaction::hash).collect(), }, txs ) @@ -167,7 +156,7 @@ prop_compose! { for (height, mut block) in blocks.into_iter().enumerate() { if let Some(last) = blockchain.last() { block.0.header.previous = *last.0; - block.0.miner_tx.prefix.inputs = vec![Input::Gen(height as u64)] + block.0.miner_transaction.prefix_mut().inputs = vec![Input::Gen(height)] } blockchain.insert(block.0.hash(), block); diff --git a/p2p/p2p/src/connection_maintainer.rs b/p2p/p2p/src/connection_maintainer.rs index 2bcf270..3dfd5e8 100644 --- a/p2p/p2p/src/connection_maintainer.rs +++ b/p2p/p2p/src/connection_maintainer.rs @@ -38,7 +38,7 @@ enum OutboundConnectorError { /// set needs specific data that none of the currently connected peers have. pub struct MakeConnectionRequest { /// The block needed that no connected peers have due to pruning. - block_needed: Option, + block_needed: Option, } /// The outbound connection count keeper. diff --git a/p2p/p2p/src/sync_states.rs b/p2p/p2p/src/sync_states.rs index 1484941..70ef6ca 100644 --- a/p2p/p2p/src/sync_states.rs +++ b/p2p/p2p/src/sync_states.rs @@ -99,7 +99,7 @@ impl PeerSyncSvc { fn peers_to_sync_from( &self, current_cum_diff: u128, - block_needed: Option, + block_needed: Option, ) -> Vec> { self.cumulative_difficulties .range((current_cum_diff + 1)..) 
diff --git a/pruning/src/lib.rs b/pruning/src/lib.rs index 96c3609..fdd159c 100644 --- a/pruning/src/lib.rs +++ b/pruning/src/lib.rs @@ -22,13 +22,13 @@ use std::cmp::Ordering; use thiserror::Error; -pub const CRYPTONOTE_MAX_BLOCK_HEIGHT: u64 = 500000000; +pub const CRYPTONOTE_MAX_BLOCK_HEIGHT: usize = 500000000; /// The default log stripes for Monero pruning. pub const CRYPTONOTE_PRUNING_LOG_STRIPES: u32 = 3; /// The amount of blocks that peers keep before another stripe starts storing blocks. -pub const CRYPTONOTE_PRUNING_STRIPE_SIZE: u64 = 4096; +pub const CRYPTONOTE_PRUNING_STRIPE_SIZE: usize = 4096; /// The amount of blocks from the top of the chain that should not be pruned. -pub const CRYPTONOTE_PRUNING_TIP_BLOCKS: u64 = 5500; +pub const CRYPTONOTE_PRUNING_TIP_BLOCKS: usize = 5500; const PRUNING_SEED_LOG_STRIPES_SHIFT: u32 = 7; const PRUNING_SEED_STRIPE_SHIFT: u32 = 0; @@ -127,7 +127,7 @@ impl PruningSeed { } /// Returns `true` if a peer with this pruning seed should have a non-pruned version of a block. 
- pub fn has_full_block(&self, height: u64, blockchain_height: u64) -> bool { + pub fn has_full_block(&self, height: usize, blockchain_height: usize) -> bool { match self { PruningSeed::NotPruned => true, PruningSeed::Pruned(seed) => seed.has_full_block(height, blockchain_height), @@ -151,9 +151,9 @@ impl PruningSeed { /// This function will also error if `block_height` > `blockchain_height` pub fn get_next_pruned_block( &self, - block_height: u64, - blockchain_height: u64, - ) -> Result, PruningError> { + block_height: usize, + blockchain_height: usize, + ) -> Result, PruningError> { Ok(match self { PruningSeed::NotPruned => None, PruningSeed::Pruned(seed) => { @@ -177,9 +177,9 @@ impl PruningSeed { /// pub fn get_next_unpruned_block( &self, - block_height: u64, - blockchain_height: u64, - ) -> Result { + block_height: usize, + blockchain_height: usize, + ) -> Result { Ok(match self { PruningSeed::NotPruned => block_height, PruningSeed::Pruned(seed) => { @@ -312,7 +312,7 @@ impl DecompressedPruningSeed { } /// Returns `true` if a peer with this pruning seed should have a non-pruned version of a block. 
- pub fn has_full_block(&self, height: u64, blockchain_height: u64) -> bool { + pub fn has_full_block(&self, height: usize, blockchain_height: usize) -> bool { match get_block_pruning_stripe(height, blockchain_height, self.log_stripes) { Some(block_stripe) => self.stripe == block_stripe, None => true, @@ -334,9 +334,9 @@ impl DecompressedPruningSeed { /// pub fn get_next_unpruned_block( &self, - block_height: u64, - blockchain_height: u64, - ) -> Result { + block_height: usize, + blockchain_height: usize, + ) -> Result { if block_height > CRYPTONOTE_MAX_BLOCK_HEIGHT || block_height > blockchain_height { return Err(PruningError::BlockHeightTooLarge); } @@ -373,7 +373,7 @@ impl DecompressedPruningSeed { // amt_of_cycles * blocks in a cycle + how many blocks through a cycles until the seed starts storing blocks let calculated_height = cycles_start * (CRYPTONOTE_PRUNING_STRIPE_SIZE << self.log_stripes) - + (self.stripe as u64 - 1) * CRYPTONOTE_PRUNING_STRIPE_SIZE; + + (self.stripe as usize - 1) * CRYPTONOTE_PRUNING_STRIPE_SIZE; if calculated_height + CRYPTONOTE_PRUNING_TIP_BLOCKS > blockchain_height { // if our calculated height is greater than the amount of tip blocks then the start of the tip blocks will be the next un-pruned @@ -400,9 +400,9 @@ impl DecompressedPruningSeed { /// pub fn get_next_pruned_block( &self, - block_height: u64, - blockchain_height: u64, - ) -> Result, PruningError> { + block_height: usize, + blockchain_height: usize, + ) -> Result, PruningError> { if block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height { // If we are within `CRYPTONOTE_PRUNING_TIP_BLOCKS` of the chain we should // not prune blocks. 
@@ -434,16 +434,16 @@ impl DecompressedPruningSeed { } fn get_block_pruning_stripe( - block_height: u64, - blockchain_height: u64, + block_height: usize, + blockchain_height: usize, log_stripe: u32, ) -> Option { if block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height { None } else { Some( - (((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & ((1 << log_stripe) as u64 - 1)) + 1) - as u32, // it's trivial to prove it's ok to us `as` here + (((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & ((1 << log_stripe) as usize - 1)) + + 1) as u32, // it's trivial to prove it's ok to us `as` here ) } } @@ -503,7 +503,7 @@ mod tests { for i in 0_u32..8 { assert_eq!( get_block_pruning_stripe( - (i * 4096) as u64, + (i * 4096) as usize, blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES ) @@ -515,7 +515,7 @@ mod tests { for i in 0_u32..8 { assert_eq!( get_block_pruning_stripe( - 32768 + (i * 4096) as u64, + 32768 + (i * 4096) as usize, blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES ) @@ -527,7 +527,7 @@ mod tests { for i in 1_u32..8 { assert_eq!( get_block_pruning_stripe( - 32767 + (i * 4096) as u64, + 32767 + (i * 4096) as usize, blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES ) @@ -553,23 +553,23 @@ mod tests { for (i, seed) in all_valid_seeds.iter().enumerate() { assert_eq!( seed.get_next_unpruned_block(0, blockchain_height).unwrap(), - i as u64 * 4096 + i * 4096 ) } for (i, seed) in all_valid_seeds.iter().enumerate() { assert_eq!( - seed.get_next_unpruned_block((i as u64 + 1) * 4096, blockchain_height) + seed.get_next_unpruned_block((i + 1) * 4096, blockchain_height) .unwrap(), - i as u64 * 4096 + 32768 + i * 4096 + 32768 ) } for (i, seed) in all_valid_seeds.iter().enumerate() { assert_eq!( - seed.get_next_unpruned_block((i as u64 + 8) * 4096, blockchain_height) + seed.get_next_unpruned_block((i + 8) * 4096, blockchain_height) .unwrap(), - i as u64 * 4096 + 32768 + i * 4096 + 32768 ) } @@ -610,19 +610,19 @@ mod tests { for (i, seed) in 
all_valid_seeds.iter().enumerate() { assert_eq!( - seed.get_next_pruned_block((i as u64 + 1) * 4096, blockchain_height) + seed.get_next_pruned_block((i + 1) * 4096, blockchain_height) .unwrap() .unwrap(), - (i as u64 + 1) * 4096 + (i + 1) * 4096 ) } for (i, seed) in all_valid_seeds.iter().enumerate() { assert_eq!( - seed.get_next_pruned_block((i as u64 + 8) * 4096, blockchain_height) + seed.get_next_pruned_block((i + 8) * 4096, blockchain_height) .unwrap() .unwrap(), - (i as u64 + 9) * 4096 + (i + 9) * 4096 ) } diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index b0eb013..4d358f4 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -65,17 +65,17 @@ pub fn add_block( #[cfg(debug_assertions)] { assert_eq!(block.block.serialize(), block.block_blob); - assert_eq!(block.block.txs.len(), block.txs.len()); + assert_eq!(block.block.transactions.len(), block.txs.len()); for (i, tx) in block.txs.iter().enumerate() { assert_eq!(tx.tx_blob, tx.tx.serialize()); - assert_eq!(tx.tx_hash, block.block.txs[i]); + assert_eq!(tx.tx_hash, block.block.transactions[i]); } } //------------------------------------------------------ Transaction / Outputs / Key Images // Add the miner transaction first. 
{ - let tx = &block.block.miner_tx; + let tx = &block.block.miner_transaction; add_tx(tx, &tx.serialize(), &tx.hash(), &chain_height, tables)?; } @@ -154,8 +154,8 @@ pub fn pop_block( let block = Block::read(&mut block_blob.as_slice())?; //------------------------------------------------------ Transaction / Outputs / Key Images - remove_tx(&block.miner_tx.hash(), tables)?; - for tx_hash in &block.txs { + remove_tx(&block.miner_transaction.hash(), tables)?; + for tx_hash in &block.transactions { remove_tx(tx_hash, tables)?; } @@ -200,8 +200,8 @@ pub fn get_block_extended_header_from_height( #[allow(clippy::cast_possible_truncation)] Ok(ExtendedBlockHeader { cumulative_difficulty, - version: block.header.major_version, - vote: block.header.minor_version, + version: block.header.hardfork_version, + vote: block.header.hardfork_signal, timestamp: block.header.timestamp, block_weight: block_info.weight as usize, long_term_weight: block_info.long_term_weight as usize, @@ -297,7 +297,7 @@ mod test { // HACK: `add_block()` asserts blocks with non-sequential heights // cannot be added, to get around this, manually edit the block height. 
for (height, block) in blocks.iter_mut().enumerate() { - block.height = height as u64; + block.height = height; assert_eq!(block.block.serialize(), block.block_blob); } let generated_coins_sum = blocks @@ -369,8 +369,8 @@ mod test { let b1 = block_header_from_hash; let b2 = block; assert_eq!(b1, block_header_from_height); - assert_eq!(b1.version, b2.block.header.major_version); - assert_eq!(b1.vote, b2.block.header.minor_version); + assert_eq!(b1.version, b2.block.header.hardfork_version); + assert_eq!(b1.vote, b2.block.header.hardfork_signal); assert_eq!(b1.timestamp, b2.block.header.timestamp); assert_eq!(b1.cumulative_difficulty, b2.cumulative_difficulty); assert_eq!(b1.block_weight, b2.weight); @@ -388,7 +388,7 @@ mod test { assert_eq!(tx.tx_blob, tx2.serialize()); assert_eq!(tx.tx_weight, tx2.weight()); - assert_eq!(tx.tx_hash, block.block.txs[i]); + assert_eq!(tx.tx_hash, block.block.transactions[i]); assert_eq!(tx.tx_hash, tx2.hash()); } } @@ -440,7 +440,7 @@ mod test { let mut block = block_v9_tx3().clone(); - block.height = u64::from(u32::MAX) + 1; + block.height = usize::try_from(u32::MAX).unwrap() + 1; add_block(&block, &mut tables).unwrap(); } diff --git a/storage/blockchain/src/ops/blockchain.rs b/storage/blockchain/src/ops/blockchain.rs index e93af3d..65d9ca2 100644 --- a/storage/blockchain/src/ops/blockchain.rs +++ b/storage/blockchain/src/ops/blockchain.rs @@ -25,7 +25,8 @@ use crate::{ pub fn chain_height( table_block_heights: &impl DatabaseRo, ) -> Result { - table_block_heights.len() + #[allow(clippy::cast_possible_truncation)] // we enforce 64-bit + table_block_heights.len().map(|height| height as usize) } /// Retrieve the height of the top block. @@ -47,7 +48,8 @@ pub fn top_block_height( ) -> Result { match table_block_heights.len()? 
{ 0 => Err(RuntimeError::KeyNotFound), - height => Ok(height - 1), + #[allow(clippy::cast_possible_truncation)] // we enforce 64-bit + height => Ok(height as usize - 1), } } @@ -110,7 +112,7 @@ mod test { block_v9_tx3().clone(), block_v16_tx0().clone(), ]; - let blocks_len = u64::try_from(blocks.len()).unwrap(); + let blocks_len = blocks.len(); // Add blocks. { @@ -127,7 +129,6 @@ mod test { ); for (i, block) in blocks.iter_mut().enumerate() { - let i = u64::try_from(i).unwrap(); // HACK: `add_block()` asserts blocks with non-sequential heights // cannot be added, to get around this, manually edit the block height. block.height = i; diff --git a/storage/blockchain/src/ops/output.rs b/storage/blockchain/src/ops/output.rs index dfc52f2..f3453e4 100644 --- a/storage/blockchain/src/ops/output.rs +++ b/storage/blockchain/src/ops/output.rs @@ -2,7 +2,7 @@ //---------------------------------------------------------------------------------------------------- Import use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, Scalar}; -use monero_serai::{transaction::Timelock, H}; +use monero_serai::{generators::H, transaction::Timelock}; use cuprate_database::{ RuntimeError, {DatabaseRo, DatabaseRw}, @@ -157,7 +157,7 @@ pub fn output_to_output_on_chain( ) -> Result { // FIXME: implement lookup table for common values: // - let commitment = ED25519_BASEPOINT_POINT + H() * Scalar::from(amount); + let commitment = ED25519_BASEPOINT_POINT + *H * Scalar::from(amount); let time_lock = if output .output_flags @@ -173,7 +173,7 @@ pub fn output_to_output_on_chain( .unwrap_or(None); Ok(OutputOnChain { - height: u64::from(output.height), + height: output.height as usize, time_lock, key, commitment, @@ -213,7 +213,7 @@ pub fn rct_output_to_output_on_chain( .unwrap_or(None); Ok(OutputOnChain { - height: u64::from(rct_output.height), + height: rct_output.height as usize, time_lock, key, commitment, diff --git a/storage/blockchain/src/ops/tx.rs 
b/storage/blockchain/src/ops/tx.rs index f4a2675..7d608ca 100644 --- a/storage/blockchain/src/ops/tx.rs +++ b/storage/blockchain/src/ops/tx.rs @@ -68,7 +68,7 @@ pub fn add_tx( // so the `u64/usize` is stored without any tag. // // - match tx.prefix.timelock { + match tx.prefix().additional_timelock { Timelock::None => (), Timelock::Block(height) => tables.tx_unlock_time_mut().put(&tx_id, &(height as u64))?, Timelock::Time(time) => tables.tx_unlock_time_mut().put(&tx_id, &time)?, @@ -92,7 +92,7 @@ pub fn add_tx( let mut miner_tx = false; // Key images. - for inputs in &tx.prefix.inputs { + for inputs in &tx.prefix().inputs { match inputs { // Key images. Input::ToKey { key_image, .. } => { @@ -106,70 +106,64 @@ pub fn add_tx( //------------------------------------------------------ Outputs // Output bit flags. // Set to a non-zero bit value if the unlock time is non-zero. - let output_flags = match tx.prefix.timelock { + let output_flags = match tx.prefix().additional_timelock { Timelock::None => OutputFlags::empty(), Timelock::Block(_) | Timelock::Time(_) => OutputFlags::NON_ZERO_UNLOCK_TIME, }; - let mut amount_indices = Vec::with_capacity(tx.prefix.outputs.len()); - - for (i, output) in tx.prefix.outputs.iter().enumerate() { - let key = *output.key.as_bytes(); - - // Outputs with clear amounts. - let amount_index = if let Some(amount) = output.amount { - // RingCT (v2 transaction) miner outputs. - if miner_tx && tx.prefix.version == 2 { - // Create commitment. - // - // FIXME: implement lookup table for common values: - // - let commitment = (ED25519_BASEPOINT_POINT - + monero_serai::H() * Scalar::from(amount)) - .compress() - .to_bytes(); - - add_rct_output( - &RctOutput { - key, - height, - output_flags, - tx_idx: tx_id, - commitment, - }, - tables.rct_outputs_mut(), - )? - // Pre-RingCT outputs. - } else { - add_output( - amount, + let amount_indices = match &tx { + Transaction::V1 { prefix, .. 
} => prefix + .outputs + .iter() + .map(|output| { + // Pre-RingCT outputs. + Ok(add_output( + output.amount.unwrap_or(0), &Output { - key, + key: output.key.0, height, output_flags, tx_idx: tx_id, }, tables, )? - .amount_index - } - // RingCT outputs. - } else { - let commitment = tx.rct_signatures.base.commitments[i].compress().to_bytes(); - add_rct_output( - &RctOutput { - key, - height, - output_flags, - tx_idx: tx_id, - commitment, - }, - tables.rct_outputs_mut(), - )? - }; + .amount_index) + }) + .collect::, RuntimeError>>()?, + Transaction::V2 { prefix, proofs } => prefix + .outputs + .iter() + .enumerate() + .map(|(i, output)| { + // Create commitment. + // + // FIXME: implement lookup table for common values: + // + let commitment = if miner_tx { + ED25519_BASEPOINT_POINT + + *monero_serai::generators::H * Scalar::from(output.amount.unwrap_or(0)) + } else { + proofs + .as_ref() + .expect("A V2 transaction with no RCT proofs is a miner tx") + .base + .commitments[i] + }; - amount_indices.push(amount_index); - } // for each output + // Add the RCT output. + add_rct_output( + &RctOutput { + key: output.key.0, + height, + output_flags, + tx_idx: tx_id, + commitment: commitment.compress().0, + }, + tables.rct_outputs_mut(), + ) + }) + .collect::, _>>()?, + }; tables .tx_outputs_mut() @@ -227,7 +221,7 @@ pub fn remove_tx( //------------------------------------------------------ Key Images // Is this a miner transaction? let mut miner_tx = false; - for inputs in &tx.prefix.inputs { + for inputs in &tx.prefix().inputs { match inputs { // Key images. Input::ToKey { key_image, .. } => { @@ -240,11 +234,11 @@ pub fn remove_tx( //------------------------------------------------------ Outputs // Remove each output in the transaction. - for output in &tx.prefix.outputs { + for output in &tx.prefix().outputs { // Outputs with clear amounts. if let Some(amount) = output.amount { // RingCT miner outputs. 
- if miner_tx && tx.prefix.version == 2 { + if miner_tx && tx.version() == 2 { let amount_index = get_rct_num_outputs(tables.rct_outputs())? - 1; remove_rct_output(&amount_index, tables.rct_outputs_mut())?; // Pre-RingCT outputs. diff --git a/storage/blockchain/src/service/free.rs b/storage/blockchain/src/service/free.rs index 21fb05b..e748bbb 100644 --- a/storage/blockchain/src/service/free.rs +++ b/storage/blockchain/src/service/free.rs @@ -48,9 +48,9 @@ pub fn init( /// /// The height offset is the difference between the top block's height and the block height that should be in that position. #[inline] -pub(super) const fn compact_history_index_to_height_offset( - i: u64, -) -> u64 { +pub(super) const fn compact_history_index_to_height_offset( + i: usize, +) -> usize { // If the position is below the initial blocks just return the position back if i <= INITIAL_BLOCKS { i @@ -66,8 +66,8 @@ pub(super) const fn compact_history_index_to_height_offset( - top_block_height: u64, +pub(super) const fn compact_history_genesis_not_included( + top_block_height: usize, ) -> bool { // If the top block height is less than the initial blocks then it will always be included. // Otherwise, we use the fact that to reach the genesis block this statement must be true (for a @@ -91,7 +91,7 @@ mod tests { proptest! { #[test] - fn compact_history(top_height in 0_u64..500_000_000) { + fn compact_history(top_height in 0_usize..500_000_000) { let mut heights = (0..) .map(compact_history_index_to_height_offset::<11>) .map_while(|i| top_height.checked_sub(i)) diff --git a/storage/blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs index 993c52d..3331a55 100644 --- a/storage/blockchain/src/service/mod.rs +++ b/storage/blockchain/src/service/mod.rs @@ -87,7 +87,7 @@ //! //! // Prepare a request to write block. //! let mut block = block_v16_tx0().clone(); -//! # block.height = 0_u64; // must be 0th height or panic in `add_block()` +//! 
# block.height = 0_usize; // must be 0th height or panic in `add_block()` //! let request = BlockchainWriteRequest::WriteBlock(block); //! //! // Send the request. diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index fbd9f89..207da41 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -278,7 +278,7 @@ fn chain_height(env: &ConcreteEnv) -> ResponseResult { /// [`BlockchainReadRequest::GeneratedCoins`]. #[inline] -fn generated_coins(env: &ConcreteEnv, height: u64) -> ResponseResult { +fn generated_coins(env: &ConcreteEnv, height: usize) -> ResponseResult { // Single-threaded, no `ThreadLocal` required. let env_inner = env.env_inner(); let tx_ro = env_inner.tx_ro()?; @@ -429,7 +429,7 @@ fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult { ); /// The amount of top block IDs in the compact chain. - const INITIAL_BLOCKS: u64 = 11; + const INITIAL_BLOCKS: usize = 11; // rayon is not used here because the amount of block IDs is expected to be small. let mut block_ids = (0..) diff --git a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs index 72b60e2..8d817bb 100644 --- a/storage/blockchain/src/service/tests.rs +++ b/storage/blockchain/src/service/tests.rs @@ -78,7 +78,7 @@ async fn test_template( // cannot be added, to get around this, manually edit the block height. for (i, block_fn) in block_fns.iter().enumerate() { let mut block = block_fn().clone(); - block.height = i as u64; + block.height = i; // Request a block to be written, assert it was written. let request = BlockchainWriteRequest::WriteBlock(block); diff --git a/storage/blockchain/src/types.rs b/storage/blockchain/src/types.rs index a1f28f0..08cde31 100644 --- a/storage/blockchain/src/types.rs +++ b/storage/blockchain/src/types.rs @@ -68,7 +68,7 @@ pub type BlockBlob = StorableVec; pub type BlockHash = [u8; 32]; /// A block's height. 
-pub type BlockHeight = u64; +pub type BlockHeight = usize; /// A key image. pub type KeyImage = [u8; 32]; diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index dd24fd5..a96a9cf 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -6,24 +6,26 @@ license = "MIT" authors = ["Boog900", "hinto-janai"] [dependencies] -cuprate-types = { path = "../types" } -cuprate-helper = { path = "../helper", features = ["map"] } -cuprate-wire = { path = "../net/wire" } -cuprate-p2p-core = { path = "../p2p/p2p-core", features = ["borsh"] } +cuprate-types = { path = "../types" } +cuprate-helper = { path = "../helper", features = ["map"] } +cuprate-wire = { path = "../net/wire" } +cuprate-p2p-core = { path = "../p2p/p2p-core", features = ["borsh"] } -hex = { workspace = true } -hex-literal = { workspace = true } -monero-serai = { workspace = true, features = ["std", "http-rpc"] } -futures = { workspace = true, features = ["std"] } -async-trait = { workspace = true } -tokio = { workspace = true, features = ["full"] } -tokio-util = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -bytes = { workspace = true, features = ["std"] } -tempfile = { workspace = true } -paste = { workspace = true } -borsh = { workspace = true, features = ["derive"]} +hex = { workspace = true } +hex-literal = { workspace = true } +monero-serai = { workspace = true, features = ["std"] } +monero-simple-request-rpc = { workspace = true } +monero-rpc = { workspace = true } +futures = { workspace = true, features = ["std"] } +async-trait = { workspace = true } +tokio = { workspace = true, features = ["full"] } +tokio-util = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +bytes = { workspace = true, features = ["std"] } +tempfile = { workspace = true } +paste = { workspace = true } +borsh = { workspace = true, features = ["derive"]} [dev-dependencies] hex = { workspace = true } diff --git 
a/test-utils/src/data/constants.rs b/test-utils/src/data/constants.rs index c1da6d0..fff0441 100644 --- a/test-utils/src/data/constants.rs +++ b/test-utils/src/data/constants.rs @@ -34,12 +34,12 @@ macro_rules! const_block_blob { #[doc = ""] #[doc = concat!("let block = Block::read(&mut ", stringify!($name), ").unwrap();")] #[doc = ""] - #[doc = concat!("assert_eq!(block.header.major_version, ", $major_version, ");")] - #[doc = concat!("assert_eq!(block.header.minor_version, ", $minor_version, ");")] + #[doc = concat!("assert_eq!(block.header.hardfork_version, ", $major_version, ");")] + #[doc = concat!("assert_eq!(block.header.hardfork_signal, ", $minor_version, ");")] #[doc = concat!("assert_eq!(block.header.timestamp, ", $timestamp, ");")] #[doc = concat!("assert_eq!(block.header.nonce, ", $nonce, ");")] - #[doc = concat!("assert!(matches!(block.miner_tx.prefix.inputs[0], Input::Gen(", $height, ")));")] - #[doc = concat!("assert_eq!(block.txs.len(), ", $tx_len, ");")] + #[doc = concat!("assert!(matches!(block.miner_transaction.prefix().inputs[0], Input::Gen(", $height, ")));")] + #[doc = concat!("assert_eq!(block.transactions.len(), ", $tx_len, ");")] #[doc = concat!("assert_eq!(hex::encode(block.hash()), \"", $hash, "\")")] /// ``` pub const $name: &[u8] = include_bytes!($data_path); @@ -107,7 +107,6 @@ macro_rules! const_tx_blob { timelock: $timelock:expr, // Transaction's timelock (use the real type `Timelock`) input_len: $input_len:literal, // Amount of inputs output_len: $output_len:literal, // Amount of outputs - signatures_len: $signatures_len:literal, // Amount of signatures ) => { #[doc = concat!("Transaction with hash `", $hash, "`.")] /// @@ -117,11 +116,10 @@ macro_rules! 
const_tx_blob { #[doc = ""] #[doc = concat!("let tx = Transaction::read(&mut ", stringify!($name), ").unwrap();")] #[doc = ""] - #[doc = concat!("assert_eq!(tx.prefix.version, ", $version, ");")] - #[doc = concat!("assert_eq!(tx.prefix.timelock, ", stringify!($timelock), ");")] - #[doc = concat!("assert_eq!(tx.prefix.inputs.len(), ", $input_len, ");")] - #[doc = concat!("assert_eq!(tx.prefix.outputs.len(), ", $output_len, ");")] - #[doc = concat!("assert_eq!(tx.signatures.len(), ", $signatures_len, ");")] + #[doc = concat!("assert_eq!(tx.version(), ", $version, ");")] + #[doc = concat!("assert_eq!(tx.prefix().additional_timelock, ", stringify!($timelock), ");")] + #[doc = concat!("assert_eq!(tx.prefix().inputs.len(), ", $input_len, ");")] + #[doc = concat!("assert_eq!(tx.prefix().outputs.len(), ", $output_len, ");")] #[doc = concat!("assert_eq!(hex::encode(tx.hash()), \"", $hash, "\")")] /// ``` pub const $name: &[u8] = include_bytes!($data_path); @@ -136,7 +134,6 @@ const_tx_blob! { timelock: Timelock::Block(100_081), input_len: 1, output_len: 5, - signatures_len: 0, } const_tx_blob! { @@ -147,7 +144,6 @@ const_tx_blob! { timelock: Timelock::None, input_len: 19, output_len: 61, - signatures_len: 19, } const_tx_blob! { @@ -158,7 +154,6 @@ const_tx_blob! { timelock: Timelock::None, input_len: 46, output_len: 46, - signatures_len: 46, } const_tx_blob! { @@ -169,7 +164,6 @@ const_tx_blob! { timelock: Timelock::None, input_len: 1, output_len: 2, - signatures_len: 0, } const_tx_blob! { @@ -180,7 +174,6 @@ const_tx_blob! { timelock: Timelock::None, input_len: 1, output_len: 2, - signatures_len: 0, } const_tx_blob! { @@ -191,7 +184,6 @@ const_tx_blob! { timelock: Timelock::None, input_len: 2, output_len: 2, - signatures_len: 0, } const_tx_blob! { @@ -202,7 +194,6 @@ const_tx_blob! { timelock: Timelock::None, input_len: 2, output_len: 5, - signatures_len: 2, } const_tx_blob! { @@ -213,7 +204,6 @@ const_tx_blob! 
{ timelock: Timelock::None, input_len: 2, output_len: 2, - signatures_len: 0, } //---------------------------------------------------------------------------------------------------- Tests diff --git a/test-utils/src/data/free.rs b/test-utils/src/data/free.rs index ee6f49a..d7f61ae 100644 --- a/test-utils/src/data/free.rs +++ b/test-utils/src/data/free.rs @@ -8,11 +8,11 @@ //---------------------------------------------------------------------------------------------------- Import use std::sync::OnceLock; -use hex_literal::hex; -use monero_serai::{block::Block, transaction::Transaction}; - use cuprate_helper::map::combine_low_high_bits_to_u128; use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; +use hex_literal::hex; +use monero_serai::transaction::Input; +use monero_serai::{block::Block, transaction::Transaction}; use crate::data::constants::{ BLOCK_43BD1F, BLOCK_5ECB7E, BLOCK_F91043, TX_2180A8, TX_3BC7FF, TX_84D48D, TX_9E3F73, @@ -31,7 +31,7 @@ use crate::data::constants::{ struct VerifiedBlockMap { block_blob: &'static [u8], pow_hash: [u8; 32], - height: u64, + height: usize, generated_coins: u64, weight: usize, long_term_weight: usize, @@ -68,11 +68,11 @@ impl VerifiedBlockMap { assert_eq!( txs.len(), - block.txs.len(), + block.transactions.len(), "(deserialized txs).len() != (txs hashes in block).len()" ); - for (tx, tx_hash_in_block) in txs.iter().zip(&block.txs) { + for (tx, tx_hash_in_block) in txs.iter().zip(&block.transactions) { assert_eq!( &tx.tx_hash, tx_hash_in_block, "deserialized tx hash is not the same as the one in the parent block" @@ -103,13 +103,43 @@ fn to_tx_verification_data(tx_blob: impl AsRef<[u8]>) -> VerifiedTransactionInfo let tx = Transaction::read(&mut tx_blob.as_slice()).unwrap(); VerifiedTransactionInformation { tx_weight: tx.weight(), - fee: tx.rct_signatures.base.fee, + fee: tx_fee(&tx), tx_hash: tx.hash(), tx_blob, tx, } } +/// Calculates the fee of the [`Transaction`]. 
+/// +/// # Panics +/// This will panic if the inputs overflow or the transaction outputs too much. +pub fn tx_fee(tx: &Transaction) -> u64 { + let mut fee = 0_u64; + + match &tx { + Transaction::V1 { prefix, .. } => { + for input in &prefix.inputs { + match input { + Input::Gen(_) => return 0, + Input::ToKey { amount, .. } => { + fee = fee.checked_add(amount.unwrap_or(0)).unwrap(); + } + } + } + + for output in &prefix.outputs { + fee.checked_sub(output.amount.unwrap_or(0)).unwrap(); + } + } + Transaction::V2 { proofs, .. } => { + fee = proofs.as_ref().unwrap().base.fee; + } + }; + + fee +} + //---------------------------------------------------------------------------------------------------- Blocks /// Generate a block accessor function with this signature: /// `fn() -> &'static VerifiedBlockInformation` @@ -255,7 +285,6 @@ macro_rules! transaction_verification_data_fn { #[doc = concat!("assert_eq!(tx.tx_blob, ", stringify!($tx_blob), ");")] #[doc = concat!("assert_eq!(tx.tx_weight, ", $weight, ");")] #[doc = concat!("assert_eq!(tx.tx_hash, hex!(\"", $hash, "\"));")] - #[doc = "assert_eq!(tx.fee, tx.tx.rct_signatures.base.fee);"] /// ``` pub fn $fn_name() -> &'static VerifiedTransactionInformation { static TX: OnceLock = OnceLock::new(); diff --git a/test-utils/src/data/mod.rs b/test-utils/src/data/mod.rs index 49ea89a..696c686 100644 --- a/test-utils/src/data/mod.rs +++ b/test-utils/src/data/mod.rs @@ -32,4 +32,6 @@ pub use constants::{ }; mod free; -pub use free::{block_v16_tx0, block_v1_tx2, block_v9_tx3, tx_v1_sig0, tx_v1_sig2, tx_v2_rct3}; +pub use free::{ + block_v16_tx0, block_v1_tx2, block_v9_tx3, tx_fee, tx_v1_sig0, tx_v1_sig2, tx_v2_rct3, +}; diff --git a/test-utils/src/rpc/client.rs b/test-utils/src/rpc/client.rs index 28c49d8..fbe6fb9 100644 --- a/test-utils/src/rpc/client.rs +++ b/test-utils/src/rpc/client.rs @@ -5,13 +5,14 @@ use serde::Deserialize; use serde_json::json; use tokio::task::spawn_blocking; -use monero_serai::{ - block::Block, - 
rpc::{HttpRpc, Rpc}, -}; +use monero_rpc::Rpc; +use monero_serai::block::Block; +use monero_simple_request_rpc::SimpleRequestRpc; use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; +use crate::data::tx_fee; + //---------------------------------------------------------------------------------------------------- Constants /// The default URL used for Monero RPC connections. pub const LOCALHOST_RPC_URL: &str = "http://127.0.0.1:18081"; @@ -20,7 +21,7 @@ pub const LOCALHOST_RPC_URL: &str = "http://127.0.0.1:18081"; /// An HTTP RPC client for Monero. pub struct HttpRpcClient { address: String, - rpc: Rpc, + rpc: SimpleRequestRpc, } impl HttpRpcClient { @@ -40,7 +41,7 @@ impl HttpRpcClient { let address = address.unwrap_or_else(|| LOCALHOST_RPC_URL.to_string()); Self { - rpc: HttpRpc::new(address.clone()).await.unwrap(), + rpc: SimpleRequestRpc::new(address.clone()).await.unwrap(), address, } } @@ -53,7 +54,7 @@ impl HttpRpcClient { /// Access to the inner RPC client for other usage. #[allow(dead_code)] - const fn rpc(&self) -> &Rpc { + const fn rpc(&self) -> &SimpleRequestRpc { &self.rpc } @@ -62,7 +63,7 @@ impl HttpRpcClient { /// # Panics /// This function will panic at any error point, e.g., /// if the node cannot be connected to, if deserialization fails, etc. 
- pub async fn get_verified_block_information(&self, height: u64) -> VerifiedBlockInformation { + pub async fn get_verified_block_information(&self, height: usize) -> VerifiedBlockInformation { #[derive(Debug, Deserialize)] struct Result { blob: String, @@ -75,7 +76,7 @@ impl HttpRpcClient { long_term_weight: usize, cumulative_difficulty: u128, hash: String, - height: u64, + height: usize, pow_hash: String, reward: u64, // generated_coins + total_tx_fees } @@ -111,7 +112,7 @@ impl HttpRpcClient { .unwrap(); let txs: Vec = self - .get_transaction_verification_data(&block.txs) + .get_transaction_verification_data(&block.transactions) .await .collect(); @@ -124,8 +125,8 @@ impl HttpRpcClient { let total_tx_fees = txs.iter().map(|tx| tx.fee).sum::(); let generated_coins = block - .miner_tx - .prefix + .miner_transaction + .prefix() .outputs .iter() .map(|output| output.amount.expect("miner_tx amount was None")) @@ -173,7 +174,7 @@ impl HttpRpcClient { tx_blob: tx.serialize(), tx_weight: tx.weight(), tx_hash, - fee: tx.rct_signatures.base.fee, + fee: tx_fee(&tx), tx, } }) @@ -199,7 +200,7 @@ mod tests { #[allow(clippy::too_many_arguments)] async fn assert_eq( rpc: &HttpRpcClient, - height: u64, + height: usize, block_hash: [u8; 32], pow_hash: [u8; 32], generated_coins: u64, diff --git a/types/Cargo.toml b/types/Cargo.toml index 8f16eb4..99fa978 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -22,5 +22,6 @@ bytes = { workspace = true } curve25519-dalek = { workspace = true } monero-serai = { workspace = true } serde = { workspace = true, features = ["derive"], optional = true } +borsh = { workspace = true, optional = true } [dev-dependencies] \ No newline at end of file diff --git a/types/src/blockchain.rs b/types/src/blockchain.rs index f1a8a75..b502c3f 100644 --- a/types/src/blockchain.rs +++ b/types/src/blockchain.rs @@ -25,12 +25,12 @@ pub enum BlockchainReadRequest { /// Request a block's extended header. /// /// The input is the block's height. 
- BlockExtendedHeader(u64), + BlockExtendedHeader(usize), /// Request a block's hash. /// /// The input is the block's height and the chain it is on. - BlockHash(u64, Chain), + BlockHash(usize, Chain), /// Request to check if we have a block and which [`Chain`] it is on. /// @@ -45,7 +45,7 @@ pub enum BlockchainReadRequest { /// Request a range of block extended headers. /// /// The input is a range of block heights. - BlockExtendedHeaderInRange(Range, Chain), + BlockExtendedHeaderInRange(Range, Chain), /// Request the current chain height. /// @@ -53,7 +53,7 @@ pub enum BlockchainReadRequest { ChainHeight, /// Request the total amount of generated coins (atomic units) at this height. - GeneratedCoins(u64), + GeneratedCoins(usize), /// Request data for multiple outputs. /// @@ -137,7 +137,7 @@ pub enum BlockchainResponse { /// Response to [`BlockchainReadRequest::FindBlock`]. /// /// Inner value is the chain and height of the block if found. - FindBlock(Option<(Chain, u64)>), + FindBlock(Option<(Chain, usize)>), /// Response to [`BlockchainReadRequest::FilterUnknownHashes`]. /// @@ -152,7 +152,7 @@ pub enum BlockchainResponse { /// Response to [`BlockchainReadRequest::ChainHeight`]. /// /// Inner value is the chain height, and the top block's hash. - ChainHeight(u64, [u8; 32]), + ChainHeight(usize, [u8; 32]), /// Response to [`BlockchainReadRequest::GeneratedCoins`]. /// @@ -195,7 +195,7 @@ pub enum BlockchainResponse { /// Contains the index of the first unknown block and its expected height. /// /// This will be [`None`] if all blocks were known. - FindFirstUnknown(Option<(usize, u64)>), + FindFirstUnknown(Option<(usize, usize)>), //------------------------------------------------------ Writes /// Response to [`BlockchainWriteRequest::WriteBlock`]. 
diff --git a/types/src/types.rs b/types/src/types.rs index db31507..a4a7135 100644 --- a/types/src/types.rs +++ b/types/src/types.rs @@ -17,13 +17,13 @@ pub struct ExtendedBlockHeader { /// /// This can also be represented with `cuprate_consensus::HardFork`. /// - /// This is the same value as [`monero_serai::block::BlockHeader::major_version`]. + /// This is the same value as [`monero_serai::block::BlockHeader::hardfork_version`]. pub version: u8, /// The block's hard-fork vote. /// /// This can also be represented with `cuprate_consensus::HardFork`. /// - /// This is the same value as [`monero_serai::block::BlockHeader::minor_version`]. + /// This is the same value as [`monero_serai::block::BlockHeader::hardfork_signal`]. pub vote: u8, /// The UNIX time at which the block was mined. pub timestamp: u64, @@ -72,7 +72,7 @@ pub struct VerifiedBlockInformation { /// /// [`Block::serialize`]. pub block_blob: Vec, - /// All the transactions in the block, excluding the [`Block::miner_tx`]. + /// All the transactions in the block, excluding the [`Block::miner_transaction`]. pub txs: Vec, /// The block's hash. /// @@ -81,7 +81,7 @@ pub struct VerifiedBlockInformation { /// The block's proof-of-work hash. pub pow_hash: [u8; 32], /// The block's height. - pub height: u64, + pub height: usize, /// The amount of generated coins (atomic units) in this block. pub generated_coins: u64, /// The adjusted block size, in bytes. @@ -119,7 +119,7 @@ pub struct AltBlockInformation { /// /// [`Block::serialize`]. pub block_blob: Vec, - /// All the transactions in the block, excluding the [`Block::miner_tx`]. + /// All the transactions in the block, excluding the [`Block::miner_transaction`]. pub txs: Vec, /// The block's hash. /// @@ -128,7 +128,7 @@ pub struct AltBlockInformation { /// The block's proof-of-work hash. pub pow_hash: [u8; 32], /// The block's height. - pub height: u64, + pub height: usize, /// The adjusted block size, in bytes. 
pub weight: usize, /// The long term block weight, which is the weight factored in with previous block weights. @@ -144,7 +144,7 @@ pub struct AltBlockInformation { #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct OutputOnChain { /// The block height this output belongs to. - pub height: u64, + pub height: usize, /// The timelock of this output, if any. pub time_lock: Timelock, /// The public key of this output, if any. From eb65efa7fb4b3f405cccdcbc4bdaea1c2b9ddc9b Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Tue, 6 Aug 2024 21:04:11 -0400 Subject: [PATCH 032/104] ci: add book build tests (#249) * ci: add book build tests * ci: add `mdbook-svgbob` * ci: `|` --- .github/workflows/architecture-book.yml | 37 +++++++++++++++++++++++++ .github/workflows/monero-book.yml | 37 +++++++++++++++++++++++++ .github/workflows/user-book.yml | 37 +++++++++++++++++++++++++ 3 files changed, 111 insertions(+) create mode 100644 .github/workflows/architecture-book.yml create mode 100644 .github/workflows/monero-book.yml create mode 100644 .github/workflows/user-book.yml diff --git a/.github/workflows/architecture-book.yml b/.github/workflows/architecture-book.yml new file mode 100644 index 0000000..5b99ca8 --- /dev/null +++ b/.github/workflows/architecture-book.yml @@ -0,0 +1,37 @@ +# This action attempts to build the architecture book, if changed. + +name: Architecture mdBook + +on: + push: + paths: + - 'books/architecture/**' + +env: + # Version of `mdbook` to install. + MDBOOK_VERSION: 0.4.36 + # Version of `mdbook-last-changed` to install. + # . 
+ MDBOOK_LAST_CHANGED_VERSION: 0.1.4 + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Cache + uses: actions/cache@v4 + with: + path: | + ~/.cargo/bin/mdbook + ~/.cargo/bin/mdbook-last-changed + key: architecture-book + + - name: Install mdBook + run: | + cargo install --version ${MDBOOK_VERSION} mdbook + cargo install --version ${MDBOOK_LAST_CHANGED_VERSION} mdbook-last-changed + + - name: Build + run: mdbook build books/architecture \ No newline at end of file diff --git a/.github/workflows/monero-book.yml b/.github/workflows/monero-book.yml new file mode 100644 index 0000000..33887bc --- /dev/null +++ b/.github/workflows/monero-book.yml @@ -0,0 +1,37 @@ +# This action attempts to build the Monero book, if changed. + +name: Monero mdBook + +on: + push: + paths: + - 'books/protocol/**' + +env: + # Version of `mdbook` to install. + MDBOOK_VERSION: 0.4.36 + # Version of `mdbook-svgbob` to install. + # . + MDBOOK_SVGBOB_VERSION: 0.2.1 + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Cache + uses: actions/cache@v4 + with: + path: | + ~/.cargo/bin/mdbook + ~/.cargo/bin/mdbook-svgbob + key: monero-book + + - name: Install mdBook + run: | + cargo install --version ${MDBOOK_VERSION} mdbook + cargo install --version ${MDBOOK_SVGBOB_VERSION} mdbook-svgbob + + - name: Build + run: mdbook build books/protocol \ No newline at end of file diff --git a/.github/workflows/user-book.yml b/.github/workflows/user-book.yml new file mode 100644 index 0000000..fc95c94 --- /dev/null +++ b/.github/workflows/user-book.yml @@ -0,0 +1,37 @@ +# This action attempts to build the user book, if changed. + +name: User mdBook + +on: + push: + paths: + - 'books/user/**' + +env: + # Version of `mdbook` to install. + MDBOOK_VERSION: 0.4.36 + # Version of `mdbook-last-changed` to install. + # . 
+ MDBOOK_LAST_CHANGED_VERSION: 0.1.4 + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Cache + uses: actions/cache@v4 + with: + path: | + ~/.cargo/bin/mdbook + ~/.cargo/bin/mdbook-last-changed + key: user-book + + - name: Install mdBook + run: | + cargo install --version ${MDBOOK_VERSION} mdbook + cargo install --version ${MDBOOK_LAST_CHANGED_VERSION} mdbook-last-changed + + - name: Build + run: mdbook build books/user \ No newline at end of file From fafa20c20fcc40fb709db49eaf42afddf01f4ed6 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 7 Aug 2024 19:15:22 -0400 Subject: [PATCH 033/104] architecture-book: fill `RPC` section (#243) * books: add `rpc` skeleton * json-rpc * types section * differences * interface * typos * differences: add `json-formatting.md` * rpc: small fixes * appendix: add `cuprate-rpc-handler` to crate list * differences: remove extra fields section, add more info * differences: add `id:0` section --- books/architecture/src/SUMMARY.md | 30 ++-- books/architecture/src/appendix/crates.md | 1 + .../src/rpc/differences/custom-strings.md | 7 + .../src/rpc/differences/http-methods.md | 17 +++ .../architecture/src/rpc/differences/intro.md | 9 ++ .../rpc/differences/json-field-ordering.md | 51 +++++++ .../src/rpc/differences/json-formatting.md | 118 +++++++++++++++ .../rpc/differences/json-rpc-strictness.md | 137 ++++++++++++++++++ .../src/rpc/differences/json-strictness.md | 49 +++++++ .../rpc-calls-with-different-behavior.md | 2 + .../src/rpc/differences/rpc-payment.md | 9 ++ .../rpc/differences/unsupported-rpc-calls.md | 2 + books/architecture/src/rpc/handler.md | 1 - books/architecture/src/rpc/handler/intro.md | 2 + books/architecture/src/rpc/interface.md | 37 ++++- books/architecture/src/rpc/intro.md | 31 +++- books/architecture/src/rpc/json-rpc.md | 10 ++ books/architecture/src/rpc/methods/intro.md | 1 - books/architecture/src/rpc/router.md | 1 - books/architecture/src/rpc/server/intro.md | 2 + 
.../architecture/src/rpc/types/base-types.md | 39 +++++ books/architecture/src/rpc/types/binary.md | 1 - .../src/rpc/types/deserialization.md | 38 +++++ books/architecture/src/rpc/types/intro.md | 32 +++- books/architecture/src/rpc/types/json.md | 1 - books/architecture/src/rpc/types/macro.md | 16 ++ books/architecture/src/rpc/types/metadata.md | 13 ++ .../architecture/src/rpc/types/misc-types.md | 11 ++ books/architecture/src/rpc/types/other.md | 1 - 29 files changed, 650 insertions(+), 19 deletions(-) create mode 100644 books/architecture/src/rpc/differences/custom-strings.md create mode 100644 books/architecture/src/rpc/differences/http-methods.md create mode 100644 books/architecture/src/rpc/differences/intro.md create mode 100644 books/architecture/src/rpc/differences/json-field-ordering.md create mode 100644 books/architecture/src/rpc/differences/json-formatting.md create mode 100644 books/architecture/src/rpc/differences/json-rpc-strictness.md create mode 100644 books/architecture/src/rpc/differences/json-strictness.md create mode 100644 books/architecture/src/rpc/differences/rpc-calls-with-different-behavior.md create mode 100644 books/architecture/src/rpc/differences/rpc-payment.md create mode 100644 books/architecture/src/rpc/differences/unsupported-rpc-calls.md delete mode 100644 books/architecture/src/rpc/handler.md create mode 100644 books/architecture/src/rpc/handler/intro.md create mode 100644 books/architecture/src/rpc/json-rpc.md delete mode 100644 books/architecture/src/rpc/methods/intro.md delete mode 100644 books/architecture/src/rpc/router.md create mode 100644 books/architecture/src/rpc/server/intro.md create mode 100644 books/architecture/src/rpc/types/base-types.md delete mode 100644 books/architecture/src/rpc/types/binary.md create mode 100644 books/architecture/src/rpc/types/deserialization.md delete mode 100644 books/architecture/src/rpc/types/json.md create mode 100644 books/architecture/src/rpc/types/macro.md create mode 100644 
books/architecture/src/rpc/types/metadata.md create mode 100644 books/architecture/src/rpc/types/misc-types.md delete mode 100644 books/architecture/src/rpc/types/other.md diff --git a/books/architecture/src/SUMMARY.md b/books/architecture/src/SUMMARY.md index 3a8b351..ad521df 100644 --- a/books/architecture/src/SUMMARY.md +++ b/books/architecture/src/SUMMARY.md @@ -35,15 +35,27 @@ --- -- [🔴 RPC](rpc/intro.md) - - [⚪️ Types](rpc/types/intro.md) - - [⚪️ JSON](rpc/types/json.md) - - [⚪️ Binary](rpc/types/binary.md) - - [⚪️ Other](rpc/types/other.md) - - [⚪️ Interface](rpc/interface.md) - - [⚪️ Router](rpc/router.md) - - [⚪️ Handler](rpc/handler.md) - - [⚪️ Methods](rpc/methods/intro.md) +- [🟢 RPC](rpc/intro.md) + - [🟡 JSON-RPC 2.0](rpc/json-rpc.md) + - [🟢 The types](rpc/types/intro.md) + - [🟢 Misc types](rpc/types/misc-types.md) + - [🟢 Base RPC types](rpc/types/base-types.md) + - [🟢 The type generator macro](rpc/types/macro.md) + - [🟢 Metadata](rpc/types/metadata.md) + - [🟡 (De)serialization](rpc/types/deserialization.md) + - [🟢 The interface](rpc/interface.md) + - [🔴 The handler](rpc/handler/intro.md) + - [🔴 The server](rpc/server/intro.md) + - [🟢 Differences with `monerod`](rpc/differences/intro.md) + - [🟢 JSON field ordering](rpc/differences/json-field-ordering.md) + - [🟢 JSON formatting](rpc/differences/json-formatting.md) + - [🟢 JSON strictness](rpc/differences/json-strictness.md) + - [🟡 JSON-RPC strictness](rpc/differences/json-rpc-strictness.md) + - [🟡 HTTP methods](rpc/differences/http-methods.md) + - [🟡 RPC payment](rpc/differences/rpc-payment.md) + - [🟢 Custom strings](rpc/differences/custom-strings.md) + - [🔴 Unsupported RPC calls](rpc/differences/unsupported-rpc-calls.md) + - [🔴 RPC calls with different behavior](rpc/differences/rpc-calls-with-different-behavior.md) --- diff --git a/books/architecture/src/appendix/crates.md b/books/architecture/src/appendix/crates.md index 224e678..e5311a8 100644 --- a/books/architecture/src/appendix/crates.md +++ 
b/books/architecture/src/appendix/crates.md @@ -50,6 +50,7 @@ cargo doc --open --package cuprate-blockchain | [`cuprate-json-rpc`](https://doc.cuprate.org/cuprate_json_rpc) | [`rpc/json-rpc/`](https://github.com/Cuprate/cuprate/tree/main/rpc/json-rpc) | JSON-RPC 2.0 implementation | [`cuprate-rpc-types`](https://doc.cuprate.org/cuprate_rpc_types) | [`rpc/types/`](https://github.com/Cuprate/cuprate/tree/main/rpc/types) | Monero RPC types and traits | [`cuprate-rpc-interface`](https://doc.cuprate.org/cuprate_rpc_interface) | [`rpc/interface/`](https://github.com/Cuprate/cuprate/tree/main/rpc/interface) | RPC interface & routing +| [`cuprate-rpc-handler`](https://doc.cuprate.org/cuprate_rpc_handler) | [`rpc/handler/`](https://github.com/Cuprate/cuprate/tree/main/rpc/handler) | RPC inner handlers ## 1-off crates | Crate | In-tree path | Purpose | diff --git a/books/architecture/src/rpc/differences/custom-strings.md b/books/architecture/src/rpc/differences/custom-strings.md new file mode 100644 index 0000000..736c481 --- /dev/null +++ b/books/architecture/src/rpc/differences/custom-strings.md @@ -0,0 +1,7 @@ +# Custom strings +Many JSON response fields contain strings with custom messages. + +This may be error messages, status, etc. + +Although the field + string type will be followed, Cuprate will not always +have the exact same message, particularly when it comes to error messages. \ No newline at end of file diff --git a/books/architecture/src/rpc/differences/http-methods.md b/books/architecture/src/rpc/differences/http-methods.md new file mode 100644 index 0000000..238e2e3 --- /dev/null +++ b/books/architecture/src/rpc/differences/http-methods.md @@ -0,0 +1,17 @@ +# HTTP methods +`monerod` endpoints supports multiple [HTTP methods](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods) +that do not necessarily make sense. 
+ +For example: +```bash +curl \ + http://127.0.0.1:18081/get_limit \ + -H 'Content-Type: application/json' \ + --request DELETE +``` +This is sending an HTTP `DELETE` request, which should be a `GET`. + +`monerod` will respond to this the same as `GET`, `POST`, `PUT`, and `TRACE`. + +## Cuprate's behavior +> TODO: decide allowed HTTP methods for Cuprate . \ No newline at end of file diff --git a/books/architecture/src/rpc/differences/intro.md b/books/architecture/src/rpc/differences/intro.md new file mode 100644 index 0000000..54027e2 --- /dev/null +++ b/books/architecture/src/rpc/differences/intro.md @@ -0,0 +1,9 @@ +# Differences with `monerod` +As noted in the [introduction](../intro.md), `monerod`'s RPC behavior cannot always be perfectly followed by Cuprate. + +The reasoning for the differences can vary from: +- Technical limitations +- Behavior being `monerod`-specific +- Purposeful decision to not support behavior + +This section lays out the details of the differences between `monerod`'s and Cuprate's RPC system. diff --git a/books/architecture/src/rpc/differences/json-field-ordering.md b/books/architecture/src/rpc/differences/json-field-ordering.md new file mode 100644 index 0000000..ad4c1f9 --- /dev/null +++ b/books/architecture/src/rpc/differences/json-field-ordering.md @@ -0,0 +1,51 @@ +# JSON field ordering +When serializing JSON, `monerod` has the behavior to order key fields within a scope alphabetically. 
+ +For example: +```json +{ + "id": "0", + "jsonrpc": "2.0", + "result": { + "blockhashing_blob": "...", + "blocktemplate_blob": "...", + "difficulty": 283305047039, + "difficulty_top64": 0, + "expected_reward": 600000000000, + "height": 3195018, + "next_seed_hash": "", + "prev_hash": "9d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a", + "reserved_offset": 131, + "seed_hash": "e2aa0b7b55042cd48b02e395d78fa66a29815ccc1584e38db2d1f0e8485cd44f", + "seed_height": 3194880, + "status": "OK", + "untrusted": false, + "wide_difficulty": "0x41f64bf3ff" + } +} +``` +In the main `{}`, `id` comes before `jsonrpc`, which comes before `result`. + +The same alphabetical ordering is applied to the fields within `result`. + +Cuprate uses [`serde`](https://docs.rs/serde) for JSON serialization, +which serializes fields based on the _definition_ order, i.e. whatever +order the fields are defined in the code, is the order they will appear +in JSON. + +Some `struct` fields within Cuprate's RPC types happen to be alphabetical, but this is not a guarantee. + +As these are JSON maps, the ordering of fields should not matter, +although this is something to note as the output will technically differ. + +## Example incompatibility +An example of where this leads to incompatibility is if specific +line numbers are depended on to contain specific fields. + +For example, this will print the 10th line: +```bash +curl http://127.0.0.1:18081/json_rpc -d '{"jsonrpc":"2.0","id":"0","method":"get_block_template","params":{"wallet_address":"44GBHzv6ZyQdJkjqZje6KLZ3xSyN1hBSFAnLP6EAqJtCRVzMzZmeXTC2AHKDS9aEDTRKmo6a6o9r9j86pYfhCWDkKjbtcns","reserve_size":60}' -H 'Content-Type: application/json' | sed -n 10p +``` +It will be `"height": 3195018` in `monerod`'s case, but may not necessarily be for Cuprate. + +By all means, this should not be relied upon in the first place, although it is shown as an example. 
diff --git a/books/architecture/src/rpc/differences/json-formatting.md b/books/architecture/src/rpc/differences/json-formatting.md new file mode 100644 index 0000000..2d29d48 --- /dev/null +++ b/books/architecture/src/rpc/differences/json-formatting.md @@ -0,0 +1,118 @@ +# JSON formatting +In general, Cuprate's JSON formatting is very similar to `monerod`, but there are some differences. + +This is a list of those differences. + +## Pretty vs compact +> TODO: decide when handlers are created if we should allow custom formatting. + +Cuprate's RPC (really, [`serde_json`](https://docs.rs/serde_json)) can be configured to use either: +- [Pretty formatting](https://docs.rs/serde_json/latest/serde_json/ser/struct.PrettyFormatter.html) +- [Compact formatting](https://docs.rs/serde_json/latest/serde_json/ser/struct.CompactFormatter.html) + +`monerod` uses something _similar_ to pretty formatting. + +As an example, pretty formatting: +```json +{ + "number": 1, + "array": [ + 0, + 1 + ], + "string": "", + "array_of_objects": [ + { + "x": 1.0, + "y": -1.0 + }, + { + "x": 2.0, + "y": -2.0 + } + ] +} +``` +compact formatting: +```json +{"number":1,"array":[0,1],"string":"","array_of_objects":[{"x":1.0,"y":-1.0},{"x":2.0,"y":-2.0}]} +``` + +## Array of objects +`monerod` will format an array of objects like such: +```json +{ + "array_of_objects": [{ + "x": 0.0, + "y": 0.0, + },{ + "x": 0.0, + "y": 0.0, + },{ + "x": 0.0, + "y": 0.0 + }] +} +``` + +Cuprate will format the above like such: +```json +{ + "array_of_objects": [ + { + "x": 0.0, + "y": 0.0, + }, + { + "x": 0.0, + "y": 0.0, + }, + { + "x": 0.0, + "y": 0.0 + } + ] +} +``` + +## Array of maps containing named objects +An method that contains outputs like this is the `peers` field in the `sync_info` method: +```bash +curl \ + http://127.0.0.1:18081/json_rpc \ + -d '{"jsonrpc":"2.0","id":"0","method":"sync_info"}' \ + -H 'Content-Type: application/json' +``` + +`monerod` will format an array of maps that contains named 
objects like such: +```json +{ + "array": [{ + "named_object": { + "field": "" + } + },{ + "named_object": { + "field": "" + } + }] +} +``` + +Cuprate will format the above like such: +```json +{ + "array": [ + { + "named_object": { + "field": "" + } + }, + { + "named_object": { + "field": "" + } + } + ] +} +``` \ No newline at end of file diff --git a/books/architecture/src/rpc/differences/json-rpc-strictness.md b/books/architecture/src/rpc/differences/json-rpc-strictness.md new file mode 100644 index 0000000..a30d79d --- /dev/null +++ b/books/architecture/src/rpc/differences/json-rpc-strictness.md @@ -0,0 +1,137 @@ +# JSON-RPC strictness +This is a list of behavior that `monerod`'s JSON-RPC implementation allows, that Cuprate's JSON-RPC implementation does not. + +In general, `monerod`'s JSON-RPC is quite lenient, going against the specification in many cases. +Cuprate's JSON-RPC implementation is slightly more strict. + +Cuprate also makes some decisions that are _different_ than `monerod`, but are not necessarily more or less strict. + +## Allowing an incorrect `jsonrpc` field +[The JSON-RPC 2.0 specification states that the `jsonrpc` field must be exactly `"2.0"`](https://www.jsonrpc.org/specification#request_object). 
+ +`monerod` allows `jsonrpc` to: +- Be any string +- Be an empty array +- Be `null` +- Not exist at all + +Examples: +```bash +curl \ + http://127.0.0.1:18081/json_rpc \ + -d '{"jsonrpc":"???","method":"get_block_count"}' \ + -H 'Content-Type: application/json' +``` + +```bash +curl \ + http://127.0.0.1:18081/json_rpc \ + -d '{"jsonrpc":[],"method":"get_block_count"}' \ + -H 'Content-Type: application/json' +``` + +```bash +curl \ + http://127.0.0.1:18081/json_rpc \ + -d '{"jsonrpc":null,"method":"get_block_count"}' \ + -H 'Content-Type: application/json' +``` + +```bash +curl \ + http://127.0.0.1:18081/json_rpc \ + -d '{"method":"get_block_count"}' \ + -H 'Content-Type: application/json' +``` + +## Allowing `id` to be any type +JSON-RPC 2.0 responses must contain the same `id` as the original request. + +However, the [specification states](https://www.jsonrpc.org/specification#request_object): + +> An identifier established by the Client that MUST contain a String, Number, or NULL value if included + +`monerod` does not check this and allows `id` to be any JSON type, for example, a map: +```bash +curl \ + http://127.0.0.1:18081/json_rpc \ + -d '{"jsonrpc":"2.0","id":{"THIS":{"IS":"ALLOWED"}},"method":"get_block_count"}' \ + -H 'Content-Type: application/json' +``` + +The response: +```json +{ + "id": { + "THIS": { + "IS": "ALLOWED" + } + }, + "jsonrpc": "2.0", + "result": { + "count": 3210225, + "status": "OK", + "untrusted": false + } +} +``` + +## Responding with `id:0` on error +The JSON-RPC [specification states](https://www.jsonrpc.org/specification#response_object): + +> If there was an error in detecting the id in the Request object (e.g. Parse error/Invalid Request), it MUST be Null. + +Although, `monerod` will respond with `id:0` in these cases. 
+ +```bash +curl \ + http://127.0.0.1:18081/json_rpc \ + -d '{"jsonrpc":"2.0","id":asdf,"method":"get_block_count"}' \ + -H 'Content-Type: application/json' +``` +Response: +```bash +{ + "error": { + "code": -32700, + "message": "Parse error" + }, + "id": 0, + "jsonrpc": "2.0" +} +``` + +## Responding to notifications +> TODO: decide on Cuprate behavior + +Requests that have no `id` field are "notifications". + +[The JSON-RPC 2.0 specification states that requests without +an `id` field must _not_ be responded to](https://www.jsonrpc.org/specification#notification). + +Example: +```bash +curl \ + http://127.0.0.1:18081/json_rpc \ + -d '{"jsonrpc":"2.0","method":"get_block_count"}' \ + -H 'Content-Type: application/json' +``` + +## Upper/mixed case fields +`monerod` will accept upper/mixed case fields on: +- `jsonrpc` +- `id` + +`method` however, is checked. + +The JSON-RPC 2.0 specification does not outright state what case to support, +although, Cuprate only supports lowercase as supporting upper/mixed case +is more code to add as `serde` by default is case-sensitive on `struct` fields. + +Example: +```bash +curl \ + http://127.0.0.1:18081/json_rpc \ + -d '{"jsONrPc":"2.0","iD":0,"method":"get_block_count"}' \ + -H 'Content-Type: application/json' +``` \ No newline at end of file diff --git a/books/architecture/src/rpc/differences/json-strictness.md b/books/architecture/src/rpc/differences/json-strictness.md new file mode 100644 index 0000000..92b6fc2 --- /dev/null +++ b/books/architecture/src/rpc/differences/json-strictness.md @@ -0,0 +1,49 @@ +# JSON strictness +This is a list of behavior that `monerod`'s JSON parser allows, that Cuprate's JSON parser ([`serde_json`](https://docs.rs/serde_json)) does not. + +In general, `monerod`'s parser is quite lenient, allowing invalid JSON in many cases. +Cuprate's (really, `serde_json`) JSON parser is quite strict, essentially sticking to +the [JSON specification](https://datatracker.ietf.org/doc/html/rfc8259). 
+
+Cuprate also makes some decisions that are _different_ than `monerod`, but are not necessarily more or less strict.
+
+## Missing closing bracket
+`monerod` will accept JSON missing a final closing `}`.
+
+Example:
+```bash
+curl \
+    http://127.0.0.1:18081/json_rpc \
+    -d '{"jsonrpc":"2.0","id":"0","method":"get_block_count"' \
+    -H 'Content-Type: application/json'
+```
+
+## Trailing ending comma
+`monerod` will accept JSON containing a final trailing `,`.
+
+Example:
+```bash
+curl \
+    http://127.0.0.1:18081/json_rpc \
+    -d '{"jsonrpc":"2.0","id":"0","method":"get_block_count",}' \
+    -H 'Content-Type: application/json'
+```
+
+## Allowing `-` in fields
+`monerod` allows `-` as a valid value in certain fields, **not a string `"-"`, but the character `-`**.
+
+The fields where this is allowed seem to be any field `monerod` does not explicitly look for, examples include:
+- `jsonrpc`
+- `id`
+- `params` (where parameters are not expected)
+- Any ignored field
+
+The [JSON-RPC 2.0 specification does state that the response `id` should be `null` upon errors in detecting the request `id`](https://www.jsonrpc.org/specification#response_object), although in this case, this is invalid JSON and should not make it this far. The response will contain the default `id: 0` in this case.
+
+Example:
+```bash
+curl \
+    http://127.0.0.1:18081/json_rpc \
+    -d '{"jsonrpc":-,"id":-,"params":-,"IGNORED_FIELD":-,"method":"get_block_count"}' \
+    -H 'Content-Type: application/json'
+```
\ No newline at end of file
diff --git a/books/architecture/src/rpc/differences/rpc-calls-with-different-behavior.md b/books/architecture/src/rpc/differences/rpc-calls-with-different-behavior.md
new file mode 100644
index 0000000..6c1cb69
--- /dev/null
+++ b/books/architecture/src/rpc/differences/rpc-calls-with-different-behavior.md
@@ -0,0 +1,2 @@
+# RPC calls with different behavior
+> TODO: compile RPC calls with different behavior after handlers are created.
\ No newline at end of file diff --git a/books/architecture/src/rpc/differences/rpc-payment.md b/books/architecture/src/rpc/differences/rpc-payment.md new file mode 100644 index 0000000..ae38c0d --- /dev/null +++ b/books/architecture/src/rpc/differences/rpc-payment.md @@ -0,0 +1,9 @@ +# RPC payment +The RPC payment system in `monerod` is a [pseudo-deprecated](https://github.com/monero-project/monero/issues/8722) +system that allows node operators to be compensated for RPC usage. + +Although this system is pseudo-deprecated, `monerod` still generates related fields in responses. [Cuprate follows this behavior](https://doc.cuprate.org/cuprate_rpc_types/base/struct.AccessResponseBase.html). + +However, the [associated endpoints](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server.h#L182-L187) and [actual functionality](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server.h#L260-L265) are not supported by Cuprate. The associated endpoints will return an error upon invocation. + +> TODO: decide on behavior and document . \ No newline at end of file diff --git a/books/architecture/src/rpc/differences/unsupported-rpc-calls.md b/books/architecture/src/rpc/differences/unsupported-rpc-calls.md new file mode 100644 index 0000000..ac0c2df --- /dev/null +++ b/books/architecture/src/rpc/differences/unsupported-rpc-calls.md @@ -0,0 +1,2 @@ +# Unsupported RPC calls +> TODO: compile unsupported RPC calls after handlers are created. 
\ No newline at end of file diff --git a/books/architecture/src/rpc/handler.md b/books/architecture/src/rpc/handler.md deleted file mode 100644 index fffa45f..0000000 --- a/books/architecture/src/rpc/handler.md +++ /dev/null @@ -1 +0,0 @@ -# ⚪️ Handler diff --git a/books/architecture/src/rpc/handler/intro.md b/books/architecture/src/rpc/handler/intro.md new file mode 100644 index 0000000..e664f5b --- /dev/null +++ b/books/architecture/src/rpc/handler/intro.md @@ -0,0 +1,2 @@ +# The handler +> TODO: fill after `cuprate-rpc-handler` is created. \ No newline at end of file diff --git a/books/architecture/src/rpc/interface.md b/books/architecture/src/rpc/interface.md index 541b744..3557ffb 100644 --- a/books/architecture/src/rpc/interface.md +++ b/books/architecture/src/rpc/interface.md @@ -1 +1,36 @@ -# ⚪️ Interface +# The interface +> This section is short as [`cuprate-rpc-interface`](https://doc.cuprate.org/cuprate_rpc_interface) contains detailed documentation. + +The RPC interface, which includes: + +- Endpoint routing (`/json_rpc`, `/get_blocks.bin`, etc) +- Route function signatures (`async fn json_rpc(...) -> Response`) +- Type (de)serialization +- Any miscellaneous handling (denying `restricted` RPC calls) + +is handled by the [`cuprate-rpc-interface`](https://doc.cuprate.org/cuprate_rpc_interface) crate. + +Essentially, this crate provides the API for the RPC. + +`cuprate-rpc-interface` is built on-top of [`axum`](https://docs.rs/axum) and [`tower`](https://docs.rs/tower), +which are the crates doing the bulk majority of the work. + +## Request -> Response +The functions that map requests to responses are not implemented by `cuprate-rpc-interface` itself, they must be provided by the user, i.e. it can be _customized_. + +In Rust terms, this crate provides you with: +```rust +async fn json_rpc( + state: State, + request: Request, +) -> Response { + /* your handler here */ +} +``` +and you provide the function body. 
+ +The main handler crate is [`cuprate-rpc-handler`](https://doc.cuprate.org/cuprate_rpc_handler). +This crate implements the standard RPC behavior, i.e. it mostly mirrors `monerod`. + +Although, it's worth noting that other implementations are possible, such as an RPC handler that caches blocks, +or an RPC handler that only accepts certain endpoints, or any combination. \ No newline at end of file diff --git a/books/architecture/src/rpc/intro.md b/books/architecture/src/rpc/intro.md index dcfc82b..acfc604 100644 --- a/books/architecture/src/rpc/intro.md +++ b/books/architecture/src/rpc/intro.md @@ -1,3 +1,30 @@ # RPC -- -- \ No newline at end of file +`monerod`'s daemon RPC has three kinds of RPC calls: +1. [JSON-RPC 2.0](https://www.jsonrpc.org/specification) methods, called at the `/json_rpc` endpoint +1. JSON (but not JSON-RPC 2.0) methods called at their own endpoints, e.g. [`/get_height`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_height) +1. Binary ([epee](../../formats-protocols-types/epee.html)) RPC methods called at their own endpoints ending in `.bin`, e.g. [`/get_blocks.bin`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_blocksbin) + +Cuprate's RPC aims to mirror `monerod`'s as much as it practically can. + +This includes, but is not limited to: +- Using the same endpoints +- Receiving the same request data +- Sending the same response data +- Responding with the same HTTP status codes +- Following internal behavior (e.g. [`/pop_blocks`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#pop_blocks)) + +Not all `monerod` behavior can always be followed, however. 
+ +Some are not followed on purpose, some cannot be followed due to technical limitations, and some cannot be due to the behavior being `monerod` specific such as the [`/set_log_categories`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#set_log_categories) endpoint which uses `monerod`'s logging categories. + +Both subtle and large differences between Cuprate's RPC and `monerod`'s RPC are documented in the [Differences with `monerod`](differences/intro.md) section. + +## Main RPC components +The main components that make up Cuprate's RPC are noted below, alongside the equivalent `monerod` code and other notes. + +| Cuprate crate | `monerod` (rough) equivalent | Purpose | Notes | +|---------------|------------------------------|---------|-------| +| [`cuprate-json-rpc`](https://doc.cuprate.org/cuprate_json_rpc) | [`jsonrpc_structs.h`](https://github.com/monero-project/monero/blob/caa62bc9ea1c5f2ffe3ffa440ad230e1de509bfd/contrib/epee/include/net/jsonrpc_structs.h), [`http_server_handlers_map2.h`](https://github.com/monero-project/monero/blob/caa62bc9ea1c5f2ffe3ffa440ad230e1de509bfd/contrib/epee/include/net/http_server_handlers_map2.h) | JSON-RPC 2.0 implementation | `monerod`'s JSON-RPC 2.0 handling is spread across a few files. The first defines some data structures, the second contains macros that (essentially) implement JSON-RPC 2.0. 
+| [`cuprate-rpc-types`](https://doc.cuprate.org/cuprate_rpc_types) | [`core_rpc_server_commands_defs.h`](https://github.com/monero-project/monero/blob/caa62bc9ea1c5f2ffe3ffa440ad230e1de509bfd/src/rpc/core_rpc_server_commands_defs.h) | RPC request/response type definitions and (de)serialization | | +| [`cuprate-rpc-interface`](https://doc.cuprate.org/cuprate_rpc_interface) | [`core_rpc_server.h`](https://github.com/monero-project/monero/blob/caa62bc9ea1c5f2ffe3ffa440ad230e1de509bfd/src/rpc/core_rpc_server.h) | RPC interface, routing, endpoints | | +| [`cuprate-rpc-handler`](https://doc.cuprate.org/cuprate_rpc_handler) | [`core_rpc_server.cpp`](https://github.com/monero-project/monero/blob/caa62bc9ea1c5f2ffe3ffa440ad230e1de509bfd/src/rpc/core_rpc_server.cpp) | RPC request/response handling | These are the "inner handler" functions that turn requests into responses | \ No newline at end of file diff --git a/books/architecture/src/rpc/json-rpc.md b/books/architecture/src/rpc/json-rpc.md new file mode 100644 index 0000000..ac52cd1 --- /dev/null +++ b/books/architecture/src/rpc/json-rpc.md @@ -0,0 +1,10 @@ +# JSON-RPC 2.0 +Cuprate has a standalone crate that implements the [JSON-RPC 2.0](https://www.jsonrpc.org/specification) specification, [`cuprate-json-rpc`](https://doc.cuprate.org/cuprate_json_rpc). The RPC methods at the `/json_rpc` endpoint use this crate's types, functions, and (de)serialization. + +There is nothing too special about Cuprate's implementation. +Any small notes and differences are noted in the crate documentation. + +As such, there is not much to document here, instead, consider reading the very +brief JSON-RPC 2.0 specification, and the `cuprate-json-rpc` crate documentation. + +> TODO: document `method/params` vs flattened `base` when figured out. 
diff --git a/books/architecture/src/rpc/methods/intro.md b/books/architecture/src/rpc/methods/intro.md
deleted file mode 100644
index d4a3a15..0000000
--- a/books/architecture/src/rpc/methods/intro.md
+++ /dev/null
@@ -1 +0,0 @@
-# ⚪️ Methods
diff --git a/books/architecture/src/rpc/router.md b/books/architecture/src/rpc/router.md
deleted file mode 100644
index 1827dd3..0000000
--- a/books/architecture/src/rpc/router.md
+++ /dev/null
@@ -1 +0,0 @@
-# ⚪️ Router
diff --git a/books/architecture/src/rpc/server/intro.md b/books/architecture/src/rpc/server/intro.md
new file mode 100644
index 0000000..0178ce2
--- /dev/null
+++ b/books/architecture/src/rpc/server/intro.md
@@ -0,0 +1,2 @@
+# 🔴 The server
+> TODO: fill after `cuprate-rpc-server` or binary impl is created.
\ No newline at end of file
diff --git a/books/architecture/src/rpc/types/base-types.md b/books/architecture/src/rpc/types/base-types.md
new file mode 100644
index 0000000..feda38d
--- /dev/null
+++ b/books/architecture/src/rpc/types/base-types.md
@@ -0,0 +1,39 @@
+# Base RPC types
+There exists a few "base" types that many types are built on-top of in `monerod`.
+These are also implemented in [`cuprate-rpc-types`](https://doc.cuprate.org/cuprate_rpc_types/base/index.html).
+
+For example, many responses include these 2 fields:
+```json
+{
+  "status": "OK",
+  "untrusted": false,
+}
+```
+This is [`rpc_response_base`](https://github.com/monero-project/monero/blob/cc73fe71162d564ffda8e549b79a350bca53c454/src/rpc/core_rpc_server_commands_defs.h#L101-L112) in `monerod`, and [`ResponseBase`](https://doc.cuprate.org/cuprate_rpc_types/base/struct.ResponseBase.html) in Cuprate.
+
+These types are [flattened](https://serde.rs/field-attrs.html#flatten) into other types, i.e. the fields
+from these base types are injected into the given type.
For example, [`get_block_count`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_block_count)'s response type is defined [like such in Cuprate](https://doc.cuprate.org/cuprate_rpc_types/json/struct.GetBlockCountResponse.html):
+```rust
+struct GetBlockCountResponse {
+    // The fields of this `base` type are directly
+    // injected into `GetBlockCountResponse` during
+    // (de)serialization.
+    //
+    // I.e. it is as if this `base` field were actually these 2 fields:
+    // status: Status,
+    // untrusted: bool,
+    base: ResponseBase,
+    count: u64,
+}
+```
+The JSON output of this type would look something like:
+```json
+{
+  "status": "OK",
+  "untrusted": false,
+  "count": 993163
+}
+```
+
+## RPC payment
+`monerod` also contains RPC base types for the [RPC payment](https://doc.cuprate.org/cuprate_rpc_types/base/struct.AccessResponseBase.html) system. Although the RPC payment system [is](https://github.com/monero-project/monero/issues/8722) [pseudo](https://github.com/monero-project/monero/pull/8724) [deprecated](https://github.com/monero-project/monero/pull/8843), `monerod` still generates these fields in responses, and thus, [so does Cuprate](https://doc.cuprate.org/cuprate_rpc_types/base/struct.AccessResponseBase.html).
\ No newline at end of file
diff --git a/books/architecture/src/rpc/types/binary.md b/books/architecture/src/rpc/types/binary.md
deleted file mode 100644
index dea12fa..0000000
--- a/books/architecture/src/rpc/types/binary.md
+++ /dev/null
@@ -1 +0,0 @@
-# ⚪️ Binary
diff --git a/books/architecture/src/rpc/types/deserialization.md b/books/architecture/src/rpc/types/deserialization.md
new file mode 100644
index 0000000..736ef24
--- /dev/null
+++ b/books/architecture/src/rpc/types/deserialization.md
@@ -0,0 +1,38 @@
+# (De)serialization
+A crucial responsibility of [`cuprate-rpc-types`](https://doc.cuprate.org/cuprate_rpc_types)
+is to provide the _correct_ (de)serialization of types.
+ +The input/output of Cuprate's RPC should match `monerod` (as much as practically possible). + +A simple example of this is that [`/get_height`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_height) +should respond with the exact same data for both `monerod` and Cuprate: +```json +{ + "hash": "7e23a28cfa6df925d5b63940baf60b83c0cbb65da95f49b19e7cf0ce7dd709ce", + "height": 2287217, + "status": "OK", + "untrusted": false +} +``` +Behavior would be considered incompatible if any of the following were true: +- Fields are missing +- Extra fields exist +- Field types are incorrect (`string` instead of `number`, etc) + +## JSON +(De)serialization for JSON is implemented using [`serde`](https://docs.rs/serde) and [`serde_json`](https://docs.rs/serde_json). + +[`cuprate-rpc-interface`](https://doc.cuprate.org/cuprate_rpc_interface) (the main crate responsible +for the actual output) uses `serde_json` for JSON formatting. It is _mostly_ the same formatting as `monerod`, [although there are slight differences](../differences/json-formatting.md). + +Technically, the formatting of the JSON output is not handled by `cuprate-rpc-types`, users are free to choose whatever formatting they desire. + +## Epee +(De)serialization for the [epee binary format](../../formats-protocols-types/epee.md) is +handled by Cuprate's in-house [cuprate-epee-encoding](https://doc.cuprate.org/cuprate_epee_encoding) library. + +## Bitcasted `struct`s +> TODO: + +## Compressed data +> TODO: \ No newline at end of file diff --git a/books/architecture/src/rpc/types/intro.md b/books/architecture/src/rpc/types/intro.md index 22e430c..87b2f6e 100644 --- a/books/architecture/src/rpc/types/intro.md +++ b/books/architecture/src/rpc/types/intro.md @@ -1 +1,31 @@ -# ⚪️ Types +# The types +Cuprate has a crate that defines all the types related to RPC: [`cuprate-rpc-types`](https://doc.cuprate.org/cuprate_rpc_types). 
+ +The main purpose of this crate is to port the types used in `monerod`'s RPC and to re-implement +(de)serialization for those types, whether that be JSON, `epee`, or a custom mix. + +The bulk majority of these types are [request & response types](macro.md), i.e. the inputs +Cuprate's RPC is expecting from users, and the output it will respond with. + +## Example +To showcase an example of the kinds of types defined in this crate, here is a request type: +```rust +#[serde(transparent)] +#[repr(transparent)] +struct OnGetBlockHashRequest { + block_height: [u64; 1], +} +``` +This is the input (`params`) expected in the [`on_get_block_hash`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#on_get_block_hash) method. + +As seen above, the type itself encodes some properties, such as being (de)serialized [transparently](https://serde.rs/container-attrs.html#transparent), and the input being an array with 1 length, rather than a single `u64`. [This is to match the behavior of `monerod`](https://github.com/monero-project/monero/blob/caa62bc9ea1c5f2ffe3ffa440ad230e1de509bfd/src/rpc/core_rpc_server.cpp#L1826). + +An example JSON form of this type would be: +```json +{ + "jsonrpc": "2.0", + "id": "0", + "method": "on_get_block_hash", + "params": [912345] // <- This can (de)serialize as a `OnGetBlockHashRequest` +} +``` \ No newline at end of file diff --git a/books/architecture/src/rpc/types/json.md b/books/architecture/src/rpc/types/json.md deleted file mode 100644 index 0bf9351..0000000 --- a/books/architecture/src/rpc/types/json.md +++ /dev/null @@ -1 +0,0 @@ -# ⚪️ JSON diff --git a/books/architecture/src/rpc/types/macro.md b/books/architecture/src/rpc/types/macro.md new file mode 100644 index 0000000..49fb8bc --- /dev/null +++ b/books/architecture/src/rpc/types/macro.md @@ -0,0 +1,16 @@ +# The type generator macro +Request and response types make up the majority of [`cuprate-rpc-types`](https://doc.cuprate.org/cuprate_rpc_types). 
+ +- Request types are the inputs expected _from_ users +- Response types are what will be outputted _to_ users + +Regardless of being meant for JSON-RPC, binary, or a standalone JSON endpoint, +all request/response types are defined using the ["type generator macro"](https://github.com/Cuprate/cuprate/blob/bd375eae40acfad7c8d0205bb10afd0b78e424d2/rpc/types/src/macros.rs#L46). This macro is important because it defines _all_ request/response types. + +This macro: +- Defines a matching pair of request & response types +- Implements many `derive` traits, e.g. `Clone` on those types +- Implements both `serde` and `epee` on those types +- Automates documentation, tests, etc. + +See [here](https://github.com/Cuprate/cuprate/blob/bd375eae40acfad7c8d0205bb10afd0b78e424d2/rpc/types/src/macros.rs#L46) for example usage of this macro. \ No newline at end of file diff --git a/books/architecture/src/rpc/types/metadata.md b/books/architecture/src/rpc/types/metadata.md new file mode 100644 index 0000000..a9c8c73 --- /dev/null +++ b/books/architecture/src/rpc/types/metadata.md @@ -0,0 +1,13 @@ +# Metadata +[`cuprate-rpc-types`](https://doc.cuprate.org/cuprate_rpc_types) also provides +some `trait`s to access some metadata surrounding RPC data types. + +For example, [`trait RpcCall`](https://doc.cuprate.org/cuprate_rpc_types/trait.RpcCall.html) +allows accessing whether an RPC request is [`restricted`](https://doc.cuprate.org/cuprate_rpc_types/trait.RpcCall.html#associatedconstant.IS_RESTRICTED) or not. + +`monerod` has a boolean permission system. RPC calls can be restricted or not. +If an RPC call is restricted, it will only be allowed on un-restricted RPC servers (`18081`). +If an RPC call is _not_ restricted, it will be allowed on all RPC server types (`18081` & `18089`). + +This metadata is used in crates that build upon `cuprate-rpc-types`, e.g. +to know if an RPC call should be allowed through or not. 
\ No newline at end of file diff --git a/books/architecture/src/rpc/types/misc-types.md b/books/architecture/src/rpc/types/misc-types.md new file mode 100644 index 0000000..07bd516 --- /dev/null +++ b/books/architecture/src/rpc/types/misc-types.md @@ -0,0 +1,11 @@ +# Misc types +Other than the main request/response types, this crate is also responsible +for any [miscellaneous types](https://doc.cuprate.org/cuprate_rpc_types/misc) used within `monerod`'s RPC. + +For example, the `status` field within many RPC responses is defined within +[`cuprate-rpc-types`](https://doc.cuprate.org/cuprate_rpc_types/misc/enum.Status.html). + +Types that aren't requests/responses but exist _within_ request/response +types are also defined in this crate, such as the +[`Distribution`](https://doc.cuprate.org/cuprate_rpc_types/misc/enum.Distribution.html) +structure returned from the [`get_output_distribution`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_output_distribution) method. 
\ No newline at end of file diff --git a/books/architecture/src/rpc/types/other.md b/books/architecture/src/rpc/types/other.md deleted file mode 100644 index 49a36cc..0000000 --- a/books/architecture/src/rpc/types/other.md +++ /dev/null @@ -1 +0,0 @@ -# ⚪️ Other From be2f3f2672b7c8e7af52f2d84ca162f1a7958708 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Thu, 8 Aug 2024 23:56:13 +0000 Subject: [PATCH 034/104] Consensus: move more types to `types` (#250) * move `HardFork` to `types` * fmt * fix tests & doc * fmt * fix clippy * move transaction verification data * misc fixes * doc fixes * update README.md * review fixes --- Cargo.lock | 4 + consensus/fast-sync/src/fast_sync.rs | 4 +- consensus/rules/Cargo.toml | 3 +- consensus/rules/src/blocks.rs | 9 +- consensus/rules/src/hard_forks.rs | 154 +++------------------ consensus/rules/src/lib.rs | 2 +- consensus/rules/src/miner_tx.rs | 4 +- consensus/rules/src/transactions.rs | 32 +---- consensus/src/block.rs | 14 +- consensus/src/block/alt_block.rs | 6 +- consensus/src/block/batch_prepare.rs | 4 +- consensus/src/block/free.rs | 4 +- consensus/src/context/hardforks.rs | 3 +- consensus/src/tests/mock_db.rs | 4 +- consensus/src/transactions.rs | 135 ++++++------------ consensus/src/transactions/free.rs | 33 ++++- storage/blockchain/src/ops/block.rs | 8 +- types/Cargo.toml | 5 + types/README.md | 1 + types/src/hard_fork.rs | 131 ++++++++++++++++++ types/src/lib.rs | 7 +- types/src/transaction_verification_data.rs | 94 +++++++++++++ types/src/types.rs | 8 +- 23 files changed, 381 insertions(+), 288 deletions(-) create mode 100644 types/src/hard_fork.rs create mode 100644 types/src/transaction_verification_data.rs diff --git a/Cargo.lock b/Cargo.lock index c35deec..3945896 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -573,6 +573,7 @@ dependencies = [ "crypto-bigint", "cuprate-cryptonight", "cuprate-helper", + "cuprate-types", "curve25519-dalek", "hex", "hex-literal", @@ -860,7 +861,10 @@ dependencies = [ "cuprate-fixed-bytes", 
"curve25519-dalek", "monero-serai", + "proptest", + "proptest-derive", "serde", + "thiserror", ] [[package]] diff --git a/consensus/fast-sync/src/fast_sync.rs b/consensus/fast-sync/src/fast_sync.rs index b42ae64..35fa674 100644 --- a/consensus/fast-sync/src/fast_sync.rs +++ b/consensus/fast-sync/src/fast_sync.rs @@ -16,7 +16,7 @@ use tower::{Service, ServiceExt}; use cuprate_consensus::{ context::{BlockChainContextRequest, BlockChainContextResponse}, - transactions::TransactionVerificationData, + transactions::new_tx_verification_data, }; use cuprate_consensus_rules::{miner_tx::MinerTxError, ConsensusError}; use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; @@ -257,7 +257,7 @@ where .remove(tx) .ok_or(FastSyncError::TxsIncludedWithBlockIncorrect)?; - let data = TransactionVerificationData::new(tx)?; + let data = new_tx_verification_data(tx)?; verified_txs.push(VerifiedTransactionInformation { tx_blob: data.tx_blob, tx_weight: data.tx_weight, diff --git a/consensus/rules/Cargo.toml b/consensus/rules/Cargo.toml index 311bcc9..2cf03e3 100644 --- a/consensus/rules/Cargo.toml +++ b/consensus/rules/Cargo.toml @@ -7,11 +7,12 @@ authors = ["Boog900"] [features] default = [] -proptest = ["dep:proptest", "dep:proptest-derive"] +proptest = ["dep:proptest", "dep:proptest-derive", "cuprate-types/proptest"] rayon = ["dep:rayon"] [dependencies] cuprate-helper = { path = "../../helper", default-features = false, features = ["std"] } +cuprate-types = { path = "../../types", default-features = false } cuprate-cryptonight = {path = "../../cryptonight"} monero-serai = { workspace = true, features = ["std"] } diff --git a/consensus/rules/src/blocks.rs b/consensus/rules/src/blocks.rs index c36f68b..e118e9a 100644 --- a/consensus/rules/src/blocks.rs +++ b/consensus/rules/src/blocks.rs @@ -6,7 +6,7 @@ use monero_serai::block::Block; use cuprate_cryptonight::*; use crate::{ - current_unix_timestamp, + check_block_version_vote, current_unix_timestamp, 
hard_forks::HardForkError, miner_tx::{check_miner_tx, MinerTxError}, HardFork, @@ -249,11 +249,10 @@ pub fn check_block( block_blob_len: usize, block_chain_ctx: &ContextToVerifyBlock, ) -> Result<(HardFork, u64), BlockError> { - let (version, vote) = HardFork::from_block_header(&block.header)?; + let (version, vote) = + HardFork::from_block_header(&block.header).map_err(|_| HardForkError::HardForkUnknown)?; - block_chain_ctx - .current_hf - .check_block_version_vote(&version, &vote)?; + check_block_version_vote(&block_chain_ctx.current_hf, &version, &vote)?; if let Some(median_timestamp) = block_chain_ctx.median_block_timestamp { check_timestamp(block, median_timestamp)?; diff --git a/consensus/rules/src/hard_forks.rs b/consensus/rules/src/hard_forks.rs index 6b98314..4f786e4 100644 --- a/consensus/rules/src/hard_forks.rs +++ b/consensus/rules/src/hard_forks.rs @@ -1,40 +1,37 @@ //! # Hard-Forks //! -//! Monero use hard-forks to update it's protocol, this module contains a [`HardFork`] enum which is -//! an identifier for every current hard-fork. -//! -//! This module also contains a [`HFVotes`] struct which keeps track of current blockchain voting, and -//! has a method [`HFVotes::current_fork`] to check if the next hard-fork should be activated. -//! -use monero_serai::block::BlockHeader; +//! Monero use hard-forks to update it's protocol, this module contains a [`HFVotes`] struct which +//! keeps track of current blockchain voting, and has a method [`HFVotes::current_fork`] to check +//! if the next hard-fork should be activated. use std::{ collections::VecDeque, fmt::{Display, Formatter}, - time::Duration, }; +pub use cuprate_types::{HardFork, HardForkError}; + #[cfg(test)] mod tests; -/// Target block time for hf 1. -/// -/// ref: -const BLOCK_TIME_V1: Duration = Duration::from_secs(60); -/// Target block time from v2. 
-/// -/// ref: -const BLOCK_TIME_V2: Duration = Duration::from_secs(120); - pub const NUMB_OF_HARD_FORKS: usize = 16; -#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)] -pub enum HardForkError { - #[error("The hard-fork is unknown")] - HardForkUnknown, - #[error("The block is on an incorrect hard-fork")] - VersionIncorrect, - #[error("The block's vote is for a previous hard-fork")] - VoteTooLow, +/// Checks a blocks version and vote, assuming that `hf` is the current hard-fork. +/// +/// ref: +pub fn check_block_version_vote( + hf: &HardFork, + version: &HardFork, + vote: &HardFork, +) -> Result<(), HardForkError> { + // self = current hf + if hf != version { + Err(HardForkError::VersionIncorrect)?; + } + if hf > vote { + Err(HardForkError::VoteTooLow)?; + } + + Ok(()) } /// Information about a given hard-fork. @@ -135,113 +132,6 @@ impl HFsInfo { } } -/// An identifier for every hard-fork Monero has had. -#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] -#[cfg_attr(any(feature = "proptest", test), derive(proptest_derive::Arbitrary))] -#[repr(u8)] -pub enum HardFork { - V1 = 1, - V2, - V3, - V4, - V5, - V6, - V7, - V8, - V9, - V10, - V11, - V12, - V13, - V14, - V15, - // remember to update from_vote! - V16, -} - -impl HardFork { - /// Returns the hard-fork for a blocks `major_version` field. - /// - /// - #[inline] - pub fn from_version(version: u8) -> Result { - Ok(match version { - 1 => HardFork::V1, - 2 => HardFork::V2, - 3 => HardFork::V3, - 4 => HardFork::V4, - 5 => HardFork::V5, - 6 => HardFork::V6, - 7 => HardFork::V7, - 8 => HardFork::V8, - 9 => HardFork::V9, - 10 => HardFork::V10, - 11 => HardFork::V11, - 12 => HardFork::V12, - 13 => HardFork::V13, - 14 => HardFork::V14, - 15 => HardFork::V15, - 16 => HardFork::V16, - _ => return Err(HardForkError::HardForkUnknown), - }) - } - - /// Returns the hard-fork for a blocks `minor_version` (vote) field. 
- /// - /// - #[inline] - pub fn from_vote(vote: u8) -> HardFork { - if vote == 0 { - // A vote of 0 is interpreted as 1 as that's what Monero used to default to. - return HardFork::V1; - } - // This must default to the latest hard-fork! - Self::from_version(vote).unwrap_or(HardFork::V16) - } - - #[inline] - pub fn from_block_header(header: &BlockHeader) -> Result<(HardFork, HardFork), HardForkError> { - Ok(( - HardFork::from_version(header.hardfork_version)?, - HardFork::from_vote(header.hardfork_signal), - )) - } - - /// Returns the next hard-fork. - pub fn next_fork(&self) -> Option { - HardFork::from_version(*self as u8 + 1).ok() - } - - /// Returns the target block time for this hardfork. - /// - /// ref: - pub fn block_time(&self) -> Duration { - match self { - HardFork::V1 => BLOCK_TIME_V1, - _ => BLOCK_TIME_V2, - } - } - - /// Checks a blocks version and vote, assuming that `self` is the current hard-fork. - /// - /// ref: - pub fn check_block_version_vote( - &self, - version: &HardFork, - vote: &HardFork, - ) -> Result<(), HardForkError> { - // self = current hf - if self != version { - Err(HardForkError::VersionIncorrect)?; - } - if self > vote { - Err(HardForkError::VoteTooLow)?; - } - - Ok(()) - } -} - /// A struct holding the current voting state of the blockchain. 
#[derive(Debug, Clone, Eq, PartialEq)] pub struct HFVotes { diff --git a/consensus/rules/src/lib.rs b/consensus/rules/src/lib.rs index 3106cbb..a5f8800 100644 --- a/consensus/rules/src/lib.rs +++ b/consensus/rules/src/lib.rs @@ -9,7 +9,7 @@ pub mod miner_tx; pub mod transactions; pub use decomposed_amount::is_decomposed_amount; -pub use hard_forks::{HFVotes, HFsInfo, HardFork}; +pub use hard_forks::{check_block_version_vote, HFVotes, HFsInfo, HardFork}; pub use transactions::TxVersion; #[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)] diff --git a/consensus/rules/src/miner_tx.rs b/consensus/rules/src/miner_tx.rs index e4927e3..663c95e 100644 --- a/consensus/rules/src/miner_tx.rs +++ b/consensus/rules/src/miner_tx.rs @@ -1,6 +1,8 @@ use monero_serai::transaction::{Input, Output, Timelock, Transaction}; -use crate::{is_decomposed_amount, transactions::check_output_types, HardFork, TxVersion}; +use cuprate_types::TxVersion; + +use crate::{is_decomposed_amount, transactions::check_output_types, HardFork}; #[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)] pub enum MinerTxError { diff --git a/consensus/rules/src/transactions.rs b/consensus/rules/src/transactions.rs index 5a0676b..9c6ad51 100644 --- a/consensus/rules/src/transactions.rs +++ b/consensus/rules/src/transactions.rs @@ -1,8 +1,11 @@ use std::cmp::Ordering; -use monero_serai::ringct::RctType; +use monero_serai::{ + ringct::RctType, + transaction::{Input, Output, Timelock, Transaction}, +}; -use monero_serai::transaction::{Input, Output, Timelock, Transaction}; +pub use cuprate_types::TxVersion; use crate::{ batch_verifier::BatchVerifier, blocks::penalty_free_zone, check_point_canonically_encoded, @@ -75,31 +78,6 @@ pub enum TransactionError { RingCTError(#[from] RingCTError), } -/// An enum representing all valid Monero transaction versions. -#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)] -pub enum TxVersion { - /// Legacy ring signatures. 
- RingSignatures, - /// RingCT - RingCT, -} - -impl TxVersion { - /// Converts a `raw` version value to a [`TxVersion`]. - /// - /// This will return `None` on invalid values. - /// - /// ref: - /// && - pub fn from_raw(version: u8) -> Option { - Some(match version { - 1 => TxVersion::RingSignatures, - 2 => TxVersion::RingCT, - _ => return None, - }) - } -} - //----------------------------------------------------------------------------------------------------------- OUTPUTS /// Checks the output keys are canonically encoded points. diff --git a/consensus/src/block.rs b/consensus/src/block.rs index f5aac5e..e785a6b 100644 --- a/consensus/src/block.rs +++ b/consensus/src/block.rs @@ -16,20 +16,22 @@ use tower::{Service, ServiceExt}; use cuprate_helper::asynch::rayon_spawn_async; use cuprate_types::{ - AltBlockInformation, VerifiedBlockInformation, VerifiedTransactionInformation, + AltBlockInformation, TransactionVerificationData, VerifiedBlockInformation, + VerifiedTransactionInformation, }; use cuprate_consensus_rules::{ blocks::{ calculate_pow_hash, check_block, check_block_pow, randomx_seed_height, BlockError, RandomX, }, + hard_forks::HardForkError, miner_tx::MinerTxError, ConsensusError, HardFork, }; use crate::{ context::{BlockChainContextRequest, BlockChainContextResponse, RawBlockChainContext}, - transactions::{TransactionVerificationData, VerifyTxRequest, VerifyTxResponse}, + transactions::{VerifyTxRequest, VerifyTxResponse}, Database, ExtendedConsensusError, }; @@ -71,8 +73,8 @@ impl PreparedBlockExPow { /// - Hard-fork values are invalid /// - Miner transaction is missing a miner input pub fn new(block: Block) -> Result { - let (hf_version, hf_vote) = - HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?; + let (hf_version, hf_vote) = HardFork::from_block_header(&block.header) + .map_err(|_| BlockError::HardForkError(HardForkError::HardForkUnknown))?; let Some(Input::Gen(height)) = block.miner_transaction.prefix().inputs.first() 
else { Err(ConsensusError::Block(BlockError::MinerTxError( @@ -125,8 +127,8 @@ impl PreparedBlock { block: Block, randomx_vm: Option<&R>, ) -> Result { - let (hf_version, hf_vote) = - HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?; + let (hf_version, hf_vote) = HardFork::from_block_header(&block.header) + .map_err(|_| BlockError::HardForkError(HardForkError::HardForkUnknown))?; let [Input::Gen(height)] = &block.miner_transaction.prefix().inputs[..] else { Err(ConsensusError::Block(BlockError::MinerTxError( diff --git a/consensus/src/block/alt_block.rs b/consensus/src/block/alt_block.rs index 8944083..513697e 100644 --- a/consensus/src/block/alt_block.rs +++ b/consensus/src/block/alt_block.rs @@ -15,7 +15,10 @@ use cuprate_consensus_rules::{ ConsensusError, }; use cuprate_helper::asynch::rayon_spawn_async; -use cuprate_types::{AltBlockInformation, Chain, ChainId, VerifiedTransactionInformation}; +use cuprate_types::{ + AltBlockInformation, Chain, ChainId, TransactionVerificationData, + VerifiedTransactionInformation, +}; use crate::{ block::{free::pull_ordered_transactions, PreparedBlock}, @@ -25,7 +28,6 @@ use crate::{ weight::{self, BlockWeightsCache}, AltChainContextCache, AltChainRequestToken, BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW, }, - transactions::TransactionVerificationData, BlockChainContextRequest, BlockChainContextResponse, ExtendedConsensusError, VerifyBlockResponse, }; diff --git a/consensus/src/block/batch_prepare.rs b/consensus/src/block/batch_prepare.rs index 64d1ccb..9974d6d 100644 --- a/consensus/src/block/batch_prepare.rs +++ b/consensus/src/block/batch_prepare.rs @@ -16,7 +16,7 @@ use cuprate_helper::asynch::rayon_spawn_async; use crate::{ block::{free::pull_ordered_transactions, PreparedBlock, PreparedBlockExPow}, context::rx_vms::RandomXVM, - transactions::TransactionVerificationData, + transactions::new_tx_verification_data, BlockChainContextRequest, BlockChainContextResponse, ExtendedConsensusError, 
VerifyBlockResponse, }; @@ -185,7 +185,7 @@ where let txs = txs .into_par_iter() .map(|tx| { - let tx = TransactionVerificationData::new(tx)?; + let tx = new_tx_verification_data(tx)?; Ok::<_, ConsensusError>((tx.tx_hash, tx)) }) .collect::, _>>()?; diff --git a/consensus/src/block/free.rs b/consensus/src/block/free.rs index 46698e5..e122374 100644 --- a/consensus/src/block/free.rs +++ b/consensus/src/block/free.rs @@ -3,7 +3,9 @@ use std::collections::HashMap; use monero_serai::block::Block; -use crate::{transactions::TransactionVerificationData, ExtendedConsensusError}; +use cuprate_types::TransactionVerificationData; + +use crate::ExtendedConsensusError; /// Returns a list of transactions, pulled from `txs` in the order they are in the [`Block`]. /// diff --git a/consensus/src/context/hardforks.rs b/consensus/src/context/hardforks.rs index 057e1c3..682933d 100644 --- a/consensus/src/context/hardforks.rs +++ b/consensus/src/context/hardforks.rs @@ -95,8 +95,7 @@ impl HardForkState { panic!("Database sent incorrect response!"); }; - let current_hardfork = - HardFork::from_version(ext_header.version).expect("Stored block has invalid hardfork"); + let current_hardfork = ext_header.version; let mut hfs = HardForkState { config, diff --git a/consensus/src/tests/mock_db.rs b/consensus/src/tests/mock_db.rs index b138378..a260cf0 100644 --- a/consensus/src/tests/mock_db.rs +++ b/consensus/src/tests/mock_db.rs @@ -61,8 +61,8 @@ pub struct DummyBlockExtendedHeader { impl From for ExtendedBlockHeader { fn from(value: DummyBlockExtendedHeader) -> Self { ExtendedBlockHeader { - version: value.version.unwrap_or(HardFork::V1) as u8, - vote: value.vote.unwrap_or(HardFork::V1) as u8, + version: value.version.unwrap_or(HardFork::V1), + vote: value.vote.unwrap_or(HardFork::V1).as_u8(), timestamp: value.timestamp.unwrap_or_default(), cumulative_difficulty: value.cumulative_difficulty.unwrap_or_default(), block_weight: value.block_weight.unwrap_or_default(), diff --git 
a/consensus/src/transactions.rs b/consensus/src/transactions.rs index 978407e..91de67c 100644 --- a/consensus/src/transactions.rs +++ b/consensus/src/transactions.rs @@ -7,7 +7,7 @@ use std::{ future::Future, ops::Deref, pin::Pin, - sync::{Arc, Mutex as StdMutex}, + sync::Arc, task::{Context, Poll}, }; @@ -22,10 +22,13 @@ use cuprate_consensus_rules::{ check_decoy_info, check_transaction_contextual, check_transaction_semantic, output_unlocked, TransactionError, }, - ConsensusError, HardFork, TxVersion, + ConsensusError, HardFork, }; use cuprate_helper::asynch::rayon_spawn_async; -use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse}; +use cuprate_types::{ + blockchain::{BlockchainReadRequest, BlockchainResponse}, + CachedVerificationState, TransactionVerificationData, TxVersion, +}; use crate::{ batch_verifier::MultiThreadedBatchVerifier, @@ -36,6 +39,8 @@ use crate::{ pub mod contextual_data; mod free; +pub use free::new_tx_verification_data; + /// A struct representing the type of validation that needs to be completed for this transaction. #[derive(Debug, Copy, Clone, Eq, PartialEq)] enum VerificationNeeded { @@ -45,79 +50,6 @@ enum VerificationNeeded { Contextual, } -/// Represents if a transaction has been fully validated and under what conditions -/// the transaction is valid in the future. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum CachedVerificationState { - /// The transaction has not been validated. - NotVerified, - /// The transaction is valid* if the block represented by this hash is in the blockchain and the [`HardFork`] - /// is the same. - /// - /// *V1 transactions require checks on their ring-length even if this hash is in the blockchain. - ValidAtHashAndHF([u8; 32], HardFork), - /// The transaction is valid* if the block represented by this hash is in the blockchain _and_ this - /// given time lock is unlocked. 
The time lock here will represent the youngest used time based lock - /// (If the transaction uses any time based time locks). This is because time locks are not monotonic - /// so unlocked outputs could become re-locked. - /// - /// *V1 transactions require checks on their ring-length even if this hash is in the blockchain. - ValidAtHashAndHFWithTimeBasedLock([u8; 32], HardFork, Timelock), -} - -impl CachedVerificationState { - /// Returns the block hash this is valid for if in state [`CachedVerificationState::ValidAtHashAndHF`] or [`CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock`]. - fn verified_at_block_hash(&self) -> Option<[u8; 32]> { - match self { - CachedVerificationState::NotVerified => None, - CachedVerificationState::ValidAtHashAndHF(hash, _) - | CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock(hash, _, _) => Some(*hash), - } - } -} - -/// Data needed to verify a transaction. -#[derive(Debug)] -pub struct TransactionVerificationData { - /// The transaction we are verifying - pub tx: Transaction, - /// The [`TxVersion`] of this tx. - pub version: TxVersion, - /// The serialised transaction. - pub tx_blob: Vec, - /// The weight of the transaction. - pub tx_weight: usize, - /// The fee this transaction has paid. - pub fee: u64, - /// The hash of this transaction. - pub tx_hash: [u8; 32], - /// The verification state of this transaction. - pub cached_verification_state: StdMutex, -} - -impl TransactionVerificationData { - /// Creates a new [`TransactionVerificationData`] from the given [`Transaction`]. 
- pub fn new(tx: Transaction) -> Result { - let tx_hash = tx.hash(); - let tx_blob = tx.serialize(); - - let tx_weight = free::tx_weight(&tx, &tx_blob); - - let fee = free::tx_fee(&tx)?; - - Ok(TransactionVerificationData { - tx_hash, - tx_blob, - tx_weight, - fee, - cached_verification_state: StdMutex::new(CachedVerificationState::NotVerified), - version: TxVersion::from_raw(tx.version()) - .ok_or(TransactionError::TransactionVersionInvalid)?, - tx, - }) - } -} - /// A request to verify a transaction. pub enum VerifyTxRequest { /// Verifies a batch of prepared txs. @@ -252,7 +184,7 @@ where tracing::debug!(parent: &span, "prepping transactions for verification."); let txs = rayon_spawn_async(|| { txs.into_par_iter() - .map(|tx| TransactionVerificationData::new(tx).map(Arc::new)) + .map(|tx| new_tx_verification_data(tx).map(Arc::new)) .collect::, _>>() }) .await?; @@ -399,7 +331,7 @@ fn transactions_needing_verification( .push((tx.clone(), VerificationNeeded::SemanticAndContextual)); continue; } - CachedVerificationState::ValidAtHashAndHF(hash, hf) => { + CachedVerificationState::ValidAtHashAndHF { block_hash, hf } => { if current_hf != hf { drop(guard); full_validation_transactions @@ -407,13 +339,17 @@ fn transactions_needing_verification( continue; } - if !hashes_in_main_chain.contains(hash) { + if !hashes_in_main_chain.contains(block_hash) { drop(guard); full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual)); continue; } } - CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock(hash, hf, lock) => { + CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock { + block_hash, + hf, + time_lock, + } => { if current_hf != hf { drop(guard); full_validation_transactions @@ -421,14 +357,14 @@ fn transactions_needing_verification( continue; } - if !hashes_in_main_chain.contains(hash) { + if !hashes_in_main_chain.contains(block_hash) { drop(guard); full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual)); continue; } 
// If the time lock is still locked then the transaction is invalid. - if !output_unlocked(lock, current_chain_height, time_for_time_lock, hf) { + if !output_unlocked(time_lock, current_chain_height, time_for_time_lock, hf) { return Err(ConsensusError::Transaction( TransactionError::OneOrMoreRingMembersLocked, )); @@ -517,10 +453,15 @@ where txs.iter() .zip(txs_ring_member_info) .for_each(|((tx, _), ring)| { - if ring.time_locked_outs.is_empty() { - *tx.cached_verification_state.lock().unwrap() = - CachedVerificationState::ValidAtHashAndHF(top_hash, hf); + *tx.cached_verification_state.lock().unwrap() = if ring.time_locked_outs.is_empty() + { + // no outputs with time-locks used. + CachedVerificationState::ValidAtHashAndHF { + block_hash: top_hash, + hf, + } } else { + // an output with a time-lock was used, check if it was time-based. let youngest_timebased_lock = ring .time_locked_outs .iter() @@ -530,16 +471,20 @@ where }) .min(); - *tx.cached_verification_state.lock().unwrap() = - if let Some(time) = youngest_timebased_lock { - CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock( - top_hash, - hf, - Timelock::Time(time), - ) - } else { - CachedVerificationState::ValidAtHashAndHF(top_hash, hf) - }; + if let Some(time) = youngest_timebased_lock { + // time-based lock used. + CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock { + block_hash: top_hash, + hf, + time_lock: Timelock::Time(time), + } + } else { + // no time-based locked output was used. 
+ CachedVerificationState::ValidAtHashAndHF { + block_hash: top_hash, + hf, + } + } } }); diff --git a/consensus/src/transactions/free.rs b/consensus/src/transactions/free.rs index 5ffd16e..02c5235 100644 --- a/consensus/src/transactions/free.rs +++ b/consensus/src/transactions/free.rs @@ -1,9 +1,40 @@ +use std::sync::Mutex as StdMutex; + use monero_serai::{ ringct::{bulletproofs::Bulletproof, RctType}, transaction::{Input, Transaction}, }; -use cuprate_consensus_rules::transactions::TransactionError; +use cuprate_consensus_rules::{transactions::TransactionError, ConsensusError}; +use cuprate_types::{CachedVerificationState, TransactionVerificationData, TxVersion}; + +/// Creates a new [`TransactionVerificationData`] from a [`Transaction`]. +/// +/// # Errors +/// +/// This function will return [`Err`] if the transaction is malformed, although returning [`Ok`] does +/// not necessarily mean the tx is correctly formed. +pub fn new_tx_verification_data( + tx: Transaction, +) -> Result { + let tx_hash = tx.hash(); + let tx_blob = tx.serialize(); + + let tx_weight = tx_weight(&tx, &tx_blob); + + let fee = tx_fee(&tx)?; + + Ok(TransactionVerificationData { + tx_hash, + version: TxVersion::from_raw(tx.version()) + .ok_or(TransactionError::TransactionVersionInvalid)?, + tx_blob, + tx_weight, + fee, + cached_verification_state: StdMutex::new(CachedVerificationState::NotVerified), + tx, + }) +} /// Calculates the weight of a [`Transaction`]. 
/// diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index 4d358f4..de955c8 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -8,7 +8,7 @@ use cuprate_database::{ RuntimeError, StorableVec, {DatabaseRo, DatabaseRw}, }; use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits}; -use cuprate_types::{ExtendedBlockHeader, VerifiedBlockInformation}; +use cuprate_types::{ExtendedBlockHeader, HardFork, VerifiedBlockInformation}; use crate::{ ops::{ @@ -182,6 +182,7 @@ pub fn get_block_extended_header( /// Same as [`get_block_extended_header`] but with a [`BlockHeight`]. #[doc = doc_error!()] +#[allow(clippy::missing_panics_doc)] // The panic is only possible with a corrupt DB #[inline] pub fn get_block_extended_header_from_height( block_height: &BlockHeight, @@ -200,7 +201,8 @@ pub fn get_block_extended_header_from_height( #[allow(clippy::cast_possible_truncation)] Ok(ExtendedBlockHeader { cumulative_difficulty, - version: block.header.hardfork_version, + version: HardFork::from_version(block.header.hardfork_version) + .expect("Stored block must have a valid hard-fork"), vote: block.header.hardfork_signal, timestamp: block.header.timestamp, block_weight: block_info.weight as usize, @@ -369,7 +371,7 @@ mod test { let b1 = block_header_from_hash; let b2 = block; assert_eq!(b1, block_header_from_height); - assert_eq!(b1.version, b2.block.header.hardfork_version); + assert_eq!(b1.version.as_u8(), b2.block.header.hardfork_version); assert_eq!(b1.vote, b2.block.header.hardfork_signal); assert_eq!(b1.timestamp, b2.block.header.timestamp); assert_eq!(b1.cumulative_difficulty, b2.cumulative_difficulty); diff --git a/types/Cargo.toml b/types/Cargo.toml index 99fa978..4c31cfc 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -13,6 +13,7 @@ default = ["blockchain", "epee", "serde"] blockchain = [] epee = ["dep:cuprate-epee-encoding"] serde = ["dep:serde"] +proptest = 
["dep:proptest", "dep:proptest-derive"] [dependencies] cuprate-epee-encoding = { path = "../net/epee-encoding", optional = true } @@ -23,5 +24,9 @@ curve25519-dalek = { workspace = true } monero-serai = { workspace = true } serde = { workspace = true, features = ["derive"], optional = true } borsh = { workspace = true, optional = true } +thiserror = { workspace = true } + +proptest = { workspace = true, optional = true } +proptest-derive = { workspace = true, optional = true } [dev-dependencies] \ No newline at end of file diff --git a/types/README.md b/types/README.md index 876931f..6dd2388 100644 --- a/types/README.md +++ b/types/README.md @@ -9,3 +9,4 @@ This crate is a kitchen-sink for data types that are shared across Cuprate. | `blockchain` | Enables the `blockchain` module, containing the blockchain database request/response types | `serde` | Enables `serde` on types where applicable | `epee` | Enables `cuprate-epee-encoding` on types where applicable +| `proptest` | Enables `proptest::arbitrary::Arbitrary` on some types diff --git a/types/src/hard_fork.rs b/types/src/hard_fork.rs new file mode 100644 index 0000000..412448e --- /dev/null +++ b/types/src/hard_fork.rs @@ -0,0 +1,131 @@ +//! The [`HardFork`] type. +use std::time::Duration; + +use monero_serai::block::BlockHeader; + +/// Target block time for hf 1. +/// +/// ref: +const BLOCK_TIME_V1: Duration = Duration::from_secs(60); +/// Target block time from v2. +/// +/// ref: +const BLOCK_TIME_V2: Duration = Duration::from_secs(120); + +/// An error working with a [`HardFork`]. +#[derive(Debug, Copy, Clone, PartialEq, Eq, thiserror::Error)] +pub enum HardForkError { + /// The raw-HF value is not a valid [`HardFork`]. + #[error("The hard-fork is unknown")] + HardForkUnknown, + /// The [`HardFork`] version is incorrect. + #[error("The block is on an incorrect hard-fork")] + VersionIncorrect, + /// The block's [`HardFork`] vote was below the current [`HardFork`]. 
+ #[error("The block's vote is for a previous hard-fork")] + VoteTooLow, +} + +/// An identifier for every hard-fork Monero has had. +#[allow(missing_docs)] +#[derive(Default, Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone, Hash)] +#[cfg_attr(any(feature = "proptest"), derive(proptest_derive::Arbitrary))] +#[repr(u8)] +pub enum HardFork { + #[default] + V1 = 1, + V2, + V3, + V4, + V5, + V6, + V7, + V8, + V9, + V10, + V11, + V12, + V13, + V14, + V15, + // remember to update from_vote! + V16, +} + +impl HardFork { + /// Returns the hard-fork for a blocks [`BlockHeader::hardfork_version`] field. + /// + /// ref: + /// + /// # Errors + /// + /// Will return [`Err`] if the version is not a valid [`HardFork`]. + #[inline] + pub const fn from_version(version: u8) -> Result { + Ok(match version { + 1 => Self::V1, + 2 => Self::V2, + 3 => Self::V3, + 4 => Self::V4, + 5 => Self::V5, + 6 => Self::V6, + 7 => Self::V7, + 8 => Self::V8, + 9 => Self::V9, + 10 => Self::V10, + 11 => Self::V11, + 12 => Self::V12, + 13 => Self::V13, + 14 => Self::V14, + 15 => Self::V15, + 16 => Self::V16, + _ => return Err(HardForkError::HardForkUnknown), + }) + } + + /// Returns the hard-fork for a blocks [`BlockHeader::hardfork_signal`] (vote) field. + /// + /// + #[inline] + pub fn from_vote(vote: u8) -> Self { + if vote == 0 { + // A vote of 0 is interpreted as 1 as that's what Monero used to default to. + return Self::V1; + } + // This must default to the latest hard-fork! + Self::from_version(vote).unwrap_or(Self::V16) + } + + /// Returns the [`HardFork`] version and vote from this block header. + /// + /// # Errors + /// + /// Will return [`Err`] if the [`BlockHeader::hardfork_version`] is not a valid [`HardFork`]. 
+ #[inline] + pub fn from_block_header(header: &BlockHeader) -> Result<(Self, Self), HardForkError> { + Ok(( + Self::from_version(header.hardfork_version)?, + Self::from_vote(header.hardfork_signal), + )) + } + + /// Returns the raw hard-fork value, as it would appear in [`BlockHeader::hardfork_version`]. + pub const fn as_u8(&self) -> u8 { + *self as u8 + } + + /// Returns the next hard-fork. + pub fn next_fork(&self) -> Option { + Self::from_version(*self as u8 + 1).ok() + } + + /// Returns the target block time for this hardfork. + /// + /// ref: + pub const fn block_time(&self) -> Duration { + match self { + Self::V1 => BLOCK_TIME_V1, + _ => BLOCK_TIME_V2, + } + } +} diff --git a/types/src/lib.rs b/types/src/lib.rs index bcf6a45..d70f4c3 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -80,9 +80,15 @@ // Documentation for each module is located in the respective file. mod block_complete_entry; +mod hard_fork; +mod transaction_verification_data; mod types; pub use block_complete_entry::{BlockCompleteEntry, PrunedTxBlobEntry, TransactionBlobs}; +pub use hard_fork::{HardFork, HardForkError}; +pub use transaction_verification_data::{ + CachedVerificationState, TransactionVerificationData, TxVersion, +}; pub use types::{ AltBlockInformation, Chain, ChainId, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation, VerifiedTransactionInformation, @@ -91,5 +97,4 @@ pub use types::{ //---------------------------------------------------------------------------------------------------- Feature-gated #[cfg(feature = "blockchain")] pub mod blockchain; - //---------------------------------------------------------------------------------------------------- Private diff --git a/types/src/transaction_verification_data.rs b/types/src/transaction_verification_data.rs new file mode 100644 index 0000000..68e17b8 --- /dev/null +++ b/types/src/transaction_verification_data.rs @@ -0,0 +1,94 @@ +//! Contains [`TransactionVerificationData`] and the related types. 
+ +use std::sync::Mutex; + +use monero_serai::transaction::{Timelock, Transaction}; + +use crate::HardFork; + +/// An enum representing all valid Monero transaction versions. +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)] +pub enum TxVersion { + /// Legacy ring signatures. + RingSignatures, + /// Ring-CT + RingCT, +} + +impl TxVersion { + /// Converts a `raw` version value to a [`TxVersion`]. + /// + /// This will return `None` on invalid values. + /// + /// ref: + /// && + pub const fn from_raw(version: u8) -> Option { + Some(match version { + 1 => Self::RingSignatures, + 2 => Self::RingCT, + _ => return None, + }) + } +} + +/// Represents if a transaction has been fully validated and under what conditions +/// the transaction is valid in the future. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum CachedVerificationState { + /// The transaction has not been validated. + NotVerified, + /// The transaction is valid* if the block represented by this hash is in the blockchain and the [`HardFork`] + /// is the same. + /// + /// *V1 transactions require checks on their ring-length even if this hash is in the blockchain. + ValidAtHashAndHF { + /// The block hash that was in the chain when this transaction was validated. + block_hash: [u8; 32], + /// The hf this transaction was validated against. + hf: HardFork, + }, + /// The transaction is valid* if the block represented by this hash is in the blockchain _and_ this + /// given time lock is unlocked. The time lock here will represent the youngest used time based lock + /// (If the transaction uses any time based time locks). This is because time locks are not monotonic + /// so unlocked outputs could become re-locked. + /// + /// *V1 transactions require checks on their ring-length even if this hash is in the blockchain. + ValidAtHashAndHFWithTimeBasedLock { + /// The block hash that was in the chain when this transaction was validated. 
+ block_hash: [u8; 32], + /// The hf this transaction was validated against. + hf: HardFork, + /// The youngest used time based lock. + time_lock: Timelock, + }, +} + +impl CachedVerificationState { + /// Returns the block hash this is valid for if in state [`CachedVerificationState::ValidAtHashAndHF`] or [`CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock`]. + pub const fn verified_at_block_hash(&self) -> Option<[u8; 32]> { + match self { + Self::NotVerified => None, + Self::ValidAtHashAndHF { block_hash, .. } + | Self::ValidAtHashAndHFWithTimeBasedLock { block_hash, .. } => Some(*block_hash), + } + } +} + +/// Data needed to verify a transaction. +#[derive(Debug)] +pub struct TransactionVerificationData { + /// The transaction we are verifying + pub tx: Transaction, + /// The [`TxVersion`] of this tx. + pub version: TxVersion, + /// The serialised transaction. + pub tx_blob: Vec, + /// The weight of the transaction. + pub tx_weight: usize, + /// The fee this transaction has paid. + pub fee: u64, + /// The hash of this transaction. + pub tx_hash: [u8; 32], + /// The verification state of this transaction. + pub cached_verification_state: Mutex, +} diff --git a/types/src/types.rs b/types/src/types.rs index a4a7135..4b6e2e1 100644 --- a/types/src/types.rs +++ b/types/src/types.rs @@ -7,6 +7,8 @@ use monero_serai::{ transaction::{Timelock, Transaction}, }; +use crate::HardFork; + //---------------------------------------------------------------------------------------------------- ExtendedBlockHeader /// Extended header data of a block. /// @@ -15,13 +17,11 @@ use monero_serai::{ pub struct ExtendedBlockHeader { /// The block's major version. /// - /// This can also be represented with `cuprate_consensus::HardFork`. - /// /// This is the same value as [`monero_serai::block::BlockHeader::hardfork_version`]. - pub version: u8, + pub version: HardFork, /// The block's hard-fork vote. /// - /// This can also be represented with `cuprate_consensus::HardFork`. 
+ /// This can't be represented with [`HardFork`] as raw-votes can be out of the range of [`HardFork`]s. /// /// This is the same value as [`monero_serai::block::BlockHeader::hardfork_signal`]. pub vote: u8, From 0041650fd18c1a38ba7c578fa318d868595a81fc Mon Sep 17 00:00:00 2001 From: Boog900 Date: Thu, 8 Aug 2024 23:56:41 +0000 Subject: [PATCH 035/104] books: change some definitions (#251) update some definitions --- books/protocol/src/consensus_rules.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/books/protocol/src/consensus_rules.md b/books/protocol/src/consensus_rules.md index b06e4a8..e56e148 100644 --- a/books/protocol/src/consensus_rules.md +++ b/books/protocol/src/consensus_rules.md @@ -24,11 +24,12 @@ an Ed25519 point which is not the negative identity and with y coordinate fully Prime Order Point: a point in the prime subgroup. + PoW Hash: -the hash calculated by using the active proof of work function. +the hash calculated from the block hashing blob by using the active proof of work function. Block Hash: -the keccak hash of the block. +the keccak hash of the block hashing blob, this is a slightly different hashing blob than the one used to calculate the `PoW Hash`. Transaction Blob: the raw bytes of a serialized transaction. 
From ca3b149b39163c466e16b683478104f7c1766fc5 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Fri, 9 Aug 2024 15:44:53 -0400 Subject: [PATCH 036/104] ci: fix book CI (#252) * ci: fix book ci * ci: add `--locked` --- .github/workflows/architecture-book.yml | 11 +++++++---- .github/workflows/audit.yml | 1 + .github/workflows/deny.yml | 1 + .github/workflows/monero-book.yml | 11 +++++++---- .github/workflows/user-book.yml | 11 +++++++---- 5 files changed, 23 insertions(+), 12 deletions(-) diff --git a/.github/workflows/architecture-book.yml b/.github/workflows/architecture-book.yml index 5b99ca8..3e6decf 100644 --- a/.github/workflows/architecture-book.yml +++ b/.github/workflows/architecture-book.yml @@ -4,8 +4,11 @@ name: Architecture mdBook on: push: - paths: - - 'books/architecture/**' + branches: ['main'] + paths: ['books/architecture/**'] + pull_request: + paths: ['books/architecture/**'] + workflow_dispatch: env: # Version of `mdbook` to install. @@ -30,8 +33,8 @@ jobs: - name: Install mdBook run: | - cargo install --version ${MDBOOK_VERSION} mdbook - cargo install --version ${MDBOOK_LAST_CHANGED_VERSION} mdbook-last-changed + cargo install --locked --version ${MDBOOK_VERSION} mdbook || echo "mdbook already exists" + cargo install --locked --version ${MDBOOK_LAST_CHANGED_VERSION} mdbook-last-changed || echo "mdbook-last-changed already exists" - name: Build run: mdbook build books/architecture \ No newline at end of file diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml index 9aa4451..84b1995 100644 --- a/.github/workflows/audit.yml +++ b/.github/workflows/audit.yml @@ -7,6 +7,7 @@ on: paths: - '**/Cargo.toml' - '**/Cargo.lock' + workflow_dispatch: env: CARGO_TERM_COLOR: always diff --git a/.github/workflows/deny.yml b/.github/workflows/deny.yml index 1301bb1..2ff2b6f 100644 --- a/.github/workflows/deny.yml +++ b/.github/workflows/deny.yml @@ -7,6 +7,7 @@ on: paths: - '**/Cargo.toml' - '**/Cargo.lock' + workflow_dispatch: env: 
CARGO_TERM_COLOR: always diff --git a/.github/workflows/monero-book.yml b/.github/workflows/monero-book.yml index 33887bc..ab3660e 100644 --- a/.github/workflows/monero-book.yml +++ b/.github/workflows/monero-book.yml @@ -4,8 +4,11 @@ name: Monero mdBook on: push: - paths: - - 'books/protocol/**' + branches: ['main'] + paths: ['books/protocol/**'] + pull_request: + paths: ['books/protocol/**'] + workflow_dispatch: env: # Version of `mdbook` to install. @@ -30,8 +33,8 @@ jobs: - name: Install mdBook run: | - cargo install --version ${MDBOOK_VERSION} mdbook - cargo install --version ${MDBOOK_SVGBOB_VERSION} mdbook-svgbob + cargo install --locked --version ${MDBOOK_VERSION} mdbook || echo "mdbook already exists" + cargo install --locked --version ${MDBOOK_SVGBOB_VERSION} mdbook-svgbob || echo "mdbook-svgbob already exists" - name: Build run: mdbook build books/protocol \ No newline at end of file diff --git a/.github/workflows/user-book.yml b/.github/workflows/user-book.yml index fc95c94..cec2551 100644 --- a/.github/workflows/user-book.yml +++ b/.github/workflows/user-book.yml @@ -4,8 +4,11 @@ name: User mdBook on: push: - paths: - - 'books/user/**' + branches: ['main'] + paths: ['books/user/**'] + pull_request: + paths: ['books/user/**'] + workflow_dispatch: env: # Version of `mdbook` to install. 
@@ -30,8 +33,8 @@ jobs: - name: Install mdBook run: | - cargo install --version ${MDBOOK_VERSION} mdbook - cargo install --version ${MDBOOK_LAST_CHANGED_VERSION} mdbook-last-changed + cargo install --locked --version ${MDBOOK_VERSION} mdbook || echo "mdbook already exists" + cargo install --locked --version ${MDBOOK_LAST_CHANGED_VERSION} mdbook-last-changed || echo "mdbook-last-changed already exists" - name: Build run: mdbook build books/user \ No newline at end of file From bca062d2f5662ab9e7d196c827b71ceb5874c03b Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Fri, 9 Aug 2024 19:08:56 -0400 Subject: [PATCH 037/104] workspace: add `1.78..=1.80` lints (#253) cargo.toml: add lints --- Cargo.toml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 9f0fa27..71efcca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -111,7 +111,10 @@ cast_lossless = "deny" cast_ptr_alignment = "deny" checked_conversions = "deny" cloned_instead_of_copied = "deny" +const_is_empty = "deny" +doc_lazy_continuation = "deny" doc_link_with_quotes = "deny" +duplicated_attributes = "deny" empty_enum = "deny" enum_glob_use = "deny" expl_impl_clone_on_copy = "deny" @@ -128,21 +131,28 @@ invalid_upcast_comparisons = "deny" iter_filter_is_ok = "deny" iter_filter_is_some = "deny" implicit_clone = "deny" +legacy_numeric_constants = "deny" manual_c_str_literals = "deny" +manual_pattern_char_comparison = "deny" manual_instant_elapsed = "deny" +manual_inspect = "deny" manual_is_variant_and = "deny" manual_let_else = "deny" manual_ok_or = "deny" manual_string_new = "deny" +manual_unwrap_or_default = "deny" map_unwrap_or = "deny" match_bool = "deny" match_same_arms = "deny" match_wildcard_for_single_variants = "deny" mismatching_type_param_order = "deny" +missing_transmute_annotations = "deny" mut_mut = "deny" needless_bitwise_bool = "deny" +needless_character_iteration = "deny" needless_continue = "deny" needless_for_each = "deny" +needless_maybe_sized = "deny" 
needless_raw_string_hashes = "deny" no_effect_underscore_binding = "deny" no_mangle_with_rust_abi = "deny" @@ -203,6 +213,7 @@ string_to_string = "deny" rest_pat_in_fully_bound_structs = "deny" redundant_type_annotations = "deny" infinite_loop = "deny" +zero_repeat_side_effects = "deny" # Warm cast_possible_truncation = "deny" @@ -280,6 +291,7 @@ keyword_idents_2024 = "deny" missing_abi = "deny" non_ascii_idents = "deny" non_local_definitions = "deny" +redundant_lifetimes = "deny" single_use_lifetimes = "deny" trivial_casts = "deny" trivial_numeric_casts = "deny" From 59adf6dcf8a1adfbfbf05583866f6c5b0d85b374 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Fri, 9 Aug 2024 19:09:25 -0400 Subject: [PATCH 038/104] `std::mem::{size,align}_of` -> `{size,align}_of` (#254) --- net/epee-encoding/src/container_as_blob.rs | 2 +- net/epee-encoding/src/io.rs | 4 ++-- storage/blockchain/src/types.rs | 5 ----- storage/database/README.md | 4 ++-- storage/database/src/storable.rs | 2 +- 5 files changed, 6 insertions(+), 11 deletions(-) diff --git a/net/epee-encoding/src/container_as_blob.rs b/net/epee-encoding/src/container_as_blob.rs index a4e8808..701ec66 100644 --- a/net/epee-encoding/src/container_as_blob.rs +++ b/net/epee-encoding/src/container_as_blob.rs @@ -70,7 +70,7 @@ pub trait Containerable { macro_rules! 
int_container_able { ($int:ty ) => { impl Containerable for $int { - const SIZE: usize = std::mem::size_of::<$int>(); + const SIZE: usize = size_of::<$int>(); fn from_bytes(bytes: &[u8]) -> Self { <$int>::from_le_bytes(bytes.try_into().unwrap()) diff --git a/net/epee-encoding/src/io.rs b/net/epee-encoding/src/io.rs index 4f4240d..110a1ec 100644 --- a/net/epee-encoding/src/io.rs +++ b/net/epee-encoding/src/io.rs @@ -7,7 +7,7 @@ pub fn checked_read_primitive( b: &mut B, read: impl Fn(&mut B) -> R, ) -> Result { - checked_read(b, read, core::mem::size_of::()) + checked_read(b, read, size_of::()) } #[inline] @@ -25,7 +25,7 @@ pub fn checked_write_primitive( write: impl Fn(&mut B, T), t: T, ) -> Result<()> { - checked_write(b, write, t, core::mem::size_of::()) + checked_write(b, write, t, size_of::()) } #[inline] diff --git a/storage/blockchain/src/types.rs b/storage/blockchain/src/types.rs index 08cde31..9abd175 100644 --- a/storage/blockchain/src/types.rs +++ b/storage/blockchain/src/types.rs @@ -121,7 +121,6 @@ pub type UnlockTime = u64; /// # Size & Alignment /// ```rust /// # use cuprate_blockchain::types::*; -/// # use std::mem::*; /// assert_eq!(size_of::(), 16); /// assert_eq!(align_of::(), 8); /// ``` @@ -174,7 +173,6 @@ impl Key for PreRctOutputId {} /// # Size & Alignment /// ```rust /// # use cuprate_blockchain::types::*; -/// # use std::mem::*; /// assert_eq!(size_of::(), 88); /// assert_eq!(align_of::(), 8); /// ``` @@ -226,7 +224,6 @@ bitflags::bitflags! { /// # Size & Alignment /// ```rust /// # use cuprate_blockchain::types::*; - /// # use std::mem::*; /// assert_eq!(size_of::(), 4); /// assert_eq!(align_of::(), 4); /// ``` @@ -262,7 +259,6 @@ bitflags::bitflags! 
{ /// # Size & Alignment /// ```rust /// # use cuprate_blockchain::types::*; -/// # use std::mem::*; /// assert_eq!(size_of::(), 48); /// assert_eq!(align_of::(), 8); /// ``` @@ -306,7 +302,6 @@ pub struct Output { /// # Size & Alignment /// ```rust /// # use cuprate_blockchain::types::*; -/// # use std::mem::*; /// assert_eq!(size_of::(), 80); /// assert_eq!(align_of::(), 8); /// ``` diff --git a/storage/database/README.md b/storage/database/README.md index aed738e..c805ab0 100644 --- a/storage/database/README.md +++ b/storage/database/README.md @@ -66,8 +66,8 @@ As `ConcreteEnv` is just a re-exposed type which has varying inner types, it means some properties will change depending on the backend used. For example: -- [`std::mem::size_of::`] -- [`std::mem::align_of::`] +- [`size_of::`] +- [`align_of::`] Things like these functions are affected by the backend and inner data, and should not be relied upon. This extends to any `struct/enum` that contains `ConcreteEnv`. diff --git a/storage/database/src/storable.rs b/storage/database/src/storable.rs index 100ed44..b153568 100644 --- a/storage/database/src/storable.rs +++ b/storage/database/src/storable.rs @@ -109,7 +109,7 @@ impl Storable for T where Self: Pod + Debug, { - const BYTE_LENGTH: Option = Some(std::mem::size_of::()); + const BYTE_LENGTH: Option = Some(size_of::()); #[inline] fn as_bytes(&self) -> &[u8] { From aeb070ae8dc7df5f5e1589befd2c017bb6405ccc Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Tue, 20 Aug 2024 17:53:32 -0400 Subject: [PATCH 039/104] Replace `OnceLock` + `fn` with `LazyLock` (#256) * `consensus/` * `helper/` * `test-utils/` * `storage/` * fix docs + tests + lints * decomposed_amount: remove `LazyLock` * clippy --- consensus/rules/src/decomposed_amount.rs | 59 ++++----- consensus/rules/src/transactions/tests.rs | 4 +- helper/src/fs.rs | 133 +++++++++----------- storage/blockchain/src/config/config.rs | 20 +-- storage/blockchain/src/ops/block.rs | 12 +- 
storage/blockchain/src/ops/blockchain.rs | 8 +- storage/blockchain/src/ops/mod.rs | 4 +- storage/blockchain/src/ops/tx.rs | 4 +- storage/blockchain/src/service/mod.rs | 4 +- storage/blockchain/src/service/tests.rs | 20 +-- storage/database/src/backend/tests.rs | 2 +- storage/database/src/resize.rs | 33 ++--- test-utils/src/data/mod.rs | 14 +-- test-utils/src/data/{free.rs => statics.rs} | 96 +++++++------- 14 files changed, 186 insertions(+), 227 deletions(-) rename test-utils/src/data/{free.rs => statics.rs} (82%) diff --git a/consensus/rules/src/decomposed_amount.rs b/consensus/rules/src/decomposed_amount.rs index 5934814..a8821f3 100644 --- a/consensus/rules/src/decomposed_amount.rs +++ b/consensus/rules/src/decomposed_amount.rs @@ -1,36 +1,27 @@ -use std::sync::OnceLock; - -/// Decomposed amount table. -/// -static DECOMPOSED_AMOUNTS: OnceLock<[u64; 172]> = OnceLock::new(); - #[rustfmt::skip] -pub fn decomposed_amounts() -> &'static [u64; 172] { - DECOMPOSED_AMOUNTS.get_or_init(|| { - [ - 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 20, 30, 40, 50, 60, 70, 80, 90, - 100, 200, 300, 400, 500, 600, 700, 800, 900, - 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, - 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000, - 100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000, - 1000000, 2000000, 3000000, 4000000, 5000000, 6000000, 7000000, 8000000, 9000000, - 10000000, 20000000, 30000000, 40000000, 50000000, 60000000, 70000000, 80000000, 90000000, - 100000000, 200000000, 300000000, 400000000, 500000000, 600000000, 700000000, 800000000, 900000000, - 1000000000, 2000000000, 3000000000, 4000000000, 5000000000, 6000000000, 7000000000, 8000000000, 9000000000, - 10000000000, 20000000000, 30000000000, 40000000000, 50000000000, 60000000000, 70000000000, 80000000000, 90000000000, - 100000000000, 200000000000, 300000000000, 400000000000, 500000000000, 600000000000, 700000000000, 800000000000, 900000000000, - 1000000000000, 2000000000000, 3000000000000, 
4000000000000, 5000000000000, 6000000000000, 7000000000000, 8000000000000, 9000000000000, - 10000000000000, 20000000000000, 30000000000000, 40000000000000, 50000000000000, 60000000000000, 70000000000000, 80000000000000, 90000000000000, - 100000000000000, 200000000000000, 300000000000000, 400000000000000, 500000000000000, 600000000000000, 700000000000000, 800000000000000, 900000000000000, - 1000000000000000, 2000000000000000, 3000000000000000, 4000000000000000, 5000000000000000, 6000000000000000, 7000000000000000, 8000000000000000, 9000000000000000, - 10000000000000000, 20000000000000000, 30000000000000000, 40000000000000000, 50000000000000000, 60000000000000000, 70000000000000000, 80000000000000000, 90000000000000000, - 100000000000000000, 200000000000000000, 300000000000000000, 400000000000000000, 500000000000000000, 600000000000000000, 700000000000000000, 800000000000000000, 900000000000000000, - 1000000000000000000, 2000000000000000000, 3000000000000000000, 4000000000000000000, 5000000000000000000, 6000000000000000000, 7000000000000000000, 8000000000000000000, 9000000000000000000, - 10000000000000000000 - ] - }) -} +/// Decomposed amount table. 
+pub static DECOMPOSED_AMOUNTS: [u64; 172] = [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 20, 30, 40, 50, 60, 70, 80, 90, + 100, 200, 300, 400, 500, 600, 700, 800, 900, + 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, + 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000, + 100000, 200000, 300000, 400000, 500000, 600000, 700000, 800000, 900000, + 1000000, 2000000, 3000000, 4000000, 5000000, 6000000, 7000000, 8000000, 9000000, + 10000000, 20000000, 30000000, 40000000, 50000000, 60000000, 70000000, 80000000, 90000000, + 100000000, 200000000, 300000000, 400000000, 500000000, 600000000, 700000000, 800000000, 900000000, + 1000000000, 2000000000, 3000000000, 4000000000, 5000000000, 6000000000, 7000000000, 8000000000, 9000000000, + 10000000000, 20000000000, 30000000000, 40000000000, 50000000000, 60000000000, 70000000000, 80000000000, 90000000000, + 100000000000, 200000000000, 300000000000, 400000000000, 500000000000, 600000000000, 700000000000, 800000000000, 900000000000, + 1000000000000, 2000000000000, 3000000000000, 4000000000000, 5000000000000, 6000000000000, 7000000000000, 8000000000000, 9000000000000, + 10000000000000, 20000000000000, 30000000000000, 40000000000000, 50000000000000, 60000000000000, 70000000000000, 80000000000000, 90000000000000, + 100000000000000, 200000000000000, 300000000000000, 400000000000000, 500000000000000, 600000000000000, 700000000000000, 800000000000000, 900000000000000, + 1000000000000000, 2000000000000000, 3000000000000000, 4000000000000000, 5000000000000000, 6000000000000000, 7000000000000000, 8000000000000000, 9000000000000000, + 10000000000000000, 20000000000000000, 30000000000000000, 40000000000000000, 50000000000000000, 60000000000000000, 70000000000000000, 80000000000000000, 90000000000000000, + 100000000000000000, 200000000000000000, 300000000000000000, 400000000000000000, 500000000000000000, 600000000000000000, 700000000000000000, 800000000000000000, 900000000000000000, + 1000000000000000000, 2000000000000000000, 
3000000000000000000, 4000000000000000000, 5000000000000000000, 6000000000000000000, 7000000000000000000, 8000000000000000000, 9000000000000000000, + 10000000000000000000 +]; /// Checks that an output amount is decomposed. /// @@ -40,7 +31,7 @@ pub fn decomposed_amounts() -> &'static [u64; 172] { /// ref: #[inline] pub fn is_decomposed_amount(amount: &u64) -> bool { - decomposed_amounts().binary_search(amount).is_ok() + DECOMPOSED_AMOUNTS.binary_search(amount).is_ok() } #[cfg(test)] @@ -49,7 +40,7 @@ mod tests { #[test] fn decomposed_amounts_return_decomposed() { - for amount in decomposed_amounts() { + for amount in DECOMPOSED_AMOUNTS.iter() { assert!(is_decomposed_amount(amount)) } } diff --git a/consensus/rules/src/transactions/tests.rs b/consensus/rules/src/transactions/tests.rs index 0bea08c..cd0e8c3 100644 --- a/consensus/rules/src/transactions/tests.rs +++ b/consensus/rules/src/transactions/tests.rs @@ -10,11 +10,11 @@ use proptest::{collection::vec, prelude::*}; use monero_serai::transaction::Output; use super::*; -use crate::decomposed_amount::decomposed_amounts; +use crate::decomposed_amount::DECOMPOSED_AMOUNTS; #[test] fn test_check_output_amount_v1() { - for amount in decomposed_amounts() { + for amount in DECOMPOSED_AMOUNTS.iter() { assert!(check_output_amount_v1(*amount, &HardFork::V2).is_ok()) } diff --git a/helper/src/fs.rs b/helper/src/fs.rs index 1efb20c..d38ee33 100644 --- a/helper/src/fs.rs +++ b/helper/src/fs.rs @@ -4,7 +4,7 @@ //! Note that this module's functions uses [`dirs`], //! which adheres to the XDG standard on Linux. //! -//! This means that the values returned by these functions +//! This means that the values returned by these statics //! may change at runtime depending on environment variables, //! for example: //! @@ -17,7 +17,7 @@ //! # if cfg!(target_os = "linux") { //! std::env::set_var("XDG_CONFIG_HOME", "/custom/path"); //! assert_eq!( -//! cuprate_config_dir().to_string_lossy(), +//! CUPRATE_CONFIG_DIR.to_string_lossy(), //! 
"/custom/path/cuprate" //! ); //! # } @@ -28,10 +28,7 @@ //! - //---------------------------------------------------------------------------------------------------- Use -use std::{ - path::{Path, PathBuf}, - sync::OnceLock, -}; +use std::{path::PathBuf, sync::LazyLock}; //---------------------------------------------------------------------------------------------------- Const /// Cuprate's main directory. @@ -62,71 +59,59 @@ pub const CUPRATE_DIR: &str = { }; //---------------------------------------------------------------------------------------------------- Directories -/// Create a (private) `OnceLock` and accessor function for common PATHs used by Cuprate. +/// Create a `LazyLock` for common PATHs used by Cuprate. /// /// This currently creates these directories: -/// - [`cuprate_cache_dir()`] -/// - [`cuprate_config_dir()`] -/// - [`cuprate_data_dir()`] -/// - [`cuprate_blockchain_dir()`] -/// -/// FIXME: Use `LazyLock` when stabilized. -/// . -/// . -macro_rules! impl_path_oncelock_and_fn { +/// - [`CUPRATE_CACHE_DIR`] +/// - [`CUPRATE_CONFIG_DIR`] +/// - [`CUPRATE_DATA_DIR`] +/// - [`CUPRATE_BLOCKCHAIN_DIR`] +macro_rules! impl_path_lazylock { ($( $(#[$attr:meta])* // Documentation and any `derive`'s. - $fn:ident, // Name of the corresponding access function. + $name:ident, // Name of the corresponding `LazyLock`. $dirs_fn:ident, // Name of the `dirs` function to use, the PATH prefix. $sub_dirs:literal // Any sub-directories to add onto the PATH. ),* $(,)?) => {$( - // Create the `OnceLock` if needed, append + // Create the `LazyLock` if needed, append // the Cuprate directory string and return. $(#[$attr])* - pub fn $fn() -> &'static Path { - /// Local `OnceLock` containing the Path. - static ONCE_LOCK: OnceLock = OnceLock::new(); + pub static $name: LazyLock = LazyLock::new(|| { + // There's nothing we can do but panic if + // we cannot acquire critical system directories. 
+ // + // Although, this realistically won't panic on + // normal systems for all OS's supported by `dirs`. + let mut path = dirs::$dirs_fn().unwrap(); - ONCE_LOCK.get_or_init(|| { - // There's nothing we can do but panic if - // we cannot acquire critical system directories. - // - // Although, this realistically won't panic on - // normal systems for all OS's supported by `dirs`. - let mut path = dirs::$dirs_fn().unwrap(); + // FIXME: + // Consider a user who does `HOME=/ ./cuprated` + // + // Should we say "that's stupid" and panic here? + // Or should it be respected? + // We really don't want a `rm -rf /` type of situation... + assert!( + path.parent().is_some(), + "SAFETY: returned OS PATH was either root or empty, aborting" + ); - // FIXME: - // Consider a user who does `HOME=/ ./cuprated` - // - // Should we say "that's stupid" and panic here? - // Or should it be respected? - // We really don't want a `rm -rf /` type of situation... - assert!( - path.parent().is_some(), - "SAFETY: returned OS PATH was either root or empty, aborting" - ); + // Returned OS PATH should be absolute, not relative. + assert!(path.is_absolute(), "SAFETY: returned OS PATH was not absolute"); - // Returned OS PATH should be absolute, not relative. - assert!(path.is_absolute(), "SAFETY: returned OS PATH was not absolute"); + // Unconditionally prefix with the top-level Cuprate directory. + path.push(CUPRATE_DIR); - // Unconditionally prefix with the top-level Cuprate directory. - path.push(CUPRATE_DIR); + // Add any sub directories if specified in the macro. + if !$sub_dirs.is_empty() { + path.push($sub_dirs); + } - // Add any sub directories if specified in the macro. - if !$sub_dirs.is_empty() { - path.push($sub_dirs); - } - - path - }) - } + path + }); )*}; } -// Note that the `OnceLock`'s are prefixed with `__` to indicate: -// 1. They're not really to be used directly -// 2. To avoid name conflicts -impl_path_oncelock_and_fn! { +impl_path_lazylock! 
{ /// Cuprate's cache directory. /// /// This is the PATH used for any Cuprate cache files. @@ -136,7 +121,7 @@ impl_path_oncelock_and_fn! { /// | Windows | `C:\Users\Alice\AppData\Local\Cuprate\` | /// | macOS | `/Users/Alice/Library/Caches/Cuprate/` | /// | Linux | `/home/alice/.cache/cuprate/` | - cuprate_cache_dir, + CUPRATE_CACHE_DIR, cache_dir, "", @@ -149,7 +134,7 @@ impl_path_oncelock_and_fn! { /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\` | /// | macOS | `/Users/Alice/Library/Application Support/Cuprate/` | /// | Linux | `/home/alice/.config/cuprate/` | - cuprate_config_dir, + CUPRATE_CONFIG_DIR, config_dir, "", @@ -162,7 +147,7 @@ impl_path_oncelock_and_fn! { /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\` | /// | macOS | `/Users/Alice/Library/Application Support/Cuprate/` | /// | Linux | `/home/alice/.local/share/cuprate/` | - cuprate_data_dir, + CUPRATE_DATA_DIR, data_dir, "", @@ -175,7 +160,7 @@ impl_path_oncelock_and_fn! { /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\blockchain\` | /// | macOS | `/Users/Alice/Library/Application Support/Cuprate/blockchain/` | /// | Linux | `/home/alice/.local/share/cuprate/blockchain/` | - cuprate_blockchain_dir, + CUPRATE_BLOCKCHAIN_DIR, data_dir, "blockchain", } @@ -192,58 +177,58 @@ mod test { // - It must `ends_with()` the expected end PATH for the OS #[test] fn path_sanity_check() { - assert!(cuprate_cache_dir().is_absolute()); - assert!(cuprate_config_dir().is_absolute()); - assert!(cuprate_data_dir().is_absolute()); - assert!(cuprate_blockchain_dir().is_absolute()); + assert!(CUPRATE_CACHE_DIR.is_absolute()); + assert!(CUPRATE_CONFIG_DIR.is_absolute()); + assert!(CUPRATE_DATA_DIR.is_absolute()); + assert!(CUPRATE_BLOCKCHAIN_DIR.is_absolute()); if cfg!(target_os = "windows") { - let dir = cuprate_cache_dir(); + let dir = &*CUPRATE_CACHE_DIR; println!("cuprate_cache_dir: {dir:?}"); assert!(dir.ends_with(r"AppData\Local\Cuprate")); - let dir = cuprate_config_dir(); + let dir = 
&*CUPRATE_CONFIG_DIR; println!("cuprate_config_dir: {dir:?}"); assert!(dir.ends_with(r"AppData\Roaming\Cuprate")); - let dir = cuprate_data_dir(); + let dir = &*CUPRATE_DATA_DIR; println!("cuprate_data_dir: {dir:?}"); assert!(dir.ends_with(r"AppData\Roaming\Cuprate")); - let dir = cuprate_blockchain_dir(); + let dir = &*CUPRATE_BLOCKCHAIN_DIR; println!("cuprate_blockchain_dir: {dir:?}"); assert!(dir.ends_with(r"AppData\Roaming\Cuprate\blockchain")); } else if cfg!(target_os = "macos") { - let dir = cuprate_cache_dir(); + let dir = &*CUPRATE_CACHE_DIR; println!("cuprate_cache_dir: {dir:?}"); assert!(dir.ends_with("Library/Caches/Cuprate")); - let dir = cuprate_config_dir(); + let dir = &*CUPRATE_CONFIG_DIR; println!("cuprate_config_dir: {dir:?}"); assert!(dir.ends_with("Library/Application Support/Cuprate")); - let dir = cuprate_data_dir(); + let dir = &*CUPRATE_DATA_DIR; println!("cuprate_data_dir: {dir:?}"); assert!(dir.ends_with("Library/Application Support/Cuprate")); - let dir = cuprate_blockchain_dir(); + let dir = &*CUPRATE_BLOCKCHAIN_DIR; println!("cuprate_blockchain_dir: {dir:?}"); assert!(dir.ends_with("Library/Application Support/Cuprate/blockchain")); } else { // Assumes Linux. 
- let dir = cuprate_cache_dir(); + let dir = &*CUPRATE_CACHE_DIR; println!("cuprate_cache_dir: {dir:?}"); assert!(dir.ends_with(".cache/cuprate")); - let dir = cuprate_config_dir(); + let dir = &*CUPRATE_CONFIG_DIR; println!("cuprate_config_dir: {dir:?}"); assert!(dir.ends_with(".config/cuprate")); - let dir = cuprate_data_dir(); + let dir = &*CUPRATE_DATA_DIR; println!("cuprate_data_dir: {dir:?}"); assert!(dir.ends_with(".local/share/cuprate")); - let dir = cuprate_blockchain_dir(); + let dir = &*CUPRATE_BLOCKCHAIN_DIR; println!("cuprate_blockchain_dir: {dir:?}"); assert!(dir.ends_with(".local/share/cuprate/blockchain")); } diff --git a/storage/blockchain/src/config/config.rs b/storage/blockchain/src/config/config.rs index 5bfbf74..957c67c 100644 --- a/storage/blockchain/src/config/config.rs +++ b/storage/blockchain/src/config/config.rs @@ -7,7 +7,7 @@ use std::{borrow::Cow, path::Path}; use serde::{Deserialize, Serialize}; use cuprate_database::{config::SyncMode, resize::ResizeAlgorithm}; -use cuprate_helper::fs::cuprate_blockchain_dir; +use cuprate_helper::fs::CUPRATE_BLOCKCHAIN_DIR; // re-exports pub use cuprate_database_service::ReaderThreads; @@ -38,7 +38,7 @@ impl ConfigBuilder { Self { db_directory: None, db_config: cuprate_database::config::ConfigBuilder::new(Cow::Borrowed( - cuprate_blockchain_dir(), + &*CUPRATE_BLOCKCHAIN_DIR, )), reader_threads: None, } @@ -48,7 +48,7 @@ impl ConfigBuilder { /// /// # Default values /// If [`ConfigBuilder::db_directory`] was not called, - /// the default [`cuprate_blockchain_dir`] will be used. + /// the default [`CUPRATE_BLOCKCHAIN_DIR`] will be used. /// /// For all other values, [`Default::default`] is used. pub fn build(self) -> Config { @@ -56,7 +56,7 @@ impl ConfigBuilder { // in `helper::fs`. No need to do them here. 
let db_directory = self .db_directory - .unwrap_or_else(|| Cow::Borrowed(cuprate_blockchain_dir())); + .unwrap_or_else(|| Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR)); let reader_threads = self.reader_threads.unwrap_or_default(); let db_config = self @@ -106,7 +106,7 @@ impl ConfigBuilder { #[must_use] pub fn fast(mut self) -> Self { self.db_config = - cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(cuprate_blockchain_dir())) + cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR)) .fast(); self.reader_threads = Some(ReaderThreads::OnePerThread); @@ -120,7 +120,7 @@ impl ConfigBuilder { #[must_use] pub fn low_power(mut self) -> Self { self.db_config = - cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(cuprate_blockchain_dir())) + cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR)) .low_power(); self.reader_threads = Some(ReaderThreads::One); @@ -130,7 +130,7 @@ impl ConfigBuilder { impl Default for ConfigBuilder { fn default() -> Self { - let db_directory = Cow::Borrowed(cuprate_blockchain_dir()); + let db_directory = Cow::Borrowed(&**CUPRATE_BLOCKCHAIN_DIR); Self { db_directory: Some(db_directory.clone()), db_config: cuprate_database::config::ConfigBuilder::new(db_directory), @@ -161,7 +161,7 @@ impl Config { /// Create a new [`Config`] with sane default settings. /// /// The [`cuprate_database::config::Config::db_directory`] - /// will be set to [`cuprate_blockchain_dir`]. + /// will be set to [`CUPRATE_BLOCKCHAIN_DIR`]. /// /// All other values will be [`Default::default`]. 
/// @@ -179,8 +179,8 @@ impl Config { /// /// let config = Config::new(); /// - /// assert_eq!(config.db_config.db_directory(), cuprate_blockchain_dir()); - /// assert!(config.db_config.db_file().starts_with(cuprate_blockchain_dir())); + /// assert_eq!(config.db_config.db_directory(), &*CUPRATE_BLOCKCHAIN_DIR); + /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_BLOCKCHAIN_DIR)); /// assert!(config.db_config.db_file().ends_with(DATABASE_DATA_FILENAME)); /// assert_eq!(config.db_config.sync_mode, SyncMode::default()); /// assert_eq!(config.db_config.resize_algorithm, ResizeAlgorithm::default()); diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index de955c8..4f77d73 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -268,7 +268,7 @@ mod test { use pretty_assertions::assert_eq; use cuprate_database::{Env, EnvInner, TxRw}; - use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3}; + use cuprate_test_utils::data::{BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3}; use super::*; @@ -292,9 +292,9 @@ mod test { assert_all_tables_are_empty(&env); let mut blocks = [ - block_v1_tx2().clone(), - block_v9_tx3().clone(), - block_v16_tx0().clone(), + BLOCK_V1_TX2.clone(), + BLOCK_V9_TX3.clone(), + BLOCK_V16_TX0.clone(), ]; // HACK: `add_block()` asserts blocks with non-sequential heights // cannot be added, to get around this, manually edit the block height. 
@@ -440,7 +440,7 @@ mod test { let tx_rw = env_inner.tx_rw().unwrap(); let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap(); - let mut block = block_v9_tx3().clone(); + let mut block = BLOCK_V9_TX3.clone(); block.height = usize::try_from(u32::MAX).unwrap() + 1; add_block(&block, &mut tables).unwrap(); @@ -459,7 +459,7 @@ mod test { let tx_rw = env_inner.tx_rw().unwrap(); let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap(); - let mut block = block_v9_tx3().clone(); + let mut block = BLOCK_V9_TX3.clone(); // HACK: `add_block()` asserts blocks with non-sequential heights // cannot be added, to get around this, manually edit the block height. block.height = 0; diff --git a/storage/blockchain/src/ops/blockchain.rs b/storage/blockchain/src/ops/blockchain.rs index 65d9ca2..ed368ad 100644 --- a/storage/blockchain/src/ops/blockchain.rs +++ b/storage/blockchain/src/ops/blockchain.rs @@ -84,7 +84,7 @@ mod test { use pretty_assertions::assert_eq; use cuprate_database::{Env, EnvInner, TxRw}; - use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3}; + use cuprate_test_utils::data::{BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3}; use super::*; @@ -108,9 +108,9 @@ mod test { assert_all_tables_are_empty(&env); let mut blocks = [ - block_v1_tx2().clone(), - block_v9_tx3().clone(), - block_v16_tx0().clone(), + BLOCK_V1_TX2.clone(), + BLOCK_V9_TX3.clone(), + BLOCK_V16_TX0.clone(), ]; let blocks_len = blocks.len(); diff --git a/storage/blockchain/src/ops/mod.rs b/storage/blockchain/src/ops/mod.rs index 2699fc8..4ff7dff 100644 --- a/storage/blockchain/src/ops/mod.rs +++ b/storage/blockchain/src/ops/mod.rs @@ -54,7 +54,7 @@ //! ```rust //! use hex_literal::hex; //! -//! use cuprate_test_utils::data::block_v16_tx0; +//! use cuprate_test_utils::data::BLOCK_V16_TX0; //! use cuprate_blockchain::{ //! cuprate_database::{ //! ConcreteEnv, @@ -83,7 +83,7 @@ //! let mut tables = env_inner.open_tables_mut(&tx_rw)?; //! //! // Write a block to the database. -//! 
let mut block = block_v16_tx0().clone(); +//! let mut block = BLOCK_V16_TX0.clone(); //! # block.height = 0; //! add_block(&block, &mut tables)?; //! diff --git a/storage/blockchain/src/ops/tx.rs b/storage/blockchain/src/ops/tx.rs index 7d608ca..e7dbdcf 100644 --- a/storage/blockchain/src/ops/tx.rs +++ b/storage/blockchain/src/ops/tx.rs @@ -322,7 +322,7 @@ mod test { use pretty_assertions::assert_eq; use cuprate_database::{Env, EnvInner, TxRw}; - use cuprate_test_utils::data::{tx_v1_sig0, tx_v1_sig2, tx_v2_rct3}; + use cuprate_test_utils::data::{TX_V1_SIG0, TX_V1_SIG2, TX_V2_RCT3}; use crate::{ tables::{OpenTables, Tables}, @@ -337,7 +337,7 @@ mod test { assert_all_tables_are_empty(&env); // Monero `Transaction`, not database tx. - let txs = [tx_v1_sig0(), tx_v1_sig2(), tx_v2_rct3()]; + let txs = [&*TX_V1_SIG0, &*TX_V1_SIG2, &*TX_V2_RCT3]; // Add transactions. let tx_ids = { diff --git a/storage/blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs index 3331a55..c774ee4 100644 --- a/storage/blockchain/src/service/mod.rs +++ b/storage/blockchain/src/service/mod.rs @@ -66,7 +66,7 @@ //! use tower::{Service, ServiceExt}; //! //! use cuprate_types::{blockchain::{BlockchainReadRequest, BlockchainWriteRequest, BlockchainResponse}, Chain}; -//! use cuprate_test_utils::data::block_v16_tx0; +//! use cuprate_test_utils::data::BLOCK_V16_TX0; //! //! use cuprate_blockchain::{ //! cuprate_database::Env, @@ -86,7 +86,7 @@ //! let (mut read_handle, mut write_handle, _) = cuprate_blockchain::service::init(config)?; //! //! // Prepare a request to write block. -//! let mut block = block_v16_tx0().clone(); +//! let mut block = BLOCK_V16_TX0.clone(); //! # block.height = 0_usize; // must be 0th height or panic in `add_block()` //! let request = BlockchainWriteRequest::WriteBlock(block); //! 
diff --git a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs index 8d817bb..ed13f7b 100644 --- a/storage/blockchain/src/service/tests.rs +++ b/storage/blockchain/src/service/tests.rs @@ -16,7 +16,7 @@ use pretty_assertions::assert_eq; use tower::{Service, ServiceExt}; use cuprate_database::{ConcreteEnv, DatabaseIter, DatabaseRo, Env, EnvInner, RuntimeError}; -use cuprate_test_utils::data::{block_v16_tx0, block_v1_tx2, block_v9_tx3}; +use cuprate_test_utils::data::{BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3}; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest}, Chain, OutputOnChain, VerifiedBlockInformation, @@ -61,7 +61,7 @@ fn init_service() -> ( #[allow(clippy::future_not_send)] // INVARIANT: tests are using a single threaded runtime async fn test_template( // Which block(s) to add? - block_fns: &[fn() -> &'static VerifiedBlockInformation], + blocks: &[&VerifiedBlockInformation], // Total amount of generated coins after the block(s) have been added. cumulative_generated_coins: u64, // What are the table lengths be after the block(s) have been added? @@ -76,8 +76,8 @@ async fn test_template( // HACK: `add_block()` asserts blocks with non-sequential heights // cannot be added, to get around this, manually edit the block height. - for (i, block_fn) in block_fns.iter().enumerate() { - let mut block = block_fn().clone(); + for (i, block) in blocks.iter().enumerate() { + let mut block = (*block).clone(); block.height = i; // Request a block to be written, assert it was written. 
@@ -104,7 +104,7 @@ async fn test_template( get_block_extended_header_from_height(&0, &tables).unwrap(), )); - let extended_block_header_1 = if block_fns.len() > 1 { + let extended_block_header_1 = if blocks.len() > 1 { Ok(BlockchainResponse::BlockExtendedHeader( get_block_extended_header_from_height(&1, &tables).unwrap(), )) @@ -116,7 +116,7 @@ async fn test_template( get_block_info(&0, tables.block_infos()).unwrap().block_hash, )); - let block_hash_1 = if block_fns.len() > 1 { + let block_hash_1 = if blocks.len() > 1 { Ok(BlockchainResponse::BlockHash( get_block_info(&1, tables.block_infos()).unwrap().block_hash, )) @@ -128,7 +128,7 @@ async fn test_template( get_block_extended_header_from_height(&0, &tables).unwrap(), ])); - let range_0_2 = if block_fns.len() >= 2 { + let range_0_2 = if blocks.len() >= 2 { Ok(BlockchainResponse::BlockExtendedHeaderInRange(vec![ get_block_extended_header_from_height(&0, &tables).unwrap(), get_block_extended_header_from_height(&1, &tables).unwrap(), @@ -333,7 +333,7 @@ fn init_drop() { #[tokio::test] async fn v1_tx2() { test_template( - &[block_v1_tx2], + &[&*BLOCK_V1_TX2], 14_535_350_982_449, AssertTableLen { block_infos: 1, @@ -359,7 +359,7 @@ async fn v1_tx2() { #[tokio::test] async fn v9_tx3() { test_template( - &[block_v9_tx3], + &[&*BLOCK_V9_TX3], 3_403_774_022_163, AssertTableLen { block_infos: 1, @@ -385,7 +385,7 @@ async fn v9_tx3() { #[tokio::test] async fn v16_tx0() { test_template( - &[block_v16_tx0], + &[&*BLOCK_V16_TX0], 600_000_000_000, AssertTableLen { block_infos: 1, diff --git a/storage/database/src/backend/tests.rs b/storage/database/src/backend/tests.rs index ac6b592..e219c42 100644 --- a/storage/database/src/backend/tests.rs +++ b/storage/database/src/backend/tests.rs @@ -126,7 +126,7 @@ fn resize() { let (env, _tempdir) = tmp_concrete_env(); // Resize by the OS page size. 
- let page_size = crate::resize::page_size(); + let page_size = *crate::resize::PAGE_SIZE; let old_size = env.current_map_size(); env.resize_map(Some(ResizeAlgorithm::FixedBytes(page_size))); diff --git a/storage/database/src/resize.rs b/storage/database/src/resize.rs index 99d6d7e..6ef9974 100644 --- a/storage/database/src/resize.rs +++ b/storage/database/src/resize.rs @@ -10,7 +10,7 @@ //! //! # Page size //! All free functions in this module will -//! return a multiple of the OS page size ([`page_size()`]), +//! return a multiple of the OS page size ([`PAGE_SIZE`]), //! [LMDB will error](http://www.lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5) //! if this is not the case. //! @@ -18,10 +18,10 @@ //! All returned [`NonZeroUsize`] values of the free functions in this module //! (including [`ResizeAlgorithm::resize`]) uphold the following invariants: //! 1. It will always be `>=` the input `current_size_bytes` -//! 2. It will always be a multiple of [`page_size()`] +//! 2. It will always be a multiple of [`PAGE_SIZE`] //---------------------------------------------------------------------------------------------------- Import -use std::{num::NonZeroUsize, sync::OnceLock}; +use std::{num::NonZeroUsize, sync::LazyLock}; //---------------------------------------------------------------------------------------------------- ResizeAlgorithm /// The function/algorithm used by the @@ -85,21 +85,14 @@ impl Default for ResizeAlgorithm { } //---------------------------------------------------------------------------------------------------- Free functions -/// This function retrieves the system’s memory page size. +/// This retrieves the system’s memory page size. /// /// It is just [`page_size::get`](https://docs.rs/page_size) internally. /// -/// This caches the result, so this function is cheap after the 1st call. -/// /// # Panics -/// This function will panic if the OS returns of page size of `0` (impossible?). 
-#[inline] -pub fn page_size() -> NonZeroUsize { - /// Cached result of [`page_size()`]. - static PAGE_SIZE: OnceLock = OnceLock::new(); - *PAGE_SIZE - .get_or_init(|| NonZeroUsize::new(page_size::get()).expect("page_size::get() returned 0")) -} +/// Accessing this [`LazyLock`] will panic if the OS returns of page size of `0` (impossible?). +pub static PAGE_SIZE: LazyLock = + LazyLock::new(|| NonZeroUsize::new(page_size::get()).expect("page_size::get() returned 0")); /// Memory map resize closely matching `monerod`. /// @@ -122,7 +115,7 @@ pub fn page_size() -> NonZeroUsize { /// assert_eq!(monero(0).get(), N); /// /// // Rounds up to nearest OS page size. -/// assert_eq!(monero(1).get(), N + page_size().get()); +/// assert_eq!(monero(1).get(), N + PAGE_SIZE.get()); /// ``` /// /// # Panics @@ -143,7 +136,7 @@ pub fn monero(current_size_bytes: usize) -> NonZeroUsize { /// const ADD_SIZE: usize = 1_usize << 30; - let page_size = page_size().get(); + let page_size = PAGE_SIZE.get(); let new_size_bytes = current_size_bytes + ADD_SIZE; // Round up the new size to the @@ -167,7 +160,7 @@ pub fn monero(current_size_bytes: usize) -> NonZeroUsize { /// /// ```rust /// # use cuprate_database::resize::*; -/// let page_size: usize = page_size().get(); +/// let page_size: usize = PAGE_SIZE.get(); /// /// // Anything below the page size will round up to the page size. /// for i in 0..=page_size { @@ -190,7 +183,7 @@ pub fn monero(current_size_bytes: usize) -> NonZeroUsize { /// fixed_bytes(1, usize::MAX); /// ``` pub fn fixed_bytes(current_size_bytes: usize, add_bytes: usize) -> NonZeroUsize { - let page_size = page_size(); + let page_size = *PAGE_SIZE; let new_size_bytes = current_size_bytes + add_bytes; // Guard against < page_size. 
@@ -222,7 +215,7 @@ pub fn fixed_bytes(current_size_bytes: usize, add_bytes: usize) -> NonZeroUsize /// /// ```rust /// # use cuprate_database::resize::*; -/// let page_size: usize = page_size().get(); +/// let page_size: usize = PAGE_SIZE.get(); /// /// // Anything below the page size will round up to the page size. /// for i in 0..=page_size { @@ -265,7 +258,7 @@ pub fn percent(current_size_bytes: usize, percent: f32) -> NonZeroUsize { _ => 1.0, }; - let page_size = page_size(); + let page_size = *PAGE_SIZE; // INVARIANT: Allow `f32` <-> `usize` casting, we handle all cases. #[allow( diff --git a/test-utils/src/data/mod.rs b/test-utils/src/data/mod.rs index 696c686..b9d42fb 100644 --- a/test-utils/src/data/mod.rs +++ b/test-utils/src/data/mod.rs @@ -15,14 +15,14 @@ //! let tx: Transaction = Transaction::read(&mut TX_E57440).unwrap(); //! ``` //! -//! ## Functions -//! The free functions provide access to typed data found in `cuprate_types`: +//! ## Statics +//! The statics provide access to typed data found in `cuprate_types`: //! ```rust //! # use cuprate_test_utils::data::*; //! use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; //! -//! let block: VerifiedBlockInformation = block_v16_tx0().clone(); -//! let tx: VerifiedTransactionInformation = tx_v1_sig0().clone(); +//! let block: VerifiedBlockInformation = BLOCK_V16_TX0.clone(); +//! let tx: VerifiedTransactionInformation = TX_V1_SIG0.clone(); //! 
``` mod constants; @@ -31,7 +31,7 @@ pub use constants::{ TX_9E3F73, TX_B6B439, TX_D7FEBD, TX_E2D393, TX_E57440, }; -mod free; -pub use free::{ - block_v16_tx0, block_v1_tx2, block_v9_tx3, tx_fee, tx_v1_sig0, tx_v1_sig2, tx_v2_rct3, +mod statics; +pub use statics::{ + tx_fee, BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3, TX_V1_SIG0, TX_V1_SIG2, TX_V2_RCT3, }; diff --git a/test-utils/src/data/free.rs b/test-utils/src/data/statics.rs similarity index 82% rename from test-utils/src/data/free.rs rename to test-utils/src/data/statics.rs index d7f61ae..8b98171 100644 --- a/test-utils/src/data/free.rs +++ b/test-utils/src/data/statics.rs @@ -1,4 +1,4 @@ -//! Free functions to access data. +//! `static LazyLock`s to access data. #![allow( const_item_mutation, // `R: Read` needs `&mut self` @@ -6,7 +6,7 @@ )] //---------------------------------------------------------------------------------------------------- Import -use std::sync::OnceLock; +use std::sync::LazyLock; use cuprate_helper::map::combine_low_high_bits_to_u128; use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; @@ -141,8 +141,7 @@ pub fn tx_fee(tx: &Transaction) -> u64 { } //---------------------------------------------------------------------------------------------------- Blocks -/// Generate a block accessor function with this signature: -/// `fn() -> &'static VerifiedBlockInformation` +/// Generate a `static LazyLock`. /// /// This will use `VerifiedBlockMap` type above to do various /// checks on the input data and makes sure it seems correct. @@ -153,9 +152,9 @@ pub fn tx_fee(tx: &Transaction) -> u64 { /// - Monero RPC (see cuprate_test_utils::rpc for this) /// /// See below for actual usage. -macro_rules! verified_block_information_fn { +macro_rules! 
verified_block_information { ( - fn_name: $fn_name:ident, // Name of the function created + name: $name:ident, // Name of the `LazyLock` created block_blob: $block_blob:ident, // Block blob ([u8], found in `constants.rs`) tx_blobs: [$($tx_blob:ident),*], // Array of contained transaction blobs pow_hash: $pow_hash:literal, // PoW hash as a string literal @@ -183,7 +182,7 @@ macro_rules! verified_block_information_fn { #[doc = "# use hex_literal::hex;"] #[doc = "use cuprate_helper::map::combine_low_high_bits_to_u128;"] #[doc = ""] - #[doc = concat!("let block = ", stringify!($fn_name), "();")] + #[doc = concat!("let block = &*", stringify!($name), ";")] #[doc = concat!("assert_eq!(&block.block.serialize(), ", stringify!($block_blob), ");")] #[doc = concat!("assert_eq!(block.pow_hash, hex!(\"", $pow_hash, "\"));")] #[doc = concat!("assert_eq!(block.height, ", $height, ");")] @@ -201,28 +200,25 @@ macro_rules! verified_block_information_fn { "));" )] /// ``` - pub fn $fn_name() -> &'static VerifiedBlockInformation { - static BLOCK: OnceLock = OnceLock::new(); - BLOCK.get_or_init(|| { - VerifiedBlockMap { - block_blob: $block_blob, - pow_hash: hex!($pow_hash), - height: $height, - generated_coins: $generated_coins, - weight: $weight, - long_term_weight: $long_term_weight, - cumulative_difficulty_low: $cumulative_difficulty_low, - cumulative_difficulty_high: $cumulative_difficulty_high, - txs: &[$($tx_blob),*], - } - .into_verified() - }) - } + pub static $name: LazyLock = LazyLock::new(|| { + VerifiedBlockMap { + block_blob: $block_blob, + pow_hash: hex!($pow_hash), + height: $height, + generated_coins: $generated_coins, + weight: $weight, + long_term_weight: $long_term_weight, + cumulative_difficulty_low: $cumulative_difficulty_low, + cumulative_difficulty_high: $cumulative_difficulty_high, + txs: &[$($tx_blob),*], + } + .into_verified() + }); }; } -verified_block_information_fn! { - fn_name: block_v1_tx2, +verified_block_information! 
{ + name: BLOCK_V1_TX2, block_blob: BLOCK_5ECB7E, tx_blobs: [TX_2180A8, TX_D7FEBD], pow_hash: "c960d540000459480560b7816de968c7470083e5874e10040bdd4cc501000000", @@ -235,8 +231,8 @@ verified_block_information_fn! { tx_len: 2, } -verified_block_information_fn! { - fn_name: block_v9_tx3, +verified_block_information! { + name: BLOCK_V9_TX3, block_blob: BLOCK_F91043, tx_blobs: [TX_E2D393, TX_E57440, TX_B6B439], pow_hash: "7c78b5b67a112a66ea69ea51477492057dba9cfeaa2942ee7372c61800000000", @@ -249,8 +245,8 @@ verified_block_information_fn! { tx_len: 3, } -verified_block_information_fn! { - fn_name: block_v16_tx0, +verified_block_information! { + name: BLOCK_V16_TX0, block_blob: BLOCK_43BD1F, tx_blobs: [], pow_hash: "10b473b5d097d6bfa0656616951840724dfe38c6fb9c4adf8158800300000000", @@ -264,13 +260,12 @@ verified_block_information_fn! { } //---------------------------------------------------------------------------------------------------- Transactions -/// Generate a transaction accessor function with this signature: -/// `fn() -> &'static VerifiedTransactionInformation` +/// Generate a `const LazyLock`. /// -/// Same as [`verified_block_information_fn`] but for transactions. -macro_rules! transaction_verification_data_fn { +/// Same as [`verified_block_information`] but for transactions. +macro_rules! transaction_verification_data { ( - fn_name: $fn_name:ident, // Name of the function created + name: $name:ident, // Name of the `LazyLock` created tx_blobs: $tx_blob:ident, // Transaction blob ([u8], found in `constants.rs`) weight: $weight:literal, // Transaction weight hash: $hash:literal, // Transaction hash as a string literal @@ -280,35 +275,34 @@ macro_rules! 
transaction_verification_data_fn { /// ```rust #[doc = "# use cuprate_test_utils::data::*;"] #[doc = "# use hex_literal::hex;"] - #[doc = concat!("let tx = ", stringify!($fn_name), "();")] + #[doc = concat!("let tx = &*", stringify!($name), ";")] #[doc = concat!("assert_eq!(&tx.tx.serialize(), ", stringify!($tx_blob), ");")] #[doc = concat!("assert_eq!(tx.tx_blob, ", stringify!($tx_blob), ");")] #[doc = concat!("assert_eq!(tx.tx_weight, ", $weight, ");")] #[doc = concat!("assert_eq!(tx.tx_hash, hex!(\"", $hash, "\"));")] /// ``` - pub fn $fn_name() -> &'static VerifiedTransactionInformation { - static TX: OnceLock = OnceLock::new(); - TX.get_or_init(|| to_tx_verification_data($tx_blob)) - } + pub static $name: LazyLock = LazyLock::new(|| { + to_tx_verification_data($tx_blob) + }); }; } -transaction_verification_data_fn! { - fn_name: tx_v1_sig0, +transaction_verification_data! { + name: TX_V1_SIG0, tx_blobs: TX_3BC7FF, weight: 248, hash: "3bc7ff015b227e7313cc2e8668bfbb3f3acbee274a9c201d6211cf681b5f6bb1", } -transaction_verification_data_fn! { - fn_name: tx_v1_sig2, +transaction_verification_data! { + name: TX_V1_SIG2, tx_blobs: TX_9E3F73, weight: 448, hash: "9e3f73e66d7c7293af59c59c1ff5d6aae047289f49e5884c66caaf4aea49fb34", } -transaction_verification_data_fn! { - fn_name: tx_v2_rct3, +transaction_verification_data! 
{ + name: TX_V2_RCT3, tx_blobs: TX_84D48D, weight: 2743, hash: "84d48dc11ec91950f8b70a85af9db91fe0c8abef71ef5db08304f7344b99ea66", @@ -328,7 +322,7 @@ mod tests { #[tokio::test] async fn block_same_as_rpc() { let rpc = HttpRpcClient::new(None).await; - for block in [block_v1_tx2(), block_v9_tx3(), block_v16_tx0()] { + for block in [&*BLOCK_V1_TX2, &*BLOCK_V9_TX3, &*BLOCK_V16_TX0] { println!("block_height: {}", block.height); let block_rpc = rpc.get_verified_block_information(block.height).await; assert_eq!(block, &block_rpc); @@ -342,16 +336,12 @@ mod tests { async fn tx_same_as_rpc() { let rpc = HttpRpcClient::new(None).await; - let mut txs = [block_v1_tx2(), block_v9_tx3(), block_v16_tx0()] + let mut txs = [&*BLOCK_V1_TX2, &*BLOCK_V9_TX3, &*BLOCK_V16_TX0] .into_iter() .flat_map(|block| block.txs.iter().cloned()) .collect::>(); - txs.extend([ - tx_v1_sig0().clone(), - tx_v1_sig2().clone(), - tx_v2_rct3().clone(), - ]); + txs.extend([TX_V1_SIG0.clone(), TX_V1_SIG2.clone(), TX_V2_RCT3.clone()]); for tx in txs { println!("tx_hash: {:?}", tx.tx_hash); From 5648bf0da0ed2719e6987425138e89957508df4c Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Tue, 20 Aug 2024 18:50:31 -0400 Subject: [PATCH 040/104] rpc: remove temporary lints (#255) * rpc: remove temporary lints for types * rpc: remove temporary lints for json-rpc * rpc: remove temporary lints for interface * cfgs `1 tab` -> `4 spaces` --- rpc/interface/src/lib.rs | 103 +++++++++++------------- rpc/interface/src/route/bin.rs | 2 +- rpc/interface/src/router_builder.rs | 7 +- rpc/interface/src/rpc_error.rs | 2 +- rpc/interface/src/rpc_handler.rs | 8 +- rpc/interface/src/rpc_handler_dummy.rs | 5 +- rpc/json-rpc/src/lib.rs | 90 +++++++++++---------- rpc/types/src/bin.rs | 15 ++-- rpc/types/src/defaults.rs | 7 -- rpc/types/src/free.rs | 2 + rpc/types/src/json.rs | 5 +- rpc/types/src/lib.rs | 105 +++++++++++-------------- rpc/types/src/misc/distribution.rs | 15 ++-- rpc/types/src/misc/misc.rs | 12 +-- 
rpc/types/src/misc/tx_entry.rs | 8 +- rpc/types/src/other.rs | 7 +- rpc/types/src/serde.rs | 2 +- storage/blockchain/src/lib.rs | 90 ++++++++++----------- storage/database/src/lib.rs | 92 +++++++++++----------- 19 files changed, 255 insertions(+), 322 deletions(-) diff --git a/rpc/interface/src/lib.rs b/rpc/interface/src/lib.rs index 2656b07..43bd9e1 100644 --- a/rpc/interface/src/lib.rs +++ b/rpc/interface/src/lib.rs @@ -4,37 +4,37 @@ // Forbid lints. // Our code, and code generated (e.g macros) cannot overrule these. #![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, + // `unsafe` is allowed but it _must_ be + // commented with `SAFETY: reason`. + clippy::undocumented_unsafe_blocks, - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - while_true, + // Never. + unused_unsafe, + redundant_semicolons, + unused_allocation, + coherence_leak_check, + while_true, - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, + // Maybe can be put into `#[deny]`. + unconditional_recursion, + for_loops_over_fallibles, + unused_braces, + unused_labels, + keyword_idents, + non_ascii_idents, + variant_size_differences, single_use_lifetimes, - // Probably can be put into `#[deny]`. - future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, + // Probably can be put into `#[deny]`. + future_incompatible, + let_underscore, + break_with_label_and_loop, + duplicate_macro_attributes, + exported_private_dependencies, + large_assignments, + overlapping_range_endpoints, + semicolon_in_expressions_from_macros, + noop_method_call, )] // Deny lints. 
// Some of these are `#[allow]`'ed on a per-case basis. @@ -57,39 +57,30 @@ unreachable_pub )] #![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, + // FIXME: this lint affects crates outside of + // `database/` for some reason, allow for now. + clippy::cargo_common_metadata, - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... - // although it is sometimes nice. - clippy::must_use_candidate, + // FIXME: adding `#[must_use]` onto everything + // might just be more annoying than useful... + // although it is sometimes nice. + clippy::must_use_candidate, - // FIXME: good lint but too many false positives - // with our `Env` + `RwLock` setup. - clippy::significant_drop_tightening, + // FIXME: good lint but too many false positives + // with our `Env` + `RwLock` setup. + clippy::significant_drop_tightening, - // FIXME: good lint but is less clear in most cases. - clippy::items_after_statements, + // FIXME: good lint but is less clear in most cases. + clippy::items_after_statements, - // TODO - rustdoc::bare_urls, + // TODO + rustdoc::bare_urls, - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, -)] -// Allow some lints when running in debug mode. -#![cfg_attr( - debug_assertions, - allow( - clippy::todo, - clippy::multiple_crate_versions, - unused_imports, - unused_variables - ) + clippy::multiple_crate_versions, + clippy::module_name_repetitions, + clippy::module_inception, + clippy::redundant_pub_crate, + clippy::option_if_let_else, )] // Allow some lints in tests. 
#![cfg_attr( @@ -101,8 +92,6 @@ clippy::too_many_lines ) )] -// TODO: remove me after finishing impl -#![allow(dead_code, unreachable_code, clippy::diverging_sub_expression)] //---------------------------------------------------------------------------------------------------- Mod mod route; diff --git a/rpc/interface/src/route/bin.rs b/rpc/interface/src/route/bin.rs index b17b98c..942e091 100644 --- a/rpc/interface/src/route/bin.rs +++ b/rpc/interface/src/route/bin.rs @@ -81,7 +81,7 @@ macro_rules! generate_endpoints_inner { // Serialize to bytes and respond. match cuprate_epee_encoding::to_bytes(response) { Ok(bytes) => Ok(bytes.freeze()), - Err(e) => Err(StatusCode::INTERNAL_SERVER_ERROR), + Err(_) => Err(StatusCode::INTERNAL_SERVER_ERROR), } } } diff --git a/rpc/interface/src/router_builder.rs b/rpc/interface/src/router_builder.rs index d370cf4..2e80c43 100644 --- a/rpc/interface/src/router_builder.rs +++ b/rpc/interface/src/router_builder.rs @@ -1,12 +1,7 @@ //! Free functions. -use std::marker::PhantomData; - //---------------------------------------------------------------------------------------------------- Use -use axum::{ - routing::{method_routing::get, post}, - Router, -}; +use axum::Router; use crate::{ route::{bin, fallback, json_rpc, other}, diff --git a/rpc/interface/src/rpc_error.rs b/rpc/interface/src/rpc_error.rs index 92b9cc1..47563d6 100644 --- a/rpc/interface/src/rpc_error.rs +++ b/rpc/interface/src/rpc_error.rs @@ -21,7 +21,7 @@ use serde::{Deserialize, Serialize}; pub enum RpcError {} impl From for StatusCode { - fn from(value: RpcError) -> Self { + fn from(_: RpcError) -> Self { // TODO Self::INTERNAL_SERVER_ERROR } diff --git a/rpc/interface/src/rpc_handler.rs b/rpc/interface/src/rpc_handler.rs index 3d1c28d..bcd0873 100644 --- a/rpc/interface/src/rpc_handler.rs +++ b/rpc/interface/src/rpc_handler.rs @@ -1,16 +1,10 @@ //! RPC handler trait. 
//---------------------------------------------------------------------------------------------------- Use -use std::{future::Future, task::Poll}; +use std::future::Future; -use axum::{http::StatusCode, response::IntoResponse}; -use futures::{channel::oneshot::channel, FutureExt}; use tower::Service; -use cuprate_helper::asynch::InfallibleOneshotReceiver; -use cuprate_json_rpc::Id; -use cuprate_rpc_types::json::JsonRpcRequest; - use crate::{rpc_error::RpcError, rpc_request::RpcRequest, rpc_response::RpcResponse}; //---------------------------------------------------------------------------------------------------- RpcHandler diff --git a/rpc/interface/src/rpc_handler_dummy.rs b/rpc/interface/src/rpc_handler_dummy.rs index 97b7585..73ffe9c 100644 --- a/rpc/interface/src/rpc_handler_dummy.rs +++ b/rpc/interface/src/rpc_handler_dummy.rs @@ -3,14 +3,13 @@ //---------------------------------------------------------------------------------------------------- Use use std::task::Poll; -use futures::{channel::oneshot::channel, FutureExt}; +use futures::channel::oneshot::channel; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use tower::Service; use cuprate_helper::asynch::InfallibleOneshotReceiver; use cuprate_json_rpc::Id; -use cuprate_rpc_types::json::JsonRpcRequest; use crate::{ rpc_error::RpcError, rpc_handler::RpcHandler, rpc_request::RpcRequest, @@ -48,7 +47,7 @@ impl Service for RpcHandlerDummy { type Error = RpcError; type Future = InfallibleOneshotReceiver>; - fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll> { + fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> Poll> { Poll::Ready(Ok(())) } diff --git a/rpc/json-rpc/src/lib.rs b/rpc/json-rpc/src/lib.rs index 45ac2ef..ce7467a 100644 --- a/rpc/json-rpc/src/lib.rs +++ b/rpc/json-rpc/src/lib.rs @@ -3,38 +3,38 @@ // Forbid lints. // Our code, and code generated (e.g macros) cannot overrule these. 
#![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, + // `unsafe` is allowed but it _must_ be + // commented with `SAFETY: reason`. + clippy::undocumented_unsafe_blocks, - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - while_true, + // Never. + unused_unsafe, + redundant_semicolons, + unused_allocation, + coherence_leak_check, + while_true, - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, + // Maybe can be put into `#[deny]`. + unconditional_recursion, + for_loops_over_fallibles, + unused_braces, + unused_labels, + keyword_idents, + non_ascii_idents, + variant_size_differences, single_use_lifetimes, - // Probably can be put into `#[deny]`. - future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, - unreachable_pub, + // Probably can be put into `#[deny]`. + future_incompatible, + let_underscore, + break_with_label_and_loop, + duplicate_macro_attributes, + exported_private_dependencies, + large_assignments, + overlapping_range_endpoints, + semicolon_in_expressions_from_macros, + noop_method_call, + unreachable_pub, )] // Deny lints. // Some of these are `#[allow]`'ed on a per-case basis. @@ -56,29 +56,27 @@ nonstandard_style )] #![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, + // FIXME: this lint affects crates outside of + // `database/` for some reason, allow for now. + clippy::cargo_common_metadata, - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... 
- // although it is sometimes nice. - clippy::must_use_candidate, + // FIXME: adding `#[must_use]` onto everything + // might just be more annoying than useful... + // although it is sometimes nice. + clippy::must_use_candidate, - // FIXME: good lint but too many false positives - // with our `Env` + `RwLock` setup. - clippy::significant_drop_tightening, + // FIXME: good lint but too many false positives + // with our `Env` + `RwLock` setup. + clippy::significant_drop_tightening, - // FIXME: good lint but is less clear in most cases. - clippy::items_after_statements, + // FIXME: good lint but is less clear in most cases. + clippy::items_after_statements, - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, + clippy::module_name_repetitions, + clippy::module_inception, + clippy::redundant_pub_crate, + clippy::option_if_let_else, )] -// Allow some lints when running in debug mode. -#![cfg_attr(debug_assertions, allow(clippy::todo, clippy::multiple_crate_versions))] // Allow some lints in tests. 
#![cfg_attr( test, diff --git a/rpc/types/src/bin.rs b/rpc/types/src/bin.rs index 278e535..0dbddea 100644 --- a/rpc/types/src/bin.rs +++ b/rpc/types/src/bin.rs @@ -13,22 +13,17 @@ use cuprate_epee_encoding::{ container_as_blob::ContainerAsBlob, epee_object, error, macros::bytes::{Buf, BufMut}, - read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, EpeeValue, + read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, }; use cuprate_types::BlockCompleteEntry; use crate::{ - base::{AccessResponseBase, ResponseBase}, - defaults::{default_false, default_height, default_string, default_vec, default_zero}, - free::{is_one, is_zero}, + base::AccessResponseBase, + defaults::{default_false, default_zero}, macros::{define_request, define_request_and_response, define_request_and_response_doc}, - misc::{ - AuxPow, BlockHeader, BlockOutputIndices, ChainInfo, ConnectionInfo, GetBan, GetOutputsOut, - HardforkEntry, HistogramEntry, OutKeyBin, OutputDistributionData, Peer, PoolInfoExtent, - PoolTxInfo, SetBan, Span, Status, TxBacklogEntry, - }, - rpc_call::{RpcCall, RpcCallValue}, + misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolInfoExtent, PoolTxInfo, Status}, + rpc_call::RpcCallValue, }; //---------------------------------------------------------------------------------------------------- Definitions diff --git a/rpc/types/src/defaults.rs b/rpc/types/src/defaults.rs index 6addd0a..def5df4 100644 --- a/rpc/types/src/defaults.rs +++ b/rpc/types/src/defaults.rs @@ -8,7 +8,6 @@ //! `height`, it will use [`default_height`] to fill that in. //---------------------------------------------------------------------------------------------------- Import -use std::borrow::Cow; //---------------------------------------------------------------------------------------------------- TODO /// Default [`bool`] type used in request/response types, `false`. 
@@ -23,12 +22,6 @@ pub(crate) const fn default_true() -> bool { true } -/// Default `Cow<'static, str` type used in request/response types. -#[inline] -pub(crate) const fn default_cow_str() -> Cow<'static, str> { - Cow::Borrowed("") -} - /// Default [`String`] type used in request/response types. #[inline] pub(crate) const fn default_string() -> String { diff --git a/rpc/types/src/free.rs b/rpc/types/src/free.rs index 043a520..45fb2f7 100644 --- a/rpc/types/src/free.rs +++ b/rpc/types/src/free.rs @@ -6,6 +6,7 @@ /// Returns `true` if the input `u` is equal to `0`. #[inline] #[allow(clippy::trivially_copy_pass_by_ref)] // serde needs `&` +#[allow(dead_code)] // TODO: see if needed after handlers. pub(crate) const fn is_zero(u: &u64) -> bool { *u == 0 } @@ -13,6 +14,7 @@ pub(crate) const fn is_zero(u: &u64) -> bool { /// Returns `true` the input `u` is equal to `1`. #[inline] #[allow(clippy::trivially_copy_pass_by_ref)] // serde needs `&` +#[allow(dead_code)] // TODO: see if needed after handlers. pub(crate) const fn is_one(u: &u64) -> bool { *u == 1 } diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index 4971061..cfefcf9 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -12,12 +12,11 @@ use crate::{ default_false, default_height, default_one, default_string, default_true, default_vec, default_zero, }, - free::{is_one, is_zero}, macros::define_request_and_response, misc::{ AuxPow, BlockHeader, ChainInfo, ConnectionInfo, Distribution, GetBan, - GetMinerDataTxBacklogEntry, HardforkEntry, HistogramEntry, OutputDistributionData, SetBan, - Span, Status, SyncInfoPeer, TxBacklogEntry, + GetMinerDataTxBacklogEntry, HardforkEntry, HistogramEntry, SetBan, Span, Status, + SyncInfoPeer, TxBacklogEntry, }, rpc_call::RpcCallValue, }; diff --git a/rpc/types/src/lib.rs b/rpc/types/src/lib.rs index b48f22e..c5f890f 100644 --- a/rpc/types/src/lib.rs +++ b/rpc/types/src/lib.rs @@ -4,37 +4,37 @@ // Forbid lints. 
// Our code, and code generated (e.g macros) cannot overrule these. #![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, + // `unsafe` is allowed but it _must_ be + // commented with `SAFETY: reason`. + clippy::undocumented_unsafe_blocks, - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - while_true, + // Never. + unused_unsafe, + redundant_semicolons, + unused_allocation, + coherence_leak_check, + while_true, - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, + // Maybe can be put into `#[deny]`. + unconditional_recursion, + for_loops_over_fallibles, + unused_braces, + unused_labels, + keyword_idents, + non_ascii_idents, + variant_size_differences, single_use_lifetimes, - // Probably can be put into `#[deny]`. - future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, + // Probably can be put into `#[deny]`. + future_incompatible, + let_underscore, + break_with_label_and_loop, + duplicate_macro_attributes, + exported_private_dependencies, + large_assignments, + overlapping_range_endpoints, + semicolon_in_expressions_from_macros, + noop_method_call, )] // Deny lints. // Some of these are `#[allow]`'ed on a per-case basis. @@ -57,39 +57,27 @@ unreachable_pub )] #![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, + // FIXME: this lint affects crates outside of + // `database/` for some reason, allow for now. + clippy::cargo_common_metadata, - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... 
- // although it is sometimes nice. - clippy::must_use_candidate, + // FIXME: adding `#[must_use]` onto everything + // might just be more annoying than useful... + // although it is sometimes nice. + clippy::must_use_candidate, - // FIXME: good lint but too many false positives - // with our `Env` + `RwLock` setup. - clippy::significant_drop_tightening, + // FIXME: good lint but too many false positives + // with our `Env` + `RwLock` setup. + clippy::significant_drop_tightening, - // FIXME: good lint but is less clear in most cases. - clippy::items_after_statements, + // FIXME: good lint but is less clear in most cases. + clippy::items_after_statements, - // TODO - rustdoc::bare_urls, - - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, -)] -// Allow some lints when running in debug mode. -#![cfg_attr( - debug_assertions, - allow( - clippy::todo, - clippy::multiple_crate_versions, - unused_imports, - unused_variables - ) + clippy::multiple_crate_versions, + clippy::module_name_repetitions, + clippy::module_inception, + clippy::redundant_pub_crate, + clippy::option_if_let_else, )] // Allow some lints in tests. #![cfg_attr( @@ -101,11 +89,6 @@ clippy::too_many_lines ) )] -// TODO: remove me after finishing impl -#![allow( - dead_code, - rustdoc::broken_intra_doc_links // TODO: remove after `{bin,json,other}.rs` gets merged -)] //---------------------------------------------------------------------------------------------------- Mod mod constants; diff --git a/rpc/types/src/misc/distribution.rs b/rpc/types/src/misc/distribution.rs index 1a488d4..55d509e 100644 --- a/rpc/types/src/misc/distribution.rs +++ b/rpc/types/src/misc/distribution.rs @@ -1,17 +1,14 @@ //! Output distributions for [`crate::json::GetOutputDistributionResponse`]. 
//---------------------------------------------------------------------------------------------------- Use -use std::mem::size_of; - #[cfg(feature = "serde")] -use serde::{ser::SerializeStruct, Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] use cuprate_epee_encoding::{ epee_object, error, macros::bytes::{Buf, BufMut}, - read_epee_value, read_varint, write_field, write_varint, EpeeObject, EpeeObjectBuilder, - EpeeValue, Marker, + read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, EpeeValue, }; //---------------------------------------------------------------------------------------------------- Free @@ -24,7 +21,7 @@ use cuprate_epee_encoding::{ 45..=55 )] #[cfg(feature = "epee")] -fn compress_integer_array(array: &[u64]) -> error::Result> { +fn compress_integer_array(_: &[u64]) -> error::Result> { todo!() } @@ -36,7 +33,7 @@ fn compress_integer_array(array: &[u64]) -> error::Result> { "rpc/core_rpc_server_commands_defs.h", 57..=72 )] -fn decompress_integer_array(array: &[u8]) -> Vec { +fn decompress_integer_array(_: &[u8]) -> Vec { todo!() } @@ -281,9 +278,9 @@ impl EpeeObject for Distribution { //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod tests { - use pretty_assertions::assert_eq; + // use pretty_assertions::assert_eq; - use super::*; + // use super::*; // TODO: re-enable tests after (de)compression functions are implemented. diff --git a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs index 2b31cab..842997b 100644 --- a/rpc/types/src/misc/misc.rs +++ b/rpc/types/src/misc/misc.rs @@ -5,23 +5,13 @@ //! the [`crate::misc::ConnectionInfo`] struct defined here. 
//---------------------------------------------------------------------------------------------------- Import -use std::fmt::Display; - #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] -use cuprate_epee_encoding::{ - epee_object, - macros::bytes::{Buf, BufMut}, - EpeeValue, Marker, -}; +use cuprate_epee_encoding::epee_object; use crate::{ - constants::{ - CORE_RPC_STATUS_BUSY, CORE_RPC_STATUS_NOT_MINING, CORE_RPC_STATUS_OK, - CORE_RPC_STATUS_PAYMENT_REQUIRED, - }, defaults::{default_string, default_zero}, macros::monero_definition_link, }; diff --git a/rpc/types/src/misc/tx_entry.rs b/rpc/types/src/misc/tx_entry.rs index e643076..5151cee 100644 --- a/rpc/types/src/misc/tx_entry.rs +++ b/rpc/types/src/misc/tx_entry.rs @@ -8,9 +8,9 @@ use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] use cuprate_epee_encoding::{ - epee_object, error, + error, macros::bytes::{Buf, BufMut}, - read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, EpeeValue, Marker, + EpeeObject, EpeeObjectBuilder, }; //---------------------------------------------------------------------------------------------------- TxEntry @@ -123,7 +123,7 @@ impl Default for TxEntry { //---------------------------------------------------------------------------------------------------- Epee #[cfg(feature = "epee")] impl EpeeObjectBuilder for () { - fn add_field(&mut self, name: &str, r: &mut B) -> error::Result { + fn add_field(&mut self, _: &str, _: &mut B) -> error::Result { unreachable!() } @@ -140,7 +140,7 @@ impl EpeeObject for TxEntry { unreachable!() } - fn write_fields(self, w: &mut B) -> error::Result<()> { + fn write_fields(self, _: &mut B) -> error::Result<()> { unreachable!() } } diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index 9457250..28c95d2 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -11,10 +11,9 @@ use crate::{ defaults::{default_false, default_string, default_true, default_vec, default_zero}, 
macros::define_request_and_response, misc::{ - GetOutputsOut, KeyImageSpentStatus, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status, - TxEntry, TxInfo, TxpoolStats, + GetOutputsOut, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status, TxEntry, TxInfo, + TxpoolStats, }, - rpc_call::RpcCall, RpcCallValue, }; @@ -191,7 +190,7 @@ define_request_and_response! { } )] AccessResponseBase { - /// FIXME: These are [`KeyImageSpentStatus`] in [`u8`] form. + /// FIXME: These are [`KeyImageSpentStatus`](crate::misc::KeyImageSpentStatus) in [`u8`] form. spent_status: Vec, } } diff --git a/rpc/types/src/serde.rs b/rpc/types/src/serde.rs index 70885e0..e624a66 100644 --- a/rpc/types/src/serde.rs +++ b/rpc/types/src/serde.rs @@ -28,5 +28,5 @@ where //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test { - use super::*; + // use super::*; } diff --git a/storage/blockchain/src/lib.rs b/storage/blockchain/src/lib.rs index 9db0862..ec6d082 100644 --- a/storage/blockchain/src/lib.rs +++ b/storage/blockchain/src/lib.rs @@ -3,39 +3,39 @@ // Forbid lints. // Our code, and code generated (e.g macros) cannot overrule these. #![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, + // `unsafe` is allowed but it _must_ be + // commented with `SAFETY: reason`. + clippy::undocumented_unsafe_blocks, - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - while_true, - clippy::missing_docs_in_private_items, + // Never. + unused_unsafe, + redundant_semicolons, + unused_allocation, + coherence_leak_check, + while_true, + clippy::missing_docs_in_private_items, - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, + // Maybe can be put into `#[deny]`. 
+ unconditional_recursion, + for_loops_over_fallibles, + unused_braces, + unused_labels, + keyword_idents, + non_ascii_idents, + variant_size_differences, single_use_lifetimes, - // Probably can be put into `#[deny]`. - future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, - unreachable_pub, + // Probably can be put into `#[deny]`. + future_incompatible, + let_underscore, + break_with_label_and_loop, + duplicate_macro_attributes, + exported_private_dependencies, + large_assignments, + overlapping_range_endpoints, + semicolon_in_expressions_from_macros, + noop_method_call, + unreachable_pub, )] // Deny lints. // Some of these are `#[allow]`'ed on a per-case basis. @@ -58,26 +58,26 @@ nonstandard_style )] #![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, + // FIXME: this lint affects crates outside of + // `database/` for some reason, allow for now. + clippy::cargo_common_metadata, - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... - // although it is sometimes nice. - clippy::must_use_candidate, + // FIXME: adding `#[must_use]` onto everything + // might just be more annoying than useful... + // although it is sometimes nice. + clippy::must_use_candidate, - // FIXME: good lint but too many false positives - // with our `Env` + `RwLock` setup. - clippy::significant_drop_tightening, + // FIXME: good lint but too many false positives + // with our `Env` + `RwLock` setup. + clippy::significant_drop_tightening, - // FIXME: good lint but is less clear in most cases. - clippy::items_after_statements, + // FIXME: good lint but is less clear in most cases. 
+ clippy::items_after_statements, - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, + clippy::module_name_repetitions, + clippy::module_inception, + clippy::redundant_pub_crate, + clippy::option_if_let_else, )] // Allow some lints when running in debug mode. #![cfg_attr( diff --git a/storage/database/src/lib.rs b/storage/database/src/lib.rs index da36b0d..5946fe5 100644 --- a/storage/database/src/lib.rs +++ b/storage/database/src/lib.rs @@ -3,39 +3,39 @@ // Forbid lints. // Our code, and code generated (e.g macros) cannot overrule these. #![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, + // `unsafe` is allowed but it _must_ be + // commented with `SAFETY: reason`. + clippy::undocumented_unsafe_blocks, - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - while_true, - clippy::missing_docs_in_private_items, + // Never. + unused_unsafe, + redundant_semicolons, + unused_allocation, + coherence_leak_check, + while_true, + clippy::missing_docs_in_private_items, - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, + // Maybe can be put into `#[deny]`. + unconditional_recursion, + for_loops_over_fallibles, + unused_braces, + unused_labels, + keyword_idents, + non_ascii_idents, + variant_size_differences, single_use_lifetimes, - // Probably can be put into `#[deny]`. - future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, - unreachable_pub, + // Probably can be put into `#[deny]`. 
+ future_incompatible, + let_underscore, + break_with_label_and_loop, + duplicate_macro_attributes, + exported_private_dependencies, + large_assignments, + overlapping_range_endpoints, + semicolon_in_expressions_from_macros, + noop_method_call, + unreachable_pub, )] // Deny lints. // Some of these are `#[allow]`'ed on a per-case basis. @@ -58,28 +58,28 @@ nonstandard_style )] #![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, + // FIXME: this lint affects crates outside of + // `database/` for some reason, allow for now. + clippy::cargo_common_metadata, - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... - // although it is sometimes nice. - clippy::must_use_candidate, + // FIXME: adding `#[must_use]` onto everything + // might just be more annoying than useful... + // although it is sometimes nice. + clippy::must_use_candidate, - // FIXME: good lint but too many false positives - // with our `Env` + `RwLock` setup. - clippy::significant_drop_tightening, + // FIXME: good lint but too many false positives + // with our `Env` + `RwLock` setup. + clippy::significant_drop_tightening, - // FIXME: good lint but is less clear in most cases. - clippy::items_after_statements, + // FIXME: good lint but is less clear in most cases. + clippy::items_after_statements, - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, + clippy::module_name_repetitions, + clippy::module_inception, + clippy::redundant_pub_crate, + clippy::option_if_let_else, - // unused_crate_dependencies, // false-positive with `paste` + // unused_crate_dependencies, // false-positive with `paste` )] // Allow some lints when running in debug mode. 
#![cfg_attr( From 7207fbd17b5f4207d6cb5df516b693267b5075d7 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Tue, 20 Aug 2024 22:56:18 +0000 Subject: [PATCH 041/104] Binaries: add cuprated skeleton (#258) * add cuprated skeleton * fmt and add deny exception --- Cargo.lock | 4 ++++ Cargo.toml | 1 + binaries/cuprated/Cargo.toml | 13 +++++++++++++ binaries/cuprated/src/blockchain.rs | 6 ++++++ binaries/cuprated/src/blockchain/manager.rs | 1 + binaries/cuprated/src/blockchain/syncer.rs | 1 + binaries/cuprated/src/config.rs | 1 + binaries/cuprated/src/main.rs | 9 +++++++++ binaries/cuprated/src/p2p.rs | 5 +++++ binaries/cuprated/src/p2p/request_handler.rs | 1 + binaries/cuprated/src/rpc.rs | 5 +++++ binaries/cuprated/src/rpc/request_handler.rs | 1 + binaries/cuprated/src/txpool.rs | 3 +++ deny.toml | 2 +- 14 files changed, 52 insertions(+), 1 deletion(-) create mode 100644 binaries/cuprated/Cargo.toml create mode 100644 binaries/cuprated/src/blockchain.rs create mode 100644 binaries/cuprated/src/blockchain/manager.rs create mode 100644 binaries/cuprated/src/blockchain/syncer.rs create mode 100644 binaries/cuprated/src/config.rs create mode 100644 binaries/cuprated/src/main.rs create mode 100644 binaries/cuprated/src/p2p.rs create mode 100644 binaries/cuprated/src/p2p/request_handler.rs create mode 100644 binaries/cuprated/src/rpc.rs create mode 100644 binaries/cuprated/src/rpc/request_handler.rs create mode 100644 binaries/cuprated/src/txpool.rs diff --git a/Cargo.lock b/Cargo.lock index 3945896..052b1ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -881,6 +881,10 @@ dependencies = [ "thiserror", ] +[[package]] +name = "cuprated" +version = "0.1.0" + [[package]] name = "curve25519-dalek" version = "4.1.3" diff --git a/Cargo.toml b/Cargo.toml index 71efcca..06b49a0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,7 @@ resolver = "2" members = [ + "binaries/cuprated", "consensus", "consensus/fast-sync", "consensus/rules", diff --git a/binaries/cuprated/Cargo.toml 
b/binaries/cuprated/Cargo.toml new file mode 100644 index 0000000..b524390 --- /dev/null +++ b/binaries/cuprated/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "cuprated" +version = "0.1.0" +edition = "2021" +description = "The Cuprate Monero Rust node." +license = "AGPL-3.0-only" +authors = ["Boog900", "hinto-janai", "SyntheticBird45"] +repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated" + +[dependencies] + +[lints] +workspace = true diff --git a/binaries/cuprated/src/blockchain.rs b/binaries/cuprated/src/blockchain.rs new file mode 100644 index 0000000..4abebeb --- /dev/null +++ b/binaries/cuprated/src/blockchain.rs @@ -0,0 +1,6 @@ +//! Blockchain +//! +//! Will contain the chain manager and syncer. + +mod manager; +mod syncer; diff --git a/binaries/cuprated/src/blockchain/manager.rs b/binaries/cuprated/src/blockchain/manager.rs new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/binaries/cuprated/src/blockchain/manager.rs @@ -0,0 +1 @@ + diff --git a/binaries/cuprated/src/blockchain/syncer.rs b/binaries/cuprated/src/blockchain/syncer.rs new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/binaries/cuprated/src/blockchain/syncer.rs @@ -0,0 +1 @@ + diff --git a/binaries/cuprated/src/config.rs b/binaries/cuprated/src/config.rs new file mode 100644 index 0000000..d613c1f --- /dev/null +++ b/binaries/cuprated/src/config.rs @@ -0,0 +1 @@ +//! cuprated config diff --git a/binaries/cuprated/src/main.rs b/binaries/cuprated/src/main.rs new file mode 100644 index 0000000..918429c --- /dev/null +++ b/binaries/cuprated/src/main.rs @@ -0,0 +1,9 @@ +mod blockchain; +mod config; +mod p2p; +mod rpc; +mod txpool; + +fn main() { + todo!() +} diff --git a/binaries/cuprated/src/p2p.rs b/binaries/cuprated/src/p2p.rs new file mode 100644 index 0000000..f5b72ba --- /dev/null +++ b/binaries/cuprated/src/p2p.rs @@ -0,0 +1,5 @@ +//! P2P +//! +//! Will handle initiating the P2P and contains a protocol request handler. 
+ +mod request_handler; diff --git a/binaries/cuprated/src/p2p/request_handler.rs b/binaries/cuprated/src/p2p/request_handler.rs new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/binaries/cuprated/src/p2p/request_handler.rs @@ -0,0 +1 @@ + diff --git a/binaries/cuprated/src/rpc.rs b/binaries/cuprated/src/rpc.rs new file mode 100644 index 0000000..80b2789 --- /dev/null +++ b/binaries/cuprated/src/rpc.rs @@ -0,0 +1,5 @@ +//! RPC +//! +//! Will contain the code to initiate the RPC and a request handler. + +mod request_handler; diff --git a/binaries/cuprated/src/rpc/request_handler.rs b/binaries/cuprated/src/rpc/request_handler.rs new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/binaries/cuprated/src/rpc/request_handler.rs @@ -0,0 +1 @@ + diff --git a/binaries/cuprated/src/txpool.rs b/binaries/cuprated/src/txpool.rs new file mode 100644 index 0000000..a6f05e7 --- /dev/null +++ b/binaries/cuprated/src/txpool.rs @@ -0,0 +1,3 @@ +//! Transaction Pool +//! +//! Will handle initiating the tx-pool, providing the preprocessor required for the dandelion pool. 
diff --git a/deny.toml b/deny.toml index 85e7da2..f469d06 100644 --- a/deny.toml +++ b/deny.toml @@ -133,7 +133,7 @@ confidence-threshold = 0.8 # aren't accepted for every possible crate as with the normal allow list exceptions = [ # Cuprate (AGPL-3.0) - # { allow = ["AGPL-3.0"], name = "cuprated", version = "*" } + { allow = ["AGPL-3.0"], name = "cuprated", version = "*" } # Each entry is the crate and version constraint, and its specific allow # list From ccff75057ecec4baeede4c6d78c2e050f8c332c4 Mon Sep 17 00:00:00 2001 From: SyntheticBird <118022351+SyntheticBird45@users.noreply.github.com> Date: Wed, 21 Aug 2024 23:33:21 +0000 Subject: [PATCH 042/104] Update Zed in ENVIRONMENT-ADVICE.md (#259) Update ENVIRONMENT-ADVICE.md Updated information on Zed editor --- misc/ENVIRONMENT-ADVICE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/misc/ENVIRONMENT-ADVICE.md b/misc/ENVIRONMENT-ADVICE.md index 295fabd..b3358b9 100644 --- a/misc/ENVIRONMENT-ADVICE.md +++ b/misc/ENVIRONMENT-ADVICE.md @@ -87,4 +87,4 @@ On Rust-analyzer's VSCode plugin, you can add the following configuration if you If you still deal with lags on VSCode or Neovim, you could try the following IDE: - RustRover: It have been reported to have excellent performance at managing huge workspace. It use its own fine-tuned plugins by jetbrains. -- Zed: Rust-written IDE focused on performance. Still in beta and macOS only. \ No newline at end of file +- Zed: Rust-written IDE focused on performance. Stable on MacOS and Linux (requires Vulkan driver, therefore unable in virtual machines). 
From 8655a3f5e5b64f42b6ebc8611165f299248218e7 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Thu, 22 Aug 2024 00:18:44 +0000 Subject: [PATCH 043/104] dandelion-tower: improve API (#257) * init * reduce the jobs handled by the dandelion pool * fix docs * resolve todo * review changes * Update p2p/dandelion-tower/src/pool/incoming_tx.rs Co-authored-by: hinto-janai * Update p2p/dandelion-tower/src/pool/incoming_tx.rs Co-authored-by: hinto-janai * `PId` -> `PeerId` --------- Co-authored-by: hinto-janai --- p2p/dandelion-tower/src/lib.rs | 10 +- p2p/dandelion-tower/src/pool.rs | 509 -------------------- p2p/dandelion-tower/src/pool/incoming_tx.rs | 113 +++++ p2p/dandelion-tower/src/pool/manager.rs | 294 +++++++++++ p2p/dandelion-tower/src/pool/mod.rs | 145 ++++++ p2p/dandelion-tower/src/router.rs | 51 +- p2p/dandelion-tower/src/tests/mod.rs | 29 +- p2p/dandelion-tower/src/tests/pool.rs | 15 +- p2p/dandelion-tower/src/traits.rs | 28 +- 9 files changed, 595 insertions(+), 599 deletions(-) delete mode 100644 p2p/dandelion-tower/src/pool.rs create mode 100644 p2p/dandelion-tower/src/pool/incoming_tx.rs create mode 100644 p2p/dandelion-tower/src/pool/manager.rs create mode 100644 p2p/dandelion-tower/src/pool/mod.rs diff --git a/p2p/dandelion-tower/src/lib.rs b/p2p/dandelion-tower/src/lib.rs index aa622f3..60b5ea5 100644 --- a/p2p/dandelion-tower/src/lib.rs +++ b/p2p/dandelion-tower/src/lib.rs @@ -2,17 +2,17 @@ //! //! This crate implements [dandelion++](https://arxiv.org/pdf/1805.11060.pdf), using [`tower`]. //! -//! This crate provides 2 [`tower::Service`]s, a [`DandelionRouter`] and a [`DandelionPool`](pool::DandelionPool). +//! This crate provides 2 [`tower::Service`]s, a [`DandelionRouter`] and a [`DandelionPoolManager`](pool::DandelionPoolManager). //! The router is pretty minimal and only handles the absolute necessary data to route transactions, whereas the //! pool keeps track of all data necessary for dandelion++ but requires you to provide a backing tx-pool. //! -//! 
This split was done not because the [`DandelionPool`](pool::DandelionPool) is unnecessary but because it is hard -//! to cover a wide range of projects when abstracting over the tx-pool. Not using the [`DandelionPool`](pool::DandelionPool) +//! This split was done not because the [`DandelionPoolManager`](pool::DandelionPoolManager) is unnecessary but because it is hard +//! to cover a wide range of projects when abstracting over the tx-pool. Not using the [`DandelionPoolManager`](pool::DandelionPoolManager) //! requires you to implement part of the paper yourself. //! //! # Features //! -//! This crate only has one feature `txpool` which enables [`DandelionPool`](pool::DandelionPool). +//! This crate only has one feature `txpool` which enables [`DandelionPoolManager`](pool::DandelionPoolManager). //! //! # Needed Services //! @@ -45,7 +45,7 @@ //! //! ## Backing Pool //! -//! ([`DandelionPool`](pool::DandelionPool) only) +//! ([`DandelionPoolManager`](pool::DandelionPoolManager) only) //! //! This service is a backing tx-pool, in memory or on disk. //! The backing pool should have a request of [`TxStoreRequest`](traits::TxStoreRequest) and a response of diff --git a/p2p/dandelion-tower/src/pool.rs b/p2p/dandelion-tower/src/pool.rs deleted file mode 100644 index 5f4f734..0000000 --- a/p2p/dandelion-tower/src/pool.rs +++ /dev/null @@ -1,509 +0,0 @@ -//! # Dandelion++ Pool -//! -//! This module contains [`DandelionPool`] which is a thin wrapper around a backing transaction store, -//! which fully implements the dandelion++ protocol. -//! -//! ### How To Get Txs From [`DandelionPool`]. -//! -//! [`DandelionPool`] does not provide a full tx-pool API. You cannot retrieve transactions from it or -//! check what transactions are in it, to do this you must keep a handle to the backing transaction store -//! yourself. -//! -//! The reason for this is, the [`DandelionPool`] will only itself be passing these requests onto the backing -//! 
pool, so it makes sense to remove the "middle man". -//! -//! ### Keep Stem Transactions Hidden -//! -//! When using your handle to the backing store it must be remembered to keep transactions in the stem pool hidden. -//! So handle any requests to the tx-pool like the stem side of the pool does not exist. -use std::{ - collections::{HashMap, HashSet}, - future::Future, - hash::Hash, - marker::PhantomData, - pin::Pin, - task::{Context, Poll}, - time::Duration, -}; - -use futures::{FutureExt, StreamExt}; -use rand::prelude::*; -use rand_distr::Exp; -use tokio::{ - sync::{mpsc, oneshot}, - task::JoinSet, -}; -use tokio_util::{sync::PollSender, time::DelayQueue}; -use tower::{Service, ServiceExt}; -use tracing::Instrument; - -use crate::{ - traits::{TxStoreRequest, TxStoreResponse}, - DandelionConfig, DandelionRouteReq, DandelionRouterError, State, TxState, -}; - -/// Start the [`DandelionPool`]. -/// -/// This function spawns the [`DandelionPool`] and returns [`DandelionPoolService`] which can be used to send -/// requests to the pool. -/// -/// ### Args -/// -/// - `buffer_size` is the size of the channel's buffer between the [`DandelionPoolService`] and [`DandelionPool`]. -/// - `dandelion_router` is the router service, kept generic instead of [`DandelionRouter`](crate::DandelionRouter) to allow -/// user to customise routing functionality. -/// - `backing_pool` is the backing transaction storage service -/// - `config` is [`DandelionConfig`]. 
-pub fn start_dandelion_pool( - buffer_size: usize, - dandelion_router: R, - backing_pool: P, - config: DandelionConfig, -) -> DandelionPoolService -where - Tx: Clone + Send + 'static, - TxID: Hash + Eq + Clone + Send + 'static, - PID: Hash + Eq + Clone + Send + 'static, - P: Service< - TxStoreRequest, - Response = TxStoreResponse, - Error = tower::BoxError, - > + Send - + 'static, - P::Future: Send + 'static, - R: Service, Response = State, Error = DandelionRouterError> - + Send - + 'static, - R::Future: Send + 'static, -{ - let (tx, rx) = mpsc::channel(buffer_size); - - let pool = DandelionPool { - dandelion_router, - backing_pool, - routing_set: JoinSet::new(), - stem_origins: HashMap::new(), - embargo_timers: DelayQueue::new(), - embargo_dist: Exp::new(1.0 / config.average_embargo_timeout().as_secs_f64()).unwrap(), - config, - _tx: PhantomData, - }; - - let span = tracing::debug_span!("dandelion_pool"); - - tokio::spawn(pool.run(rx).instrument(span)); - - DandelionPoolService { - tx: PollSender::new(tx), - } -} - -#[derive(Copy, Clone, Debug, thiserror::Error)] -#[error("The dandelion pool was shutdown")] -pub struct DandelionPoolShutDown; - -/// An incoming transaction for the [`DandelionPool`] to handle. -/// -/// Users may notice there is no way to check if the dandelion-pool wants a tx according to an inventory message like seen -/// in Bitcoin, only having a request for a full tx. Users should look in the *public* backing pool to handle inv messages, -/// and request txs even if they are in the stem pool. -pub struct IncomingTx { - /// The transaction. - /// - /// It is recommended to put this in an [`Arc`](std::sync::Arc) as it needs to be cloned to send to the backing - /// tx pool and [`DandelionRouter`](crate::DandelionRouter) - pub tx: Tx, - /// The transaction ID. - pub tx_id: TxID, - /// The routing state of this transaction. - pub tx_state: TxState, -} - -/// The dandelion tx pool service. 
-#[derive(Clone)] -pub struct DandelionPoolService { - /// The channel to [`DandelionPool`]. - tx: PollSender<(IncomingTx, oneshot::Sender<()>)>, -} - -impl Service> for DandelionPoolService -where - Tx: Clone + Send, - TxID: Hash + Eq + Clone + Send + 'static, - PID: Hash + Eq + Clone + Send + 'static, -{ - type Response = (); - type Error = DandelionPoolShutDown; - type Future = - Pin> + Send + 'static>>; - - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - self.tx.poll_reserve(cx).map_err(|_| DandelionPoolShutDown) - } - - fn call(&mut self, req: IncomingTx) -> Self::Future { - // although the channel isn't sending anything we want to wait for the request to be handled before continuing. - let (tx, rx) = oneshot::channel(); - - let res = self - .tx - .send_item((req, tx)) - .map_err(|_| DandelionPoolShutDown); - - async move { - res?; - rx.await.expect("Oneshot dropped before response!"); - - Ok(()) - } - .boxed() - } -} - -/// The dandelion++ tx pool. -/// -/// See the [module docs](self) for more. -pub struct DandelionPool { - /// The dandelion++ router - dandelion_router: R, - /// The backing tx storage. - backing_pool: P, - /// The set of tasks that are running the future returned from `dandelion_router`. - routing_set: JoinSet<(TxID, Result>)>, - - /// The origin of stem transactions. - stem_origins: HashMap>, - - /// Current stem pool embargo timers. - embargo_timers: DelayQueue, - /// The distrobution to sample to get embargo timers. - embargo_dist: Exp, - - /// The d++ config. 
- config: DandelionConfig, - - _tx: PhantomData, -} - -impl DandelionPool -where - Tx: Clone + Send, - TxID: Hash + Eq + Clone + Send + 'static, - PID: Hash + Eq + Clone + Send + 'static, - P: Service< - TxStoreRequest, - Response = TxStoreResponse, - Error = tower::BoxError, - >, - P::Future: Send + 'static, - R: Service, Response = State, Error = DandelionRouterError>, - R::Future: Send + 'static, -{ - /// Stores the tx in the backing pools stem pool, setting the embargo timer, stem origin and steming the tx. - async fn store_tx_and_stem( - &mut self, - tx: Tx, - tx_id: TxID, - from: Option, - ) -> Result<(), tower::BoxError> { - self.backing_pool - .ready() - .await? - .call(TxStoreRequest::Store( - tx.clone(), - tx_id.clone(), - State::Stem, - )) - .await?; - - let embargo_timer = self.embargo_dist.sample(&mut thread_rng()); - tracing::debug!( - "Setting embargo timer for stem tx: {} seconds.", - embargo_timer - ); - self.embargo_timers - .insert(tx_id.clone(), Duration::from_secs_f64(embargo_timer)); - - self.stem_tx(tx, tx_id, from).await - } - - /// Stems the tx, setting the stem origin, if it wasn't already set. - /// - /// This function does not add the tx to the backing pool. - async fn stem_tx( - &mut self, - tx: Tx, - tx_id: TxID, - from: Option, - ) -> Result<(), tower::BoxError> { - if let Some(peer) = &from { - self.stem_origins - .entry(tx_id.clone()) - .or_default() - .insert(peer.clone()); - } - - let state = from - .map(|from| TxState::Stem { from }) - .unwrap_or(TxState::Local); - - let fut = self - .dandelion_router - .ready() - .await? - .call(DandelionRouteReq { - tx, - state: state.clone(), - }); - - self.routing_set - .spawn(fut.map(|res| (tx_id, res.map_err(|_| state)))); - Ok(()) - } - - /// Stores the tx in the backing pool and fluffs the tx, removing the stem data for this tx. 
- async fn store_and_fluff_tx(&mut self, tx: Tx, tx_id: TxID) -> Result<(), tower::BoxError> { - // fluffs the tx first to prevent timing attacks where we could fluff at different average times - // depending on if the tx was in the stem pool already or not. - // Massively overkill but this is a minimal change. - self.fluff_tx(tx.clone(), tx_id.clone()).await?; - - // Remove the tx from the maps used during the stem phase. - self.stem_origins.remove(&tx_id); - - self.backing_pool - .ready() - .await? - .call(TxStoreRequest::Store(tx, tx_id, State::Fluff)) - .await?; - - // The key for this is *Not* the tx_id, it is given on insert, so just keep the timer in the - // map. These timers should be relatively short, so it shouldn't be a problem. - //self.embargo_timers.try_remove(&tx_id); - - Ok(()) - } - - /// Fluffs a tx, does not add the tx to the tx pool. - async fn fluff_tx(&mut self, tx: Tx, tx_id: TxID) -> Result<(), tower::BoxError> { - let fut = self - .dandelion_router - .ready() - .await? - .call(DandelionRouteReq { - tx, - state: TxState::Fluff, - }); - - self.routing_set - .spawn(fut.map(|res| (tx_id, res.map_err(|_| TxState::Fluff)))); - Ok(()) - } - - /// Function to handle an incoming [`DandelionPoolRequest::IncomingTx`]. - async fn handle_incoming_tx( - &mut self, - tx: Tx, - tx_state: TxState, - tx_id: TxID, - ) -> Result<(), tower::BoxError> { - let TxStoreResponse::Contains(have_tx) = self - .backing_pool - .ready() - .await? - .call(TxStoreRequest::Contains(tx_id.clone())) - .await? - else { - panic!("Backing tx pool responded with wrong response for request."); - }; - // If we have already fluffed this tx then we don't need to do anything. 
- if have_tx == Some(State::Fluff) { - tracing::debug!("Already fluffed incoming tx, ignoring."); - return Ok(()); - } - - match tx_state { - TxState::Stem { from } => { - if self - .stem_origins - .get(&tx_id) - .is_some_and(|peers| peers.contains(&from)) - { - tracing::debug!("Received stem tx twice from same peer, fluffing it"); - // The same peer sent us a tx twice, fluff it. - self.promote_and_fluff_tx(tx_id).await - } else { - // This could be a new tx or it could have already been stemed, but we still stem it again - // unless the same peer sends us a tx twice. - tracing::debug!("Steming incoming tx"); - self.store_tx_and_stem(tx, tx_id, Some(from)).await - } - } - TxState::Fluff => { - tracing::debug!("Fluffing incoming tx"); - self.store_and_fluff_tx(tx, tx_id).await - } - TxState::Local => { - // If we have already stemed this tx then nothing to do. - if have_tx.is_some() { - tracing::debug!("Received a local tx that we already have, skipping"); - return Ok(()); - } - tracing::debug!("Steming local transaction"); - self.store_tx_and_stem(tx, tx_id, None).await - } - } - } - - /// Promotes a tx to the clear pool. - async fn promote_tx(&mut self, tx_id: TxID) -> Result<(), tower::BoxError> { - // Remove the tx from the maps used during the stem phase. - self.stem_origins.remove(&tx_id); - - // The key for this is *Not* the tx_id, it is given on insert, so just keep the timer in the - // map. These timers should be relatively short, so it shouldn't be a problem. - //self.embargo_timers.try_remove(&tx_id); - - self.backing_pool - .ready() - .await? - .call(TxStoreRequest::Promote(tx_id)) - .await?; - - Ok(()) - } - - /// Promotes a tx to the public fluff pool and fluffs the tx. - async fn promote_and_fluff_tx(&mut self, tx_id: TxID) -> Result<(), tower::BoxError> { - tracing::debug!("Promoting transaction to public pool and fluffing it."); - - let TxStoreResponse::Transaction(tx) = self - .backing_pool - .ready() - .await? 
- .call(TxStoreRequest::Get(tx_id.clone())) - .await? - else { - panic!("Backing tx pool responded with wrong response for request."); - }; - - let Some((tx, state)) = tx else { - tracing::debug!("Could not find tx, skipping."); - return Ok(()); - }; - - if state == State::Fluff { - tracing::debug!("Transaction already fluffed, skipping."); - return Ok(()); - } - - self.promote_tx(tx_id.clone()).await?; - self.fluff_tx(tx, tx_id).await - } - - /// Returns a tx stored in the fluff _OR_ stem pool. - async fn get_tx_from_pool(&mut self, tx_id: TxID) -> Result, tower::BoxError> { - let TxStoreResponse::Transaction(tx) = self - .backing_pool - .ready() - .await? - .call(TxStoreRequest::Get(tx_id)) - .await? - else { - panic!("Backing tx pool responded with wrong response for request."); - }; - - Ok(tx.map(|tx| tx.0)) - } - - /// Starts the [`DandelionPool`]. - async fn run( - mut self, - mut rx: mpsc::Receiver<(IncomingTx, oneshot::Sender<()>)>, - ) { - tracing::debug!("Starting dandelion++ tx-pool, config: {:?}", self.config); - - // On start up we just fluff all txs left in the stem pool. - let Ok(TxStoreResponse::IDs(ids)) = (&mut self.backing_pool) - .oneshot(TxStoreRequest::IDsInStemPool) - .await - else { - tracing::error!("Failed to get transactions in stem pool."); - return; - }; - - tracing::debug!( - "Fluffing {} txs that are currently in the stem pool", - ids.len() - ); - - for id in ids { - if let Err(e) = self.promote_and_fluff_tx(id).await { - tracing::error!("Failed to fluff tx in the stem pool at start up, {e}."); - return; - } - } - - loop { - tracing::trace!("Waiting for next event."); - tokio::select! { - // biased to handle current txs before routing new ones. 
- biased; - Some(fired) = self.embargo_timers.next() => { - tracing::debug!("Embargo timer fired, did not see stem tx in time."); - - let tx_id = fired.into_inner(); - if let Err(e) = self.promote_and_fluff_tx(tx_id).await { - tracing::error!("Error handling fired embargo timer: {e}"); - return; - } - } - Some(Ok((tx_id, res))) = self.routing_set.join_next() => { - tracing::trace!("Received d++ routing result."); - - let res = match res { - Ok(State::Fluff) => { - tracing::debug!("Transaction was fluffed upgrading it to the public pool."); - self.promote_tx(tx_id).await - } - Err(tx_state) => { - tracing::debug!("Error routing transaction, trying again."); - - match self.get_tx_from_pool(tx_id.clone()).await { - Ok(Some(tx)) => match tx_state { - TxState::Fluff => self.fluff_tx(tx, tx_id).await, - TxState::Stem { from } => self.stem_tx(tx, tx_id, Some(from)).await, - TxState::Local => self.stem_tx(tx, tx_id, None).await, - } - Err(e) => Err(e), - _ => continue, - } - } - Ok(State::Stem) => continue, - }; - - if let Err(e) = res { - tracing::error!("Error handling transaction routing return: {e}"); - return; - } - } - req = rx.recv() => { - tracing::debug!("Received new tx to route."); - - let Some((IncomingTx { tx, tx_state, tx_id }, res_tx)) = req else { - return; - }; - - if let Err(e) = self.handle_incoming_tx(tx, tx_state, tx_id).await { - let _ = res_tx.send(()); - - tracing::error!("Error handling transaction in dandelion pool: {e}"); - return; - } - let _ = res_tx.send(()); - - } - } - } - } -} diff --git a/p2p/dandelion-tower/src/pool/incoming_tx.rs b/p2p/dandelion-tower/src/pool/incoming_tx.rs new file mode 100644 index 0000000..c9a30de --- /dev/null +++ b/p2p/dandelion-tower/src/pool/incoming_tx.rs @@ -0,0 +1,113 @@ +//! Contains [`IncomingTx`] and [`IncomingTxBuilder`] +use crate::{State, TxState}; + +/// An incoming transaction that has gone through the preprocessing stage. +pub struct IncomingTx { + /// The transaction. 
+ pub(crate) tx: Tx, + /// The transaction ID. + pub(crate) tx_id: TxId, + /// The routing state of the transaction. + pub(crate) routing_state: TxState, +} + +/// An [`IncomingTx`] builder. +/// +/// The const generics here are used to restrict what methods can be called. +/// +/// - `RS`: routing state; a `bool` for if the routing state is set +/// - `DBS`: database state; a `bool` for if the state in the DB is set +pub struct IncomingTxBuilder { + /// The transaction. + tx: Tx, + /// The transaction ID. + tx_id: TxId, + /// The routing state of the transaction. + routing_state: Option>, + /// The state of this transaction in the DB. + state_in_db: Option, +} + +impl IncomingTxBuilder { + /// Creates a new [`IncomingTxBuilder`]. + pub fn new(tx: Tx, tx_id: TxId) -> Self { + Self { + tx, + tx_id, + routing_state: None, + state_in_db: None, + } + } +} + +impl IncomingTxBuilder { + /// Adds the routing state to the builder. + /// + /// The routing state is the origin of this transaction from our perspective. + pub fn with_routing_state( + self, + state: TxState, + ) -> IncomingTxBuilder { + IncomingTxBuilder { + tx: self.tx, + tx_id: self.tx_id, + routing_state: Some(state), + state_in_db: self.state_in_db, + } + } +} + +impl IncomingTxBuilder { + /// Adds the database state to the builder. + /// + /// If the transaction is not in the DB already then the state should be [`None`]. + pub fn with_state_in_db( + self, + state: Option, + ) -> IncomingTxBuilder { + IncomingTxBuilder { + tx: self.tx, + tx_id: self.tx_id, + routing_state: self.routing_state, + state_in_db: state, + } + } +} + +impl IncomingTxBuilder { + /// Builds the [`IncomingTx`]. + /// + /// If this returns [`None`] then the transaction does not need to be given to the dandelion pool + /// manager. 
+ pub fn build(self) -> Option> { + let routing_state = self.routing_state.unwrap(); + + if self.state_in_db == Some(State::Fluff) { + return None; + } + + Some(IncomingTx { + tx: self.tx, + tx_id: self.tx_id, + routing_state, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_builder() { + IncomingTxBuilder::new(1, 2) + .with_routing_state(TxState::Stem { from: 3 }) + .with_state_in_db(None) + .build(); + + IncomingTxBuilder::new(1, 2) + .with_state_in_db(None) + .with_routing_state(TxState::Stem { from: 3 }) + .build(); + } +} diff --git a/p2p/dandelion-tower/src/pool/manager.rs b/p2p/dandelion-tower/src/pool/manager.rs new file mode 100644 index 0000000..9e1572e --- /dev/null +++ b/p2p/dandelion-tower/src/pool/manager.rs @@ -0,0 +1,294 @@ +use std::{ + collections::{HashMap, HashSet}, + hash::Hash, + marker::PhantomData, + time::Duration, +}; + +use futures::{FutureExt, StreamExt}; +use rand::prelude::*; +use rand_distr::Exp; +use tokio::{ + sync::{mpsc, oneshot}, + task::JoinSet, +}; +use tokio_util::time::DelayQueue; +use tower::{Service, ServiceExt}; + +use crate::{ + pool::IncomingTx, + traits::{TxStoreRequest, TxStoreResponse}, + DandelionConfig, DandelionRouteReq, DandelionRouterError, State, TxState, +}; + +#[derive(Copy, Clone, Debug, thiserror::Error)] +#[error("The dandelion pool was shutdown")] +pub struct DandelionPoolShutDown; + +/// The dandelion++ pool manager. +/// +/// See the [module docs](super) for more. +pub struct DandelionPoolManager { + /// The dandelion++ router + pub(crate) dandelion_router: R, + /// The backing tx storage. + pub(crate) backing_pool: P, + /// The set of tasks that are running the future returned from `dandelion_router`. + pub(crate) routing_set: JoinSet<(TxId, Result>)>, + + /// The origin of stem transactions. + pub(crate) stem_origins: HashMap>, + + /// Current stem pool embargo timers. + pub(crate) embargo_timers: DelayQueue, + /// The distrobution to sample to get embargo timers. 
+ pub(crate) embargo_dist: Exp, + + /// The d++ config. + pub(crate) config: DandelionConfig, + + pub(crate) _tx: PhantomData, +} + +impl DandelionPoolManager +where + Tx: Clone + Send, + TxId: Hash + Eq + Clone + Send + 'static, + PeerId: Hash + Eq + Clone + Send + 'static, + P: Service, Response = TxStoreResponse, Error = tower::BoxError>, + P::Future: Send + 'static, + R: Service, Response = State, Error = DandelionRouterError>, + R::Future: Send + 'static, +{ + /// Adds a new embargo timer to the running timers, with a duration pulled from [`Self::embargo_dist`] + fn add_embargo_timer_for_tx(&mut self, tx_id: TxId) { + let embargo_timer = self.embargo_dist.sample(&mut thread_rng()); + tracing::debug!( + "Setting embargo timer for stem tx: {} seconds.", + embargo_timer + ); + + self.embargo_timers + .insert(tx_id, Duration::from_secs_f64(embargo_timer)); + } + + /// Stems the tx, setting the stem origin, if it wasn't already set. + /// + /// This function does not add the tx to the backing pool. + async fn stem_tx( + &mut self, + tx: Tx, + tx_id: TxId, + from: Option, + ) -> Result<(), tower::BoxError> { + if let Some(peer) = &from { + self.stem_origins + .entry(tx_id.clone()) + .or_default() + .insert(peer.clone()); + } + + let state = from + .map(|from| TxState::Stem { from }) + .unwrap_or(TxState::Local); + + let fut = self + .dandelion_router + .ready() + .await? + .call(DandelionRouteReq { + tx, + state: state.clone(), + }); + + self.routing_set + .spawn(fut.map(|res| (tx_id, res.map_err(|_| state)))); + Ok(()) + } + + /// Fluffs a tx, does not add the tx to the tx pool. + async fn fluff_tx(&mut self, tx: Tx, tx_id: TxId) -> Result<(), tower::BoxError> { + let fut = self + .dandelion_router + .ready() + .await? + .call(DandelionRouteReq { + tx, + state: TxState::Fluff, + }); + + self.routing_set + .spawn(fut.map(|res| (tx_id, res.map_err(|_| TxState::Fluff)))); + Ok(()) + } + + /// Function to handle an [`IncomingTx`]. 
+ async fn handle_incoming_tx( + &mut self, + tx: Tx, + tx_state: TxState, + tx_id: TxId, + ) -> Result<(), tower::BoxError> { + match tx_state { + TxState::Stem { from } => { + if self + .stem_origins + .get(&tx_id) + .is_some_and(|peers| peers.contains(&from)) + { + tracing::debug!("Received stem tx twice from same peer, fluffing it"); + // The same peer sent us a tx twice, fluff it. + self.promote_and_fluff_tx(tx_id).await?; + } else { + // This could be a new tx or it could have already been stemed, but we still stem it again + // unless the same peer sends us a tx twice. + tracing::debug!("Steming incoming tx"); + self.stem_tx(tx, tx_id.clone(), Some(from)).await?; + self.add_embargo_timer_for_tx(tx_id); + } + } + TxState::Fluff => { + tracing::debug!("Fluffing incoming tx"); + self.fluff_tx(tx, tx_id).await?; + } + TxState::Local => { + tracing::debug!("Steming local transaction"); + self.stem_tx(tx, tx_id.clone(), None).await?; + self.add_embargo_timer_for_tx(tx_id); + } + } + + Ok(()) + } + + /// Promotes a tx to the clear pool. + async fn promote_tx(&mut self, tx_id: TxId) -> Result<(), tower::BoxError> { + // Remove the tx from the maps used during the stem phase. + self.stem_origins.remove(&tx_id); + + // The key for this is *Not* the tx_id, it is given on insert, so just keep the timer in the + // map. These timers should be relatively short, so it shouldn't be a problem. + //self.embargo_timers.try_remove(&tx_id); + + self.backing_pool + .ready() + .await? + .call(TxStoreRequest::Promote(tx_id)) + .await?; + + Ok(()) + } + + /// Promotes a tx to the public fluff pool and fluffs the tx. + async fn promote_and_fluff_tx(&mut self, tx_id: TxId) -> Result<(), tower::BoxError> { + tracing::debug!("Promoting transaction to public pool and fluffing it."); + + let TxStoreResponse::Transaction(tx) = self + .backing_pool + .ready() + .await? + .call(TxStoreRequest::Get(tx_id.clone())) + .await? 
+ else { + panic!("Backing tx pool responded with wrong response for request."); + }; + + let Some((tx, state)) = tx else { + tracing::debug!("Could not find tx, skipping."); + return Ok(()); + }; + + if state == State::Fluff { + tracing::debug!("Transaction already fluffed, skipping."); + return Ok(()); + } + + self.promote_tx(tx_id.clone()).await?; + self.fluff_tx(tx, tx_id).await + } + + /// Returns a tx stored in the fluff _OR_ stem pool. + async fn get_tx_from_pool(&mut self, tx_id: TxId) -> Result, tower::BoxError> { + let TxStoreResponse::Transaction(tx) = self + .backing_pool + .ready() + .await? + .call(TxStoreRequest::Get(tx_id)) + .await? + else { + panic!("Backing tx pool responded with wrong response for request."); + }; + + Ok(tx.map(|tx| tx.0)) + } + + /// Starts the [`DandelionPoolManager`]. + pub(crate) async fn run( + mut self, + mut rx: mpsc::Receiver<(IncomingTx, oneshot::Sender<()>)>, + ) { + tracing::debug!("Starting dandelion++ tx-pool, config: {:?}", self.config); + + loop { + tracing::trace!("Waiting for next event."); + tokio::select! { + // biased to handle current txs before routing new ones. 
+ biased; + Some(fired) = self.embargo_timers.next() => { + tracing::debug!("Embargo timer fired, did not see stem tx in time."); + + let tx_id = fired.into_inner(); + if let Err(e) = self.promote_and_fluff_tx(tx_id).await { + tracing::error!("Error handling fired embargo timer: {e}"); + return; + } + } + Some(Ok((tx_id, res))) = self.routing_set.join_next() => { + tracing::trace!("Received d++ routing result."); + + let res = match res { + Ok(State::Fluff) => { + tracing::debug!("Transaction was fluffed upgrading it to the public pool."); + self.promote_tx(tx_id).await + } + Err(tx_state) => { + tracing::debug!("Error routing transaction, trying again."); + + match self.get_tx_from_pool(tx_id.clone()).await { + Ok(Some(tx)) => match tx_state { + TxState::Fluff => self.fluff_tx(tx, tx_id).await, + TxState::Stem { from } => self.stem_tx(tx, tx_id, Some(from)).await, + TxState::Local => self.stem_tx(tx, tx_id, None).await, + } + Err(e) => Err(e), + _ => continue, + } + } + Ok(State::Stem) => continue, + }; + + if let Err(e) = res { + tracing::error!("Error handling transaction routing return: {e}"); + return; + } + } + req = rx.recv() => { + tracing::debug!("Received new tx to route."); + + let Some((IncomingTx { tx, tx_id, routing_state }, res_tx)) = req else { + return; + }; + + if let Err(e) = self.handle_incoming_tx(tx, routing_state, tx_id).await { + let _ = res_tx.send(()); + + tracing::error!("Error handling transaction in dandelion pool: {e}"); + return; + } + let _ = res_tx.send(()); + + } + } + } + } +} diff --git a/p2p/dandelion-tower/src/pool/mod.rs b/p2p/dandelion-tower/src/pool/mod.rs new file mode 100644 index 0000000..40a3617 --- /dev/null +++ b/p2p/dandelion-tower/src/pool/mod.rs @@ -0,0 +1,145 @@ +//! # Dandelion++ Pool +//! +//! This module contains [`DandelionPoolManager`] which is a wrapper around a backing transaction store, +//! which fully implements the dandelion++ protocol. +//! +//! 
The [`DandelionPoolManager`] is a middle man between a [preprocessing stage](#preprocessing-stage) and a dandelion router. +//! It handles promoting transactions in the stem state to the fluff state and setting embargo timers on stem state transactions. +//! +//! ### Preprocessing stage +//! +//! The preprocessing stage (not handled in this crate) before giving the transaction to the [`DandelionPoolManager`] +//! should handle: +//! +//! - verifying the tx. +//! - checking if we have the tx in the pool already and giving that information to the [`IncomingTxBuilder`]. +//! - storing the tx in the pool, if it isn't there already. +//! +//! ### Keep Stem Transactions Hidden +//! +//! When using your handle to the backing store it must be remembered to keep transactions in the stem pool hidden. +//! So handle any requests to the tx-pool like the stem side of the pool does not exist. +use std::{ + collections::HashMap, + hash::Hash, + marker::PhantomData, + task::{Context, Poll}, +}; + +use futures::{future::BoxFuture, FutureExt}; +use rand_distr::Exp; +use tokio::{ + sync::{mpsc, oneshot}, + task::JoinSet, +}; +use tokio_util::{sync::PollSender, time::DelayQueue}; +use tower::Service; +use tracing::Instrument; + +use crate::{ + pool::manager::DandelionPoolShutDown, + traits::{TxStoreRequest, TxStoreResponse}, + DandelionConfig, DandelionRouteReq, DandelionRouterError, State, +}; + +mod incoming_tx; +mod manager; + +pub use incoming_tx::{IncomingTx, IncomingTxBuilder}; +pub use manager::DandelionPoolManager; + +/// Start the [`DandelionPoolManager`]. +/// +/// This function spawns the [`DandelionPoolManager`] and returns [`DandelionPoolService`] which can be used to send +/// requests to the pool. +/// +/// ### Args +/// +/// - `buffer_size` is the size of the channel's buffer between the [`DandelionPoolService`] and [`DandelionPoolManager`]. 
+/// - `dandelion_router` is the router service, kept generic instead of [`DandelionRouter`](crate::DandelionRouter) to allow +/// user to customise routing functionality. +/// - `backing_pool` is the backing transaction storage service +/// - `config` is [`DandelionConfig`]. +pub fn start_dandelion_pool_manager( + buffer_size: usize, + dandelion_router: R, + backing_pool: P, + config: DandelionConfig, +) -> DandelionPoolService +where + Tx: Clone + Send + 'static, + TxId: Hash + Eq + Clone + Send + 'static, + PeerId: Hash + Eq + Clone + Send + 'static, + P: Service, Response = TxStoreResponse, Error = tower::BoxError> + + Send + + 'static, + P::Future: Send + 'static, + R: Service, Response = State, Error = DandelionRouterError> + + Send + + 'static, + R::Future: Send + 'static, +{ + let (tx, rx) = mpsc::channel(buffer_size); + + let pool = DandelionPoolManager { + dandelion_router, + backing_pool, + routing_set: JoinSet::new(), + stem_origins: HashMap::new(), + embargo_timers: DelayQueue::new(), + embargo_dist: Exp::new(1.0 / config.average_embargo_timeout().as_secs_f64()).unwrap(), + config, + _tx: PhantomData, + }; + + let span = tracing::debug_span!("dandelion_pool"); + + tokio::spawn(pool.run(rx).instrument(span)); + + DandelionPoolService { + tx: PollSender::new(tx), + } +} + +/// The dandelion pool manager service. +/// +/// Used to send [`IncomingTx`]s to the [`DandelionPoolManager`] +#[derive(Clone)] +pub struct DandelionPoolService { + /// The channel to [`DandelionPoolManager`]. 
+ tx: PollSender<(IncomingTx, oneshot::Sender<()>)>, +} + +impl Service> + for DandelionPoolService +where + Tx: Clone + Send, + TxId: Hash + Eq + Clone + Send + 'static, + PeerId: Hash + Eq + Clone + Send + 'static, +{ + type Response = (); + type Error = DandelionPoolShutDown; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.tx.poll_reserve(cx).map_err(|_| DandelionPoolShutDown) + } + + fn call(&mut self, req: IncomingTx) -> Self::Future { + // although the channel isn't sending anything we want to wait for the request to be handled before continuing. + let (tx, rx) = oneshot::channel(); + + let res = self + .tx + .send_item((req, tx)) + .map_err(|_| DandelionPoolShutDown); + + async move { + res?; + rx.await.expect("Oneshot dropped before response!"); + + Ok(()) + } + .boxed() + } +} diff --git a/p2p/dandelion-tower/src/router.rs b/p2p/dandelion-tower/src/router.rs index c118c0b..edeccae 100644 --- a/p2p/dandelion-tower/src/router.rs +++ b/p2p/dandelion-tower/src/router.rs @@ -6,7 +6,7 @@ //! ### What The Router Does Not Do //! //! It does not handle anything to do with keeping transactions long term, i.e. embargo timers and handling -//! loops in the stem. It is up to implementers to do this if they decide not to use [`DandelionPool`](crate::pool::DandelionPool) +//! loops in the stem. It is up to implementers to do this if they decide not to use [`DandelionPool`](crate::pool::DandelionPoolManager) use std::{ collections::HashMap, hash::Hash, @@ -43,9 +43,9 @@ pub enum DandelionRouterError { } /// A response from an attempt to retrieve an outbound peer. -pub enum OutboundPeer { +pub enum OutboundPeer { /// A peer. - Peer(ID, T), + Peer(Id, T), /// The peer store is exhausted and has no more to return. Exhausted, } @@ -61,28 +61,28 @@ pub enum State { /// The routing state of a transaction. #[derive(Debug, Clone, Eq, PartialEq)] -pub enum TxState { +pub enum TxState { /// Fluff state. 
Fluff, /// Stem state. Stem { - /// The peer who sent us this transaction's ID. - from: ID, + /// The peer who sent us this transaction's Id. + from: Id, }, /// Local - the transaction originated from our node. Local, } /// A request to route a transaction. -pub struct DandelionRouteReq { +pub struct DandelionRouteReq { /// The transaction. pub tx: Tx, /// The transaction state. - pub state: TxState, + pub state: TxState, } /// The dandelion router service. -pub struct DandelionRouter { +pub struct DandelionRouter { // pub(crate) is for tests /// A [`Discover`] where we can get outbound peers from. outbound_peer_discover: Pin>, @@ -95,14 +95,14 @@ pub struct DandelionRouter { epoch_start: Instant, /// The stem our local transactions will be sent to. - local_route: Option, - /// A [`HashMap`] linking peer's IDs to IDs in `stem_peers`. - stem_routes: HashMap, + local_route: Option, + /// A [`HashMap`] linking peer's Ids to Ids in `stem_peers`. + stem_routes: HashMap, /// Peers we are using for stemming. /// /// This will contain peers, even in [`State::Fluff`] to allow us to stem [`TxState::Local`] /// transactions. - pub(crate) stem_peers: HashMap, + pub(crate) stem_peers: HashMap, /// The distribution to sample to get the [`State`], true is [`State::Fluff`]. state_dist: Bernoulli, @@ -116,10 +116,10 @@ pub struct DandelionRouter { _tx: PhantomData, } -impl DandelionRouter +impl DandelionRouter where - ID: Hash + Eq + Clone, - P: TryStream, Error = tower::BoxError>, + Id: Hash + Eq + Clone, + P: TryStream, Error = tower::BoxError>, B: Service, Error = tower::BoxError>, B::Future: Send + 'static, S: Service, Error = tower::BoxError>, @@ -198,7 +198,7 @@ where fn stem_tx( &mut self, tx: Tx, - from: ID, + from: Id, ) -> BoxFuture<'static, Result> { if self.stem_peers.is_empty() { tracing::debug!("Stem peers are empty, fluffing stem transaction."); @@ -258,19 +258,10 @@ where } } -/* -## Generics ## - -Tx: The tx type -ID: Peer Id type - unique identifier for nodes. 
-P: Peer Set discover - where we can get outbound peers from -B: Broadcast service - where we send txs to get diffused. -S: The Peer service - handles routing messages to a single node. - */ -impl Service> for DandelionRouter +impl Service> for DandelionRouter where - ID: Hash + Eq + Clone, - P: TryStream, Error = tower::BoxError>, + Id: Hash + Eq + Clone, + P: TryStream, Error = tower::BoxError>, B: Service, Error = tower::BoxError>, B::Future: Send + 'static, S: Service, Error = tower::BoxError>, @@ -336,7 +327,7 @@ where Poll::Ready(Ok(())) } - fn call(&mut self, req: DandelionRouteReq) -> Self::Future { + fn call(&mut self, req: DandelionRouteReq) -> Self::Future { tracing::trace!(parent: &self.span, "Handling route request."); match req.state { diff --git a/p2p/dandelion-tower/src/tests/mod.rs b/p2p/dandelion-tower/src/tests/mod.rs index d868a99..1c6a3e0 100644 --- a/p2p/dandelion-tower/src/tests/mod.rs +++ b/p2p/dandelion-tower/src/tests/mod.rs @@ -76,11 +76,9 @@ pub fn mock_in_memory_backing_pool< TxID: Clone + Hash + Eq + Send + 'static, >() -> ( impl Service< - TxStoreRequest, - Response = TxStoreResponse, - Future = impl Future, tower::BoxError>> - + Send - + 'static, + TxStoreRequest, + Response = TxStoreResponse, + Future = impl Future, tower::BoxError>> + Send + 'static, Error = tower::BoxError, > + Send + 'static, @@ -90,33 +88,14 @@ pub fn mock_in_memory_backing_pool< let txs_2 = txs.clone(); ( - service_fn(move |req: TxStoreRequest| { + service_fn(move |req: TxStoreRequest| { let txs = txs.clone(); async move { match req { - TxStoreRequest::Store(tx, tx_id, state) => { - txs.lock().unwrap().insert(tx_id, (tx, state)); - Ok(TxStoreResponse::Ok) - } TxStoreRequest::Get(tx_id) => { let tx_state = txs.lock().unwrap().get(&tx_id).cloned(); Ok(TxStoreResponse::Transaction(tx_state)) } - TxStoreRequest::Contains(tx_id) => Ok(TxStoreResponse::Contains( - txs.lock().unwrap().get(&tx_id).map(|res| res.1), - )), - TxStoreRequest::IDsInStemPool => { - // 
horribly inefficient, but it's test code :) - let ids = txs - .lock() - .unwrap() - .iter() - .filter(|(_, (_, state))| matches!(state, State::Stem)) - .map(|tx| tx.0.clone()) - .collect::>(); - - Ok(TxStoreResponse::IDs(ids)) - } TxStoreRequest::Promote(tx_id) => { let _ = txs .lock() diff --git a/p2p/dandelion-tower/src/tests/pool.rs b/p2p/dandelion-tower/src/tests/pool.rs index 4a7c87d..b7fa55e 100644 --- a/p2p/dandelion-tower/src/tests/pool.rs +++ b/p2p/dandelion-tower/src/tests/pool.rs @@ -1,12 +1,11 @@ use std::time::Duration; +use super::*; use crate::{ - pool::{start_dandelion_pool, IncomingTx}, + pool::{start_dandelion_pool_manager, IncomingTx}, DandelionConfig, DandelionRouter, Graph, TxState, }; -use super::*; - #[tokio::test] async fn basic_functionality() { let config = DandelionConfig { @@ -21,9 +20,9 @@ async fn basic_functionality() { let router = DandelionRouter::new(broadcast_svc, outbound_peer_svc, config); - let (pool_svc, pool) = mock_in_memory_backing_pool(); + let (pool_svc, _pool) = mock_in_memory_backing_pool(); - let mut pool_svc = start_dandelion_pool(15, router, pool_svc, config); + let mut pool_svc = start_dandelion_pool_manager(15, router, pool_svc, config); pool_svc .ready() @@ -32,11 +31,13 @@ async fn basic_functionality() { .call(IncomingTx { tx: 0_usize, tx_id: 1_usize, - tx_state: TxState::Fluff, + routing_state: TxState::Fluff, }) .await .unwrap(); - assert!(pool.lock().unwrap().contains_key(&1)); + // TODO: the DandelionPoolManager doesn't handle adding txs to the pool, add more tests here to test + // all functionality. 
+ //assert!(pool.lock().unwrap().contains_key(&1)); assert!(broadcast_rx.try_recv().is_ok()) } diff --git a/p2p/dandelion-tower/src/traits.rs b/p2p/dandelion-tower/src/traits.rs index c84ecf0..bbf6086 100644 --- a/p2p/dandelion-tower/src/traits.rs +++ b/p2p/dandelion-tower/src/traits.rs @@ -8,42 +8,24 @@ pub struct StemRequest(pub Tx); #[cfg(feature = "txpool")] /// A request sent to the backing transaction pool storage. -pub enum TxStoreRequest { - /// A request to store a transaction with the ID to store it under and the pool to store it in. - /// - /// If the tx is already in the pool then do nothing, unless the tx is in the stem pool then move it - /// to the fluff pool, _if this request state is fluff_. - Store(Tx, TxID, crate::State), - /// A request to retrieve a `Tx` with the given ID from the pool, should not remove that tx from the pool. +pub enum TxStoreRequest { + /// A request to retrieve a `Tx` with the given Id from the pool, should not remove that tx from the pool. /// /// Must return [`TxStoreResponse::Transaction`] - Get(TxID), + Get(TxId), /// Promote a transaction from the stem pool to the public pool. /// /// If the tx is already in the fluff pool do nothing. /// /// This should not error if the tx isn't in the pool at all. - Promote(TxID), - /// A request to check if a translation is in the pool. - /// - /// Must return [`TxStoreResponse::Contains`] - Contains(TxID), - /// Returns the IDs of all the transaction in the stem pool. - /// - /// Must return [`TxStoreResponse::IDs`] - IDsInStemPool, + Promote(TxId), } #[cfg(feature = "txpool")] /// A response sent back from the backing transaction pool. -pub enum TxStoreResponse { +pub enum TxStoreResponse { /// A generic ok response. Ok, - /// A response containing a [`Option`] for if the transaction is in the pool (Some) or not (None) and in which pool - /// the tx is in. - Contains(Option), /// A response containing a requested transaction. 
Transaction(Option<(Tx, crate::State)>), - /// A list of transaction IDs. - IDs(Vec), } From fdd1689665e6659dee1aaa67d4a6193d5e6d1016 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Thu, 22 Aug 2024 01:09:07 +0000 Subject: [PATCH 044/104] Storage: tx-pool database (#238) * split the DB service abstraction * fix ci * misc changes * init tx-pool DBs * add some comments * move more types to `/types` * add some ops * add config & more ops functions & open function * add read & write svcs * add more docs * add write functions + docs * fix merge * fix test * fix ci * move `TxPoolWriteError` * add more docs * fix toml formatting * fix some docs * fix clippy * review fixes * update docs * fix merge * fix docs * fix tests * fix tests * add back lints * Update storage/txpool/README.md Co-authored-by: hinto-janai --------- Co-authored-by: hinto-janai --- Cargo.lock | 18 ++ consensus/src/lib.rs | 7 +- helper/src/fs.rs | 25 +++ storage/blockchain/Cargo.toml | 4 +- storage/blockchain/src/constants.rs | 15 -- storage/blockchain/src/lib.rs | 2 +- storage/blockchain/src/tables.rs | 1 - storage/blockchain/src/types.rs | 2 +- storage/txpool/Cargo.toml | 32 ++- storage/txpool/README.md | 114 ++++++++++ storage/txpool/src/config.rs | 232 +++++++++++++++++++++ storage/txpool/src/free.rs | 62 ++++++ storage/txpool/src/lib.rs | 14 ++ storage/txpool/src/ops.rs | 102 +++++++++ storage/txpool/src/ops/key_images.rs | 54 +++++ storage/txpool/src/ops/tx_read.rs | 36 ++++ storage/txpool/src/ops/tx_write.rs | 83 ++++++++ storage/txpool/src/service.rs | 136 ++++++++++++ storage/txpool/src/service/free.rs | 37 ++++ storage/txpool/src/service/interface.rs | 59 ++++++ storage/txpool/src/service/read.rs | 105 ++++++++++ storage/txpool/src/service/types.rs | 21 ++ storage/txpool/src/service/write.rs | 103 +++++++++ storage/txpool/src/tables.rs | 45 ++++ storage/txpool/src/types.rs | 124 +++++++++++ types/src/transaction_verification_data.rs | 22 +- 26 files changed, 1430 insertions(+), 25 deletions(-) 
create mode 100644 storage/txpool/README.md create mode 100644 storage/txpool/src/config.rs create mode 100644 storage/txpool/src/free.rs create mode 100644 storage/txpool/src/ops.rs create mode 100644 storage/txpool/src/ops/key_images.rs create mode 100644 storage/txpool/src/ops/tx_read.rs create mode 100644 storage/txpool/src/ops/tx_write.rs create mode 100644 storage/txpool/src/service.rs create mode 100644 storage/txpool/src/service/free.rs create mode 100644 storage/txpool/src/service/interface.rs create mode 100644 storage/txpool/src/service/read.rs create mode 100644 storage/txpool/src/service/types.rs create mode 100644 storage/txpool/src/service/write.rs create mode 100644 storage/txpool/src/tables.rs create mode 100644 storage/txpool/src/types.rs diff --git a/Cargo.lock b/Cargo.lock index 052b1ee..7753189 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -850,6 +850,24 @@ dependencies = [ [[package]] name = "cuprate-txpool" version = "0.0.0" +dependencies = [ + "bitflags 2.5.0", + "bytemuck", + "cuprate-database", + "cuprate-database-service", + "cuprate-helper", + "cuprate-test-utils", + "cuprate-types", + "hex", + "hex-literal", + "monero-serai", + "rayon", + "serde", + "tempfile", + "thiserror", + "tokio", + "tower", +] [[package]] name = "cuprate-types" diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 3b7f2ae..004285d 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -10,7 +10,7 @@ //! implement a database you need to have a service which accepts [`BlockchainReadRequest`] and responds //! with [`BlockchainResponse`]. //! -use cuprate_consensus_rules::{ConsensusError, HardFork}; +use cuprate_consensus_rules::ConsensusError; mod batch_verifier; pub mod block; @@ -27,7 +27,10 @@ pub use context::{ pub use transactions::{TxVerifierService, VerifyTxRequest, VerifyTxResponse}; // re-export. 
-pub use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse}; +pub use cuprate_types::{ + blockchain::{BlockchainReadRequest, BlockchainResponse}, + HardFork, +}; /// An Error returned from one of the consensus services. #[derive(Debug, thiserror::Error)] diff --git a/helper/src/fs.rs b/helper/src/fs.rs index d38ee33..7290361 100644 --- a/helper/src/fs.rs +++ b/helper/src/fs.rs @@ -163,6 +163,19 @@ impl_path_lazylock! { CUPRATE_BLOCKCHAIN_DIR, data_dir, "blockchain", + + /// Cuprate's transaction pool directory. + /// + /// This is the PATH used for any Cuprate txpool files. + /// + /// | OS | PATH | + /// |---------|------------------------------------------------------------| + /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\txpool\` | + /// | macOS | `/Users/Alice/Library/Application Support/Cuprate/txpool/` | + /// | Linux | `/home/alice/.local/share/cuprate/txpool/` | + CUPRATE_TXPOOL_DIR, + data_dir, + "txpool", } //---------------------------------------------------------------------------------------------------- Tests @@ -198,6 +211,10 @@ mod test { let dir = &*CUPRATE_BLOCKCHAIN_DIR; println!("cuprate_blockchain_dir: {dir:?}"); assert!(dir.ends_with(r"AppData\Roaming\Cuprate\blockchain")); + + let dir = &*CUPRATE_TXPOOL_DIR; + println!("cuprate_txpool_dir: {dir:?}"); + assert!(dir.ends_with(r"AppData\Roaming\Cuprate\txpool")); } else if cfg!(target_os = "macos") { let dir = &*CUPRATE_CACHE_DIR; println!("cuprate_cache_dir: {dir:?}"); @@ -214,6 +231,10 @@ mod test { let dir = &*CUPRATE_BLOCKCHAIN_DIR; println!("cuprate_blockchain_dir: {dir:?}"); assert!(dir.ends_with("Library/Application Support/Cuprate/blockchain")); + + let dir = &*CUPRATE_TXPOOL_DIR; + println!("cuprate_txpool_dir: {dir:?}"); + assert!(dir.ends_with("Library/Application Support/Cuprate/txpool")); } else { // Assumes Linux. 
let dir = &*CUPRATE_CACHE_DIR; @@ -231,6 +252,10 @@ mod test { let dir = &*CUPRATE_BLOCKCHAIN_DIR; println!("cuprate_blockchain_dir: {dir:?}"); assert!(dir.ends_with(".local/share/cuprate/blockchain")); + + let dir = &*CUPRATE_TXPOOL_DIR; + println!("cuprate_txpool_dir: {dir:?}"); + assert!(dir.ends_with(".local/share/cuprate/txpool")); } } } diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index f45f1bc..7e79305 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -26,8 +26,8 @@ cuprate-database-service = { path = "../service" } cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] } cuprate-types = { path = "../../types", features = ["blockchain"] } -bitflags = { workspace = true, features = ["serde", "bytemuck"] } -bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } +bitflags = { workspace = true, features = ["std", "serde", "bytemuck"] } +bytemuck = { workspace = true, features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } curve25519-dalek = { workspace = true } cuprate-pruning = { path = "../../pruning" } monero-serai = { workspace = true, features = ["std"] } diff --git a/storage/blockchain/src/constants.rs b/storage/blockchain/src/constants.rs index 7f00d4c..8726885 100644 --- a/storage/blockchain/src/constants.rs +++ b/storage/blockchain/src/constants.rs @@ -14,21 +14,6 @@ /// pub const DATABASE_VERSION: u64 = 0; -//---------------------------------------------------------------------------------------------------- Error Messages -/// Corrupt database error message. -/// -/// The error message shown to end-users in panic -/// messages if we think the database is corrupted. -/// -/// This is meant to be user-friendly. -pub const DATABASE_CORRUPT_MSG: &str = r"Cuprate has encountered a fatal error. The database may be corrupted. - -TODO: instructions on: -1. What to do -2. 
How to fix (re-sync, recover, etc) -3. General advice for preventing corruption -4. etc"; - //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test {} diff --git a/storage/blockchain/src/lib.rs b/storage/blockchain/src/lib.rs index ec6d082..e544a69 100644 --- a/storage/blockchain/src/lib.rs +++ b/storage/blockchain/src/lib.rs @@ -117,7 +117,7 @@ compile_error!("Cuprate is only compatible with 64-bit CPUs"); mod constants; mod free; -pub use constants::{DATABASE_CORRUPT_MSG, DATABASE_VERSION}; +pub use constants::DATABASE_VERSION; pub use cuprate_database; pub use free::open; diff --git a/storage/blockchain/src/tables.rs b/storage/blockchain/src/tables.rs index caac787..122ac31 100644 --- a/storage/blockchain/src/tables.rs +++ b/storage/blockchain/src/tables.rs @@ -28,7 +28,6 @@ use crate::types::{ // - Tables are defined in plural to avoid name conflicts with types // - If adding/changing a table also edit: // - the tests in `src/backend/tests.rs` -// - `call_fn_on_all_tables_or_early_return!()` macro in `src/open_tables.rs` cuprate_database::define_tables! { /// Serialized block blobs (bytes). /// diff --git a/storage/blockchain/src/types.rs b/storage/blockchain/src/types.rs index 9abd175..eb1dc64 100644 --- a/storage/blockchain/src/types.rs +++ b/storage/blockchain/src/types.rs @@ -1,4 +1,4 @@ -//! Database [table](crate::tables) types. +//! Blockchain [table](crate::tables) types. //! //! This module contains all types used by the database tables, //! 
and aliases for common Monero-related types that use the diff --git a/storage/txpool/Cargo.toml b/storage/txpool/Cargo.toml index 536d445..d5ea77d 100644 --- a/storage/txpool/Cargo.toml +++ b/storage/txpool/Cargo.toml @@ -4,12 +4,40 @@ version = "0.0.0" edition = "2021" description = "Cuprate's transaction pool database" license = "MIT" -authors = ["hinto-janai"] -repository = "https://github.com/Cuprate/cuprate/tree/main/storage/cuprate-txpool" +authors = ["Boog900"] +repository = "https://github.com/Cuprate/cuprate/tree/main/storage/txpool" keywords = ["cuprate", "txpool", "transaction", "pool", "database"] [features] +default = ["heed", "service"] +# default = ["redb", "service"] +# default = ["redb-memory", "service"] +heed = ["cuprate-database/heed"] +redb = ["cuprate-database/redb"] +redb-memory = ["cuprate-database/redb-memory"] +service = ["dep:tower", "dep:rayon", "dep:cuprate-database-service"] +serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"] [dependencies] +cuprate-database = { path = "../database", features = ["heed"] } +cuprate-database-service = { path = "../service", optional = true } +cuprate-types = { path = "../../types" } +cuprate-helper = { path = "../../helper", default-features = false, features = ["constants"] } + +monero-serai = { workspace = true, features = ["std"] } +bytemuck = { workspace = true, features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } +bitflags = { workspace = true, features = ["std", "serde", "bytemuck"] } +thiserror = { workspace = true } +hex = { workspace = true } + +tower = { workspace = true, optional = true } +rayon = { workspace = true, optional = true } + +serde = { workspace = true, optional = true } [dev-dependencies] +cuprate-test-utils = { path = "../../test-utils" } + +tokio = { workspace = true } +tempfile = { workspace = true } +hex-literal = { workspace = true } diff --git a/storage/txpool/README.md b/storage/txpool/README.md new file mode 100644 
index 0000000..80d3b25 --- /dev/null +++ b/storage/txpool/README.md @@ -0,0 +1,114 @@ +Cuprate's tx-pool database. + +This documentation is mostly for practical usage of `cuprate_txpool`. + +For a high-level overview, see the database section in +[Cuprate's architecture book](https://architecture.cuprate.org). + +If you're looking for a database crate, consider using the lower-level +[`cuprate-database`](https://doc.cuprate.org/cuprate_database) +crate that this crate is built on-top of. + +# Purpose + +This crate does 3 things: + +1. Uses [`cuprate_database`] as a base database layer +1. Implements various transaction pool related [operations](ops), [tables], and [types] +1. Exposes a [`tower::Service`] backed by a thread-pool + +Each layer builds on-top of the previous. + +As a user of `cuprate_txpool`, consider using the higher-level [`service`] module, +or at the very least the [`ops`] module instead of interacting with the `cuprate_database` traits directly. + +# `cuprate_database` + +Consider reading `cuprate_database`'s crate documentation before this crate, as it is the first layer. + +If/when this crate needs is used, be sure to use the version that this crate re-exports, e.g.: + +```rust +use cuprate_txpool::{ + cuprate_database::RuntimeError, +}; +``` + +This ensures the types/traits used from `cuprate_database` are the same ones used by `cuprate_txpool` internally. + +# Feature flags + +The `service` module requires the `service` feature to be enabled. +See the module for more documentation. + +Different database backends are enabled by the feature flags: + +- `heed` (LMDB) +- `redb` + +The default is `heed`. + +`tracing` is always enabled and cannot be disabled via feature-flag. + + +# Invariants when not using `service` + +See [`cuprate_blockchain`](https://doc.cuprate.org/cuprate_blockchain), the invariants are the same. + +# Examples + +The below is an example of using `cuprate_txpool`'s +lowest API, i.e. 
using a mix of this crate and `cuprate_database`'s traits directly - +**this is NOT recommended.** + +For examples of the higher-level APIs, see: + +- [`ops`] +- [`service`] + +```rust +use cuprate_txpool::{ + cuprate_database::{ + ConcreteEnv, + Env, EnvInner, + DatabaseRo, DatabaseRw, TxRo, TxRw, + }, + config::ConfigBuilder, + tables::{Tables, TablesMut, OpenTables}, +}; + +# fn main() -> Result<(), Box> { + // Create a configuration for the database environment. + let tmp_dir = tempfile::tempdir()?; + let db_dir = tmp_dir.path().to_owned(); + let config = ConfigBuilder::new() + .db_directory(db_dir.into()) + .build(); + + // Initialize the database environment. + let env = cuprate_txpool::open(config)?; + + // Open up a transaction + tables for writing. + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw()?; + let mut tables = env_inner.open_tables_mut(&tx_rw)?; + + // ⚠️ Write data to the tables directly. + // (not recommended, use `ops` or `service`). + const KEY_IMAGE: [u8; 32] = [88; 32]; + const TX_HASH: [u8; 32] = [88; 32]; + tables.spent_key_images_mut().put(&KEY_IMAGE, &TX_HASH)?; + + // Commit the data written. + drop(tables); + TxRw::commit(tx_rw)?; + + // Read the data, assert it is correct. + let tx_ro = env_inner.tx_ro()?; + let tables = env_inner.open_tables(&tx_ro)?; + let (key_image, tx_hash) = tables.spent_key_images().first()?; + assert_eq!(key_image, KEY_IMAGE); + assert_eq!(tx_hash, TX_HASH); + # Ok(()) +} +``` diff --git a/storage/txpool/src/config.rs b/storage/txpool/src/config.rs new file mode 100644 index 0000000..8d09b5e --- /dev/null +++ b/storage/txpool/src/config.rs @@ -0,0 +1,232 @@ +//! The transaction pool [`Config`]. 
+use std::{borrow::Cow, path::Path}; + +use cuprate_database::{ + config::{Config as DbConfig, SyncMode}, + resize::ResizeAlgorithm, +}; +use cuprate_database_service::ReaderThreads; +use cuprate_helper::fs::CUPRATE_TXPOOL_DIR; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +/// The default transaction pool weight limit. +const DEFAULT_TXPOOL_WEIGHT_LIMIT: usize = 600 * 1024 * 1024; + +//---------------------------------------------------------------------------------------------------- ConfigBuilder +/// Builder for [`Config`]. +/// +// SOMEDAY: there's are many more options to add in the future. +#[derive(Debug, Clone, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct ConfigBuilder { + /// [`Config::db_directory`]. + db_directory: Option>, + + /// [`Config::cuprate_database_config`]. + db_config: cuprate_database::config::ConfigBuilder, + + /// [`Config::reader_threads`]. + reader_threads: Option, + + /// [`Config::max_txpool_weight`]. + max_txpool_weight: Option, +} + +impl ConfigBuilder { + /// Create a new [`ConfigBuilder`]. + /// + /// [`ConfigBuilder::build`] can be called immediately + /// after this function to use default values. + pub fn new() -> Self { + Self { + db_directory: None, + db_config: cuprate_database::config::ConfigBuilder::new(Cow::Borrowed( + &*CUPRATE_TXPOOL_DIR, + )), + reader_threads: None, + max_txpool_weight: None, + } + } + + /// Build into a [`Config`]. + /// + /// # Default values + /// If [`ConfigBuilder::db_directory`] was not called, + /// the default [`CUPRATE_TXPOOL_DIR`] will be used. + /// + /// For all other values, [`Default::default`] is used. + pub fn build(self) -> Config { + // INVARIANT: all PATH safety checks are done + // in `helper::fs`. No need to do them here. 
+ let db_directory = self + .db_directory + .unwrap_or_else(|| Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)); + + let reader_threads = self.reader_threads.unwrap_or_default(); + + let max_txpool_weight = self + .max_txpool_weight + .unwrap_or(DEFAULT_TXPOOL_WEIGHT_LIMIT); + + let db_config = self + .db_config + .db_directory(db_directory) + .reader_threads(reader_threads.as_threads()) + .build(); + + Config { + db_config, + reader_threads, + max_txpool_weight, + } + } + + /// Sets a new maximum weight for the transaction pool. + #[must_use] + pub const fn max_txpool_weight(mut self, max_txpool_weight: usize) -> Self { + self.max_txpool_weight = Some(max_txpool_weight); + self + } + + /// Set a custom database directory (and file) [`Path`]. + #[must_use] + pub fn db_directory(mut self, db_directory: Cow<'static, Path>) -> Self { + self.db_directory = Some(db_directory); + self + } + + /// Calls [`cuprate_database::config::ConfigBuilder::sync_mode`]. + #[must_use] + pub fn sync_mode(mut self, sync_mode: SyncMode) -> Self { + self.db_config = self.db_config.sync_mode(sync_mode); + self + } + + /// Calls [`cuprate_database::config::ConfigBuilder::resize_algorithm`]. + #[must_use] + pub fn resize_algorithm(mut self, resize_algorithm: ResizeAlgorithm) -> Self { + self.db_config = self.db_config.resize_algorithm(resize_algorithm); + self + } + + /// Set a custom [`ReaderThreads`]. + #[must_use] + pub const fn reader_threads(mut self, reader_threads: ReaderThreads) -> Self { + self.reader_threads = Some(reader_threads); + self + } + + /// Tune the [`ConfigBuilder`] for the highest performing, + /// but also most resource-intensive & maybe risky settings. + /// + /// Good default for testing, and resource-available machines. 
+ #[must_use] + pub fn fast(mut self) -> Self { + self.db_config = + cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)) + .fast(); + + self.reader_threads = Some(ReaderThreads::OnePerThread); + self + } + + /// Tune the [`ConfigBuilder`] for the lowest performing, + /// but also least resource-intensive settings. + /// + /// Good default for resource-limited machines, e.g. a cheap VPS. + #[must_use] + pub fn low_power(mut self) -> Self { + self.db_config = + cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)) + .low_power(); + + self.reader_threads = Some(ReaderThreads::One); + self + } +} + +impl Default for ConfigBuilder { + fn default() -> Self { + let db_directory = Cow::Borrowed(CUPRATE_TXPOOL_DIR.as_path()); + Self { + db_directory: Some(db_directory.clone()), + db_config: cuprate_database::config::ConfigBuilder::new(db_directory), + reader_threads: Some(ReaderThreads::default()), + max_txpool_weight: Some(DEFAULT_TXPOOL_WEIGHT_LIMIT), + } + } +} + +//---------------------------------------------------------------------------------------------------- Config +/// `cuprate_txpool` configuration. +/// +/// This is a configuration built on-top of [`DbConfig`]. +/// +/// It contains configuration specific to this crate, plus the database config. +/// +/// For construction, either use [`ConfigBuilder`] or [`Config::default`]. +#[derive(Debug, Clone, PartialEq, PartialOrd)] +pub struct Config { + /// The database configuration. + pub db_config: DbConfig, + + /// Database reader thread count. + pub reader_threads: ReaderThreads, + + /// The maximum weight of the transaction pool, after which we will start dropping transactions. + // TODO: enforce this max size. + pub max_txpool_weight: usize, +} + +impl Config { + /// Create a new [`Config`] with sane default settings. + /// + /// The [`DbConfig::db_directory`] + /// will be set to [`CUPRATE_TXPOOL_DIR`]. 
+ /// + /// All other values will be [`Default::default`]. + /// + /// Same as [`Config::default`]. + /// + /// ```rust + /// use cuprate_database::{ + /// config::SyncMode, + /// resize::ResizeAlgorithm, + /// DATABASE_DATA_FILENAME, + /// }; + /// use cuprate_database_service::ReaderThreads; + /// use cuprate_helper::fs::*; + /// + /// use cuprate_txpool::Config; + /// + /// let config = Config::new(); + /// + /// assert_eq!(config.db_config.db_directory(), &*CUPRATE_TXPOOL_DIR); + /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_TXPOOL_DIR)); + /// assert!(config.db_config.db_file().ends_with(DATABASE_DATA_FILENAME)); + /// assert_eq!(config.db_config.sync_mode, SyncMode::default()); + /// assert_eq!(config.db_config.resize_algorithm, ResizeAlgorithm::default()); + /// assert_eq!(config.reader_threads, ReaderThreads::default()); + /// ``` + pub fn new() -> Self { + Config { + db_config: DbConfig::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)), + reader_threads: ReaderThreads::default(), + max_txpool_weight: 0, + } + } +} + +impl Default for Config { + /// Same as [`Config::new`]. + /// + /// ```rust + /// # use cuprate_txpool::Config; + /// assert_eq!(Config::default(), Config::new()); + /// ``` + fn default() -> Self { + Self::new() + } +} diff --git a/storage/txpool/src/free.rs b/storage/txpool/src/free.rs new file mode 100644 index 0000000..d394002 --- /dev/null +++ b/storage/txpool/src/free.rs @@ -0,0 +1,62 @@ +//! General free functions (related to the tx-pool database). + +//---------------------------------------------------------------------------------------------------- Import +use cuprate_database::{ConcreteEnv, Env, EnvInner, InitError, RuntimeError, TxRw}; + +use crate::{config::Config, tables::OpenTables}; + +//---------------------------------------------------------------------------------------------------- Free functions +/// Open the txpool database using the passed [`Config`]. 
+/// +/// This calls [`cuprate_database::Env::open`] and prepares the +/// database to be ready for txpool-related usage, e.g. +/// table creation, table sort order, etc. +/// +/// All tables found in [`crate::tables`] will be +/// ready for usage in the returned [`ConcreteEnv`]. +/// +/// # Errors +/// This will error if: +/// - The database file could not be opened +/// - A write transaction could not be opened +/// - A table could not be created/opened +#[cold] +#[inline(never)] // only called once +pub fn open(config: Config) -> Result { + // Attempt to open the database environment. + let env = ::open(config.db_config)?; + + /// Convert runtime errors to init errors. + /// + /// INVARIANT: + /// [`cuprate_database`]'s functions mostly return the former + /// so we must convert them. We have knowledge of which errors + /// makes sense in this functions context so we panic on + /// unexpected ones. + fn runtime_to_init_error(runtime: RuntimeError) -> InitError { + match runtime { + RuntimeError::Io(io_error) => io_error.into(), + + // These errors shouldn't be happening here. + RuntimeError::KeyExists + | RuntimeError::KeyNotFound + | RuntimeError::ResizeNeeded + | RuntimeError::TableNotFound => unreachable!(), + } + } + + // INVARIANT: We must ensure that all tables are created, + // `cuprate_database` has no way of knowing _which_ tables + // we want since it is agnostic, so we are responsible for this. + { + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw().map_err(runtime_to_init_error)?; + + // Create all tables. 
+ OpenTables::create_tables(&env_inner, &tx_rw).map_err(runtime_to_init_error)?; + + TxRw::commit(tx_rw).map_err(runtime_to_init_error)?; + } + + Ok(env) +} diff --git a/storage/txpool/src/lib.rs b/storage/txpool/src/lib.rs index 8b13789..f200c34 100644 --- a/storage/txpool/src/lib.rs +++ b/storage/txpool/src/lib.rs @@ -1 +1,15 @@ +#![doc = include_str!("../README.md")] +pub mod config; +mod free; +pub mod ops; +#[cfg(feature = "service")] +pub mod service; +pub mod tables; +pub mod types; + +pub use config::Config; +pub use free::open; + +//re-exports +pub use cuprate_database; diff --git a/storage/txpool/src/ops.rs b/storage/txpool/src/ops.rs new file mode 100644 index 0000000..50d9ea4 --- /dev/null +++ b/storage/txpool/src/ops.rs @@ -0,0 +1,102 @@ +//! Abstracted Monero tx-pool database operations. +//! +//! This module contains many free functions that use the +//! traits in [`cuprate_database`] to generically call Monero-related +//! tx-pool database operations. +//! +//! # `impl Table` +//! Functions in this module take [`Tables`](crate::tables::Tables) and +//! [`TablesMut`](crate::tables::TablesMut) directly - these are +//! _already opened_ database tables. +//! +//! As such, the responsibility of +//! transactions, tables, etc, are on the caller. +//! +//! Notably, this means that these functions are as lean +//! as possible, so calling them in a loop should be okay. +//! +//! # Atomicity +//! As transactions are handled by the _caller_ of these functions, +//! it is up to the caller to decide what happens if one them return +//! an error. +//! +//! To maintain atomicity, transactions should be [`abort`](cuprate_database::TxRw::abort)ed +//! if one of the functions failed. +//! +//! For example, if [`add_transaction`] is called and returns an [`Err`], +//! `abort`ing the transaction that opened the input `TableMut` would reverse all tables +//! mutated by [`add_transaction`] up until the error, leaving it in the state it was in before +//! 
[`add_transaction`] was called. +//! +//! # Example +//! Simple usage of `ops`. +//! +//! ```rust +//! use hex_literal::hex; +//! +//! use cuprate_test_utils::data::TX_V1_SIG2; +//! use cuprate_txpool::{ +//! cuprate_database::{ +//! ConcreteEnv, +//! Env, EnvInner, +//! DatabaseRo, DatabaseRw, TxRo, TxRw, +//! }, +//! config::ConfigBuilder, +//! tables::{Tables, TablesMut, OpenTables}, +//! ops::{add_transaction, get_transaction_verification_data}, +//! }; +//! +//! # fn main() -> Result<(), Box> { +//! // Create a configuration for the database environment. +//! let tmp_dir = tempfile::tempdir()?; +//! let db_dir = tmp_dir.path().to_owned(); +//! let config = ConfigBuilder::new() +//! .db_directory(db_dir.into()) +//! .build(); +//! +//! // Initialize the database environment. +//! let env = cuprate_txpool::open(config)?; +//! +//! // Open up a transaction + tables for writing. +//! let env_inner = env.env_inner(); +//! let tx_rw = env_inner.tx_rw()?; +//! let mut tables = env_inner.open_tables_mut(&tx_rw)?; +//! +//! // Write a tx to the database. +//! let mut tx = TX_V1_SIG2.clone(); +//! let tx_hash = tx.tx_hash; +//! add_transaction(&tx.try_into().unwrap(), true, &mut tables)?; +//! +//! // Commit the data written. +//! drop(tables); +//! TxRw::commit(tx_rw)?; +//! +//! // Read the data, assert it is correct. +//! let tx_rw = env_inner.tx_rw()?; +//! let mut tables = env_inner.open_tables_mut(&tx_rw)?; +//! let tx = get_transaction_verification_data(&tx_hash, &mut tables)?; +//! +//! assert_eq!(tx.tx_hash, tx_hash); +//! assert_eq!(tx.tx, TX_V1_SIG2.tx); +//! # Ok(()) } +//! ``` + +mod key_images; +mod tx_read; +mod tx_write; + +pub use tx_read::get_transaction_verification_data; +pub use tx_write::{add_transaction, remove_transaction}; + +/// An error that can occur on some tx-write ops. +#[derive(thiserror::Error, Debug)] +pub enum TxPoolWriteError { + /// The transaction could not be added as it double spends another tx in the pool. 
+ /// + /// The inner value is the hash of the transaction that was double spent. + #[error("Transaction doubles spent transaction already in the pool ({}).", hex::encode(.0))] + DoubleSpend(crate::types::TransactionHash), + /// A database error. + #[error("Database error: {0}")] + Database(#[from] cuprate_database::RuntimeError), +} diff --git a/storage/txpool/src/ops/key_images.rs b/storage/txpool/src/ops/key_images.rs new file mode 100644 index 0000000..c6e4415 --- /dev/null +++ b/storage/txpool/src/ops/key_images.rs @@ -0,0 +1,54 @@ +//! Tx-pool key image ops. +use monero_serai::transaction::Input; + +use cuprate_database::{DatabaseRw, RuntimeError}; + +use crate::{ops::TxPoolWriteError, tables::SpentKeyImages, types::TransactionHash}; + +/// Adds the transaction key images to the [`SpentKeyImages`] table. +/// +/// This function will return an error if any of the key images are already spent. +/// +/// # Panics +/// This function will panic if any of the [`Input`]s are not [`Input::ToKey`] +pub fn add_tx_key_images( + inputs: &[Input], + tx_hash: &TransactionHash, + kis_table: &mut impl DatabaseRw, +) -> Result<(), TxPoolWriteError> { + for ki in inputs.iter().map(ki_from_input) { + if let Ok(double_spend_tx_hash) = kis_table.get(&ki) { + return Err(TxPoolWriteError::DoubleSpend(double_spend_tx_hash)); + } + + kis_table.put(&ki, tx_hash)?; + } + + Ok(()) +} + +/// Removes key images from the [`SpentKeyImages`] table. +/// +/// # Panics +/// This function will panic if any of the [`Input`]s are not [`Input::ToKey`] +pub fn remove_tx_key_images( + inputs: &[Input], + kis_table: &mut impl DatabaseRw, +) -> Result<(), RuntimeError> { + for ki in inputs.iter().map(ki_from_input) { + kis_table.delete(&ki)?; + } + + Ok(()) +} + +/// Maps an input to a key image. +/// +/// # Panics +/// This function will panic if the [`Input`] is not [`Input::ToKey`] +fn ki_from_input(input: &Input) -> [u8; 32] { + match input { + Input::ToKey { key_image, .. 
} => key_image.compress().0, + Input::Gen(_) => panic!("miner tx cannot be added to the txpool"), + } +} diff --git a/storage/txpool/src/ops/tx_read.rs b/storage/txpool/src/ops/tx_read.rs new file mode 100644 index 0000000..db89415 --- /dev/null +++ b/storage/txpool/src/ops/tx_read.rs @@ -0,0 +1,36 @@ +//! Transaction read ops. +//! +//! This module handles reading full transaction data, like getting a transaction from the pool. +use std::sync::Mutex; + +use monero_serai::transaction::Transaction; + +use cuprate_database::{DatabaseRo, RuntimeError}; +use cuprate_types::{TransactionVerificationData, TxVersion}; + +use crate::{tables::Tables, types::TransactionHash}; + +/// Gets the [`TransactionVerificationData`] of a transaction in the tx-pool, leaving the tx in the pool. +pub fn get_transaction_verification_data( + tx_hash: &TransactionHash, + tables: &impl Tables, +) -> Result { + let tx_blob = tables.transaction_blobs().get(tx_hash)?.0; + + let tx_info = tables.transaction_infos().get(tx_hash)?; + + let cached_verification_state = tables.cached_verification_state().get(tx_hash)?.into(); + + let tx = + Transaction::read(&mut tx_blob.as_slice()).expect("Tx in the tx-pool must be parseable"); + + Ok(TransactionVerificationData { + version: TxVersion::from_raw(tx.version()).expect("Tx in tx-pool has invalid version"), + tx, + tx_blob, + tx_weight: tx_info.weight, + fee: tx_info.fee, + tx_hash: *tx_hash, + cached_verification_state: Mutex::new(cached_verification_state), + }) +} diff --git a/storage/txpool/src/ops/tx_write.rs b/storage/txpool/src/ops/tx_write.rs new file mode 100644 index 0000000..9885b9c --- /dev/null +++ b/storage/txpool/src/ops/tx_write.rs @@ -0,0 +1,83 @@ +//! Transaction writing ops. +//! +//! This module handles writing full transaction data, like removing or adding a transaction. 
+use bytemuck::TransparentWrapper; +use monero_serai::transaction::{NotPruned, Transaction}; + +use cuprate_database::{DatabaseRw, RuntimeError, StorableVec}; +use cuprate_types::TransactionVerificationData; + +use crate::{ + ops::{ + key_images::{add_tx_key_images, remove_tx_key_images}, + TxPoolWriteError, + }, + tables::TablesMut, + types::{TransactionHash, TransactionInfo, TxStateFlags}, +}; + +/// Adds a transaction to the tx-pool. +/// +/// This function fills in all tables necessary to add the transaction to the pool. +/// +/// # Panics +/// This function will panic if the transactions inputs are not all of type [`Input::ToKey`](monero_serai::transaction::Input::ToKey). +pub fn add_transaction( + tx: &TransactionVerificationData, + state_stem: bool, + tables: &mut impl TablesMut, +) -> Result<(), TxPoolWriteError> { + // Add the tx blob to table 0. + tables + .transaction_blobs_mut() + .put(&tx.tx_hash, StorableVec::wrap_ref(&tx.tx_blob))?; + + let mut flags = TxStateFlags::empty(); + flags.set(TxStateFlags::STATE_STEM, state_stem); + + // Add the tx info to table 1. + tables.transaction_infos_mut().put( + &tx.tx_hash, + &TransactionInfo { + fee: tx.fee, + weight: tx.tx_weight, + flags, + _padding: [0; 7], + }, + )?; + + // Add the cached verification state to table 2. + let cached_verification_state = (*tx.cached_verification_state.lock().unwrap()).into(); + tables + .cached_verification_state_mut() + .put(&tx.tx_hash, &cached_verification_state)?; + + // Add the tx key images to table 3. + let kis_table = tables.spent_key_images_mut(); + add_tx_key_images(&tx.tx.prefix().inputs, &tx.tx_hash, kis_table)?; + + Ok(()) +} + +/// Removes a transaction from the transaction pool. +pub fn remove_transaction( + tx_hash: &TransactionHash, + tables: &mut impl TablesMut, +) -> Result<(), RuntimeError> { + // Remove the tx blob from table 0. + let tx_blob = tables.transaction_blobs_mut().take(tx_hash)?.0; + + // Remove the tx info from table 1. 
+ tables.transaction_infos_mut().delete(tx_hash)?; + + // Remove the cached verification state from table 2. + tables.cached_verification_state_mut().delete(tx_hash)?; + + // Remove the tx key images from table 3. + let tx = Transaction::::read(&mut tx_blob.as_slice()) + .expect("Tx in the tx-pool must be parseable"); + let kis_table = tables.spent_key_images_mut(); + remove_tx_key_images(&tx.prefix().inputs, kis_table)?; + + Ok(()) +} diff --git a/storage/txpool/src/service.rs b/storage/txpool/src/service.rs new file mode 100644 index 0000000..d87adce --- /dev/null +++ b/storage/txpool/src/service.rs @@ -0,0 +1,136 @@ +//! [`tower::Service`] integeration + thread-pool. +//! +//! ## `service` +//! The `service` module implements the [`tower`] integration, +//! along with the reader/writer thread-pool system. +//! +//! The thread-pool allows outside crates to communicate with it by +//! sending database [`Request`][req_r]s and receiving [`Response`][resp]s `async`hronously - +//! without having to actually worry and handle the database themselves. +//! +//! The system is managed by this crate, and only requires [`init`] by the user. +//! +//! This module must be enabled with the `service` feature. +//! +//! ## Handles +//! The 2 handles to the database are: +//! - [`TxpoolReadHandle`] +//! - [`TxpoolWriteHandle`] +//! +//! The 1st allows any caller to send [`ReadRequest`][req_r]s. +//! +//! The 2nd allows any caller to send [`WriteRequest`][req_w]s. +//! +//! The `DatabaseReadHandle` can be shared as it is cheaply [`Clone`]able, however, +//! the `DatabaseWriteHandle` cannot be cloned. There is only 1 place in Cuprate that +//! writes, so it is passed there and used. +//! +//! ## Initialization +//! The database & thread-pool system can be initialized with [`init()`]. +//! +//! This causes the underlying database/threads to be setup +//! and returns a read/write handle to that database. +//! +//! ## Shutdown +//! 
Upon the above handles being dropped, the corresponding thread(s) will automatically exit, i.e: +//! - The last [`TxpoolReadHandle`] is dropped => reader thread-pool exits +//! - The last [`TxpoolWriteHandle`] is dropped => writer thread exits +//! +//! Upon dropping the [`cuprate_database::Env`]: +//! - All un-processed database transactions are completed +//! - All data gets flushed to disk (caused by [`Drop::drop`] impl on `Env`) +//! +//! ## Request and Response +//! To interact with the database (whether reading or writing data), +//! a `Request` can be sent using one of the above handles. +//! +//! Both the handles implement `tower::Service`, so they can be [`tower::Service::call`]ed. +//! +//! An `async`hronous channel will be returned from the call. +//! This channel can be `.await`ed upon to (eventually) receive +//! the corresponding `Response` to your `Request`. +//! +//! [req_r]: interface::TxpoolReadRequest +//! +//! [req_w]: interface::TxpoolWriteRequest +//! +//! // TODO: we have 2 responses +//! +//! [resp]: interface::TxpoolWriteResponse +//! +//! # Example +//! Simple usage of `service`. +//! +//! ```rust +//! use std::sync::Arc; +//! +//! use hex_literal::hex; +//! use tower::{Service, ServiceExt}; +//! +//! use cuprate_test_utils::data::TX_V1_SIG2; +//! +//! use cuprate_txpool::{ +//! cuprate_database::Env, +//! config::ConfigBuilder, +//! service::interface::{ +//! TxpoolWriteRequest, +//! TxpoolWriteResponse, +//! TxpoolReadRequest, +//! TxpoolReadResponse +//! } +//! }; +//! +//! # #[tokio::main] +//! # async fn main() -> Result<(), Box> { +//! // Create a configuration for the database environment. +//! use cuprate_test_utils::data::TX_V1_SIG2; +//! let tmp_dir = tempfile::tempdir()?; +//! let db_dir = tmp_dir.path().to_owned(); +//! let config = ConfigBuilder::new() +//! .db_directory(db_dir.into()) +//! .build(); +//! +//! // Initialize the database thread-pool. +//! 
let (mut read_handle, mut write_handle, _) = cuprate_txpool::service::init(config)?; +//! +//! // Prepare a request to write block. +//! let tx = TX_V1_SIG2.clone(); +//! let request = TxpoolWriteRequest::AddTransaction { +//! tx: Arc::new(tx.try_into().unwrap()), +//! state_stem: false, +//! }; +//! +//! // Send the request. +//! // We receive back an `async` channel that will +//! // eventually yield the result when `service` +//! // is done writing the tx. +//! let response_channel = write_handle.ready().await?.call(request); +//! +//! // Block write was OK. +//! let TxpoolWriteResponse::AddTransaction(double_spent) = response_channel.await? else { +//! panic!("tx-pool returned wrong response!"); +//! }; +//! assert!(double_spent.is_none()); +//! +//! // Now, let's try getting the block hash +//! // of the block we just wrote. +//! let request = TxpoolReadRequest::TxBlob(TX_V1_SIG2.tx_hash); +//! let response_channel = read_handle.ready().await?.call(request); +//! let response = response_channel.await?; +//! +//! // This causes the writer thread on the +//! // other side of this handle to exit... +//! drop(write_handle); +//! // ...and this causes the reader thread-pool to exit. +//! drop(read_handle); +//! # Ok(()) } +//! ``` + +mod free; +pub mod interface; +mod read; +mod types; +mod write; + +pub use free::init; +pub use types::{TxpoolReadHandle, TxpoolWriteHandle}; diff --git a/storage/txpool/src/service/free.rs b/storage/txpool/src/service/free.rs new file mode 100644 index 0000000..614ab5c --- /dev/null +++ b/storage/txpool/src/service/free.rs @@ -0,0 +1,37 @@ +use std::sync::Arc; + +use cuprate_database::{ConcreteEnv, InitError}; + +use crate::{ + service::{ + read::init_read_service, + types::{TxpoolReadHandle, TxpoolWriteHandle}, + write::init_write_service, + }, + Config, +}; + +//---------------------------------------------------------------------------------------------------- Init +#[cold] +#[inline(never)] // Only called once (?) 
+/// Initialize a database & thread-pool, and return a read/write handle to it. +/// +/// Once the returned handles are [`Drop::drop`]ed, the reader +/// thread-pool and writer thread will exit automatically. +/// +/// # Errors +/// This will forward the error if [`crate::open`] failed. +pub fn init( + config: Config, +) -> Result<(TxpoolReadHandle, TxpoolWriteHandle, Arc), InitError> { + let reader_threads = config.reader_threads; + + // Initialize the database itself. + let db = Arc::new(crate::open(config)?); + + // Spawn the Reader thread pool and Writer. + let readers = init_read_service(db.clone(), reader_threads); + let writer = init_write_service(db.clone()); + + Ok((readers, writer, db)) +} diff --git a/storage/txpool/src/service/interface.rs b/storage/txpool/src/service/interface.rs new file mode 100644 index 0000000..93235c0 --- /dev/null +++ b/storage/txpool/src/service/interface.rs @@ -0,0 +1,59 @@ +//! Tx-pool [`service`](super) interface. +//! +//! This module contains `cuprate_txpool`'s [`tower::Service`] request and response enums. +use std::sync::Arc; + +use cuprate_types::TransactionVerificationData; + +use crate::types::TransactionHash; + +//---------------------------------------------------------------------------------------------------- TxpoolReadRequest +/// The transaction pool [`tower::Service`] read request type. +pub enum TxpoolReadRequest { + /// A request for the blob (raw bytes) of a transaction with the given hash. + TxBlob(TransactionHash), + /// A request for the [`TransactionVerificationData`] of a transaction in the tx pool. + TxVerificationData(TransactionHash), +} + +//---------------------------------------------------------------------------------------------------- TxpoolReadResponse +/// The transaction pool [`tower::Service`] read response type. +#[allow(clippy::large_enum_variant)] +pub enum TxpoolReadResponse { + /// A response containing the raw bytes of a transaction. + // TODO: use bytes::Bytes. 
+ TxBlob(Vec), + /// A response of [`TransactionVerificationData`]. + TxVerificationData(TransactionVerificationData), +} + +//---------------------------------------------------------------------------------------------------- TxpoolWriteRequest +/// The transaction pool [`tower::Service`] write request type. +pub enum TxpoolWriteRequest { + /// Add a transaction to the pool. + /// + /// Returns [`TxpoolWriteResponse::AddTransaction`]. + AddTransaction { + /// The tx to add. + tx: Arc, + /// A [`bool`] denoting the routing state of this tx. + /// + /// [`true`] if this tx is in the stem state. + state_stem: bool, + }, + /// Remove a transaction with the given hash from the pool. + /// + /// Returns [`TxpoolWriteResponse::Ok`]. + RemoveTransaction(TransactionHash), +} + +//---------------------------------------------------------------------------------------------------- TxpoolWriteResponse +/// The transaction pool [`tower::Service`] write response type. +#[derive(Debug, Ord, PartialOrd, Eq, PartialEq)] +pub enum TxpoolWriteResponse { + /// A [`TxpoolWriteRequest::AddTransaction`] response. + /// + /// If the inner value is [`Some`] the tx was not added to the pool as it double spends a tx with the given hash. 
+ AddTransaction(Option), + Ok, +} diff --git a/storage/txpool/src/service/read.rs b/storage/txpool/src/service/read.rs new file mode 100644 index 0000000..c2fee66 --- /dev/null +++ b/storage/txpool/src/service/read.rs @@ -0,0 +1,105 @@ +use std::sync::Arc; + +use rayon::ThreadPool; + +use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner}; +use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads}; + +use crate::{ + ops::get_transaction_verification_data, + service::{ + interface::{TxpoolReadRequest, TxpoolReadResponse}, + types::{ReadResponseResult, TxpoolReadHandle}, + }, + tables::{OpenTables, TransactionBlobs}, + types::TransactionHash, +}; + +// TODO: update the docs here +//---------------------------------------------------------------------------------------------------- init_read_service +/// Initialize the [`TxpoolReadHandle`] thread-pool backed by `rayon`. +/// +/// This spawns `threads` amount of reader threads +/// attached to `env` and returns a handle to the pool. +/// +/// Should be called _once_ per actual database. +#[cold] +#[inline(never)] // Only called once. +pub fn init_read_service(env: Arc, threads: ReaderThreads) -> TxpoolReadHandle { + init_read_service_with_pool(env, init_thread_pool(threads)) +} + +/// Initialize the [`TxpoolReadHandle`], with a specific rayon thread-pool instead of +/// creating a new one. +/// +/// Should be called _once_ per actual database. +#[cold] +#[inline(never)] // Only called once. +pub fn init_read_service_with_pool( + env: Arc, + pool: Arc, +) -> TxpoolReadHandle { + DatabaseReadService::new(env, pool, map_request) +} + +//---------------------------------------------------------------------------------------------------- Request Mapping +// This function maps [`Request`]s to function calls +// executed by the rayon DB reader threadpool. + +/// Map [`TxpoolReadRequest`]'s to specific database handler functions. 
+/// +/// This is the main entrance into all `Request` handler functions. +/// The basic structure is: +/// 1. `Request` is mapped to a handler function +/// 2. Handler function is called +/// 3. [`TxpoolReadResponse`] is returned +fn map_request( + env: &ConcreteEnv, // Access to the database + request: TxpoolReadRequest, // The request we must fulfill +) -> ReadResponseResult { + match request { + TxpoolReadRequest::TxBlob(tx_hash) => tx_blob(env, &tx_hash), + TxpoolReadRequest::TxVerificationData(tx_hash) => tx_verification_data(env, &tx_hash), + } +} + +//---------------------------------------------------------------------------------------------------- Handler functions +// These are the actual functions that do stuff according to the incoming [`TxpoolReadRequest`]. +// +// Each function name is a 1-1 mapping (from CamelCase -> snake_case) to +// the enum variant name, e.g: `TxBlob` -> `tx_blob`. +// +// Each function will return the [`TxpoolReadResponse`] that we +// should send back to the caller in [`map_request()`]. +// +// INVARIANT: +// These functions are called above in `tower::Service::call()` +// using a custom threadpool which means any call to `par_*()` functions +// will be using the custom rayon DB reader thread-pool, not the global one. +// +// All functions below assume that this is the case, such that +// `par_*()` functions will not block the _global_ rayon thread-pool. + +/// [`TxpoolReadRequest::TxBlob`]. +#[inline] +fn tx_blob(env: &ConcreteEnv, tx_hash: &TransactionHash) -> ReadResponseResult { + let inner_env = env.env_inner(); + let tx_ro = inner_env.tx_ro()?; + + let tx_blobs_table = inner_env.open_db_ro::(&tx_ro)?; + + tx_blobs_table + .get(tx_hash) + .map(|blob| TxpoolReadResponse::TxBlob(blob.0)) +} + +/// [`TxpoolReadRequest::TxVerificationData`]. 
+#[inline] +fn tx_verification_data(env: &ConcreteEnv, tx_hash: &TransactionHash) -> ReadResponseResult { + let inner_env = env.env_inner(); + let tx_ro = inner_env.tx_ro()?; + + let tables = inner_env.open_tables(&tx_ro)?; + + get_transaction_verification_data(tx_hash, &tables).map(TxpoolReadResponse::TxVerificationData) +} diff --git a/storage/txpool/src/service/types.rs b/storage/txpool/src/service/types.rs new file mode 100644 index 0000000..5c6b97c --- /dev/null +++ b/storage/txpool/src/service/types.rs @@ -0,0 +1,21 @@ +//! Database service type aliases. +//! +//! Only used internally for our [`tower::Service`] impls. + +use cuprate_database::RuntimeError; +use cuprate_database_service::{DatabaseReadService, DatabaseWriteHandle}; + +use crate::service::interface::{ + TxpoolReadRequest, TxpoolReadResponse, TxpoolWriteRequest, TxpoolWriteResponse, +}; + +/// The actual type of the response. +/// +/// Either our [`TxpoolReadResponse`], or a database error occurred. +pub(super) type ReadResponseResult = Result; + +/// The transaction pool database write service. +pub type TxpoolWriteHandle = DatabaseWriteHandle; + +/// The transaction pool database read service. 
+pub type TxpoolReadHandle = DatabaseReadService; diff --git a/storage/txpool/src/service/write.rs b/storage/txpool/src/service/write.rs new file mode 100644 index 0000000..f6bdb38 --- /dev/null +++ b/storage/txpool/src/service/write.rs @@ -0,0 +1,103 @@ +use std::sync::Arc; + +use cuprate_database::{ConcreteEnv, Env, EnvInner, RuntimeError, TxRw}; +use cuprate_database_service::DatabaseWriteHandle; +use cuprate_types::TransactionVerificationData; + +use crate::{ + ops::{self, TxPoolWriteError}, + service::{ + interface::{TxpoolWriteRequest, TxpoolWriteResponse}, + types::TxpoolWriteHandle, + }, + tables::OpenTables, + types::TransactionHash, +}; + +//---------------------------------------------------------------------------------------------------- init_write_service +/// Initialize the txpool write service from a [`ConcreteEnv`]. +pub fn init_write_service(env: Arc) -> TxpoolWriteHandle { + DatabaseWriteHandle::init(env, handle_txpool_request) +} + +//---------------------------------------------------------------------------------------------------- handle_txpool_request +/// Handle an incoming [`TxpoolWriteRequest`], returning a [`TxpoolWriteResponse`]. +fn handle_txpool_request( + env: &ConcreteEnv, + req: &TxpoolWriteRequest, +) -> Result { + match req { + TxpoolWriteRequest::AddTransaction { tx, state_stem } => { + add_transaction(env, tx, *state_stem) + } + TxpoolWriteRequest::RemoveTransaction(tx_hash) => remove_transaction(env, tx_hash), + } +} + +//---------------------------------------------------------------------------------------------------- Handler functions +// These are the actual functions that do stuff according to the incoming [`TxpoolWriteRequest`]. +// +// Each function name is a 1-1 mapping (from CamelCase -> snake_case) to +// the enum variant name, e.g: `BlockExtendedHeader` -> `block_extended_header`. +// +// Each function will return the [`Response`] that we +// should send back to the caller in [`map_request()`]. 
+ +/// [`TxpoolWriteRequest::AddTransaction`] +fn add_transaction( + env: &ConcreteEnv, + tx: &TransactionVerificationData, + state_stem: bool, +) -> Result { + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw()?; + + let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?; + + if let Err(e) = ops::add_transaction(tx, state_stem, &mut tables_mut) { + drop(tables_mut); + // error adding the tx, abort the DB transaction. + TxRw::abort(tx_rw) + .expect("could not maintain database atomicity by aborting write transaction"); + + return match e { + TxPoolWriteError::DoubleSpend(tx_hash) => { + // If we couldn't add the tx due to a double spend still return ok, but include the tx + // this double spent. + // TODO: mark the double spent tx? + Ok(TxpoolWriteResponse::AddTransaction(Some(tx_hash))) + } + TxPoolWriteError::Database(e) => Err(e), + }; + }; + + drop(tables_mut); + // The tx was added to the pool successfully. + TxRw::commit(tx_rw)?; + Ok(TxpoolWriteResponse::AddTransaction(None)) +} + +/// [`TxpoolWriteRequest::RemoveTransaction`] +fn remove_transaction( + env: &ConcreteEnv, + tx_hash: &TransactionHash, +) -> Result { + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw()?; + + let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?; + + if let Err(e) = ops::remove_transaction(tx_hash, &mut tables_mut) { + drop(tables_mut); + // error removing the tx, abort the DB transaction. + TxRw::abort(tx_rw) + .expect("could not maintain database atomicity by aborting write transaction"); + + return Err(e); + } + + drop(tables_mut); + + TxRw::commit(tx_rw)?; + Ok(TxpoolWriteResponse::Ok) +} diff --git a/storage/txpool/src/tables.rs b/storage/txpool/src/tables.rs new file mode 100644 index 0000000..dbb686a --- /dev/null +++ b/storage/txpool/src/tables.rs @@ -0,0 +1,45 @@ +//! Tx-pool Database tables. +//! +//! # Table marker structs +//! This module contains all the table definitions used by [`cuprate_txpool`](crate). +//! +//! 
The zero-sized structs here represent the table type;
+//! they all are essentially marker types that implement [`cuprate_database::Table`].
+//!
+//! Table structs are `CamelCase`, and their static string
+//! names used by the actual database backend are `snake_case`.
+//!
+//! For example: [`TransactionBlobs`] -> `transaction_blobs`.
+//!
+//! # Traits
+//! This module also contains a set of traits for
+//! accessing _all_ tables defined here at once.
+use cuprate_database::{define_tables, StorableVec};
+
+use crate::types::{KeyImage, RawCachedVerificationState, TransactionHash, TransactionInfo};
+
+define_tables! {
+    /// Serialized transaction blobs.
+    ///
+    /// This table contains the transaction blobs of all the transactions in the pool.
+    0 => TransactionBlobs,
+    TransactionHash => StorableVec,
+
+    /// Transaction information.
+    ///
+    /// This table contains information of all transactions currently in the pool.
+    1 => TransactionInfos,
+    TransactionHash => TransactionInfo,
+
+    /// Cached transaction verification state.
+    ///
+    /// This table contains the cached verification state of all transactions in the pool.
+    2 => CachedVerificationState,
+    TransactionHash => RawCachedVerificationState,
+
+    /// Spent key images.
+    ///
+    /// This table contains the spent key images from all transactions in the pool.
+    3 => SpentKeyImages,
+    KeyImage => TransactionHash
+}
diff --git a/storage/txpool/src/types.rs b/storage/txpool/src/types.rs
new file mode 100644
index 0000000..5c89d3b
--- /dev/null
+++ b/storage/txpool/src/types.rs
@@ -0,0 +1,124 @@
+//! Tx-pool [table](crate::tables) types.
+//!
+//! This module contains all types used by the database tables,
+//! and aliases for common types that use the same underlying
+//! primitive type.
+//!
+//!
+use bytemuck::{Pod, Zeroable};
+
+use monero_serai::transaction::Timelock;
+
+use cuprate_types::{CachedVerificationState, HardFork};
+
+/// An input's key image.
+pub type KeyImage = [u8; 32];
+
+/// A transaction hash. 
+pub type TransactionHash = [u8; 32];
+
+bitflags::bitflags! {
+    /// Flags representing the state of the transaction in the pool.
+    #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
+    #[repr(transparent)]
+    pub struct TxStateFlags: u8 {
+        /// A flag for if the transaction is in the stem state.
+        const STATE_STEM = 0b0000_0001;
+        /// A flag for if we have seen another tx double spending this tx.
+        const DOUBLE_SPENT = 0b0000_0010;
+    }
+}
+
+/// Information on a tx-pool transaction.
+#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
+#[repr(C)]
+pub struct TransactionInfo {
+    /// The transaction's fee.
+    pub fee: u64,
+    /// The transaction's weight.
+    pub weight: usize,
+    /// [`TxStateFlags`] of this transaction.
+    pub flags: TxStateFlags,
+    /// Explicit padding so that we have no implicit padding bytes in `repr(C)`.
+    ///
+    /// Allows potential future expansion of this type.
+    pub _padding: [u8; 7],
+}
+
+/// [`CachedVerificationState`] in a format that can be stored into the database.
+///
+/// This type impls [`Into`] & [`From`] [`CachedVerificationState`].
+#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)]
+#[repr(C)]
+pub struct RawCachedVerificationState {
+    /// The raw hash, will be all `0`s if there is no block hash that this is valid for.
+    raw_valid_at_hash: [u8; 32],
+    /// The raw hard-fork, will be `0` if there is no hf this was validated at.
+    raw_hf: u8,
+    /// The raw [`u64`] timestamp as little endian bytes ([`u64::to_le_bytes`]).
+    ///
+    /// This will be `0` if there is no timestamp that needs to be passed for this to
+    /// be valid.
+    ///
+    /// Not a [`u64`] as if it was this type would have an alignment requirement.
+    raw_valid_past_timestamp: [u8; 8],
+}
+
+impl From<RawCachedVerificationState> for CachedVerificationState {
+    fn from(value: RawCachedVerificationState) -> Self {
+        // if the hash is all `0`s then there is no hash this is valid at. 
+ if value.raw_valid_at_hash == [0; 32] { + return CachedVerificationState::NotVerified; + } + + let raw_valid_past_timestamp = u64::from_le_bytes(value.raw_valid_past_timestamp); + + // if the timestamp is 0, there is no timestamp that needs to be passed. + if raw_valid_past_timestamp == 0 { + return CachedVerificationState::ValidAtHashAndHF { + block_hash: value.raw_valid_at_hash, + hf: HardFork::from_version(value.raw_hf) + .expect("hard-fork values stored in the DB should always be valid"), + }; + } + + CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock { + block_hash: value.raw_valid_at_hash, + hf: HardFork::from_version(value.raw_hf) + .expect("hard-fork values stored in the DB should always be valid"), + time_lock: Timelock::Time(raw_valid_past_timestamp), + } + } +} + +impl From for RawCachedVerificationState { + fn from(value: CachedVerificationState) -> Self { + match value { + CachedVerificationState::NotVerified => Self { + raw_valid_at_hash: [0; 32], + raw_hf: 0, + raw_valid_past_timestamp: [0; 8], + }, + CachedVerificationState::ValidAtHashAndHF { block_hash, hf } => Self { + raw_valid_at_hash: block_hash, + raw_hf: hf.as_u8(), + raw_valid_past_timestamp: [0; 8], + }, + CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock { + block_hash, + hf, + time_lock, + } => { + let Timelock::Time(time) = time_lock else { + panic!("ValidAtHashAndHFWithTimeBasedLock timelock was not time-based"); + }; + + Self { + raw_valid_at_hash: block_hash, + raw_hf: hf.as_u8(), + raw_valid_past_timestamp: time.to_le_bytes(), + } + } + } + } +} diff --git a/types/src/transaction_verification_data.rs b/types/src/transaction_verification_data.rs index 68e17b8..3dfe5fd 100644 --- a/types/src/transaction_verification_data.rs +++ b/types/src/transaction_verification_data.rs @@ -4,7 +4,7 @@ use std::sync::Mutex; use monero_serai::transaction::{Timelock, Transaction}; -use crate::HardFork; +use crate::{HardFork, VerifiedTransactionInformation}; /// An enum representing 
all valid Monero transaction versions. #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)] @@ -92,3 +92,23 @@ pub struct TransactionVerificationData { /// The verification state of this transaction. pub cached_verification_state: Mutex, } + +#[derive(Debug, Copy, Clone, thiserror::Error)] +#[error("Error converting a verified tx to a cached verification data tx.")] +pub struct TxConversionError; + +impl TryFrom for TransactionVerificationData { + type Error = TxConversionError; + + fn try_from(value: VerifiedTransactionInformation) -> Result { + Ok(Self { + version: TxVersion::from_raw(value.tx.version()).ok_or(TxConversionError)?, + tx: value.tx, + tx_blob: value.tx_blob, + tx_weight: value.tx_weight, + fee: value.fee, + tx_hash: value.tx_hash, + cached_verification_state: Mutex::new(CachedVerificationState::NotVerified), + }) + } +} From bec8cc0aa49517a13ff9132d3bf389cb9b20bcfa Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Mon, 2 Sep 2024 13:09:52 -0400 Subject: [PATCH 045/104] helper: add and use `cast` module (#264) * helper: add `cast` module * fix crates * spacing --- Cargo.lock | 3 + consensus/rules/Cargo.toml | 2 +- consensus/rules/src/transactions/tests.rs | 6 +- consensus/src/block/alt_block.rs | 4 +- consensus/src/context/task.rs | 7 +- helper/Cargo.toml | 5 +- helper/src/cast.rs | 84 +++++++++++++++++++++++ helper/src/lib.rs | 3 + helper/src/map.rs | 6 +- net/epee-encoding/Cargo.toml | 1 + net/epee-encoding/src/lib.rs | 12 ++-- net/epee-encoding/src/value.rs | 39 +++++++---- net/levin/Cargo.toml | 2 + net/levin/src/codec.rs | 20 ++---- net/levin/src/lib.rs | 4 +- net/levin/src/message.rs | 12 ++-- net/levin/tests/fragmented_message.rs | 4 +- net/wire/Cargo.toml | 1 + net/wire/src/p2p.rs | 2 +- storage/blockchain/Cargo.toml | 2 +- storage/blockchain/src/ops/block.rs | 2 +- 21 files changed, 163 insertions(+), 58 deletions(-) create mode 100644 helper/src/cast.rs diff --git a/Cargo.lock b/Cargo.lock index 7753189..d004f95 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -646,6 +646,7 @@ version = "0.5.0" dependencies = [ "bytes", "cuprate-fixed-bytes", + "cuprate-helper", "hex", "paste", "ref-cast", @@ -713,6 +714,7 @@ version = "0.1.0" dependencies = [ "bitflags 2.5.0", "bytes", + "cuprate-helper", "futures", "proptest", "rand", @@ -893,6 +895,7 @@ dependencies = [ "bytes", "cuprate-epee-encoding", "cuprate-fixed-bytes", + "cuprate-helper", "cuprate-levin", "cuprate-types", "hex", diff --git a/consensus/rules/Cargo.toml b/consensus/rules/Cargo.toml index 2cf03e3..8ba321d 100644 --- a/consensus/rules/Cargo.toml +++ b/consensus/rules/Cargo.toml @@ -11,7 +11,7 @@ proptest = ["dep:proptest", "dep:proptest-derive", "cuprate-types/proptest"] rayon = ["dep:rayon"] [dependencies] -cuprate-helper = { path = "../../helper", default-features = false, features = ["std"] } +cuprate-helper = { path = "../../helper", default-features = false, features = ["std", "cast"] } cuprate-types = { path = "../../types", default-features = false } cuprate-cryptonight = {path = "../../cryptonight"} diff --git a/consensus/rules/src/transactions/tests.rs b/consensus/rules/src/transactions/tests.rs index cd0e8c3..4da8fd5 100644 --- a/consensus/rules/src/transactions/tests.rs +++ b/consensus/rules/src/transactions/tests.rs @@ -9,6 +9,8 @@ use proptest::{collection::vec, prelude::*}; use monero_serai::transaction::Output; +use cuprate_helper::cast::u64_to_usize; + use super::*; use crate::decomposed_amount::DECOMPOSED_AMOUNTS; @@ -164,7 +166,7 @@ prop_compose! { if timebased || lock_height > 500_000_000 { Timelock::Time(time_for_time_lock) } else { - Timelock::Block(usize::try_from(lock_height).unwrap()) + Timelock::Block(u64_to_usize(lock_height)) } } } @@ -179,7 +181,7 @@ prop_compose! 
{ match ty { 0 => Timelock::None, 1 => Timelock::Time(time_for_time_lock), - _ => Timelock::Block(usize::try_from(lock_height).unwrap()) + _ => Timelock::Block(u64_to_usize(lock_height)) } } } diff --git a/consensus/src/block/alt_block.rs b/consensus/src/block/alt_block.rs index 513697e..9b94a27 100644 --- a/consensus/src/block/alt_block.rs +++ b/consensus/src/block/alt_block.rs @@ -14,7 +14,7 @@ use cuprate_consensus_rules::{ miner_tx::MinerTxError, ConsensusError, }; -use cuprate_helper::asynch::rayon_spawn_async; +use cuprate_helper::{asynch::rayon_spawn_async, cast::u64_to_usize}; use cuprate_types::{ AltBlockInformation, Chain, ChainId, TransactionVerificationData, VerifiedTransactionInformation, @@ -101,7 +101,7 @@ where // Check the alt block timestamp is in the correct range. if let Some(median_timestamp) = - difficulty_cache.median_timestamp(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW.try_into().unwrap()) + difficulty_cache.median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)) { check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)? 
}; diff --git a/consensus/src/context/task.rs b/consensus/src/context/task.rs index 8939446..2376c35 100644 --- a/consensus/src/context/task.rs +++ b/consensus/src/context/task.rs @@ -9,6 +9,7 @@ use tower::ServiceExt; use tracing::Instrument; use cuprate_consensus_rules::blocks::ContextToVerifyBlock; +use cuprate_helper::cast::u64_to_usize; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse}, Chain, @@ -168,9 +169,9 @@ impl ContextTask { .weight_cache .effective_median_block_weight(¤t_hf), top_hash: self.top_block_hash, - median_block_timestamp: self.difficulty_cache.median_timestamp( - usize::try_from(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW).unwrap(), - ), + median_block_timestamp: self + .difficulty_cache + .median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)), chain_height: self.chain_height, current_hf, next_difficulty: self.difficulty_cache.next_difficulty(¤t_hf), diff --git a/helper/Cargo.toml b/helper/Cargo.toml index 59e4e71..9af25c6 100644 --- a/helper/Cargo.toml +++ b/helper/Cargo.toml @@ -10,14 +10,15 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/consensus" [features] # All features on by default. -default = ["std", "atomic", "asynch", "fs", "num", "map", "time", "thread", "constants"] +default = ["std", "atomic", "asynch", "cast", "fs", "num", "map", "time", "thread", "constants"] std = [] atomic = ["dep:crossbeam"] asynch = ["dep:futures", "dep:rayon"] +cast = [] constants = [] fs = ["dep:dirs"] num = [] -map = ["dep:monero-serai"] +map = ["cast", "dep:monero-serai"] time = ["dep:chrono", "std"] thread = ["std", "dep:target_os_lib"] diff --git a/helper/src/cast.rs b/helper/src/cast.rs new file mode 100644 index 0000000..81d0836 --- /dev/null +++ b/helper/src/cast.rs @@ -0,0 +1,84 @@ +//! Casting. +//! +//! This modules provides utilities for casting between types. +//! +//! `#[no_std]` compatible. 
+ +#[rustfmt::skip] +//============================ SAFETY: DO NOT REMOVE ===========================// +// // +// // +// Only allow building 64-bit targets. // +// This allows us to assume 64-bit invariants in this file. // + #[cfg(not(target_pointer_width = "64"))] + compile_error!("Cuprate is only compatible with 64-bit CPUs"); +// // +// // +//============================ SAFETY: DO NOT REMOVE ===========================// + +//---------------------------------------------------------------------------------------------------- Free functions +/// Cast [`u64`] to [`usize`]. +#[inline(always)] +pub const fn u64_to_usize(u: u64) -> usize { + u as usize +} + +/// Cast [`u32`] to [`usize`]. +#[inline(always)] +pub const fn u32_to_usize(u: u32) -> usize { + u as usize +} + +/// Cast [`usize`] to [`u64`]. +#[inline(always)] +pub const fn usize_to_u64(u: usize) -> u64 { + u as u64 +} + +/// Cast [`i64`] to [`isize`]. +#[inline(always)] +pub const fn i64_to_isize(i: i64) -> isize { + i as isize +} + +/// Cast [`i32`] to [`isize`]. +#[inline(always)] +pub const fn i32_to_isize(i: i32) -> isize { + i as isize +} + +/// Cast [`isize`] to [`i64`]. 
+#[inline(always)] +pub const fn isize_to_i64(i: isize) -> i64 { + i as i64 +} + +//---------------------------------------------------------------------------------------------------- Tests +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn max_unsigned() { + assert_eq!(u32_to_usize(u32::MAX), u32::MAX as usize); + assert_eq!(usize_to_u64(u32_to_usize(u32::MAX)), u32::MAX as u64); + + assert_eq!(u64_to_usize(u64::MAX), usize::MAX); + assert_eq!(usize_to_u64(u64_to_usize(u64::MAX)), u64::MAX); + + assert_eq!(usize_to_u64(usize::MAX), u64::MAX); + assert_eq!(u64_to_usize(usize_to_u64(usize::MAX)), usize::MAX); + } + + #[test] + fn max_signed() { + assert_eq!(i32_to_isize(i32::MAX), i32::MAX as isize); + assert_eq!(isize_to_i64(i32_to_isize(i32::MAX)), i32::MAX as i64); + + assert_eq!(i64_to_isize(i64::MAX), isize::MAX); + assert_eq!(isize_to_i64(i64_to_isize(i64::MAX)), i64::MAX); + + assert_eq!(isize_to_i64(isize::MAX), i64::MAX); + assert_eq!(i64_to_isize(isize_to_i64(isize::MAX)), isize::MAX); + } +} diff --git a/helper/src/lib.rs b/helper/src/lib.rs index 90f420d..4dd3105 100644 --- a/helper/src/lib.rs +++ b/helper/src/lib.rs @@ -40,6 +40,9 @@ pub mod asynch; // async collides #[cfg(feature = "atomic")] pub mod atomic; +#[cfg(feature = "cast")] +pub mod cast; + #[cfg(feature = "constants")] pub mod constants; diff --git a/helper/src/map.rs b/helper/src/map.rs index 96d9f61..82d5494 100644 --- a/helper/src/map.rs +++ b/helper/src/map.rs @@ -7,6 +7,8 @@ //---------------------------------------------------------------------------------------------------- Use use monero_serai::transaction::Timelock; +use crate::cast::{u64_to_usize, usize_to_u64}; + //---------------------------------------------------------------------------------------------------- `(u64, u64) <-> u128` /// Split a [`u128`] value into 2 64-bit values. 
/// @@ -77,7 +79,7 @@ pub fn u64_to_timelock(u: u64) -> Timelock { if u == 0 { Timelock::None } else if u < 500_000_000 { - Timelock::Block(usize::try_from(u).unwrap()) + Timelock::Block(u64_to_usize(u)) } else { Timelock::Time(u) } @@ -97,7 +99,7 @@ pub fn u64_to_timelock(u: u64) -> Timelock { pub fn timelock_to_u64(timelock: Timelock) -> u64 { match timelock { Timelock::None => 0, - Timelock::Block(u) => u64::try_from(u).unwrap(), + Timelock::Block(u) => usize_to_u64(u), Timelock::Time(u) => u, } } diff --git a/net/epee-encoding/Cargo.toml b/net/epee-encoding/Cargo.toml index 7feac00..85ee2c9 100644 --- a/net/epee-encoding/Cargo.toml +++ b/net/epee-encoding/Cargo.toml @@ -15,6 +15,7 @@ default = ["std"] std = ["dep:thiserror", "bytes/std", "cuprate-fixed-bytes/std"] [dependencies] +cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] } cuprate-fixed-bytes = { path = "../fixed-bytes", default-features = false } paste = "1.0.14" diff --git a/net/epee-encoding/src/lib.rs b/net/epee-encoding/src/lib.rs index 5b64315..fa3449b 100644 --- a/net/epee-encoding/src/lib.rs +++ b/net/epee-encoding/src/lib.rs @@ -65,6 +65,8 @@ use core::{ops::Deref, str::from_utf8 as str_from_utf8}; use bytes::{Buf, BufMut, Bytes, BytesMut}; +use cuprate_helper::cast::{u64_to_usize, usize_to_u64}; + pub mod container_as_blob; pub mod error; mod io; @@ -242,7 +244,7 @@ pub fn write_bytes, B: BufMut>(t: T, w: &mut B) -> Result<()> { let bytes = t.as_ref(); let len = bytes.len(); - write_varint(len.try_into()?, w)?; + write_varint(usize_to_u64(len), w)?; if w.remaining_mut() < len { return Err(Error::IO("Not enough capacity to write bytes")); @@ -286,7 +288,7 @@ where I: Iterator + ExactSizeIterator, B: BufMut, { - write_varint(iterator.len().try_into()?, w)?; + write_varint(usize_to_u64(iterator.len()), w)?; for item in iterator.into_iter() { item.write(w)?; } @@ -334,7 +336,7 @@ fn skip_epee_value(r: &mut B, skipped_objects: &mut u8) -> Result<()> { if let 
Some(size) = marker.inner_marker.size() { let bytes_to_skip = size - .checked_mul(len.try_into()?) + .checked_mul(u64_to_usize(len)) .ok_or(Error::Value("List is too big".to_string()))?; return advance(bytes_to_skip, r); }; @@ -352,8 +354,8 @@ fn skip_epee_value(r: &mut B, skipped_objects: &mut u8) -> Result<()> { | InnerMarker::U8 | InnerMarker::Bool => unreachable!("These types are constant size."), InnerMarker::String => { - let len = read_varint(r)?; - advance(len.try_into()?, r)?; + let len = u64_to_usize(read_varint(r)?); + advance(len, r)?; } InnerMarker::Object => { *skipped_objects += 1; diff --git a/net/epee-encoding/src/value.rs b/net/epee-encoding/src/value.rs index 094f0ef..000d89c 100644 --- a/net/epee-encoding/src/value.rs +++ b/net/epee-encoding/src/value.rs @@ -7,6 +7,7 @@ use core::fmt::Debug; use bytes::{Buf, BufMut, Bytes, BytesMut}; use cuprate_fixed_bytes::{ByteArray, ByteArrayVec}; +use cuprate_helper::cast::u64_to_usize; use crate::{ io::{checked_read_primitive, checked_write_primitive}, @@ -66,11 +67,11 @@ impl EpeeValue for Vec { "Marker is not sequence when a sequence was expected", )); } - let len = read_varint(r)?; + let len = u64_to_usize(read_varint(r)?); let individual_marker = Marker::new(marker.inner_marker); - let mut res = Vec::with_capacity(len.try_into()?); + let mut res = Vec::with_capacity(len); for _ in 0..len { res.push(T::read(r, &individual_marker)?); } @@ -167,11 +168,13 @@ impl EpeeValue for Vec { return Err(Error::Format("Byte array exceeded max length")); } - if r.remaining() < len.try_into()? { + let len = u64_to_usize(len); + + if r.remaining() < len { return Err(Error::IO("Not enough bytes to fill object")); } - let mut res = vec![0; len.try_into()?]; + let mut res = vec![0; len]; r.copy_to_slice(&mut res); Ok(res) @@ -203,11 +206,13 @@ impl EpeeValue for Bytes { return Err(Error::Format("Byte array exceeded max length")); } - if r.remaining() < len.try_into()? 
{ + let len = u64_to_usize(len); + + if r.remaining() < len { return Err(Error::IO("Not enough bytes to fill object")); } - Ok(r.copy_to_bytes(len.try_into()?)) + Ok(r.copy_to_bytes(len)) } fn epee_default_value() -> Option { @@ -236,11 +241,13 @@ impl EpeeValue for BytesMut { return Err(Error::Format("Byte array exceeded max length")); } - if r.remaining() < len.try_into()? { + let len = u64_to_usize(len); + + if r.remaining() < len { return Err(Error::IO("Not enough bytes to fill object")); } - let mut bytes = BytesMut::zeroed(len.try_into()?); + let mut bytes = BytesMut::zeroed(len); r.copy_to_slice(&mut bytes); Ok(bytes) @@ -272,11 +279,13 @@ impl EpeeValue for ByteArrayVec { return Err(Error::Format("Byte array exceeded max length")); } - if r.remaining() < usize::try_from(len)? { + let len = u64_to_usize(len); + + if r.remaining() < len { return Err(Error::IO("Not enough bytes to fill object")); } - ByteArrayVec::try_from(r.copy_to_bytes(usize::try_from(len)?)) + ByteArrayVec::try_from(r.copy_to_bytes(len)) .map_err(|_| Error::Format("Field has invalid length")) } @@ -302,7 +311,7 @@ impl EpeeValue for ByteArray { return Err(Error::Format("Marker does not match expected Marker")); } - let len: usize = read_varint(r)?.try_into()?; + let len = u64_to_usize(read_varint(r)?); if len != N { return Err(Error::Format("Byte array has incorrect length")); } @@ -370,11 +379,11 @@ impl EpeeValue for Vec<[u8; N]> { )); } - let len = read_varint(r)?; + let len = u64_to_usize(read_varint(r)?); let individual_marker = Marker::new(marker.inner_marker); - let mut res = Vec::with_capacity(len.try_into()?); + let mut res = Vec::with_capacity(len); for _ in 0..len { res.push(<[u8; N]>::read(r, &individual_marker)?); } @@ -406,11 +415,11 @@ macro_rules! 
epee_seq { )); } - let len = read_varint(r)?; + let len = u64_to_usize(read_varint(r)?); let individual_marker = Marker::new(marker.inner_marker.clone()); - let mut res = Vec::with_capacity(len.try_into()?); + let mut res = Vec::with_capacity(len); for _ in 0..len { res.push(<$val>::read(r, &individual_marker)?); } diff --git a/net/levin/Cargo.toml b/net/levin/Cargo.toml index 13deabe..1c585b9 100644 --- a/net/levin/Cargo.toml +++ b/net/levin/Cargo.toml @@ -12,6 +12,8 @@ default = [] tracing = ["dep:tracing", "tokio-util/tracing"] [dependencies] +cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] } + thiserror = { workspace = true } bytes = { workspace = true, features = ["std"] } bitflags = { workspace = true } diff --git a/net/levin/src/codec.rs b/net/levin/src/codec.rs index 3718d8c..1177733 100644 --- a/net/levin/src/codec.rs +++ b/net/levin/src/codec.rs @@ -20,6 +20,8 @@ use std::{fmt::Debug, marker::PhantomData}; use bytes::{Buf, BufMut, BytesMut}; use tokio_util::codec::{Decoder, Encoder}; +use cuprate_helper::cast::u64_to_usize; + use crate::{ header::{Flags, HEADER_SIZE}, message::{make_dummy_message, LevinMessage}, @@ -114,10 +116,7 @@ impl Decoder for LevinBucketCodec { std::mem::replace(&mut self.state, LevinBucketState::WaitingForBody(head)); } LevinBucketState::WaitingForBody(head) => { - let body_len = head - .size - .try_into() - .map_err(|_| BucketError::BucketExceededMaxSize)?; + let body_len = u64_to_usize(head.size); if src.len() < body_len { src.reserve(body_len - src.len()); return Ok(None); @@ -255,13 +254,11 @@ impl Decoder for LevinMessageCodec { continue; }; - let max_size = if self.bucket_codec.handshake_message_seen { + let max_size = u64_to_usize(if self.bucket_codec.handshake_message_seen { self.bucket_codec.protocol.max_packet_size } else { self.bucket_codec.protocol.max_packet_size_before_handshake - } - .try_into() - .expect("Levin max message size is too large, does not fit into a usize."); + 
}); if bytes.len().saturating_add(bucket.body.len()) > max_size { return Err(BucketError::InvalidFragmentedMessage( @@ -300,12 +297,7 @@ impl Decoder for LevinMessageCodec { } // Check the fragmented message contains enough bytes to build the message. - if bytes.len().saturating_sub(HEADER_SIZE) - < header - .size - .try_into() - .map_err(|_| BucketError::BucketExceededMaxSize)? - { + if bytes.len().saturating_sub(HEADER_SIZE) < u64_to_usize(header.size) { return Err(BucketError::InvalidFragmentedMessage( "Fragmented message does not have enough bytes to fill bucket body", )); diff --git a/net/levin/src/lib.rs b/net/levin/src/lib.rs index 0a247f7..ab03bfb 100644 --- a/net/levin/src/lib.rs +++ b/net/levin/src/lib.rs @@ -38,6 +38,8 @@ use std::fmt::Debug; use bytes::{Buf, Bytes}; use thiserror::Error; +use cuprate_helper::cast::usize_to_u64; + pub mod codec; pub mod header; pub mod message; @@ -212,7 +214,7 @@ impl BucketBuilder { Bucket { header: BucketHead { signature: self.signature.unwrap(), - size: body.len().try_into().unwrap(), + size: usize_to_u64(body.len()), have_to_return_data: ty.have_to_return_data(), command: self.command.unwrap(), return_code: self.return_code.unwrap(), diff --git a/net/levin/src/message.rs b/net/levin/src/message.rs index af8227d..19aa1b5 100644 --- a/net/levin/src/message.rs +++ b/net/levin/src/message.rs @@ -5,6 +5,8 @@ //! for more control over what is actually sent over the wire at certain times. 
use bytes::{Bytes, BytesMut}; +use cuprate_helper::cast::usize_to_u64; + use crate::{ header::{Flags, HEADER_SIZE}, Bucket, BucketBuilder, BucketError, BucketHead, LevinBody, LevinCommand, Protocol, @@ -106,9 +108,7 @@ pub fn make_fragmented_messages( new_body.resize(fragment_size - HEADER_SIZE, 0); bucket.body = new_body.freeze(); - bucket.header.size = (fragment_size - HEADER_SIZE) - .try_into() - .expect("Bucket size does not fit into u64"); + bucket.header.size = usize_to_u64(fragment_size - HEADER_SIZE); } return Ok(vec![bucket]); @@ -118,9 +118,7 @@ pub fn make_fragmented_messages( // The first fragment will set the START flag, the last will set the END flag. let fragment_head = BucketHead { signature: protocol.signature, - size: (fragment_size - HEADER_SIZE) - .try_into() - .expect("Bucket size does not fit into u64"), + size: usize_to_u64(fragment_size - HEADER_SIZE), have_to_return_data: false, // Just use a default command. command: T::Command::from(0), @@ -191,7 +189,7 @@ pub(crate) fn make_dummy_message(protocol: &Protocol, size: usi // A header to put on the dummy message. let header = BucketHead { signature: protocol.signature, - size: size.try_into().expect("Bucket size does not fit into u64"), + size: usize_to_u64(size), have_to_return_data: false, // Just use a default command. 
command: T::from(0), diff --git a/net/levin/tests/fragmented_message.rs b/net/levin/tests/fragmented_message.rs index 7799a71..512fd46 100644 --- a/net/levin/tests/fragmented_message.rs +++ b/net/levin/tests/fragmented_message.rs @@ -8,6 +8,8 @@ use tokio::{ }; use tokio_util::codec::{FramedRead, FramedWrite}; +use cuprate_helper::cast::u64_to_usize; + use cuprate_levin::{ message::make_fragmented_messages, BucketBuilder, BucketError, LevinBody, LevinCommand, LevinMessageCodec, MessageType, Protocol, @@ -54,7 +56,7 @@ impl LevinBody for TestBody { _: MessageType, _: Self::Command, ) -> Result { - let size = body.get_u64_le().try_into().unwrap(); + let size = u64_to_usize(body.get_u64_le()); // bucket Ok(TestBody::Bytes(size, body.copy_to_bytes(size))) } diff --git a/net/wire/Cargo.toml b/net/wire/Cargo.toml index 101daa3..cbeb551 100644 --- a/net/wire/Cargo.toml +++ b/net/wire/Cargo.toml @@ -15,6 +15,7 @@ cuprate-levin = { path = "../levin" } cuprate-epee-encoding = { path = "../epee-encoding" } cuprate-fixed-bytes = { path = "../fixed-bytes" } cuprate-types = { path = "../../types", default-features = false, features = ["epee"] } +cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] } bitflags = { workspace = true, features = ["std"] } bytes = { workspace = true, features = ["std"] } diff --git a/net/wire/src/p2p.rs b/net/wire/src/p2p.rs index 9743109..3829d17 100644 --- a/net/wire/src/p2p.rs +++ b/net/wire/src/p2p.rs @@ -99,7 +99,7 @@ impl LevinCommandTrait for LevinCommand { LevinCommand::FluffyMissingTxsRequest => 1024 * 1024, // 1 MB LevinCommand::GetTxPoolCompliment => 1024 * 1024 * 4, // 4 MB - LevinCommand::Unknown(_) => usize::MAX.try_into().unwrap_or(u64::MAX), + LevinCommand::Unknown(_) => u64::MAX, } } diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index 7e79305..58da21e 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -39,7 +39,7 @@ thread_local = { 
workspace = true, optional = true } rayon = { workspace = true, optional = true } [dev-dependencies] -cuprate-helper = { path = "../../helper", features = ["thread"] } +cuprate-helper = { path = "../../helper", features = ["thread", "cast"] } cuprate-test-utils = { path = "../../test-utils" } tokio = { workspace = true, features = ["full"] } diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index 4f77d73..d1b83a4 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -442,7 +442,7 @@ mod test { let mut block = BLOCK_V9_TX3.clone(); - block.height = usize::try_from(u32::MAX).unwrap() + 1; + block.height = cuprate_helper::cast::u32_to_usize(u32::MAX) + 1; add_block(&block, &mut tables).unwrap(); } From b837d350a448ba400fdd0296464a58672db2cb7b Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Mon, 2 Sep 2024 13:10:45 -0400 Subject: [PATCH 046/104] workspace: add naming convention lints (#261) * add lint to {Cargo,clippy}.toml * `RandomXVM` -> `RandomXVm` * epee: `TT` -> `T2` --- Cargo.toml | 2 ++ clippy.toml | 1 + consensus/src/block/alt_block.rs | 4 +-- consensus/src/block/batch_prepare.rs | 4 +-- consensus/src/context.rs | 8 ++--- consensus/src/context/alt_chains.rs | 4 +-- consensus/src/context/rx_vms.rs | 38 ++++++++++++------------ consensus/src/context/task.rs | 4 +-- consensus/src/tests/context/rx_vms.rs | 6 ++-- net/epee-encoding/tests/duplicate_key.rs | 6 ++-- 10 files changed, 40 insertions(+), 37 deletions(-) create mode 100644 clippy.toml diff --git a/Cargo.toml b/Cargo.toml index 06b49a0..1a0c667 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -262,6 +262,7 @@ empty_structs_with_brackets = "deny" empty_enum_variants_with_brackets = "deny" empty_drop = "deny" clone_on_ref_ptr = "deny" +upper_case_acronyms = "deny" # Hot # inline_always = "deny" @@ -309,6 +310,7 @@ let_underscore_drop = "deny" unreachable_pub = "deny" unused_qualifications = "deny" variant_size_differences = "deny" 
+non_camel_case_types = "deny" # Hot # unused_results = "deny" diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 0000000..cc94ec5 --- /dev/null +++ b/clippy.toml @@ -0,0 +1 @@ +upper-case-acronyms-aggressive = true diff --git a/consensus/src/block/alt_block.rs b/consensus/src/block/alt_block.rs index 9b94a27..b20b4f2 100644 --- a/consensus/src/block/alt_block.rs +++ b/consensus/src/block/alt_block.rs @@ -24,7 +24,7 @@ use crate::{ block::{free::pull_ordered_transactions, PreparedBlock}, context::{ difficulty::DifficultyCache, - rx_vms::RandomXVM, + rx_vms::RandomXVm, weight::{self, BlockWeightsCache}, AltChainContextCache, AltChainRequestToken, BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW, }, @@ -195,7 +195,7 @@ async fn alt_rx_vm( parent_chain: Chain, alt_chain_context: &mut AltChainContextCache, context_svc: C, -) -> Result>, ExtendedConsensusError> +) -> Result>, ExtendedConsensusError> where C: Service< BlockChainContextRequest, diff --git a/consensus/src/block/batch_prepare.rs b/consensus/src/block/batch_prepare.rs index 9974d6d..d32cd76 100644 --- a/consensus/src/block/batch_prepare.rs +++ b/consensus/src/block/batch_prepare.rs @@ -15,7 +15,7 @@ use cuprate_helper::asynch::rayon_spawn_async; use crate::{ block::{free::pull_ordered_transactions, PreparedBlock, PreparedBlockExPow}, - context::rx_vms::RandomXVM, + context::rx_vms::RandomXVm, transactions::new_tx_verification_data, BlockChainContextRequest, BlockChainContextResponse, ExtendedConsensusError, VerifyBlockResponse, @@ -148,7 +148,7 @@ where tracing::debug!("New randomX seed in batch, initialising VM"); let new_vm = rayon_spawn_async(move || { - Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!")) + Arc::new(RandomXVm::new(&new_vm_seed).expect("RandomX VM gave an error on set up!")) }) .await; diff --git a/consensus/src/context.rs b/consensus/src/context.rs index 26be75c..9e71304 100644 --- a/consensus/src/context.rs +++ b/consensus/src/context.rs @@ -33,7 +33,7 @@ 
mod tokens; use cuprate_types::Chain; use difficulty::DifficultyCache; -use rx_vms::RandomXVM; +use rx_vms::RandomXVm; use weight::BlockWeightsCache; pub(crate) use alt_chains::{sealed::AltChainRequestToken, AltChainContextCache}; @@ -236,7 +236,7 @@ pub enum BlockChainContextRequest { /// seed. /// /// This should include the seed used to init this VM and the VM. - NewRXVM(([u8; 32], Arc)), + NewRXVM(([u8; 32], Arc)), /// A request to add a new block to the cache. Update(NewBlockData), /// Pop blocks from the cache to the specified height. @@ -313,7 +313,7 @@ pub enum BlockChainContextResponse { /// Blockchain context response. Context(BlockChainContext), /// A map of seed height to RandomX VMs. - RxVms(HashMap>), + RxVms(HashMap>), /// A list of difficulties. BatchDifficulties(Vec), /// An alt chain context cache. @@ -321,7 +321,7 @@ pub enum BlockChainContextResponse { /// A difficulty cache for an alt chain. AltChainDifficultyCache(DifficultyCache), /// A randomX VM for an alt chain. - AltChainRxVM(Arc), + AltChainRxVM(Arc), /// A weight cache for an alt chain AltChainWeightCache(BlockWeightsCache), /// A generic Ok response. diff --git a/consensus/src/context/alt_chains.rs b/consensus/src/context/alt_chains.rs index 5586226..937e847 100644 --- a/consensus/src/context/alt_chains.rs +++ b/consensus/src/context/alt_chains.rs @@ -11,7 +11,7 @@ use cuprate_types::{ use crate::{ ExtendedConsensusError, __private::Database, - context::{difficulty::DifficultyCache, rx_vms::RandomXVM, weight::BlockWeightsCache}, + context::{difficulty::DifficultyCache, rx_vms::RandomXVm, weight::BlockWeightsCache}, }; pub(crate) mod sealed { @@ -32,7 +32,7 @@ pub struct AltChainContextCache { pub difficulty_cache: Option, /// A cached RX VM. - pub cached_rx_vm: Option<(usize, Arc)>, + pub cached_rx_vm: Option<(usize, Arc)>, /// The chain height of the alt chain. 
pub chain_height: usize, diff --git a/consensus/src/context/rx_vms.rs b/consensus/src/context/rx_vms.rs index 01aa973..b1ab102 100644 --- a/consensus/src/context/rx_vms.rs +++ b/consensus/src/context/rx_vms.rs @@ -9,7 +9,7 @@ use std::{ }; use futures::{stream::FuturesOrdered, StreamExt}; -use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VMInner}; +use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VmInner}; use rayon::prelude::*; use thread_local::ThreadLocal; use tower::ServiceExt; @@ -33,16 +33,16 @@ const RX_SEEDS_CACHED: usize = 2; /// A multithreaded randomX VM. #[derive(Debug)] -pub struct RandomXVM { +pub struct RandomXVm { /// These RandomX VMs all share the same cache. - vms: ThreadLocal, + vms: ThreadLocal, /// The RandomX cache. cache: RandomXCache, /// The flags used to start the RandomX VMs. flags: RandomXFlag, } -impl RandomXVM { +impl RandomXVm { /// Create a new multithreaded randomX VM with the provided seed. pub fn new(seed: &[u8; 32]) -> Result { // TODO: allow passing in flags. @@ -50,7 +50,7 @@ impl RandomXVM { let cache = RandomXCache::new(flags, seed.as_slice())?; - Ok(RandomXVM { + Ok(RandomXVm { vms: ThreadLocal::new(), cache, flags, @@ -58,12 +58,12 @@ impl RandomXVM { } } -impl RandomX for RandomXVM { +impl RandomX for RandomXVm { type Error = RandomXError; fn calculate_hash(&self, buf: &[u8]) -> Result<[u8; 32], Self::Error> { self.vms - .get_or_try(|| VMInner::new(self.flags, Some(self.cache.clone()), None))? + .get_or_try(|| VmInner::new(self.flags, Some(self.cache.clone()), None))? .calculate_hash(buf) .map(|out| out.try_into().unwrap()) } @@ -72,17 +72,17 @@ impl RandomX for RandomXVM { /// The randomX VMs cache, keeps the VM needed to calculate the current block's PoW hash (if a VM is needed) and a /// couple more around this VM. #[derive(Clone, Debug)] -pub struct RandomXVMCache { +pub struct RandomXVmCache { /// The top [`RX_SEEDS_CACHED`] RX seeds. 
pub(crate) seeds: VecDeque<(usize, [u8; 32])>, /// The VMs for `seeds` (if after hf 12, otherwise this will be empty). - pub(crate) vms: HashMap>, + pub(crate) vms: HashMap>, /// A single cached VM that was given to us from a part of Cuprate. - pub(crate) cached_vm: Option<([u8; 32], Arc)>, + pub(crate) cached_vm: Option<([u8; 32], Arc)>, } -impl RandomXVMCache { +impl RandomXVmCache { #[instrument(name = "init_rx_vm_cache", level = "info", skip(database))] pub async fn init_from_chain_height( chain_height: usize, @@ -106,7 +106,7 @@ impl RandomXVMCache { .map(|(height, seed)| { ( *height, - Arc::new(RandomXVM::new(seed).expect("Failed to create RandomX VM!")), + Arc::new(RandomXVm::new(seed).expect("Failed to create RandomX VM!")), ) }) .collect() @@ -117,7 +117,7 @@ impl RandomXVMCache { HashMap::new() }; - Ok(RandomXVMCache { + Ok(RandomXVmCache { seeds, vms, cached_vm: None, @@ -125,7 +125,7 @@ impl RandomXVMCache { } /// Add a randomX VM to the cache, with the seed it was created with. - pub fn add_vm(&mut self, vm: ([u8; 32], Arc)) { + pub fn add_vm(&mut self, vm: ([u8; 32], Arc)) { self.cached_vm.replace(vm); } @@ -136,7 +136,7 @@ impl RandomXVMCache { height: usize, chain: Chain, database: D, - ) -> Result, ExtendedConsensusError> { + ) -> Result, ExtendedConsensusError> { let seed_height = randomx_seed_height(height); let BlockchainResponse::BlockHash(seed_hash) = database @@ -156,13 +156,13 @@ impl RandomXVMCache { } } - let alt_vm = rayon_spawn_async(move || Arc::new(RandomXVM::new(&seed_hash).unwrap())).await; + let alt_vm = rayon_spawn_async(move || Arc::new(RandomXVm::new(&seed_hash).unwrap())).await; Ok(alt_vm) } /// Get the main-chain RandomX VMs. - pub async fn get_vms(&mut self) -> HashMap> { + pub async fn get_vms(&mut self) -> HashMap> { match self.seeds.len().checked_sub(self.vms.len()) { // No difference in the amount of seeds to VMs. 
Some(0) => (), @@ -184,7 +184,7 @@ impl RandomXVMCache { } }; - rayon_spawn_async(move || Arc::new(RandomXVM::new(&next_seed_hash).unwrap())) + rayon_spawn_async(move || Arc::new(RandomXVm::new(&next_seed_hash).unwrap())) .await }; @@ -200,7 +200,7 @@ impl RandomXVMCache { seeds_clone .par_iter() .map(|(height, seed)| { - let vm = RandomXVM::new(seed).expect("Failed to create RandomX VM!"); + let vm = RandomXVm::new(seed).expect("Failed to create RandomX VM!"); let vm = Arc::new(vm); (*height, vm) }) diff --git a/consensus/src/context/task.rs b/consensus/src/context/task.rs index 2376c35..bc54285 100644 --- a/consensus/src/context/task.rs +++ b/consensus/src/context/task.rs @@ -46,7 +46,7 @@ pub struct ContextTask { /// The weight cache. weight_cache: weight::BlockWeightsCache, /// The RX VM cache. - rx_vm_cache: rx_vms::RandomXVMCache, + rx_vm_cache: rx_vms::RandomXVmCache, /// The hard-fork state cache. hardfork_state: hardforks::HardForkState, @@ -128,7 +128,7 @@ impl ContextTask { let db = database.clone(); let rx_seed_handle = tokio::spawn(async move { - rx_vms::RandomXVMCache::init_from_chain_height(chain_height, ¤t_hf, db).await + rx_vms::RandomXVmCache::init_from_chain_height(chain_height, ¤t_hf, db).await }); let context_svc = ContextTask { diff --git a/consensus/src/tests/context/rx_vms.rs b/consensus/src/tests/context/rx_vms.rs index f18a9b5..5c198cf 100644 --- a/consensus/src/tests/context/rx_vms.rs +++ b/consensus/src/tests/context/rx_vms.rs @@ -9,7 +9,7 @@ use cuprate_consensus_rules::{ }; use crate::{ - context::rx_vms::{get_last_rx_seed_heights, RandomXVMCache}, + context::rx_vms::{get_last_rx_seed_heights, RandomXVmCache}, tests::mock_db::*, }; @@ -42,7 +42,7 @@ fn rx_heights_consistent() { async fn rx_vm_created_on_hf_12() { let db = DummyDatabaseBuilder::default().finish(Some(10)); - let mut cache = RandomXVMCache::init_from_chain_height(10, &HardFork::V11, db) + let mut cache = RandomXVmCache::init_from_chain_height(10, &HardFork::V11, db) 
.await .unwrap(); @@ -67,7 +67,7 @@ proptest! { let rt = Builder::new_multi_thread().enable_all().build().unwrap(); rt.block_on(async move { - let cache = RandomXVMCache::init_from_chain_height(10, &hf, db).await.unwrap(); + let cache = RandomXVmCache::init_from_chain_height(10, &hf, db).await.unwrap(); assert!(cache.seeds.len() == cache.vms.len() || hf < HardFork::V12); }); } diff --git a/net/epee-encoding/tests/duplicate_key.rs b/net/epee-encoding/tests/duplicate_key.rs index c1b3148..0ed87af 100644 --- a/net/epee-encoding/tests/duplicate_key.rs +++ b/net/epee-encoding/tests/duplicate_key.rs @@ -9,12 +9,12 @@ epee_object!( a: u8, ); -struct TT { +struct T2 { a: u8, } epee_object!( - TT, + T2, a: u8 = 0, ); @@ -35,5 +35,5 @@ fn duplicate_key_with_default() { b'a', 0x0B, 0x00, ]; - assert!(from_bytes::(&mut &data[..]).is_err()); + assert!(from_bytes::(&mut &data[..]).is_err()); } From eead49beb09b4a0a8c4a2b0f38683311f53eb891 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Mon, 2 Sep 2024 13:12:54 -0400 Subject: [PATCH 047/104] lints: opt in manual lint crates (#263) * cargo.toml: transfer existing lints * rpc/interface: lints * rpc/json-rpc: lints * rpc/types: lints * storage/blockchain: lints * rpc/types: fix lints * cargo.toml: fix lint group priority * storage/blockchain: fix lints * fix misc lints * storage/database: fixes * storage/txpool: opt in lints + fixes * types: opt in + fixes * helper: opt in + fixes * types: remove borsh * rpc/interface: fix test * test fixes * database: fix lints * fix lint * tabs -> spaces * blockchain: `config/` -> `config.rs` --- Cargo.lock | 44 +++++--- Cargo.toml | 6 +- helper/Cargo.toml | 3 + helper/src/asynch.rs | 8 +- helper/src/atomic.rs | 2 + helper/src/fs.rs | 89 +++++---------- helper/src/lib.rs | 32 ------ helper/src/map.rs | 3 +- helper/src/network.rs | 8 +- helper/src/num.rs | 3 +- helper/src/thread.rs | 10 +- helper/src/time.rs | 2 + rpc/interface/Cargo.toml | 8 +- rpc/interface/src/lib.rs | 103 ++---------------- 
rpc/json-rpc/Cargo.toml | 5 +- rpc/json-rpc/src/lib.rs | 90 --------------- rpc/json-rpc/src/response.rs | 4 +- rpc/json-rpc/src/tests.rs | 1 + rpc/types/Cargo.toml | 9 +- rpc/types/src/lib.rs | 97 ++--------------- rpc/types/src/misc/mod.rs | 1 + storage/blockchain/Cargo.toml | 3 + storage/blockchain/src/{config => }/config.rs | 42 ++++++- storage/blockchain/src/config/mod.rs | 44 -------- storage/blockchain/src/lib.rs | 100 +---------------- storage/blockchain/src/service/free.rs | 4 +- storage/blockchain/src/service/tests.rs | 3 +- storage/database/Cargo.toml | 5 +- storage/database/src/backend/heed/env.rs | 2 +- storage/database/src/backend/heed/storable.rs | 4 +- storage/database/src/backend/redb/database.rs | 10 +- storage/database/src/config/mod.rs | 1 + storage/database/src/env.rs | 2 +- storage/database/src/key.rs | 4 +- storage/database/src/lib.rs | 102 +++-------------- storage/txpool/Cargo.toml | 3 + storage/txpool/src/config.rs | 2 +- storage/txpool/src/lib.rs | 14 +++ storage/txpool/src/ops/key_images.rs | 4 +- storage/txpool/src/service/free.rs | 4 +- storage/txpool/src/service/read.rs | 8 +- storage/txpool/src/service/write.rs | 2 +- storage/txpool/src/types.rs | 10 +- types/Cargo.toml | 6 +- types/src/lib.rs | 75 +------------ 45 files changed, 240 insertions(+), 742 deletions(-) rename storage/blockchain/src/{config => }/config.rs (82%) delete mode 100644 storage/blockchain/src/config/mod.rs diff --git a/Cargo.lock b/Cargo.lock index d004f95..950044c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,6 +17,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + [[package]] name = "ahash" version = "0.8.11" @@ -160,7 +166,7 @@ dependencies = [ "cc", 
"cfg-if", "libc", - "miniz_oxide", + "miniz_oxide 0.7.3", "object", "rustc-demangle", ] @@ -798,6 +804,7 @@ dependencies = [ "cuprate-helper", "cuprate-json-rpc", "cuprate-rpc-types", + "cuprate-test-utils", "futures", "paste", "serde", @@ -813,12 +820,9 @@ version = "0.0.0" dependencies = [ "cuprate-epee-encoding", "cuprate-fixed-bytes", - "cuprate-json-rpc", "cuprate-test-utils", "cuprate-types", - "monero-serai", "paste", - "pretty_assertions", "serde", "serde_json", ] @@ -875,7 +879,6 @@ dependencies = [ name = "cuprate-types" version = "0.0.0" dependencies = [ - "borsh", "bytes", "cuprate-epee-encoding", "cuprate-fixed-bytes", @@ -1082,12 +1085,12 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "flate2" -version = "1.0.30" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", - "miniz_oxide", + "miniz_oxide 0.8.0", ] [[package]] @@ -1241,9 +1244,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", "bytes", @@ -1737,6 +1740,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +dependencies = [ + "adler2", +] + [[package]] name = "mio" version = "0.8.11" @@ -2399,9 +2411,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] name = "rustls-webpki" @@ -2960,9 +2972,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.10.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72139d247e5f97a3eff96229a7ae85ead5328a39efe76f8bf5a06313d505b6ea" +checksum = "b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a" dependencies = [ "base64", "flate2", @@ -3085,9 +3097,9 @@ checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "webpki-roots" -version = "0.26.3" +version = "0.26.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" +checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a" dependencies = [ "rustls-pki-types", ] diff --git a/Cargo.toml b/Cargo.toml index 1a0c667..0a98eab 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -279,13 +279,15 @@ upper_case_acronyms = "deny" # allow_attributes_without_reason = "deny" # missing_assert_message = "deny" # missing_docs_in_private_items = "deny" -# undocumented_unsafe_blocks = "deny" +undocumented_unsafe_blocks = "deny" # multiple_unsafe_ops_per_block = "deny" # single_char_lifetime_names = "deny" # wildcard_enum_match_arm = "deny" [workspace.lints.rust] # Cold +future_incompatible = { level = "deny", priority = -1 } +nonstandard_style = { level = "deny", priority = -1 } absolute_paths_not_starting_with_crate = "deny" explicit_outlives_requirements = "deny" keyword_idents_2018 = "deny" @@ -306,7 +308,7 @@ ambiguous_glob_imports = "deny" unused_unsafe = "deny" # Warm -let_underscore_drop = "deny" +let_underscore = { level = "deny", priority = -1 } unreachable_pub = "deny" unused_qualifications = "deny" 
variant_size_differences = "deny" diff --git a/helper/Cargo.toml b/helper/Cargo.toml index 9af25c6..c74e40f 100644 --- a/helper/Cargo.toml +++ b/helper/Cargo.toml @@ -40,3 +40,6 @@ target_os_lib = { package = "libc", version = "0.2.151", optional = true } [dev-dependencies] tokio = { workspace = true, features = ["full"] } + +[lints] +workspace = true \ No newline at end of file diff --git a/helper/src/asynch.rs b/helper/src/asynch.rs index ea89dd7..9868191 100644 --- a/helper/src/asynch.rs +++ b/helper/src/asynch.rs @@ -19,7 +19,7 @@ pub struct InfallibleOneshotReceiver(oneshot::Receiver); impl From> for InfallibleOneshotReceiver { fn from(value: oneshot::Receiver) -> Self { - InfallibleOneshotReceiver(value) + Self(value) } } @@ -43,7 +43,7 @@ where { let (tx, rx) = oneshot::channel(); rayon::spawn(move || { - let _ = tx.send(f()); + drop(tx.send(f())); }); rx.await.expect("The sender must not be dropped") } @@ -62,7 +62,7 @@ mod test { #[tokio::test] // Assert that basic channel operations work. 
async fn infallible_oneshot_receiver() { - let (tx, rx) = futures::channel::oneshot::channel::(); + let (tx, rx) = oneshot::channel::(); let msg = "hello world!".to_string(); tx.send(msg.clone()).unwrap(); @@ -84,7 +84,7 @@ mod test { let barrier = Arc::new(Barrier::new(2)); let task = |barrier: &Barrier| barrier.wait(); - let b_2 = barrier.clone(); + let b_2 = Arc::clone(&barrier); let (tx, rx) = std::sync::mpsc::channel(); diff --git a/helper/src/atomic.rs b/helper/src/atomic.rs index f253737..4795896 100644 --- a/helper/src/atomic.rs +++ b/helper/src/atomic.rs @@ -49,6 +49,8 @@ pub type AtomicF64 = AtomicCell; //---------------------------------------------------------------------------------------------------- TESTS #[cfg(test)] mod tests { + #![allow(clippy::float_cmp)] + use super::*; #[test] diff --git a/helper/src/fs.rs b/helper/src/fs.rs index 7290361..5d62a64 100644 --- a/helper/src/fs.rs +++ b/helper/src/fs.rs @@ -190,72 +190,41 @@ mod test { // - It must `ends_with()` the expected end PATH for the OS #[test] fn path_sanity_check() { - assert!(CUPRATE_CACHE_DIR.is_absolute()); - assert!(CUPRATE_CONFIG_DIR.is_absolute()); - assert!(CUPRATE_DATA_DIR.is_absolute()); - assert!(CUPRATE_BLOCKCHAIN_DIR.is_absolute()); + // Array of (PATH, expected_path_as_string). + // + // The different OS's will set the expected path below. 
+ let mut array = [ + (&*CUPRATE_CACHE_DIR, ""), + (&*CUPRATE_CONFIG_DIR, ""), + (&*CUPRATE_DATA_DIR, ""), + (&*CUPRATE_BLOCKCHAIN_DIR, ""), + (&*CUPRATE_TXPOOL_DIR, ""), + ]; if cfg!(target_os = "windows") { - let dir = &*CUPRATE_CACHE_DIR; - println!("cuprate_cache_dir: {dir:?}"); - assert!(dir.ends_with(r"AppData\Local\Cuprate")); - - let dir = &*CUPRATE_CONFIG_DIR; - println!("cuprate_config_dir: {dir:?}"); - assert!(dir.ends_with(r"AppData\Roaming\Cuprate")); - - let dir = &*CUPRATE_DATA_DIR; - println!("cuprate_data_dir: {dir:?}"); - assert!(dir.ends_with(r"AppData\Roaming\Cuprate")); - - let dir = &*CUPRATE_BLOCKCHAIN_DIR; - println!("cuprate_blockchain_dir: {dir:?}"); - assert!(dir.ends_with(r"AppData\Roaming\Cuprate\blockchain")); - - let dir = &*CUPRATE_TXPOOL_DIR; - println!("cuprate_txpool_dir: {dir:?}"); - assert!(dir.ends_with(r"AppData\Roaming\Cuprate\txpool")); + array[0].1 = r"AppData\Local\Cuprate"; + array[1].1 = r"AppData\Roaming\Cuprate"; + array[2].1 = r"AppData\Roaming\Cuprate"; + array[3].1 = r"AppData\Roaming\Cuprate\blockchain"; + array[4].1 = r"AppData\Roaming\Cuprate\txpool"; } else if cfg!(target_os = "macos") { - let dir = &*CUPRATE_CACHE_DIR; - println!("cuprate_cache_dir: {dir:?}"); - assert!(dir.ends_with("Library/Caches/Cuprate")); - - let dir = &*CUPRATE_CONFIG_DIR; - println!("cuprate_config_dir: {dir:?}"); - assert!(dir.ends_with("Library/Application Support/Cuprate")); - - let dir = &*CUPRATE_DATA_DIR; - println!("cuprate_data_dir: {dir:?}"); - assert!(dir.ends_with("Library/Application Support/Cuprate")); - - let dir = &*CUPRATE_BLOCKCHAIN_DIR; - println!("cuprate_blockchain_dir: {dir:?}"); - assert!(dir.ends_with("Library/Application Support/Cuprate/blockchain")); - - let dir = &*CUPRATE_TXPOOL_DIR; - println!("cuprate_txpool_dir: {dir:?}"); - assert!(dir.ends_with("Library/Application Support/Cuprate/txpool")); + array[0].1 = "Library/Caches/Cuprate"; + array[1].1 = "Library/Application Support/Cuprate"; + array[2].1 = 
"Library/Application Support/Cuprate"; + array[3].1 = "Library/Application Support/Cuprate/blockchain"; + array[4].1 = "Library/Application Support/Cuprate/txpool"; } else { // Assumes Linux. - let dir = &*CUPRATE_CACHE_DIR; - println!("cuprate_cache_dir: {dir:?}"); - assert!(dir.ends_with(".cache/cuprate")); + array[0].1 = ".cache/cuprate"; + array[1].1 = ".config/cuprate"; + array[2].1 = ".local/share/cuprate"; + array[3].1 = ".local/share/cuprate/blockchain"; + array[4].1 = ".local/share/cuprate/txpool"; + }; - let dir = &*CUPRATE_CONFIG_DIR; - println!("cuprate_config_dir: {dir:?}"); - assert!(dir.ends_with(".config/cuprate")); - - let dir = &*CUPRATE_DATA_DIR; - println!("cuprate_data_dir: {dir:?}"); - assert!(dir.ends_with(".local/share/cuprate")); - - let dir = &*CUPRATE_BLOCKCHAIN_DIR; - println!("cuprate_blockchain_dir: {dir:?}"); - assert!(dir.ends_with(".local/share/cuprate/blockchain")); - - let dir = &*CUPRATE_TXPOOL_DIR; - println!("cuprate_txpool_dir: {dir:?}"); - assert!(dir.ends_with(".local/share/cuprate/txpool")); + for (path, expected) in array { + assert!(path.is_absolute()); + assert!(path.ends_with(expected)); } } } diff --git a/helper/src/lib.rs b/helper/src/lib.rs index 4dd3105..de0d955 100644 --- a/helper/src/lib.rs +++ b/helper/src/lib.rs @@ -1,36 +1,4 @@ #![doc = include_str!("../README.md")] -//---------------------------------------------------------------------------------------------------- Lints -#![allow(clippy::len_zero, clippy::type_complexity, clippy::module_inception)] -#![deny(nonstandard_style, deprecated, missing_docs, unused_mut)] -#![forbid( - unused_unsafe, - future_incompatible, - break_with_label_and_loop, - coherence_leak_check, - duplicate_macro_attributes, - exported_private_dependencies, - for_loops_over_fallibles, - large_assignments, - overlapping_range_endpoints, - // private_in_public, - semicolon_in_expressions_from_macros, - redundant_semicolons, - unconditional_recursion, - unreachable_patterns, - 
unused_allocation, - unused_braces, - unused_comparisons, - unused_doc_comments, - unused_parens, - unused_labels, - while_true, - keyword_idents, - non_ascii_idents, - noop_method_call, - unreachable_pub, - single_use_lifetimes, - // variant_size_differences, -)] #![cfg_attr(not(feature = "std"), no_std)] //---------------------------------------------------------------------------------------------------- Public API diff --git a/helper/src/map.rs b/helper/src/map.rs index 82d5494..ea6dfc4 100644 --- a/helper/src/map.rs +++ b/helper/src/map.rs @@ -29,6 +29,7 @@ use crate::cast::{u64_to_usize, usize_to_u64}; /// ``` #[inline] pub const fn split_u128_into_low_high_bits(value: u128) -> (u64, u64) { + #[allow(clippy::cast_possible_truncation)] (value as u64, (value >> 64) as u64) } @@ -60,7 +61,7 @@ pub const fn combine_low_high_bits_to_u128(low_bits: u64, high_bits: u64) -> u12 /// Map a [`u64`] to a [`Timelock`]. /// /// Height/time is not differentiated via type, but rather: -/// "height is any value less than 500_000_000 and timestamp is any value above" +/// "height is any value less than `500_000_000` and timestamp is any value above" /// so the `u64/usize` is stored without any tag. /// /// See [`timelock_to_u64`] for the inverse function. diff --git a/helper/src/network.rs b/helper/src/network.rs index 684e71a..f3224b3 100644 --- a/helper/src/network.rs +++ b/helper/src/network.rs @@ -30,11 +30,11 @@ pub enum Network { impl Network { /// Returns the network ID for the current network. 
- pub fn network_id(&self) -> [u8; 16] { + pub const fn network_id(&self) -> [u8; 16] { match self { - Network::Mainnet => MAINNET_NETWORK_ID, - Network::Testnet => TESTNET_NETWORK_ID, - Network::Stagenet => STAGENET_NETWORK_ID, + Self::Mainnet => MAINNET_NETWORK_ID, + Self::Testnet => TESTNET_NETWORK_ID, + Self::Stagenet => STAGENET_NETWORK_ID, } } } diff --git a/helper/src/num.rs b/helper/src/num.rs index f90357e..674ed35 100644 --- a/helper/src/num.rs +++ b/helper/src/num.rs @@ -89,8 +89,9 @@ where /// assert_eq!(median(vec), 5); /// ``` /// -/// # Safety +/// # Invariant /// If not sorted the output will be invalid. +#[allow(clippy::debug_assert_with_mut_call)] pub fn median(array: impl AsRef<[T]>) -> T where T: Add diff --git a/helper/src/thread.rs b/helper/src/thread.rs index 96958ff..04a2606 100644 --- a/helper/src/thread.rs +++ b/helper/src/thread.rs @@ -28,10 +28,10 @@ macro_rules! impl_thread_percent { $( $(#[$doc])* pub fn $fn_name() -> NonZeroUsize { - // SAFETY: // unwrap here is okay because: // - THREADS().get() is always non-zero // - max() guards against 0 + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss, clippy::cast_precision_loss)] NonZeroUsize::new(max(1, (threads().get() as f64 * $percent).floor() as usize)).unwrap() } )* @@ -58,10 +58,10 @@ impl_thread_percent! { /// Originally from . /// /// # Windows -/// Uses SetThreadPriority() with THREAD_PRIORITY_IDLE (-15). +/// Uses `SetThreadPriority()` with `THREAD_PRIORITY_IDLE` (-15). /// /// # Unix -/// Uses libc::nice() with the max nice level. +/// Uses `libc::nice()` with the max nice level. /// /// On macOS and *BSD: +20 /// On Linux: +19 @@ -74,7 +74,7 @@ pub fn low_priority_thread() { // SAFETY: calling C. // We are _lowering_ our priority, not increasing, so this function should never fail. 
unsafe { - let _ = SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_IDLE); + drop(SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_IDLE)); } } @@ -87,7 +87,7 @@ pub fn low_priority_thread() { // SAFETY: calling C. // We are _lowering_ our priority, not increasing, so this function should never fail. unsafe { - let _ = libc::nice(NICE_MAX); + libc::nice(NICE_MAX); } } } diff --git a/helper/src/time.rs b/helper/src/time.rs index 28aff7f..ce39c2d 100644 --- a/helper/src/time.rs +++ b/helper/src/time.rs @@ -129,6 +129,7 @@ pub const fn secs_to_clock(seconds: u32) -> (u8, u8, u8) { debug_assert!(m < 60); debug_assert!(s < 60); + #[allow(clippy::cast_possible_truncation)] // checked above (h as u8, m, s) } @@ -153,6 +154,7 @@ pub fn time() -> u32 { /// /// This is guaranteed to return a value between `0..=86399` pub fn time_utc() -> u32 { + #[allow(clippy::cast_sign_loss)] // checked in function calls unix_clock(chrono::offset::Local::now().timestamp() as u64) } diff --git a/rpc/interface/Cargo.toml b/rpc/interface/Cargo.toml index a83c0f0..5f17317 100644 --- a/rpc/interface/Cargo.toml +++ b/rpc/interface/Cargo.toml @@ -20,13 +20,17 @@ cuprate-helper = { path = "../../helper", features = ["asynch"], default- axum = { version = "0.7.5", features = ["json"], default-features = false } serde = { workspace = true, optional = true } -serde_json = { workspace = true, features = ["std"] } tower = { workspace = true } paste = { workspace = true } futures = { workspace = true } [dev-dependencies] +cuprate-test-utils = { path = "../../test-utils" } + axum = { version = "0.7.5", features = ["json", "tokio", "http2"] } serde_json = { workspace = true, features = ["std"] } tokio = { workspace = true, features = ["full"] } -ureq = { version = "2.10.0", features = ["json"] } \ No newline at end of file +ureq = { version = "2.10.0", features = ["json"] } + +[lints] +workspace = true \ No newline at end of file diff --git a/rpc/interface/src/lib.rs b/rpc/interface/src/lib.rs 
index 43bd9e1..d437697 100644 --- a/rpc/interface/src/lib.rs +++ b/rpc/interface/src/lib.rs @@ -1,99 +1,6 @@ #![doc = include_str!("../README.md")] #![cfg_attr(docsrs, feature(doc_cfg))] -//---------------------------------------------------------------------------------------------------- Lints -// Forbid lints. -// Our code, and code generated (e.g macros) cannot overrule these. -#![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - while_true, - - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, - single_use_lifetimes, - - // Probably can be put into `#[deny]`. - future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, -)] -// Deny lints. -// Some of these are `#[allow]`'ed on a per-case basis. -#![deny( - clippy::all, - clippy::correctness, - clippy::suspicious, - clippy::style, - clippy::complexity, - clippy::perf, - clippy::pedantic, - clippy::nursery, - clippy::cargo, - unused_doc_comments, - unused_mut, - missing_docs, - deprecated, - unused_comparisons, - nonstandard_style, - unreachable_pub -)] -#![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, - - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... - // although it is sometimes nice. - clippy::must_use_candidate, - - // FIXME: good lint but too many false positives - // with our `Env` + `RwLock` setup. 
- clippy::significant_drop_tightening, - - // FIXME: good lint but is less clear in most cases. - clippy::items_after_statements, - - // TODO - rustdoc::bare_urls, - - clippy::multiple_crate_versions, - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, -)] -// Allow some lints in tests. -#![cfg_attr( - test, - allow( - clippy::cognitive_complexity, - clippy::needless_pass_by_value, - clippy::cast_possible_truncation, - clippy::too_many_lines - ) -)] - -//---------------------------------------------------------------------------------------------------- Mod mod route; mod router_builder; mod rpc_error; @@ -110,3 +17,13 @@ pub use rpc_handler::RpcHandler; pub use rpc_handler_dummy::RpcHandlerDummy; pub use rpc_request::RpcRequest; pub use rpc_response::RpcResponse; + +// false-positive: used in `README.md`'s doc-test. +#[cfg(test)] +mod test { + extern crate axum; + extern crate cuprate_test_utils; + extern crate serde_json; + extern crate tokio; + extern crate ureq; +} diff --git a/rpc/json-rpc/Cargo.toml b/rpc/json-rpc/Cargo.toml index 777f326..5d2544e 100644 --- a/rpc/json-rpc/Cargo.toml +++ b/rpc/json-rpc/Cargo.toml @@ -17,4 +17,7 @@ serde_json = { workspace = true, features = ["std"] } thiserror = { workspace = true } [dev-dependencies] -pretty_assertions = { workspace = true } \ No newline at end of file +pretty_assertions = { workspace = true } + +[lints] +workspace = true \ No newline at end of file diff --git a/rpc/json-rpc/src/lib.rs b/rpc/json-rpc/src/lib.rs index ce7467a..dfc4b18 100644 --- a/rpc/json-rpc/src/lib.rs +++ b/rpc/json-rpc/src/lib.rs @@ -1,94 +1,5 @@ #![doc = include_str!("../README.md")] -//---------------------------------------------------------------------------------------------------- Lints -// Forbid lints. -// Our code, and code generated (e.g macros) cannot overrule these. -#![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. 
- clippy::undocumented_unsafe_blocks, - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - while_true, - - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, - single_use_lifetimes, - - // Probably can be put into `#[deny]`. - future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, - unreachable_pub, -)] -// Deny lints. -// Some of these are `#[allow]`'ed on a per-case basis. -#![deny( - clippy::all, - clippy::correctness, - clippy::suspicious, - clippy::style, - clippy::complexity, - clippy::perf, - clippy::pedantic, - clippy::nursery, - clippy::cargo, - clippy::missing_docs_in_private_items, - unused_mut, - missing_docs, - deprecated, - unused_comparisons, - nonstandard_style -)] -#![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, - - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... - // although it is sometimes nice. - clippy::must_use_candidate, - - // FIXME: good lint but too many false positives - // with our `Env` + `RwLock` setup. - clippy::significant_drop_tightening, - - // FIXME: good lint but is less clear in most cases. - clippy::items_after_statements, - - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, -)] -// Allow some lints in tests. 
-#![cfg_attr( - test, - allow( - clippy::cognitive_complexity, - clippy::needless_pass_by_value, - clippy::cast_possible_truncation, - clippy::too_many_lines - ) -)] - -//---------------------------------------------------------------------------------------------------- Mod/Use pub mod error; mod id; @@ -103,6 +14,5 @@ pub use request::Request; mod response; pub use response::Response; -//---------------------------------------------------------------------------------------------------- TESTS #[cfg(test)] mod tests; diff --git a/rpc/json-rpc/src/response.rs b/rpc/json-rpc/src/response.rs index efd768b..2b84606 100644 --- a/rpc/json-rpc/src/response.rs +++ b/rpc/json-rpc/src/response.rs @@ -304,14 +304,14 @@ where if payload.is_none() { payload = Some(Ok(map.next_value::()?)); } else { - return Err(serde::de::Error::duplicate_field("result/error")); + return Err(Error::duplicate_field("result/error")); } } Key::Error => { if payload.is_none() { payload = Some(Err(map.next_value::()?)); } else { - return Err(serde::de::Error::duplicate_field("result/error")); + return Err(Error::duplicate_field("result/error")); } } Key::Unknown => { diff --git a/rpc/json-rpc/src/tests.rs b/rpc/json-rpc/src/tests.rs index ff8f049..3ee6088 100644 --- a/rpc/json-rpc/src/tests.rs +++ b/rpc/json-rpc/src/tests.rs @@ -52,6 +52,7 @@ where } /// Tests an input JSON string matches an expected type `T`. 
+#[allow(clippy::needless_pass_by_value)] // serde signature fn assert_de(json: &'static str, expected: T) where T: DeserializeOwned + std::fmt::Debug + Clone + PartialEq, diff --git a/rpc/types/Cargo.toml b/rpc/types/Cargo.toml index 9c99681..08b13b1 100644 --- a/rpc/types/Cargo.toml +++ b/rpc/types/Cargo.toml @@ -18,13 +18,14 @@ cuprate-epee-encoding = { path = "../../net/epee-encoding", optional = true } cuprate-fixed-bytes = { path = "../../net/fixed-bytes" } cuprate-types = { path = "../../types" } -monero-serai = { workspace = true } paste = { workspace = true } serde = { workspace = true, optional = true } [dev-dependencies] cuprate-test-utils = { path = "../../test-utils" } -cuprate-json-rpc = { path = "../json-rpc" } -serde_json = { workspace = true } -pretty_assertions = { workspace = true } \ No newline at end of file +serde = { workspace = true } +serde_json = { workspace = true } + +[lints] +workspace = true \ No newline at end of file diff --git a/rpc/types/src/lib.rs b/rpc/types/src/lib.rs index c5f890f..51ea3cc 100644 --- a/rpc/types/src/lib.rs +++ b/rpc/types/src/lib.rs @@ -1,96 +1,6 @@ #![doc = include_str!("../README.md")] #![cfg_attr(docsrs, feature(doc_cfg))] -//---------------------------------------------------------------------------------------------------- Lints -// Forbid lints. -// Our code, and code generated (e.g macros) cannot overrule these. -#![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - while_true, - - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, - single_use_lifetimes, - - // Probably can be put into `#[deny]`. 
- future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, -)] -// Deny lints. -// Some of these are `#[allow]`'ed on a per-case basis. -#![deny( - clippy::all, - clippy::correctness, - clippy::suspicious, - clippy::style, - clippy::complexity, - clippy::perf, - clippy::pedantic, - clippy::nursery, - clippy::cargo, - unused_doc_comments, - unused_mut, - missing_docs, - deprecated, - unused_comparisons, - nonstandard_style, - unreachable_pub -)] -#![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, - - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... - // although it is sometimes nice. - clippy::must_use_candidate, - - // FIXME: good lint but too many false positives - // with our `Env` + `RwLock` setup. - clippy::significant_drop_tightening, - - // FIXME: good lint but is less clear in most cases. - clippy::items_after_statements, - - clippy::multiple_crate_versions, - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, -)] -// Allow some lints in tests. 
-#![cfg_attr( - test, - allow( - clippy::cognitive_complexity, - clippy::needless_pass_by_value, - clippy::cast_possible_truncation, - clippy::too_many_lines - ) -)] - -//---------------------------------------------------------------------------------------------------- Mod mod constants; mod defaults; mod free; @@ -112,3 +22,10 @@ pub use constants::{ CORE_RPC_VERSION_MINOR, }; pub use rpc_call::{RpcCall, RpcCallValue}; + +// false-positive: used in tests +#[cfg(test)] +mod test { + extern crate cuprate_test_utils; + extern crate serde_json; +} diff --git a/rpc/types/src/misc/mod.rs b/rpc/types/src/misc/mod.rs index bd6454d..c5c1840 100644 --- a/rpc/types/src/misc/mod.rs +++ b/rpc/types/src/misc/mod.rs @@ -15,6 +15,7 @@ mod binary_string; mod distribution; mod key_image_spent_status; +#[allow(clippy::module_inception)] mod misc; mod pool_info_extent; mod status; diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index 58da21e..e039903 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -48,3 +48,6 @@ pretty_assertions = { workspace = true } proptest = { workspace = true } hex = { workspace = true } hex-literal = { workspace = true } + +[lints] +workspace = true diff --git a/storage/blockchain/src/config/config.rs b/storage/blockchain/src/config.rs similarity index 82% rename from storage/blockchain/src/config/config.rs rename to storage/blockchain/src/config.rs index 957c67c..e4b7606 100644 --- a/storage/blockchain/src/config/config.rs +++ b/storage/blockchain/src/config.rs @@ -1,4 +1,44 @@ -//! The main [`Config`] struct, holding all configurable values. +//! Database configuration. +//! +//! This module contains the main [`Config`]uration struct +//! for the database [`Env`](cuprate_database::Env)ironment, +//! and blockchain-specific configuration. +//! +//! It also contains types related to configuration settings. +//! +//! The main constructor is the [`ConfigBuilder`]. +//! +//! 
These configurations are processed at runtime, meaning +//! the `Env` can/will dynamically adjust its behavior based +//! on these values. +//! +//! # Example +//! ```rust +//! use cuprate_blockchain::{ +//! cuprate_database::{Env, config::SyncMode}, +//! config::{ConfigBuilder, ReaderThreads}, +//! }; +//! +//! # fn main() -> Result<(), Box> { +//! let tmp_dir = tempfile::tempdir()?; +//! let db_dir = tmp_dir.path().to_owned(); +//! +//! let config = ConfigBuilder::new() +//! // Use a custom database directory. +//! .db_directory(db_dir.into()) +//! // Use as many reader threads as possible (when using `service`). +//! .reader_threads(ReaderThreads::OnePerThread) +//! // Use the fastest sync mode. +//! .sync_mode(SyncMode::Fast) +//! // Build into `Config` +//! .build(); +//! +//! // Start a database `service` using this configuration. +//! let (_, _, env) = cuprate_blockchain::service::init(config.clone())?; +//! // It's using the config we provided. +//! assert_eq!(env.config(), &config.db_config); +//! # Ok(()) } +//! ``` //---------------------------------------------------------------------------------------------------- Import use std::{borrow::Cow, path::Path}; diff --git a/storage/blockchain/src/config/mod.rs b/storage/blockchain/src/config/mod.rs deleted file mode 100644 index 555a6e6..0000000 --- a/storage/blockchain/src/config/mod.rs +++ /dev/null @@ -1,44 +0,0 @@ -//! Database configuration. -//! -//! This module contains the main [`Config`]uration struct -//! for the database [`Env`](cuprate_database::Env)ironment, -//! and blockchain-specific configuration. -//! -//! It also contains types related to configuration settings. -//! -//! The main constructor is the [`ConfigBuilder`]. -//! -//! These configurations are processed at runtime, meaning -//! the `Env` can/will dynamically adjust its behavior based -//! on these values. -//! -//! # Example -//! ```rust -//! use cuprate_blockchain::{ -//! cuprate_database::{Env, config::SyncMode}, -//! 
config::{ConfigBuilder, ReaderThreads}, -//! }; -//! -//! # fn main() -> Result<(), Box> { -//! let tmp_dir = tempfile::tempdir()?; -//! let db_dir = tmp_dir.path().to_owned(); -//! -//! let config = ConfigBuilder::new() -//! // Use a custom database directory. -//! .db_directory(db_dir.into()) -//! // Use as many reader threads as possible (when using `service`). -//! .reader_threads(ReaderThreads::OnePerThread) -//! // Use the fastest sync mode. -//! .sync_mode(SyncMode::Fast) -//! // Build into `Config` -//! .build(); -//! -//! // Start a database `service` using this configuration. -//! let (_, _, env) = cuprate_blockchain::service::init(config.clone())?; -//! // It's using the config we provided. -//! assert_eq!(env.config(), &config.db_config); -//! # Ok(()) } -//! ``` - -mod config; -pub use config::{Config, ConfigBuilder, ReaderThreads}; diff --git a/storage/blockchain/src/lib.rs b/storage/blockchain/src/lib.rs index e544a69..f66cd99 100644 --- a/storage/blockchain/src/lib.rs +++ b/storage/blockchain/src/lib.rs @@ -1,103 +1,9 @@ #![doc = include_str!("../README.md")] -//---------------------------------------------------------------------------------------------------- Lints -// Forbid lints. -// Our code, and code generated (e.g macros) cannot overrule these. -#![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, - - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - while_true, - clippy::missing_docs_in_private_items, - - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, - single_use_lifetimes, - - // Probably can be put into `#[deny]`. 
- future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, - unreachable_pub, -)] -// Deny lints. -// Some of these are `#[allow]`'ed on a per-case basis. -#![deny( - clippy::all, - clippy::correctness, - clippy::suspicious, - clippy::style, - clippy::complexity, - clippy::perf, - clippy::pedantic, - clippy::nursery, - clippy::cargo, - unused_crate_dependencies, - unused_doc_comments, - unused_mut, - missing_docs, - deprecated, - unused_comparisons, - nonstandard_style -)] #![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, - - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... - // although it is sometimes nice. - clippy::must_use_candidate, - - // FIXME: good lint but too many false positives - // with our `Env` + `RwLock` setup. - clippy::significant_drop_tightening, - - // FIXME: good lint but is less clear in most cases. - clippy::items_after_statements, - - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, -)] -// Allow some lints when running in debug mode. -#![cfg_attr( - debug_assertions, - allow( - clippy::todo, - clippy::multiple_crate_versions, - // unused_crate_dependencies, - ) -)] -// Allow some lints in tests. -#![cfg_attr( - test, - allow( - clippy::cognitive_complexity, - clippy::needless_pass_by_value, - clippy::cast_possible_truncation, - clippy::too_many_lines - ) + // See `cuprate-database` for reasoning. + clippy::significant_drop_tightening )] + // Only allow building 64-bit targets. 
// // This allows us to assume 64-bit diff --git a/storage/blockchain/src/service/free.rs b/storage/blockchain/src/service/free.rs index e748bbb..2e7c908 100644 --- a/storage/blockchain/src/service/free.rs +++ b/storage/blockchain/src/service/free.rs @@ -37,8 +37,8 @@ pub fn init( let db = Arc::new(crate::open(config)?); // Spawn the Reader thread pool and Writer. - let readers = init_read_service(db.clone(), reader_threads); - let writer = init_write_service(db.clone()); + let readers = init_read_service(Arc::clone(&db), reader_threads); + let writer = init_write_service(Arc::clone(&db)); Ok((readers, writer, db)) } diff --git a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs index ed13f7b..b68b544 100644 --- a/storage/blockchain/src/service/tests.rs +++ b/storage/blockchain/src/service/tests.rs @@ -304,8 +304,9 @@ async fn test_template( // Assert we get back the same map of // `Amount`'s and `AmountIndex`'s. let mut response_output_count = 0; + #[allow(clippy::iter_over_hash_type)] // order doesn't matter in this test for (amount, output_map) in response { - let amount_index_set = map.get(&amount).unwrap(); + let amount_index_set = &map[&amount]; for (amount_index, output) in output_map { response_output_count += 1; diff --git a/storage/database/Cargo.toml b/storage/database/Cargo.toml index a70457f..0ef4a97 100644 --- a/storage/database/Cargo.toml +++ b/storage/database/Cargo.toml @@ -32,4 +32,7 @@ serde = { workspace = true, optional = true } [dev-dependencies] bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } page_size = { version = "0.6.0" } -tempfile = { version = "3.10.0" } \ No newline at end of file +tempfile = { version = "3.10.0" } + +[lints] +workspace = true \ No newline at end of file diff --git a/storage/database/src/backend/heed/env.rs b/storage/database/src/backend/heed/env.rs index 0c2847f..8c71e61 100644 --- 
a/storage/database/src/backend/heed/env.rs +++ b/storage/database/src/backend/heed/env.rs @@ -70,7 +70,7 @@ impl Drop for ConcreteEnv { // We need to do `mdb_env_set_flags(&env, MDB_NOSYNC|MDB_ASYNCMAP, 0)` // to clear the no sync and async flags such that the below `self.sync()` // _actually_ synchronously syncs. - if let Err(_e) = crate::Env::sync(self) { + if let Err(_e) = Env::sync(self) { // TODO: log error? } diff --git a/storage/database/src/backend/heed/storable.rs b/storage/database/src/backend/heed/storable.rs index 3566e88..da0e0cb 100644 --- a/storage/database/src/backend/heed/storable.rs +++ b/storage/database/src/backend/heed/storable.rs @@ -78,8 +78,8 @@ mod test { println!("left: {left:?}, right: {right:?}, expected: {expected:?}"); assert_eq!( as heed::Comparator>::compare( - & as heed::BytesEncode>::bytes_encode(&left).unwrap(), - & as heed::BytesEncode>::bytes_encode(&right).unwrap() + & as BytesEncode>::bytes_encode(&left).unwrap(), + & as BytesEncode>::bytes_encode(&right).unwrap() ), expected ); diff --git a/storage/database/src/backend/redb/database.rs b/storage/database/src/backend/redb/database.rs index cd9a0be..dafb241 100644 --- a/storage/database/src/backend/redb/database.rs +++ b/storage/database/src/backend/redb/database.rs @@ -23,7 +23,7 @@ use crate::{ /// Shared [`DatabaseRo::get()`]. #[inline] fn get( - db: &impl redb::ReadableTable, StorableRedb>, + db: &impl ReadableTable, StorableRedb>, key: &T::Key, ) -> Result { Ok(db.get(key)?.ok_or(RuntimeError::KeyNotFound)?.value()) @@ -32,7 +32,7 @@ fn get( /// Shared [`DatabaseRo::len()`]. #[inline] fn len( - db: &impl redb::ReadableTable, StorableRedb>, + db: &impl ReadableTable, StorableRedb>, ) -> Result { Ok(db.len()?) } @@ -40,7 +40,7 @@ fn len( /// Shared [`DatabaseRo::first()`]. 
#[inline] fn first( - db: &impl redb::ReadableTable, StorableRedb>, + db: &impl ReadableTable, StorableRedb>, ) -> Result<(T::Key, T::Value), RuntimeError> { let (key, value) = db.first()?.ok_or(RuntimeError::KeyNotFound)?; Ok((key.value(), value.value())) @@ -49,7 +49,7 @@ fn first( /// Shared [`DatabaseRo::last()`]. #[inline] fn last( - db: &impl redb::ReadableTable, StorableRedb>, + db: &impl ReadableTable, StorableRedb>, ) -> Result<(T::Key, T::Value), RuntimeError> { let (key, value) = db.last()?.ok_or(RuntimeError::KeyNotFound)?; Ok((key.value(), value.value())) @@ -58,7 +58,7 @@ fn last( /// Shared [`DatabaseRo::is_empty()`]. #[inline] fn is_empty( - db: &impl redb::ReadableTable, StorableRedb>, + db: &impl ReadableTable, StorableRedb>, ) -> Result { Ok(db.is_empty()?) } diff --git a/storage/database/src/config/mod.rs b/storage/database/src/config/mod.rs index 19a324e..c6ed0c0 100644 --- a/storage/database/src/config/mod.rs +++ b/storage/database/src/config/mod.rs @@ -33,6 +33,7 @@ //! # Ok(()) } //! ``` +#[allow(clippy::module_inception)] mod config; pub use config::{Config, ConfigBuilder, READER_THREADS_DEFAULT}; diff --git a/storage/database/src/env.rs b/storage/database/src/env.rs index cae4973..8294443 100644 --- a/storage/database/src/env.rs +++ b/storage/database/src/env.rs @@ -163,7 +163,7 @@ pub trait Env: Sized { // We have the direct PATH to the file, // no need to use backend-specific functions. // - // SAFETY: as we are only accessing the metadata of + // INVARIANT: as we are only accessing the metadata of // the file and not reading the bytes, it should be // fine even with a memory mapped file being actively // written to. 
diff --git a/storage/database/src/key.rs b/storage/database/src/key.rs index 3273d4e..2f3855a 100644 --- a/storage/database/src/key.rs +++ b/storage/database/src/key.rs @@ -163,11 +163,11 @@ impl KeyCompare { #[inline] pub const fn as_compare_fn(self) -> fn(&[u8], &[u8]) -> Ordering { match self { - Self::Default => std::cmp::Ord::cmp, + Self::Default => Ord::cmp, Self::Number => |left, right| { let left = ::from_bytes(left); let right = ::from_bytes(right); - std::cmp::Ord::cmp(&left, &right) + Ord::cmp(&left, &right) }, Self::Custom(f) => f, } diff --git a/storage/database/src/lib.rs b/storage/database/src/lib.rs index 5946fe5..45bfc53 100644 --- a/storage/database/src/lib.rs +++ b/storage/database/src/lib.rs @@ -1,94 +1,18 @@ #![doc = include_str!("../README.md")] -//---------------------------------------------------------------------------------------------------- Lints -// Forbid lints. -// Our code, and code generated (e.g macros) cannot overrule these. -#![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, - - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - while_true, - clippy::missing_docs_in_private_items, - - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, - single_use_lifetimes, - - // Probably can be put into `#[deny]`. - future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, - unreachable_pub, -)] -// Deny lints. -// Some of these are `#[allow]`'ed on a per-case basis. 
-#![deny( - clippy::all, - clippy::correctness, - clippy::suspicious, - clippy::style, - clippy::complexity, - clippy::perf, - clippy::pedantic, - clippy::nursery, - clippy::cargo, - unused_crate_dependencies, - unused_doc_comments, - unused_mut, - missing_docs, - deprecated, - unused_comparisons, - nonstandard_style -)] #![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, - - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... - // although it is sometimes nice. - clippy::must_use_candidate, - - // FIXME: good lint but too many false positives - // with our `Env` + `RwLock` setup. - clippy::significant_drop_tightening, - - // FIXME: good lint but is less clear in most cases. - clippy::items_after_statements, - - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, - - // unused_crate_dependencies, // false-positive with `paste` -)] -// Allow some lints when running in debug mode. -#![cfg_attr( - debug_assertions, - allow( - clippy::todo, - clippy::multiple_crate_versions, - // unused_crate_dependencies, - ) + // This lint is allowed because the following + // code exists a lot in this crate: + // + // ```rust + // let env_inner = env.env_inner(); + // let tx_rw = env_inner.tx_rw()?; + // OpenTables::create_tables(&env_inner, &tx_rw)?; + // ``` + // + // Rust thinks `env_inner` can be dropped earlier + // but it cannot, we need it for the lifetime of + // the database transaction + tables. + clippy::significant_drop_tightening )] // Allow some lints in tests. 
#![cfg_attr( diff --git a/storage/txpool/Cargo.toml b/storage/txpool/Cargo.toml index d5ea77d..70211d9 100644 --- a/storage/txpool/Cargo.toml +++ b/storage/txpool/Cargo.toml @@ -41,3 +41,6 @@ cuprate-test-utils = { path = "../../test-utils" } tokio = { workspace = true } tempfile = { workspace = true } hex-literal = { workspace = true } + +[lints] +workspace = true \ No newline at end of file diff --git a/storage/txpool/src/config.rs b/storage/txpool/src/config.rs index 8d09b5e..1ef0d73 100644 --- a/storage/txpool/src/config.rs +++ b/storage/txpool/src/config.rs @@ -211,7 +211,7 @@ impl Config { /// assert_eq!(config.reader_threads, ReaderThreads::default()); /// ``` pub fn new() -> Self { - Config { + Self { db_config: DbConfig::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)), reader_threads: ReaderThreads::default(), max_txpool_weight: 0, diff --git a/storage/txpool/src/lib.rs b/storage/txpool/src/lib.rs index f200c34..243dc4d 100644 --- a/storage/txpool/src/lib.rs +++ b/storage/txpool/src/lib.rs @@ -1,4 +1,8 @@ #![doc = include_str!("../README.md")] +#![allow( + // See `cuprate-database` for reasoning. + clippy::significant_drop_tightening +)] pub mod config; mod free; @@ -13,3 +17,13 @@ pub use free::open; //re-exports pub use cuprate_database; + +// TODO: remove when used. 
+use tower as _; +#[cfg(test)] +mod test { + use cuprate_test_utils as _; + use hex_literal as _; + use tempfile as _; + use tokio as _; +} diff --git a/storage/txpool/src/ops/key_images.rs b/storage/txpool/src/ops/key_images.rs index c6e4415..04aa1b4 100644 --- a/storage/txpool/src/ops/key_images.rs +++ b/storage/txpool/src/ops/key_images.rs @@ -11,7 +11,7 @@ use crate::{ops::TxPoolWriteError, tables::SpentKeyImages, types::TransactionHas /// /// # Panics /// This function will panic if any of the [`Input`]s are not [`Input::ToKey`] -pub fn add_tx_key_images( +pub(super) fn add_tx_key_images( inputs: &[Input], tx_hash: &TransactionHash, kis_table: &mut impl DatabaseRw, @@ -31,7 +31,7 @@ pub fn add_tx_key_images( /// /// # Panics /// This function will panic if any of the [`Input`]s are not [`Input::ToKey`] -pub fn remove_tx_key_images( +pub(super) fn remove_tx_key_images( inputs: &[Input], kis_table: &mut impl DatabaseRw, ) -> Result<(), RuntimeError> { diff --git a/storage/txpool/src/service/free.rs b/storage/txpool/src/service/free.rs index 614ab5c..003da55 100644 --- a/storage/txpool/src/service/free.rs +++ b/storage/txpool/src/service/free.rs @@ -30,8 +30,8 @@ pub fn init( let db = Arc::new(crate::open(config)?); // Spawn the Reader thread pool and Writer. - let readers = init_read_service(db.clone(), reader_threads); - let writer = init_write_service(db.clone()); + let readers = init_read_service(Arc::clone(&db), reader_threads); + let writer = init_write_service(Arc::clone(&db)); Ok((readers, writer, db)) } diff --git a/storage/txpool/src/service/read.rs b/storage/txpool/src/service/read.rs index c2fee66..5654164 100644 --- a/storage/txpool/src/service/read.rs +++ b/storage/txpool/src/service/read.rs @@ -25,7 +25,7 @@ use crate::{ /// Should be called _once_ per actual database. #[cold] #[inline(never)] // Only called once. 
-pub fn init_read_service(env: Arc, threads: ReaderThreads) -> TxpoolReadHandle { +pub(super) fn init_read_service(env: Arc, threads: ReaderThreads) -> TxpoolReadHandle { init_read_service_with_pool(env, init_thread_pool(threads)) } @@ -35,10 +35,7 @@ pub fn init_read_service(env: Arc, threads: ReaderThreads) -> Txpoo /// Should be called _once_ per actual database. #[cold] #[inline(never)] // Only called once. -pub fn init_read_service_with_pool( - env: Arc, - pool: Arc, -) -> TxpoolReadHandle { +fn init_read_service_with_pool(env: Arc, pool: Arc) -> TxpoolReadHandle { DatabaseReadService::new(env, pool, map_request) } @@ -53,6 +50,7 @@ pub fn init_read_service_with_pool( /// 1. `Request` is mapped to a handler function /// 2. Handler function is called /// 3. [`TxpoolReadResponse`] is returned +#[allow(clippy::needless_pass_by_value)] fn map_request( env: &ConcreteEnv, // Access to the database request: TxpoolReadRequest, // The request we must fulfill diff --git a/storage/txpool/src/service/write.rs b/storage/txpool/src/service/write.rs index f6bdb38..8a3b1bf 100644 --- a/storage/txpool/src/service/write.rs +++ b/storage/txpool/src/service/write.rs @@ -16,7 +16,7 @@ use crate::{ //---------------------------------------------------------------------------------------------------- init_write_service /// Initialize the txpool write service from a [`ConcreteEnv`]. -pub fn init_write_service(env: Arc) -> TxpoolWriteHandle { +pub(super) fn init_write_service(env: Arc) -> TxpoolWriteHandle { DatabaseWriteHandle::init(env, handle_txpool_request) } diff --git a/storage/txpool/src/types.rs b/storage/txpool/src/types.rs index 5c89d3b..09b0ce0 100644 --- a/storage/txpool/src/types.rs +++ b/storage/txpool/src/types.rs @@ -35,10 +35,11 @@ bitflags::bitflags! { pub struct TransactionInfo { /// The transaction's fee. pub fee: u64, - /// The transaction`s weight. + /// The transaction's weight. pub weight: usize, /// [`TxStateFlags`] of this transaction. 
pub flags: TxStateFlags, + #[allow(clippy::pub_underscore_fields)] /// Explicit padding so that we have no implicit padding bytes in `repr(C)`. /// /// Allows potential future expansion of this type. @@ -68,21 +69,21 @@ impl From for CachedVerificationState { fn from(value: RawCachedVerificationState) -> Self { // if the hash is all `0`s then there is no hash this is valid at. if value.raw_valid_at_hash == [0; 32] { - return CachedVerificationState::NotVerified; + return Self::NotVerified; } let raw_valid_past_timestamp = u64::from_le_bytes(value.raw_valid_past_timestamp); // if the timestamp is 0, there is no timestamp that needs to be passed. if raw_valid_past_timestamp == 0 { - return CachedVerificationState::ValidAtHashAndHF { + return Self::ValidAtHashAndHF { block_hash: value.raw_valid_at_hash, hf: HardFork::from_version(value.raw_hf) .expect("hard-fork values stored in the DB should always be valid"), }; } - CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock { + Self::ValidAtHashAndHFWithTimeBasedLock { block_hash: value.raw_valid_at_hash, hf: HardFork::from_version(value.raw_hf) .expect("hard-fork values stored in the DB should always be valid"), @@ -91,6 +92,7 @@ impl From for CachedVerificationState { } } +#[allow(clippy::fallible_impl_from)] // only panics in invalid states impl From for RawCachedVerificationState { fn from(value: CachedVerificationState) -> Self { match value { diff --git a/types/Cargo.toml b/types/Cargo.toml index 4c31cfc..4b9204b 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -23,10 +23,12 @@ bytes = { workspace = true } curve25519-dalek = { workspace = true } monero-serai = { workspace = true } serde = { workspace = true, features = ["derive"], optional = true } -borsh = { workspace = true, optional = true } thiserror = { workspace = true } proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } -[dev-dependencies] \ No newline at end of file +[dev-dependencies] + 
+[lints] +workspace = true \ No newline at end of file diff --git a/types/src/lib.rs b/types/src/lib.rs index d70f4c3..0b0dbe6 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -1,76 +1,6 @@ #![doc = include_str!("../README.md")] -//---------------------------------------------------------------------------------------------------- Lints -// Forbid lints. -// Our code, and code generated (e.g macros) cannot overrule these. -#![forbid( - // `unsafe` is allowed but it _must_ be - // commented with `SAFETY: reason`. - clippy::undocumented_unsafe_blocks, - - // Never. - unused_unsafe, - redundant_semicolons, - unused_allocation, - coherence_leak_check, - single_use_lifetimes, - while_true, - clippy::missing_docs_in_private_items, - - // Maybe can be put into `#[deny]`. - unconditional_recursion, - for_loops_over_fallibles, - unused_braces, - unused_doc_comments, - unused_labels, - keyword_idents, - non_ascii_idents, - variant_size_differences, - - // Probably can be put into `#[deny]`. - future_incompatible, - let_underscore, - break_with_label_and_loop, - duplicate_macro_attributes, - exported_private_dependencies, - large_assignments, - overlapping_range_endpoints, - semicolon_in_expressions_from_macros, - noop_method_call, - unreachable_pub, -)] -// Deny lints. -// Some of these are `#[allow]`'ed on a per-case basis. -#![deny( - clippy::all, - clippy::correctness, - clippy::suspicious, - clippy::style, - clippy::complexity, - clippy::perf, - clippy::pedantic, - clippy::nursery, - clippy::cargo, - unused_mut, - missing_docs, - deprecated, - unused_comparisons, - nonstandard_style -)] -#![allow( - // FIXME: this lint affects crates outside of - // `database/` for some reason, allow for now. - clippy::cargo_common_metadata, - - // FIXME: adding `#[must_use]` onto everything - // might just be more annoying than useful... - // although it is sometimes nice. 
- clippy::must_use_candidate, - - clippy::module_name_repetitions, - clippy::module_inception, - clippy::redundant_pub_crate, - clippy::option_if_let_else, -)] +// `proptest` needs this internally. +#![cfg_attr(any(feature = "proptest"), allow(non_local_definitions))] // Allow some lints when running in debug mode. #![cfg_attr(debug_assertions, allow(clippy::todo, clippy::multiple_crate_versions))] @@ -97,4 +27,5 @@ pub use types::{ //---------------------------------------------------------------------------------------------------- Feature-gated #[cfg(feature = "blockchain")] pub mod blockchain; + //---------------------------------------------------------------------------------------------------- Private From 0941f68efcd7dfe66124ad0c1934277f47da9090 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Mon, 2 Sep 2024 17:46:11 -0400 Subject: [PATCH 048/104] helper: fix clippy (#265) * helper: fix lints * fix tests --- helper/src/cast.rs | 16 +++++++++------- helper/src/map.rs | 4 ++-- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/helper/src/cast.rs b/helper/src/cast.rs index 81d0836..99b7f53 100644 --- a/helper/src/cast.rs +++ b/helper/src/cast.rs @@ -4,14 +4,16 @@ //! //! `#[no_std]` compatible. +#![allow(clippy::cast_possible_truncation)] + #[rustfmt::skip] //============================ SAFETY: DO NOT REMOVE ===========================// // // // // -// Only allow building 64-bit targets. // -// This allows us to assume 64-bit invariants in this file. // +// Only allow building 64-bit targets. // +// This allows us to assume 64-bit invariants in this file. 
// #[cfg(not(target_pointer_width = "64"))] - compile_error!("Cuprate is only compatible with 64-bit CPUs"); + compile_error!("Cuprate is only compatible with 64-bit CPUs"); // // // // //============================ SAFETY: DO NOT REMOVE ===========================// @@ -60,8 +62,8 @@ mod test { #[test] fn max_unsigned() { - assert_eq!(u32_to_usize(u32::MAX), u32::MAX as usize); - assert_eq!(usize_to_u64(u32_to_usize(u32::MAX)), u32::MAX as u64); + assert_eq!(u32_to_usize(u32::MAX), usize::try_from(u32::MAX).unwrap()); + assert_eq!(usize_to_u64(u32_to_usize(u32::MAX)), u64::from(u32::MAX)); assert_eq!(u64_to_usize(u64::MAX), usize::MAX); assert_eq!(usize_to_u64(u64_to_usize(u64::MAX)), u64::MAX); @@ -72,8 +74,8 @@ mod test { #[test] fn max_signed() { - assert_eq!(i32_to_isize(i32::MAX), i32::MAX as isize); - assert_eq!(isize_to_i64(i32_to_isize(i32::MAX)), i32::MAX as i64); + assert_eq!(i32_to_isize(i32::MAX), isize::try_from(i32::MAX).unwrap()); + assert_eq!(isize_to_i64(i32_to_isize(i32::MAX)), i64::from(i32::MAX)); assert_eq!(i64_to_isize(i64::MAX), isize::MAX); assert_eq!(isize_to_i64(i64_to_isize(i64::MAX)), i64::MAX); diff --git a/helper/src/map.rs b/helper/src/map.rs index ea6dfc4..7805ea6 100644 --- a/helper/src/map.rs +++ b/helper/src/map.rs @@ -76,7 +76,7 @@ pub const fn combine_low_high_bits_to_u128(low_bits: u64, high_bits: u64) -> u12 /// assert_eq!(u64_to_timelock(499_999_999), Timelock::Block(499_999_999)); /// assert_eq!(u64_to_timelock(500_000_000), Timelock::Time(500_000_000)); /// ``` -pub fn u64_to_timelock(u: u64) -> Timelock { +pub const fn u64_to_timelock(u: u64) -> Timelock { if u == 0 { Timelock::None } else if u < 500_000_000 { @@ -97,7 +97,7 @@ pub fn u64_to_timelock(u: u64) -> Timelock { /// assert_eq!(timelock_to_u64(Timelock::Block(499_999_999)), 499_999_999); /// assert_eq!(timelock_to_u64(Timelock::Time(500_000_000)), 500_000_000); /// ``` -pub fn timelock_to_u64(timelock: Timelock) -> u64 { +pub const fn timelock_to_u64(timelock: 
Timelock) -> u64 { match timelock { Timelock::None => 0, Timelock::Block(u) => usize_to_u64(u), From 4653ac58849c81b6ab993a1d23f061a97962524b Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Thu, 5 Sep 2024 11:53:16 -0400 Subject: [PATCH 049/104] rpc/interface: separate `RpcHandler` into 3 `tower::Service`s (#266) * apply diff * cleanup * fix test --- rpc/interface/README.md | 16 +- rpc/interface/src/lib.rs | 6 +- rpc/interface/src/route/bin.rs | 9 +- rpc/interface/src/route/json_rpc.rs | 28 ++-- rpc/interface/src/route/other.rs | 10 +- rpc/interface/src/rpc_handler.rs | 49 +++--- rpc/interface/src/rpc_handler_dummy.rs | 210 +++++++++++++++---------- rpc/interface/src/rpc_request.rs | 33 ---- rpc/interface/src/rpc_response.rs | 33 ---- rpc/interface/src/rpc_service.rs | 52 ++++++ 10 files changed, 227 insertions(+), 219 deletions(-) delete mode 100644 rpc/interface/src/rpc_request.rs delete mode 100644 rpc/interface/src/rpc_response.rs create mode 100644 rpc/interface/src/rpc_service.rs diff --git a/rpc/interface/README.md b/rpc/interface/README.md index 3a63ac4..eb87864 100644 --- a/rpc/interface/README.md +++ b/rpc/interface/README.md @@ -17,7 +17,7 @@ CLIENT ─► ROUTE ─► REQUEST ─► HANDLER ─► RESPONSE ─► CLIENT Everything coming _in_ from a client is handled by this crate. -This is where your [`RpcHandler`] turns this [`RpcRequest`] into a [`RpcResponse`]. +This is where your [`RpcHandler`] turns this `Request` into a `Response`. You hand this `Response` back to `cuprate-rpc-interface` and it will take care of sending it back to the client. @@ -42,16 +42,18 @@ The proper usage of this crate is to: 3. Do whatever with it # The [`RpcHandler`] -This is your [`tower::Service`] that converts [`RpcRequest`]s into [`RpcResponse`]s, +This is your [`tower::Service`] that converts `Request`s into `Response`s, i.e. the "inner handler". 
-Said concretely, `RpcHandler` is a `tower::Service` where the associated types are from this crate: -- [`RpcRequest`] -- [`RpcResponse`] +Said concretely, `RpcHandler` is 3 `tower::Service`s where the request/response types are +the 3 endpoint enums from [`cuprate_rpc_types`] and the error type is from this crate: +- [`JsonRpcRequest`](cuprate_rpc_types::json::JsonRpcRequest) & [`JsonRpcResponse`](cuprate_rpc_types::json::JsonRpcResponse) +- [`BinRequest`](cuprate_rpc_types::bin::BinRequest) & [`BinResponse`](cuprate_rpc_types::bin::BinRequest) +- [`OtherRequest`](cuprate_rpc_types::other::OtherRequest) & [`OtherResponse`](cuprate_rpc_types::other::OtherRequest) - [`RpcError`] `RpcHandler`'s [`Future`](std::future::Future) is generic, _although_, -it must output `Result`. +it must output `Result<$RESPONSE, RpcError>`. The `RpcHandler` must also hold some state that is required for RPC server operation. @@ -83,7 +85,7 @@ use cuprate_rpc_types::{ json::{JsonRpcRequest, JsonRpcResponse, GetBlockCountResponse}, other::{OtherRequest, OtherResponse}, }; -use cuprate_rpc_interface::{RouterBuilder, RpcHandlerDummy, RpcRequest}; +use cuprate_rpc_interface::{RouterBuilder, RpcHandlerDummy}; // Send a `/get_height` request. This endpoint has no inputs. async fn get_height(port: u16) -> OtherResponse { diff --git a/rpc/interface/src/lib.rs b/rpc/interface/src/lib.rs index d437697..ebea493 100644 --- a/rpc/interface/src/lib.rs +++ b/rpc/interface/src/lib.rs @@ -7,16 +7,14 @@ mod rpc_error; mod rpc_handler; #[cfg(feature = "dummy")] mod rpc_handler_dummy; -mod rpc_request; -mod rpc_response; +mod rpc_service; pub use router_builder::RouterBuilder; pub use rpc_error::RpcError; pub use rpc_handler::RpcHandler; #[cfg(feature = "dummy")] pub use rpc_handler_dummy::RpcHandlerDummy; -pub use rpc_request::RpcRequest; -pub use rpc_response::RpcResponse; +pub use rpc_service::RpcService; // false-positive: used in `README.md`'s doc-test. 
#[cfg(test)] diff --git a/rpc/interface/src/route/bin.rs b/rpc/interface/src/route/bin.rs index 942e091..45447ca 100644 --- a/rpc/interface/src/route/bin.rs +++ b/rpc/interface/src/route/bin.rs @@ -7,7 +7,7 @@ use tower::ServiceExt; use cuprate_epee_encoding::from_bytes; use cuprate_rpc_types::bin::{BinRequest, BinResponse, GetTransactionPoolHashesRequest}; -use crate::{rpc_handler::RpcHandler, rpc_request::RpcRequest, rpc_response::RpcResponse}; +use crate::rpc_handler::RpcHandler; //---------------------------------------------------------------------------------------------------- Routes /// This macro generates route functions that expect input. @@ -67,13 +67,8 @@ macro_rules! generate_endpoints_inner { paste::paste! { { // Send request. - let request = RpcRequest::Binary($request); - let channel = $handler.oneshot(request).await?; + let response = $handler.oneshot($request).await?; - // Assert the response from the inner handler is correct. - let RpcResponse::Binary(response) = channel else { - panic!("RPC handler did not return a binary response"); - }; let BinResponse::$variant(response) = response else { panic!("RPC handler returned incorrect response"); }; diff --git a/rpc/interface/src/route/json_rpc.rs b/rpc/interface/src/route/json_rpc.rs index bd35e43..bf3d937 100644 --- a/rpc/interface/src/route/json_rpc.rs +++ b/rpc/interface/src/route/json_rpc.rs @@ -8,21 +8,21 @@ use tower::ServiceExt; use cuprate_json_rpc::{ error::{ErrorCode, ErrorObject}, - Id, + Id, Response, }; use cuprate_rpc_types::{ json::{JsonRpcRequest, JsonRpcResponse}, RpcCallValue, }; -use crate::{rpc_handler::RpcHandler, rpc_request::RpcRequest, rpc_response::RpcResponse}; +use crate::rpc_handler::RpcHandler; //---------------------------------------------------------------------------------------------------- Routes /// The `/json_rpc` route function used in [`crate::RouterBuilder`]. 
pub(crate) async fn json_rpc( State(handler): State, Json(request): Json>, -) -> Result>, StatusCode> { +) -> Result>, StatusCode> { // TODO: // // JSON-RPC notifications (requests without `id`) @@ -30,6 +30,11 @@ pub(crate) async fn json_rpc( // must remain. How to do this considering this function will // always return and cause `axum` to respond? + // JSON-RPC 2.0 rule: + // If there was an error in detecting the `Request`'s ID, + // the `Response` must contain an `Id::Null` + let id = request.id.unwrap_or(Id::Null); + // Return early if this RPC server is restricted and // the requested method is only for non-restricted RPC. if request.body.is_restricted() && handler.restricted() { @@ -39,26 +44,15 @@ pub(crate) async fn json_rpc( data: None, }; - // JSON-RPC 2.0 rule: - // If there was an error in detecting the `Request`'s ID, - // the `Response` must contain an `Id::Null` - let id = request.id.unwrap_or(Id::Null); - - let response = cuprate_json_rpc::Response::err(id, error_object); + let response = Response::err(id, error_object); return Ok(Json(response)); } // Send request. - let request = RpcRequest::JsonRpc(request); - let channel = handler.oneshot(request).await?; + let response = handler.oneshot(request.body).await?; - // Assert the response from the inner handler is correct. 
- let RpcResponse::JsonRpc(response) = channel else { - panic!("RPC handler returned incorrect response"); - }; - - Ok(Json(response)) + Ok(Json(Response::ok(id, response))) } //---------------------------------------------------------------------------------------------------- Tests diff --git a/rpc/interface/src/route/other.rs b/rpc/interface/src/route/other.rs index ce778db..129ddd5 100644 --- a/rpc/interface/src/route/other.rs +++ b/rpc/interface/src/route/other.rs @@ -25,7 +25,7 @@ use cuprate_rpc_types::{ RpcCall, }; -use crate::{rpc_handler::RpcHandler, rpc_request::RpcRequest, rpc_response::RpcResponse}; +use crate::rpc_handler::RpcHandler; //---------------------------------------------------------------------------------------------------- Routes /// This macro generates route functions that expect input. @@ -81,13 +81,9 @@ macro_rules! generate_endpoints_inner { } // Send request. - let request = RpcRequest::Other(OtherRequest::$variant($request)); - let channel = $handler.oneshot(request).await?; + let request = OtherRequest::$variant($request); + let response = $handler.oneshot(request).await?; - // Assert the response from the inner handler is correct. - let RpcResponse::Other(response) = channel else { - panic!("RPC handler did not return a binary response"); - }; let OtherResponse::$variant(response) = response else { panic!("RPC handler returned incorrect response") }; diff --git a/rpc/interface/src/rpc_handler.rs b/rpc/interface/src/rpc_handler.rs index bcd0873..1299ec4 100644 --- a/rpc/interface/src/rpc_handler.rs +++ b/rpc/interface/src/rpc_handler.rs @@ -1,43 +1,42 @@ //! RPC handler trait. 
//---------------------------------------------------------------------------------------------------- Use -use std::future::Future; +use cuprate_rpc_types::{ + bin::{BinRequest, BinResponse}, + json::{JsonRpcRequest, JsonRpcResponse}, + other::{OtherRequest, OtherResponse}, +}; -use tower::Service; - -use crate::{rpc_error::RpcError, rpc_request::RpcRequest, rpc_response::RpcResponse}; +use crate::RpcService; //---------------------------------------------------------------------------------------------------- RpcHandler /// An RPC handler. /// -/// This trait represents a type that can turn [`RpcRequest`]s into [`RpcResponse`]s. +/// This trait represents a type that can turn `Request`s into `Response`s. /// -/// Implementors of this trait must be [`tower::Service`]s that use: -/// - [`RpcRequest`] as the generic `Request` type -/// - [`RpcResponse`] as the associated `Response` type -/// - [`RpcError`] as the associated `Error` type -/// - A generic [`Future`] that outputs `Result` +/// Implementors of this trait must be: +/// - A [`tower::Service`] that uses [`JsonRpcRequest`] & [`JsonRpcResponse`] +/// - A [`tower::Service`] that uses [`BinRequest`] & [`BinResponse`] +/// - A [`tower::Service`] that uses [`OtherRequest`] & [`OtherResponse`] +/// +/// In other words, an [`RpcHandler`] is a type that implements [`tower::Service`] 3 times, +/// one for each request/response enum type found in [`cuprate_rpc_types`]. +/// +/// The error type must always be [`RpcError`](crate::RpcError). /// /// See this crate's `RpcHandlerDummy` for an implementation example of this trait. /// /// # Panics -/// Your [`RpcHandler`] must reply to [`RpcRequest`]s with the correct -/// [`RpcResponse`] or else this crate will panic during routing functions. +/// Your [`RpcHandler`] must reply to `Request`s with the correct +/// `Response` or else this crate will panic during routing functions. 
/// -/// For example, upon a [`RpcRequest::Binary`] must be replied with -/// [`RpcRequest::Binary`]. If an [`RpcRequest::Other`] were returned instead, -/// this crate would panic. +/// For example, a [`JsonRpcRequest::GetBlockCount`] must be replied with +/// [`JsonRpcResponse::GetBlockCount`]. If anything else is returned, +/// this crate may panic. pub trait RpcHandler: - Clone - + Send - + Sync - + 'static - + Service< - RpcRequest, - Response = RpcResponse, - Error = RpcError, - Future: Future> + Send + Sync + 'static, - > + RpcService + + RpcService + + RpcService { /// Is this [`RpcHandler`] restricted? /// diff --git a/rpc/interface/src/rpc_handler_dummy.rs b/rpc/interface/src/rpc_handler_dummy.rs index 73ffe9c..06fa460 100644 --- a/rpc/interface/src/rpc_handler_dummy.rs +++ b/rpc/interface/src/rpc_handler_dummy.rs @@ -3,18 +3,19 @@ //---------------------------------------------------------------------------------------------------- Use use std::task::Poll; +use cuprate_rpc_types::{ + bin::{BinRequest, BinResponse}, + json::{JsonRpcRequest, JsonRpcResponse}, + other::{OtherRequest, OtherResponse}, +}; use futures::channel::oneshot::channel; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use tower::Service; use cuprate_helper::asynch::InfallibleOneshotReceiver; -use cuprate_json_rpc::Id; -use crate::{ - rpc_error::RpcError, rpc_handler::RpcHandler, rpc_request::RpcRequest, - rpc_response::RpcResponse, -}; +use crate::{rpc_error::RpcError, rpc_handler::RpcHandler}; //---------------------------------------------------------------------------------------------------- RpcHandlerDummy /// An [`RpcHandler`] that always returns [`Default::default`]. 
@@ -42,96 +43,133 @@ impl RpcHandler for RpcHandlerDummy { } } -impl Service for RpcHandlerDummy { - type Response = RpcResponse; +impl Service for RpcHandlerDummy { + type Response = JsonRpcResponse; type Error = RpcError; - type Future = InfallibleOneshotReceiver>; + type Future = InfallibleOneshotReceiver>; fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> Poll> { Poll::Ready(Ok(())) } - fn call(&mut self, req: RpcRequest) -> Self::Future { - use cuprate_rpc_types::bin::BinRequest as BReq; - use cuprate_rpc_types::bin::BinResponse as BResp; - use cuprate_rpc_types::json::JsonRpcRequest as JReq; - use cuprate_rpc_types::json::JsonRpcResponse as JResp; - use cuprate_rpc_types::other::OtherRequest as OReq; - use cuprate_rpc_types::other::OtherResponse as OResp; + fn call(&mut self, req: JsonRpcRequest) -> Self::Future { + use cuprate_rpc_types::json::JsonRpcRequest as Req; + use cuprate_rpc_types::json::JsonRpcResponse as Resp; - #[rustfmt::skip] #[allow(clippy::default_trait_access)] let resp = match req { - RpcRequest::JsonRpc(j) => RpcResponse::JsonRpc(cuprate_json_rpc::Response::ok(Id::Null, match j.body { - JReq::GetBlockCount(_) => JResp::GetBlockCount(Default::default()), - JReq::OnGetBlockHash(_) => JResp::OnGetBlockHash(Default::default()), - JReq::SubmitBlock(_) => JResp::SubmitBlock(Default::default()), - JReq::GenerateBlocks(_) => JResp::GenerateBlocks(Default::default()), - JReq::GetLastBlockHeader(_) => JResp::GetLastBlockHeader(Default::default()), - JReq::GetBlockHeaderByHash(_) => JResp::GetBlockHeaderByHash(Default::default()), - JReq::GetBlockHeaderByHeight(_) => JResp::GetBlockHeaderByHeight(Default::default()), - JReq::GetBlockHeadersRange(_) => JResp::GetBlockHeadersRange(Default::default()), - JReq::GetBlock(_) => JResp::GetBlock(Default::default()), - JReq::GetConnections(_) => JResp::GetConnections(Default::default()), - JReq::GetInfo(_) => JResp::GetInfo(Default::default()), - JReq::HardForkInfo(_) => 
JResp::HardForkInfo(Default::default()), - JReq::SetBans(_) => JResp::SetBans(Default::default()), - JReq::GetBans(_) => JResp::GetBans(Default::default()), - JReq::Banned(_) => JResp::Banned(Default::default()), - JReq::FlushTransactionPool(_) => JResp::FlushTransactionPool(Default::default()), - JReq::GetOutputHistogram(_) => JResp::GetOutputHistogram(Default::default()), - JReq::GetCoinbaseTxSum(_) => JResp::GetCoinbaseTxSum(Default::default()), - JReq::GetVersion(_) => JResp::GetVersion(Default::default()), - JReq::GetFeeEstimate(_) => JResp::GetFeeEstimate(Default::default()), - JReq::GetAlternateChains(_) => JResp::GetAlternateChains(Default::default()), - JReq::RelayTx(_) => JResp::RelayTx(Default::default()), - JReq::SyncInfo(_) => JResp::SyncInfo(Default::default()), - JReq::GetTransactionPoolBacklog(_) => JResp::GetTransactionPoolBacklog(Default::default()), - JReq::GetMinerData(_) => JResp::GetMinerData(Default::default()), - JReq::PruneBlockchain(_) => JResp::PruneBlockchain(Default::default()), - JReq::CalcPow(_) => JResp::CalcPow(Default::default()), - JReq::FlushCache(_) => JResp::FlushCache(Default::default()), - JReq::AddAuxPow(_) => JResp::AddAuxPow(Default::default()), - JReq::GetTxIdsLoose(_) => JResp::GetTxIdsLoose(Default::default()), - })), - RpcRequest::Binary(b) => RpcResponse::Binary(match b { - BReq::GetBlocks(_) => BResp::GetBlocks(Default::default()), - BReq::GetBlocksByHeight(_) => BResp::GetBlocksByHeight(Default::default()), - BReq::GetHashes(_) => BResp::GetHashes(Default::default()), - BReq::GetOutputIndexes(_) => BResp::GetOutputIndexes(Default::default()), - BReq::GetOuts(_) => BResp::GetOuts(Default::default()), - BReq::GetTransactionPoolHashes(_) => BResp::GetTransactionPoolHashes(Default::default()), - BReq::GetOutputDistribution(_) => BResp::GetOutputDistribution(Default::default()), - }), - RpcRequest::Other(o) => RpcResponse::Other(match o { - OReq::GetHeight(_) => OResp::GetHeight(Default::default()), - 
OReq::GetTransactions(_) => OResp::GetTransactions(Default::default()), - OReq::GetAltBlocksHashes(_) => OResp::GetAltBlocksHashes(Default::default()), - OReq::IsKeyImageSpent(_) => OResp::IsKeyImageSpent(Default::default()), - OReq::SendRawTransaction(_) => OResp::SendRawTransaction(Default::default()), - OReq::StartMining(_) => OResp::StartMining(Default::default()), - OReq::StopMining(_) => OResp::StopMining(Default::default()), - OReq::MiningStatus(_) => OResp::MiningStatus(Default::default()), - OReq::SaveBc(_) => OResp::SaveBc(Default::default()), - OReq::GetPeerList(_) => OResp::GetPeerList(Default::default()), - OReq::SetLogHashRate(_) => OResp::SetLogHashRate(Default::default()), - OReq::SetLogLevel(_) => OResp::SetLogLevel(Default::default()), - OReq::SetLogCategories(_) => OResp::SetLogCategories(Default::default()), - OReq::SetBootstrapDaemon(_) => OResp::SetBootstrapDaemon(Default::default()), - OReq::GetTransactionPool(_) => OResp::GetTransactionPool(Default::default()), - OReq::GetTransactionPoolStats(_) => OResp::GetTransactionPoolStats(Default::default()), - OReq::StopDaemon(_) => OResp::StopDaemon(Default::default()), - OReq::GetLimit(_) => OResp::GetLimit(Default::default()), - OReq::SetLimit(_) => OResp::SetLimit(Default::default()), - OReq::OutPeers(_) => OResp::OutPeers(Default::default()), - OReq::InPeers(_) => OResp::InPeers(Default::default()), - OReq::GetNetStats(_) => OResp::GetNetStats(Default::default()), - OReq::GetOuts(_) => OResp::GetOuts(Default::default()), - OReq::Update(_) => OResp::Update(Default::default()), - OReq::PopBlocks(_) => OResp::PopBlocks(Default::default()), - OReq::GetTransactionPoolHashes(_) => OResp::GetTransactionPoolHashes(Default::default()), - OReq::GetPublicNodes(_) => OResp::GetPublicNodes(Default::default()), - }) + Req::GetBlockCount(_) => Resp::GetBlockCount(Default::default()), + Req::OnGetBlockHash(_) => Resp::OnGetBlockHash(Default::default()), + Req::SubmitBlock(_) => 
Resp::SubmitBlock(Default::default()), + Req::GenerateBlocks(_) => Resp::GenerateBlocks(Default::default()), + Req::GetLastBlockHeader(_) => Resp::GetLastBlockHeader(Default::default()), + Req::GetBlockHeaderByHash(_) => Resp::GetBlockHeaderByHash(Default::default()), + Req::GetBlockHeaderByHeight(_) => Resp::GetBlockHeaderByHeight(Default::default()), + Req::GetBlockHeadersRange(_) => Resp::GetBlockHeadersRange(Default::default()), + Req::GetBlock(_) => Resp::GetBlock(Default::default()), + Req::GetConnections(_) => Resp::GetConnections(Default::default()), + Req::GetInfo(_) => Resp::GetInfo(Default::default()), + Req::HardForkInfo(_) => Resp::HardForkInfo(Default::default()), + Req::SetBans(_) => Resp::SetBans(Default::default()), + Req::GetBans(_) => Resp::GetBans(Default::default()), + Req::Banned(_) => Resp::Banned(Default::default()), + Req::FlushTransactionPool(_) => Resp::FlushTransactionPool(Default::default()), + Req::GetOutputHistogram(_) => Resp::GetOutputHistogram(Default::default()), + Req::GetCoinbaseTxSum(_) => Resp::GetCoinbaseTxSum(Default::default()), + Req::GetVersion(_) => Resp::GetVersion(Default::default()), + Req::GetFeeEstimate(_) => Resp::GetFeeEstimate(Default::default()), + Req::GetAlternateChains(_) => Resp::GetAlternateChains(Default::default()), + Req::RelayTx(_) => Resp::RelayTx(Default::default()), + Req::SyncInfo(_) => Resp::SyncInfo(Default::default()), + Req::GetTransactionPoolBacklog(_) => { + Resp::GetTransactionPoolBacklog(Default::default()) + } + Req::GetMinerData(_) => Resp::GetMinerData(Default::default()), + Req::PruneBlockchain(_) => Resp::PruneBlockchain(Default::default()), + Req::CalcPow(_) => Resp::CalcPow(Default::default()), + Req::FlushCache(_) => Resp::FlushCache(Default::default()), + Req::AddAuxPow(_) => Resp::AddAuxPow(Default::default()), + Req::GetTxIdsLoose(_) => Resp::GetTxIdsLoose(Default::default()), + }; + + let (tx, rx) = channel(); + drop(tx.send(Ok(resp))); + InfallibleOneshotReceiver::from(rx) + } 
+} + +impl Service for RpcHandlerDummy { + type Response = BinResponse; + type Error = RpcError; + type Future = InfallibleOneshotReceiver>; + + fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: BinRequest) -> Self::Future { + use cuprate_rpc_types::bin::BinRequest as Req; + use cuprate_rpc_types::bin::BinResponse as Resp; + + #[allow(clippy::default_trait_access)] + let resp = match req { + Req::GetBlocks(_) => Resp::GetBlocks(Default::default()), + Req::GetBlocksByHeight(_) => Resp::GetBlocksByHeight(Default::default()), + Req::GetHashes(_) => Resp::GetHashes(Default::default()), + Req::GetOutputIndexes(_) => Resp::GetOutputIndexes(Default::default()), + Req::GetOuts(_) => Resp::GetOuts(Default::default()), + Req::GetTransactionPoolHashes(_) => Resp::GetTransactionPoolHashes(Default::default()), + Req::GetOutputDistribution(_) => Resp::GetOutputDistribution(Default::default()), + }; + + let (tx, rx) = channel(); + drop(tx.send(Ok(resp))); + InfallibleOneshotReceiver::from(rx) + } +} + +impl Service for RpcHandlerDummy { + type Response = OtherResponse; + type Error = RpcError; + type Future = InfallibleOneshotReceiver>; + + fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: OtherRequest) -> Self::Future { + use cuprate_rpc_types::other::OtherRequest as Req; + use cuprate_rpc_types::other::OtherResponse as Resp; + + #[allow(clippy::default_trait_access)] + let resp = match req { + Req::GetHeight(_) => Resp::GetHeight(Default::default()), + Req::GetTransactions(_) => Resp::GetTransactions(Default::default()), + Req::GetAltBlocksHashes(_) => Resp::GetAltBlocksHashes(Default::default()), + Req::IsKeyImageSpent(_) => Resp::IsKeyImageSpent(Default::default()), + Req::SendRawTransaction(_) => Resp::SendRawTransaction(Default::default()), + Req::StartMining(_) => Resp::StartMining(Default::default()), + Req::StopMining(_) => 
Resp::StopMining(Default::default()), + Req::MiningStatus(_) => Resp::MiningStatus(Default::default()), + Req::SaveBc(_) => Resp::SaveBc(Default::default()), + Req::GetPeerList(_) => Resp::GetPeerList(Default::default()), + Req::SetLogHashRate(_) => Resp::SetLogHashRate(Default::default()), + Req::SetLogLevel(_) => Resp::SetLogLevel(Default::default()), + Req::SetLogCategories(_) => Resp::SetLogCategories(Default::default()), + Req::SetBootstrapDaemon(_) => Resp::SetBootstrapDaemon(Default::default()), + Req::GetTransactionPool(_) => Resp::GetTransactionPool(Default::default()), + Req::GetTransactionPoolStats(_) => Resp::GetTransactionPoolStats(Default::default()), + Req::StopDaemon(_) => Resp::StopDaemon(Default::default()), + Req::GetLimit(_) => Resp::GetLimit(Default::default()), + Req::SetLimit(_) => Resp::SetLimit(Default::default()), + Req::OutPeers(_) => Resp::OutPeers(Default::default()), + Req::InPeers(_) => Resp::InPeers(Default::default()), + Req::GetNetStats(_) => Resp::GetNetStats(Default::default()), + Req::GetOuts(_) => Resp::GetOuts(Default::default()), + Req::Update(_) => Resp::Update(Default::default()), + Req::PopBlocks(_) => Resp::PopBlocks(Default::default()), + Req::GetTransactionPoolHashes(_) => Resp::GetTransactionPoolHashes(Default::default()), + Req::GetPublicNodes(_) => Resp::GetPublicNodes(Default::default()), }; let (tx, rx) = channel(); diff --git a/rpc/interface/src/rpc_request.rs b/rpc/interface/src/rpc_request.rs deleted file mode 100644 index 3b66a78..0000000 --- a/rpc/interface/src/rpc_request.rs +++ /dev/null @@ -1,33 +0,0 @@ -//! RPC requests. 
- -//---------------------------------------------------------------------------------------------------- Import -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -use cuprate_rpc_types::{bin::BinRequest, json::JsonRpcRequest, other::OtherRequest}; - -//---------------------------------------------------------------------------------------------------- RpcRequest -/// All possible RPC requests. -/// -/// This enum encapsulates all possible RPC requests: -/// - JSON RPC 2.0 requests -/// - Binary requests -/// - Other JSON requests -/// -/// It is the `Request` type required to be used in an [`RpcHandler`](crate::RpcHandler). -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] -pub enum RpcRequest { - /// JSON-RPC 2.0 requests. - JsonRpc(cuprate_json_rpc::Request), - /// Binary requests. - Binary(BinRequest), - /// Other JSON requests. - Other(OtherRequest), -} - -//---------------------------------------------------------------------------------------------------- Tests -#[cfg(test)] -mod test { - // use super::*; -} diff --git a/rpc/interface/src/rpc_response.rs b/rpc/interface/src/rpc_response.rs deleted file mode 100644 index 7e8ecdb..0000000 --- a/rpc/interface/src/rpc_response.rs +++ /dev/null @@ -1,33 +0,0 @@ -//! RPC responses. - -//---------------------------------------------------------------------------------------------------- Import -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -use cuprate_rpc_types::{bin::BinResponse, json::JsonRpcResponse, other::OtherResponse}; - -//---------------------------------------------------------------------------------------------------- RpcResponse -/// All possible RPC responses. -/// -/// This enum encapsulates all possible RPC responses: -/// - JSON RPC 2.0 responses -/// - Binary responses -/// - Other JSON responses -/// -/// It is the `Response` type required to be used in an [`RpcHandler`](crate::RpcHandler). 
-#[derive(Clone, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] -pub enum RpcResponse { - /// JSON RPC 2.0 responses. - JsonRpc(cuprate_json_rpc::Response), - /// Binary responses. - Binary(BinResponse), - /// Other JSON responses. - Other(OtherResponse), -} - -//---------------------------------------------------------------------------------------------------- Tests -#[cfg(test)] -mod test { - // use super::*; -} diff --git a/rpc/interface/src/rpc_service.rs b/rpc/interface/src/rpc_service.rs new file mode 100644 index 0000000..db84830 --- /dev/null +++ b/rpc/interface/src/rpc_service.rs @@ -0,0 +1,52 @@ +//! RPC [`tower::Service`] trait. + +//---------------------------------------------------------------------------------------------------- Use +use std::future::Future; + +use tower::Service; + +use crate::rpc_error::RpcError; + +//---------------------------------------------------------------------------------------------------- RpcService +/// An RPC [`tower::Service`]. +/// +/// This trait solely exists to encapsulate the traits needed +/// to handle RPC requests and respond with responses - **it is +/// not meant to be used directly.** +/// +/// The `Request` and `Response` are generic and +/// are used in the [`tower::Service`] bounds. +/// +/// The error type is always [`RpcError`]. +/// +/// There is a blanket implementation that implements this +/// trait on types that implement `tower::Service` correctly. +/// +/// See [`RpcHandler`](crate::RpcHandler) for more information. 
+pub trait RpcService: + Clone + + Send + + Sync + + 'static + + Service< + Request, + Response = Response, + Error = RpcError, + Future: Future> + Send + Sync + 'static, + > +{ +} + +impl RpcService for T where + Self: Clone + + Send + + Sync + + 'static + + Service< + Request, + Response = Response, + Error = RpcError, + Future: Future> + Send + Sync + 'static, + > +{ +} From 92800810d91bd59fb463aa4486a1005cf5e2fc3b Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Sun, 8 Sep 2024 10:52:17 -0400 Subject: [PATCH 050/104] cuprated: initial RPC module skeleton (#262) * readme * cuprated: add all workspace deps * cuprated: add lints * !! * add state, fn signatures * fixes * error signatures * interface: handle json-rpc concepts * split rpc calls into 3 `Service`s * interface: extract out to `RpcService` * fix merge * remove crate lints * use `BoxFuture` * rpc/interface: impl `thiserror::Error` * split state from main handler struct * cleanup * fix imports * replace `RpcError` with `anyhow::Error` * interface: update error * cuprated: update error type --- Cargo.lock | 78 +++++ Cargo.toml | 3 +- binaries/cuprated/Cargo.toml | 63 ++++ binaries/cuprated/README.md | 2 + binaries/cuprated/src/main.rs | 13 + binaries/cuprated/src/rpc.rs | 7 +- binaries/cuprated/src/rpc/bin.rs | 85 ++++++ binaries/cuprated/src/rpc/handler.rs | 103 +++++++ binaries/cuprated/src/rpc/json.rs | 294 +++++++++++++++++++ binaries/cuprated/src/rpc/other.rs | 260 ++++++++++++++++ binaries/cuprated/src/rpc/request_handler.rs | 1 - rpc/interface/Cargo.toml | 5 +- rpc/interface/README.md | 9 +- rpc/interface/src/lib.rs | 2 - rpc/interface/src/route/bin.rs | 19 +- rpc/interface/src/route/json_rpc.rs | 4 +- rpc/interface/src/route/other.rs | 4 +- rpc/interface/src/rpc_error.rs | 34 --- rpc/interface/src/rpc_handler.rs | 2 +- rpc/interface/src/rpc_handler_dummy.rs | 25 +- rpc/interface/src/rpc_service.rs | 12 +- 21 files changed, 956 insertions(+), 69 deletions(-) create mode 100644 
binaries/cuprated/README.md create mode 100644 binaries/cuprated/src/rpc/bin.rs create mode 100644 binaries/cuprated/src/rpc/handler.rs create mode 100644 binaries/cuprated/src/rpc/json.rs create mode 100644 binaries/cuprated/src/rpc/other.rs delete mode 100644 binaries/cuprated/src/rpc/request_handler.rs delete mode 100644 rpc/interface/src/rpc_error.rs diff --git a/Cargo.lock b/Cargo.lock index 950044c..0bb4612 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -56,6 +56,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" +[[package]] +name = "anyhow" +version = "1.0.87" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" + [[package]] name = "async-stream" version = "0.3.5" @@ -799,6 +805,7 @@ dependencies = [ name = "cuprate-rpc-interface" version = "0.0.0" dependencies = [ + "anyhow", "axum", "cuprate-epee-encoding", "cuprate-helper", @@ -908,6 +915,68 @@ dependencies = [ [[package]] name = "cuprated" version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "bitflags 2.5.0", + "borsh", + "bytemuck", + "bytes", + "cfg-if", + "chrono", + "clap", + "crossbeam", + "crypto-bigint", + "cuprate-address-book", + "cuprate-async-buffer", + "cuprate-blockchain", + "cuprate-consensus", + "cuprate-consensus-rules", + "cuprate-cryptonight", + "cuprate-dandelion-tower", + "cuprate-database", + "cuprate-database-service", + "cuprate-epee-encoding", + "cuprate-fast-sync", + "cuprate-fixed-bytes", + "cuprate-helper", + "cuprate-json-rpc", + "cuprate-levin", + "cuprate-p2p", + "cuprate-p2p-core", + "cuprate-pruning", + "cuprate-rpc-interface", + "cuprate-rpc-types", + "cuprate-test-utils", + "cuprate-txpool", + "cuprate-types", + "cuprate-wire", + "curve25519-dalek", + "dashmap", + "dirs", + "futures", + "hex", + "hex-literal", + "indexmap", + "monero-serai", + "paste", + 
"pin-project", + "rand", + "rand_distr", + "randomx-rs", + "rayon", + "serde", + "serde_bytes", + "serde_json", + "thiserror", + "thread_local", + "tokio", + "tokio-stream", + "tokio-util", + "tower", + "tracing", + "tracing-subscriber", +] [[package]] name = "curve25519-dalek" @@ -2503,6 +2572,15 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde_bytes" +version = "0.11.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" +dependencies = [ + "serde", +] + [[package]] name = "serde_derive" version = "1.0.203" diff --git a/Cargo.toml b/Cargo.toml index 0a98eab..2d71893 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,6 +48,7 @@ opt-level = 1 opt-level = 3 [workspace.dependencies] +anyhow = { version = "1.0.87", default-features = false } async-trait = { version = "0.1.74", default-features = false } bitflags = { version = "2.4.2", default-features = false } borsh = { version = "1.2.1", default-features = false } @@ -76,7 +77,7 @@ serde_bytes = { version = "0.11.12", default-features = false } serde_json = { version = "1.0.108", default-features = false } serde = { version = "1.0.190", default-features = false } thiserror = { version = "1.0.50", default-features = false } -thread_local = { version = "1.1.7", default-features = false } +thread_local = { version = "1.1.7", default-features = false } tokio-util = { version = "0.7.10", default-features = false } tokio-stream = { version = "0.1.14", default-features = false } tokio = { version = "1.33.0", default-features = false } diff --git a/binaries/cuprated/Cargo.toml b/binaries/cuprated/Cargo.toml index b524390..a886c12 100644 --- a/binaries/cuprated/Cargo.toml +++ b/binaries/cuprated/Cargo.toml @@ -8,6 +8,69 @@ authors = ["Boog900", "hinto-janai", "SyntheticBird45"] repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated" [dependencies] +# TODO: after v1.0.0, remove unneeded 
dependencies. +cuprate-consensus = { path = "../../consensus" } +cuprate-fast-sync = { path = "../../consensus/fast-sync" } +cuprate-consensus-rules = { path = "../../consensus/rules" } +cuprate-cryptonight = { path = "../../cryptonight" } +cuprate-helper = { path = "../../helper" } +cuprate-epee-encoding = { path = "../../net/epee-encoding" } +cuprate-fixed-bytes = { path = "../../net/fixed-bytes" } +cuprate-levin = { path = "../../net/levin" } +cuprate-wire = { path = "../../net/wire" } +cuprate-p2p = { path = "../../p2p/p2p" } +cuprate-p2p-core = { path = "../../p2p/p2p-core" } +cuprate-dandelion-tower = { path = "../../p2p/dandelion-tower" } +cuprate-async-buffer = { path = "../../p2p/async-buffer" } +cuprate-address-book = { path = "../../p2p/address-book" } +cuprate-blockchain = { path = "../../storage/blockchain" } +cuprate-database-service = { path = "../../storage/service" } +cuprate-txpool = { path = "../../storage/txpool" } +cuprate-database = { path = "../../storage/database" } +cuprate-pruning = { path = "../../pruning" } +cuprate-test-utils = { path = "../../test-utils" } +cuprate-types = { path = "../../types" } +cuprate-json-rpc = { path = "../../rpc/json-rpc" } +cuprate-rpc-interface = { path = "../../rpc/interface" } +cuprate-rpc-types = { path = "../../rpc/types" } + +# TODO: after v1.0.0, remove unneeded dependencies. 
+anyhow = { workspace = true } +async-trait = { workspace = true } +bitflags = { workspace = true } +borsh = { workspace = true } +bytemuck = { workspace = true } +bytes = { workspace = true } +cfg-if = { workspace = true } +clap = { workspace = true } +chrono = { workspace = true } +crypto-bigint = { workspace = true } +crossbeam = { workspace = true } +curve25519-dalek = { workspace = true } +dashmap = { workspace = true } +dirs = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } +hex-literal = { workspace = true } +indexmap = { workspace = true } +monero-serai = { workspace = true } +paste = { workspace = true } +pin-project = { workspace = true } +randomx-rs = { workspace = true } +rand = { workspace = true } +rand_distr = { workspace = true } +rayon = { workspace = true } +serde_bytes = { workspace = true } +serde_json = { workspace = true } +serde = { workspace = true } +thiserror = { workspace = true } +thread_local = { workspace = true } +tokio-util = { workspace = true } +tokio-stream = { workspace = true } +tokio = { workspace = true } +tower = { workspace = true } +tracing-subscriber = { workspace = true } +tracing = { workspace = true } [lints] workspace = true diff --git a/binaries/cuprated/README.md b/binaries/cuprated/README.md new file mode 100644 index 0000000..47f0408 --- /dev/null +++ b/binaries/cuprated/README.md @@ -0,0 +1,2 @@ +# `cuprated` +TODO diff --git a/binaries/cuprated/src/main.rs b/binaries/cuprated/src/main.rs index 918429c..76eb85e 100644 --- a/binaries/cuprated/src/main.rs +++ b/binaries/cuprated/src/main.rs @@ -1,3 +1,16 @@ +#![doc = include_str!("../README.md")] +#![cfg_attr(docsrs, feature(doc_cfg))] +#![allow( + unused_imports, + unreachable_pub, + unused_crate_dependencies, + dead_code, + unused_variables, + clippy::needless_pass_by_value, + clippy::unused_async, + reason = "TODO: remove after v1.0.0" +)] + mod blockchain; mod config; mod p2p; diff --git a/binaries/cuprated/src/rpc.rs 
b/binaries/cuprated/src/rpc.rs index 80b2789..9ebcd1b 100644 --- a/binaries/cuprated/src/rpc.rs +++ b/binaries/cuprated/src/rpc.rs @@ -2,4 +2,9 @@ //! //! Will contain the code to initiate the RPC and a request handler. -mod request_handler; +mod bin; +mod handler; +mod json; +mod other; + +pub use handler::{CupratedRpcHandler, CupratedRpcHandlerState}; diff --git a/binaries/cuprated/src/rpc/bin.rs b/binaries/cuprated/src/rpc/bin.rs new file mode 100644 index 0000000..60d92c1 --- /dev/null +++ b/binaries/cuprated/src/rpc/bin.rs @@ -0,0 +1,85 @@ +use anyhow::Error; + +use cuprate_rpc_types::{ + bin::{ + BinRequest, BinResponse, GetBlocksByHeightRequest, GetBlocksByHeightResponse, + GetBlocksRequest, GetBlocksResponse, GetHashesRequest, GetHashesResponse, + GetOutputIndexesRequest, GetOutputIndexesResponse, GetOutsRequest, GetOutsResponse, + GetTransactionPoolHashesRequest, GetTransactionPoolHashesResponse, + }, + json::{GetOutputDistributionRequest, GetOutputDistributionResponse}, +}; + +use crate::rpc::CupratedRpcHandlerState; + +/// Map a [`BinRequest`] to the function that will lead to a [`BinResponse`]. +pub(super) async fn map_request( + state: CupratedRpcHandlerState, + request: BinRequest, +) -> Result { + use BinRequest as Req; + use BinResponse as Resp; + + Ok(match request { + Req::GetBlocks(r) => Resp::GetBlocks(get_blocks(state, r).await?), + Req::GetBlocksByHeight(r) => Resp::GetBlocksByHeight(get_blocks_by_height(state, r).await?), + Req::GetHashes(r) => Resp::GetHashes(get_hashes(state, r).await?), + Req::GetOutputIndexes(r) => Resp::GetOutputIndexes(get_output_indexes(state, r).await?), + Req::GetOuts(r) => Resp::GetOuts(get_outs(state, r).await?), + Req::GetTransactionPoolHashes(r) => { + Resp::GetTransactionPoolHashes(get_transaction_pool_hashes(state, r).await?) + } + Req::GetOutputDistribution(r) => { + Resp::GetOutputDistribution(get_output_distribution(state, r).await?) 
+ } + }) +} + +async fn get_blocks( + state: CupratedRpcHandlerState, + request: GetBlocksRequest, +) -> Result { + todo!() +} + +async fn get_blocks_by_height( + state: CupratedRpcHandlerState, + request: GetBlocksByHeightRequest, +) -> Result { + todo!() +} + +async fn get_hashes( + state: CupratedRpcHandlerState, + request: GetHashesRequest, +) -> Result { + todo!() +} + +async fn get_output_indexes( + state: CupratedRpcHandlerState, + request: GetOutputIndexesRequest, +) -> Result { + todo!() +} + +async fn get_outs( + state: CupratedRpcHandlerState, + request: GetOutsRequest, +) -> Result { + todo!() +} + +async fn get_transaction_pool_hashes( + state: CupratedRpcHandlerState, + request: GetTransactionPoolHashesRequest, +) -> Result { + todo!() +} + +async fn get_output_distribution( + state: CupratedRpcHandlerState, + request: GetOutputDistributionRequest, +) -> Result { + todo!() +} diff --git a/binaries/cuprated/src/rpc/handler.rs b/binaries/cuprated/src/rpc/handler.rs new file mode 100644 index 0000000..8ba25ea --- /dev/null +++ b/binaries/cuprated/src/rpc/handler.rs @@ -0,0 +1,103 @@ +//! Dummy implementation of [`RpcHandler`]. + +use std::task::{Context, Poll}; + +use anyhow::Error; +use futures::{channel::oneshot::channel, future::BoxFuture}; +use serde::{Deserialize, Serialize}; +use tower::Service; + +use cuprate_blockchain::service::BlockchainReadHandle; +use cuprate_helper::asynch::InfallibleOneshotReceiver; +use cuprate_json_rpc::Id; +use cuprate_rpc_interface::RpcHandler; +use cuprate_rpc_types::{ + bin::{BinRequest, BinResponse}, + json::{JsonRpcRequest, JsonRpcResponse}, + other::{OtherRequest, OtherResponse}, +}; +use cuprate_txpool::service::TxpoolReadHandle; + +use crate::rpc::{bin, json, other}; + +/// TODO +#[derive(Clone)] +pub struct CupratedRpcHandler { + /// Should this RPC server be [restricted](RpcHandler::restricted)? 
+ // + // INVARIANT: + // We don't need to include this in `state` and check for + // `self.is_restricted()` because `cuprate-rpc-interface` handles that. + pub restricted: bool, + + /// State needed for request -> response mapping. + pub state: CupratedRpcHandlerState, +} + +/// TODO +#[derive(Clone)] +pub struct CupratedRpcHandlerState { + /// Read handle to the blockchain database. + pub blockchain: BlockchainReadHandle, + + /// Read handle to the transaction pool database. + pub txpool: TxpoolReadHandle, +} + +impl CupratedRpcHandler { + /// TODO + pub fn init() { + todo!() + } +} + +impl RpcHandler for CupratedRpcHandler { + fn restricted(&self) -> bool { + self.restricted + } +} + +impl Service for CupratedRpcHandler { + type Response = JsonRpcResponse; + type Error = Error; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, request: JsonRpcRequest) -> Self::Future { + let state = CupratedRpcHandlerState::clone(&self.state); + Box::pin(json::map_request(state, request)) + } +} + +impl Service for CupratedRpcHandler { + type Response = BinResponse; + type Error = Error; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, request: BinRequest) -> Self::Future { + let state = CupratedRpcHandlerState::clone(&self.state); + Box::pin(bin::map_request(state, request)) + } +} + +impl Service for CupratedRpcHandler { + type Response = OtherResponse; + type Error = Error; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, request: OtherRequest) -> Self::Future { + let state = CupratedRpcHandlerState::clone(&self.state); + Box::pin(other::map_request(state, request)) + } +} diff --git a/binaries/cuprated/src/rpc/json.rs b/binaries/cuprated/src/rpc/json.rs new file mode 
100644 index 0000000..41398d4 --- /dev/null +++ b/binaries/cuprated/src/rpc/json.rs @@ -0,0 +1,294 @@ +use std::sync::Arc; + +use anyhow::Error; +use tower::ServiceExt; + +use cuprate_rpc_types::json::{ + AddAuxPowRequest, AddAuxPowResponse, BannedRequest, BannedResponse, CalcPowRequest, + CalcPowResponse, FlushCacheRequest, FlushCacheResponse, FlushTransactionPoolRequest, + FlushTransactionPoolResponse, GenerateBlocksRequest, GenerateBlocksResponse, + GetAlternateChainsRequest, GetAlternateChainsResponse, GetBansRequest, GetBansResponse, + GetBlockCountRequest, GetBlockCountResponse, GetBlockHeaderByHashRequest, + GetBlockHeaderByHashResponse, GetBlockHeaderByHeightRequest, GetBlockHeaderByHeightResponse, + GetBlockHeadersRangeRequest, GetBlockHeadersRangeResponse, GetBlockRequest, GetBlockResponse, + GetCoinbaseTxSumRequest, GetCoinbaseTxSumResponse, GetConnectionsRequest, + GetConnectionsResponse, GetFeeEstimateRequest, GetFeeEstimateResponse, GetInfoRequest, + GetInfoResponse, GetLastBlockHeaderRequest, GetLastBlockHeaderResponse, GetMinerDataRequest, + GetMinerDataResponse, GetOutputHistogramRequest, GetOutputHistogramResponse, + GetTransactionPoolBacklogRequest, GetTransactionPoolBacklogResponse, GetTxIdsLooseRequest, + GetTxIdsLooseResponse, GetVersionRequest, GetVersionResponse, HardForkInfoRequest, + HardForkInfoResponse, JsonRpcRequest, JsonRpcResponse, OnGetBlockHashRequest, + OnGetBlockHashResponse, PruneBlockchainRequest, PruneBlockchainResponse, RelayTxRequest, + RelayTxResponse, SetBansRequest, SetBansResponse, SubmitBlockRequest, SubmitBlockResponse, + SyncInfoRequest, SyncInfoResponse, +}; + +use crate::rpc::CupratedRpcHandlerState; + +/// Map a [`JsonRpcRequest`] to the function that will lead to a [`JsonRpcResponse`]. 
+pub(super) async fn map_request( + state: CupratedRpcHandlerState, + request: JsonRpcRequest, +) -> Result { + use JsonRpcRequest as Req; + use JsonRpcResponse as Resp; + + Ok(match request { + Req::GetBlockCount(r) => Resp::GetBlockCount(get_block_count(state, r).await?), + Req::OnGetBlockHash(r) => Resp::OnGetBlockHash(on_get_block_hash(state, r).await?), + Req::SubmitBlock(r) => Resp::SubmitBlock(submit_block(state, r).await?), + Req::GenerateBlocks(r) => Resp::GenerateBlocks(generate_blocks(state, r).await?), + Req::GetLastBlockHeader(r) => { + Resp::GetLastBlockHeader(get_last_block_header(state, r).await?) + } + Req::GetBlockHeaderByHash(r) => { + Resp::GetBlockHeaderByHash(get_block_header_by_hash(state, r).await?) + } + Req::GetBlockHeaderByHeight(r) => { + Resp::GetBlockHeaderByHeight(get_block_header_by_height(state, r).await?) + } + Req::GetBlockHeadersRange(r) => { + Resp::GetBlockHeadersRange(get_block_headers_range(state, r).await?) + } + Req::GetBlock(r) => Resp::GetBlock(get_block(state, r).await?), + Req::GetConnections(r) => Resp::GetConnections(get_connections(state, r).await?), + Req::GetInfo(r) => Resp::GetInfo(get_info(state, r).await?), + Req::HardForkInfo(r) => Resp::HardForkInfo(hard_fork_info(state, r).await?), + Req::SetBans(r) => Resp::SetBans(set_bans(state, r).await?), + Req::GetBans(r) => Resp::GetBans(get_bans(state, r).await?), + Req::Banned(r) => Resp::Banned(banned(state, r).await?), + Req::FlushTransactionPool(r) => { + Resp::FlushTransactionPool(flush_transaction_pool(state, r).await?) + } + Req::GetOutputHistogram(r) => { + Resp::GetOutputHistogram(get_output_histogram(state, r).await?) 
+ } + Req::GetCoinbaseTxSum(r) => Resp::GetCoinbaseTxSum(get_coinbase_tx_sum(state, r).await?), + Req::GetVersion(r) => Resp::GetVersion(get_version(state, r).await?), + Req::GetFeeEstimate(r) => Resp::GetFeeEstimate(get_fee_estimate(state, r).await?), + Req::GetAlternateChains(r) => { + Resp::GetAlternateChains(get_alternate_chains(state, r).await?) + } + Req::RelayTx(r) => Resp::RelayTx(relay_tx(state, r).await?), + Req::SyncInfo(r) => Resp::SyncInfo(sync_info(state, r).await?), + Req::GetTransactionPoolBacklog(r) => { + Resp::GetTransactionPoolBacklog(get_transaction_pool_backlog(state, r).await?) + } + Req::GetMinerData(r) => Resp::GetMinerData(get_miner_data(state, r).await?), + Req::PruneBlockchain(r) => Resp::PruneBlockchain(prune_blockchain(state, r).await?), + Req::CalcPow(r) => Resp::CalcPow(calc_pow(state, r).await?), + Req::FlushCache(r) => Resp::FlushCache(flush_cache(state, r).await?), + Req::AddAuxPow(r) => Resp::AddAuxPow(add_aux_pow(state, r).await?), + Req::GetTxIdsLoose(r) => Resp::GetTxIdsLoose(get_tx_ids_loose(state, r).await?), + }) +} + +async fn get_block_count( + state: CupratedRpcHandlerState, + request: GetBlockCountRequest, +) -> Result { + todo!() +} + +async fn on_get_block_hash( + state: CupratedRpcHandlerState, + request: OnGetBlockHashRequest, +) -> Result { + todo!() +} + +async fn submit_block( + state: CupratedRpcHandlerState, + request: SubmitBlockRequest, +) -> Result { + todo!() +} + +async fn generate_blocks( + state: CupratedRpcHandlerState, + request: GenerateBlocksRequest, +) -> Result { + todo!() +} + +async fn get_last_block_header( + state: CupratedRpcHandlerState, + request: GetLastBlockHeaderRequest, +) -> Result { + todo!() +} + +async fn get_block_header_by_hash( + state: CupratedRpcHandlerState, + request: GetBlockHeaderByHashRequest, +) -> Result { + todo!() +} + +async fn get_block_header_by_height( + state: CupratedRpcHandlerState, + request: GetBlockHeaderByHeightRequest, +) -> Result { + todo!() +} + +async fn 
get_block_headers_range( + state: CupratedRpcHandlerState, + request: GetBlockHeadersRangeRequest, +) -> Result { + todo!() +} + +async fn get_block( + state: CupratedRpcHandlerState, + request: GetBlockRequest, +) -> Result { + todo!() +} + +async fn get_connections( + state: CupratedRpcHandlerState, + request: GetConnectionsRequest, +) -> Result { + todo!() +} + +async fn get_info( + state: CupratedRpcHandlerState, + request: GetInfoRequest, +) -> Result { + todo!() +} + +async fn hard_fork_info( + state: CupratedRpcHandlerState, + request: HardForkInfoRequest, +) -> Result { + todo!() +} + +async fn set_bans( + state: CupratedRpcHandlerState, + request: SetBansRequest, +) -> Result { + todo!() +} + +async fn get_bans( + state: CupratedRpcHandlerState, + request: GetBansRequest, +) -> Result { + todo!() +} + +async fn banned( + state: CupratedRpcHandlerState, + request: BannedRequest, +) -> Result { + todo!() +} + +async fn flush_transaction_pool( + state: CupratedRpcHandlerState, + request: FlushTransactionPoolRequest, +) -> Result { + todo!() +} + +async fn get_output_histogram( + state: CupratedRpcHandlerState, + request: GetOutputHistogramRequest, +) -> Result { + todo!() +} + +async fn get_coinbase_tx_sum( + state: CupratedRpcHandlerState, + request: GetCoinbaseTxSumRequest, +) -> Result { + todo!() +} + +async fn get_version( + state: CupratedRpcHandlerState, + request: GetVersionRequest, +) -> Result { + todo!() +} + +async fn get_fee_estimate( + state: CupratedRpcHandlerState, + request: GetFeeEstimateRequest, +) -> Result { + todo!() +} + +async fn get_alternate_chains( + state: CupratedRpcHandlerState, + request: GetAlternateChainsRequest, +) -> Result { + todo!() +} + +async fn relay_tx( + state: CupratedRpcHandlerState, + request: RelayTxRequest, +) -> Result { + todo!() +} + +async fn sync_info( + state: CupratedRpcHandlerState, + request: SyncInfoRequest, +) -> Result { + todo!() +} + +async fn get_transaction_pool_backlog( + state: 
CupratedRpcHandlerState, + request: GetTransactionPoolBacklogRequest, +) -> Result { + todo!() +} + +async fn get_miner_data( + state: CupratedRpcHandlerState, + request: GetMinerDataRequest, +) -> Result { + todo!() +} + +async fn prune_blockchain( + state: CupratedRpcHandlerState, + request: PruneBlockchainRequest, +) -> Result { + todo!() +} + +async fn calc_pow( + state: CupratedRpcHandlerState, + request: CalcPowRequest, +) -> Result { + todo!() +} + +async fn flush_cache( + state: CupratedRpcHandlerState, + request: FlushCacheRequest, +) -> Result { + todo!() +} + +async fn add_aux_pow( + state: CupratedRpcHandlerState, + request: AddAuxPowRequest, +) -> Result { + todo!() +} + +async fn get_tx_ids_loose( + state: CupratedRpcHandlerState, + request: GetTxIdsLooseRequest, +) -> Result { + todo!() +} diff --git a/binaries/cuprated/src/rpc/other.rs b/binaries/cuprated/src/rpc/other.rs new file mode 100644 index 0000000..c0df399 --- /dev/null +++ b/binaries/cuprated/src/rpc/other.rs @@ -0,0 +1,260 @@ +use anyhow::Error; + +use cuprate_rpc_types::other::{ + GetAltBlocksHashesRequest, GetAltBlocksHashesResponse, GetHeightRequest, GetHeightResponse, + GetLimitRequest, GetLimitResponse, GetNetStatsRequest, GetNetStatsResponse, GetOutsRequest, + GetOutsResponse, GetPeerListRequest, GetPeerListResponse, GetPublicNodesRequest, + GetPublicNodesResponse, GetTransactionPoolHashesRequest, GetTransactionPoolHashesResponse, + GetTransactionPoolRequest, GetTransactionPoolResponse, GetTransactionPoolStatsRequest, + GetTransactionPoolStatsResponse, GetTransactionsRequest, GetTransactionsResponse, + InPeersRequest, InPeersResponse, IsKeyImageSpentRequest, IsKeyImageSpentResponse, + MiningStatusRequest, MiningStatusResponse, OtherRequest, OtherResponse, OutPeersRequest, + OutPeersResponse, PopBlocksRequest, PopBlocksResponse, SaveBcRequest, SaveBcResponse, + SendRawTransactionRequest, SendRawTransactionResponse, SetBootstrapDaemonRequest, + SetBootstrapDaemonResponse, 
SetLimitRequest, SetLimitResponse, SetLogCategoriesRequest, + SetLogCategoriesResponse, SetLogHashRateRequest, SetLogHashRateResponse, SetLogLevelRequest, + SetLogLevelResponse, StartMiningRequest, StartMiningResponse, StopDaemonRequest, + StopDaemonResponse, StopMiningRequest, StopMiningResponse, UpdateRequest, UpdateResponse, +}; + +use crate::rpc::CupratedRpcHandlerState; + +/// Map a [`OtherRequest`] to the function that will lead to a [`OtherResponse`]. +pub(super) async fn map_request( + state: CupratedRpcHandlerState, + request: OtherRequest, +) -> Result { + use OtherRequest as Req; + use OtherResponse as Resp; + + Ok(match request { + Req::GetHeight(r) => Resp::GetHeight(get_height(state, r).await?), + Req::GetTransactions(r) => Resp::GetTransactions(get_transactions(state, r).await?), + Req::GetAltBlocksHashes(r) => { + Resp::GetAltBlocksHashes(get_alt_blocks_hashes(state, r).await?) + } + Req::IsKeyImageSpent(r) => Resp::IsKeyImageSpent(is_key_image_spent(state, r).await?), + Req::SendRawTransaction(r) => { + Resp::SendRawTransaction(send_raw_transaction(state, r).await?) + } + Req::StartMining(r) => Resp::StartMining(start_mining(state, r).await?), + Req::StopMining(r) => Resp::StopMining(stop_mining(state, r).await?), + Req::MiningStatus(r) => Resp::MiningStatus(mining_status(state, r).await?), + Req::SaveBc(r) => Resp::SaveBc(save_bc(state, r).await?), + Req::GetPeerList(r) => Resp::GetPeerList(get_peer_list(state, r).await?), + Req::SetLogHashRate(r) => Resp::SetLogHashRate(set_log_hash_rate(state, r).await?), + Req::SetLogLevel(r) => Resp::SetLogLevel(set_log_level(state, r).await?), + Req::SetLogCategories(r) => Resp::SetLogCategories(set_log_categories(state, r).await?), + Req::SetBootstrapDaemon(r) => { + Resp::SetBootstrapDaemon(set_bootstrap_daemon(state, r).await?) + } + Req::GetTransactionPool(r) => { + Resp::GetTransactionPool(get_transaction_pool(state, r).await?) 
+ } + Req::GetTransactionPoolStats(r) => { + Resp::GetTransactionPoolStats(get_transaction_pool_stats(state, r).await?) + } + Req::StopDaemon(r) => Resp::StopDaemon(stop_daemon(state, r).await?), + Req::GetLimit(r) => Resp::GetLimit(get_limit(state, r).await?), + Req::SetLimit(r) => Resp::SetLimit(set_limit(state, r).await?), + Req::OutPeers(r) => Resp::OutPeers(out_peers(state, r).await?), + Req::InPeers(r) => Resp::InPeers(in_peers(state, r).await?), + Req::GetNetStats(r) => Resp::GetNetStats(get_net_stats(state, r).await?), + Req::GetOuts(r) => Resp::GetOuts(get_outs(state, r).await?), + Req::Update(r) => Resp::Update(update(state, r).await?), + Req::PopBlocks(r) => Resp::PopBlocks(pop_blocks(state, r).await?), + Req::GetTransactionPoolHashes(r) => { + Resp::GetTransactionPoolHashes(get_transaction_pool_hashes(state, r).await?) + } + Req::GetPublicNodes(r) => Resp::GetPublicNodes(get_public_nodes(state, r).await?), + }) +} + +async fn get_height( + state: CupratedRpcHandlerState, + request: GetHeightRequest, +) -> Result { + todo!() +} + +async fn get_transactions( + state: CupratedRpcHandlerState, + request: GetTransactionsRequest, +) -> Result { + todo!() +} + +async fn get_alt_blocks_hashes( + state: CupratedRpcHandlerState, + request: GetAltBlocksHashesRequest, +) -> Result { + todo!() +} + +async fn is_key_image_spent( + state: CupratedRpcHandlerState, + request: IsKeyImageSpentRequest, +) -> Result { + todo!() +} + +async fn send_raw_transaction( + state: CupratedRpcHandlerState, + request: SendRawTransactionRequest, +) -> Result { + todo!() +} + +async fn start_mining( + state: CupratedRpcHandlerState, + request: StartMiningRequest, +) -> Result { + todo!() +} + +async fn stop_mining( + state: CupratedRpcHandlerState, + request: StopMiningRequest, +) -> Result { + todo!() +} + +async fn mining_status( + state: CupratedRpcHandlerState, + request: MiningStatusRequest, +) -> Result { + todo!() +} + +async fn save_bc( + state: CupratedRpcHandlerState, + 
request: SaveBcRequest, +) -> Result { + todo!() +} + +async fn get_peer_list( + state: CupratedRpcHandlerState, + request: GetPeerListRequest, +) -> Result { + todo!() +} + +async fn set_log_hash_rate( + state: CupratedRpcHandlerState, + request: SetLogHashRateRequest, +) -> Result { + todo!() +} + +async fn set_log_level( + state: CupratedRpcHandlerState, + request: SetLogLevelRequest, +) -> Result { + todo!() +} + +async fn set_log_categories( + state: CupratedRpcHandlerState, + request: SetLogCategoriesRequest, +) -> Result { + todo!() +} + +async fn set_bootstrap_daemon( + state: CupratedRpcHandlerState, + request: SetBootstrapDaemonRequest, +) -> Result { + todo!() +} + +async fn get_transaction_pool( + state: CupratedRpcHandlerState, + request: GetTransactionPoolRequest, +) -> Result { + todo!() +} + +async fn get_transaction_pool_stats( + state: CupratedRpcHandlerState, + request: GetTransactionPoolStatsRequest, +) -> Result { + todo!() +} + +async fn stop_daemon( + state: CupratedRpcHandlerState, + request: StopDaemonRequest, +) -> Result { + todo!() +} + +async fn get_limit( + state: CupratedRpcHandlerState, + request: GetLimitRequest, +) -> Result { + todo!() +} + +async fn set_limit( + state: CupratedRpcHandlerState, + request: SetLimitRequest, +) -> Result { + todo!() +} + +async fn out_peers( + state: CupratedRpcHandlerState, + request: OutPeersRequest, +) -> Result { + todo!() +} + +async fn in_peers( + state: CupratedRpcHandlerState, + request: InPeersRequest, +) -> Result { + todo!() +} + +async fn get_net_stats( + state: CupratedRpcHandlerState, + request: GetNetStatsRequest, +) -> Result { + todo!() +} + +async fn get_outs( + state: CupratedRpcHandlerState, + request: GetOutsRequest, +) -> Result { + todo!() +} + +async fn update( + state: CupratedRpcHandlerState, + request: UpdateRequest, +) -> Result { + todo!() +} + +async fn pop_blocks( + state: CupratedRpcHandlerState, + request: PopBlocksRequest, +) -> Result { + todo!() +} + +async fn 
get_transaction_pool_hashes( + state: CupratedRpcHandlerState, + request: GetTransactionPoolHashesRequest, +) -> Result { + todo!() +} + +async fn get_public_nodes( + state: CupratedRpcHandlerState, + request: GetPublicNodesRequest, +) -> Result { + todo!() +} diff --git a/binaries/cuprated/src/rpc/request_handler.rs b/binaries/cuprated/src/rpc/request_handler.rs deleted file mode 100644 index 8b13789..0000000 --- a/binaries/cuprated/src/rpc/request_handler.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/rpc/interface/Cargo.toml b/rpc/interface/Cargo.toml index 5f17317..42d1055 100644 --- a/rpc/interface/Cargo.toml +++ b/rpc/interface/Cargo.toml @@ -9,8 +9,8 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/rpc/cuprate-rpc-inte keywords = ["cuprate", "rpc", "interface"] [features] -default = ["dummy", "serde"] -dummy = [] +default = ["dummy", "serde"] +dummy = [] [dependencies] cuprate-epee-encoding = { path = "../../net/epee-encoding", default-features = false } @@ -18,6 +18,7 @@ cuprate-json-rpc = { path = "../json-rpc", default-features = false } cuprate-rpc-types = { path = "../types", features = ["serde", "epee"], default-features = false } cuprate-helper = { path = "../../helper", features = ["asynch"], default-features = false } +anyhow = { workspace = true } axum = { version = "0.7.5", features = ["json"], default-features = false } serde = { workspace = true, optional = true } tower = { workspace = true } diff --git a/rpc/interface/README.md b/rpc/interface/README.md index eb87864..fa5496c 100644 --- a/rpc/interface/README.md +++ b/rpc/interface/README.md @@ -45,15 +45,16 @@ The proper usage of this crate is to: This is your [`tower::Service`] that converts `Request`s into `Response`s, i.e. the "inner handler". 
-Said concretely, `RpcHandler` is 3 `tower::Service`s where the request/response types are -the 3 endpoint enums from [`cuprate_rpc_types`] and the error type is from this crate: +Said concretely, `RpcHandler` is 3 `tower::Service`s where the +request/response types are the 3 endpoint enums from [`cuprate_rpc_types`]: - [`JsonRpcRequest`](cuprate_rpc_types::json::JsonRpcRequest) & [`JsonRpcResponse`](cuprate_rpc_types::json::JsonRpcResponse) - [`BinRequest`](cuprate_rpc_types::bin::BinRequest) & [`BinResponse`](cuprate_rpc_types::bin::BinRequest) - [`OtherRequest`](cuprate_rpc_types::other::OtherRequest) & [`OtherResponse`](cuprate_rpc_types::other::OtherRequest) -- [`RpcError`] `RpcHandler`'s [`Future`](std::future::Future) is generic, _although_, -it must output `Result<$RESPONSE, RpcError>`. +it must output `Result<$RESPONSE, anyhow::Error>`. + +The error type must always be [`anyhow::Error`]. The `RpcHandler` must also hold some state that is required for RPC server operation. diff --git a/rpc/interface/src/lib.rs b/rpc/interface/src/lib.rs index ebea493..1f84738 100644 --- a/rpc/interface/src/lib.rs +++ b/rpc/interface/src/lib.rs @@ -3,14 +3,12 @@ mod route; mod router_builder; -mod rpc_error; mod rpc_handler; #[cfg(feature = "dummy")] mod rpc_handler_dummy; mod rpc_service; pub use router_builder::RouterBuilder; -pub use rpc_error::RpcError; pub use rpc_handler::RpcHandler; #[cfg(feature = "dummy")] pub use rpc_handler_dummy::RpcHandlerDummy; diff --git a/rpc/interface/src/route/bin.rs b/rpc/interface/src/route/bin.rs index 45447ca..90d06c8 100644 --- a/rpc/interface/src/route/bin.rs +++ b/rpc/interface/src/route/bin.rs @@ -5,7 +5,14 @@ use axum::{body::Bytes, extract::State, http::StatusCode}; use tower::ServiceExt; use cuprate_epee_encoding::from_bytes; -use cuprate_rpc_types::bin::{BinRequest, BinResponse, GetTransactionPoolHashesRequest}; +use cuprate_rpc_types::{ + bin::{ + BinRequest, BinResponse, GetBlocksByHeightRequest, GetBlocksRequest, 
GetHashesRequest, + GetOutputIndexesRequest, GetOutsRequest, GetTransactionPoolHashesRequest, + }, + json::GetOutputDistributionRequest, + RpcCall, +}; use crate::rpc_handler::RpcHandler; @@ -66,8 +73,16 @@ macro_rules! generate_endpoints_inner { ($variant:ident, $handler:ident, $request:expr) => { paste::paste! { { + // Check if restricted. + if [<$variant Request>]::IS_RESTRICTED && $handler.restricted() { + // TODO: mimic `monerod` behavior. + return Err(StatusCode::FORBIDDEN); + } + // Send request. - let response = $handler.oneshot($request).await?; + let Ok(response) = $handler.oneshot($request).await else { + return Err(StatusCode::INTERNAL_SERVER_ERROR); + }; let BinResponse::$variant(response) = response else { panic!("RPC handler returned incorrect response"); diff --git a/rpc/interface/src/route/json_rpc.rs b/rpc/interface/src/route/json_rpc.rs index bf3d937..7efb851 100644 --- a/rpc/interface/src/route/json_rpc.rs +++ b/rpc/interface/src/route/json_rpc.rs @@ -50,7 +50,9 @@ pub(crate) async fn json_rpc( } // Send request. - let response = handler.oneshot(request.body).await?; + let Ok(response) = handler.oneshot(request.body).await else { + return Err(StatusCode::INTERNAL_SERVER_ERROR); + }; Ok(Json(Response::ok(id, response))) } diff --git a/rpc/interface/src/route/other.rs b/rpc/interface/src/route/other.rs index 129ddd5..3ff8448 100644 --- a/rpc/interface/src/route/other.rs +++ b/rpc/interface/src/route/other.rs @@ -82,7 +82,9 @@ macro_rules! generate_endpoints_inner { // Send request. 
let request = OtherRequest::$variant($request); - let response = $handler.oneshot(request).await?; + let Ok(response) = $handler.oneshot(request).await else { + return Err(StatusCode::INTERNAL_SERVER_ERROR); + }; let OtherResponse::$variant(response) = response else { panic!("RPC handler returned incorrect response") diff --git a/rpc/interface/src/rpc_error.rs b/rpc/interface/src/rpc_error.rs deleted file mode 100644 index 47563d6..0000000 --- a/rpc/interface/src/rpc_error.rs +++ /dev/null @@ -1,34 +0,0 @@ -//! RPC errors. - -//---------------------------------------------------------------------------------------------------- Import -use axum::http::StatusCode; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -//---------------------------------------------------------------------------------------------------- RpcError -/// Possible errors during RPC operation. -/// -/// These are any errors that can happen _during_ a handler function. -/// I.e. if this error surfaces, it happened _after_ the request was -/// deserialized. -/// -/// This is the `Error` type required to be used in an [`RpcHandler`](crate::RpcHandler). -/// -/// TODO: This is empty as possible errors will be -/// enumerated when the handler functions are created. 
-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] -pub enum RpcError {} - -impl From for StatusCode { - fn from(_: RpcError) -> Self { - // TODO - Self::INTERNAL_SERVER_ERROR - } -} - -//---------------------------------------------------------------------------------------------------- Tests -#[cfg(test)] -mod test { - // use super::*; -} diff --git a/rpc/interface/src/rpc_handler.rs b/rpc/interface/src/rpc_handler.rs index 1299ec4..1d2676c 100644 --- a/rpc/interface/src/rpc_handler.rs +++ b/rpc/interface/src/rpc_handler.rs @@ -22,7 +22,7 @@ use crate::RpcService; /// In other words, an [`RpcHandler`] is a type that implements [`tower::Service`] 3 times, /// one for each request/response enum type found in [`cuprate_rpc_types`]. /// -/// The error type must always be [`RpcError`](crate::RpcError). +/// The error type must always be [`anyhow::Error`]. /// /// See this crate's `RpcHandlerDummy` for an implementation example of this trait. 
/// diff --git a/rpc/interface/src/rpc_handler_dummy.rs b/rpc/interface/src/rpc_handler_dummy.rs index 06fa460..0b01835 100644 --- a/rpc/interface/src/rpc_handler_dummy.rs +++ b/rpc/interface/src/rpc_handler_dummy.rs @@ -3,19 +3,20 @@ //---------------------------------------------------------------------------------------------------- Use use std::task::Poll; -use cuprate_rpc_types::{ - bin::{BinRequest, BinResponse}, - json::{JsonRpcRequest, JsonRpcResponse}, - other::{OtherRequest, OtherResponse}, -}; +use anyhow::Error; use futures::channel::oneshot::channel; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use tower::Service; use cuprate_helper::asynch::InfallibleOneshotReceiver; +use cuprate_rpc_types::{ + bin::{BinRequest, BinResponse}, + json::{JsonRpcRequest, JsonRpcResponse}, + other::{OtherRequest, OtherResponse}, +}; -use crate::{rpc_error::RpcError, rpc_handler::RpcHandler}; +use crate::rpc_handler::RpcHandler; //---------------------------------------------------------------------------------------------------- RpcHandlerDummy /// An [`RpcHandler`] that always returns [`Default::default`]. 
@@ -45,8 +46,8 @@ impl RpcHandler for RpcHandlerDummy { impl Service for RpcHandlerDummy { type Response = JsonRpcResponse; - type Error = RpcError; - type Future = InfallibleOneshotReceiver>; + type Error = Error; + type Future = InfallibleOneshotReceiver>; fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> Poll> { Poll::Ready(Ok(())) @@ -100,8 +101,8 @@ impl Service for RpcHandlerDummy { impl Service for RpcHandlerDummy { type Response = BinResponse; - type Error = RpcError; - type Future = InfallibleOneshotReceiver>; + type Error = Error; + type Future = InfallibleOneshotReceiver>; fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> Poll> { Poll::Ready(Ok(())) @@ -130,8 +131,8 @@ impl Service for RpcHandlerDummy { impl Service for RpcHandlerDummy { type Response = OtherResponse; - type Error = RpcError; - type Future = InfallibleOneshotReceiver>; + type Error = Error; + type Future = InfallibleOneshotReceiver>; fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> Poll> { Poll::Ready(Ok(())) diff --git a/rpc/interface/src/rpc_service.rs b/rpc/interface/src/rpc_service.rs index db84830..285d60b 100644 --- a/rpc/interface/src/rpc_service.rs +++ b/rpc/interface/src/rpc_service.rs @@ -5,8 +5,6 @@ use std::future::Future; use tower::Service; -use crate::rpc_error::RpcError; - //---------------------------------------------------------------------------------------------------- RpcService /// An RPC [`tower::Service`]. /// @@ -17,7 +15,7 @@ use crate::rpc_error::RpcError; /// The `Request` and `Response` are generic and /// are used in the [`tower::Service`] bounds. /// -/// The error type is always [`RpcError`]. +/// The error type is always [`anyhow::Error`]. /// /// There is a blanket implementation that implements this /// trait on types that implement `tower::Service` correctly. 
@@ -31,8 +29,8 @@ pub trait RpcService: + Service< Request, Response = Response, - Error = RpcError, - Future: Future> + Send + Sync + 'static, + Error = anyhow::Error, + Future: Future> + Send + 'static, > { } @@ -45,8 +43,8 @@ impl RpcService for T where + Service< Request, Response = Response, - Error = RpcError, - Future: Future> + Send + Sync + 'static, + Error = anyhow::Error, + Future: Future> + Send + 'static, > { } From 01625535fa85bcc90000267932576ed65b6fd65d Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Sun, 8 Sep 2024 13:31:58 -0400 Subject: [PATCH 051/104] book/architecture: add resource index (#268) * resource index * index * cap * cleanup --- books/architecture/src/SUMMARY.md | 25 +++--- .../architecture/src/resource-model/intro.md | 1 - .../src/resource-model/sockets.md | 1 - .../cap}/intro.md | 0 .../cap}/map.md | 0 .../cap}/the-block-downloader.md | 0 .../cap}/the-database.md | 0 .../cap}/the-rpc-server.md | 0 .../cap}/the-verifier.md | 0 .../cap}/thread-exit.md | 0 .../architecture/src/resources/cap/threads.md | 2 + .../file-system.md => resources/fs/intro.md} | 0 books/architecture/src/resources/fs/paths.md | 87 +++++++++++++++++++ books/architecture/src/resources/intro.md | 1 + .../{resource-model => resources}/memory.md | 0 .../src/resources/sockets/index.md | 1 + .../src/resources/sockets/ports.md | 2 + 17 files changed, 107 insertions(+), 13 deletions(-) delete mode 100644 books/architecture/src/resource-model/intro.md delete mode 100644 books/architecture/src/resource-model/sockets.md rename books/architecture/src/{resource-model/concurrency-and-parallelism => resources/cap}/intro.md (100%) rename books/architecture/src/{resource-model/concurrency-and-parallelism => resources/cap}/map.md (100%) rename books/architecture/src/{resource-model/concurrency-and-parallelism => resources/cap}/the-block-downloader.md (100%) rename books/architecture/src/{resource-model/concurrency-and-parallelism => resources/cap}/the-database.md (100%) rename 
books/architecture/src/{resource-model/concurrency-and-parallelism => resources/cap}/the-rpc-server.md (100%) rename books/architecture/src/{resource-model/concurrency-and-parallelism => resources/cap}/the-verifier.md (100%) rename books/architecture/src/{resource-model/concurrency-and-parallelism => resources/cap}/thread-exit.md (100%) create mode 100644 books/architecture/src/resources/cap/threads.md rename books/architecture/src/{resource-model/file-system.md => resources/fs/intro.md} (100%) create mode 100644 books/architecture/src/resources/fs/paths.md create mode 100644 books/architecture/src/resources/intro.md rename books/architecture/src/{resource-model => resources}/memory.md (100%) create mode 100644 books/architecture/src/resources/sockets/index.md create mode 100644 books/architecture/src/resources/sockets/ports.md diff --git a/books/architecture/src/SUMMARY.md b/books/architecture/src/SUMMARY.md index ad521df..d97d223 100644 --- a/books/architecture/src/SUMMARY.md +++ b/books/architecture/src/SUMMARY.md @@ -93,17 +93,20 @@ --- -- [⚪️ Resource model](resource-model/intro.md) - - [⚪️ File system](resource-model/file-system.md) - - [⚪️ Sockets](resource-model/sockets.md) - - [⚪️ Memory](resource-model/memory.md) - - [🟡 Concurrency and parallelism](resource-model/concurrency-and-parallelism/intro.md) - - [⚪️ Map](resource-model/concurrency-and-parallelism/map.md) - - [⚪️ The RPC server](resource-model/concurrency-and-parallelism/the-rpc-server.md) - - [⚪️ The database](resource-model/concurrency-and-parallelism/the-database.md) - - [⚪️ The block downloader](resource-model/concurrency-and-parallelism/the-block-downloader.md) - - [⚪️ The verifier](resource-model/concurrency-and-parallelism/the-verifier.md) - - [⚪️ Thread exit](resource-model/concurrency-and-parallelism/thread-exit.md) +- [⚪️ Resources](resources/intro.md) + - [⚪️ File system](resources/fs/intro.md) + - [🟡 Index of PATHs](resources/fs/paths.md) + - [⚪️ Sockets](resources/sockets/index.md) + 
- [🔴 Index of ports](resources/sockets/ports.md) + - [⚪️ Memory](resources/memory.md) + - [🟡 Concurrency and parallelism](resources/cap/intro.md) + - [⚪️ Map](resources/cap/map.md) + - [⚪️ The RPC server](resources/cap/the-rpc-server.md) + - [⚪️ The database](resources/cap/the-database.md) + - [⚪️ The block downloader](resources/cap/the-block-downloader.md) + - [⚪️ The verifier](resources/cap/the-verifier.md) + - [⚪️ Thread exit](resources/cap/thread-exit.md) + - [🔴 Index of threads](resources/cap/threads.md) --- diff --git a/books/architecture/src/resource-model/intro.md b/books/architecture/src/resource-model/intro.md deleted file mode 100644 index 28d1dd6..0000000 --- a/books/architecture/src/resource-model/intro.md +++ /dev/null @@ -1 +0,0 @@ -# ⚪️ Resource model diff --git a/books/architecture/src/resource-model/sockets.md b/books/architecture/src/resource-model/sockets.md deleted file mode 100644 index 0d590ca..0000000 --- a/books/architecture/src/resource-model/sockets.md +++ /dev/null @@ -1 +0,0 @@ -# ⚪️ Sockets diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/intro.md b/books/architecture/src/resources/cap/intro.md similarity index 100% rename from books/architecture/src/resource-model/concurrency-and-parallelism/intro.md rename to books/architecture/src/resources/cap/intro.md diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/map.md b/books/architecture/src/resources/cap/map.md similarity index 100% rename from books/architecture/src/resource-model/concurrency-and-parallelism/map.md rename to books/architecture/src/resources/cap/map.md diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/the-block-downloader.md b/books/architecture/src/resources/cap/the-block-downloader.md similarity index 100% rename from books/architecture/src/resource-model/concurrency-and-parallelism/the-block-downloader.md rename to books/architecture/src/resources/cap/the-block-downloader.md diff 
--git a/books/architecture/src/resource-model/concurrency-and-parallelism/the-database.md b/books/architecture/src/resources/cap/the-database.md similarity index 100% rename from books/architecture/src/resource-model/concurrency-and-parallelism/the-database.md rename to books/architecture/src/resources/cap/the-database.md diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/the-rpc-server.md b/books/architecture/src/resources/cap/the-rpc-server.md similarity index 100% rename from books/architecture/src/resource-model/concurrency-and-parallelism/the-rpc-server.md rename to books/architecture/src/resources/cap/the-rpc-server.md diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/the-verifier.md b/books/architecture/src/resources/cap/the-verifier.md similarity index 100% rename from books/architecture/src/resource-model/concurrency-and-parallelism/the-verifier.md rename to books/architecture/src/resources/cap/the-verifier.md diff --git a/books/architecture/src/resource-model/concurrency-and-parallelism/thread-exit.md b/books/architecture/src/resources/cap/thread-exit.md similarity index 100% rename from books/architecture/src/resource-model/concurrency-and-parallelism/thread-exit.md rename to books/architecture/src/resources/cap/thread-exit.md diff --git a/books/architecture/src/resources/cap/threads.md b/books/architecture/src/resources/cap/threads.md new file mode 100644 index 0000000..e40f2c7 --- /dev/null +++ b/books/architecture/src/resources/cap/threads.md @@ -0,0 +1,2 @@ +# Index of threads +This is an index of all of the system threads Cuprate actively uses. 
\ No newline at end of file diff --git a/books/architecture/src/resource-model/file-system.md b/books/architecture/src/resources/fs/intro.md similarity index 100% rename from books/architecture/src/resource-model/file-system.md rename to books/architecture/src/resources/fs/intro.md diff --git a/books/architecture/src/resources/fs/paths.md b/books/architecture/src/resources/fs/paths.md new file mode 100644 index 0000000..0e5dc3d --- /dev/null +++ b/books/architecture/src/resources/fs/paths.md @@ -0,0 +1,87 @@ +# Index of PATHs +This is an index of all of the filesystem PATHs Cuprate actively uses. + +The [`cuprate_helper::fs`](https://doc.cuprate.org/cuprate_helper/fs/index.html) +module defines the general locations used throughout Cuprate. + +[`dirs`](https://docs.rs/dirs) is used internally, which follows +the PATH standards/conventions on each OS Cuprate supports, i.e.: +- the [XDG base directory](https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html) and the [XDG user directory](https://www.freedesktop.org/wiki/Software/xdg-user-dirs/) specifications on Linux +- the [Known Folder](https://msdn.microsoft.com/en-us/library/windows/desktop/bb776911(v=vs.85).aspx) system on Windows +- the [Standard Directories](https://developer.apple.com/library/content/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/FileSystemOverview/FileSystemOverview.html#//apple_ref/doc/uid/TP40010672-CH2-SW6) on macOS + +## Cache +Cuprate's cache directory. + +| OS | PATH | +|---------|-----------------------------------------| +| Windows | `C:\Users\Alice\AppData\Local\Cuprate\` | +| macOS | `/Users/Alice/Library/Caches/Cuprate/` | +| Linux | `/home/alice/.cache/cuprate/` | + +## Config +Cuprate's config directory. 
+ +| OS | PATH | +|---------|-----------------------------------------------------| +| Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\` | +| macOS | `/Users/Alice/Library/Application Support/Cuprate/` | +| Linux | `/home/alice/.config/cuprate/` | + +## Data +Cuprate's data directory. + +| OS | PATH | +|---------|-----------------------------------------------------| +| Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\` | +| macOS | `/Users/Alice/Library/Application Support/Cuprate/` | +| Linux | `/home/alice/.local/share/cuprate/` | + +## Blockchain +Cuprate's blockchain directory. + +| OS | PATH | +|---------|----------------------------------------------------------------| +| Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\blockchain\` | +| macOS | `/Users/Alice/Library/Application Support/Cuprate/blockchain/` | +| Linux | `/home/alice/.local/share/cuprate/blockchain/` | + +## Transaction pool +Cuprate's transaction pool directory. + +| OS | PATH | +|---------|------------------------------------------------------------| +| Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\txpool\` | +| macOS | `/Users/Alice/Library/Application Support/Cuprate/txpool/` | +| Linux | `/home/alice/.local/share/cuprate/txpool/` | + +## Database +Cuprate's database location/filenames depend on: + +- Which database it is +- Which backend is being used + +--- + +`cuprate_blockchain` files are in the above mentioned `blockchain` folder. + +`cuprate_txpool` files are in the above mentioned `txpool` folder. + +--- + +If the `heed` backend is being used, these files will be created: + +| Filename | Purpose | +|------------|--------------------| +| `data.mdb` | Main data file | +| `lock.mdb` | Database lock file | + +For example: `/home/alice/.local/share/cuprate/blockchain/lock.mdb`. 
+ +If the `redb` backend is being used, these files will be created: + +| Filename | Purpose | +|-------------|--------------------| +| `data.redb` | Main data file | + +For example: `/home/alice/.local/share/cuprate/txpool/data.redb`. \ No newline at end of file diff --git a/books/architecture/src/resources/intro.md b/books/architecture/src/resources/intro.md new file mode 100644 index 0000000..3c1229e --- /dev/null +++ b/books/architecture/src/resources/intro.md @@ -0,0 +1 @@ +# Resources diff --git a/books/architecture/src/resource-model/memory.md b/books/architecture/src/resources/memory.md similarity index 100% rename from books/architecture/src/resource-model/memory.md rename to books/architecture/src/resources/memory.md diff --git a/books/architecture/src/resources/sockets/index.md b/books/architecture/src/resources/sockets/index.md new file mode 100644 index 0000000..1e65ffc --- /dev/null +++ b/books/architecture/src/resources/sockets/index.md @@ -0,0 +1 @@ +# Sockets diff --git a/books/architecture/src/resources/sockets/ports.md b/books/architecture/src/resources/sockets/ports.md new file mode 100644 index 0000000..38ebc1d --- /dev/null +++ b/books/architecture/src/resources/sockets/ports.md @@ -0,0 +1,2 @@ +# Index of ports +This is an index of all of the network sockets Cuprate actively uses. \ No newline at end of file From 967537fae174f4209e2d8afc178246acea250536 Mon Sep 17 00:00:00 2001 From: Asurar Date: Tue, 10 Sep 2024 00:12:06 +0200 Subject: [PATCH 052/104] P2P: Implement incoming ping request handling over maximum inbound limit (#277) Implement incoming ping request handling over maximum inbound limit - If the maximum inbound connection semaphore reach its limit, `inbound_server` fn will open a tokio task to check if the node wanted to ping us. If it is the case we respond, otherwise drop the connection. - Added some documentation to the `inbound_server` fn. 
--- p2p/p2p/src/constants.rs | 6 ++++ p2p/p2p/src/inbound_server.rs | 61 ++++++++++++++++++++++++++++++++--- 2 files changed, 62 insertions(+), 5 deletions(-) diff --git a/p2p/p2p/src/constants.rs b/p2p/p2p/src/constants.rs index 44dba91..4c08eb8 100644 --- a/p2p/p2p/src/constants.rs +++ b/p2p/p2p/src/constants.rs @@ -3,6 +3,12 @@ use std::time::Duration; /// The timeout we set on handshakes. pub(crate) const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(20); +/// The timeout we set on receiving ping requests +pub(crate) const PING_REQUEST_TIMEOUT: Duration = Duration::from_secs(5); + +/// The amount of concurrency (maximum number of simultaneous tasks) we allow for handling ping requests +pub(crate) const PING_REQUEST_CONCURRENCY: usize = 2; + /// The maximum amount of connections to make to seed nodes for when we need peers. pub(crate) const MAX_SEED_CONNECTIONS: usize = 3; diff --git a/p2p/p2p/src/inbound_server.rs b/p2p/p2p/src/inbound_server.rs index aa971a5..80ff38e 100644 --- a/p2p/p2p/src/inbound_server.rs +++ b/p2p/p2p/src/inbound_server.rs @@ -4,9 +4,10 @@ //! them to the handshaker service and then adds them to the client pool. use std::{pin::pin, sync::Arc}; -use futures::StreamExt; +use futures::{SinkExt, StreamExt}; use tokio::{ sync::Semaphore, + task::JoinSet, time::{sleep, timeout}, }; use tower::{Service, ServiceExt}; @@ -17,14 +18,22 @@ use cuprate_p2p_core::{ services::{AddressBookRequest, AddressBookResponse}, AddressBook, ConnectionDirection, NetworkZone, }; +use cuprate_wire::{ + admin::{PingResponse, PING_OK_RESPONSE_STATUS_TEXT}, + AdminRequestMessage, AdminResponseMessage, Message, +}; use crate::{ client_pool::ClientPool, - constants::{HANDSHAKE_TIMEOUT, INBOUND_CONNECTION_COOL_DOWN}, + constants::{ + HANDSHAKE_TIMEOUT, INBOUND_CONNECTION_COOL_DOWN, PING_REQUEST_CONCURRENCY, + PING_REQUEST_TIMEOUT, + }, P2PConfig, }; -/// Starts the inbound server. +/// Starts the inbound server. 
This function will listen to all incoming connections +/// and initiate a handshake if needed, after verifying the address isn't banned. #[instrument(level = "warn", skip_all)] pub async fn inbound_server( client_pool: Arc>, @@ -40,6 +49,10 @@ where HS::Future: Send + 'static, A: AddressBook, { + // Copying the peer_id before borrowing for ping responses (makes us avoid a `clone()`). + let our_peer_id = config.basic_node_data().peer_id; + + // Mandatory. Extract server config from P2PConfig let Some(server_config) = config.server_config else { tracing::warn!("No inbound server config provided, not listening for inbound connections."); return Ok(()); @@ -53,13 +66,18 @@ where let mut listener = pin!(listener); + // Create semaphore for limiting to maximum inbound connections. let semaphore = Arc::new(Semaphore::new(config.max_inbound_connections)); + // Create ping request handling JoinSet + let mut ping_join_set = JoinSet::new(); + // Listen to incoming connections and extract necessary information. while let Some(connection) = listener.next().await { - let Ok((addr, peer_stream, peer_sink)) = connection else { + let Ok((addr, mut peer_stream, mut peer_sink)) = connection else { continue; }; + // If peer is banned, drop connection if let Some(addr) = &addr { let AddressBookResponse::IsPeerBanned(banned) = address_book .ready() @@ -75,11 +93,13 @@ where } } + // Create a new internal id for new peers let addr = match addr { Some(addr) => InternalPeerID::KnownAddr(addr), None => InternalPeerID::Unknown(rand::random()), }; + // If we're still below our maximum limit, initiate a handshake. if let Ok(permit) = semaphore.clone().try_acquire_owned() { tracing::debug!("Permit free for incoming connection, attempting handshake."); @@ -102,8 +122,39 @@ where .instrument(Span::current()), ); } else { + // Otherwise check if the node is simply pinging us. 
tracing::debug!("No permit free for incoming connection."); - // TODO: listen for if the peer is just trying to ping us to see if we are reachable. + + // We only handle 2 ping requests concurrently. Otherwise we drop the connection immediately. + if ping_join_set.len() < PING_REQUEST_CONCURRENCY { + ping_join_set.spawn( + async move { + // Await first message from node. If it is a ping request we respond back, otherwise we drop the connection. + let fut = timeout(PING_REQUEST_TIMEOUT, peer_stream.next()); + + // Ok if timeout did not elapse -> Some if there is a message -> Ok if it has been decoded + if let Ok(Some(Ok(Message::Request(AdminRequestMessage::Ping)))) = fut.await + { + let response = peer_sink + .send( + Message::Response(AdminResponseMessage::Ping(PingResponse { + status: PING_OK_RESPONSE_STATUS_TEXT, + peer_id: our_peer_id, + })) + .into(), + ) + .await; + + if let Err(err) = response { + tracing::debug!( + "Unable to respond to ping request from peer ({addr}): {err}" + ) + } + } + } + .instrument(Span::current()), + ); + } } sleep(INBOUND_CONNECTION_COOL_DOWN).await; From 49d1344aa11e3c0772c372d217d894570b50c9c9 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Tue, 10 Sep 2024 01:15:04 +0100 Subject: [PATCH 053/104] Storage: use `saturating_add` for `cumulative_generated_coins` (#275) * use `saturating_add` for `cumulative_generated_coins` * cargo fmt --- storage/blockchain/src/ops/block.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index d1b83a4..91d6e57 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -89,9 +89,10 @@ pub fn add_block( // RCT output count needs account for _this_ block's outputs. let cumulative_rct_outs = get_rct_num_outputs(tables.rct_outputs())?; + // `saturating_add` is used here as cumulative generated coins overflows due to tail emission. 
let cumulative_generated_coins = cumulative_generated_coins(&block.height.saturating_sub(1), tables.block_infos())? - + block.generated_coins; + .saturating_add(block.generated_coins); let (cumulative_difficulty_low, cumulative_difficulty_high) = split_u128_into_low_high_bits(block.cumulative_difficulty); From 90027143f05a1a4a5aa0840dabff199e6db95699 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Tue, 10 Sep 2024 01:18:26 +0100 Subject: [PATCH 054/104] consensus: misc fixes (#276) * fix decoy checks + fee calculation * fmt --- consensus/src/transactions.rs | 5 +++++ consensus/src/transactions/free.rs | 3 ++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/consensus/src/transactions.rs b/consensus/src/transactions.rs index 91de67c..09f6884 100644 --- a/consensus/src/transactions.rs +++ b/consensus/src/transactions.rs @@ -393,6 +393,11 @@ async fn verify_transactions_decoy_info( where D: Database + Clone + Sync + Send + 'static, { + // Decoy info is not validated for V1 txs. + if hf == HardFork::V1 || txs.is_empty() { + return Ok(()); + } + batch_get_decoy_info(&txs, hf, database) .await? .try_for_each(|decoy_info| decoy_info.and_then(|di| Ok(check_decoy_info(&di, &hf)?)))?; diff --git a/consensus/src/transactions/free.rs b/consensus/src/transactions/free.rs index 02c5235..67b675a 100644 --- a/consensus/src/transactions/free.rs +++ b/consensus/src/transactions/free.rs @@ -78,7 +78,8 @@ pub fn tx_fee(tx: &Transaction) -> Result { } for output in &prefix.outputs { - fee.checked_sub(output.amount.unwrap_or(0)) + fee = fee + .checked_sub(output.amount.unwrap_or(0)) .ok_or(TransactionError::OutputsTooHigh)?; } } From 2291a9679593825ca7ee815a35619f17918bb9de Mon Sep 17 00:00:00 2001 From: Asurar Date: Sat, 14 Sep 2024 15:01:43 +0200 Subject: [PATCH 055/104] P2P: Add latest clearnet mainnet seed nodes. 
(#281) Add Monerod latest clearnet mainnet seed nodes --- p2p/p2p-core/src/network_zones/clear.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/p2p/p2p-core/src/network_zones/clear.rs b/p2p/p2p-core/src/network_zones/clear.rs index 192e363..acde368 100644 --- a/p2p/p2p-core/src/network_zones/clear.rs +++ b/p2p/p2p-core/src/network_zones/clear.rs @@ -54,8 +54,13 @@ impl NetworkZone for ClearNet { const NAME: &'static str = "ClearNet"; const SEEDS: &'static [Self::Addr] = &[ - ip_v4(37, 187, 74, 171, 18080), + ip_v4(176, 9, 0, 187, 18080), + ip_v4(88, 198, 163, 90, 18080), + ip_v4(66, 85, 74, 134, 18080), + ip_v4(51, 79, 173, 165, 18080), ip_v4(192, 99, 8, 110, 18080), + ip_v4(37, 187, 74, 171, 18080), + ip_v4(77, 172, 183, 193, 18080), ]; const ALLOW_SYNC: bool = true; From 6502729d8ca81e5566e4704457245ba055115fbc Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 18 Sep 2024 16:31:08 -0400 Subject: [PATCH 056/104] lints: replace `allow` with `expect` (#285) * cargo.toml: add `allow_attributes` lint * fix lints * fixes * fmt * fix docs * fix docs * fix expect msg --- Cargo.toml | 1 + helper/src/atomic.rs | 23 +++++++++++++--------- helper/src/map.rs | 2 +- helper/src/num.rs | 2 +- helper/src/thread.rs | 14 ++++++++----- helper/src/time.rs | 4 ++-- rpc/interface/src/route/bin.rs | 2 -- rpc/interface/src/router_builder.rs | 1 - rpc/interface/src/rpc_handler_dummy.rs | 6 +++--- rpc/json-rpc/src/tests.rs | 2 +- rpc/types/src/bin.rs | 8 -------- rpc/types/src/free.rs | 8 ++++---- rpc/types/src/json.rs | 2 -- rpc/types/src/lib.rs | 4 ++++ rpc/types/src/macros.rs | 6 +++--- rpc/types/src/misc/distribution.rs | 5 +---- rpc/types/src/misc/mod.rs | 2 +- rpc/types/src/other.rs | 2 -- storage/blockchain/src/ops/block.rs | 17 ++++++++-------- storage/blockchain/src/ops/blockchain.rs | 4 ++-- storage/blockchain/src/service/read.rs | 13 +++++++----- storage/blockchain/src/service/tests.rs | 16 +++++++++++---- storage/blockchain/src/unsafe_sendable.rs | 
4 ++-- storage/database/src/backend/heed/env.rs | 2 +- storage/database/src/backend/heed/error.rs | 5 ++++- storage/database/src/backend/tests.rs | 4 ++-- storage/database/src/config/mod.rs | 2 +- storage/database/src/database.rs | 2 +- storage/database/src/env.rs | 2 +- storage/database/src/resize.rs | 2 +- storage/service/src/reader_threads.rs | 2 +- storage/txpool/src/service/interface.rs | 2 +- storage/txpool/src/service/read.rs | 2 +- storage/txpool/src/types.rs | 4 ++-- types/src/hard_fork.rs | 1 - 35 files changed, 94 insertions(+), 84 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2d71893..f991f73 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -264,6 +264,7 @@ empty_enum_variants_with_brackets = "deny" empty_drop = "deny" clone_on_ref_ptr = "deny" upper_case_acronyms = "deny" +allow_attributes = "deny" # Hot # inline_always = "deny" diff --git a/helper/src/atomic.rs b/helper/src/atomic.rs index 4795896..aa66c0c 100644 --- a/helper/src/atomic.rs +++ b/helper/src/atomic.rs @@ -5,9 +5,6 @@ //---------------------------------------------------------------------------------------------------- Use use crossbeam::atomic::AtomicCell; -#[allow(unused_imports)] // docs -use std::sync::atomic::{Ordering, Ordering::Acquire, Ordering::Release}; - //---------------------------------------------------------------------------------------------------- Atomic Float /// Compile-time assertion that our floats are /// lock-free for the target we're building for. @@ -31,9 +28,13 @@ const _: () = { /// This is an alias for /// [`crossbeam::atomic::AtomicCell`](https://docs.rs/crossbeam/latest/crossbeam/atomic/struct.AtomicCell.html). /// -/// Note that there are no [`Ordering`] parameters, -/// atomic loads use [`Acquire`], -/// and atomic stores use [`Release`]. +/// Note that there are no [Ordering] parameters, +/// atomic loads use [Acquire], +/// and atomic stores use [Release]. 
+/// +/// [Ordering]: std::sync::atomic::Ordering +/// [Acquire]: std::sync::atomic::Ordering::Acquire +/// [Release]: std::sync::atomic::Ordering::Release pub type AtomicF32 = AtomicCell; /// An atomic [`f64`]. @@ -41,9 +42,13 @@ pub type AtomicF32 = AtomicCell; /// This is an alias for /// [`crossbeam::atomic::AtomicCell`](https://docs.rs/crossbeam/latest/crossbeam/atomic/struct.AtomicCell.html). /// -/// Note that there are no [`Ordering`] parameters, -/// atomic loads use [`Acquire`], -/// and atomic stores use [`Release`]. +/// Note that there are no [Ordering] parameters, +/// atomic loads use [Acquire], +/// and atomic stores use [Release]. +/// +/// [Ordering]: std::sync::atomic::Ordering +/// [Acquire]: std::sync::atomic::Ordering::Acquire +/// [Release]: std::sync::atomic::Ordering::Release pub type AtomicF64 = AtomicCell; //---------------------------------------------------------------------------------------------------- TESTS diff --git a/helper/src/map.rs b/helper/src/map.rs index 7805ea6..8cf0978 100644 --- a/helper/src/map.rs +++ b/helper/src/map.rs @@ -29,7 +29,7 @@ use crate::cast::{u64_to_usize, usize_to_u64}; /// ``` #[inline] pub const fn split_u128_into_low_high_bits(value: u128) -> (u64, u64) { - #[allow(clippy::cast_possible_truncation)] + #[expect(clippy::cast_possible_truncation)] (value as u64, (value >> 64) as u64) } diff --git a/helper/src/num.rs b/helper/src/num.rs index 674ed35..399c38d 100644 --- a/helper/src/num.rs +++ b/helper/src/num.rs @@ -91,7 +91,7 @@ where /// /// # Invariant /// If not sorted the output will be invalid. 
-#[allow(clippy::debug_assert_with_mut_call)] +#[expect(clippy::debug_assert_with_mut_call)] pub fn median(array: impl AsRef<[T]>) -> T where T: Add diff --git a/helper/src/thread.rs b/helper/src/thread.rs index 04a2606..8ba025d 100644 --- a/helper/src/thread.rs +++ b/helper/src/thread.rs @@ -6,7 +6,6 @@ use std::{cmp::max, num::NonZeroUsize}; //---------------------------------------------------------------------------------------------------- Thread Count & Percent -#[allow(non_snake_case)] /// Get the total amount of system threads. /// /// ```rust @@ -28,10 +27,15 @@ macro_rules! impl_thread_percent { $( $(#[$doc])* pub fn $fn_name() -> NonZeroUsize { - // unwrap here is okay because: - // - THREADS().get() is always non-zero - // - max() guards against 0 - #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss, clippy::cast_precision_loss)] + // unwrap here is okay because: + // - THREADS().get() is always non-zero + // - max() guards against 0 + #[expect( + clippy::cast_possible_truncation, + clippy::cast_sign_loss, + clippy::cast_precision_loss, + reason = "we need to round integers" + )] NonZeroUsize::new(max(1, (threads().get() as f64 * $percent).floor() as usize)).unwrap() } )* diff --git a/helper/src/time.rs b/helper/src/time.rs index ce39c2d..c7b12c2 100644 --- a/helper/src/time.rs +++ b/helper/src/time.rs @@ -129,7 +129,7 @@ pub const fn secs_to_clock(seconds: u32) -> (u8, u8, u8) { debug_assert!(m < 60); debug_assert!(s < 60); - #[allow(clippy::cast_possible_truncation)] // checked above + #[expect(clippy::cast_possible_truncation, reason = "checked above")] (h as u8, m, s) } @@ -154,7 +154,7 @@ pub fn time() -> u32 { /// /// This is guaranteed to return a value between `0..=86399` pub fn time_utc() -> u32 { - #[allow(clippy::cast_sign_loss)] // checked in function calls + #[expect(clippy::cast_sign_loss, reason = "checked in function calls")] unix_clock(chrono::offset::Local::now().timestamp() as u64) } diff --git 
a/rpc/interface/src/route/bin.rs b/rpc/interface/src/route/bin.rs index 90d06c8..f7e3a01 100644 --- a/rpc/interface/src/route/bin.rs +++ b/rpc/interface/src/route/bin.rs @@ -28,7 +28,6 @@ macro_rules! generate_endpoints_with_input { ),*) => { paste::paste! { $( /// TODO - #[allow(unused_mut)] pub(crate) async fn $endpoint( State(handler): State, mut request: Bytes, @@ -55,7 +54,6 @@ macro_rules! generate_endpoints_with_no_input { ),*) => { paste::paste! { $( /// TODO - #[allow(unused_mut)] pub(crate) async fn $endpoint( State(handler): State, ) -> Result { diff --git a/rpc/interface/src/router_builder.rs b/rpc/interface/src/router_builder.rs index 2e80c43..d18a694 100644 --- a/rpc/interface/src/router_builder.rs +++ b/rpc/interface/src/router_builder.rs @@ -69,7 +69,6 @@ macro_rules! generate_router_builder { /// .all() /// .build(); /// ``` - #[allow(clippy::struct_excessive_bools)] #[derive(Clone)] pub struct RouterBuilder { router: Router, diff --git a/rpc/interface/src/rpc_handler_dummy.rs b/rpc/interface/src/rpc_handler_dummy.rs index 0b01835..9d5009e 100644 --- a/rpc/interface/src/rpc_handler_dummy.rs +++ b/rpc/interface/src/rpc_handler_dummy.rs @@ -57,7 +57,7 @@ impl Service for RpcHandlerDummy { use cuprate_rpc_types::json::JsonRpcRequest as Req; use cuprate_rpc_types::json::JsonRpcResponse as Resp; - #[allow(clippy::default_trait_access)] + #[expect(clippy::default_trait_access)] let resp = match req { Req::GetBlockCount(_) => Resp::GetBlockCount(Default::default()), Req::OnGetBlockHash(_) => Resp::OnGetBlockHash(Default::default()), @@ -112,7 +112,7 @@ impl Service for RpcHandlerDummy { use cuprate_rpc_types::bin::BinRequest as Req; use cuprate_rpc_types::bin::BinResponse as Resp; - #[allow(clippy::default_trait_access)] + #[expect(clippy::default_trait_access)] let resp = match req { Req::GetBlocks(_) => Resp::GetBlocks(Default::default()), Req::GetBlocksByHeight(_) => Resp::GetBlocksByHeight(Default::default()), @@ -142,7 +142,7 @@ impl Service for 
RpcHandlerDummy { use cuprate_rpc_types::other::OtherRequest as Req; use cuprate_rpc_types::other::OtherResponse as Resp; - #[allow(clippy::default_trait_access)] + #[expect(clippy::default_trait_access)] let resp = match req { Req::GetHeight(_) => Resp::GetHeight(Default::default()), Req::GetTransactions(_) => Resp::GetTransactions(Default::default()), diff --git a/rpc/json-rpc/src/tests.rs b/rpc/json-rpc/src/tests.rs index 3ee6088..99ce126 100644 --- a/rpc/json-rpc/src/tests.rs +++ b/rpc/json-rpc/src/tests.rs @@ -52,7 +52,7 @@ where } /// Tests an input JSON string matches an expected type `T`. -#[allow(clippy::needless_pass_by_value)] // serde signature +#[expect(clippy::needless_pass_by_value, reason = "serde signature")] fn assert_de(json: &'static str, expected: T) where T: DeserializeOwned + std::fmt::Debug + Clone + PartialEq, diff --git a/rpc/types/src/bin.rs b/rpc/types/src/bin.rs index 0dbddea..a68d3e1 100644 --- a/rpc/types/src/bin.rs +++ b/rpc/types/src/bin.rs @@ -138,7 +138,6 @@ define_request! { )] /// /// This response's variant depends upon [`PoolInfoExtent`]. -#[allow(dead_code, missing_docs)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum GetBlocksResponse { @@ -157,7 +156,6 @@ impl Default for GetBlocksResponse { } /// Data within [`GetBlocksResponse::PoolInfoNone`]. -#[allow(dead_code, missing_docs)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct GetBlocksResponsePoolInfoNone { @@ -183,7 +181,6 @@ epee_object! { } /// Data within [`GetBlocksResponse::PoolInfoIncremental`]. -#[allow(dead_code, missing_docs)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct GetBlocksResponsePoolInfoIncremental { @@ -215,7 +212,6 @@ epee_object! 
{ } /// Data within [`GetBlocksResponse::PoolInfoFull`]. -#[allow(dead_code, missing_docs)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct GetBlocksResponsePoolInfoFull { @@ -248,7 +244,6 @@ epee_object! { /// [`EpeeObjectBuilder`] for [`GetBlocksResponse`]. /// /// Not for public usage. -#[allow(dead_code, missing_docs)] #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct __GetBlocksResponseEpeeBuilder { @@ -354,7 +349,6 @@ impl EpeeObjectBuilder for __GetBlocksResponseEpeeBuilder { } #[cfg(feature = "epee")] -#[allow(clippy::cognitive_complexity)] impl EpeeObject for GetBlocksResponse { type Builder = __GetBlocksResponseEpeeBuilder; @@ -397,7 +391,6 @@ impl EpeeObject for GetBlocksResponse { /// See also: [`BinResponse`]. #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] #[cfg_attr(feature = "serde", serde(untagged))] -#[allow(missing_docs)] #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum BinRequest { GetBlocks(GetBlocksRequest), @@ -444,7 +437,6 @@ impl RpcCallValue for BinRequest { #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] #[cfg_attr(feature = "serde", serde(untagged))] -#[allow(missing_docs)] pub enum BinResponse { GetBlocks(GetBlocksResponse), GetBlocksByHeight(GetBlocksByHeightResponse), diff --git a/rpc/types/src/free.rs b/rpc/types/src/free.rs index 45fb2f7..a41c853 100644 --- a/rpc/types/src/free.rs +++ b/rpc/types/src/free.rs @@ -5,16 +5,16 @@ /// Returns `true` if the input `u` is equal to `0`. #[inline] -#[allow(clippy::trivially_copy_pass_by_ref)] // serde needs `&` -#[allow(dead_code)] // TODO: see if needed after handlers. 
+#[expect(clippy::trivially_copy_pass_by_ref, reason = "serde signature")] +#[expect(dead_code, reason = "TODO: see if needed after handlers.")] pub(crate) const fn is_zero(u: &u64) -> bool { *u == 0 } /// Returns `true` the input `u` is equal to `1`. #[inline] -#[allow(clippy::trivially_copy_pass_by_ref)] // serde needs `&` -#[allow(dead_code)] // TODO: see if needed after handlers. +#[expect(clippy::trivially_copy_pass_by_ref, reason = "serde signature")] +#[expect(dead_code, reason = "TODO: see if needed after handlers.")] pub(crate) const fn is_one(u: &u64) -> bool { *u == 1 } diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index cfefcf9..fb6e44b 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -1581,7 +1581,6 @@ define_request_and_response! { feature = "serde", serde(rename_all = "snake_case", tag = "method", content = "params") )] -#[allow(missing_docs)] pub enum JsonRpcRequest { GetBlockCount(GetBlockCountRequest), OnGetBlockHash(OnGetBlockHashRequest), @@ -1714,7 +1713,6 @@ impl RpcCallValue for JsonRpcRequest { #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] #[cfg_attr(feature = "serde", serde(untagged, rename_all = "snake_case"))] -#[allow(missing_docs)] pub enum JsonRpcResponse { GetBlockCount(GetBlockCountResponse), OnGetBlockHash(OnGetBlockHashResponse), diff --git a/rpc/types/src/lib.rs b/rpc/types/src/lib.rs index 51ea3cc..be1069e 100644 --- a/rpc/types/src/lib.rs +++ b/rpc/types/src/lib.rs @@ -1,5 +1,9 @@ #![doc = include_str!("../README.md")] #![cfg_attr(docsrs, feature(doc_cfg))] +#![allow( + clippy::allow_attributes, + reason = "macros (internal + serde) make this lint hard to satisfy" +)] mod constants; mod defaults; diff --git a/rpc/types/src/macros.rs b/rpc/types/src/macros.rs index 60ffa90..85f4272 100644 --- a/rpc/types/src/macros.rs +++ b/rpc/types/src/macros.rs @@ -94,6 +94,7 @@ macro_rules! 
define_request_and_response { } ) => { paste::paste! { $crate::macros::define_request! { + #[allow(dead_code, missing_docs, reason = "inside a macro")] #[doc = $crate::macros::define_request_and_response_doc!( "response" => [<$type_name Response>], $monero_daemon_rpc_doc_link, @@ -118,8 +119,7 @@ macro_rules! define_request_and_response { } $crate::macros::define_response! { - #[allow(dead_code)] - #[allow(missing_docs)] + #[allow(dead_code, missing_docs, reason = "inside a macro")] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] #[doc = $crate::macros::define_request_and_response_doc!( @@ -236,7 +236,7 @@ macro_rules! define_request { )* } ) => { - #[allow(dead_code, missing_docs)] + #[allow(dead_code, missing_docs, reason = "inside a macro")] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] $( #[$attr] )* diff --git a/rpc/types/src/misc/distribution.rs b/rpc/types/src/misc/distribution.rs index 55d509e..faac7ad 100644 --- a/rpc/types/src/misc/distribution.rs +++ b/rpc/types/src/misc/distribution.rs @@ -76,7 +76,6 @@ impl Default for Distribution { } /// Data within [`Distribution::Uncompressed`]. -#[allow(dead_code, missing_docs)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct DistributionUncompressed { @@ -99,7 +98,6 @@ epee_object! { } /// Data within [`Distribution::CompressedBinary`]. -#[allow(dead_code, missing_docs)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct DistributionCompressedBinary { @@ -132,7 +130,7 @@ epee_object! { /// 1. Compresses the distribution array /// 2. 
Serializes the compressed data #[cfg(feature = "serde")] -#[allow(clippy::ptr_arg)] +#[expect(clippy::ptr_arg)] fn serialize_distribution_as_compressed_data(v: &Vec, s: S) -> Result where S: serde::Serializer, @@ -162,7 +160,6 @@ where /// [`EpeeObjectBuilder`] for [`Distribution`]. /// /// Not for public usage. -#[allow(dead_code, missing_docs)] #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct __DistributionEpeeBuilder { diff --git a/rpc/types/src/misc/mod.rs b/rpc/types/src/misc/mod.rs index c5c1840..e09f847 100644 --- a/rpc/types/src/misc/mod.rs +++ b/rpc/types/src/misc/mod.rs @@ -15,7 +15,7 @@ mod binary_string; mod distribution; mod key_image_spent_status; -#[allow(clippy::module_inception)] +#[expect(clippy::module_inception)] mod misc; mod pool_info_extent; mod status; diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index 28c95d2..5b04089 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -973,7 +973,6 @@ define_request_and_response! 
{ #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] #[cfg_attr(feature = "serde", serde(untagged))] -#[allow(missing_docs)] pub enum OtherRequest { GetHeight(GetHeightRequest), GetTransactions(GetTransactionsRequest), @@ -1092,7 +1091,6 @@ impl RpcCallValue for OtherRequest { #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] #[cfg_attr(feature = "serde", serde(untagged))] -#[allow(missing_docs)] pub enum OtherResponse { GetHeight(GetHeightResponse), GetTransactions(GetTransactionsResponse), diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index 91d6e57..5fc72fc 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -183,7 +183,10 @@ pub fn get_block_extended_header( /// Same as [`get_block_extended_header`] but with a [`BlockHeight`]. #[doc = doc_error!()] -#[allow(clippy::missing_panics_doc)] // The panic is only possible with a corrupt DB +#[expect( + clippy::missing_panics_doc, + reason = "The panic is only possible with a corrupt DB" +)] #[inline] pub fn get_block_extended_header_from_height( block_height: &BlockHeight, @@ -198,8 +201,10 @@ pub fn get_block_extended_header_from_height( block_info.cumulative_difficulty_high, ); - // INVARIANT: #[cfg] @ lib.rs asserts `usize == u64` - #[allow(clippy::cast_possible_truncation)] + #[expect( + clippy::cast_possible_truncation, + reason = "INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`" + )] Ok(ExtendedBlockHeader { cumulative_difficulty, version: HardFork::from_version(block.header.hardfork_version) @@ -260,11 +265,7 @@ pub fn block_exists( //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] -#[allow( - clippy::significant_drop_tightening, - clippy::cognitive_complexity, - clippy::too_many_lines -)] 
+#[expect(clippy::significant_drop_tightening, clippy::too_many_lines)] mod test { use pretty_assertions::assert_eq; diff --git a/storage/blockchain/src/ops/blockchain.rs b/storage/blockchain/src/ops/blockchain.rs index ed368ad..acda96f 100644 --- a/storage/blockchain/src/ops/blockchain.rs +++ b/storage/blockchain/src/ops/blockchain.rs @@ -25,7 +25,7 @@ use crate::{ pub fn chain_height( table_block_heights: &impl DatabaseRo, ) -> Result { - #[allow(clippy::cast_possible_truncation)] // we enforce 64-bit + #[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")] table_block_heights.len().map(|height| height as usize) } @@ -48,7 +48,7 @@ pub fn top_block_height( ) -> Result { match table_block_heights.len()? { 0 => Err(RuntimeError::KeyNotFound), - #[allow(clippy::cast_possible_truncation)] // we enforce 64-bit + #[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")] height => Ok(height as usize - 1), } } diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index 207da41..87f416e 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -142,7 +142,6 @@ fn thread_local(env: &impl Env) -> ThreadLocal { macro_rules! get_tables { ($env_inner:ident, $tx_ro:ident, $tables:ident) => {{ $tables.get_or_try(|| { - #[allow(clippy::significant_drop_in_scrutinee)] match $env_inner.open_tables($tx_ro) { // SAFETY: see above macro doc comment. Ok(tables) => Ok(unsafe { crate::unsafe_sendable::UnsafeSendable::new(tables) }), @@ -339,8 +338,10 @@ fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec) -> Respon let tables = thread_local(env); // Cache the amount of RCT outputs once. 
- // INVARIANT: #[cfg] @ lib.rs asserts `usize == u64` - #[allow(clippy::cast_possible_truncation)] + #[expect( + clippy::cast_possible_truncation, + reason = "INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`" + )] let num_rct_outputs = { let tx_ro = env_inner.tx_ro()?; let tables = env_inner.open_tables(&tx_ro)?; @@ -360,8 +361,10 @@ fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec) -> Respon } else { // v1 transactions. match tables.num_outputs().get(&amount) { - // INVARIANT: #[cfg] @ lib.rs asserts `usize == u64` - #[allow(clippy::cast_possible_truncation)] + #[expect( + clippy::cast_possible_truncation, + reason = "INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`" + )] Ok(count) => Ok((amount, count as usize)), // If we get a request for an `amount` that doesn't exist, // we return `0` instead of an error. diff --git a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs index b68b544..c314bb5 100644 --- a/storage/blockchain/src/service/tests.rs +++ b/storage/blockchain/src/service/tests.rs @@ -58,7 +58,10 @@ fn init_service() -> ( /// - Receive response(s) /// - Assert proper tables were mutated /// - Assert read requests lead to expected responses -#[allow(clippy::future_not_send)] // INVARIANT: tests are using a single threaded runtime +#[expect( + clippy::future_not_send, + reason = "INVARIANT: tests are using a single threaded runtime" +)] async fn test_template( // Which block(s) to add? 
blocks: &[&VerifiedBlockInformation], @@ -164,8 +167,10 @@ async fn test_template( num_req .iter() .map(|amount| match tables.num_outputs().get(amount) { - // INVARIANT: #[cfg] @ lib.rs asserts `usize == u64` - #[allow(clippy::cast_possible_truncation)] + #[expect( + clippy::cast_possible_truncation, + reason = "INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`" + )] Ok(count) => (*amount, count as usize), Err(RuntimeError::KeyNotFound) => (*amount, 0), Err(e) => panic!("{e:?}"), @@ -304,7 +309,10 @@ async fn test_template( // Assert we get back the same map of // `Amount`'s and `AmountIndex`'s. let mut response_output_count = 0; - #[allow(clippy::iter_over_hash_type)] // order doesn't matter in this test + #[expect( + clippy::iter_over_hash_type, + reason = "order doesn't matter in this test" + )] for (amount, output_map) in response { let amount_index_set = &map[&amount]; diff --git a/storage/blockchain/src/unsafe_sendable.rs b/storage/blockchain/src/unsafe_sendable.rs index 9447293..76c7899 100644 --- a/storage/blockchain/src/unsafe_sendable.rs +++ b/storage/blockchain/src/unsafe_sendable.rs @@ -26,7 +26,7 @@ use bytemuck::TransparentWrapper; /// Notably, `heed`'s table type uses this inside `service`. pub(crate) struct UnsafeSendable(T); -#[allow(clippy::non_send_fields_in_send_ty)] +#[expect(clippy::non_send_fields_in_send_ty)] // SAFETY: Users ensure that their usage of this type is safe. unsafe impl Send for UnsafeSendable {} @@ -41,7 +41,7 @@ impl UnsafeSendable { } /// Extract the inner `T`. 
- #[allow(dead_code)] + #[expect(dead_code)] pub(crate) fn into_inner(self) -> T { self.0 } diff --git a/storage/database/src/backend/heed/env.rs b/storage/database/src/backend/heed/env.rs index 8c71e61..568379e 100644 --- a/storage/database/src/backend/heed/env.rs +++ b/storage/database/src/backend/heed/env.rs @@ -144,7 +144,7 @@ impl Env for ConcreteEnv { // (current disk size) + (a bit of leeway) // to account for empty databases where we // need to write same tables. - #[allow(clippy::cast_possible_truncation)] // only 64-bit targets + #[expect(clippy::cast_possible_truncation, reason = "only 64-bit targets")] let disk_size_bytes = match std::fs::File::open(&config.db_file) { Ok(file) => file.metadata()?.len() as usize, // The database file doesn't exist, 0 bytes. diff --git a/storage/database/src/backend/heed/error.rs b/storage/database/src/backend/heed/error.rs index bbaeaf0..fdeab70 100644 --- a/storage/database/src/backend/heed/error.rs +++ b/storage/database/src/backend/heed/error.rs @@ -57,7 +57,10 @@ impl From for crate::InitError { } //---------------------------------------------------------------------------------------------------- RuntimeError -#[allow(clippy::fallible_impl_from)] // We need to panic sometimes. +#[expect( + clippy::fallible_impl_from, + reason = "We need to panic sometimes for safety" +)] impl From for crate::RuntimeError { /// # Panics /// This will panic on unrecoverable errors for safety. diff --git a/storage/database/src/backend/tests.rs b/storage/database/src/backend/tests.rs index e219c42..0c0fe05 100644 --- a/storage/database/src/backend/tests.rs +++ b/storage/database/src/backend/tests.rs @@ -194,7 +194,7 @@ fn db_read_write() { // Insert keys. 
let mut key = KEY; - #[allow(clippy::explicit_counter_loop)] // we need the +1 side effect + #[expect(clippy::explicit_counter_loop, reason = "we need the +1 side effect")] for _ in 0..N { table.put(&key, &VALUE).unwrap(); key += 1; @@ -269,7 +269,7 @@ fn db_read_write() { assert_ne!(table.get(&KEY).unwrap(), NEW_VALUE); - #[allow(unused_assignments)] + #[expect(unused_assignments)] table .update(&KEY, |mut value| { value = NEW_VALUE; diff --git a/storage/database/src/config/mod.rs b/storage/database/src/config/mod.rs index c6ed0c0..7d65233 100644 --- a/storage/database/src/config/mod.rs +++ b/storage/database/src/config/mod.rs @@ -33,7 +33,7 @@ //! # Ok(()) } //! ``` -#[allow(clippy::module_inception)] +#[expect(clippy::module_inception)] mod config; pub use config::{Config, ConfigBuilder, READER_THREADS_DEFAULT}; diff --git a/storage/database/src/database.rs b/storage/database/src/database.rs index 4a45f7c..6fbb7aa 100644 --- a/storage/database/src/database.rs +++ b/storage/database/src/database.rs @@ -54,7 +54,7 @@ pub trait DatabaseIter { /// Get an [`Iterator`] that returns the `(key, value)` types for this database. #[doc = doc_iter!()] - #[allow(clippy::iter_not_returning_iterator)] + #[expect(clippy::iter_not_returning_iterator)] fn iter( &self, ) -> Result> + '_, RuntimeError>; diff --git a/storage/database/src/env.rs b/storage/database/src/env.rs index 8294443..1ae6aa1 100644 --- a/storage/database/src/env.rs +++ b/storage/database/src/env.rs @@ -122,7 +122,7 @@ pub trait Env: Sized { /// This function _must_ be re-implemented if [`Env::MANUAL_RESIZE`] is `true`. /// /// Otherwise, this function will panic with `unreachable!()`. 
- #[allow(unused_variables)] + #[expect(unused_variables)] fn resize_map(&self, resize_algorithm: Option) -> NonZeroUsize { unreachable!() } diff --git a/storage/database/src/resize.rs b/storage/database/src/resize.rs index 6ef9974..b217478 100644 --- a/storage/database/src/resize.rs +++ b/storage/database/src/resize.rs @@ -261,7 +261,7 @@ pub fn percent(current_size_bytes: usize, percent: f32) -> NonZeroUsize { let page_size = *PAGE_SIZE; // INVARIANT: Allow `f32` <-> `usize` casting, we handle all cases. - #[allow( + #[expect( clippy::cast_possible_truncation, clippy::cast_sign_loss, clippy::cast_precision_loss diff --git a/storage/service/src/reader_threads.rs b/storage/service/src/reader_threads.rs index 72f619a..a182e48 100644 --- a/storage/service/src/reader_threads.rs +++ b/storage/service/src/reader_threads.rs @@ -153,7 +153,7 @@ impl ReaderThreads { }, // We handle the casting loss. - #[allow( + #[expect( clippy::cast_precision_loss, clippy::cast_possible_truncation, clippy::cast_sign_loss diff --git a/storage/txpool/src/service/interface.rs b/storage/txpool/src/service/interface.rs index 93235c0..450b28d 100644 --- a/storage/txpool/src/service/interface.rs +++ b/storage/txpool/src/service/interface.rs @@ -18,7 +18,7 @@ pub enum TxpoolReadRequest { //---------------------------------------------------------------------------------------------------- TxpoolReadResponse /// The transaction pool [`tower::Service`] read response type. -#[allow(clippy::large_enum_variant)] +#[expect(clippy::large_enum_variant)] pub enum TxpoolReadResponse { /// A response containing the raw bytes of a transaction. // TODO: use bytes::Bytes. diff --git a/storage/txpool/src/service/read.rs b/storage/txpool/src/service/read.rs index 5654164..f006813 100644 --- a/storage/txpool/src/service/read.rs +++ b/storage/txpool/src/service/read.rs @@ -50,7 +50,7 @@ fn init_read_service_with_pool(env: Arc, pool: Arc) -> /// 1. `Request` is mapped to a handler function /// 2. 
Handler function is called /// 3. [`TxpoolReadResponse`] is returned -#[allow(clippy::needless_pass_by_value)] +#[expect(clippy::needless_pass_by_value)] fn map_request( env: &ConcreteEnv, // Access to the database request: TxpoolReadRequest, // The request we must fulfill diff --git a/storage/txpool/src/types.rs b/storage/txpool/src/types.rs index 09b0ce0..4da2d0f 100644 --- a/storage/txpool/src/types.rs +++ b/storage/txpool/src/types.rs @@ -39,7 +39,7 @@ pub struct TransactionInfo { pub weight: usize, /// [`TxStateFlags`] of this transaction. pub flags: TxStateFlags, - #[allow(clippy::pub_underscore_fields)] + #[expect(clippy::pub_underscore_fields)] /// Explicit padding so that we have no implicit padding bytes in `repr(C)`. /// /// Allows potential future expansion of this type. @@ -92,7 +92,7 @@ impl From for CachedVerificationState { } } -#[allow(clippy::fallible_impl_from)] // only panics in invalid states +#[expect(clippy::fallible_impl_from, reason = "only panics in invalid states")] impl From for RawCachedVerificationState { fn from(value: CachedVerificationState) -> Self { match value { diff --git a/types/src/hard_fork.rs b/types/src/hard_fork.rs index 412448e..8b2cd78 100644 --- a/types/src/hard_fork.rs +++ b/types/src/hard_fork.rs @@ -27,7 +27,6 @@ pub enum HardForkError { } /// An identifier for every hard-fork Monero has had. 
-#[allow(missing_docs)] #[derive(Default, Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone, Hash)] #[cfg_attr(any(feature = "proptest"), derive(proptest_derive::Arbitrary))] #[repr(u8)] From 8b4b403c5cc691103e4ec2dbe3cf3608188b5fd0 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 18 Sep 2024 17:44:23 -0400 Subject: [PATCH 057/104] pruning: enable workspace lints (#284) pruning: enable/fix workspace lints --- pruning/Cargo.toml | 3 ++ pruning/src/lib.rs | 106 ++++++++++++++++++++++----------------------- 2 files changed, 56 insertions(+), 53 deletions(-) diff --git a/pruning/Cargo.toml b/pruning/Cargo.toml index 3f5bd27..497c04b 100644 --- a/pruning/Cargo.toml +++ b/pruning/Cargo.toml @@ -13,3 +13,6 @@ borsh = ["dep:borsh"] thiserror = { workspace = true } borsh = { workspace = true, features = ["derive", "std"], optional = true } + +[lints] +workspace = true \ No newline at end of file diff --git a/pruning/src/lib.rs b/pruning/src/lib.rs index fdd159c..1f5ee2a 100644 --- a/pruning/src/lib.rs +++ b/pruning/src/lib.rs @@ -71,7 +71,7 @@ impl PruningSeed { /// /// See: [`DecompressedPruningSeed::new`] pub fn new_pruned(stripe: u32, log_stripes: u32) -> Result { - Ok(PruningSeed::Pruned(DecompressedPruningSeed::new( + Ok(Self::Pruned(DecompressedPruningSeed::new( stripe, log_stripes, )?)) @@ -81,9 +81,7 @@ impl PruningSeed { /// /// An error means the pruning seed was invalid. pub fn decompress(seed: u32) -> Result { - Ok(DecompressedPruningSeed::decompress(seed)? - .map(PruningSeed::Pruned) - .unwrap_or(PruningSeed::NotPruned)) + Ok(DecompressedPruningSeed::decompress(seed)?.map_or(Self::NotPruned, Self::Pruned)) } /// Decompresses the seed, performing the same checks as [`PruningSeed::decompress`] and some more according to @@ -103,34 +101,34 @@ impl PruningSeed { } /// Compresses this pruning seed to a u32. 
- pub fn compress(&self) -> u32 { + pub const fn compress(&self) -> u32 { match self { - PruningSeed::NotPruned => 0, - PruningSeed::Pruned(seed) => seed.compress(), + Self::NotPruned => 0, + Self::Pruned(seed) => seed.compress(), } } /// Returns the `log_stripes` for this seed, if this seed is pruned otherwise [`None`] is returned. - pub fn get_log_stripes(&self) -> Option { + pub const fn get_log_stripes(&self) -> Option { match self { - PruningSeed::NotPruned => None, - PruningSeed::Pruned(seed) => Some(seed.log_stripes), + Self::NotPruned => None, + Self::Pruned(seed) => Some(seed.log_stripes), } } /// Returns the `stripe` for this seed, if this seed is pruned otherwise [`None`] is returned. - pub fn get_stripe(&self) -> Option { + pub const fn get_stripe(&self) -> Option { match self { - PruningSeed::NotPruned => None, - PruningSeed::Pruned(seed) => Some(seed.stripe), + Self::NotPruned => None, + Self::Pruned(seed) => Some(seed.stripe), } } /// Returns `true` if a peer with this pruning seed should have a non-pruned version of a block. - pub fn has_full_block(&self, height: usize, blockchain_height: usize) -> bool { + pub const fn has_full_block(&self, height: usize, blockchain_height: usize) -> bool { match self { - PruningSeed::NotPruned => true, - PruningSeed::Pruned(seed) => seed.has_full_block(height, blockchain_height), + Self::NotPruned => true, + Self::Pruned(seed) => seed.has_full_block(height, blockchain_height), } } @@ -155,10 +153,8 @@ impl PruningSeed { blockchain_height: usize, ) -> Result, PruningError> { Ok(match self { - PruningSeed::NotPruned => None, - PruningSeed::Pruned(seed) => { - seed.get_next_pruned_block(block_height, blockchain_height)? 
- } + Self::NotPruned => None, + Self::Pruned(seed) => seed.get_next_pruned_block(block_height, blockchain_height)?, }) } @@ -181,10 +177,8 @@ impl PruningSeed { blockchain_height: usize, ) -> Result { Ok(match self { - PruningSeed::NotPruned => block_height, - PruningSeed::Pruned(seed) => { - seed.get_next_unpruned_block(block_height, blockchain_height)? - } + Self::NotPruned => block_height, + Self::Pruned(seed) => seed.get_next_unpruned_block(block_height, blockchain_height)?, }) } } @@ -199,11 +193,11 @@ impl Ord for PruningSeed { fn cmp(&self, other: &Self) -> Ordering { match (self, other) { // Make sure pruning seeds storing more blocks are greater. - (PruningSeed::NotPruned, PruningSeed::NotPruned) => Ordering::Equal, - (PruningSeed::NotPruned, PruningSeed::Pruned(_)) => Ordering::Greater, - (PruningSeed::Pruned(_), PruningSeed::NotPruned) => Ordering::Less, + (Self::NotPruned, Self::NotPruned) => Ordering::Equal, + (Self::NotPruned, Self::Pruned(_)) => Ordering::Greater, + (Self::Pruned(_), Self::NotPruned) => Ordering::Less, - (PruningSeed::Pruned(seed1), PruningSeed::Pruned(seed2)) => seed1.cmp(seed2), + (Self::Pruned(seed1), Self::Pruned(seed2)) => seed1.cmp(seed2), } } } @@ -222,7 +216,7 @@ pub struct DecompressedPruningSeed { log_stripes: u32, /// The specific portion this peer keeps. 
/// - /// *MUST* be between 1..=2^log_stripes + /// *MUST* be between `1..=2^log_stripes` stripe: u32, } @@ -268,13 +262,13 @@ impl DecompressedPruningSeed { /// a valid seed you currently MUST pass in a number 1 to 8 for `stripe` /// and 3 for `log_stripes`.* /// - pub fn new(stripe: u32, log_stripes: u32) -> Result { + pub const fn new(stripe: u32, log_stripes: u32) -> Result { if log_stripes > PRUNING_SEED_LOG_STRIPES_MASK { Err(PruningError::LogStripesOutOfRange) } else if !(stripe > 0 && stripe <= (1 << log_stripes)) { Err(PruningError::StripeOutOfRange) } else { - Ok(DecompressedPruningSeed { + Ok(Self { log_stripes, stripe, }) @@ -286,7 +280,7 @@ impl DecompressedPruningSeed { /// Will return Ok(None) if the pruning seed means no pruning. /// /// An error means the pruning seed was invalid. - pub fn decompress(seed: u32) -> Result, PruningError> { + pub const fn decompress(seed: u32) -> Result, PruningError> { if seed == 0 { // No pruning. return Ok(None); @@ -299,20 +293,20 @@ impl DecompressedPruningSeed { return Err(PruningError::StripeOutOfRange); } - Ok(Some(DecompressedPruningSeed { + Ok(Some(Self { log_stripes, stripe, })) } /// Compresses the pruning seed into a u32. - pub fn compress(&self) -> u32 { + pub const fn compress(&self) -> u32 { (self.log_stripes << PRUNING_SEED_LOG_STRIPES_SHIFT) | ((self.stripe - 1) << PRUNING_SEED_STRIPE_SHIFT) } /// Returns `true` if a peer with this pruning seed should have a non-pruned version of a block. - pub fn has_full_block(&self, height: usize, blockchain_height: usize) -> bool { + pub const fn has_full_block(&self, height: usize, blockchain_height: usize) -> bool { match get_block_pruning_stripe(height, blockchain_height, self.log_stripes) { Some(block_stripe) => self.stripe == block_stripe, None => true, @@ -419,7 +413,7 @@ impl DecompressedPruningSeed { // We can get the end of our "non-pruning" cycle by getting the next stripe's first un-pruned block height. 
// So we calculate the next un-pruned block for the next stripe and return it as our next pruned block let next_stripe = 1 + (self.stripe & ((1 << self.log_stripes) - 1)); - let seed = DecompressedPruningSeed::new(next_stripe, self.log_stripes) + let seed = Self::new(next_stripe, self.log_stripes) .expect("We just made sure this stripe is in range for this log_stripe"); let calculated_height = seed.get_next_unpruned_block(block_height, blockchain_height)?; @@ -433,7 +427,7 @@ impl DecompressedPruningSeed { } } -fn get_block_pruning_stripe( +const fn get_block_pruning_stripe( block_height: usize, blockchain_height: usize, log_stripe: u32, @@ -441,9 +435,14 @@ fn get_block_pruning_stripe( if block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height { None } else { + #[expect( + clippy::cast_possible_truncation, + clippy::cast_sign_loss, + reason = "it's trivial to prove it's ok to us `as` here" + )] Some( (((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & ((1 << log_stripe) as usize - 1)) - + 1) as u32, // it's trivial to prove it's ok to us `as` here + + 1) as u32, ) } } @@ -483,16 +482,17 @@ mod tests { #[test] fn get_pruning_log_stripe() { let all_valid_seeds = make_all_pruning_seeds(); - for seed in all_valid_seeds.iter() { - assert_eq!(seed.get_log_stripes().unwrap(), 3) + for seed in &all_valid_seeds { + assert_eq!(seed.get_log_stripes().unwrap(), 3); } } #[test] fn get_pruning_stripe() { let all_valid_seeds = make_all_pruning_seeds(); + #[expect(clippy::cast_possible_truncation)] for (i, seed) in all_valid_seeds.iter().enumerate() { - assert_eq!(seed.get_stripe().unwrap(), i as u32 + 1) + assert_eq!(seed.get_stripe().unwrap(), i as u32 + 1); } } @@ -554,7 +554,7 @@ mod tests { assert_eq!( seed.get_next_unpruned_block(0, blockchain_height).unwrap(), i * 4096 - ) + ); } for (i, seed) in all_valid_seeds.iter().enumerate() { @@ -562,7 +562,7 @@ mod tests { seed.get_next_unpruned_block((i + 1) * 4096, blockchain_height) .unwrap(), i * 4096 + 32768 - ) + ); 
} for (i, seed) in all_valid_seeds.iter().enumerate() { @@ -570,15 +570,15 @@ mod tests { seed.get_next_unpruned_block((i + 8) * 4096, blockchain_height) .unwrap(), i * 4096 + 32768 - ) + ); } - for seed in all_valid_seeds.iter() { + for seed in &all_valid_seeds { assert_eq!( seed.get_next_unpruned_block(76437863 - 1, blockchain_height) .unwrap(), 76437863 - 1 - ) + ); } let zero_seed = PruningSeed::NotPruned; @@ -591,7 +591,7 @@ mod tests { let seed = PruningSeed::decompress(384).unwrap(); // the next unpruned block is the first tip block - assert_eq!(seed.get_next_unpruned_block(5000, 11000).unwrap(), 5500) + assert_eq!(seed.get_next_unpruned_block(5000, 11000).unwrap(), 5500); } #[test] @@ -605,7 +605,7 @@ mod tests { .unwrap() .unwrap(), 0 - ) + ); } for (i, seed) in all_valid_seeds.iter().enumerate() { @@ -614,7 +614,7 @@ mod tests { .unwrap() .unwrap(), (i + 1) * 4096 - ) + ); } for (i, seed) in all_valid_seeds.iter().enumerate() { @@ -623,15 +623,15 @@ mod tests { .unwrap() .unwrap(), (i + 9) * 4096 - ) + ); } - for seed in all_valid_seeds.iter() { + for seed in &all_valid_seeds { assert_eq!( seed.get_next_pruned_block(76437863 - 1, blockchain_height) .unwrap(), None - ) + ); } let zero_seed = PruningSeed::NotPruned; @@ -644,6 +644,6 @@ mod tests { let seed = PruningSeed::decompress(384).unwrap(); // there is no next pruned block - assert_eq!(seed.get_next_pruned_block(5000, 10000).unwrap(), None) + assert_eq!(seed.get_next_pruned_block(5000, 10000).unwrap(), None); } } From b9842fcb1860eec68506b7956a3f5c18637b9606 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 18 Sep 2024 18:12:35 -0400 Subject: [PATCH 058/104] fixed-bytes: enable workspace lints (#293) --- net/fixed-bytes/Cargo.toml | 3 +++ net/fixed-bytes/src/lib.rs | 50 ++++++++++++++++++++------------------ 2 files changed, 29 insertions(+), 24 deletions(-) diff --git a/net/fixed-bytes/Cargo.toml b/net/fixed-bytes/Cargo.toml index 4c5a1af..7844570 100644 --- a/net/fixed-bytes/Cargo.toml +++ 
b/net/fixed-bytes/Cargo.toml @@ -17,3 +17,6 @@ serde = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] serde_json = { workspace = true, features = ["std"] } + +[lints] +workspace = true \ No newline at end of file diff --git a/net/fixed-bytes/src/lib.rs b/net/fixed-bytes/src/lib.rs index 2e8f1bc..b1b064b 100644 --- a/net/fixed-bytes/src/lib.rs +++ b/net/fixed-bytes/src/lib.rs @@ -22,17 +22,15 @@ pub enum FixedByteError { } impl FixedByteError { - fn field_name(&self) -> &'static str { + const fn field_name(&self) -> &'static str { match self { - FixedByteError::InvalidLength => "input", + Self::InvalidLength => "input", } } - fn field_data(&self) -> &'static str { + const fn field_data(&self) -> &'static str { match self { - FixedByteError::InvalidLength => { - "Cannot create fix byte array, input has invalid length." - } + Self::InvalidLength => "Cannot create fix byte array, input has invalid length.", } } } @@ -82,7 +80,7 @@ impl ByteArray { impl From<[u8; N]> for ByteArray { fn from(value: [u8; N]) -> Self { - ByteArray(Bytes::copy_from_slice(&value)) + Self(Bytes::copy_from_slice(&value)) } } @@ -101,7 +99,7 @@ impl TryFrom for ByteArray { if value.len() != N { return Err(FixedByteError::InvalidLength); } - Ok(ByteArray(value)) + Ok(Self(value)) } } @@ -112,7 +110,7 @@ impl TryFrom> for ByteArray { if value.len() != N { return Err(FixedByteError::InvalidLength); } - Ok(ByteArray(Bytes::from(value))) + Ok(Self(Bytes::from(value))) } } @@ -142,11 +140,11 @@ impl<'de, const N: usize> Deserialize<'de> for ByteArrayVec { } impl ByteArrayVec { - pub fn len(&self) -> usize { + pub const fn len(&self) -> usize { self.0.len() / N } - pub fn is_empty(&self) -> bool { + pub const fn is_empty(&self) -> bool { self.len() == 0 } @@ -162,6 +160,7 @@ impl ByteArrayVec { /// /// # Panics /// Panics if at > len. 
+ #[must_use] pub fn split_off(&mut self, at: usize) -> Self { Self(self.0.split_off(at * N)) } @@ -169,9 +168,9 @@ impl ByteArrayVec { impl From<&ByteArrayVec> for Vec<[u8; N]> { fn from(value: &ByteArrayVec) -> Self { - let mut out = Vec::with_capacity(value.len()); + let mut out = Self::with_capacity(value.len()); for i in 0..value.len() { - out.push(value[i]) + out.push(value[i]); } out @@ -181,11 +180,11 @@ impl From<&ByteArrayVec> for Vec<[u8; N]> { impl From> for ByteArrayVec { fn from(value: Vec<[u8; N]>) -> Self { let mut bytes = BytesMut::with_capacity(N * value.len()); - for i in value.into_iter() { - bytes.extend_from_slice(&i) + for i in value { + bytes.extend_from_slice(&i); } - ByteArrayVec(bytes.freeze()) + Self(bytes.freeze()) } } @@ -197,13 +196,13 @@ impl TryFrom for ByteArrayVec { return Err(FixedByteError::InvalidLength); } - Ok(ByteArrayVec(value)) + Ok(Self(value)) } } impl From<[u8; N]> for ByteArrayVec { fn from(value: [u8; N]) -> Self { - ByteArrayVec(Bytes::copy_from_slice(value.as_slice())) + Self(Bytes::copy_from_slice(value.as_slice())) } } @@ -211,11 +210,11 @@ impl From<[[u8; N]; LEN]> for ByteArrayVec fn from(value: [[u8; N]; LEN]) -> Self { let mut bytes = BytesMut::with_capacity(N * LEN); - for val in value.into_iter() { + for val in value { bytes.put_slice(val.as_slice()); } - ByteArrayVec(bytes.freeze()) + Self(bytes.freeze()) } } @@ -227,7 +226,7 @@ impl TryFrom> for ByteArrayVec { return Err(FixedByteError::InvalidLength); } - Ok(ByteArrayVec(Bytes::from(value))) + Ok(Self(Bytes::from(value))) } } @@ -235,9 +234,12 @@ impl Index for ByteArrayVec { type Output = [u8; N]; fn index(&self, index: usize) -> &Self::Output { - if (index + 1) * N > self.0.len() { - panic!("Index out of range, idx: {}, length: {}", index, self.len()); - } + assert!( + (index + 1) * N <= self.0.len(), + "Index out of range, idx: {}, length: {}", + index, + self.len() + ); self.0[index * N..(index + 1) * N] .as_ref() From 
2afc0e8373d8cad459ebe5648fb24256769e83cd Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 18 Sep 2024 18:14:31 -0400 Subject: [PATCH 059/104] test-utils: enable workspace lints (#283) * test-utils: enable workspace lints + fix * `allow` -> `expect` * fixes --- Cargo.lock | 1 - test-utils/Cargo.toml | 6 +++-- test-utils/src/data/statics.rs | 4 ++-- test-utils/src/monerod.rs | 1 + test-utils/src/rpc/client.rs | 6 ++--- test-utils/src/rpc/data/macros.rs | 8 ------- test-utils/src/rpc/data/other.rs | 39 +++++++++++-------------------- test-utils/src/test_netzone.rs | 5 ++-- 8 files changed, 25 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0bb4612..3a435a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -840,7 +840,6 @@ version = "0.1.0" dependencies = [ "async-trait", "borsh", - "bytes", "cuprate-helper", "cuprate-p2p-core", "cuprate-types", diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index a96a9cf..b2fafd9 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -22,11 +22,13 @@ tokio = { workspace = true, features = ["full"] } tokio-util = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -bytes = { workspace = true, features = ["std"] } tempfile = { workspace = true } paste = { workspace = true } borsh = { workspace = true, features = ["derive"]} [dev-dependencies] hex = { workspace = true } -pretty_assertions = { workspace = true } \ No newline at end of file +pretty_assertions = { workspace = true } + +[lints] +workspace = true \ No newline at end of file diff --git a/test-utils/src/data/statics.rs b/test-utils/src/data/statics.rs index 8b98171..474e35c 100644 --- a/test-utils/src/data/statics.rs +++ b/test-utils/src/data/statics.rs @@ -148,8 +148,8 @@ pub fn tx_fee(tx: &Transaction) -> u64 { /// /// This requires some static block/tx input (from data) and some fields. 
/// This data can be accessed more easily via: -/// - A block explorer (https://xmrchain.net) -/// - Monero RPC (see cuprate_test_utils::rpc for this) +/// - A block explorer () +/// - Monero RPC (see `cuprate_test_utils::rpc` for this) /// /// See below for actual usage. macro_rules! verified_block_information { diff --git a/test-utils/src/monerod.rs b/test-utils/src/monerod.rs index 9ffa08d..abad4c9 100644 --- a/test-utils/src/monerod.rs +++ b/test-utils/src/monerod.rs @@ -178,6 +178,7 @@ impl Drop for SpawnedMoneroD { println!("------END-MONEROD-LOGS------"); } + #[expect(clippy::manual_assert, reason = "`if` is more clear")] if error && !panicking() { // `println` only outputs in a test when panicking so if there is an error while // dropping monerod but not an error in the test then we need to panic to make sure diff --git a/test-utils/src/rpc/client.rs b/test-utils/src/rpc/client.rs index fbe6fb9..25240ae 100644 --- a/test-utils/src/rpc/client.rs +++ b/test-utils/src/rpc/client.rs @@ -47,13 +47,13 @@ impl HttpRpcClient { } /// The address used for this [`HttpRpcClient`]. - #[allow(dead_code)] + #[allow(clippy::allow_attributes, dead_code, reason = "expect doesn't work")] const fn address(&self) -> &String { &self.address } /// Access to the inner RPC client for other usage. - #[allow(dead_code)] + #[expect(dead_code)] const fn rpc(&self) -> &SimpleRequestRpc { &self.rpc } @@ -197,7 +197,7 @@ mod tests { #[ignore] // FIXME: doesn't work in CI, we need a real unrestricted node #[tokio::test] async fn get() { - #[allow(clippy::too_many_arguments)] + #[expect(clippy::too_many_arguments)] async fn assert_eq( rpc: &HttpRpcClient, height: usize, diff --git a/test-utils/src/rpc/data/macros.rs b/test-utils/src/rpc/data/macros.rs index 632917a..63a214c 100644 --- a/test-utils/src/rpc/data/macros.rs +++ b/test-utils/src/rpc/data/macros.rs @@ -156,13 +156,5 @@ macro_rules! define_request_and_response_doc_test { "```\n", ) }; - - // No doc test. 
- ( - $name:ident, - $test:ident, - ) => { - "" - }; } pub(super) use define_request_and_response_doc_test; diff --git a/test-utils/src/rpc/data/other.rs b/test-utils/src/rpc/data/other.rs index 80a48ab..9af6d8b 100644 --- a/test-utils/src/rpc/data/other.rs +++ b/test-utils/src/rpc/data/other.rs @@ -8,8 +8,7 @@ define_request_and_response! { // `(other)` adds a JSON sanity-check test. get_height (other), GET_HEIGHT: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "hash": "68bb1a1cff8e2a44c3221e8e1aff80bc6ca45d06fa8eff4d2a3a7ac31d4efe3f", @@ -53,8 +52,7 @@ r#"{ define_request_and_response! { get_alt_blocks_hashes (other), GET_ALT_BLOCKS_HASHES: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "blks_hashes": ["8ee10db35b1baf943f201b303890a29e7d45437bd76c2bd4df0d2f2ee34be109"], @@ -134,8 +132,7 @@ r#"{ define_request_and_response! { stop_mining (other), STOP_MINING: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "status": "OK", @@ -146,8 +143,7 @@ r#"{ define_request_and_response! { mining_status (other), MINING_STATUS: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "active": false, @@ -173,8 +169,7 @@ r#"{ define_request_and_response! { save_bc (other), SAVE_BC: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "status": "OK", @@ -185,8 +180,7 @@ r#"{ define_request_and_response! { get_peer_list (other), GET_PEER_LIST: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "gray_list": [{ @@ -291,8 +285,7 @@ r#"{ define_request_and_response! { get_transaction_pool (other), GET_TRANSACTION_POOL: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "credits": 0, @@ -598,8 +591,7 @@ r#"{ define_request_and_response! { get_transaction_pool_stats (other), GET_TRANSACTION_POOL_STATS: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "credits": 0, @@ -657,8 +649,7 @@ r#"{ define_request_and_response! 
{ stop_daemon (other), STOP_DAEMON: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "status": "OK" @@ -668,8 +659,7 @@ r#"{ define_request_and_response! { get_limit (other), GET_LIMIT: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "limit_down": 1280000, @@ -713,8 +703,7 @@ r#"{ define_request_and_response! { get_net_stats (other), GET_NET_STATS: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "start_time": 1721251858, @@ -801,8 +790,7 @@ r#"{ define_request_and_response! { UNDOCUMENTED_ENDPOINT (other), GET_TRANSACTION_POOL_HASHES: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "credits": 0, @@ -835,8 +823,7 @@ r#"{ define_request_and_response! { UNDOCUMENTED_ENDPOINT (other), GET_PUBLIC_NODES: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "status": "OK", diff --git a/test-utils/src/test_netzone.rs b/test-utils/src/test_netzone.rs index f1f7582..791533c 100644 --- a/test-utils/src/test_netzone.rs +++ b/test-utils/src/test_netzone.rs @@ -86,9 +86,8 @@ impl, MoneroWireCodec>; type Listener = Pin< Box< - dyn Stream< - Item = Result<(Option, Self::Stream, Self::Sink), std::io::Error>, - > + Send + dyn Stream, Self::Stream, Self::Sink), Error>> + + Send + 'static, >, >; From a1267619ef02e4df51b87664cfcbd8ed5768228e Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 18 Sep 2024 18:18:31 -0400 Subject: [PATCH 060/104] p2p/address-book: enable workspace lints (#286) * address-book: enable workspace lints * fix * fixes --- Cargo.lock | 1 - p2p/address-book/Cargo.toml | 4 ++- p2p/address-book/src/book.rs | 26 +++++++---------- p2p/address-book/src/book/tests.rs | 4 +-- p2p/address-book/src/peer_list.rs | 39 ++++++++++++------------- p2p/address-book/src/peer_list/tests.rs | 24 +++++++-------- p2p/address-book/src/store.rs | 9 ++++-- 7 files changed, 52 insertions(+), 55 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3a435a3..80e70d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -504,7 +504,6 
@@ dependencies = [ "cuprate-p2p-core", "cuprate-pruning", "cuprate-test-utils", - "cuprate-wire", "futures", "indexmap", "rand", diff --git a/p2p/address-book/Cargo.toml b/p2p/address-book/Cargo.toml index 9cff78a..0871163 100644 --- a/p2p/address-book/Cargo.toml +++ b/p2p/address-book/Cargo.toml @@ -8,7 +8,6 @@ authors = ["Boog900"] [dependencies] cuprate-pruning = { path = "../../pruning" } -cuprate-wire = { path= "../../net/wire" } cuprate-p2p-core = { path = "../p2p-core" } tower = { workspace = true, features = ["util"] } @@ -29,3 +28,6 @@ borsh = { workspace = true, features = ["derive", "std"]} cuprate-test-utils = {path = "../../test-utils"} tokio = { workspace = true, features = ["rt-multi-thread", "macros"]} + +[lints] +workspace = true \ No newline at end of file diff --git a/p2p/address-book/src/book.rs b/p2p/address-book/src/book.rs index 2f0ce6d..9c22981 100644 --- a/p2p/address-book/src/book.rs +++ b/p2p/address-book/src/book.rs @@ -36,7 +36,7 @@ use crate::{ mod tests; /// An entry in the connected list. -pub struct ConnectionPeerEntry { +pub(crate) struct ConnectionPeerEntry { addr: Option, id: u64, handle: ConnectionHandle, @@ -109,14 +109,14 @@ impl AddressBook { match handle.poll_unpin(cx) { Poll::Pending => return, Poll::Ready(Ok(Err(e))) => { - tracing::error!("Could not save peer list to disk, got error: {}", e) + tracing::error!("Could not save peer list to disk, got error: {e}"); } Poll::Ready(Err(e)) => { if e.is_panic() { panic::resume_unwind(e.into_panic()) } } - _ => (), + Poll::Ready(_) => (), } } // the task is finished. 
@@ -144,6 +144,7 @@ impl AddressBook { let mut internal_addr_disconnected = Vec::new(); let mut addrs_to_ban = Vec::new(); + #[expect(clippy::iter_over_hash_type, reason = "ordering doesn't matter here")] for (internal_addr, peer) in &mut self.connected_peers { if let Some(time) = peer.handle.check_should_ban() { match internal_addr { @@ -158,7 +159,7 @@ impl AddressBook { } } - for (addr, time) in addrs_to_ban.into_iter() { + for (addr, time) in addrs_to_ban { self.ban_peer(addr, time); } @@ -172,12 +173,7 @@ impl AddressBook { .remove(&addr); // If the amount of peers with this ban id is 0 remove the whole set. - if self - .connected_peers_ban_id - .get(&addr.ban_id()) - .unwrap() - .is_empty() - { + if self.connected_peers_ban_id[&addr.ban_id()].is_empty() { self.connected_peers_ban_id.remove(&addr.ban_id()); } // remove the peer from the anchor list. @@ -188,7 +184,7 @@ impl AddressBook { fn ban_peer(&mut self, addr: Z::Addr, time: Duration) { if self.banned_peers.contains_key(&addr.ban_id()) { - tracing::error!("Tried to ban peer twice, this shouldn't happen.") + tracing::error!("Tried to ban peer twice, this shouldn't happen."); } if let Some(connected_peers_with_ban_id) = self.connected_peers_ban_id.get(&addr.ban_id()) { @@ -242,10 +238,10 @@ impl AddressBook { peer_list.retain_mut(|peer| { peer.adr.make_canonical(); - if !peer.adr.should_add_to_peer_list() { - false - } else { + if peer.adr.should_add_to_peer_list() { !self.is_peer_banned(&peer.adr) + } else { + false } // TODO: check rpc/ p2p ports not the same }); @@ -391,7 +387,7 @@ impl Service> for AddressBook { rpc_credits_per_hash, }, ) - .map(|_| AddressBookResponse::Ok), + .map(|()| AddressBookResponse::Ok), AddressBookRequest::IncomingPeerList(peer_list) => { self.handle_incoming_peer_list(peer_list); Ok(AddressBookResponse::Ok) diff --git a/p2p/address-book/src/book/tests.rs b/p2p/address-book/src/book/tests.rs index 1abea04..aefbd84 100644 --- a/p2p/address-book/src/book/tests.rs +++ 
b/p2p/address-book/src/book/tests.rs @@ -109,7 +109,7 @@ async fn add_new_peer_already_connected() { }, ), Err(AddressBookError::PeerAlreadyConnected) - ) + ); } #[tokio::test] @@ -143,5 +143,5 @@ async fn banned_peer_removed_from_peer_lists() { .unwrap() .into_inner(), TestNetZoneAddr(1) - ) + ); } diff --git a/p2p/address-book/src/peer_list.rs b/p2p/address-book/src/peer_list.rs index f0a905a..9b98a8a 100644 --- a/p2p/address-book/src/peer_list.rs +++ b/p2p/address-book/src/peer_list.rs @@ -7,31 +7,31 @@ use cuprate_p2p_core::{services::ZoneSpecificPeerListEntryBase, NetZoneAddress, use cuprate_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; #[cfg(test)] -pub mod tests; +pub(crate) mod tests; /// A Peer list in the address book. /// /// This could either be the white list or gray list. #[derive(Debug)] -pub struct PeerList { +pub(crate) struct PeerList { /// The peers with their peer data. pub peers: IndexMap>, /// An index of Pruning seed to address, so can quickly grab peers with the blocks /// we want. /// - /// Pruning seeds are sorted by first their log_stripes and then their stripe. + /// Pruning seeds are sorted by first their `log_stripes` and then their stripe. /// This means the first peers in this list will store more blocks than peers /// later on. So when we need a peer with a certain block we look at the peers /// storing more blocks first then work our way to the peers storing less. /// pruning_seeds: BTreeMap>, - /// A hashmap linking ban_ids to addresses. + /// A hashmap linking `ban_ids` to addresses. ban_ids: HashMap<::BanID, Vec>, } impl PeerList { /// Creates a new peer list. 
- pub fn new(list: Vec>) -> PeerList { + pub(crate) fn new(list: Vec>) -> Self { let mut peers = IndexMap::with_capacity(list.len()); let mut pruning_seeds = BTreeMap::new(); let mut ban_ids = HashMap::with_capacity(list.len()); @@ -49,7 +49,7 @@ impl PeerList { peers.insert(peer.adr, peer); } - PeerList { + Self { peers, pruning_seeds, ban_ids, @@ -57,21 +57,20 @@ impl PeerList { } /// Gets the length of the peer list - pub fn len(&self) -> usize { + pub(crate) fn len(&self) -> usize { self.peers.len() } /// Adds a new peer to the peer list - pub fn add_new_peer(&mut self, peer: ZoneSpecificPeerListEntryBase) { + pub(crate) fn add_new_peer(&mut self, peer: ZoneSpecificPeerListEntryBase) { if self.peers.insert(peer.adr, peer).is_none() { - // It's more clear with this - #[allow(clippy::unwrap_or_default)] + #[expect(clippy::unwrap_or_default, reason = "It's more clear with this")] self.pruning_seeds .entry(peer.pruning_seed) .or_insert_with(Vec::new) .push(peer.adr); - #[allow(clippy::unwrap_or_default)] + #[expect(clippy::unwrap_or_default)] self.ban_ids .entry(peer.adr.ban_id()) .or_insert_with(Vec::new) @@ -85,7 +84,7 @@ impl PeerList { /// list. /// /// The given peer will be removed from the peer list. - pub fn take_random_peer( + pub(crate) fn take_random_peer( &mut self, r: &mut R, block_needed: Option, @@ -127,7 +126,7 @@ impl PeerList { None } - pub fn get_random_peers( + pub(crate) fn get_random_peers( &self, r: &mut R, len: usize, @@ -142,7 +141,7 @@ impl PeerList { } /// Returns a mutable reference to a peer. - pub fn get_peer_mut( + pub(crate) fn get_peer_mut( &mut self, peer: &Z::Addr, ) -> Option<&mut ZoneSpecificPeerListEntryBase> { @@ -150,7 +149,7 @@ impl PeerList { } /// Returns true if the list contains this peer. 
- pub fn contains_peer(&self, peer: &Z::Addr) -> bool { + pub(crate) fn contains_peer(&self, peer: &Z::Addr) -> bool { self.peers.contains_key(peer) } @@ -189,11 +188,11 @@ impl PeerList { /// MUST NOT BE USED ALONE fn remove_peer_from_all_idxs(&mut self, peer: &ZoneSpecificPeerListEntryBase) { self.remove_peer_pruning_idx(peer); - self.remove_peer_ban_idx(peer) + self.remove_peer_ban_idx(peer); } /// Removes a peer from the peer list - pub fn remove_peer( + pub(crate) fn remove_peer( &mut self, peer: &Z::Addr, ) -> Option> { @@ -203,7 +202,7 @@ impl PeerList { } /// Removes all peers with a specific ban id. - pub fn remove_peers_with_ban_id(&mut self, ban_id: &::BanID) { + pub(crate) fn remove_peers_with_ban_id(&mut self, ban_id: &::BanID) { let Some(addresses) = self.ban_ids.get(ban_id) else { // No peers to ban return; @@ -217,8 +216,8 @@ impl PeerList { /// Tries to reduce the peer list to `new_len`. /// /// This function could keep the list bigger than `new_len` if `must_keep_peers`s length - /// is larger than new_len, in that case we will remove as much as we can. - pub fn reduce_list(&mut self, must_keep_peers: &HashSet, new_len: usize) { + /// is larger than `new_len`, in that case we will remove as much as we can. 
+ pub(crate) fn reduce_list(&mut self, must_keep_peers: &HashSet, new_len: usize) { if new_len >= self.len() { return; } diff --git a/p2p/address-book/src/peer_list/tests.rs b/p2p/address-book/src/peer_list/tests.rs index 8d2d220..4b13ae7 100644 --- a/p2p/address-book/src/peer_list/tests.rs +++ b/p2p/address-book/src/peer_list/tests.rs @@ -14,7 +14,7 @@ fn make_fake_peer( ) -> ZoneSpecificPeerListEntryBase { ZoneSpecificPeerListEntryBase { adr: TestNetZoneAddr(id), - id: id as u64, + id: u64::from(id), last_seen: 0, pruning_seed: PruningSeed::decompress(pruning_seed.unwrap_or(0)).unwrap(), rpc_port: 0, @@ -22,14 +22,14 @@ fn make_fake_peer( } } -pub fn make_fake_peer_list( +pub(crate) fn make_fake_peer_list( start_idx: u32, numb_o_peers: u32, ) -> PeerList> { let mut peer_list = Vec::with_capacity(numb_o_peers as usize); for idx in start_idx..(start_idx + numb_o_peers) { - peer_list.push(make_fake_peer(idx, None)) + peer_list.push(make_fake_peer(idx, None)); } PeerList::new(peer_list) @@ -50,7 +50,7 @@ fn make_fake_peer_list_with_random_pruning_seeds( } else { r.gen_range(384..=391) }), - )) + )); } PeerList::new(peer_list) } @@ -70,7 +70,7 @@ fn peer_list_reduce_length() { #[test] fn peer_list_reduce_length_with_peers_we_need() { let mut peer_list = make_fake_peer_list(0, 500); - let must_keep_peers = HashSet::from_iter(peer_list.peers.keys().copied()); + let must_keep_peers = peer_list.peers.keys().copied().collect::>(); let target_len = 49; @@ -92,7 +92,7 @@ fn peer_list_remove_specific_peer() { let peers = peer_list.peers; for (_, addrs) in pruning_idxs { - addrs.iter().for_each(|adr| assert_ne!(adr, &peer.adr)) + addrs.iter().for_each(|adr| assert_ne!(adr, &peer.adr)); } assert!(!peers.contains_key(&peer.adr)); @@ -104,13 +104,13 @@ fn peer_list_pruning_idxs_are_correct() { let mut total_len = 0; for (seed, list) in peer_list.pruning_seeds { - for peer in list.iter() { + for peer in &list { assert_eq!(peer_list.peers.get(peer).unwrap().pruning_seed, seed); 
total_len += 1; } } - assert_eq!(total_len, peer_list.peers.len()) + assert_eq!(total_len, peer_list.peers.len()); } #[test] @@ -122,11 +122,7 @@ fn peer_list_add_new_peer() { assert_eq!(peer_list.len(), 11); assert_eq!(peer_list.peers.get(&new_peer.adr), Some(&new_peer)); - assert!(peer_list - .pruning_seeds - .get(&new_peer.pruning_seed) - .unwrap() - .contains(&new_peer.adr)); + assert!(peer_list.pruning_seeds[&new_peer.pruning_seed].contains(&new_peer.adr)); } #[test] @@ -164,7 +160,7 @@ fn peer_list_get_peer_with_block() { assert!(peer .pruning_seed .get_next_unpruned_block(1, 1_000_000) - .is_ok()) + .is_ok()); } #[test] diff --git a/p2p/address-book/src/store.rs b/p2p/address-book/src/store.rs index abc42d6..07c117e 100644 --- a/p2p/address-book/src/store.rs +++ b/p2p/address-book/src/store.rs @@ -1,3 +1,8 @@ +#![expect( + single_use_lifetimes, + reason = "false positive on generated derive code on `SerPeerDataV1`" +)] + use std::fs; use borsh::{from_slice, to_vec, BorshDeserialize, BorshSerialize}; @@ -21,7 +26,7 @@ struct DeserPeerDataV1 { gray_list: Vec>, } -pub fn save_peers_to_disk( +pub(crate) fn save_peers_to_disk( cfg: &AddressBookConfig, white_list: &PeerList, gray_list: &PeerList, @@ -38,7 +43,7 @@ pub fn save_peers_to_disk( spawn_blocking(move || fs::write(&file, &data)) } -pub async fn read_peers_from_disk( +pub(crate) async fn read_peers_from_disk( cfg: &AddressBookConfig, ) -> Result< ( From e3a918bca57922d00857523ee06d6581016948f3 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 18 Sep 2024 18:19:32 -0400 Subject: [PATCH 061/104] wire: enable workspace lints (#291) * wire: enable workspace lints * revert match arm formatting --- net/wire/Cargo.toml | 4 +- net/wire/src/network_address.rs | 16 +- net/wire/src/network_address/epee_builder.rs | 4 +- net/wire/src/p2p.rs | 219 +++++++++---------- net/wire/src/p2p/admin.rs | 8 +- net/wire/src/p2p/common.rs | 16 +- net/wire/src/p2p/protocol.rs | 6 +- 7 files changed, 138 insertions(+), 135 
deletions(-) diff --git a/net/wire/Cargo.toml b/net/wire/Cargo.toml index cbeb551..0b77cf1 100644 --- a/net/wire/Cargo.toml +++ b/net/wire/Cargo.toml @@ -15,7 +15,7 @@ cuprate-levin = { path = "../levin" } cuprate-epee-encoding = { path = "../epee-encoding" } cuprate-fixed-bytes = { path = "../fixed-bytes" } cuprate-types = { path = "../../types", default-features = false, features = ["epee"] } -cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] } +cuprate-helper = { path = "../../helper", default-features = false, features = ["map"] } bitflags = { workspace = true, features = ["std"] } bytes = { workspace = true, features = ["std"] } @@ -24,3 +24,5 @@ thiserror = { workspace = true } [dev-dependencies] hex = { workspace = true, features = ["std"]} +[lints] +workspace = true diff --git a/net/wire/src/network_address.rs b/net/wire/src/network_address.rs index 632739a..ad599b7 100644 --- a/net/wire/src/network_address.rs +++ b/net/wire/src/network_address.rs @@ -51,38 +51,38 @@ impl EpeeObject for NetworkAddress { } impl NetworkAddress { - pub fn get_zone(&self) -> NetZone { + pub const fn get_zone(&self) -> NetZone { match self { - NetworkAddress::Clear(_) => NetZone::Public, + Self::Clear(_) => NetZone::Public, } } - pub fn is_loopback(&self) -> bool { + pub const fn is_loopback(&self) -> bool { // TODO false } - pub fn is_local(&self) -> bool { + pub const fn is_local(&self) -> bool { // TODO false } - pub fn port(&self) -> u16 { + pub const fn port(&self) -> u16 { match self { - NetworkAddress::Clear(ip) => ip.port(), + Self::Clear(ip) => ip.port(), } } } impl From for NetworkAddress { fn from(value: net::SocketAddrV4) -> Self { - NetworkAddress::Clear(value.into()) + Self::Clear(value.into()) } } impl From for NetworkAddress { fn from(value: net::SocketAddrV6) -> Self { - NetworkAddress::Clear(value.into()) + Self::Clear(value.into()) } } diff --git a/net/wire/src/network_address/epee_builder.rs 
b/net/wire/src/network_address/epee_builder.rs index 36db824..c1d1742 100644 --- a/net/wire/src/network_address/epee_builder.rs +++ b/net/wire/src/network_address/epee_builder.rs @@ -74,7 +74,7 @@ impl From for TaggedNetworkAddress { fn from(value: NetworkAddress) -> Self { match value { NetworkAddress::Clear(addr) => match addr { - SocketAddr::V4(addr) => TaggedNetworkAddress { + SocketAddr::V4(addr) => Self { ty: Some(1), addr: Some(AllFieldsNetworkAddress { m_ip: Some(u32::from_be_bytes(addr.ip().octets())), @@ -82,7 +82,7 @@ impl From for TaggedNetworkAddress { addr: None, }), }, - SocketAddr::V6(addr) => TaggedNetworkAddress { + SocketAddr::V6(addr) => Self { ty: Some(2), addr: Some(AllFieldsNetworkAddress { addr: Some(addr.ip().octets()), diff --git a/net/wire/src/p2p.rs b/net/wire/src/p2p.rs index 3829d17..a7cd784 100644 --- a/net/wire/src/p2p.rs +++ b/net/wire/src/p2p.rs @@ -55,27 +55,27 @@ pub enum LevinCommand { impl std::fmt::Display for LevinCommand { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - if let LevinCommand::Unknown(id) = self { - return f.write_str(&format!("unknown id: {}", id)); + if let Self::Unknown(id) = self { + return f.write_str(&format!("unknown id: {id}")); } f.write_str(match self { - LevinCommand::Handshake => "handshake", - LevinCommand::TimedSync => "timed sync", - LevinCommand::Ping => "ping", - LevinCommand::SupportFlags => "support flags", + Self::Handshake => "handshake", + Self::TimedSync => "timed sync", + Self::Ping => "ping", + Self::SupportFlags => "support flags", - LevinCommand::NewBlock => "new block", - LevinCommand::NewTransactions => "new transactions", - LevinCommand::GetObjectsRequest => "get objects request", - LevinCommand::GetObjectsResponse => "get objects response", - LevinCommand::ChainRequest => "chain request", - LevinCommand::ChainResponse => "chain response", - LevinCommand::NewFluffyBlock => "new fluffy block", - LevinCommand::FluffyMissingTxsRequest => "fluffy missing transaction 
request", - LevinCommand::GetTxPoolCompliment => "get transaction pool compliment", + Self::NewBlock => "new block", + Self::NewTransactions => "new transactions", + Self::GetObjectsRequest => "get objects request", + Self::GetObjectsResponse => "get objects response", + Self::ChainRequest => "chain request", + Self::ChainResponse => "chain response", + Self::NewFluffyBlock => "new fluffy block", + Self::FluffyMissingTxsRequest => "fluffy missing transaction request", + Self::GetTxPoolCompliment => "get transaction pool compliment", - LevinCommand::Unknown(_) => unreachable!(), + Self::Unknown(_) => unreachable!(), }) } } @@ -83,50 +83,51 @@ impl std::fmt::Display for LevinCommand { impl LevinCommandTrait for LevinCommand { fn bucket_size_limit(&self) -> u64 { // https://github.com/monero-project/monero/blob/00fd416a99686f0956361d1cd0337fe56e58d4a7/src/cryptonote_basic/connection_context.cpp#L37 + #[expect(clippy::match_same_arms, reason = "formatting is more clear")] match self { - LevinCommand::Handshake => 65536, - LevinCommand::TimedSync => 65536, - LevinCommand::Ping => 4096, - LevinCommand::SupportFlags => 4096, + Self::Handshake => 65536, + Self::TimedSync => 65536, + Self::Ping => 4096, + Self::SupportFlags => 4096, - LevinCommand::NewBlock => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though) - LevinCommand::NewTransactions => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though) - LevinCommand::GetObjectsRequest => 1024 * 1024 * 2, // 2 MB - LevinCommand::GetObjectsResponse => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though) - LevinCommand::ChainRequest => 512 * 1024, // 512 kB - LevinCommand::ChainResponse => 1024 * 1024 * 4, // 4 MB - LevinCommand::NewFluffyBlock => 1024 * 1024 * 4, // 4 MB - LevinCommand::FluffyMissingTxsRequest => 1024 * 1024, // 1 MB - LevinCommand::GetTxPoolCompliment => 1024 * 1024 * 4, // 4 MB + Self::NewBlock => 1024 * 1024 * 128, // 128 MB (max packet is a 
bit less than 100 MB though) + Self::NewTransactions => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though) + Self::GetObjectsRequest => 1024 * 1024 * 2, // 2 MB + Self::GetObjectsResponse => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though) + Self::ChainRequest => 512 * 1024, // 512 kB + Self::ChainResponse => 1024 * 1024 * 4, // 4 MB + Self::NewFluffyBlock => 1024 * 1024 * 4, // 4 MB + Self::FluffyMissingTxsRequest => 1024 * 1024, // 1 MB + Self::GetTxPoolCompliment => 1024 * 1024 * 4, // 4 MB - LevinCommand::Unknown(_) => u64::MAX, + Self::Unknown(_) => u64::MAX, } } fn is_handshake(&self) -> bool { - matches!(self, LevinCommand::Handshake) + matches!(self, Self::Handshake) } } impl From for LevinCommand { fn from(value: u32) -> Self { match value { - 1001 => LevinCommand::Handshake, - 1002 => LevinCommand::TimedSync, - 1003 => LevinCommand::Ping, - 1007 => LevinCommand::SupportFlags, + 1001 => Self::Handshake, + 1002 => Self::TimedSync, + 1003 => Self::Ping, + 1007 => Self::SupportFlags, - 2001 => LevinCommand::NewBlock, - 2002 => LevinCommand::NewTransactions, - 2003 => LevinCommand::GetObjectsRequest, - 2004 => LevinCommand::GetObjectsResponse, - 2006 => LevinCommand::ChainRequest, - 2007 => LevinCommand::ChainResponse, - 2008 => LevinCommand::NewFluffyBlock, - 2009 => LevinCommand::FluffyMissingTxsRequest, - 2010 => LevinCommand::GetTxPoolCompliment, + 2001 => Self::NewBlock, + 2002 => Self::NewTransactions, + 2003 => Self::GetObjectsRequest, + 2004 => Self::GetObjectsResponse, + 2006 => Self::ChainRequest, + 2007 => Self::ChainResponse, + 2008 => Self::NewFluffyBlock, + 2009 => Self::FluffyMissingTxsRequest, + 2010 => Self::GetTxPoolCompliment, - x => LevinCommand::Unknown(x), + x => Self::Unknown(x), } } } @@ -191,19 +192,19 @@ pub enum ProtocolMessage { } impl ProtocolMessage { - pub fn command(&self) -> LevinCommand { + pub const fn command(&self) -> LevinCommand { use LevinCommand as C; match self { - 
ProtocolMessage::NewBlock(_) => C::NewBlock, - ProtocolMessage::NewFluffyBlock(_) => C::NewFluffyBlock, - ProtocolMessage::GetObjectsRequest(_) => C::GetObjectsRequest, - ProtocolMessage::GetObjectsResponse(_) => C::GetObjectsResponse, - ProtocolMessage::ChainRequest(_) => C::ChainRequest, - ProtocolMessage::ChainEntryResponse(_) => C::ChainResponse, - ProtocolMessage::NewTransactions(_) => C::NewTransactions, - ProtocolMessage::FluffyMissingTransactionsRequest(_) => C::FluffyMissingTxsRequest, - ProtocolMessage::GetTxPoolCompliment(_) => C::GetTxPoolCompliment, + Self::NewBlock(_) => C::NewBlock, + Self::NewFluffyBlock(_) => C::NewFluffyBlock, + Self::GetObjectsRequest(_) => C::GetObjectsRequest, + Self::GetObjectsResponse(_) => C::GetObjectsResponse, + Self::ChainRequest(_) => C::ChainRequest, + Self::ChainEntryResponse(_) => C::ChainResponse, + Self::NewTransactions(_) => C::NewTransactions, + Self::FluffyMissingTransactionsRequest(_) => C::FluffyMissingTxsRequest, + Self::GetTxPoolCompliment(_) => C::GetTxPoolCompliment, } } @@ -230,26 +231,26 @@ impl ProtocolMessage { use LevinCommand as C; match self { - ProtocolMessage::NewBlock(val) => build_message(C::NewBlock, val, builder)?, - ProtocolMessage::NewTransactions(val) => { - build_message(C::NewTransactions, val, builder)? + Self::NewBlock(val) => build_message(C::NewBlock, val, builder)?, + Self::NewTransactions(val) => { + build_message(C::NewTransactions, val, builder)?; } - ProtocolMessage::GetObjectsRequest(val) => { - build_message(C::GetObjectsRequest, val, builder)? + Self::GetObjectsRequest(val) => { + build_message(C::GetObjectsRequest, val, builder)?; } - ProtocolMessage::GetObjectsResponse(val) => { - build_message(C::GetObjectsResponse, val, builder)? 
+ Self::GetObjectsResponse(val) => { + build_message(C::GetObjectsResponse, val, builder)?; } - ProtocolMessage::ChainRequest(val) => build_message(C::ChainRequest, val, builder)?, - ProtocolMessage::ChainEntryResponse(val) => { - build_message(C::ChainResponse, val, builder)? + Self::ChainRequest(val) => build_message(C::ChainRequest, val, builder)?, + Self::ChainEntryResponse(val) => { + build_message(C::ChainResponse, val, builder)?; } - ProtocolMessage::NewFluffyBlock(val) => build_message(C::NewFluffyBlock, val, builder)?, - ProtocolMessage::FluffyMissingTransactionsRequest(val) => { - build_message(C::FluffyMissingTxsRequest, val, builder)? + Self::NewFluffyBlock(val) => build_message(C::NewFluffyBlock, val, builder)?, + Self::FluffyMissingTransactionsRequest(val) => { + build_message(C::FluffyMissingTxsRequest, val, builder)?; } - ProtocolMessage::GetTxPoolCompliment(val) => { - build_message(C::GetTxPoolCompliment, val, builder)? + Self::GetTxPoolCompliment(val) => { + build_message(C::GetTxPoolCompliment, val, builder)?; } } Ok(()) @@ -265,14 +266,14 @@ pub enum AdminRequestMessage { } impl AdminRequestMessage { - pub fn command(&self) -> LevinCommand { + pub const fn command(&self) -> LevinCommand { use LevinCommand as C; match self { - AdminRequestMessage::Handshake(_) => C::Handshake, - AdminRequestMessage::Ping => C::Ping, - AdminRequestMessage::SupportFlags => C::SupportFlags, - AdminRequestMessage::TimedSync(_) => C::TimedSync, + Self::Handshake(_) => C::Handshake, + Self::Ping => C::Ping, + Self::SupportFlags => C::SupportFlags, + Self::TimedSync(_) => C::TimedSync, } } @@ -286,13 +287,13 @@ impl AdminRequestMessage { cuprate_epee_encoding::from_bytes::(buf) .map_err(|e| BucketError::BodyDecodingError(e.into()))?; - AdminRequestMessage::Ping + Self::Ping } C::SupportFlags => { cuprate_epee_encoding::from_bytes::(buf) .map_err(|e| BucketError::BodyDecodingError(e.into()))?; - AdminRequestMessage::SupportFlags + Self::SupportFlags } _ => return 
Err(BucketError::UnknownCommand), }) @@ -302,11 +303,11 @@ impl AdminRequestMessage { use LevinCommand as C; match self { - AdminRequestMessage::Handshake(val) => build_message(C::Handshake, val, builder)?, - AdminRequestMessage::TimedSync(val) => build_message(C::TimedSync, val, builder)?, - AdminRequestMessage::Ping => build_message(C::Ping, EmptyMessage, builder)?, - AdminRequestMessage::SupportFlags => { - build_message(C::SupportFlags, EmptyMessage, builder)? + Self::Handshake(val) => build_message(C::Handshake, val, builder)?, + Self::TimedSync(val) => build_message(C::TimedSync, val, builder)?, + Self::Ping => build_message(C::Ping, EmptyMessage, builder)?, + Self::SupportFlags => { + build_message(C::SupportFlags, EmptyMessage, builder)?; } } Ok(()) @@ -322,14 +323,14 @@ pub enum AdminResponseMessage { } impl AdminResponseMessage { - pub fn command(&self) -> LevinCommand { + pub const fn command(&self) -> LevinCommand { use LevinCommand as C; match self { - AdminResponseMessage::Handshake(_) => C::Handshake, - AdminResponseMessage::Ping(_) => C::Ping, - AdminResponseMessage::SupportFlags(_) => C::SupportFlags, - AdminResponseMessage::TimedSync(_) => C::TimedSync, + Self::Handshake(_) => C::Handshake, + Self::Ping(_) => C::Ping, + Self::SupportFlags(_) => C::SupportFlags, + Self::TimedSync(_) => C::TimedSync, } } @@ -349,11 +350,11 @@ impl AdminResponseMessage { use LevinCommand as C; match self { - AdminResponseMessage::Handshake(val) => build_message(C::Handshake, val, builder)?, - AdminResponseMessage::TimedSync(val) => build_message(C::TimedSync, val, builder)?, - AdminResponseMessage::Ping(val) => build_message(C::Ping, val, builder)?, - AdminResponseMessage::SupportFlags(val) => { - build_message(C::SupportFlags, val, builder)? 
+ Self::Handshake(val) => build_message(C::Handshake, val, builder)?, + Self::TimedSync(val) => build_message(C::TimedSync, val, builder)?, + Self::Ping(val) => build_message(C::Ping, val, builder)?, + Self::SupportFlags(val) => { + build_message(C::SupportFlags, val, builder)?; } } Ok(()) @@ -368,23 +369,23 @@ pub enum Message { } impl Message { - pub fn is_request(&self) -> bool { - matches!(self, Message::Request(_)) + pub const fn is_request(&self) -> bool { + matches!(self, Self::Request(_)) } - pub fn is_response(&self) -> bool { - matches!(self, Message::Response(_)) + pub const fn is_response(&self) -> bool { + matches!(self, Self::Response(_)) } - pub fn is_protocol(&self) -> bool { - matches!(self, Message::Protocol(_)) + pub const fn is_protocol(&self) -> bool { + matches!(self, Self::Protocol(_)) } - pub fn command(&self) -> LevinCommand { + pub const fn command(&self) -> LevinCommand { match self { - Message::Request(mes) => mes.command(), - Message::Response(mes) => mes.command(), - Message::Protocol(mes) => mes.command(), + Self::Request(mes) => mes.command(), + Self::Response(mes) => mes.command(), + Self::Protocol(mes) => mes.command(), } } } @@ -398,27 +399,25 @@ impl LevinBody for Message { command: LevinCommand, ) -> Result { Ok(match typ { - MessageType::Request => Message::Request(AdminRequestMessage::decode(body, command)?), - MessageType::Response => { - Message::Response(AdminResponseMessage::decode(body, command)?) 
- } - MessageType::Notification => Message::Protocol(ProtocolMessage::decode(body, command)?), + MessageType::Request => Self::Request(AdminRequestMessage::decode(body, command)?), + MessageType::Response => Self::Response(AdminResponseMessage::decode(body, command)?), + MessageType::Notification => Self::Protocol(ProtocolMessage::decode(body, command)?), }) } fn encode(self, builder: &mut BucketBuilder) -> Result<(), BucketError> { match self { - Message::Protocol(pro) => { + Self::Protocol(pro) => { builder.set_message_type(MessageType::Notification); builder.set_return_code(0); pro.build(builder) } - Message::Request(req) => { + Self::Request(req) => { builder.set_message_type(MessageType::Request); builder.set_return_code(0); req.build(builder) } - Message::Response(res) => { + Self::Response(res) => { builder.set_message_type(MessageType::Response); builder.set_return_code(1); res.build(builder) diff --git a/net/wire/src/p2p/admin.rs b/net/wire/src/p2p/admin.rs index 173c293..67a8e21 100644 --- a/net/wire/src/p2p/admin.rs +++ b/net/wire/src/p2p/admin.rs @@ -45,7 +45,7 @@ pub struct HandshakeResponse { pub node_data: BasicNodeData, /// Core Sync Data pub payload_data: CoreSyncData, - /// PeerList + /// `PeerList` pub local_peerlist_new: Vec, } @@ -56,7 +56,7 @@ epee_object!( local_peerlist_new: Vec, ); -/// A TimedSync Request +/// A `TimedSync` Request #[derive(Debug, Clone, PartialEq, Eq)] pub struct TimedSyncRequest { /// Core Sync Data @@ -68,12 +68,12 @@ epee_object!( payload_data: CoreSyncData, ); -/// A TimedSync Response +/// A `TimedSync` Response #[derive(Debug, Clone, PartialEq, Eq)] pub struct TimedSyncResponse { /// Core Sync Data pub payload_data: CoreSyncData, - /// PeerList + /// `PeerList` pub local_peerlist_new: Vec, } diff --git a/net/wire/src/p2p/common.rs b/net/wire/src/p2p/common.rs index d585d07..d95a620 100644 --- a/net/wire/src/p2p/common.rs +++ b/net/wire/src/p2p/common.rs @@ -18,6 +18,7 @@ use bitflags::bitflags; use 
cuprate_epee_encoding::epee_object; +use cuprate_helper::map::split_u128_into_low_high_bits; pub use cuprate_types::{BlockCompleteEntry, PrunedTxBlobEntry, TransactionBlobs}; use crate::NetworkAddress; @@ -34,7 +35,7 @@ bitflags! { impl From for PeerSupportFlags { fn from(value: u32) -> Self { - PeerSupportFlags(value) + Self(value) } } @@ -113,16 +114,17 @@ epee_object! { } impl CoreSyncData { - pub fn new( + pub const fn new( cumulative_difficulty_128: u128, current_height: u64, pruning_seed: u32, top_id: [u8; 32], top_version: u8, - ) -> CoreSyncData { - let cumulative_difficulty = cumulative_difficulty_128 as u64; - let cumulative_difficulty_top64 = (cumulative_difficulty_128 >> 64) as u64; - CoreSyncData { + ) -> Self { + let (cumulative_difficulty, cumulative_difficulty_top64) = + split_u128_into_low_high_bits(cumulative_difficulty_128); + + Self { cumulative_difficulty, cumulative_difficulty_top64, current_height, @@ -139,7 +141,7 @@ impl CoreSyncData { } } -/// PeerListEntryBase, information kept on a peer which will be entered +/// `PeerListEntryBase`, information kept on a peer which will be entered /// in a peer list/store. 
#[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct PeerListEntryBase { diff --git a/net/wire/src/p2p/protocol.rs b/net/wire/src/p2p/protocol.rs index 73694d5..1d1d45a 100644 --- a/net/wire/src/p2p/protocol.rs +++ b/net/wire/src/p2p/protocol.rs @@ -127,7 +127,7 @@ pub struct ChainResponse { impl ChainResponse { #[inline] - pub fn cumulative_difficulty(&self) -> u128 { + pub const fn cumulative_difficulty(&self) -> u128 { let cumulative_difficulty = self.cumulative_difficulty_top64 as u128; cumulative_difficulty << 64 | self.cumulative_difficulty_low64 as u128 } @@ -159,7 +159,7 @@ epee_object!( current_blockchain_height: u64, ); -/// A request for Txs we are missing from our TxPool +/// A request for Txs we are missing from our `TxPool` #[derive(Debug, Clone, PartialEq, Eq)] pub struct FluffyMissingTransactionsRequest { /// The Block we are missing the Txs in @@ -177,7 +177,7 @@ epee_object!( missing_tx_indices: Vec as ContainerAsBlob, ); -/// TxPoolCompliment +/// `TxPoolCompliment` #[derive(Debug, Clone, PartialEq, Eq)] pub struct GetTxPoolCompliment { /// Tx Hashes From 4169c45c5850662c942bf7be838e40ecbcc0d59c Mon Sep 17 00:00:00 2001 From: Boog900 Date: Thu, 19 Sep 2024 16:55:28 +0100 Subject: [PATCH 062/104] Blockchain: add alt-block handling (#260) * add new tables & types * add function to fully add an alt block * resolve current todo!s * add new requests * WIP: starting re-orgs * add last service request * commit Cargo.lock * add test * more docs + cleanup + alt blocks request * clippy + fmt * document types * move tx_fee to helper * more doc updates * fmt * fix imports * fix fee * Apply suggestions from code review Co-authored-by: hinto-janai * remove default features from `cuprate-helper` * review fixes * fix find_block * add a test and fix some issues in chain history * fix clippy * fmt * Apply suggestions from code review Co-authored-by: hinto-janai * add dev dep * cargo update * move `flush_alt_blocks` * review fixes * more review fixes * fix 
clippy * remove INVARIANT comments --------- Co-authored-by: hinto-janai --- Cargo.lock | 840 +++++++----------- helper/Cargo.toml | 8 +- helper/src/lib.rs | 2 + helper/src/tx.rs | 70 ++ storage/blockchain/Cargo.toml | 3 +- storage/blockchain/src/ops/alt_block/block.rs | 337 +++++++ storage/blockchain/src/ops/alt_block/chain.rs | 117 +++ storage/blockchain/src/ops/alt_block/mod.rs | 58 ++ storage/blockchain/src/ops/alt_block/tx.rs | 76 ++ storage/blockchain/src/ops/block.rs | 104 ++- storage/blockchain/src/ops/macros.rs | 22 + storage/blockchain/src/ops/mod.rs | 3 +- storage/blockchain/src/service/free.rs | 45 +- storage/blockchain/src/service/mod.rs | 2 +- storage/blockchain/src/service/read.rs | 121 ++- storage/blockchain/src/service/tests.rs | 96 +- storage/blockchain/src/service/write.rs | 163 +++- storage/blockchain/src/tables.rs | 39 +- storage/blockchain/src/tests.rs | 19 + storage/blockchain/src/types.rs | 261 +++++- test-utils/Cargo.toml | 2 +- test-utils/src/data/mod.rs | 6 +- test-utils/src/data/statics.rs | 40 +- test-utils/src/rpc/client.rs | 13 +- types/src/blockchain.rs | 61 +- types/src/types.rs | 8 +- 26 files changed, 1864 insertions(+), 652 deletions(-) create mode 100644 helper/src/tx.rs create mode 100644 storage/blockchain/src/ops/alt_block/block.rs create mode 100644 storage/blockchain/src/ops/alt_block/chain.rs create mode 100644 storage/blockchain/src/ops/alt_block/mod.rs create mode 100644 storage/blockchain/src/ops/alt_block/tx.rs diff --git a/Cargo.lock b/Cargo.lock index 80e70d7..61fa2a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,19 +4,13 @@ version = 3 [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ "gimli", ] -[[package]] -name = "adler" -version = "1.0.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - [[package]] name = "adler2" version = "2.0.0" @@ -52,9 +46,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "anyhow" @@ -81,18 +75,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] name = "async-trait" -version = "0.1.80" +version = "0.1.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] @@ -164,17 +158,17 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", - "miniz_oxide 0.7.3", + "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -215,9 +209,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" dependencies = [ 
"bytemuck", "serde", @@ -273,7 +267,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", "syn_derive", ] @@ -285,22 +279,22 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytemuck" -version = "1.16.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5" +checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ee891b04274a59bd38b412188e24b849617b2e45a0fd8d057deb63e7403761b" +checksum = "0cc8b54b395f2fcfbb3d90c47b01c7f444d94d05bdeb775811dec868ac3bbc26" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] @@ -311,18 +305,21 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" dependencies = [ "serde", ] [[package]] name = "cc" -version = "1.0.99" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c51067fd44124faa7f870b4b1c969379ad32b2ba805aa959430ceaa384f695" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" +dependencies = [ + "shlex", +] [[package]] name = "cfg-if" @@ -345,14 +342,14 @@ dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] name = "clap" -version = "4.5.7" +version = "4.5.17" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5db83dced34638ad474f39f250d7fea9598bdd239eaced1bdf45d597da0f433f" +checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" dependencies = [ "clap_builder", "clap_derive", @@ -360,9 +357,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.7" +version = "4.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7e204572485eb3fbf28f871612191521df159bc3e15a9f5064c66dba3a8c05f" +checksum = "8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" dependencies = [ "anstyle", "clap_lex", @@ -370,21 +367,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.5" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c780290ccf4fb26629baa7a1081e68ced113f1d3ec302fa5948f1c381ebf06c6" +checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] name = "clap_lex" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "core-foundation" @@ -398,15 +395,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = 
"608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -528,7 +525,7 @@ dependencies = [ name = "cuprate-blockchain" version = "0.0.0" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "bytemuck", "cuprate-database", "cuprate-database-service", @@ -542,6 +539,7 @@ dependencies = [ "monero-serai", "pretty_assertions", "proptest", + "rand", "rayon", "serde", "tempfile", @@ -700,6 +698,7 @@ version = "0.1.0" dependencies = [ "chrono", "crossbeam", + "curve25519-dalek", "dirs", "futures", "libc", @@ -723,7 +722,7 @@ dependencies = [ name = "cuprate-levin" version = "0.1.0" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "bytes", "cuprate-helper", "futures", @@ -862,7 +861,7 @@ dependencies = [ name = "cuprate-txpool" version = "0.0.0" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "bytemuck", "cuprate-database", "cuprate-database-service", @@ -899,7 +898,7 @@ dependencies = [ name = "cuprate-wire" version = "0.1.0" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "bytes", "cuprate-epee-encoding", "cuprate-fixed-bytes", @@ -916,7 +915,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "bitflags 2.5.0", + "bitflags 2.6.0", "borsh", "bytemuck", "bytes", @@ -1002,7 +1001,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] @@ -1085,17 +1084,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "displaydoc" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", -] - [[package]] name = "doxygen-rs" version = "0.4.2" @@ -1107,9 +1095,9 @@ dependencies = [ [[package]] name = "either" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "equivalent" @@ -1129,9 +1117,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "ff" @@ -1157,7 +1145,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", - "miniz_oxide 0.8.0", + "miniz_oxide", ] [[package]] @@ -1238,7 +1226,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] @@ -1294,9 +1282,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "group" @@ -1345,11 +1333,11 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "heed" -version = "0.20.2" +version = "0.20.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f60d7cff16094be9627830b399c087a25017e93fb3768b87cd656a68ccb1ebe8" +checksum = "7d4f449bab7320c56003d37732a917e18798e2f1709d80263face2b4f9436ddb" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "byteorder", "heed-traits", "heed-types", @@ -1370,9 +1358,9 @@ checksum = "eb3130048d404c57ce5a1ac61a903696e8fcde7e8c2991e9fcfc1f27c3ef74ff" [[package]] name = "heed-types" -version = "0.20.0" +version = "0.20.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb0d6ba3700c9a57e83c013693e3eddb68a6d9b6781cacafc62a0d992e8ddb3" +checksum = "9d3f528b053a6d700b2734eabcd0fd49cb8230647aa72958467527b0b7917114" dependencies = [ "bincode", "byteorder", @@ -1412,9 +1400,9 @@ dependencies = [ [[package]] name = "http-body" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http", @@ -1435,9 +1423,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.3" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0e7a4dd27b9476dc40cb050d3632d3bba3a70ddbff012285f7f8559a1e7e545" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" [[package]] name = "httpdate" @@ -1447,9 +1435,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "1.3.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ "bytes", "futures-channel", @@ -1468,9 +1456,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http", @@ -1486,9 +1474,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.5" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-channel", @@ -1506,9 +1494,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -1527,141 +1515,21 @@ dependencies = [ "cc", ] -[[package]] -name = "icu_collections" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" -dependencies = [ - "displaydoc", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_locid" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" -dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", -] - -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" - -[[package]] -name = "icu_normalizer" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" -dependencies = [ - 
"displaydoc", - "icu_collections", - "icu_normalizer_data", - "icu_properties", - "icu_provider", - "smallvec", - "utf16_iter", - "utf8_iter", - "write16", - "zerovec", -] - -[[package]] -name = "icu_normalizer_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" - -[[package]] -name = "icu_properties" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f8ac670d7422d7f76b32e17a5db556510825b29ec9154f235977c9caba61036" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_locid_transform", - "icu_properties_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_properties_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" - -[[package]] -name = "icu_provider" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr", - "writeable", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", -] - [[package]] name = "idna" -version = "1.0.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4716a3a0933a1d01c2f72450e89596eb51dd34ef3c211ccd875acdf1f8fe47ed" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ - "icu_normalizer", - "icu_properties", - "smallvec", - "utf8_iter", + "unicode-bidi", + 
"unicode-normalization", ] [[package]] name = "indexmap" -version = "2.2.6" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ "equivalent", "hashbrown", @@ -1675,9 +1543,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] @@ -1693,15 +1561,15 @@ dependencies = [ [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.155" +version = "0.2.158" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" [[package]] name = "libm" @@ -1715,7 +1583,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "libc", ] @@ -1725,17 +1593,11 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" -[[package]] -name = "litemap" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" - [[package]] name = "lmdb-master-sys" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5142795c220effa4c8f4813537bd4c88113a07e45e93100ccb2adc5cec6c7f3" +checksum = "472c3760e2a8d0f61f322fb36788021bb36d573c502b50fa3e2bcaac3ec326c9" dependencies = [ "cc", "doxygen-rs", @@ -1754,9 +1616,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "matchit" @@ -1776,9 +1638,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.2" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "merlin" @@ -1798,15 +1660,6 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" -[[package]] -name = "miniz_oxide" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.8.0" @@ -1818,13 +1671,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.11" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ + "hermit-abi", "libc", "wasi", - "windows-sys 0.48.0", + 
"windows-sys 0.52.0", ] [[package]] @@ -1996,21 +1850,11 @@ dependencies = [ "libm", ] -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi", - "libc", -] - [[package]] name = "object" -version = "0.36.0" +version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "576dfe1fc8f9df304abb159d767a29d0476f7750fbf8aa7ad07816004a207434" +checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" dependencies = [ "memchr", ] @@ -2063,7 +1907,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -2108,7 +1952,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] @@ -2137,7 +1981,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] @@ -2154,15 +1998,18 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] [[package]] name = "pretty_assertions" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" dependencies = [ "diff", "yansi", @@ -2170,9 +2017,9 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.1.0" +version = "3.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ "toml_edit", ] @@ -2202,22 +2049,22 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.85" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] [[package]] name = "proptest" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" +checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.5.0", + "bitflags 2.6.0", "lazy_static", "num-traits", "rand", @@ -2248,9 +2095,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -2342,27 +2189,27 @@ dependencies = [ [[package]] name = "redb" -version = "2.1.1" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6dd20d3cdeb9c7d2366a0b16b93b35b75aec15309fbeb7ce477138c9f68c8c0" +checksum = "e4760ad04a88ef77075ba86ba9ea79b919e6bab29c1764c5747237cd6eaedcaa" dependencies = [ "libc", ] [[package]] name = "redox_syscall" -version = "0.5.1" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", ] [[package]] name = "redox_users" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom", "libredox", @@ -2386,7 +2233,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] @@ -2418,20 +2265,20 @@ checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] [[package]] name = "rustix" -version = "0.38.34" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", @@ -2440,9 +2287,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "log", "once_cell", @@ -2455,9 +2302,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = 
"0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" +checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" dependencies = [ "openssl-probe", "rustls-pemfile", @@ -2468,9 +2315,9 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.2" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" dependencies = [ "base64", "rustls-pki-types", @@ -2484,9 +2331,9 @@ checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring", "rustls-pki-types", @@ -2519,11 +2366,11 @@ checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2534,11 +2381,11 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "security-framework" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" +checksum = 
"897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", @@ -2547,9 +2394,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" +checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" dependencies = [ "core-foundation-sys", "libc", @@ -2563,9 +2410,9 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.203" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -2581,22 +2428,23 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] name = "serde_json" -version = "1.0.117" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] @@ -2644,6 +2492,12 @@ dependencies = [ "keccak", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "signal-hook-registry" version = "1.4.2" @@ -2703,12 +2557,6 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "std-shims" version = "0.1.1" @@ -2720,9 +2568,9 @@ dependencies = [ [[package]] name = "subtle" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" @@ -2737,9 +2585,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.66" +version = "2.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" +checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" dependencies = [ "proc-macro2", "quote", @@ -2755,7 +2603,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] @@ -2779,17 +2627,6 @@ dependencies = [ "crossbeam-queue", ] -[[package]] -name = "synstructure" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", -] - [[package]] name = "tap" version = "1.0.1" @@ -2798,34 +2635,35 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.10.1" +version = "3.12.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", "fastrand", + "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "thiserror" -version = "1.0.61" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] @@ -2839,43 +2677,47 @@ dependencies = [ ] [[package]] -name = "tinystr" -version = "0.7.6" +name = "tinyvec" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ - "displaydoc", - "zerovec", + "tinyvec_macros", ] [[package]] -name = "tokio" -version = "1.38.0" +name = "tinyvec_macros" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.40.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ 
"backtrace", "bytes", "libc", "mio", - "num_cpus", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] @@ -2891,9 +2733,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -2916,9 +2758,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -2931,15 +2773,15 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" [[package]] name = "toml_edit" -version = "0.21.1" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ "indexmap", 
"toml_datetime", @@ -2965,15 +2807,15 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -2995,7 +2837,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] @@ -3035,10 +2877,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] -name = "unicode-ident" -version = "1.0.12" +name = "unicode-bidi" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" + +[[package]] +name = "unicode-ident" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" + +[[package]] +name = "unicode-normalization" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" +dependencies = [ + "tinyvec", +] [[package]] name = "untrusted" @@ -3066,32 +2923,20 @@ dependencies = [ [[package]] name = "url" -version = "2.5.1" +version = "2.5.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c25da092f0a868cdf09e8674cd3b7ef3a7d92a24253e663a2fb85e2496de56" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", "percent-encoding", ] -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - -[[package]] -name = "utf8_iter" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" - [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "wait-timeout" @@ -3119,34 +2964,35 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3154,22 +3000,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "webpki-roots" @@ -3204,12 +3050,12 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.57.0" +version = "0.58.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" dependencies = [ - "windows-core 0.57.0", - "windows-targets 0.52.5", + "windows-core 0.58.0", + "windows-targets 0.52.6", ] [[package]] @@ -3218,50 +3064,61 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] name = "windows-core" -version = "0.57.0" +version = "0.58.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" dependencies = [ "windows-implement", "windows-interface", "windows-result", - "windows-targets 0.52.5", + "windows-strings", + "windows-targets 0.52.6", ] [[package]] name = "windows-implement" -version = "0.57.0" +version = "0.58.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] name = "windows-interface" -version = "0.57.0" +version = "0.58.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", + "syn 2.0.77", ] [[package]] name = "windows-result" -version = "0.1.2" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", ] [[package]] @@ -3279,7 +3136,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - 
"windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -3299,18 +3165,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.5", - "windows_aarch64_msvc 0.52.5", - "windows_i686_gnu 0.52.5", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.5", - "windows_x86_64_gnu 0.52.5", - "windows_x86_64_gnullvm 0.52.5", - "windows_x86_64_msvc 0.52.5", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -3321,9 +3187,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -3333,9 +3199,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +checksum = 
"09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -3345,15 +3211,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -3363,9 +3229,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -3375,9 +3241,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -3387,9 +3253,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -3399,31 +3265,19 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.5.40" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" dependencies = [ "memchr", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - -[[package]] -name = "writeable" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" - [[package]] name = "wyz" version = "0.5.1" @@ -3435,73 +3289,29 @@ dependencies = [ [[package]] name = "yansi" -version = "0.5.1" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" - -[[package]] -name = "yoke" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" -dependencies = [ - "serde", - "stable_deref_trait", - "yoke-derive", - "zerofrom", -] - -[[package]] -name = "yoke-derive" -version = "0.7.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", - "synstructure", -] +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "zerocopy" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ + "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", -] - -[[package]] -name = "zerofrom" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" -dependencies = [ - "zerofrom-derive", -] - -[[package]] -name = "zerofrom-derive" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", - "synstructure", + "syn 2.0.77", ] [[package]] @@ -3521,27 +3331,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.66", -] - -[[package]] -name = "zerovec" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" -dependencies = [ - "yoke", - "zerofrom", - "zerovec-derive", -] - -[[package]] 
-name = "zerovec-derive" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", + "syn 2.0.77", ] diff --git a/helper/Cargo.toml b/helper/Cargo.toml index c74e40f..baa3f23 100644 --- a/helper/Cargo.toml +++ b/helper/Cargo.toml @@ -9,8 +9,8 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/consensus" [features] -# All features on by default. -default = ["std", "atomic", "asynch", "cast", "fs", "num", "map", "time", "thread", "constants"] +# All features off by default. +default = [] std = [] atomic = ["dep:crossbeam"] asynch = ["dep:futures", "dep:rayon"] @@ -21,6 +21,7 @@ num = [] map = ["cast", "dep:monero-serai"] time = ["dep:chrono", "std"] thread = ["std", "dep:target_os_lib"] +tx = ["dep:monero-serai"] [dependencies] crossbeam = { workspace = true, optional = true } @@ -39,7 +40,8 @@ target_os_lib = { package = "windows", version = ">=0.51", features = ["Win32_Sy target_os_lib = { package = "libc", version = "0.2.151", optional = true } [dev-dependencies] -tokio = { workspace = true, features = ["full"] } +tokio = { workspace = true, features = ["full"] } +curve25519-dalek = { workspace = true } [lints] workspace = true \ No newline at end of file diff --git a/helper/src/lib.rs b/helper/src/lib.rs index de0d955..f29c499 100644 --- a/helper/src/lib.rs +++ b/helper/src/lib.rs @@ -31,6 +31,8 @@ pub mod thread; #[cfg(feature = "time")] pub mod time; +#[cfg(feature = "tx")] +pub mod tx; //---------------------------------------------------------------------------------------------------- Private Usage //---------------------------------------------------------------------------------------------------- diff --git a/helper/src/tx.rs b/helper/src/tx.rs new file mode 100644 index 0000000..53706ec --- /dev/null +++ b/helper/src/tx.rs @@ -0,0 +1,70 @@ +//! 
Utils for working with [`Transaction`] + +use monero_serai::transaction::{Input, Transaction}; + +/// Calculates the fee of the [`Transaction`]. +/// +/// # Panics +/// This will panic if the inputs overflow or the transaction outputs too much, so should only +/// be used on known to be valid txs. +pub fn tx_fee(tx: &Transaction) -> u64 { + let mut fee = 0_u64; + + match &tx { + Transaction::V1 { prefix, .. } => { + for input in &prefix.inputs { + match input { + Input::Gen(_) => return 0, + Input::ToKey { amount, .. } => { + fee = fee.checked_add(amount.unwrap_or(0)).unwrap(); + } + } + } + + for output in &prefix.outputs { + fee = fee.checked_sub(output.amount.unwrap_or(0)).unwrap(); + } + } + Transaction::V2 { proofs, .. } => { + fee = proofs.as_ref().unwrap().base.fee; + } + }; + + fee +} + +#[cfg(test)] +mod test { + use curve25519_dalek::{edwards::CompressedEdwardsY, EdwardsPoint}; + use monero_serai::transaction::{NotPruned, Output, Timelock, TransactionPrefix}; + + use super::*; + + #[test] + #[should_panic(expected = "called `Option::unwrap()` on a `None` value")] + fn tx_fee_panic() { + let input = Input::ToKey { + amount: Some(u64::MAX), + key_offsets: vec![], + key_image: EdwardsPoint::default(), + }; + + let output = Output { + amount: Some(u64::MAX), + key: CompressedEdwardsY::default(), + view_tag: None, + }; + + let tx = Transaction::::V1 { + prefix: TransactionPrefix { + additional_timelock: Timelock::None, + inputs: vec![input; 2], + outputs: vec![output], + extra: vec![], + }, + signatures: vec![], + }; + + tx_fee(&tx); + } +} diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index e039903..46b8414 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -25,11 +25,12 @@ cuprate-database = { path = "../database" } cuprate-database-service = { path = "../service" } cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] } cuprate-types = { path = "../../types", features = 
["blockchain"] } +cuprate-pruning = { path = "../../pruning" } bitflags = { workspace = true, features = ["std", "serde", "bytemuck"] } bytemuck = { workspace = true, features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } curve25519-dalek = { workspace = true } -cuprate-pruning = { path = "../../pruning" } +rand = { workspace = true } monero-serai = { workspace = true, features = ["std"] } serde = { workspace = true, optional = true } diff --git a/storage/blockchain/src/ops/alt_block/block.rs b/storage/blockchain/src/ops/alt_block/block.rs new file mode 100644 index 0000000..6bd01cb --- /dev/null +++ b/storage/blockchain/src/ops/alt_block/block.rs @@ -0,0 +1,337 @@ +use bytemuck::TransparentWrapper; +use monero_serai::block::{Block, BlockHeader}; + +use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec}; +use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits}; +use cuprate_types::{AltBlockInformation, Chain, ChainId, ExtendedBlockHeader, HardFork}; + +use crate::{ + ops::{ + alt_block::{add_alt_transaction_blob, get_alt_transaction, update_alt_chain_info}, + block::get_block_info, + macros::doc_error, + }, + tables::{Tables, TablesMut}, + types::{AltBlockHeight, BlockHash, BlockHeight, CompactAltBlockInfo}, +}; + +/// Flush all alt-block data from all the alt-block tables. +/// +/// This function completely empties the alt block tables. +pub fn flush_alt_blocks<'a, E: cuprate_database::EnvInner<'a>>( + env_inner: &E, + tx_rw: &mut E::Rw<'_>, +) -> Result<(), RuntimeError> { + use crate::tables::{ + AltBlockBlobs, AltBlockHeights, AltBlocksInfo, AltChainInfos, AltTransactionBlobs, + AltTransactionInfos, + }; + + env_inner.clear_db::(tx_rw)?; + env_inner.clear_db::(tx_rw)?; + env_inner.clear_db::(tx_rw)?; + env_inner.clear_db::(tx_rw)?; + env_inner.clear_db::(tx_rw)?; + env_inner.clear_db::(tx_rw) +} + +/// Add a [`AltBlockInformation`] to the database. 
+/// +/// This extracts all the data from the input block and +/// maps/adds them to the appropriate database tables. +/// +#[doc = doc_error!()] +/// +/// # Panics +/// This function will panic if: +/// - `alt_block.height` is == `0` +/// - `alt_block.txs.len()` != `alt_block.block.transactions.len()` +/// +pub fn add_alt_block( + alt_block: &AltBlockInformation, + tables: &mut impl TablesMut, +) -> Result<(), RuntimeError> { + let alt_block_height = AltBlockHeight { + chain_id: alt_block.chain_id.into(), + height: alt_block.height, + }; + + tables + .alt_block_heights_mut() + .put(&alt_block.block_hash, &alt_block_height)?; + + update_alt_chain_info(&alt_block_height, &alt_block.block.header.previous, tables)?; + + let (cumulative_difficulty_low, cumulative_difficulty_high) = + split_u128_into_low_high_bits(alt_block.cumulative_difficulty); + + let alt_block_info = CompactAltBlockInfo { + block_hash: alt_block.block_hash, + pow_hash: alt_block.pow_hash, + height: alt_block.height, + weight: alt_block.weight, + long_term_weight: alt_block.long_term_weight, + cumulative_difficulty_low, + cumulative_difficulty_high, + }; + + tables + .alt_blocks_info_mut() + .put(&alt_block_height, &alt_block_info)?; + + tables.alt_block_blobs_mut().put( + &alt_block_height, + StorableVec::wrap_ref(&alt_block.block_blob), + )?; + + assert_eq!(alt_block.txs.len(), alt_block.block.transactions.len()); + for tx in &alt_block.txs { + add_alt_transaction_blob(tx, tables)?; + } + + Ok(()) +} + +/// Retrieves an [`AltBlockInformation`] from the database. +/// +/// This function will look at only the blocks with the given [`AltBlockHeight::chain_id`], no others +/// even if they are technically part of this chain. 
+#[doc = doc_error!()] +pub fn get_alt_block( + alt_block_height: &AltBlockHeight, + tables: &impl Tables, +) -> Result { + let block_info = tables.alt_blocks_info().get(alt_block_height)?; + + let block_blob = tables.alt_block_blobs().get(alt_block_height)?.0; + + let block = Block::read(&mut block_blob.as_slice())?; + + let txs = block + .transactions + .iter() + .map(|tx_hash| get_alt_transaction(tx_hash, tables)) + .collect::>()?; + + Ok(AltBlockInformation { + block, + block_blob, + txs, + block_hash: block_info.block_hash, + pow_hash: block_info.pow_hash, + height: block_info.height, + weight: block_info.weight, + long_term_weight: block_info.long_term_weight, + cumulative_difficulty: combine_low_high_bits_to_u128( + block_info.cumulative_difficulty_low, + block_info.cumulative_difficulty_high, + ), + chain_id: alt_block_height.chain_id.into(), + }) +} + +/// Retrieves the hash of the block at the given `block_height` on the alt chain with +/// the given [`ChainId`]. +/// +/// This function will get blocks from the whole chain, for example if you were to ask for height +/// `0` with any [`ChainId`] (as long that chain actually exists) you will get the main chain genesis. +/// +#[doc = doc_error!()] +pub fn get_alt_block_hash( + block_height: &BlockHeight, + alt_chain: ChainId, + tables: &impl Tables, +) -> Result { + let alt_chains = tables.alt_chain_infos(); + + // First find what [`ChainId`] this block would be stored under. + let original_chain = { + let mut chain = alt_chain.into(); + loop { + let chain_info = alt_chains.get(&chain)?; + + if chain_info.common_ancestor_height < *block_height { + break Chain::Alt(chain.into()); + } + + match chain_info.parent_chain.into() { + Chain::Main => break Chain::Main, + Chain::Alt(alt_chain_id) => { + chain = alt_chain_id.into(); + continue; + } + } + } + }; + + // Get the block hash. 
+ match original_chain { + Chain::Main => { + get_block_info(block_height, tables.block_infos()).map(|info| info.block_hash) + } + Chain::Alt(chain_id) => tables + .alt_blocks_info() + .get(&AltBlockHeight { + chain_id: chain_id.into(), + height: *block_height, + }) + .map(|info| info.block_hash), + } +} + +/// Retrieves the [`ExtendedBlockHeader`] of the alt-block with an exact [`AltBlockHeight`]. +/// +/// This function will look at only the blocks with the given [`AltBlockHeight::chain_id`], no others +/// even if they are technically part of this chain. +/// +#[doc = doc_error!()] +pub fn get_alt_block_extended_header_from_height( + height: &AltBlockHeight, + table: &impl Tables, +) -> Result { + let block_info = table.alt_blocks_info().get(height)?; + + let block_blob = table.alt_block_blobs().get(height)?.0; + + let block_header = BlockHeader::read(&mut block_blob.as_slice())?; + + Ok(ExtendedBlockHeader { + version: HardFork::from_version(block_header.hardfork_version) + .expect("Block in DB must have correct version"), + vote: block_header.hardfork_version, + timestamp: block_header.timestamp, + cumulative_difficulty: combine_low_high_bits_to_u128( + block_info.cumulative_difficulty_low, + block_info.cumulative_difficulty_high, + ), + block_weight: block_info.weight, + long_term_weight: block_info.long_term_weight, + }) +} + +#[cfg(test)] +mod tests { + use std::num::NonZero; + + use cuprate_database::{Env, EnvInner, TxRw}; + use cuprate_test_utils::data::{BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3}; + use cuprate_types::{Chain, ChainId}; + + use crate::{ + ops::{ + alt_block::{ + add_alt_block, flush_alt_blocks, get_alt_block, + get_alt_block_extended_header_from_height, get_alt_block_hash, + get_alt_chain_history_ranges, + }, + block::{add_block, pop_block}, + }, + tables::{OpenTables, Tables}, + tests::{assert_all_tables_are_empty, map_verified_block_to_alt, tmp_concrete_env}, + types::AltBlockHeight, + }; + + #[expect(clippy::range_plus_one)] + #[test] + 
fn all_alt_blocks() { + let (env, _tmp) = tmp_concrete_env(); + let env_inner = env.env_inner(); + assert_all_tables_are_empty(&env); + + let chain_id = ChainId(NonZero::new(1).unwrap()); + + // Add initial block. + { + let tx_rw = env_inner.tx_rw().unwrap(); + let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap(); + + let mut initial_block = BLOCK_V1_TX2.clone(); + initial_block.height = 0; + + add_block(&initial_block, &mut tables).unwrap(); + + drop(tables); + TxRw::commit(tx_rw).unwrap(); + } + + let alt_blocks = [ + map_verified_block_to_alt(BLOCK_V9_TX3.clone(), chain_id), + map_verified_block_to_alt(BLOCK_V16_TX0.clone(), chain_id), + ]; + + // Add alt-blocks + { + let tx_rw = env_inner.tx_rw().unwrap(); + let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap(); + + let mut prev_hash = BLOCK_V1_TX2.block_hash; + for (i, mut alt_block) in alt_blocks.into_iter().enumerate() { + let height = i + 1; + + alt_block.height = height; + alt_block.block.header.previous = prev_hash; + alt_block.block_blob = alt_block.block.serialize(); + + add_alt_block(&alt_block, &mut tables).unwrap(); + + let alt_height = AltBlockHeight { + chain_id: chain_id.into(), + height, + }; + + let alt_block_2 = get_alt_block(&alt_height, &tables).unwrap(); + assert_eq!(alt_block.block, alt_block_2.block); + + let headers = get_alt_chain_history_ranges( + 0..(height + 1), + chain_id, + tables.alt_chain_infos(), + ) + .unwrap(); + + assert_eq!(headers.len(), 2); + assert_eq!(headers[1], (Chain::Main, 0..1)); + assert_eq!(headers[0], (Chain::Alt(chain_id), 1..(height + 1))); + + prev_hash = alt_block.block_hash; + + let header = + get_alt_block_extended_header_from_height(&alt_height, &tables).unwrap(); + + assert_eq!(header.timestamp, alt_block.block.header.timestamp); + assert_eq!(header.block_weight, alt_block.weight); + assert_eq!(header.long_term_weight, alt_block.long_term_weight); + assert_eq!( + header.cumulative_difficulty, + alt_block.cumulative_difficulty + ); + 
assert_eq!( + header.version.as_u8(), + alt_block.block.header.hardfork_version + ); + assert_eq!(header.vote, alt_block.block.header.hardfork_signal); + + let block_hash = get_alt_block_hash(&height, chain_id, &tables).unwrap(); + + assert_eq!(block_hash, alt_block.block_hash); + } + + drop(tables); + TxRw::commit(tx_rw).unwrap(); + } + + { + let mut tx_rw = env_inner.tx_rw().unwrap(); + + flush_alt_blocks(&env_inner, &mut tx_rw).unwrap(); + + let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap(); + pop_block(None, &mut tables).unwrap(); + + drop(tables); + TxRw::commit(tx_rw).unwrap(); + } + + assert_all_tables_are_empty(&env); + } +} diff --git a/storage/blockchain/src/ops/alt_block/chain.rs b/storage/blockchain/src/ops/alt_block/chain.rs new file mode 100644 index 0000000..5b5f3cb --- /dev/null +++ b/storage/blockchain/src/ops/alt_block/chain.rs @@ -0,0 +1,117 @@ +use std::cmp::{max, min}; + +use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError}; +use cuprate_types::{Chain, ChainId}; + +use crate::{ + ops::macros::{doc_add_alt_block_inner_invariant, doc_error}, + tables::{AltChainInfos, TablesMut}, + types::{AltBlockHeight, AltChainInfo, BlockHash, BlockHeight}, +}; + +/// Updates the [`AltChainInfo`] with information on a new alt-block. +/// +#[doc = doc_add_alt_block_inner_invariant!()] +#[doc = doc_error!()] +/// +/// # Panics +/// +/// This will panic if [`AltBlockHeight::height`] == `0`. +pub fn update_alt_chain_info( + alt_block_height: &AltBlockHeight, + prev_hash: &BlockHash, + tables: &mut impl TablesMut, +) -> Result<(), RuntimeError> { + let parent_chain = match tables.alt_block_heights().get(prev_hash) { + Ok(alt_parent_height) => Chain::Alt(alt_parent_height.chain_id.into()), + Err(RuntimeError::KeyNotFound) => Chain::Main, + Err(e) => return Err(e), + }; + + // try update the info if one exists for this chain. 
+ let update = tables + .alt_chain_infos_mut() + .update(&alt_block_height.chain_id, |mut info| { + if info.chain_height < alt_block_height.height + 1 { + // If the chain height is increasing we only need to update the chain height. + info.chain_height = alt_block_height.height + 1; + } else { + // If the chain height is not increasing we are popping blocks and need to update the + // split point. + info.common_ancestor_height = alt_block_height.height.checked_sub(1).unwrap(); + info.parent_chain = parent_chain.into(); + } + + info.chain_height = alt_block_height.height + 1; + Some(info) + }); + + match update { + Ok(()) => return Ok(()), + Err(RuntimeError::KeyNotFound) => (), + Err(e) => return Err(e), + } + + // If one doesn't already exist add it. + + tables.alt_chain_infos_mut().put( + &alt_block_height.chain_id, + &AltChainInfo { + parent_chain: parent_chain.into(), + common_ancestor_height: alt_block_height.height.checked_sub(1).unwrap(), + chain_height: alt_block_height.height + 1, + }, + ) +} + +/// Get the height history of an alt-chain in reverse chronological order. +/// +/// Height history is a list of height ranges with the corresponding [`Chain`] they are stored under. +/// For example if your range goes from height `0` the last entry in the list will be [`Chain::Main`] +/// upto the height where the first split occurs. 
+#[doc = doc_error!()]
+pub fn get_alt_chain_history_ranges(
+    range: std::ops::Range<BlockHeight>,
+    alt_chain: ChainId,
+    alt_chain_infos: &impl DatabaseRo<AltChainInfos>,
+) -> Result<Vec<(Chain, std::ops::Range<BlockHeight>)>, RuntimeError> {
+    let mut ranges = Vec::with_capacity(5);
+
+    let mut i = range.end;
+    let mut current_chain_id = alt_chain.into();
+    while i > range.start {
+        let chain_info = alt_chain_infos.get(&current_chain_id)?;
+
+        let start_height = max(range.start, chain_info.common_ancestor_height + 1);
+        let end_height = min(i, chain_info.chain_height);
+
+        ranges.push((
+            Chain::Alt(current_chain_id.into()),
+            start_height..end_height,
+        ));
+        i = chain_info.common_ancestor_height + 1;
+
+        match chain_info.parent_chain.into() {
+            Chain::Main => {
+                ranges.push((Chain::Main, range.start..i));
+                break;
+            }
+            Chain::Alt(alt_chain_id) => {
+                let alt_chain_id = alt_chain_id.into();
+
+                // This shouldn't be possible to hit, however in a test with custom (invalid) block data
+                // this caused an infinite loop.
+                if alt_chain_id == current_chain_id {
+                    return Err(RuntimeError::Io(std::io::Error::other(
+                        "Loop detected in ChainIDs, invalid alt chain.",
+                    )));
+                }
+
+                current_chain_id = alt_chain_id;
+                continue;
+            }
+        }
+    }
+
+    Ok(ranges)
+}
diff --git a/storage/blockchain/src/ops/alt_block/mod.rs b/storage/blockchain/src/ops/alt_block/mod.rs
new file mode 100644
index 0000000..1654d27
--- /dev/null
+++ b/storage/blockchain/src/ops/alt_block/mod.rs
@@ -0,0 +1,58 @@
+//! Alternative Block/Chain Ops
+//!
+//! Alternative chains are chains that potentially have more proof-of-work than the main-chain
+//! which we are tracking to potentially re-org to.
+//!
+//! Cuprate uses an ID system for alt-chains. When a split is made from the main-chain we generate
+//! a random [`ChainID`](cuprate_types::ChainId) and assign it to the chain:
+//!
+//! ```text
+//!      |
+//!      |
+//!      |   split
+//!      |-------------
+//!      |            |
+//!      |            |
+//!     \|/          \|/
+//! main-chain    ChainID(X)
+//! ```
+//!
+//!
In that example if we were to receive an alt-block which immediately follows the top block of `ChainID(X)` +//! then that block will also be stored under `ChainID(X)`. However, if it follows from another block from `ChainID(X)` +//! we will split into a chain with a different ID: +//! +//! ```text +//! | +//! | +//! | split +//! |------------- +//! | | split +//! | |-------------| +//! | | | +//! | | | +//! | | | +//! \|/ \|/ \|/ +//! main-chain ChainID(X) ChainID(Z) +//! ``` +//! +//! As you can see if we wanted to get all the alt-blocks in `ChainID(Z)` that now includes some blocks from `ChainID(X)` as well. +//! [`get_alt_chain_history_ranges`] covers this and is the method to get the ranges of heights needed from each [`ChainID`](cuprate_types::ChainId) +//! to get all the alt-blocks in a given [`ChainID`](cuprate_types::ChainId). +//! +//! Although this should be kept in mind as a possibility, because Cuprate's block downloader will only track a single chain it is +//! unlikely that we will be tracking [`ChainID`](cuprate_types::ChainId)s that don't immediately connect to the main-chain. +//! +//! ## Why not use the block's `previous` field? +//! +//! Although that would be easier, it makes getting a range of block extremely slow, as we have to build the weight cache to verify +//! blocks, roughly 100,000 block headers needed, this cost is too high. 
+mod block; +mod chain; +mod tx; + +pub use block::{ + add_alt_block, flush_alt_blocks, get_alt_block, get_alt_block_extended_header_from_height, + get_alt_block_hash, +}; +pub use chain::{get_alt_chain_history_ranges, update_alt_chain_info}; +pub use tx::{add_alt_transaction_blob, get_alt_transaction}; diff --git a/storage/blockchain/src/ops/alt_block/tx.rs b/storage/blockchain/src/ops/alt_block/tx.rs new file mode 100644 index 0000000..4185c6c --- /dev/null +++ b/storage/blockchain/src/ops/alt_block/tx.rs @@ -0,0 +1,76 @@ +use bytemuck::TransparentWrapper; +use monero_serai::transaction::Transaction; + +use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec}; +use cuprate_types::VerifiedTransactionInformation; + +use crate::{ + ops::macros::{doc_add_alt_block_inner_invariant, doc_error}, + tables::{Tables, TablesMut}, + types::{AltTransactionInfo, TxHash}, +}; + +/// Adds a [`VerifiedTransactionInformation`] from an alt-block +/// if it is not already in the DB. +/// +/// If the transaction is in the main-chain this function will still fill in the +/// [`AltTransactionInfos`](crate::tables::AltTransactionInfos) table, as that +/// table holds data which we don't keep around for main-chain txs. +/// +#[doc = doc_add_alt_block_inner_invariant!()] +#[doc = doc_error!()] +pub fn add_alt_transaction_blob( + tx: &VerifiedTransactionInformation, + tables: &mut impl TablesMut, +) -> Result<(), RuntimeError> { + tables.alt_transaction_infos_mut().put( + &tx.tx_hash, + &AltTransactionInfo { + tx_weight: tx.tx_weight, + fee: tx.fee, + tx_hash: tx.tx_hash, + }, + )?; + + if tables.tx_ids().get(&tx.tx_hash).is_ok() + || tables.alt_transaction_blobs().get(&tx.tx_hash).is_ok() + { + return Ok(()); + } + + tables + .alt_transaction_blobs_mut() + .put(&tx.tx_hash, StorableVec::wrap_ref(&tx.tx_blob))?; + + Ok(()) +} + +/// Retrieve a [`VerifiedTransactionInformation`] from the database. 
+///
+#[doc = doc_error!()]
+pub fn get_alt_transaction(
+    tx_hash: &TxHash,
+    tables: &impl Tables,
+) -> Result<VerifiedTransactionInformation, RuntimeError> {
+    let tx_info = tables.alt_transaction_infos().get(tx_hash)?;
+
+    let tx_blob = match tables.alt_transaction_blobs().get(tx_hash) {
+        Ok(blob) => blob.0,
+        Err(RuntimeError::KeyNotFound) => {
+            let tx_id = tables.tx_ids().get(tx_hash)?;
+
+            let blob = tables.tx_blobs().get(&tx_id)?;
+
+            blob.0
+        }
+        Err(e) => return Err(e),
+    };
+
+    Ok(VerifiedTransactionInformation {
+        tx: Transaction::read(&mut tx_blob.as_slice()).unwrap(),
+        tx_blob,
+        tx_weight: tx_info.tx_weight,
+        fee: tx_info.fee,
+        tx_hash: tx_info.tx_hash,
+    })
+}
diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs
index 5fc72fc..af81348 100644
--- a/storage/blockchain/src/ops/block.rs
+++ b/storage/blockchain/src/ops/block.rs
@@ -2,16 +2,23 @@
 //---------------------------------------------------------------------------------------------------- Import
 use bytemuck::TransparentWrapper;
-use monero_serai::block::Block;
+use monero_serai::block::{Block, BlockHeader};
 
 use cuprate_database::{
     RuntimeError, StorableVec,
     {DatabaseRo, DatabaseRw},
 };
-use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits};
-use cuprate_types::{ExtendedBlockHeader, HardFork, VerifiedBlockInformation};
+use cuprate_helper::{
+    map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits},
+    tx::tx_fee,
+};
+use cuprate_types::{
+    AltBlockInformation, ChainId, ExtendedBlockHeader, HardFork, VerifiedBlockInformation,
+    VerifiedTransactionInformation,
+};
 
 use crate::{
     ops::{
+        alt_block,
         blockchain::{chain_height, cumulative_generated_coins},
         macros::doc_error,
         output::get_rct_num_outputs,
@@ -33,11 +40,6 @@
 /// This function will panic if:
 /// - `block.height > u32::MAX` (not normally possible)
 /// - `block.height` is not != [`chain_height`]
-///
-/// # Already exists
-/// This function will operate normally even if `block` already
-/// exists, i.e., this function will not return `Err` even if you -/// call this function infinitely with the same block. // no inline, too big. pub fn add_block( block: &VerifiedBlockInformation, @@ -107,9 +109,8 @@ pub fn add_block( cumulative_rct_outs, timestamp: block.block.header.timestamp, block_hash: block.block_hash, - // INVARIANT: #[cfg] @ lib.rs asserts `usize == u64` - weight: block.weight as u64, - long_term_weight: block.long_term_weight as u64, + weight: block.weight, + long_term_weight: block.long_term_weight, }, )?; @@ -130,23 +131,24 @@ pub fn add_block( /// Remove the top/latest block from the database. /// /// The removed block's data is returned. +/// +/// If a [`ChainId`] is specified the popped block will be added to the alt block tables under +/// that [`ChainId`]. Otherwise, the block will be completely removed from the DB. #[doc = doc_error!()] /// /// In `pop_block()`'s case, [`RuntimeError::KeyNotFound`] /// will be returned if there are no blocks left. // no inline, too big pub fn pop_block( + move_to_alt_chain: Option, tables: &mut impl TablesMut, ) -> Result<(BlockHeight, BlockHash, Block), RuntimeError> { //------------------------------------------------------ Block Info // Remove block data from tables. - let (block_height, block_hash) = { - let (block_height, block_info) = tables.block_infos_mut().pop_last()?; - (block_height, block_info.block_hash) - }; + let (block_height, block_info) = tables.block_infos_mut().pop_last()?; // Block heights. - tables.block_heights_mut().delete(&block_hash)?; + tables.block_heights_mut().delete(&block_info.block_hash)?; // Block blobs. 
// We deserialize the block blob into a `Block`, such @@ -156,11 +158,52 @@ pub fn pop_block( //------------------------------------------------------ Transaction / Outputs / Key Images remove_tx(&block.miner_transaction.hash(), tables)?; - for tx_hash in &block.transactions { - remove_tx(tx_hash, tables)?; + + let remove_tx_iter = block.transactions.iter().map(|tx_hash| { + let (_, tx) = remove_tx(tx_hash, tables)?; + Ok::<_, RuntimeError>(tx) + }); + + if let Some(chain_id) = move_to_alt_chain { + let txs = remove_tx_iter + .map(|result| { + let tx = result?; + Ok(VerifiedTransactionInformation { + tx_weight: tx.weight(), + tx_blob: tx.serialize(), + tx_hash: tx.hash(), + fee: tx_fee(&tx), + tx, + }) + }) + .collect::, RuntimeError>>()?; + + alt_block::add_alt_block( + &AltBlockInformation { + block: block.clone(), + block_blob, + txs, + block_hash: block_info.block_hash, + // We know the PoW is valid for this block so just set it so it will always verify as valid. + pow_hash: [0; 32], + height: block_height, + weight: block_info.weight, + long_term_weight: block_info.long_term_weight, + cumulative_difficulty: combine_low_high_bits_to_u128( + block_info.cumulative_difficulty_low, + block_info.cumulative_difficulty_high, + ), + chain_id, + }, + tables, + )?; + } else { + for result in remove_tx_iter { + drop(result?); + } } - Ok((block_height, block_hash, block)) + Ok((block_height, block_info.block_hash, block)) } //---------------------------------------------------------------------------------------------------- `get_block_extended_header_*` @@ -194,25 +237,21 @@ pub fn get_block_extended_header_from_height( ) -> Result { let block_info = tables.block_infos().get(block_height)?; let block_blob = tables.block_blobs().get(block_height)?.0; - let block = Block::read(&mut block_blob.as_slice())?; + let block_header = BlockHeader::read(&mut block_blob.as_slice())?; let cumulative_difficulty = combine_low_high_bits_to_u128( block_info.cumulative_difficulty_low, 
block_info.cumulative_difficulty_high, ); - #[expect( - clippy::cast_possible_truncation, - reason = "INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`" - )] Ok(ExtendedBlockHeader { cumulative_difficulty, - version: HardFork::from_version(block.header.hardfork_version) + version: HardFork::from_version(block_header.hardfork_version) .expect("Stored block must have a valid hard-fork"), - vote: block.header.hardfork_signal, - timestamp: block.header.timestamp, - block_weight: block_info.weight as usize, - long_term_weight: block_info.long_term_weight as usize, + vote: block_header.hardfork_signal, + timestamp: block_header.timestamp, + block_weight: block_info.weight, + long_term_weight: block_info.long_term_weight, }) } @@ -272,14 +311,14 @@ mod test { use cuprate_database::{Env, EnvInner, TxRw}; use cuprate_test_utils::data::{BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3}; - use super::*; - use crate::{ ops::tx::{get_tx, tx_exists}, tables::OpenTables, tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen}, }; + use super::*; + /// Tests all above block functions. /// /// Note that this doesn't test the correctness of values added, as the @@ -414,7 +453,8 @@ mod test { for block_hash in block_hashes.into_iter().rev() { println!("pop_block(): block_hash: {}", hex::encode(block_hash)); - let (_popped_height, popped_hash, _popped_block) = pop_block(&mut tables).unwrap(); + let (_popped_height, popped_hash, _popped_block) = + pop_block(None, &mut tables).unwrap(); assert_eq!(block_hash, popped_hash); diff --git a/storage/blockchain/src/ops/macros.rs b/storage/blockchain/src/ops/macros.rs index b7cdba4..18ec506 100644 --- a/storage/blockchain/src/ops/macros.rs +++ b/storage/blockchain/src/ops/macros.rs @@ -31,3 +31,25 @@ When calling this function, ensure that either: }; } pub(super) use doc_add_block_inner_invariant; + +/// Generate `# Invariant` documentation for internal alt block `fn`'s +/// that should be called directly with caution. 
+/// +/// This is pretty much the same as [`doc_add_block_inner_invariant`], +/// it's not worth the effort to reduce the duplication. +macro_rules! doc_add_alt_block_inner_invariant { + () => { + r#"# ⚠️ Invariant ⚠️ +This function mainly exists to be used internally by the parent function [`crate::ops::alt_block::add_alt_block`]. + +`add_alt_block()` makes sure all data related to the input is mutated, while +this function _does not_, it specifically mutates _particular_ tables. + +This is usually undesired - although this function is still available to call directly. + +When calling this function, ensure that either: +1. This effect (incomplete database mutation) is what is desired, or that... +2. ...the other tables will also be mutated to a correct state"# + }; +} +pub(super) use doc_add_alt_block_inner_invariant; diff --git a/storage/blockchain/src/ops/mod.rs b/storage/blockchain/src/ops/mod.rs index 4ff7dff..285aa24 100644 --- a/storage/blockchain/src/ops/mod.rs +++ b/storage/blockchain/src/ops/mod.rs @@ -94,7 +94,7 @@ //! // Read the data, assert it is correct. //! let tx_rw = env_inner.tx_rw()?; //! let mut tables = env_inner.open_tables_mut(&tx_rw)?; -//! let (height, hash, serai_block) = pop_block(&mut tables)?; +//! let (height, hash, serai_block) = pop_block(None, &mut tables)?; //! //! assert_eq!(height, 0); //! assert_eq!(serai_block, block.block); @@ -102,6 +102,7 @@ //! # Ok(()) } //! 
```
+pub mod alt_block;
 pub mod block;
 pub mod blockchain;
 pub mod key_image;
diff --git a/storage/blockchain/src/service/free.rs b/storage/blockchain/src/service/free.rs
index 2e7c908..d8a878c 100644
--- a/storage/blockchain/src/service/free.rs
+++ b/storage/blockchain/src/service/free.rs
@@ -4,11 +4,14 @@
 use std::sync::Arc;
 
 use cuprate_database::{ConcreteEnv, InitError};
+use cuprate_types::{AltBlockInformation, VerifiedBlockInformation};
 
-use crate::service::{init_read_service, init_write_service};
 use crate::{
     config::Config,
-    service::types::{BlockchainReadHandle, BlockchainWriteHandle},
+    service::{
+        init_read_service, init_write_service,
+        types::{BlockchainReadHandle, BlockchainWriteHandle},
+    },
 };
 
 //---------------------------------------------------------------------------------------------------- Init
@@ -81,6 +84,44 @@ pub(super) const fn compact_history_genesis_not_included<const INITIAL
     top_block_height > INITIAL_BLOCKS && !(top_block_height - INITIAL_BLOCKS + 2).is_power_of_two()
 }
 
+//---------------------------------------------------------------------------------------------------- Map Block
+/// Maps [`AltBlockInformation`] to [`VerifiedBlockInformation`]
+///
+/// # Panics
+/// This will panic if the block is invalid, so should only be used on blocks that have been popped from
+/// the main-chain.
+pub(super) fn map_valid_alt_block_to_verified_block(
+    alt_block: AltBlockInformation,
+) -> VerifiedBlockInformation {
+    let total_fees = alt_block.txs.iter().map(|tx| tx.fee).sum::<u64>();
+    let total_miner_output = alt_block
+        .block
+        .miner_transaction
+        .prefix()
+        .outputs
+        .iter()
+        .map(|out| out.amount.unwrap_or(0))
+        .sum::<u64>();
+
+    VerifiedBlockInformation {
+        block: alt_block.block,
+        block_blob: alt_block.block_blob,
+        txs: alt_block
+            .txs
+            .into_iter()
+            .map(TryInto::try_into)
+            .collect::<Result<_, _>>()
+            .unwrap(),
+        block_hash: alt_block.block_hash,
+        pow_hash: alt_block.pow_hash,
+        height: alt_block.height,
+        generated_coins: total_miner_output - total_fees,
+        weight: alt_block.weight,
+        long_term_weight: alt_block.long_term_weight,
+        cumulative_difficulty: alt_block.cumulative_difficulty,
+    }
+}
+
 //---------------------------------------------------------------------------------------------------- Tests
 #[cfg(test)]
diff --git a/storage/blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs
index c774ee4..aa322d0 100644
--- a/storage/blockchain/src/service/mod.rs
+++ b/storage/blockchain/src/service/mod.rs
@@ -98,7 +98,7 @@
 //!
 //! // Block write was OK.
 //! let response = response_channel.await?;
-//! assert_eq!(response, BlockchainResponse::WriteBlockOk);
+//! assert_eq!(response, BlockchainResponse::Ok);
 //!
 //! // Now, let's try getting the block hash
 //! // of the block we just wrote.
diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index 87f416e..b0e7e04 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -8,6 +8,7 @@ use std::{ use rayon::{ iter::{IntoParallelIterator, ParallelIterator}, + prelude::*, ThreadPool, }; use thread_local::ThreadLocal; @@ -17,11 +18,15 @@ use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThre use cuprate_helper::map::combine_low_high_bits_to_u128; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse}, - Chain, ExtendedBlockHeader, OutputOnChain, + Chain, ChainId, ExtendedBlockHeader, OutputOnChain, }; use crate::{ ops::{ + alt_block::{ + get_alt_block, get_alt_block_extended_header_from_height, get_alt_block_hash, + get_alt_chain_history_ranges, + }, block::{ block_exists, get_block_extended_header_from_height, get_block_height, get_block_info, }, @@ -33,8 +38,10 @@ use crate::{ free::{compact_history_genesis_not_included, compact_history_index_to_height_offset}, types::{BlockchainReadHandle, ResponseResult}, }, - tables::{BlockHeights, BlockInfos, OpenTables, Tables}, - types::{Amount, AmountIndex, BlockHash, BlockHeight, KeyImage, PreRctOutputId}, + tables::{AltBlockHeights, BlockHeights, BlockInfos, OpenTables, Tables}, + types::{ + AltBlockHeight, Amount, AmountIndex, BlockHash, BlockHeight, KeyImage, PreRctOutputId, + }, }; //---------------------------------------------------------------------------------------------------- init_read_service @@ -87,7 +94,7 @@ fn map_request( match request { R::BlockExtendedHeader(block) => block_extended_header(env, block), R::BlockHash(block, chain) => block_hash(env, block, chain), - R::FindBlock(_) => todo!("Add alt blocks to DB"), + R::FindBlock(block_hash) => find_block(env, block_hash), R::FilterUnknownHashes(hashes) => filter_unknown_hashes(env, hashes), R::BlockExtendedHeaderInRange(range, chain) => { 
                block_extended_header_in_range(env, range, chain)
@@ -99,6 +106,7 @@
         R::KeyImagesSpent(set) => key_images_spent(env, set),
         R::CompactChainHistory => compact_chain_history(env),
         R::FindFirstUnknown(block_ids) => find_first_unknown(env, &block_ids),
+        R::AltBlocksInChain(chain_id) => alt_blocks_in_chain(env, chain_id),
     }
 
     /* SOMEDAY: post-request handling, run some code for each request? */
@@ -197,12 +205,41 @@ fn block_hash(env: &ConcreteEnv, block_height: BlockHeight, chain: Chain) -> Res
     let block_hash = match chain {
         Chain::Main => get_block_info(&block_height, &table_block_infos)?.block_hash,
-        Chain::Alt(_) => todo!("Add alt blocks to DB"),
+        Chain::Alt(chain) => {
+            get_alt_block_hash(&block_height, chain, &env_inner.open_tables(&tx_ro)?)?
+        }
     };
 
     Ok(BlockchainResponse::BlockHash(block_hash))
 }
 
+/// [`BlockchainReadRequest::FindBlock`]
+fn find_block(env: &ConcreteEnv, block_hash: BlockHash) -> ResponseResult {
+    // Single-threaded, no `ThreadLocal` required.
+    let env_inner = env.env_inner();
+    let tx_ro = env_inner.tx_ro()?;
+
+    let table_block_heights = env_inner.open_db_ro::<BlockHeights>(&tx_ro)?;
+
+    // Check the main chain first.
+    match table_block_heights.get(&block_hash) {
+        Ok(height) => return Ok(BlockchainResponse::FindBlock(Some((Chain::Main, height)))),
+        Err(RuntimeError::KeyNotFound) => (),
+        Err(e) => return Err(e),
+    }
+
+    let table_alt_block_heights = env_inner.open_db_ro::<AltBlockHeights>(&tx_ro)?;
+
+    match table_alt_block_heights.get(&block_hash) {
+        Ok(height) => Ok(BlockchainResponse::FindBlock(Some((
+            Chain::Alt(height.chain_id.into()),
+            height.height,
+        )))),
+        Err(RuntimeError::KeyNotFound) => Ok(BlockchainResponse::FindBlock(None)),
+        Err(e) => Err(e),
+    }
+}
+
 /// [`BlockchainReadRequest::FilterUnknownHashes`].
 #[inline]
 fn filter_unknown_hashes(env: &ConcreteEnv, mut hashes: HashSet<BlockHash>) -> ResponseResult {
@@ -253,7 +290,37 @@ fn block_extended_header_in_range(
                 get_block_extended_header_from_height(&block_height, tables)
             })
             .collect::<Result<Vec<_>, RuntimeError>>()?,
-        Chain::Alt(_) => todo!("Add alt blocks to DB"),
+        Chain::Alt(chain_id) => {
+            let ranges = {
+                let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
+                let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
+                let alt_chains = tables.alt_chain_infos();
+
+                get_alt_chain_history_ranges(range, chain_id, alt_chains)?
+            };
+
+            ranges
+                .par_iter()
+                .rev()
+                .flat_map(|(chain, range)| {
+                    range.clone().into_par_iter().map(|height| {
+                        let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
+                        let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
+
+                        match *chain {
+                            Chain::Main => get_block_extended_header_from_height(&height, tables),
+                            Chain::Alt(chain_id) => get_alt_block_extended_header_from_height(
+                                &AltBlockHeight {
+                                    chain_id: chain_id.into(),
+                                    height,
+                                },
+                                tables,
+                            ),
+                        }
+                    })
+                })
+                .collect::<Result<Vec<_>, _>>()?
+        }
     };
 
     Ok(BlockchainResponse::BlockExtendedHeaderInRange(vec))
@@ -492,3 +559,45 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes
         BlockchainResponse::FindFirstUnknown(Some((idx, last_known_height + 1)))
     })
 }
+
+/// [`BlockchainReadRequest::AltBlocksInChain`]
+fn alt_blocks_in_chain(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult {
+    // Prepare tx/tables in `ThreadLocal`.
+    let env_inner = env.env_inner();
+    let tx_ro = thread_local(env);
+    let tables = thread_local(env);
+
+    // Get the history of this alt-chain.
+    let history = {
+        let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
+        let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
+        get_alt_chain_history_ranges(0..usize::MAX, chain_id, tables.alt_chain_infos())?
+    };
+
+    // Get all the blocks until we join the main-chain.
+    let blocks = history
+        .par_iter()
+        .rev()
+        .skip(1)
+        .flat_map(|(chain_id, range)| {
+            let Chain::Alt(chain_id) = chain_id else {
+                panic!("Should not have main chain blocks here we skipped last range");
+            };
+
+            range.clone().into_par_iter().map(|height| {
+                let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?;
+                let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref();
+
+                get_alt_block(
+                    &AltBlockHeight {
+                        chain_id: (*chain_id).into(),
+                        height,
+                    },
+                    tables,
+                )
+            })
+        })
+        .collect::<Result<_, _>>()?;
+
+    Ok(BlockchainResponse::AltBlocksInChain(blocks))
+}
diff --git a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs
index c314bb5..b3ccbbd 100644
--- a/storage/blockchain/src/service/tests.rs
+++ b/storage/blockchain/src/service/tests.rs
@@ -13,13 +13,14 @@
 };
 
 use pretty_assertions::assert_eq;
+use rand::Rng;
 use tower::{Service, ServiceExt};
 
 use cuprate_database::{ConcreteEnv, DatabaseIter, DatabaseRo, Env, EnvInner, RuntimeError};
 use cuprate_test_utils::data::{BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3};
 use cuprate_types::{
     blockchain::{BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest},
-    Chain, OutputOnChain, VerifiedBlockInformation,
+    Chain, ChainId, OutputOnChain, VerifiedBlockInformation,
 };
 
 use crate::{
@@ -31,7 +32,7 @@
     },
     service::{init, BlockchainReadHandle, BlockchainWriteHandle},
     tables::{OpenTables, Tables, TablesIter},
-    tests::AssertTableLen,
+    tests::{map_verified_block_to_alt, AssertTableLen},
     types::{Amount, AmountIndex, PreRctOutputId},
 };
 
@@ -87,7 +88,7 @@ async fn test_template(
         let request = BlockchainWriteRequest::WriteBlock(block);
         let response_channel = writer.call(request);
         let response = response_channel.await.unwrap();
-        assert_eq!(response, BlockchainResponse::WriteBlockOk);
+        assert_eq!(response, BlockchainResponse::Ok);
     }
 
     //----------------------------------------------------------------------- Reset the transaction
@@ -415,3 +416,92 @@ async fn v16_tx0() {
) .await; } + +/// Tests the alt-chain requests and responses. +#[tokio::test] +async fn alt_chain_requests() { + let (reader, mut writer, _, _tempdir) = init_service(); + + // Set up the test by adding blocks to the main-chain. + for (i, mut block) in [BLOCK_V9_TX3.clone(), BLOCK_V16_TX0.clone()] + .into_iter() + .enumerate() + { + block.height = i; + + let request = BlockchainWriteRequest::WriteBlock(block); + writer.call(request).await.unwrap(); + } + + // Generate the alt-blocks. + let mut prev_hash = BLOCK_V9_TX3.block_hash; + let mut chain_id = 1; + let alt_blocks = [&BLOCK_V16_TX0, &BLOCK_V9_TX3, &BLOCK_V1_TX2] + .into_iter() + .enumerate() + .map(|(i, block)| { + let mut block = (**block).clone(); + block.height = i + 1; + block.block.header.previous = prev_hash; + block.block_blob = block.block.serialize(); + + prev_hash = block.block_hash; + // Randomly either keep the [`ChainId`] the same or change it to a new value. + chain_id += rand::thread_rng().gen_range(0..=1); + + map_verified_block_to_alt(block, ChainId(chain_id.try_into().unwrap())) + }) + .collect::>(); + + for block in &alt_blocks { + // Request a block to be written, assert it was written. 
+ let request = BlockchainWriteRequest::WriteAltBlock(block.clone()); + let response_channel = writer.call(request); + let response = response_channel.await.unwrap(); + assert_eq!(response, BlockchainResponse::Ok); + } + + // Get the full alt-chain + let request = BlockchainReadRequest::AltBlocksInChain(ChainId(chain_id.try_into().unwrap())); + let response = reader.clone().oneshot(request).await.unwrap(); + + let BlockchainResponse::AltBlocksInChain(blocks) = response else { + panic!("Wrong response type was returned"); + }; + + assert_eq!(blocks.len(), alt_blocks.len()); + for (got_block, alt_block) in blocks.into_iter().zip(alt_blocks) { + assert_eq!(got_block.block_blob, alt_block.block_blob); + assert_eq!(got_block.block_hash, alt_block.block_hash); + assert_eq!(got_block.chain_id, alt_block.chain_id); + assert_eq!(got_block.txs, alt_block.txs); + } + + // Flush all alt blocks. + let request = BlockchainWriteRequest::FlushAltBlocks; + let response = writer.ready().await.unwrap().call(request).await.unwrap(); + assert_eq!(response, BlockchainResponse::Ok); + + // Pop blocks from the main chain + let request = BlockchainWriteRequest::PopBlocks(1); + let response = writer.ready().await.unwrap().call(request).await.unwrap(); + + let BlockchainResponse::PopBlocks(old_main_chain_id) = response else { + panic!("Wrong response type was returned"); + }; + + // Check we have popped the top block. + let request = BlockchainReadRequest::ChainHeight; + let response = reader.clone().oneshot(request).await.unwrap(); + assert!(matches!(response, BlockchainResponse::ChainHeight(1, _))); + + // Attempt to add the popped block back. + let request = BlockchainWriteRequest::ReverseReorg(old_main_chain_id); + let response = writer.ready().await.unwrap().call(request).await.unwrap(); + assert_eq!(response, BlockchainResponse::Ok); + + // Check we have the popped block back. 
+ let request = BlockchainReadRequest::ChainHeight; + let response = reader.clone().oneshot(request).await.unwrap(); + assert!(matches!(response, BlockchainResponse::ChainHeight(2, _))); +} diff --git a/storage/blockchain/src/service/write.rs b/storage/blockchain/src/service/write.rs index 816afc4..07162d2 100644 --- a/storage/blockchain/src/service/write.rs +++ b/storage/blockchain/src/service/write.rs @@ -1,20 +1,30 @@ //! Database writer thread definitions and logic. - //---------------------------------------------------------------------------------------------------- Import use std::sync::Arc; -use cuprate_database::{ConcreteEnv, Env, EnvInner, RuntimeError, TxRw}; +use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError, TxRw}; use cuprate_database_service::DatabaseWriteHandle; use cuprate_types::{ blockchain::{BlockchainResponse, BlockchainWriteRequest}, - VerifiedBlockInformation, + AltBlockInformation, Chain, ChainId, VerifiedBlockInformation, }; use crate::{ - service::types::{BlockchainWriteHandle, ResponseResult}, - tables::OpenTables, + service::{ + free::map_valid_alt_block_to_verified_block, + types::{BlockchainWriteHandle, ResponseResult}, + }, + tables::{OpenTables, Tables}, + types::AltBlockHeight, }; +/// Write functions within this module abort if the write transaction +/// could not be aborted successfully to maintain atomicity. +/// +/// This is the panic message if the `abort()` fails. +const TX_RW_ABORT_FAIL: &str = + "Could not maintain blockchain database atomicity by aborting write transaction"; + //---------------------------------------------------------------------------------------------------- init_write_service /// Initialize the blockchain write service from a [`ConcreteEnv`]. 
pub fn init_write_service(env: Arc) -> BlockchainWriteHandle { @@ -29,6 +39,12 @@ fn handle_blockchain_request( ) -> Result { match req { BlockchainWriteRequest::WriteBlock(block) => write_block(env, block), + BlockchainWriteRequest::WriteAltBlock(alt_block) => write_alt_block(env, alt_block), + BlockchainWriteRequest::PopBlocks(numb_blocks) => pop_blocks(env, *numb_blocks), + BlockchainWriteRequest::ReverseReorg(old_main_chain_id) => { + reverse_reorg(env, *old_main_chain_id) + } + BlockchainWriteRequest::FlushAltBlocks => flush_alt_blocks(env), } } @@ -55,13 +71,140 @@ fn write_block(env: &ConcreteEnv, block: &VerifiedBlockInformation) -> ResponseR match result { Ok(()) => { TxRw::commit(tx_rw)?; - Ok(BlockchainResponse::WriteBlockOk) + Ok(BlockchainResponse::Ok) } Err(e) => { - // INVARIANT: ensure database atomicity by aborting - // the transaction on `add_block()` failures. - TxRw::abort(tx_rw) - .expect("could not maintain database atomicity by aborting write transaction"); + TxRw::abort(tx_rw).expect(TX_RW_ABORT_FAIL); + Err(e) + } + } +} + +/// [`BlockchainWriteRequest::WriteAltBlock`]. +#[inline] +fn write_alt_block(env: &ConcreteEnv, block: &AltBlockInformation) -> ResponseResult { + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw()?; + + let result = { + let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?; + crate::ops::alt_block::add_alt_block(block, &mut tables_mut) + }; + + match result { + Ok(()) => { + TxRw::commit(tx_rw)?; + Ok(BlockchainResponse::Ok) + } + Err(e) => { + TxRw::abort(tx_rw).expect(TX_RW_ABORT_FAIL); + Err(e) + } + } +} + +/// [`BlockchainWriteRequest::PopBlocks`]. +fn pop_blocks(env: &ConcreteEnv, numb_blocks: usize) -> ResponseResult { + let env_inner = env.env_inner(); + let mut tx_rw = env_inner.tx_rw()?; + + // FIXME: turn this function into a try block once stable. + let mut result = || { + // flush all the current alt blocks as they may reference blocks to be popped. 
+ crate::ops::alt_block::flush_alt_blocks(&env_inner, &mut tx_rw)?; + + let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?; + // generate a `ChainId` for the popped blocks. + let old_main_chain_id = ChainId(rand::random()); + + // pop the blocks + for _ in 0..numb_blocks { + crate::ops::block::pop_block(Some(old_main_chain_id), &mut tables_mut)?; + } + + Ok(old_main_chain_id) + }; + + match result() { + Ok(old_main_chain_id) => { + TxRw::commit(tx_rw)?; + Ok(BlockchainResponse::PopBlocks(old_main_chain_id)) + } + Err(e) => { + TxRw::abort(tx_rw).expect(TX_RW_ABORT_FAIL); + Err(e) + } + } +} + +/// [`BlockchainWriteRequest::ReverseReorg`]. +fn reverse_reorg(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult { + let env_inner = env.env_inner(); + let mut tx_rw = env_inner.tx_rw()?; + + // FIXME: turn this function into a try block once stable. + let mut result = || { + let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?; + + let chain_info = tables_mut.alt_chain_infos().get(&chain_id.into())?; + // Although this doesn't guarantee the chain was popped from the main-chain, it's an easy + // thing for us to check. + assert_eq!(Chain::from(chain_info.parent_chain), Chain::Main); + + let top_block_height = + crate::ops::blockchain::top_block_height(tables_mut.block_heights())?; + + // pop any blocks that were added as part of a re-org. + for _ in chain_info.common_ancestor_height..top_block_height { + crate::ops::block::pop_block(None, &mut tables_mut)?; + } + + // Add the old main chain blocks back to the main chain. 
+ for height in (chain_info.common_ancestor_height + 1)..chain_info.chain_height { + let alt_block = crate::ops::alt_block::get_alt_block( + &AltBlockHeight { + chain_id: chain_id.into(), + height, + }, + &tables_mut, + )?; + let verified_block = map_valid_alt_block_to_verified_block(alt_block); + crate::ops::block::add_block(&verified_block, &mut tables_mut)?; + } + + drop(tables_mut); + crate::ops::alt_block::flush_alt_blocks(&env_inner, &mut tx_rw)?; + + Ok(()) + }; + + match result() { + Ok(()) => { + TxRw::commit(tx_rw)?; + Ok(BlockchainResponse::Ok) + } + Err(e) => { + TxRw::abort(tx_rw).expect(TX_RW_ABORT_FAIL); + Err(e) + } + } +} + +/// [`BlockchainWriteRequest::FlushAltBlocks`]. +#[inline] +fn flush_alt_blocks(env: &ConcreteEnv) -> ResponseResult { + let env_inner = env.env_inner(); + let mut tx_rw = env_inner.tx_rw()?; + + let result = crate::ops::alt_block::flush_alt_blocks(&env_inner, &mut tx_rw); + + match result { + Ok(()) => { + TxRw::commit(tx_rw)?; + Ok(BlockchainResponse::Ok) + } + Err(e) => { + TxRw::abort(tx_rw).expect(TX_RW_ABORT_FAIL); Err(e) } } diff --git a/storage/blockchain/src/tables.rs b/storage/blockchain/src/tables.rs index 122ac31..75c33ae 100644 --- a/storage/blockchain/src/tables.rs +++ b/storage/blockchain/src/tables.rs @@ -17,8 +17,9 @@ //---------------------------------------------------------------------------------------------------- Import use crate::types::{ - Amount, AmountIndex, AmountIndices, BlockBlob, BlockHash, BlockHeight, BlockInfo, KeyImage, - Output, PreRctOutputId, PrunableBlob, PrunableHash, PrunedBlob, RctOutput, TxBlob, TxHash, + AltBlockHeight, AltChainInfo, AltTransactionInfo, Amount, AmountIndex, AmountIndices, + BlockBlob, BlockHash, BlockHeight, BlockInfo, CompactAltBlockInfo, KeyImage, Output, + PreRctOutputId, PrunableBlob, PrunableHash, PrunedBlob, RawChainId, RctOutput, TxBlob, TxHash, TxId, UnlockTime, }; @@ -129,6 +130,40 @@ cuprate_database::define_tables! 
{ /// Transactions without unlock times will not exist in this table. 14 => TxUnlockTime, TxId => UnlockTime, + + /// Information on alt-chains. + 15 => AltChainInfos, + RawChainId => AltChainInfo, + + /// Alt-block heights. + /// + /// Contains the height of all alt-blocks. + 16 => AltBlockHeights, + BlockHash => AltBlockHeight, + + /// Alt-block information. + /// + /// Contains information on all alt-blocks. + 17 => AltBlocksInfo, + AltBlockHeight => CompactAltBlockInfo, + + /// Alt-block blobs. + /// + /// Contains the raw bytes of all alt-blocks. + 18 => AltBlockBlobs, + AltBlockHeight => BlockBlob, + + /// Alt-block transaction blobs. + /// + /// Contains the raw bytes of alt transactions, if those transactions are not in the main-chain. + 19 => AltTransactionBlobs, + TxHash => TxBlob, + + /// Alt-block transaction information. + /// + /// Contains information on all alt transactions, even if they are in the main-chain. + 20 => AltTransactionInfos, + TxHash => AltTransactionInfo, } //---------------------------------------------------------------------------------------------------- Tests diff --git a/storage/blockchain/src/tests.rs b/storage/blockchain/src/tests.rs index 65527e1..d57a371 100644 --- a/storage/blockchain/src/tests.rs +++ b/storage/blockchain/src/tests.rs @@ -10,6 +10,7 @@ use std::{borrow::Cow, fmt::Debug}; use pretty_assertions::assert_eq; use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner}; +use cuprate_types::{AltBlockInformation, ChainId, VerifiedBlockInformation}; use crate::{ config::ConfigBuilder, @@ -88,3 +89,21 @@ pub(crate) fn assert_all_tables_are_empty(env: &ConcreteEnv) { assert!(tables.all_tables_empty().unwrap()); assert_eq!(crate::ops::tx::get_num_tx(tables.tx_ids()).unwrap(), 0); } + +pub(crate) fn map_verified_block_to_alt( + verified_block: VerifiedBlockInformation, + chain_id: ChainId, +) -> AltBlockInformation { + AltBlockInformation { + block: verified_block.block, + block_blob: verified_block.block_blob, + 
txs: verified_block.txs, + block_hash: verified_block.block_hash, + pow_hash: verified_block.pow_hash, + height: verified_block.height, + weight: verified_block.weight, + long_term_weight: verified_block.long_term_weight, + cumulative_difficulty: verified_block.cumulative_difficulty, + chain_id, + } +} diff --git a/storage/blockchain/src/types.rs b/storage/blockchain/src/types.rs index eb1dc64..6afd3d4 100644 --- a/storage/blockchain/src/types.rs +++ b/storage/blockchain/src/types.rs @@ -41,12 +41,14 @@ #![forbid(unsafe_code)] // if you remove this line i will steal your monero //---------------------------------------------------------------------------------------------------- Import -use bytemuck::{Pod, Zeroable}; +use std::num::NonZero; +use bytemuck::{Pod, Zeroable}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use cuprate_database::{Key, StorableVec}; +use cuprate_types::{Chain, ChainId}; //---------------------------------------------------------------------------------------------------- Aliases // These type aliases exist as many Monero-related types are the exact same. @@ -187,7 +189,7 @@ pub struct BlockInfo { /// The adjusted block size, in bytes. /// /// See [`block_weight`](https://monero-book.cuprate.org/consensus_rules/blocks/weights.html#blocks-weight). - pub weight: u64, + pub weight: usize, /// Least-significant 64 bits of the 128-bit cumulative difficulty. pub cumulative_difficulty_low: u64, /// Most-significant 64 bits of the 128-bit cumulative difficulty. @@ -199,7 +201,7 @@ pub struct BlockInfo { /// The long term block weight, based on the median weight of the preceding `100_000` blocks. /// /// See [`long_term_weight`](https://monero-book.cuprate.org/consensus_rules/blocks/weights.html#long-term-block-weight). 
- pub long_term_weight: u64, + pub long_term_weight: usize, } //---------------------------------------------------------------------------------------------------- OutputFlags @@ -324,6 +326,259 @@ pub struct RctOutput { } // TODO: local_index? +//---------------------------------------------------------------------------------------------------- RawChain +/// [`Chain`] in a format which can be stored in the DB. +/// +/// Implements [`Into`] and [`From`] for [`Chain`]. +/// +/// ```rust +/// # use std::borrow::*; +/// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// use cuprate_types::Chain; +/// +/// // Assert Storable is correct. +/// let a: RawChain = Chain::Main.into(); +/// let b = Storable::as_bytes(&a); +/// let c: RawChain = Storable::from_bytes(b); +/// assert_eq!(a, c); +/// ``` +/// +/// # Size & Alignment +/// ```rust +/// # use cuprate_blockchain::types::*; +/// assert_eq!(size_of::(), 8); +/// assert_eq!(align_of::(), 8); +/// ``` +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] +#[repr(transparent)] +pub struct RawChain(u64); + +impl From for RawChain { + fn from(value: Chain) -> Self { + match value { + Chain::Main => Self(0), + Chain::Alt(chain_id) => Self(chain_id.0.get()), + } + } +} + +impl From for Chain { + fn from(value: RawChain) -> Self { + NonZero::new(value.0).map_or(Self::Main, |id| Self::Alt(ChainId(id))) + } +} + +impl From for RawChain { + fn from(value: RawChainId) -> Self { + // A [`ChainID`] with an inner value of `0` is invalid. + assert_ne!(value.0, 0); + + Self(value.0) + } +} + +//---------------------------------------------------------------------------------------------------- RawChainId +/// [`ChainId`] in a format which can be stored in the DB. +/// +/// Implements [`Into`] and [`From`] for [`ChainId`]. 
+/// +/// ```rust +/// # use std::borrow::*; +/// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// use cuprate_types::ChainId; +/// +/// // Assert Storable is correct. +/// let a: RawChainId = ChainId(10.try_into().unwrap()).into(); +/// let b = Storable::as_bytes(&a); +/// let c: RawChainId = Storable::from_bytes(b); +/// assert_eq!(a, c); +/// ``` +/// +/// # Size & Alignment +/// ```rust +/// # use cuprate_blockchain::types::*; +/// assert_eq!(size_of::(), 8); +/// assert_eq!(align_of::(), 8); +/// ``` +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] +#[repr(transparent)] +pub struct RawChainId(u64); + +impl From for RawChainId { + fn from(value: ChainId) -> Self { + Self(value.0.get()) + } +} + +impl From for ChainId { + fn from(value: RawChainId) -> Self { + Self(NonZero::new(value.0).expect("RawChainId cannot have a value of `0`")) + } +} + +impl Key for RawChainId {} + +//---------------------------------------------------------------------------------------------------- AltChainInfo +/// Information on an alternative chain. +/// +/// ```rust +/// # use std::borrow::*; +/// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// use cuprate_types::Chain; +/// +/// // Assert Storable is correct. +/// let a: AltChainInfo = AltChainInfo { +/// parent_chain: Chain::Main.into(), +/// common_ancestor_height: 0, +/// chain_height: 1, +/// }; +/// let b = Storable::as_bytes(&a); +/// let c: AltChainInfo = Storable::from_bytes(b); +/// assert_eq!(a, c); +/// ``` +/// +/// # Size & Alignment +/// ```rust +/// # use cuprate_blockchain::types::*; +/// assert_eq!(size_of::(), 24); +/// assert_eq!(align_of::(), 8); +/// ``` +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] +#[repr(C)] +pub struct AltChainInfo { + /// The chain this alt chain forks from. 
+ pub parent_chain: RawChain, + /// The height of the first block we share with the parent chain. + pub common_ancestor_height: usize, + /// The chain height of the blocks in this alt chain. + pub chain_height: usize, +} + +//---------------------------------------------------------------------------------------------------- AltBlockHeight +/// Represents the height of a block on an alt-chain. +/// +/// ```rust +/// # use std::borrow::*; +/// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// use cuprate_types::ChainId; +/// +/// // Assert Storable is correct. +/// let a: AltBlockHeight = AltBlockHeight { +/// chain_id: ChainId(1.try_into().unwrap()).into(), +/// height: 1, +/// }; +/// let b = Storable::as_bytes(&a); +/// let c: AltBlockHeight = Storable::from_bytes(b); +/// assert_eq!(a, c); +/// ``` +/// +/// # Size & Alignment +/// ```rust +/// # use cuprate_blockchain::types::*; +/// assert_eq!(size_of::(), 16); +/// assert_eq!(align_of::(), 8); +/// ``` +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] +#[repr(C)] +pub struct AltBlockHeight { + /// The [`ChainId`] of the chain this alt block is on, in raw form. + pub chain_id: RawChainId, + /// The height of this alt-block. + pub height: usize, +} + +impl Key for AltBlockHeight {} + +//---------------------------------------------------------------------------------------------------- CompactAltBlockInfo +/// Represents information on an alt-chain. +/// +/// ```rust +/// # use std::borrow::*; +/// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// +/// // Assert Storable is correct. 
+/// let a: CompactAltBlockInfo = CompactAltBlockInfo { +/// block_hash: [1; 32], +/// pow_hash: [2; 32], +/// height: 10, +/// weight: 20, +/// long_term_weight: 30, +/// cumulative_difficulty_low: 40, +/// cumulative_difficulty_high: 50, +/// }; +/// +/// let b = Storable::as_bytes(&a); +/// let c: CompactAltBlockInfo = Storable::from_bytes(b); +/// assert_eq!(a, c); +/// ``` +/// +/// # Size & Alignment +/// ```rust +/// # use cuprate_blockchain::types::*; +/// assert_eq!(size_of::(), 104); +/// assert_eq!(align_of::(), 8); +/// ``` +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] +#[repr(C)] +pub struct CompactAltBlockInfo { + /// The block's hash. + pub block_hash: [u8; 32], + /// The block's proof-of-work hash. + pub pow_hash: [u8; 32], + /// The block's height. + pub height: usize, + /// The adjusted block size, in bytes. + pub weight: usize, + /// The long term block weight, which is the weight factored in with previous block weights. + pub long_term_weight: usize, + /// The low 64 bits of the cumulative difficulty. + pub cumulative_difficulty_low: u64, + /// The high 64 bits of the cumulative difficulty. + pub cumulative_difficulty_high: u64, +} + +//---------------------------------------------------------------------------------------------------- AltTransactionInfo +/// Represents information on an alt transaction. +/// +/// ```rust +/// # use std::borrow::*; +/// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// +/// // Assert Storable is correct. 
+/// let a: AltTransactionInfo = AltTransactionInfo { +/// tx_weight: 1, +/// fee: 6, +/// tx_hash: [6; 32], +/// }; +/// +/// let b = Storable::as_bytes(&a); +/// let c: AltTransactionInfo = Storable::from_bytes(b); +/// assert_eq!(a, c); +/// ``` +/// +/// # Size & Alignment +/// ```rust +/// # use cuprate_blockchain::types::*; +/// assert_eq!(size_of::(), 48); +/// assert_eq!(align_of::(), 8); +/// ``` +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] +#[repr(C)] +pub struct AltTransactionInfo { + /// The transaction's weight. + pub tx_weight: usize, + /// The transaction's total fees. + pub fee: u64, + /// The transaction's hash. + pub tx_hash: [u8; 32], +} + //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test { diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index b2fafd9..abf7ee4 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -7,7 +7,7 @@ authors = ["Boog900", "hinto-janai"] [dependencies] cuprate-types = { path = "../types" } -cuprate-helper = { path = "../helper", features = ["map"] } +cuprate-helper = { path = "../helper", features = ["map", "tx"] } cuprate-wire = { path = "../net/wire" } cuprate-p2p-core = { path = "../p2p/p2p-core", features = ["borsh"] } diff --git a/test-utils/src/data/mod.rs b/test-utils/src/data/mod.rs index b9d42fb..3be409f 100644 --- a/test-utils/src/data/mod.rs +++ b/test-utils/src/data/mod.rs @@ -25,13 +25,11 @@ //! let tx: VerifiedTransactionInformation = TX_V1_SIG0.clone(); //! 
``` -mod constants; pub use constants::{ BLOCK_43BD1F, BLOCK_5ECB7E, BLOCK_BBD604, BLOCK_F91043, TX_2180A8, TX_3BC7FF, TX_84D48D, TX_9E3F73, TX_B6B439, TX_D7FEBD, TX_E2D393, TX_E57440, }; +pub use statics::{BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3, TX_V1_SIG0, TX_V1_SIG2, TX_V2_RCT3}; +mod constants; mod statics; -pub use statics::{ - tx_fee, BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3, TX_V1_SIG0, TX_V1_SIG2, TX_V2_RCT3, -}; diff --git a/test-utils/src/data/statics.rs b/test-utils/src/data/statics.rs index 474e35c..c67c7eb 100644 --- a/test-utils/src/data/statics.rs +++ b/test-utils/src/data/statics.rs @@ -8,12 +8,12 @@ //---------------------------------------------------------------------------------------------------- Import use std::sync::LazyLock; -use cuprate_helper::map::combine_low_high_bits_to_u128; -use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; use hex_literal::hex; -use monero_serai::transaction::Input; use monero_serai::{block::Block, transaction::Transaction}; +use cuprate_helper::{map::combine_low_high_bits_to_u128, tx::tx_fee}; +use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; + use crate::data::constants::{ BLOCK_43BD1F, BLOCK_5ECB7E, BLOCK_F91043, TX_2180A8, TX_3BC7FF, TX_84D48D, TX_9E3F73, TX_B6B439, TX_D7FEBD, TX_E2D393, TX_E57440, @@ -110,36 +110,6 @@ fn to_tx_verification_data(tx_blob: impl AsRef<[u8]>) -> VerifiedTransactionInfo } } -/// Calculates the fee of the [`Transaction`]. -/// -/// # Panics -/// This will panic if the inputs overflow or the transaction outputs too much. -pub fn tx_fee(tx: &Transaction) -> u64 { - let mut fee = 0_u64; - - match &tx { - Transaction::V1 { prefix, .. } => { - for input in &prefix.inputs { - match input { - Input::Gen(_) => return 0, - Input::ToKey { amount, .. 
} => { - fee = fee.checked_add(amount.unwrap_or(0)).unwrap(); - } - } - } - - for output in &prefix.outputs { - fee.checked_sub(output.amount.unwrap_or(0)).unwrap(); - } - } - Transaction::V2 { proofs, .. } => { - fee = proofs.as_ref().unwrap().base.fee; - } - }; - - fee -} - //---------------------------------------------------------------------------------------------------- Blocks /// Generate a `static LazyLock`. /// @@ -311,12 +281,12 @@ transaction_verification_data! { //---------------------------------------------------------------------------------------------------- TESTS #[cfg(test)] mod tests { - use super::*; - use pretty_assertions::assert_eq; use crate::rpc::client::HttpRpcClient; + use super::*; + /// Assert the defined blocks are the same compared to ones received from a local RPC call. #[ignore] // FIXME: doesn't work in CI, we need a real unrestricted node #[tokio::test] diff --git a/test-utils/src/rpc/client.rs b/test-utils/src/rpc/client.rs index 25240ae..ce7fb09 100644 --- a/test-utils/src/rpc/client.rs +++ b/test-utils/src/rpc/client.rs @@ -1,18 +1,16 @@ //! HTTP RPC client. //---------------------------------------------------------------------------------------------------- Use +use monero_rpc::Rpc; +use monero_serai::block::Block; +use monero_simple_request_rpc::SimpleRequestRpc; use serde::Deserialize; use serde_json::json; use tokio::task::spawn_blocking; -use monero_rpc::Rpc; -use monero_serai::block::Block; -use monero_simple_request_rpc::SimpleRequestRpc; - +use cuprate_helper::tx::tx_fee; use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; -use crate::data::tx_fee; - //---------------------------------------------------------------------------------------------------- Constants /// The default URL used for Monero RPC connections. 
pub const LOCALHOST_RPC_URL: &str = "http://127.0.0.1:18081"; @@ -184,9 +182,10 @@ impl HttpRpcClient { //---------------------------------------------------------------------------------------------------- TESTS #[cfg(test)] mod tests { - use super::*; use hex_literal::hex; + use super::*; + /// Assert the default address is localhost. #[tokio::test] async fn localhost() { diff --git a/types/src/blockchain.rs b/types/src/blockchain.rs index b502c3f..f2b96db 100644 --- a/types/src/blockchain.rs +++ b/types/src/blockchain.rs @@ -2,14 +2,16 @@ //! //! Tests that assert particular requests lead to particular //! responses are also tested in Cuprate's blockchain database crate. - //---------------------------------------------------------------------------------------------------- Import use std::{ collections::{HashMap, HashSet}, ops::Range, }; -use crate::types::{Chain, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation}; +use crate::{ + types::{Chain, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation}, + AltBlockInformation, ChainId, +}; //---------------------------------------------------------------------------------------------------- ReadRequest /// A read request to the blockchain database. @@ -92,26 +94,49 @@ pub enum BlockchainReadRequest { CompactChainHistory, /// A request to find the first unknown block ID in a list of block IDs. - //// + /// /// # Invariant /// The [`Vec`] containing the block IDs must be sorted in chronological block /// order, or else the returned response is unspecified and meaningless, /// as this request performs a binary search. FindFirstUnknown(Vec<[u8; 32]>), + + /// A request for all alt blocks in the chain with the given [`ChainId`]. + AltBlocksInChain(ChainId), } //---------------------------------------------------------------------------------------------------- WriteRequest /// A write request to the blockchain database. 
-/// -/// There is currently only 1 write request to the database, -/// as such, the only valid [`BlockchainResponse`] to this request is -/// the proper response for a [`BlockchainResponse::WriteBlockOk`]. #[derive(Debug, Clone, PartialEq, Eq)] pub enum BlockchainWriteRequest { /// Request that a block be written to the database. /// /// Input is an already verified block. WriteBlock(VerifiedBlockInformation), + + /// Write an alternative block to the database, + /// + /// Input is the alternative block. + WriteAltBlock(AltBlockInformation), + + /// A request to pop some blocks from the top of the main chain + /// + /// Input is the amount of blocks to pop. + /// + /// This request flushes all alt-chains from the cache before adding the popped blocks to the + /// alt cache. + PopBlocks(usize), + + /// A request to reverse the re-org process. + /// + /// The inner value is the [`ChainId`] of the old main chain. + /// + /// # Invariant + /// It is invalid to call this with a [`ChainId`] that was not returned from [`BlockchainWriteRequest::PopBlocks`]. + ReverseReorg(ChainId), + + /// A request to flush all alternative blocks. + FlushAltBlocks, } //---------------------------------------------------------------------------------------------------- Response @@ -197,12 +222,24 @@ pub enum BlockchainResponse { /// This will be [`None`] if all blocks were known. FindFirstUnknown(Option<(usize, usize)>), - //------------------------------------------------------ Writes - /// Response to [`BlockchainWriteRequest::WriteBlock`]. + /// The response for [`BlockchainReadRequest::AltBlocksInChain`]. /// - /// This response indicates that the requested block has - /// successfully been written to the database without error. - WriteBlockOk, + /// Contains all the alt blocks in the alt-chain in chronological order. + AltBlocksInChain(Vec), + + //------------------------------------------------------ Writes + /// A generic Ok response to indicate a request was successfully handled. 
+ /// + /// currently the response for: + /// - [`BlockchainWriteRequest::WriteBlock`] + /// - [`BlockchainWriteRequest::WriteAltBlock`] + /// - [`BlockchainWriteRequest::ReverseReorg`] + /// - [`BlockchainWriteRequest::FlushAltBlocks`] + Ok, + /// The response for [`BlockchainWriteRequest::PopBlocks`]. + /// + /// The inner value is the alt-chain ID for the old main chain blocks. + PopBlocks(ChainId), } //---------------------------------------------------------------------------------------------------- Tests diff --git a/types/src/types.rs b/types/src/types.rs index 4b6e2e1..a60ce6c 100644 --- a/types/src/types.rs +++ b/types/src/types.rs @@ -1,6 +1,8 @@ //! Various shared data types in Cuprate. //---------------------------------------------------------------------------------------------------- Import +use std::num::NonZero; + use curve25519_dalek::edwards::EdwardsPoint; use monero_serai::{ block::Block, @@ -38,8 +40,7 @@ pub struct ExtendedBlockHeader { //---------------------------------------------------------------------------------------------------- VerifiedTransactionInformation /// Verified information of a transaction. /// -/// - If this is in a [`VerifiedBlockInformation`] this represents a valid transaction -/// - If this is in an [`AltBlockInformation`] this represents a potentially valid transaction +/// This represents a valid transaction #[derive(Clone, Debug, PartialEq, Eq)] pub struct VerifiedTransactionInformation { /// The transaction itself. @@ -79,6 +80,7 @@ pub struct VerifiedBlockInformation { /// [`Block::hash`]. pub block_hash: [u8; 32], /// The block's proof-of-work hash. + // TODO: make this an option. pub pow_hash: [u8; 32], /// The block's height. pub height: usize, @@ -97,7 +99,7 @@ pub struct VerifiedBlockInformation { /// /// The inner value is meaningless. 
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)] -pub struct ChainId(pub u64); +pub struct ChainId(pub NonZero); //---------------------------------------------------------------------------------------------------- Chain /// An identifier for a chain. From e7c6bba63d7224decc0fd30f27fb50eb2072c4ea Mon Sep 17 00:00:00 2001 From: Asurar Date: Thu, 19 Sep 2024 21:05:41 +0200 Subject: [PATCH 063/104] Database: Split `BlockBlobs` table + Miscellaneous fixes (#290) * Split `BlockBlobs` database table + misc fixes - Split the `BlockBlobs` database table into two new tables: `BlockHeaderBlobs` and `BlockTxsHashes`. - `add_block`, `pop_block` and `get_block_extended_header` have been edited consequently. - `VerifiedBlockInformation` now have a `mining_tx_index: u64` field. - Made `cuprate-helper`'s `thread` feature a dependency of the `service` feature - Edited service test mapping of output. It is now a full iterator. * fix fmt * Update storage/blockchain/src/types.rs Co-authored-by: Boog900 * Update storage/blockchain/src/ops/block.rs Co-authored-by: Boog900 * fix warning --------- Co-authored-by: Boog900 --- storage/blockchain/Cargo.toml | 7 +-- storage/blockchain/src/ops/block.rs | 54 +++++++++++++------- storage/blockchain/src/ops/blockchain.rs | 3 +- storage/blockchain/src/ops/output.rs | 3 +- storage/blockchain/src/ops/tx.rs | 3 +- storage/blockchain/src/service/tests.rs | 65 ++++++++++++------------ storage/blockchain/src/tables.rs | 62 ++++++++++++---------- storage/blockchain/src/tests.rs | 13 ++--- storage/blockchain/src/types.rs | 11 +++- 9 files changed, 128 insertions(+), 93 deletions(-) diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index 46b8414..6eecb89 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -15,15 +15,12 @@ default = ["heed", "service"] heed = ["cuprate-database/heed"] redb = ["cuprate-database/redb"] redb-memory = ["cuprate-database/redb-memory"] -service = 
["dep:thread_local", "dep:rayon"] +service = ["dep:thread_local", "dep:rayon", "cuprate-helper/thread"] [dependencies] -# FIXME: -# We only need the `thread` feature if `service` is enabled. -# Figure out how to enable features of an already pulled in dependency conditionally. cuprate-database = { path = "../database" } cuprate-database-service = { path = "../service" } -cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] } +cuprate-helper = { path = "../../helper", features = ["fs", "map"] } cuprate-types = { path = "../../types", features = ["blockchain"] } cuprate-pruning = { path = "../../pruning" } diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index af81348..6d32fd8 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -2,7 +2,10 @@ //---------------------------------------------------------------------------------------------------- Import use bytemuck::TransparentWrapper; -use monero_serai::block::{Block, BlockHeader}; +use monero_serai::{ + block::{Block, BlockHeader}, + transaction::Transaction, +}; use cuprate_database::{ RuntimeError, StorableVec, {DatabaseRo, DatabaseRw}, @@ -76,10 +79,10 @@ pub fn add_block( //------------------------------------------------------ Transaction / Outputs / Key Images // Add the miner transaction first. - { + let mining_tx_index = { let tx = &block.block.miner_transaction; - add_tx(tx, &tx.serialize(), &tx.hash(), &chain_height, tables)?; - } + add_tx(tx, &tx.serialize(), &tx.hash(), &chain_height, tables)? + }; for tx in &block.txs { add_tx(&tx.tx, &tx.tx_blob, &tx.tx_hash, &chain_height, tables)?; @@ -111,13 +114,21 @@ pub fn add_block( block_hash: block.block_hash, weight: block.weight, long_term_weight: block.long_term_weight, + mining_tx_index, }, )?; - // Block blobs. - tables - .block_blobs_mut() - .put(&block.height, StorableVec::wrap_ref(&block.block_blob))?; + // Block header blob. 
+ tables.block_header_blobs_mut().put( + &block.height, + StorableVec::wrap_ref(&block.block.header.serialize()), + )?; + + // Block transaction hashes + tables.block_txs_hashes_mut().put( + &block.height, + StorableVec::wrap_ref(&block.block.transactions), + )?; // Block heights. tables @@ -151,10 +162,18 @@ pub fn pop_block( tables.block_heights_mut().delete(&block_info.block_hash)?; // Block blobs. - // We deserialize the block blob into a `Block`, such - // that we can remove the associated transactions later. - let block_blob = tables.block_blobs_mut().take(&block_height)?.0; - let block = Block::read(&mut block_blob.as_slice())?; + // + // We deserialize the block header blob and mining transaction blob + // to form a `Block`, such that we can remove the associated transactions + // later. + let block_header = tables.block_header_blobs_mut().take(&block_height)?.0; + let block_txs_hashes = tables.block_txs_hashes_mut().take(&block_height)?.0; + let miner_transaction = tables.tx_blobs().get(&block_info.mining_tx_index)?.0; + let block = Block { + header: BlockHeader::read(&mut block_header.as_slice())?, + miner_transaction: Transaction::read(&mut miner_transaction.as_slice())?, + transactions: block_txs_hashes, + }; //------------------------------------------------------ Transaction / Outputs / Key Images remove_tx(&block.miner_transaction.hash(), tables)?; @@ -181,7 +200,7 @@ pub fn pop_block( alt_block::add_alt_block( &AltBlockInformation { block: block.clone(), - block_blob, + block_blob: block.serialize(), txs, block_hash: block_info.block_hash, // We know the PoW is valid for this block so just set it so it will always verify as valid. 
@@ -236,8 +255,8 @@ pub fn get_block_extended_header_from_height( tables: &impl Tables, ) -> Result { let block_info = tables.block_infos().get(block_height)?; - let block_blob = tables.block_blobs().get(block_height)?.0; - let block_header = BlockHeader::read(&mut block_blob.as_slice())?; + let block_header_blob = tables.block_header_blobs().get(block_height)?.0; + let block_header = BlockHeader::read(&mut block_header_blob.as_slice())?; let cumulative_difficulty = combine_low_high_bits_to_u128( block_info.cumulative_difficulty_low, @@ -304,7 +323,7 @@ pub fn block_exists( //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] -#[expect(clippy::significant_drop_tightening, clippy::too_many_lines)] +#[expect(clippy::too_many_lines)] mod test { use pretty_assertions::assert_eq; @@ -370,7 +389,8 @@ mod test { // Assert only the proper tables were added to. AssertTableLen { block_infos: 3, - block_blobs: 3, + block_header_blobs: 3, + block_txs_hashes: 3, block_heights: 3, key_images: 69, num_outputs: 41, diff --git a/storage/blockchain/src/ops/blockchain.rs b/storage/blockchain/src/ops/blockchain.rs index acda96f..04f8b26 100644 --- a/storage/blockchain/src/ops/blockchain.rs +++ b/storage/blockchain/src/ops/blockchain.rs @@ -138,7 +138,8 @@ mod test { // Assert reads are correct. AssertTableLen { block_infos: 3, - block_blobs: 3, + block_header_blobs: 3, + block_txs_hashes: 3, block_heights: 3, key_images: 69, num_outputs: 41, diff --git a/storage/blockchain/src/ops/output.rs b/storage/blockchain/src/ops/output.rs index f3453e4..1c7c1d7 100644 --- a/storage/blockchain/src/ops/output.rs +++ b/storage/blockchain/src/ops/output.rs @@ -316,7 +316,8 @@ mod test { // Assert proper tables were added to. 
AssertTableLen { block_infos: 0, - block_blobs: 0, + block_header_blobs: 0, + block_txs_hashes: 0, block_heights: 0, key_images: 0, num_outputs: 1, diff --git a/storage/blockchain/src/ops/tx.rs b/storage/blockchain/src/ops/tx.rs index e7dbdcf..c9799a2 100644 --- a/storage/blockchain/src/ops/tx.rs +++ b/storage/blockchain/src/ops/tx.rs @@ -366,7 +366,8 @@ mod test { // Assert only the proper tables were added to. AssertTableLen { block_infos: 0, - block_blobs: 0, + block_header_blobs: 0, + block_txs_hashes: 0, block_heights: 0, key_images: 4, // added to key images pruned_tx_blobs: 0, diff --git a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs index b3ccbbd..719f361 100644 --- a/storage/blockchain/src/service/tests.rs +++ b/storage/blockchain/src/service/tests.rs @@ -241,42 +241,38 @@ async fn test_template( //----------------------------------------------------------------------- Output checks // Create the map of amounts and amount indices. - // - // FIXME: There's definitely a better way to map - // `Vec` -> `HashMap>` let (map, output_count) = { - let mut ids = tables - .outputs_iter() - .keys() - .unwrap() - .map(Result::unwrap) - .collect::>(); - - ids.extend( - tables - .rct_outputs_iter() - .keys() - .unwrap() - .map(Result::unwrap) - .map(|amount_index| PreRctOutputId { - amount: 0, - amount_index, - }), - ); + let mut map = HashMap::>::new(); // Used later to compare the amount of Outputs // returned in the Response is equal to the amount // we asked for. 
- let output_count = ids.len(); + let mut output_count: usize = 0; - let mut map = HashMap::>::new(); - for id in ids { - map.entry(id.amount) - .and_modify(|set| { - set.insert(id.amount_index); - }) - .or_insert_with(|| HashSet::from([id.amount_index])); - } + tables + .outputs_iter() + .keys() + .unwrap() + .map(Result::unwrap) + .chain( + tables + .rct_outputs_iter() + .keys() + .unwrap() + .map(Result::unwrap) + .map(|amount_index| PreRctOutputId { + amount: 0, + amount_index, + }), + ) + .for_each(|id| { + output_count += 1; + map.entry(id.amount) + .and_modify(|set| { + set.insert(id.amount_index); + }) + .or_insert_with(|| HashSet::from([id.amount_index])); + }); (map, output_count) }; @@ -347,7 +343,8 @@ async fn v1_tx2() { 14_535_350_982_449, AssertTableLen { block_infos: 1, - block_blobs: 1, + block_header_blobs: 1, + block_txs_hashes: 1, block_heights: 1, key_images: 65, num_outputs: 41, @@ -373,7 +370,8 @@ async fn v9_tx3() { 3_403_774_022_163, AssertTableLen { block_infos: 1, - block_blobs: 1, + block_header_blobs: 1, + block_txs_hashes: 1, block_heights: 1, key_images: 4, num_outputs: 0, @@ -399,7 +397,8 @@ async fn v16_tx0() { 600_000_000_000, AssertTableLen { block_infos: 1, - block_blobs: 1, + block_header_blobs: 1, + block_txs_hashes: 1, block_heights: 1, key_images: 0, num_outputs: 0, diff --git a/storage/blockchain/src/tables.rs b/storage/blockchain/src/tables.rs index 75c33ae..b9fc5ed 100644 --- a/storage/blockchain/src/tables.rs +++ b/storage/blockchain/src/tables.rs @@ -9,7 +9,7 @@ //! Table structs are `CamelCase`, and their static string //! names used by the actual database backend are `snake_case`. //! -//! For example: [`BlockBlobs`] -> `block_blobs`. +//! For example: [`BlockHeaderBlobs`] -> `block_header_blobs`. //! //! # Traits //! 
This module also contains a set of traits for @@ -18,9 +18,9 @@ //---------------------------------------------------------------------------------------------------- Import use crate::types::{ AltBlockHeight, AltChainInfo, AltTransactionInfo, Amount, AmountIndex, AmountIndices, - BlockBlob, BlockHash, BlockHeight, BlockInfo, CompactAltBlockInfo, KeyImage, Output, - PreRctOutputId, PrunableBlob, PrunableHash, PrunedBlob, RawChainId, RctOutput, TxBlob, TxHash, - TxId, UnlockTime, + BlockBlob, BlockHash, BlockHeaderBlob, BlockHeight, BlockInfo, BlockTxHashes, + CompactAltBlockInfo, KeyImage, Output, PreRctOutputId, PrunableBlob, PrunableHash, PrunedBlob, + RawChainId, RctOutput, TxBlob, TxHash, TxId, UnlockTime, }; //---------------------------------------------------------------------------------------------------- Tables @@ -30,22 +30,28 @@ use crate::types::{ // - If adding/changing a table also edit: // - the tests in `src/backend/tests.rs` cuprate_database::define_tables! { - /// Serialized block blobs (bytes). + /// Serialized block header blobs (bytes). /// - /// Contains the serialized version of all blocks. - 0 => BlockBlobs, - BlockHeight => BlockBlob, + /// Contains the serialized version of all blocks headers. + 0 => BlockHeaderBlobs, + BlockHeight => BlockHeaderBlob, + + /// Block transactions hashes + /// + /// Contains all the transaction hashes of all blocks. + 1 => BlockTxsHashes, + BlockHeight => BlockTxHashes, /// Block heights. /// /// Contains the height of all blocks. - 1 => BlockHeights, + 2 => BlockHeights, BlockHash => BlockHeight, /// Block information. /// /// Contains metadata of all blocks. - 2 => BlockInfos, + 3 => BlockInfos, BlockHeight => BlockInfo, /// Set of key images. @@ -54,38 +60,38 @@ cuprate_database::define_tables! { /// /// This table has `()` as the value type, as in, /// it is a set of key images. - 3 => KeyImages, + 4 => KeyImages, KeyImage => (), /// Maps an output's amount to the number of outputs with that amount. 
/// /// For example, if there are 5 outputs with `amount = 123` /// then calling `get(123)` on this table will return 5. - 4 => NumOutputs, + 5 => NumOutputs, Amount => u64, /// Pre-RCT output data. - 5 => Outputs, + 6 => Outputs, PreRctOutputId => Output, /// Pruned transaction blobs (bytes). /// /// Contains the pruned portion of serialized transaction data. - 6 => PrunedTxBlobs, + 7 => PrunedTxBlobs, TxId => PrunedBlob, /// Prunable transaction blobs (bytes). /// /// Contains the prunable portion of serialized transaction data. // SOMEDAY: impl when `monero-serai` supports pruning - 7 => PrunableTxBlobs, + 8 => PrunableTxBlobs, TxId => PrunableBlob, /// Prunable transaction hashes. /// /// Contains the prunable portion of transaction hashes. // SOMEDAY: impl when `monero-serai` supports pruning - 8 => PrunableHashes, + 9 => PrunableHashes, TxId => PrunableHash, // SOMEDAY: impl a properties table: @@ -95,74 +101,74 @@ cuprate_database::define_tables! { // StorableString => StorableVec, /// RCT output data. - 9 => RctOutputs, + 10 => RctOutputs, AmountIndex => RctOutput, /// Transaction blobs (bytes). /// /// Contains the serialized version of all transactions. // SOMEDAY: remove when `monero-serai` supports pruning - 10 => TxBlobs, + 11 => TxBlobs, TxId => TxBlob, /// Transaction indices. /// /// Contains the indices all transactions. - 11 => TxIds, + 12 => TxIds, TxHash => TxId, /// Transaction heights. /// /// Contains the block height associated with all transactions. - 12 => TxHeights, + 13 => TxHeights, TxId => BlockHeight, /// Transaction outputs. /// /// Contains the list of `AmountIndex`'s of the /// outputs associated with all transactions. - 13 => TxOutputs, + 14 => TxOutputs, TxId => AmountIndices, /// Transaction unlock time. /// /// Contains the unlock time of transactions IF they have one. /// Transactions without unlock times will not exist in this table. 
- 14 => TxUnlockTime, + 15 => TxUnlockTime, TxId => UnlockTime, /// Information on alt-chains. - 15 => AltChainInfos, + 16 => AltChainInfos, RawChainId => AltChainInfo, /// Alt-block heights. /// /// Contains the height of all alt-blocks. - 16 => AltBlockHeights, + 17 => AltBlockHeights, BlockHash => AltBlockHeight, /// Alt-block information. /// /// Contains information on all alt-blocks. - 17 => AltBlocksInfo, + 18 => AltBlocksInfo, AltBlockHeight => CompactAltBlockInfo, /// Alt-block blobs. /// /// Contains the raw bytes of all alt-blocks. - 18 => AltBlockBlobs, + 19 => AltBlockBlobs, AltBlockHeight => BlockBlob, /// Alt-block transaction blobs. /// /// Contains the raw bytes of alt transactions, if those transactions are not in the main-chain. - 19 => AltTransactionBlobs, + 20 => AltTransactionBlobs, TxHash => TxBlob, /// Alt-block transaction information. /// /// Contains information on all alt transactions, even if they are in the main-chain. - 20 => AltTransactionInfos, + 21 => AltTransactionInfos, TxHash => AltTransactionInfo, } diff --git a/storage/blockchain/src/tests.rs b/storage/blockchain/src/tests.rs index d57a371..1fe2063 100644 --- a/storage/blockchain/src/tests.rs +++ b/storage/blockchain/src/tests.rs @@ -9,7 +9,7 @@ use std::{borrow::Cow, fmt::Debug}; use pretty_assertions::assert_eq; -use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner}; +use cuprate_database::{DatabaseRo, Env, EnvInner}; use cuprate_types::{AltBlockInformation, ChainId, VerifiedBlockInformation}; use crate::{ @@ -26,7 +26,8 @@ use crate::{ #[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub(crate) struct AssertTableLen { pub(crate) block_infos: u64, - pub(crate) block_blobs: u64, + pub(crate) block_header_blobs: u64, + pub(crate) block_txs_hashes: u64, pub(crate) block_heights: u64, pub(crate) key_images: u64, pub(crate) num_outputs: u64, @@ -46,7 +47,8 @@ impl AssertTableLen { pub(crate) fn assert(self, tables: &impl Tables) { let other = 
Self { block_infos: tables.block_infos().len().unwrap(), - block_blobs: tables.block_blobs().len().unwrap(), + block_header_blobs: tables.block_header_blobs().len().unwrap(), + block_txs_hashes: tables.block_txs_hashes().len().unwrap(), block_heights: tables.block_heights().len().unwrap(), key_images: tables.key_images().len().unwrap(), num_outputs: tables.num_outputs().len().unwrap(), @@ -69,8 +71,7 @@ impl AssertTableLen { /// Create an `Env` in a temporarily directory. /// The directory is automatically removed after the `TempDir` is dropped. /// -/// FIXME: changing this to `-> impl Env` causes lifetime errors... -pub(crate) fn tmp_concrete_env() -> (ConcreteEnv, tempfile::TempDir) { +pub(crate) fn tmp_concrete_env() -> (impl Env, tempfile::TempDir) { let tempdir = tempfile::tempdir().unwrap(); let config = ConfigBuilder::new() .db_directory(Cow::Owned(tempdir.path().into())) @@ -82,7 +83,7 @@ pub(crate) fn tmp_concrete_env() -> (ConcreteEnv, tempfile::TempDir) { } /// Assert all the tables in the environment are empty. -pub(crate) fn assert_all_tables_are_empty(env: &ConcreteEnv) { +pub(crate) fn assert_all_tables_are_empty(env: &impl Env) { let env_inner = env.env_inner(); let tx_ro = env_inner.tx_ro().unwrap(); let tables = env_inner.open_tables(&tx_ro).unwrap(); diff --git a/storage/blockchain/src/types.rs b/storage/blockchain/src/types.rs index 6afd3d4..86ef91c 100644 --- a/storage/blockchain/src/types.rs +++ b/storage/blockchain/src/types.rs @@ -66,6 +66,12 @@ pub type AmountIndices = StorableVec; /// A serialized block. pub type BlockBlob = StorableVec; +/// A serialized block header +pub type BlockHeaderBlob = StorableVec; + +/// A block transaction hashes +pub type BlockTxHashes = StorableVec<[u8; 32]>; + /// A block's hash. 
pub type BlockHash = [u8; 32]; @@ -166,6 +172,7 @@ impl Key for PreRctOutputId {} /// block_hash: [54; 32], /// cumulative_rct_outs: 2389, /// long_term_weight: 2389, +/// mining_tx_index: 23 /// }; /// let b = Storable::as_bytes(&a); /// let c: BlockInfo = Storable::from_bytes(b); @@ -175,7 +182,7 @@ impl Key for PreRctOutputId {} /// # Size & Alignment /// ```rust /// # use cuprate_blockchain::types::*; -/// assert_eq!(size_of::(), 88); +/// assert_eq!(size_of::(), 96); /// assert_eq!(align_of::(), 8); /// ``` #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] @@ -202,6 +209,8 @@ pub struct BlockInfo { /// /// See [`long_term_weight`](https://monero-book.cuprate.org/consensus_rules/blocks/weights.html#long-term-block-weight). pub long_term_weight: usize, + /// [`TxId`] (u64) of the block coinbase transaction. + pub mining_tx_index: TxId, } //---------------------------------------------------------------------------------------------------- OutputFlags From 19150df355142f7c4e0664881c94bafaf2f9b673 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Fri, 20 Sep 2024 09:36:34 -0400 Subject: [PATCH 064/104] p2p/dandelion-tower: enable workspace lints (#287) * dandelion-tower: add/fix workspace lints * fmt * fixes * todos * fixes * fixes * expect reason --- p2p/dandelion-tower/Cargo.toml | 5 ++++- p2p/dandelion-tower/src/config.rs | 4 ++-- p2p/dandelion-tower/src/lib.rs | 4 ++-- p2p/dandelion-tower/src/pool/incoming_tx.rs | 2 +- p2p/dandelion-tower/src/pool/manager.rs | 8 ++++---- p2p/dandelion-tower/src/router.rs | 10 +++++----- p2p/dandelion-tower/src/tests/mod.rs | 12 ++++++------ p2p/dandelion-tower/src/tests/pool.rs | 2 +- 8 files changed, 25 insertions(+), 22 deletions(-) diff --git a/p2p/dandelion-tower/Cargo.toml b/p2p/dandelion-tower/Cargo.toml index 976dad6..92e4915 100644 --- a/p2p/dandelion-tower/Cargo.toml +++ b/p2p/dandelion-tower/Cargo.toml @@ -24,4 +24,7 @@ thiserror = { workspace = true } [dev-dependencies] tokio = { workspace = true, 
features = ["rt-multi-thread", "macros", "sync"] } -proptest = { workspace = true, features = ["default"] } \ No newline at end of file +proptest = { workspace = true, features = ["default"] } + +[lints] +workspace = true \ No newline at end of file diff --git a/p2p/dandelion-tower/src/config.rs b/p2p/dandelion-tower/src/config.rs index 6266d60..46c780a 100644 --- a/p2p/dandelion-tower/src/config.rs +++ b/p2p/dandelion-tower/src/config.rs @@ -8,7 +8,7 @@ use std::{ /// (1 - ep) is the probability that a transaction travels for `k` hops before a nodes embargo timeout fires, this constant is (1 - ep). const EMBARGO_FULL_TRAVEL_PROBABILITY: f64 = 0.90; -/// The graph type to use for dandelion routing, the dandelion paper recommends [Graph::FourRegular]. +/// The graph type to use for dandelion routing, the dandelion paper recommends [`Graph::FourRegular`]. /// /// The decision between line graphs and 4-regular graphs depend on the priorities of the system, if /// linkability of transactions is a first order concern then line graphs may be better, however 4-regular graphs @@ -66,7 +66,7 @@ impl DandelionConfig { /// Returns the number of outbound peers to use to stem transactions. /// /// This value depends on the [`Graph`] chosen. - pub fn number_of_stems(&self) -> usize { + pub const fn number_of_stems(&self) -> usize { match self.graph { Graph::Line => 1, Graph::FourRegular => 2, diff --git a/p2p/dandelion-tower/src/lib.rs b/p2p/dandelion-tower/src/lib.rs index 60b5ea5..2c8de71 100644 --- a/p2p/dandelion-tower/src/lib.rs +++ b/p2p/dandelion-tower/src/lib.rs @@ -26,7 +26,7 @@ //! The diffuse service should have a request of [`DiffuseRequest`](traits::DiffuseRequest) and it's error //! should be [`tower::BoxError`]. //! -//! ## Outbound Peer TryStream +//! ## Outbound Peer `TryStream` //! //! The outbound peer [`TryStream`](futures::TryStream) should provide a stream of randomly selected outbound //! peers, these peers will then be used to route stem txs to. 
@@ -37,7 +37,7 @@ //! ## Peer Service //! //! This service represents a connection to an individual peer, this should be returned from the Outbound Peer -//! TryStream. This should immediately send the transaction to the peer when requested, it should _not_ set +//! `TryStream`. This should immediately send the transaction to the peer when requested, it should _not_ set //! a timer. //! //! The peer service should have a request of [`StemRequest`](traits::StemRequest) and its error diff --git a/p2p/dandelion-tower/src/pool/incoming_tx.rs b/p2p/dandelion-tower/src/pool/incoming_tx.rs index c9a30de..13cdffe 100644 --- a/p2p/dandelion-tower/src/pool/incoming_tx.rs +++ b/p2p/dandelion-tower/src/pool/incoming_tx.rs @@ -30,7 +30,7 @@ pub struct IncomingTxBuilder impl IncomingTxBuilder { /// Creates a new [`IncomingTxBuilder`]. - pub fn new(tx: Tx, tx_id: TxId) -> Self { + pub const fn new(tx: Tx, tx_id: TxId) -> Self { Self { tx, tx_id, diff --git a/p2p/dandelion-tower/src/pool/manager.rs b/p2p/dandelion-tower/src/pool/manager.rs index 9e1572e..2ac3302 100644 --- a/p2p/dandelion-tower/src/pool/manager.rs +++ b/p2p/dandelion-tower/src/pool/manager.rs @@ -88,9 +88,7 @@ where .insert(peer.clone()); } - let state = from - .map(|from| TxState::Stem { from }) - .unwrap_or(TxState::Local); + let state = from.map_or(TxState::Local, |from| TxState::Stem { from }); let fut = self .dandelion_router @@ -280,13 +278,15 @@ where }; if let Err(e) = self.handle_incoming_tx(tx, routing_state, tx_id).await { + #[expect(clippy::let_underscore_must_use, reason = "dropped receivers can be ignored")] let _ = res_tx.send(()); tracing::error!("Error handling transaction in dandelion pool: {e}"); return; } - let _ = res_tx.send(()); + #[expect(clippy::let_underscore_must_use)] + let _ = res_tx.send(()); } } } diff --git a/p2p/dandelion-tower/src/router.rs b/p2p/dandelion-tower/src/router.rs index edeccae..88702be 100644 --- a/p2p/dandelion-tower/src/router.rs +++ 
b/p2p/dandelion-tower/src/router.rs @@ -140,7 +140,7 @@ where State::Stem }; - DandelionRouter { + Self { outbound_peer_discover: Box::pin(outbound_peer_discover), broadcast_svc, current_state, @@ -198,7 +198,7 @@ where fn stem_tx( &mut self, tx: Tx, - from: Id, + from: &Id, ) -> BoxFuture<'static, Result> { if self.stem_peers.is_empty() { tracing::debug!("Stem peers are empty, fluffing stem transaction."); @@ -216,7 +216,7 @@ where }); let Some(peer) = self.stem_peers.get_mut(stem_route) else { - self.stem_routes.remove(&from); + self.stem_routes.remove(from); continue; }; @@ -302,7 +302,7 @@ where tracing::debug!( parent: span, "Peer returned an error on `poll_ready`: {e}, removing from router.", - ) + ); }) .is_ok(), Poll::Pending => { @@ -341,7 +341,7 @@ where State::Stem => { tracing::trace!(parent: &self.span, "Steming transaction"); - self.stem_tx(req.tx, from) + self.stem_tx(req.tx, &from) } }, TxState::Local => { diff --git a/p2p/dandelion-tower/src/tests/mod.rs b/p2p/dandelion-tower/src/tests/mod.rs index 1c6a3e0..601ee25 100644 --- a/p2p/dandelion-tower/src/tests/mod.rs +++ b/p2p/dandelion-tower/src/tests/mod.rs @@ -12,7 +12,7 @@ use crate::{ OutboundPeer, State, }; -pub fn mock_discover_svc() -> ( +pub(crate) fn mock_discover_svc() -> ( impl Stream< Item = Result< OutboundPeer< @@ -49,7 +49,7 @@ pub fn mock_discover_svc() -> ( (discover, rx) } -pub fn mock_broadcast_svc() -> ( +pub(crate) fn mock_broadcast_svc() -> ( impl Service< Req, Future = impl Future> + Send + 'static, @@ -70,8 +70,8 @@ pub fn mock_broadcast_svc() -> ( ) } -#[allow(clippy::type_complexity)] // just test code. 
-pub fn mock_in_memory_backing_pool< +#[expect(clippy::type_complexity, reason = "just test code.")] +pub(crate) fn mock_in_memory_backing_pool< Tx: Clone + Send + 'static, TxID: Clone + Hash + Eq + Send + 'static, >() -> ( @@ -85,11 +85,11 @@ pub fn mock_in_memory_backing_pool< Arc>>, ) { let txs = Arc::new(std::sync::Mutex::new(HashMap::new())); - let txs_2 = txs.clone(); + let txs_2 = Arc::clone(&txs); ( service_fn(move |req: TxStoreRequest| { - let txs = txs.clone(); + let txs = Arc::clone(&txs); async move { match req { TxStoreRequest::Get(tx_id) => { diff --git a/p2p/dandelion-tower/src/tests/pool.rs b/p2p/dandelion-tower/src/tests/pool.rs index b7fa55e..70f642a 100644 --- a/p2p/dandelion-tower/src/tests/pool.rs +++ b/p2p/dandelion-tower/src/tests/pool.rs @@ -39,5 +39,5 @@ async fn basic_functionality() { // TODO: the DandelionPoolManager doesn't handle adding txs to the pool, add more tests here to test // all functionality. //assert!(pool.lock().unwrap().contains_key(&1)); - assert!(broadcast_rx.try_recv().is_ok()) + assert!(broadcast_rx.try_recv().is_ok()); } From 5588671501a34e8e28fa097102976fa95045477c Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Fri, 20 Sep 2024 10:11:27 -0400 Subject: [PATCH 065/104] levin: enable workspace lints (#292) * levin: enable workspace lints * use `drop()` * dep fixes --- Cargo.lock | 1 + Cargo.toml | 1 - net/levin/Cargo.toml | 6 +++- net/levin/src/codec.rs | 14 ++++---- net/levin/src/header.rs | 10 +++--- net/levin/src/lib.rs | 48 ++++++++++++++++----------- net/levin/src/message.rs | 17 +++++----- net/levin/tests/fragmented_message.rs | 14 +++++--- 8 files changed, 66 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 61fa2a2..054ca85 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -724,6 +724,7 @@ version = "0.1.0" dependencies = [ "bitflags 2.6.0", "bytes", + "cfg-if", "cuprate-helper", "futures", "proptest", diff --git a/Cargo.toml b/Cargo.toml index f991f73..2554fbd 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -210,7 +210,6 @@ unseparated_literal_suffix = "deny" unnecessary_safety_doc = "deny" unnecessary_safety_comment = "deny" unnecessary_self_imports = "deny" -tests_outside_test_module = "deny" string_to_string = "deny" rest_pat_in_fully_bound_structs = "deny" redundant_type_annotations = "deny" diff --git a/net/levin/Cargo.toml b/net/levin/Cargo.toml index 1c585b9..68c32e5 100644 --- a/net/levin/Cargo.toml +++ b/net/levin/Cargo.toml @@ -14,6 +14,7 @@ tracing = ["dep:tracing", "tokio-util/tracing"] [dependencies] cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] } +cfg-if = { workspace = true } thiserror = { workspace = true } bytes = { workspace = true, features = ["std"] } bitflags = { workspace = true } @@ -26,4 +27,7 @@ proptest = { workspace = true } rand = { workspace = true, features = ["std", "std_rng"] } tokio-util = { workspace = true, features = ["io-util"]} tokio = { workspace = true, features = ["full"] } -futures = { workspace = true, features = ["std"] } \ No newline at end of file +futures = { workspace = true, features = ["std"] } + +[lints] +workspace = true \ No newline at end of file diff --git a/net/levin/src/codec.rs b/net/levin/src/codec.rs index 1177733..4c7695e 100644 --- a/net/levin/src/codec.rs +++ b/net/levin/src/codec.rs @@ -47,7 +47,7 @@ pub struct LevinBucketCodec { impl Default for LevinBucketCodec { fn default() -> Self { - LevinBucketCodec { + Self { state: LevinBucketState::WaitingForHeader, protocol: Protocol::default(), handshake_message_seen: false, @@ -56,8 +56,8 @@ impl Default for LevinBucketCodec { } impl LevinBucketCodec { - pub fn new(protocol: Protocol) -> Self { - LevinBucketCodec { + pub const fn new(protocol: Protocol) -> Self { + Self { state: LevinBucketState::WaitingForHeader, protocol, handshake_message_seen: false, @@ -112,8 +112,10 @@ impl Decoder for LevinBucketCodec { } } - let _ = - std::mem::replace(&mut self.state, LevinBucketState::WaitingForBody(head)); + 
drop(std::mem::replace( + &mut self.state, + LevinBucketState::WaitingForBody(head), + )); } LevinBucketState::WaitingForBody(head) => { let body_len = u64_to_usize(head.size); @@ -145,7 +147,7 @@ impl Encoder> for LevinBucketCodec { type Error = BucketError; fn encode(&mut self, item: Bucket, dst: &mut BytesMut) -> Result<(), Self::Error> { if let Some(additional) = (HEADER_SIZE + item.body.len()).checked_sub(dst.capacity()) { - dst.reserve(additional) + dst.reserve(additional); } item.header.write_bytes_into(dst); diff --git a/net/levin/src/header.rs b/net/levin/src/header.rs index 7acd085..057eee8 100644 --- a/net/levin/src/header.rs +++ b/net/levin/src/header.rs @@ -13,7 +13,7 @@ // copies or substantial portions of the Software. // -//! This module provides a struct BucketHead for the header of a levin protocol +//! This module provides a struct `BucketHead` for the header of a levin protocol //! message. use bitflags::bitflags; @@ -62,7 +62,7 @@ bitflags! { impl From for Flags { fn from(value: u32) -> Self { - Flags(value) + Self(value) } } @@ -99,9 +99,9 @@ impl BucketHead { /// /// # Panics /// This function will panic if there aren't enough bytes to fill the header. - /// Currently [HEADER_SIZE] - pub fn from_bytes(buf: &mut BytesMut) -> BucketHead { - BucketHead { + /// Currently [`HEADER_SIZE`] + pub fn from_bytes(buf: &mut BytesMut) -> Self { + Self { signature: buf.get_u64_le(), size: buf.get_u64_le(), have_to_return_data: buf.get_u8() != 0, diff --git a/net/levin/src/lib.rs b/net/levin/src/lib.rs index ab03bfb..a3f4b69 100644 --- a/net/levin/src/lib.rs +++ b/net/levin/src/lib.rs @@ -33,6 +33,16 @@ #![deny(unused_mut)] //#![deny(missing_docs)] +cfg_if::cfg_if! { + // Used in `tests/`. 
+ if #[cfg(test)] { + use futures as _; + use proptest as _; + use rand as _; + use tokio as _; + } +} + use std::fmt::Debug; use bytes::{Buf, Bytes}; @@ -99,7 +109,7 @@ pub struct Protocol { impl Default for Protocol { fn default() -> Self { - Protocol { + Self { version: MONERO_PROTOCOL_VERSION, signature: MONERO_LEVIN_SIGNATURE, max_packet_size_before_handshake: MONERO_MAX_PACKET_SIZE_BEFORE_HANDSHAKE, @@ -130,22 +140,22 @@ pub enum MessageType { impl MessageType { /// Returns if the message requires a response - pub fn have_to_return_data(&self) -> bool { + pub const fn have_to_return_data(&self) -> bool { match self { - MessageType::Request => true, - MessageType::Response | MessageType::Notification => false, + Self::Request => true, + Self::Response | Self::Notification => false, } } - /// Returns the `MessageType` given the flags and have_to_return_data fields - pub fn from_flags_and_have_to_return( + /// Returns the `MessageType` given the flags and `have_to_return_data` fields + pub const fn from_flags_and_have_to_return( flags: Flags, have_to_return: bool, ) -> Result { Ok(match (flags, have_to_return) { - (Flags::REQUEST, true) => MessageType::Request, - (Flags::REQUEST, false) => MessageType::Notification, - (Flags::RESPONSE, false) => MessageType::Response, + (Flags::REQUEST, true) => Self::Request, + (Flags::REQUEST, false) => Self::Notification, + (Flags::RESPONSE, false) => Self::Response, _ => { return Err(BucketError::InvalidHeaderFlags( "Unable to assign a message type to this bucket", @@ -154,10 +164,10 @@ impl MessageType { }) } - pub fn as_flags(&self) -> header::Flags { + pub const fn as_flags(&self) -> Flags { match self { - MessageType::Request | MessageType::Notification => header::Flags::REQUEST, - MessageType::Response => header::Flags::RESPONSE, + Self::Request | Self::Notification => Flags::REQUEST, + Self::Response => Flags::RESPONSE, } } } @@ -173,7 +183,7 @@ pub struct BucketBuilder { } impl BucketBuilder { - pub fn new(protocol: 
&Protocol) -> Self { + pub const fn new(protocol: &Protocol) -> Self { Self { signature: Some(protocol.signature), ty: None, @@ -185,27 +195,27 @@ impl BucketBuilder { } pub fn set_signature(&mut self, sig: u64) { - self.signature = Some(sig) + self.signature = Some(sig); } pub fn set_message_type(&mut self, ty: MessageType) { - self.ty = Some(ty) + self.ty = Some(ty); } pub fn set_command(&mut self, command: C) { - self.command = Some(command) + self.command = Some(command); } pub fn set_return_code(&mut self, code: i32) { - self.return_code = Some(code) + self.return_code = Some(code); } pub fn set_protocol_version(&mut self, version: u32) { - self.protocol_version = Some(version) + self.protocol_version = Some(version); } pub fn set_body(&mut self, body: Bytes) { - self.body = Some(body) + self.body = Some(body); } pub fn finish(self) -> Bucket { diff --git a/net/levin/src/message.rs b/net/levin/src/message.rs index 19aa1b5..32be653 100644 --- a/net/levin/src/message.rs +++ b/net/levin/src/message.rs @@ -33,13 +33,13 @@ pub enum LevinMessage { impl From for LevinMessage { fn from(value: T) -> Self { - LevinMessage::Body(value) + Self::Body(value) } } impl From> for LevinMessage { fn from(value: Bucket) -> Self { - LevinMessage::Bucket(value) + Self::Bucket(value) } } @@ -58,7 +58,7 @@ pub struct Dummy(pub usize); impl From for LevinMessage { fn from(value: Dummy) -> Self { - LevinMessage::Dummy(value.0) + Self::Dummy(value.0) } } @@ -76,12 +76,11 @@ pub fn make_fragmented_messages( fragment_size: usize, message: T, ) -> Result>, BucketError> { - if fragment_size * 2 < HEADER_SIZE { - panic!( - "Fragment size: {fragment_size}, is too small, must be at least {}", - 2 * HEADER_SIZE - ); - } + assert!( + fragment_size * 2 >= HEADER_SIZE, + "Fragment size: {fragment_size}, is too small, must be at least {}", + 2 * HEADER_SIZE + ); let mut builder = BucketBuilder::new(protocol); message.encode(&mut builder)?; diff --git a/net/levin/tests/fragmented_message.rs 
b/net/levin/tests/fragmented_message.rs index 512fd46..f34b145 100644 --- a/net/levin/tests/fragmented_message.rs +++ b/net/levin/tests/fragmented_message.rs @@ -1,3 +1,9 @@ +#![expect( + clippy::tests_outside_test_module, + unused_crate_dependencies, + reason = "outer test module" +)] + use bytes::{Buf, BufMut, Bytes, BytesMut}; use futures::{SinkExt, StreamExt}; use proptest::{prelude::any_with, prop_assert_eq, proptest, sample::size_range}; @@ -58,12 +64,12 @@ impl LevinBody for TestBody { ) -> Result { let size = u64_to_usize(body.get_u64_le()); // bucket - Ok(TestBody::Bytes(size, body.copy_to_bytes(size))) + Ok(Self::Bytes(size, body.copy_to_bytes(size))) } fn encode(self, builder: &mut BucketBuilder) -> Result<(), BucketError> { match self { - TestBody::Bytes(len, bytes) => { + Self::Bytes(len, bytes) => { let mut buf = BytesMut::new(); buf.put_u64_le(len as u64); buf.extend_from_slice(bytes.as_ref()); @@ -141,12 +147,12 @@ proptest! { message2.extend_from_slice(&fragments[0].body[(33 + 8)..]); for frag in fragments.iter().skip(1) { - message2.extend_from_slice(frag.body.as_ref()) + message2.extend_from_slice(frag.body.as_ref()); } prop_assert_eq!(message.as_slice(), &message2[0..message.len()], "numb_fragments: {}", fragments.len()); - for byte in message2[message.len()..].iter(){ + for byte in &message2[message.len()..]{ prop_assert_eq!(*byte, 0); } } From 57af45e01d0f42c463fc33c1dc9464c4389d9769 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Fri, 20 Sep 2024 10:13:55 -0400 Subject: [PATCH 066/104] epee-encoding: enable workspace lints (#294) * epee-encoding: enable workspace lints * fmt * fixes * fixes * fmt --- net/epee-encoding/Cargo.toml | 3 +++ net/epee-encoding/src/container_as_blob.rs | 10 ++++---- net/epee-encoding/src/error.rs | 18 +++++++------- net/epee-encoding/src/io.rs | 26 +++++++++++--------- net/epee-encoding/src/lib.rs | 16 ++++++------- net/epee-encoding/src/marker.rs | 26 ++++++++++---------- net/epee-encoding/src/value.rs | 28 
++++++++++------------ net/epee-encoding/src/varint.rs | 11 +++++---- net/epee-encoding/tests/alt_name.rs | 2 ++ net/epee-encoding/tests/duplicate_key.rs | 2 ++ net/epee-encoding/tests/epee_default.rs | 6 +++-- net/epee-encoding/tests/flattened.rs | 3 +++ net/epee-encoding/tests/options.rs | 5 ++-- net/epee-encoding/tests/p2p.rs | 4 +++- net/epee-encoding/tests/rpc.rs | 2 ++ net/epee-encoding/tests/seq.rs | 2 ++ net/epee-encoding/tests/stack_overflow.rs | 4 +++- 17 files changed, 96 insertions(+), 72 deletions(-) diff --git a/net/epee-encoding/Cargo.toml b/net/epee-encoding/Cargo.toml index 85ee2c9..90a339f 100644 --- a/net/epee-encoding/Cargo.toml +++ b/net/epee-encoding/Cargo.toml @@ -25,3 +25,6 @@ thiserror = { workspace = true, optional = true} [dev-dependencies] hex = { workspace = true, features = ["default"] } + +[lints] +workspace = true \ No newline at end of file diff --git a/net/epee-encoding/src/container_as_blob.rs b/net/epee-encoding/src/container_as_blob.rs index 701ec66..83078c2 100644 --- a/net/epee-encoding/src/container_as_blob.rs +++ b/net/epee-encoding/src/container_as_blob.rs @@ -9,7 +9,7 @@ pub struct ContainerAsBlob(Vec); impl From> for ContainerAsBlob { fn from(value: Vec) -> Self { - ContainerAsBlob(value) + Self(value) } } @@ -36,9 +36,7 @@ impl EpeeValue for ContainerAsBlob { )); } - Ok(ContainerAsBlob( - bytes.chunks(T::SIZE).map(T::from_bytes).collect(), - )) + Ok(Self(bytes.chunks(T::SIZE).map(T::from_bytes).collect())) } fn should_write(&self) -> bool { @@ -46,10 +44,10 @@ impl EpeeValue for ContainerAsBlob { } fn epee_default_value() -> Option { - Some(ContainerAsBlob(vec![])) + Some(Self(vec![])) } - fn write(self, w: &mut B) -> crate::Result<()> { + fn write(self, w: &mut B) -> Result<()> { let mut buf = BytesMut::with_capacity(self.0.len() * T::SIZE); self.0.iter().for_each(|tt| tt.push_bytes(&mut buf)); buf.write(w) diff --git a/net/epee-encoding/src/error.rs b/net/epee-encoding/src/error.rs index 4b3c7b0..756cd13 100644 --- 
a/net/epee-encoding/src/error.rs +++ b/net/epee-encoding/src/error.rs @@ -7,6 +7,7 @@ use core::{ pub type Result = core::result::Result; #[cfg_attr(feature = "std", derive(thiserror::Error))] +#[expect(clippy::error_impl_error, reason = "FIXME: rename this type")] pub enum Error { #[cfg_attr(feature = "std", error("IO error: {0}"))] IO(&'static str), @@ -17,19 +18,18 @@ pub enum Error { } impl Error { - fn field_name(&self) -> &'static str { + const fn field_name(&self) -> &'static str { match self { - Error::IO(_) => "io", - Error::Format(_) => "format", - Error::Value(_) => "value", + Self::IO(_) => "io", + Self::Format(_) => "format", + Self::Value(_) => "value", } } fn field_data(&self) -> &str { match self { - Error::IO(data) => data, - Error::Format(data) => data, - Error::Value(data) => data, + Self::IO(data) | Self::Format(data) => data, + Self::Value(data) => data, } } } @@ -44,12 +44,12 @@ impl Debug for Error { impl From for Error { fn from(_: TryFromIntError) -> Self { - Error::Value("Int is too large".to_string()) + Self::Value("Int is too large".to_string()) } } impl From for Error { fn from(_: Utf8Error) -> Self { - Error::Value("Invalid utf8 str".to_string()) + Self::Value("Invalid utf8 str".to_string()) } } diff --git a/net/epee-encoding/src/io.rs b/net/epee-encoding/src/io.rs index 110a1ec..c118145 100644 --- a/net/epee-encoding/src/io.rs +++ b/net/epee-encoding/src/io.rs @@ -3,7 +3,7 @@ use bytes::{Buf, BufMut}; use crate::error::*; #[inline] -pub fn checked_read_primitive( +pub(crate) fn checked_read_primitive( b: &mut B, read: impl Fn(&mut B) -> R, ) -> Result { @@ -11,16 +11,20 @@ pub fn checked_read_primitive( } #[inline] -pub fn checked_read(b: &mut B, read: impl Fn(&mut B) -> R, size: usize) -> Result { +pub(crate) fn checked_read( + b: &mut B, + read: impl Fn(&mut B) -> R, + size: usize, +) -> Result { if b.remaining() < size { - Err(Error::IO("Not enough bytes in buffer to build object."))?; + Err(Error::IO("Not enough bytes in buffer to 
build object.")) + } else { + Ok(read(b)) } - - Ok(read(b)) } #[inline] -pub fn checked_write_primitive( +pub(crate) fn checked_write_primitive( b: &mut B, write: impl Fn(&mut B, T), t: T, @@ -29,16 +33,16 @@ pub fn checked_write_primitive( } #[inline] -pub fn checked_write( +pub(crate) fn checked_write( b: &mut B, write: impl Fn(&mut B, T), t: T, size: usize, ) -> Result<()> { if b.remaining_mut() < size { - Err(Error::IO("Not enough capacity to write object."))?; + Err(Error::IO("Not enough capacity to write object.")) + } else { + write(b, t); + Ok(()) } - - write(b, t); - Ok(()) } diff --git a/net/epee-encoding/src/lib.rs b/net/epee-encoding/src/lib.rs index fa3449b..d55a546 100644 --- a/net/epee-encoding/src/lib.rs +++ b/net/epee-encoding/src/lib.rs @@ -59,9 +59,12 @@ //! //! ``` +#[cfg(test)] +use hex as _; + extern crate alloc; -use core::{ops::Deref, str::from_utf8 as str_from_utf8}; +use core::str::from_utf8 as str_from_utf8; use bytes::{Buf, BufMut, Bytes, BytesMut}; @@ -130,7 +133,7 @@ pub fn to_bytes(val: T) -> Result { fn read_header(r: &mut B) -> Result<()> { let buf = checked_read(r, |b: &mut B| b.copy_to_bytes(HEADER.len()), HEADER.len())?; - if buf.deref() != HEADER { + if &*buf != HEADER { return Err(Error::Format("Data does not contain header")); } Ok(()) @@ -185,7 +188,7 @@ fn read_object(r: &mut B, skipped_objects: &mut u8) -> Re for _ in 0..number_o_field { let field_name_bytes = read_field_name_bytes(r)?; - let field_name = str_from_utf8(field_name_bytes.deref())?; + let field_name = str_from_utf8(&field_name_bytes)?; if !object_builder.add_field(field_name, r)? 
{ skip_epee_value(r, skipped_objects)?; @@ -289,7 +292,7 @@ where B: BufMut, { write_varint(usize_to_u64(iterator.len()), w)?; - for item in iterator.into_iter() { + for item in iterator { item.write(w)?; } Ok(()) @@ -329,10 +332,7 @@ impl EpeeObject for SkipObject { fn skip_epee_value(r: &mut B, skipped_objects: &mut u8) -> Result<()> { let marker = read_marker(r)?; - let mut len = 1; - if marker.is_seq { - len = read_varint(r)?; - } + let len = if marker.is_seq { read_varint(r)? } else { 1 }; if let Some(size) = marker.inner_marker.size() { let bytes_to_skip = size diff --git a/net/epee-encoding/src/marker.rs b/net/epee-encoding/src/marker.rs index d8ffc4b..16eaa6a 100644 --- a/net/epee-encoding/src/marker.rs +++ b/net/epee-encoding/src/marker.rs @@ -19,13 +19,13 @@ pub enum InnerMarker { } impl InnerMarker { - pub fn size(&self) -> Option { + pub const fn size(&self) -> Option { Some(match self { - InnerMarker::I64 | InnerMarker::U64 | InnerMarker::F64 => 8, - InnerMarker::I32 | InnerMarker::U32 => 4, - InnerMarker::I16 | InnerMarker::U16 => 2, - InnerMarker::I8 | InnerMarker::U8 | InnerMarker::Bool => 1, - InnerMarker::String | InnerMarker::Object => return None, + Self::I64 | Self::U64 | Self::F64 => 8, + Self::I32 | Self::U32 => 4, + Self::I16 | Self::U16 => 2, + Self::I8 | Self::U8 | Self::Bool => 1, + Self::String | Self::Object => return None, }) } } @@ -40,23 +40,23 @@ pub struct Marker { impl Marker { pub(crate) const fn new(inner_marker: InnerMarker) -> Self { - Marker { + Self { inner_marker, is_seq: false, } } + + #[must_use] pub const fn into_seq(self) -> Self { - if self.is_seq { - panic!("Sequence of sequence not allowed!"); - } + assert!(!self.is_seq, "Sequence of sequence not allowed!"); if matches!(self.inner_marker, InnerMarker::U8) { - return Marker { + return Self { inner_marker: InnerMarker::String, is_seq: false, }; } - Marker { + Self { inner_marker: self.inner_marker, is_seq: true, } @@ -112,7 +112,7 @@ impl TryFrom for Marker { _ => 
return Err(Error::Format("Unknown value Marker")), }; - Ok(Marker { + Ok(Self { inner_marker, is_seq, }) diff --git a/net/epee-encoding/src/value.rs b/net/epee-encoding/src/value.rs index 000d89c..816203e 100644 --- a/net/epee-encoding/src/value.rs +++ b/net/epee-encoding/src/value.rs @@ -71,7 +71,7 @@ impl EpeeValue for Vec { let individual_marker = Marker::new(marker.inner_marker); - let mut res = Vec::with_capacity(len); + let mut res = Self::with_capacity(len); for _ in 0..len { res.push(T::read(r, &individual_marker)?); } @@ -83,7 +83,7 @@ impl EpeeValue for Vec { } fn epee_default_value() -> Option { - Some(Vec::new()) + Some(Self::new()) } fn write(self, w: &mut B) -> Result<()> { @@ -181,7 +181,7 @@ impl EpeeValue for Vec { } fn epee_default_value() -> Option { - Some(Vec::new()) + Some(Self::new()) } fn should_write(&self) -> bool { @@ -216,7 +216,7 @@ impl EpeeValue for Bytes { } fn epee_default_value() -> Option { - Some(Bytes::new()) + Some(Self::new()) } fn should_write(&self) -> bool { @@ -247,14 +247,14 @@ impl EpeeValue for BytesMut { return Err(Error::IO("Not enough bytes to fill object")); } - let mut bytes = BytesMut::zeroed(len); + let mut bytes = Self::zeroed(len); r.copy_to_slice(&mut bytes); Ok(bytes) } fn epee_default_value() -> Option { - Some(BytesMut::new()) + Some(Self::new()) } fn should_write(&self) -> bool { @@ -285,12 +285,11 @@ impl EpeeValue for ByteArrayVec { return Err(Error::IO("Not enough bytes to fill object")); } - ByteArrayVec::try_from(r.copy_to_bytes(len)) - .map_err(|_| Error::Format("Field has invalid length")) + Self::try_from(r.copy_to_bytes(len)).map_err(|_| Error::Format("Field has invalid length")) } fn epee_default_value() -> Option { - Some(ByteArrayVec::try_from(Bytes::new()).unwrap()) + Some(Self::try_from(Bytes::new()).unwrap()) } fn should_write(&self) -> bool { @@ -320,8 +319,7 @@ impl EpeeValue for ByteArray { return Err(Error::IO("Not enough bytes to fill object")); } - 
ByteArray::try_from(r.copy_to_bytes(N)) - .map_err(|_| Error::Format("Field has invalid length")) + Self::try_from(r.copy_to_bytes(N)).map_err(|_| Error::Format("Field has invalid length")) } fn write(self, w: &mut B) -> Result<()> { @@ -335,7 +333,7 @@ impl EpeeValue for String { fn read(r: &mut B, marker: &Marker) -> Result { let bytes = Vec::::read(r, marker)?; - String::from_utf8(bytes).map_err(|_| Error::Format("Invalid string")) + Self::from_utf8(bytes).map_err(|_| Error::Format("Invalid string")) } fn should_write(&self) -> bool { @@ -343,7 +341,7 @@ impl EpeeValue for String { } fn epee_default_value() -> Option { - Some(String::new()) + Some(Self::new()) } fn write(self, w: &mut B) -> Result<()> { @@ -383,7 +381,7 @@ impl EpeeValue for Vec<[u8; N]> { let individual_marker = Marker::new(marker.inner_marker); - let mut res = Vec::with_capacity(len); + let mut res = Self::with_capacity(len); for _ in 0..len { res.push(<[u8; N]>::read(r, &individual_marker)?); } @@ -395,7 +393,7 @@ impl EpeeValue for Vec<[u8; N]> { } fn epee_default_value() -> Option { - Some(Vec::new()) + Some(Self::new()) } fn write(self, w: &mut B) -> Result<()> { diff --git a/net/epee-encoding/src/varint.rs b/net/epee-encoding/src/varint.rs index ae9c569..3f191dc 100644 --- a/net/epee-encoding/src/varint.rs +++ b/net/epee-encoding/src/varint.rs @@ -21,14 +21,14 @@ const FITS_IN_FOUR_BYTES: u64 = 2_u64.pow(32 - SIZE_OF_SIZE_MARKER) - 1; /// ``` pub fn read_varint(r: &mut B) -> Result { if !r.has_remaining() { - Err(Error::IO("Not enough bytes to build VarInt"))? + return Err(Error::IO("Not enough bytes to build VarInt")); } let vi_start = r.get_u8(); let len = 1 << (vi_start & 0b11); if r.remaining() < len - 1 { - Err(Error::IO("Not enough bytes to build VarInt"))? 
+ return Err(Error::IO("Not enough bytes to build VarInt")); } let mut vi = u64::from(vi_start >> 2); @@ -67,12 +67,15 @@ pub fn write_varint(number: u64, w: &mut B) -> Result<()> { }; if w.remaining_mut() < 1 << size_marker { - Err(Error::IO("Not enough capacity to write VarInt"))?; + return Err(Error::IO("Not enough capacity to write VarInt")); } let number = (number << 2) | size_marker; - // Although `as` is unsafe we just checked the length. + #[expect( + clippy::cast_possible_truncation, + reason = "Although `as` is unsafe we just checked the length." + )] match size_marker { 0 => w.put_u8(number as u8), 1 => w.put_u16_le(number as u16), diff --git a/net/epee-encoding/tests/alt_name.rs b/net/epee-encoding/tests/alt_name.rs index 8a9bc6f..3ddd1ef 100644 --- a/net/epee-encoding/tests/alt_name.rs +++ b/net/epee-encoding/tests/alt_name.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; struct AltName { diff --git a/net/epee-encoding/tests/duplicate_key.rs b/net/epee-encoding/tests/duplicate_key.rs index 0ed87af..fd8ccc9 100644 --- a/net/epee-encoding/tests/duplicate_key.rs +++ b/net/epee-encoding/tests/duplicate_key.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes}; struct T { diff --git a/net/epee-encoding/tests/epee_default.rs b/net/epee-encoding/tests/epee_default.rs index c221b28..778bbc0 100644 --- a/net/epee-encoding/tests/epee_default.rs +++ b/net/epee-encoding/tests/epee_default.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; pub struct Optional { @@ -58,7 +60,7 @@ fn epee_non_default_does_encode() { let val: Optional = from_bytes(&mut bytes).unwrap(); assert_eq!(val.optional_val, -3); - assert_eq!(val.val, 8) + assert_eq!(val.val, 8); } #[test] @@ -70,5 +72,5 
@@ fn epee_value_not_present_with_default() { let val: Optional = from_bytes(&mut bytes).unwrap(); assert_eq!(val.optional_val, -4); - assert_eq!(val.val, 76) + assert_eq!(val.val, 76); } diff --git a/net/epee-encoding/tests/flattened.rs b/net/epee-encoding/tests/flattened.rs index a737370..dfb951f 100644 --- a/net/epee-encoding/tests/flattened.rs +++ b/net/epee-encoding/tests/flattened.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; struct Child { @@ -37,6 +39,7 @@ epee_object!( ); #[test] +#[expect(clippy::float_cmp)] fn epee_flatten() { let val2 = ParentChild { h: 38.9, diff --git a/net/epee-encoding/tests/options.rs b/net/epee-encoding/tests/options.rs index 5bae9a9..d242124 100644 --- a/net/epee-encoding/tests/options.rs +++ b/net/epee-encoding/tests/options.rs @@ -1,5 +1,6 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; -use std::ops::Deref; #[derive(Clone)] struct T { @@ -28,6 +29,6 @@ fn optional_val_in_data() { ]; let t: T = from_bytes(&mut &bytes[..]).unwrap(); let bytes2 = to_bytes(t.clone()).unwrap(); - assert_eq!(bytes.as_slice(), bytes2.deref()); + assert_eq!(bytes.as_slice(), &*bytes2); assert_eq!(t.val.unwrap(), 21); } diff --git a/net/epee-encoding/tests/p2p.rs b/net/epee-encoding/tests/p2p.rs index 2f74ef6..ba17386 100644 --- a/net/epee-encoding/tests/p2p.rs +++ b/net/epee-encoding/tests/p2p.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; #[derive(Eq, PartialEq, Debug, Clone)] @@ -5,7 +7,7 @@ pub struct SupportFlags(u32); impl From for SupportFlags { fn from(value: u32) -> Self { - SupportFlags(value) + Self(value) } } diff --git a/net/epee-encoding/tests/rpc.rs b/net/epee-encoding/tests/rpc.rs index 973498e..b366854 100644 --- 
a/net/epee-encoding/tests/rpc.rs +++ b/net/epee-encoding/tests/rpc.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; #[derive(Clone, Debug, PartialEq)] diff --git a/net/epee-encoding/tests/seq.rs b/net/epee-encoding/tests/seq.rs index a4685d0..b4ae788 100644 --- a/net/epee-encoding/tests/seq.rs +++ b/net/epee-encoding/tests/seq.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes}; struct ObjSeq { diff --git a/net/epee-encoding/tests/stack_overflow.rs b/net/epee-encoding/tests/stack_overflow.rs index c53420a..78a1120 100644 --- a/net/epee-encoding/tests/stack_overflow.rs +++ b/net/epee-encoding/tests/stack_overflow.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes}; struct D { @@ -737,5 +739,5 @@ fn stack_overflow() { let obj: Result = from_bytes(&mut bytes.as_slice()); - assert!(obj.is_err()) + assert!(obj.is_err()); } From c84005385434f87a8a8eb34bf3c5fd4c086f52d1 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Fri, 20 Sep 2024 20:32:03 -0400 Subject: [PATCH 067/104] consensus: enable workspace lints (#295) * consensus: enable workspace lints * rules/fast-sync: enable workspace lints * typos * fixes * `PoW` -> proof-of-work --- Cargo.lock | 5 +- consensus/Cargo.toml | 8 +- consensus/fast-sync/Cargo.toml | 29 ++-- consensus/fast-sync/src/create.rs | 22 +-- consensus/fast-sync/src/data/hashes_of_hashes | 20 +-- consensus/fast-sync/src/fast_sync.rs | 80 +++++----- consensus/fast-sync/src/lib.rs | 6 + consensus/rules/Cargo.toml | 15 +- consensus/rules/src/blocks.rs | 30 ++-- consensus/rules/src/decomposed_amount.rs | 6 +- consensus/rules/src/genesis.rs | 12 +- consensus/rules/src/hard_forks.rs | 22 +-- consensus/rules/src/hard_forks/tests.rs | 6 +- consensus/rules/src/lib.rs | 9 ++ 
consensus/rules/src/miner_tx.rs | 48 +++--- consensus/rules/src/transactions.rs | 139 +++++++++--------- .../rules/src/transactions/contextual_data.rs | 20 +-- consensus/rules/src/transactions/ring_ct.rs | 16 +- .../rules/src/transactions/ring_signatures.rs | 4 +- consensus/rules/src/transactions/tests.rs | 78 +++++----- consensus/src/batch_verifier.rs | 8 +- consensus/src/block.rs | 36 ++--- consensus/src/block/alt_block.rs | 28 ++-- consensus/src/block/batch_prepare.rs | 18 ++- consensus/src/context.rs | 22 +-- consensus/src/context/alt_chains.rs | 24 +-- consensus/src/context/difficulty.rs | 34 ++--- consensus/src/context/hardforks.rs | 22 +-- consensus/src/context/rx_vms.rs | 44 +++--- consensus/src/context/task.rs | 20 +-- consensus/src/context/tokens.rs | 6 +- consensus/src/context/weight.rs | 36 ++--- consensus/src/lib.rs | 16 +- consensus/src/tests.rs | 2 +- consensus/src/tests/context/data.rs | 9 +- consensus/src/tests/context/difficulty.rs | 34 ++--- consensus/src/tests/context/hardforks.rs | 2 +- consensus/src/tests/context/rx_vms.rs | 1 + consensus/src/tests/context/weight.rs | 5 +- consensus/src/tests/mock_db.rs | 35 ++--- consensus/src/transactions.rs | 52 ++++--- consensus/src/transactions/contextual_data.rs | 14 +- consensus/src/transactions/free.rs | 4 +- consensus/tests/verify_correct_txs.rs | 5 +- types/src/block_complete_entry.rs | 1 - 45 files changed, 536 insertions(+), 517 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 054ca85..72325bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -552,6 +552,7 @@ dependencies = [ name = "cuprate-consensus" version = "0.1.0" dependencies = [ + "cfg-if", "cuprate-consensus-rules", "cuprate-helper", "cuprate-test-utils", @@ -579,6 +580,7 @@ dependencies = [ name = "cuprate-consensus-rules" version = "0.1.0" dependencies = [ + "cfg-if", "crypto-bigint", "cuprate-cryptonight", "cuprate-helper", @@ -670,15 +672,14 @@ dependencies = [ "cuprate-blockchain", "cuprate-consensus", "cuprate-consensus-rules", + 
"cuprate-helper", "cuprate-types", "hex", "hex-literal", "monero-serai", - "rayon", "sha3", "thiserror", "tokio", - "tokio-test", "tower", ] diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index bd3994a..12d97ee 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -12,6 +12,7 @@ cuprate-helper = { path = "../helper", default-features = false, features = ["st cuprate-consensus-rules = { path = "./rules", features = ["rayon"] } cuprate-types = { path = "../types" } +cfg-if = { workspace = true } thiserror = { workspace = true } tower = { workspace = true, features = ["util"] } tracing = { workspace = true, features = ["std", "attributes"] } @@ -19,7 +20,6 @@ futures = { workspace = true, features = ["std", "async-await"] } randomx-rs = { workspace = true } monero-serai = { workspace = true, features = ["std"] } -curve25519-dalek = { workspace = true } rayon = { workspace = true } thread_local = { workspace = true } @@ -34,8 +34,12 @@ cuprate-test-utils = { path = "../test-utils" } cuprate-consensus-rules = {path = "./rules", features = ["proptest"]} hex-literal = { workspace = true } +curve25519-dalek = { workspace = true } tokio = { workspace = true, features = ["rt-multi-thread", "macros"]} tokio-test = { workspace = true } proptest = { workspace = true } -proptest-derive = { workspace = true } \ No newline at end of file +proptest-derive = { workspace = true } + +[lints] +workspace = true \ No newline at end of file diff --git a/consensus/fast-sync/Cargo.toml b/consensus/fast-sync/Cargo.toml index 32fce11..1d7d97b 100644 --- a/consensus/fast-sync/Cargo.toml +++ b/consensus/fast-sync/Cargo.toml @@ -9,19 +9,22 @@ name = "cuprate-fast-sync-create-hashes" path = "src/create.rs" [dependencies] -clap = { workspace = true, features = ["derive", "std"] } -cuprate-blockchain = { path = "../../storage/blockchain" } -cuprate-consensus = { path = ".." } +cuprate-blockchain = { path = "../../storage/blockchain" } +cuprate-consensus = { path = ".." 
} cuprate-consensus-rules = { path = "../rules" } -cuprate-types = { path = "../../types" } -hex.workspace = true -hex-literal.workspace = true -monero-serai.workspace = true -rayon.workspace = true -sha3 = "0.10.8" -thiserror.workspace = true -tokio = { workspace = true, features = ["full"] } -tower.workspace = true +cuprate-types = { path = "../../types" } +cuprate-helper = { path = "../../helper", features = ["cast"] } + +clap = { workspace = true, features = ["derive", "std"] } +hex = { workspace = true } +hex-literal = { workspace = true } +monero-serai = { workspace = true } +sha3 = { version = "0.10.8" } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["full"] } +tower = { workspace = true } [dev-dependencies] -tokio-test = "0.4.4" + +[lints] +workspace = true \ No newline at end of file diff --git a/consensus/fast-sync/src/create.rs b/consensus/fast-sync/src/create.rs index 0d6d03f..8c47b8e 100644 --- a/consensus/fast-sync/src/create.rs +++ b/consensus/fast-sync/src/create.rs @@ -1,3 +1,8 @@ +#![expect( + unused_crate_dependencies, + reason = "binary shares same Cargo.toml as library" +)] + use std::{fmt::Write, fs::write}; use clap::Parser; @@ -70,15 +75,12 @@ async fn main() { let mut height = 0_usize; while height < height_target { - match read_batch(&mut read_handle, height).await { - Ok(block_ids) => { - let hash = hash_of_hashes(block_ids.as_slice()); - hashes_of_hashes.push(hash); - } - Err(_) => { - println!("Failed to read next batch from database"); - break; - } + if let Ok(block_ids) = read_batch(&mut read_handle, height).await { + let hash = hash_of_hashes(block_ids.as_slice()); + hashes_of_hashes.push(hash); + } else { + println!("Failed to read next batch from database"); + break; } height += BATCH_SIZE; } @@ -88,5 +90,5 @@ async fn main() { let generated = generate_hex(&hashes_of_hashes); write("src/data/hashes_of_hashes", generated).expect("Could not write file"); - println!("Generated hashes up to block height {}", 
height); + println!("Generated hashes up to block height {height}"); } diff --git a/consensus/fast-sync/src/data/hashes_of_hashes b/consensus/fast-sync/src/data/hashes_of_hashes index 74fec4c..2e5e99a 100644 --- a/consensus/fast-sync/src/data/hashes_of_hashes +++ b/consensus/fast-sync/src/data/hashes_of_hashes @@ -1,12 +1,12 @@ [ - hex!("1adffbaf832784406018009e07d3dc3a39da7edb6632523c119ed8acb32eb934"), - hex!("ae960265e3398d04f3cd4f949ed13c2689424887c71c1441a03d900a9d3a777f"), - hex!("938c72d267bbd3a17cdecbe02443d00012ee62d6e9f3524f5a914192110b1798"), - hex!("de0c82e51549b6514b42a591fd5440dddb5cc0118ec461459a99017bf06a0a0a"), - hex!("9a50f4586ec7e0fb58c6383048d3b334180235fd34bb714af20f1a3ebce4c911"), - hex!("5a3942f9bb318d65997bf57c40e045d62e7edbe35f3dae57499c2c5554896543"), - hex!("9dccee3b094cdd1b98e357c2c81bfcea798ea75efd94e67c6f5e86f428c5ec2c"), - hex!("620397540d44f21c3c57c20e9d47c6aaf0b1bf4302a4d43e75f2e33edd1a4032"), - hex!("ef6c612fb17bd70ac2ac69b2f85a421b138cc3a81daf622b077cb402dbf68377"), - hex!("6815ecb2bd73a3ba5f20558bfe1b714c30d6892b290e0d6f6cbf18237cedf75a"), + hex_literal::hex!("1adffbaf832784406018009e07d3dc3a39da7edb6632523c119ed8acb32eb934"), + hex_literal::hex!("ae960265e3398d04f3cd4f949ed13c2689424887c71c1441a03d900a9d3a777f"), + hex_literal::hex!("938c72d267bbd3a17cdecbe02443d00012ee62d6e9f3524f5a914192110b1798"), + hex_literal::hex!("de0c82e51549b6514b42a591fd5440dddb5cc0118ec461459a99017bf06a0a0a"), + hex_literal::hex!("9a50f4586ec7e0fb58c6383048d3b334180235fd34bb714af20f1a3ebce4c911"), + hex_literal::hex!("5a3942f9bb318d65997bf57c40e045d62e7edbe35f3dae57499c2c5554896543"), + hex_literal::hex!("9dccee3b094cdd1b98e357c2c81bfcea798ea75efd94e67c6f5e86f428c5ec2c"), + hex_literal::hex!("620397540d44f21c3c57c20e9d47c6aaf0b1bf4302a4d43e75f2e33edd1a4032"), + hex_literal::hex!("ef6c612fb17bd70ac2ac69b2f85a421b138cc3a81daf622b077cb402dbf68377"), + hex_literal::hex!("6815ecb2bd73a3ba5f20558bfe1b714c30d6892b290e0d6f6cbf18237cedf75a"), ] diff --git 
a/consensus/fast-sync/src/fast_sync.rs b/consensus/fast-sync/src/fast_sync.rs index 35fa674..b4fc12b 100644 --- a/consensus/fast-sync/src/fast_sync.rs +++ b/consensus/fast-sync/src/fast_sync.rs @@ -6,8 +6,6 @@ use std::{ task::{Context, Poll}, }; -#[allow(unused_imports)] -use hex_literal::hex; use monero_serai::{ block::Block, transaction::{Input, Transaction}, @@ -19,6 +17,7 @@ use cuprate_consensus::{ transactions::new_tx_verification_data, }; use cuprate_consensus_rules::{miner_tx::MinerTxError, ConsensusError}; +use cuprate_helper::cast::u64_to_usize; use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; use crate::{hash_of_hashes, BlockId, HashOfHashes}; @@ -31,9 +30,9 @@ const BATCH_SIZE: usize = 512; #[cfg(test)] static HASHES_OF_HASHES: &[HashOfHashes] = &[ - hex!("3fdc9032c16d440f6c96be209c36d3d0e1aed61a2531490fe0ca475eb615c40a"), - hex!("0102030405060708010203040506070801020304050607080102030405060708"), - hex!("0102030405060708010203040506070801020304050607080102030405060708"), + hex_literal::hex!("3fdc9032c16d440f6c96be209c36d3d0e1aed61a2531490fe0ca475eb615c40a"), + hex_literal::hex!("0102030405060708010203040506070801020304050607080102030405060708"), + hex_literal::hex!("0102030405060708010203040506070801020304050607080102030405060708"), ]; #[cfg(test)] @@ -44,14 +43,14 @@ fn max_height() -> u64 { (HASHES_OF_HASHES.len() * BATCH_SIZE) as u64 } -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Eq)] pub struct ValidBlockId(BlockId); fn valid_block_ids(block_ids: &[BlockId]) -> Vec { block_ids.iter().map(|b| ValidBlockId(*b)).collect() } -#[allow(clippy::large_enum_variant)] +#[expect(clippy::large_enum_variant)] pub enum FastSyncRequest { ValidateHashes { start_height: u64, @@ -64,8 +63,8 @@ pub enum FastSyncRequest { }, } -#[allow(clippy::large_enum_variant)] -#[derive(Debug, PartialEq)] +#[expect(clippy::large_enum_variant)] +#[derive(Debug, PartialEq, Eq)] pub enum FastSyncResponse { ValidateHashes { validated_hashes: 
Vec, @@ -74,7 +73,7 @@ pub enum FastSyncResponse { ValidateBlock(VerifiedBlockInformation), } -#[derive(thiserror::Error, Debug, PartialEq)] +#[derive(thiserror::Error, Debug, PartialEq, Eq)] pub enum FastSyncError { #[error("Block does not match its expected hash")] BlockHashMismatch, @@ -127,9 +126,9 @@ where + Send + 'static, { - #[allow(dead_code)] - pub(crate) fn new(context_svc: C) -> FastSyncService { - FastSyncService { context_svc } + #[expect(dead_code)] + pub(crate) const fn new(context_svc: C) -> Self { + Self { context_svc } } } @@ -161,7 +160,7 @@ where FastSyncRequest::ValidateHashes { start_height, block_ids, - } => validate_hashes(start_height, &block_ids).await, + } => validate_hashes(start_height, &block_ids), FastSyncRequest::ValidateBlock { block, txs, token } => { validate_block(context_svc, block, txs, token).await } @@ -170,11 +169,13 @@ where } } -async fn validate_hashes( +fn validate_hashes( start_height: u64, block_ids: &[BlockId], ) -> Result { - if start_height as usize % BATCH_SIZE != 0 { + let start_height_usize = u64_to_usize(start_height); + + if start_height_usize % BATCH_SIZE != 0 { return Err(FastSyncError::InvalidStartHeight); } @@ -182,9 +183,9 @@ async fn validate_hashes( return Err(FastSyncError::OutOfRange); } - let stop_height = start_height as usize + block_ids.len(); + let stop_height = start_height_usize + block_ids.len(); - let batch_from = start_height as usize / BATCH_SIZE; + let batch_from = start_height_usize / BATCH_SIZE; let batch_to = cmp::min(stop_height / BATCH_SIZE, HASHES_OF_HASHES.len()); let n_batches = batch_to - batch_from; @@ -285,7 +286,7 @@ where block_blob, txs: verified_txs, block_hash, - pow_hash: [0u8; 32], + pow_hash: [0_u8; 32], height: *height, generated_coins, weight, @@ -299,46 +300,36 @@ where #[cfg(test)] mod tests { use super::*; - use tokio_test::block_on; #[test] fn test_validate_hashes_errors() { - let ids = [[1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32], [5u8; 32]]; + let ids = [[1_u8; 
32], [2_u8; 32], [3_u8; 32], [4_u8; 32], [5_u8; 32]]; assert_eq!( - block_on(validate_hashes(3, &[])), + validate_hashes(3, &[]), Err(FastSyncError::InvalidStartHeight) ); assert_eq!( - block_on(validate_hashes(3, &ids)), + validate_hashes(3, &ids), Err(FastSyncError::InvalidStartHeight) ); - assert_eq!( - block_on(validate_hashes(20, &[])), - Err(FastSyncError::OutOfRange) - ); - assert_eq!( - block_on(validate_hashes(20, &ids)), - Err(FastSyncError::OutOfRange) - ); + assert_eq!(validate_hashes(20, &[]), Err(FastSyncError::OutOfRange)); + assert_eq!(validate_hashes(20, &ids), Err(FastSyncError::OutOfRange)); + assert_eq!(validate_hashes(4, &[]), Err(FastSyncError::NothingToDo)); assert_eq!( - block_on(validate_hashes(4, &[])), - Err(FastSyncError::NothingToDo) - ); - assert_eq!( - block_on(validate_hashes(4, &ids[..3])), + validate_hashes(4, &ids[..3]), Err(FastSyncError::NothingToDo) ); } #[test] fn test_validate_hashes_success() { - let ids = [[1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32], [5u8; 32]]; + let ids = [[1_u8; 32], [2_u8; 32], [3_u8; 32], [4_u8; 32], [5_u8; 32]]; let validated_hashes = valid_block_ids(&ids[0..4]); let unknown_hashes = ids[4..].to_vec(); assert_eq!( - block_on(validate_hashes(0, &ids)), + validate_hashes(0, &ids), Ok(FastSyncResponse::ValidateHashes { validated_hashes, unknown_hashes @@ -349,15 +340,10 @@ mod tests { #[test] fn test_validate_hashes_mismatch() { let ids = [ - [1u8; 32], [2u8; 32], [3u8; 32], [5u8; 32], [1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32], + [1_u8; 32], [2_u8; 32], [3_u8; 32], [5_u8; 32], [1_u8; 32], [2_u8; 32], [3_u8; 32], + [4_u8; 32], ]; - assert_eq!( - block_on(validate_hashes(0, &ids)), - Err(FastSyncError::Mismatch) - ); - assert_eq!( - block_on(validate_hashes(4, &ids)), - Err(FastSyncError::Mismatch) - ); + assert_eq!(validate_hashes(0, &ids), Err(FastSyncError::Mismatch)); + assert_eq!(validate_hashes(4, &ids), Err(FastSyncError::Mismatch)); } } diff --git a/consensus/fast-sync/src/lib.rs 
b/consensus/fast-sync/src/lib.rs index f82b163..8dbdc64 100644 --- a/consensus/fast-sync/src/lib.rs +++ b/consensus/fast-sync/src/lib.rs @@ -1,3 +1,9 @@ +// Used in `create.rs` +use clap as _; +use cuprate_blockchain as _; +use hex as _; +use tokio as _; + pub mod fast_sync; pub mod util; diff --git a/consensus/rules/Cargo.toml b/consensus/rules/Cargo.toml index 8ba321d..575bed7 100644 --- a/consensus/rules/Cargo.toml +++ b/consensus/rules/Cargo.toml @@ -7,7 +7,7 @@ authors = ["Boog900"] [features] default = [] -proptest = ["dep:proptest", "dep:proptest-derive", "cuprate-types/proptest"] +proptest = ["cuprate-types/proptest"] rayon = ["dep:rayon"] [dependencies] @@ -24,15 +24,16 @@ hex = { workspace = true, features = ["std"] } hex-literal = { workspace = true } crypto-bigint = { workspace = true } +cfg-if = { workspace = true } tracing = { workspace = true, features = ["std"] } thiserror = { workspace = true } rayon = { workspace = true, optional = true } -proptest = {workspace = true, optional = true} -proptest-derive = {workspace = true, optional = true} - [dev-dependencies] -proptest = {workspace = true} -proptest-derive = {workspace = true} -tokio = {version = "1.35.0", features = ["rt-multi-thread", "macros"]} \ No newline at end of file +proptest = { workspace = true } +proptest-derive = { workspace = true } +tokio = { version = "1.35.0", features = ["rt-multi-thread", "macros"] } + +[lints] +workspace = true \ No newline at end of file diff --git a/consensus/rules/src/blocks.rs b/consensus/rules/src/blocks.rs index e118e9a..5e55ce2 100644 --- a/consensus/rules/src/blocks.rs +++ b/consensus/rules/src/blocks.rs @@ -44,22 +44,22 @@ pub enum BlockError { MinerTxError(#[from] MinerTxError), } -/// A trait to represent the RandomX VM. +/// A trait to represent the `RandomX` VM. pub trait RandomX { type Error; fn calculate_hash(&self, buf: &[u8]) -> Result<[u8; 32], Self::Error>; } -/// Returns if this height is a RandomX seed height. 
-pub fn is_randomx_seed_height(height: usize) -> bool { +/// Returns if this height is a `RandomX` seed height. +pub const fn is_randomx_seed_height(height: usize) -> bool { height % RX_SEEDHASH_EPOCH_BLOCKS == 0 } -/// Returns the RandomX seed height for this block. +/// Returns the `RandomX` seed height for this block. /// /// ref: -pub fn randomx_seed_height(height: usize) -> usize { +pub const fn randomx_seed_height(height: usize) -> usize { if height <= RX_SEEDHASH_EPOCH_BLOCKS + RX_SEEDHASH_EPOCH_LAG { 0 } else { @@ -122,10 +122,10 @@ pub fn check_block_pow(hash: &[u8; 32], difficulty: u128) -> Result<(), BlockErr /// Returns the penalty free zone /// /// -pub fn penalty_free_zone(hf: &HardFork) -> usize { - if hf == &HardFork::V1 { +pub fn penalty_free_zone(hf: HardFork) -> usize { + if hf == HardFork::V1 { PENALTY_FREE_ZONE_1 - } else if hf >= &HardFork::V2 && hf < &HardFork::V5 { + } else if hf >= HardFork::V2 && hf < HardFork::V5 { PENALTY_FREE_ZONE_2 } else { PENALTY_FREE_ZONE_5 @@ -135,7 +135,7 @@ pub fn penalty_free_zone(hf: &HardFork) -> usize { /// Sanity check on the block blob size. /// /// ref: -fn block_size_sanity_check( +const fn block_size_sanity_check( block_blob_len: usize, effective_median: usize, ) -> Result<(), BlockError> { @@ -149,7 +149,7 @@ fn block_size_sanity_check( /// Sanity check on the block weight. /// /// ref: -pub fn check_block_weight( +pub const fn check_block_weight( block_weight: usize, median_for_block_reward: usize, ) -> Result<(), BlockError> { @@ -163,7 +163,7 @@ pub fn check_block_weight( /// Sanity check on number of txs in the block. 
/// /// ref: -fn check_amount_txs(number_none_miner_txs: usize) -> Result<(), BlockError> { +const fn check_amount_txs(number_none_miner_txs: usize) -> Result<(), BlockError> { if number_none_miner_txs + 1 > 0x10000000 { Err(BlockError::TooManyTxs) } else { @@ -175,10 +175,10 @@ fn check_amount_txs(number_none_miner_txs: usize) -> Result<(), BlockError> { /// /// ref: fn check_prev_id(block: &Block, top_hash: &[u8; 32]) -> Result<(), BlockError> { - if &block.header.previous != top_hash { - Err(BlockError::PreviousIDIncorrect) - } else { + if &block.header.previous == top_hash { Ok(()) + } else { + Err(BlockError::PreviousIDIncorrect) } } @@ -273,7 +273,7 @@ pub fn check_block( block_weight, block_chain_ctx.median_weight_for_block_reward, block_chain_ctx.already_generated_coins, - &block_chain_ctx.current_hf, + block_chain_ctx.current_hf, )?; Ok((vote, generated_coins)) diff --git a/consensus/rules/src/decomposed_amount.rs b/consensus/rules/src/decomposed_amount.rs index a8821f3..ebed8b0 100644 --- a/consensus/rules/src/decomposed_amount.rs +++ b/consensus/rules/src/decomposed_amount.rs @@ -1,6 +1,6 @@ #[rustfmt::skip] /// Decomposed amount table. 
-pub static DECOMPOSED_AMOUNTS: [u64; 172] = [ +pub(crate) static DECOMPOSED_AMOUNTS: [u64; 172] = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500, 600, 700, 800, 900, @@ -40,8 +40,8 @@ mod tests { #[test] fn decomposed_amounts_return_decomposed() { - for amount in DECOMPOSED_AMOUNTS.iter() { - assert!(is_decomposed_amount(amount)) + for amount in &DECOMPOSED_AMOUNTS { + assert!(is_decomposed_amount(amount)); } } diff --git a/consensus/rules/src/genesis.rs b/consensus/rules/src/genesis.rs index b796119..e1cf4f8 100644 --- a/consensus/rules/src/genesis.rs +++ b/consensus/rules/src/genesis.rs @@ -8,7 +8,7 @@ use monero_serai::{ use cuprate_helper::network::Network; -const fn genesis_nonce(network: &Network) -> u32 { +const fn genesis_nonce(network: Network) -> u32 { match network { Network::Mainnet => 10000, Network::Testnet => 10001, @@ -16,7 +16,7 @@ const fn genesis_nonce(network: &Network) -> u32 { } } -fn genesis_miner_tx(network: &Network) -> Transaction { +fn genesis_miner_tx(network: Network) -> Transaction { Transaction::read(&mut hex::decode(match network { Network::Mainnet | Network::Testnet => "013c01ff0001ffffffffffff03029b2e4c0281c0b02e7c53291a94d1d0cbff8883f8024f5142ee494ffbbd08807121017767aafcde9be00dcfd098715ebcf7f410daebc582fda69d24a28e9d0bc890d1", Network::Stagenet => "013c01ff0001ffffffffffff0302df5d56da0c7d643ddd1ce61901c7bdc5fb1738bfe39fbe69c28a3a7032729c0f2101168d0c4ca86fb55a4cf6a36d31431be1c53a3bd7411bb24e8832410289fa6f3b" @@ -26,7 +26,7 @@ fn genesis_miner_tx(network: &Network) -> Transaction { /// Generates the Monero genesis block. 
/// /// ref: -pub fn generate_genesis_block(network: &Network) -> Block { +pub fn generate_genesis_block(network: Network) -> Block { Block { header: BlockHeader { hardfork_version: 1, @@ -47,19 +47,19 @@ mod tests { #[test] fn generate_genesis_blocks() { assert_eq!( - &generate_genesis_block(&Network::Mainnet).hash(), + &generate_genesis_block(Network::Mainnet).hash(), hex::decode("418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3") .unwrap() .as_slice() ); assert_eq!( - &generate_genesis_block(&Network::Testnet).hash(), + &generate_genesis_block(Network::Testnet).hash(), hex::decode("48ca7cd3c8de5b6a4d53d2861fbdaedca141553559f9be9520068053cda8430b") .unwrap() .as_slice() ); assert_eq!( - &generate_genesis_block(&Network::Stagenet).hash(), + &generate_genesis_block(Network::Stagenet).hash(), hex::decode("76ee3cc98646292206cd3e86f74d88b4dcc1d937088645e9b0cbca84b7ce74eb") .unwrap() .as_slice() diff --git a/consensus/rules/src/hard_forks.rs b/consensus/rules/src/hard_forks.rs index 4f786e4..7e9a881 100644 --- a/consensus/rules/src/hard_forks.rs +++ b/consensus/rules/src/hard_forks.rs @@ -25,10 +25,10 @@ pub fn check_block_version_vote( ) -> Result<(), HardForkError> { // self = current hf if hf != version { - Err(HardForkError::VersionIncorrect)?; + return Err(HardForkError::VersionIncorrect); } if hf > vote { - Err(HardForkError::VoteTooLow)?; + return Err(HardForkError::VoteTooLow); } Ok(()) @@ -41,8 +41,8 @@ pub struct HFInfo { threshold: usize, } impl HFInfo { - pub const fn new(height: usize, threshold: usize) -> HFInfo { - HFInfo { height, threshold } + pub const fn new(height: usize, threshold: usize) -> Self { + Self { height, threshold } } } @@ -51,7 +51,7 @@ impl HFInfo { pub struct HFsInfo([HFInfo; NUMB_OF_HARD_FORKS]); impl HFsInfo { - pub fn info_for_hf(&self, hf: &HardFork) -> HFInfo { + pub const fn info_for_hf(&self, hf: &HardFork) -> HFInfo { self.0[*hf as usize - 1] } @@ -62,7 +62,7 @@ impl HFsInfo { /// Returns the main-net hard-fork 
information. /// /// ref: - pub const fn main_net() -> HFsInfo { + pub const fn main_net() -> Self { Self([ HFInfo::new(0, 0), HFInfo::new(1009827, 0), @@ -86,7 +86,7 @@ impl HFsInfo { /// Returns the test-net hard-fork information. /// /// ref: - pub const fn test_net() -> HFsInfo { + pub const fn test_net() -> Self { Self([ HFInfo::new(0, 0), HFInfo::new(624634, 0), @@ -110,7 +110,7 @@ impl HFsInfo { /// Returns the test-net hard-fork information. /// /// ref: - pub const fn stage_net() -> HFsInfo { + pub const fn stage_net() -> Self { Self([ HFInfo::new(0, 0), HFInfo::new(32000, 0), @@ -165,8 +165,8 @@ impl Display for HFVotes { } impl HFVotes { - pub fn new(window_size: usize) -> HFVotes { - HFVotes { + pub fn new(window_size: usize) -> Self { + Self { votes: [0; NUMB_OF_HARD_FORKS], vote_list: VecDeque::with_capacity(window_size), window_size, @@ -251,6 +251,6 @@ impl HFVotes { /// Returns the votes needed for a hard-fork. /// /// ref: -pub fn votes_needed(threshold: usize, window: usize) -> usize { +pub const fn votes_needed(threshold: usize, window: usize) -> usize { (threshold * window).div_ceil(100) } diff --git a/consensus/rules/src/hard_forks/tests.rs b/consensus/rules/src/hard_forks/tests.rs index 00dd036..1a24627 100644 --- a/consensus/rules/src/hard_forks/tests.rs +++ b/consensus/rules/src/hard_forks/tests.rs @@ -51,7 +51,7 @@ proptest! { prop_assert_eq!(hf_votes.total_votes(), hf_votes.vote_list.len()); let mut votes = [0_usize; NUMB_OF_HARD_FORKS]; - for vote in hf_votes.vote_list.iter() { + for vote in &hf_votes.vote_list { // manually go through the list of votes tallying votes[*vote as usize - 1] += 1; } @@ -61,9 +61,9 @@ proptest! 
{ #[test] fn window_size_kept_constant(mut hf_votes in arb_full_hf_votes(), new_votes in any::>()) { - for new_vote in new_votes.into_iter() { + for new_vote in new_votes { hf_votes.add_vote_for_hf(&new_vote); - prop_assert_eq!(hf_votes.total_votes(), TEST_WINDOW_SIZE) + prop_assert_eq!(hf_votes.total_votes(), TEST_WINDOW_SIZE); } } diff --git a/consensus/rules/src/lib.rs b/consensus/rules/src/lib.rs index a5f8800..876e2f7 100644 --- a/consensus/rules/src/lib.rs +++ b/consensus/rules/src/lib.rs @@ -1,3 +1,12 @@ +cfg_if::cfg_if! { + // Used in external `tests/`. + if #[cfg(test)] { + use proptest as _; + use proptest_derive as _; + use tokio as _; + } +} + use std::time::{SystemTime, UNIX_EPOCH}; pub mod batch_verifier; diff --git a/consensus/rules/src/miner_tx.rs b/consensus/rules/src/miner_tx.rs index 663c95e..e6b51d2 100644 --- a/consensus/rules/src/miner_tx.rs +++ b/consensus/rules/src/miner_tx.rs @@ -40,7 +40,7 @@ const MINER_TX_TIME_LOCKED_BLOCKS: usize = 60; /// the block. /// /// ref: -fn calculate_base_reward(already_generated_coins: u64, hf: &HardFork) -> u64 { +fn calculate_base_reward(already_generated_coins: u64, hf: HardFork) -> u64 { let target_mins = hf.block_time().as_secs() / 60; let emission_speed_factor = 20 - (target_mins - 1); ((MONEY_SUPPLY - already_generated_coins) >> emission_speed_factor) @@ -54,7 +54,7 @@ pub fn calculate_block_reward( block_weight: usize, median_bw: usize, already_generated_coins: u64, - hf: &HardFork, + hf: HardFork, ) -> u64 { let base_reward = calculate_base_reward(already_generated_coins, hf); @@ -75,9 +75,9 @@ pub fn calculate_block_reward( /// Checks the miner transactions version. 
/// /// ref: -fn check_miner_tx_version(tx_version: &TxVersion, hf: &HardFork) -> Result<(), MinerTxError> { +fn check_miner_tx_version(tx_version: TxVersion, hf: HardFork) -> Result<(), MinerTxError> { // The TxVersion enum checks if the version is not 1 or 2 - if hf >= &HardFork::V12 && tx_version != &TxVersion::RingCT { + if hf >= HardFork::V12 && tx_version != TxVersion::RingCT { Err(MinerTxError::VersionInvalid) } else { Ok(()) @@ -94,31 +94,31 @@ fn check_inputs(inputs: &[Input], chain_height: usize) -> Result<(), MinerTxErro match &inputs[0] { Input::Gen(height) => { - if height != &chain_height { - Err(MinerTxError::InputsHeightIncorrect) - } else { + if height == &chain_height { Ok(()) + } else { + Err(MinerTxError::InputsHeightIncorrect) } } - _ => Err(MinerTxError::InputNotOfTypeGen), + Input::ToKey { .. } => Err(MinerTxError::InputNotOfTypeGen), } } /// Checks the miner transaction has a correct time lock. /// /// ref: -fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<(), MinerTxError> { +const fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<(), MinerTxError> { match time_lock { &Timelock::Block(till_height) => { // Lock times above this amount are timestamps not blocks. // This is just for safety though and shouldn't actually be hit. 
if till_height > 500_000_000 { - Err(MinerTxError::InvalidLockTime)?; + return Err(MinerTxError::InvalidLockTime); } - if till_height != chain_height + MINER_TX_TIME_LOCKED_BLOCKS { - Err(MinerTxError::InvalidLockTime) - } else { + if till_height == chain_height + MINER_TX_TIME_LOCKED_BLOCKS { Ok(()) + } else { + Err(MinerTxError::InvalidLockTime) } } _ => Err(MinerTxError::InvalidLockTime), @@ -131,18 +131,18 @@ fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<(), Mine /// && fn sum_outputs( outputs: &[Output], - hf: &HardFork, - tx_version: &TxVersion, + hf: HardFork, + tx_version: TxVersion, ) -> Result { let mut sum: u64 = 0; for out in outputs { let amt = out.amount.unwrap_or(0); - if tx_version == &TxVersion::RingSignatures && amt == 0 { + if tx_version == TxVersion::RingSignatures && amt == 0 { return Err(MinerTxError::OutputAmountIncorrect); } - if hf == &HardFork::V3 && !is_decomposed_amount(&amt) { + if hf == HardFork::V3 && !is_decomposed_amount(&amt) { return Err(MinerTxError::OutputNotDecomposed); } sum = sum.checked_add(amt).ok_or(MinerTxError::OutputsOverflow)?; @@ -157,9 +157,9 @@ fn check_total_output_amt( total_output: u64, reward: u64, fees: u64, - hf: &HardFork, + hf: HardFork, ) -> Result { - if hf == &HardFork::V1 || hf >= &HardFork::V12 { + if hf == HardFork::V1 || hf >= HardFork::V12 { if total_output != reward + fees { return Err(MinerTxError::OutputAmountIncorrect); } @@ -185,16 +185,16 @@ pub fn check_miner_tx( block_weight: usize, median_bw: usize, already_generated_coins: u64, - hf: &HardFork, + hf: HardFork, ) -> Result { let tx_version = TxVersion::from_raw(tx.version()).ok_or(MinerTxError::VersionInvalid)?; - check_miner_tx_version(&tx_version, hf)?; + check_miner_tx_version(tx_version, hf)?; // ref: match tx { Transaction::V1 { .. } => (), Transaction::V2 { proofs, .. 
} => { - if hf >= &HardFork::V12 && proofs.is_some() { + if hf >= HardFork::V12 && proofs.is_some() { return Err(MinerTxError::RCTTypeNotNULL); } } @@ -207,7 +207,7 @@ pub fn check_miner_tx( check_output_types(&tx.prefix().outputs, hf).map_err(|_| MinerTxError::InvalidOutputType)?; let reward = calculate_block_reward(block_weight, median_bw, already_generated_coins, hf); - let total_outs = sum_outputs(&tx.prefix().outputs, hf, &tx_version)?; + let total_outs = sum_outputs(&tx.prefix().outputs, hf, tx_version)?; check_total_output_amt(total_outs, reward, total_fees, hf) } @@ -221,7 +221,7 @@ mod tests { proptest! { #[test] fn tail_emission(generated_coins in any::(), hf in any::()) { - prop_assert!(calculate_base_reward(generated_coins, &hf) >= MINIMUM_REWARD_PER_MIN * hf.block_time().as_secs() / 60) + prop_assert!(calculate_base_reward(generated_coins, hf) >= MINIMUM_REWARD_PER_MIN * hf.block_time().as_secs() / 60); } } } diff --git a/consensus/rules/src/transactions.rs b/consensus/rules/src/transactions.rs index 9c6ad51..b4eac19 100644 --- a/consensus/rules/src/transactions.rs +++ b/consensus/rules/src/transactions.rs @@ -99,11 +99,8 @@ fn check_output_keys(outputs: &[Output]) -> Result<(), TransactionError> { /// /// /// -pub(crate) fn check_output_types( - outputs: &[Output], - hf: &HardFork, -) -> Result<(), TransactionError> { - if hf == &HardFork::V15 { +pub(crate) fn check_output_types(outputs: &[Output], hf: HardFork) -> Result<(), TransactionError> { + if hf == HardFork::V15 { for outs in outputs.windows(2) { if outs[0].view_tag.is_some() != outs[1].view_tag.is_some() { return Err(TransactionError::OutputTypeInvalid); @@ -113,8 +110,8 @@ pub(crate) fn check_output_types( } for out in outputs { - if hf <= &HardFork::V14 && out.view_tag.is_some() - || hf >= &HardFork::V16 && out.view_tag.is_none() + if hf <= HardFork::V14 && out.view_tag.is_some() + || hf >= HardFork::V16 && out.view_tag.is_none() { return Err(TransactionError::OutputTypeInvalid); } @@ 
-125,12 +122,12 @@ pub(crate) fn check_output_types( /// Checks the individual outputs amount for version 1 txs. /// /// ref: -fn check_output_amount_v1(amount: u64, hf: &HardFork) -> Result<(), TransactionError> { +fn check_output_amount_v1(amount: u64, hf: HardFork) -> Result<(), TransactionError> { if amount == 0 { return Err(TransactionError::ZeroOutputForV1); } - if hf >= &HardFork::V2 && !is_decomposed_amount(&amount) { + if hf >= HardFork::V2 && !is_decomposed_amount(&amount) { return Err(TransactionError::AmountNotDecomposed); } @@ -140,7 +137,7 @@ fn check_output_amount_v1(amount: u64, hf: &HardFork) -> Result<(), TransactionE /// Checks the individual outputs amount for version 2 txs. /// /// ref: -fn check_output_amount_v2(amount: u64) -> Result<(), TransactionError> { +const fn check_output_amount_v2(amount: u64) -> Result<(), TransactionError> { if amount == 0 { Ok(()) } else { @@ -154,8 +151,8 @@ fn check_output_amount_v2(amount: u64) -> Result<(), TransactionError> { /// && fn sum_outputs( outputs: &[Output], - hf: &HardFork, - tx_version: &TxVersion, + hf: HardFork, + tx_version: TxVersion, ) -> Result { let mut sum: u64 = 0; @@ -181,15 +178,15 @@ fn sum_outputs( /// && fn check_number_of_outputs( outputs: usize, - hf: &HardFork, - tx_version: &TxVersion, + hf: HardFork, + tx_version: TxVersion, bp_or_bpp: bool, ) -> Result<(), TransactionError> { - if tx_version == &TxVersion::RingSignatures { + if tx_version == TxVersion::RingSignatures { return Ok(()); } - if hf >= &HardFork::V12 && outputs < 2 { + if hf >= HardFork::V12 && outputs < 2 { return Err(TransactionError::InvalidNumberOfOutputs); } @@ -207,8 +204,8 @@ fn check_number_of_outputs( /// && fn check_outputs_semantics( outputs: &[Output], - hf: &HardFork, - tx_version: &TxVersion, + hf: HardFork, + tx_version: TxVersion, bp_or_bpp: bool, ) -> Result { check_output_types(outputs, hf)?; @@ -223,11 +220,11 @@ fn check_outputs_semantics( /// Checks if an outputs unlock time has passed. 
/// /// -pub fn output_unlocked( +pub const fn output_unlocked( time_lock: &Timelock, current_chain_height: usize, current_time_lock_timestamp: u64, - hf: &HardFork, + hf: HardFork, ) -> bool { match *time_lock { Timelock::None => true, @@ -243,7 +240,7 @@ pub fn output_unlocked( /// Returns if a locked output, which uses a block height, can be spent. /// /// ref: -fn check_block_time_lock(unlock_height: usize, current_chain_height: usize) -> bool { +const fn check_block_time_lock(unlock_height: usize, current_chain_height: usize) -> bool { // current_chain_height = 1 + top height unlock_height <= current_chain_height } @@ -251,10 +248,10 @@ fn check_block_time_lock(unlock_height: usize, current_chain_height: usize) -> b /// Returns if a locked output, which uses a block height, can be spent. /// /// ref: -fn check_timestamp_time_lock( +const fn check_timestamp_time_lock( unlock_timestamp: u64, current_time_lock_timestamp: u64, - hf: &HardFork, + hf: HardFork, ) -> bool { current_time_lock_timestamp + hf.block_time().as_secs() >= unlock_timestamp } @@ -269,19 +266,19 @@ fn check_all_time_locks( time_locks: &[Timelock], current_chain_height: usize, current_time_lock_timestamp: u64, - hf: &HardFork, + hf: HardFork, ) -> Result<(), TransactionError> { time_locks.iter().try_for_each(|time_lock| { - if !output_unlocked( + if output_unlocked( time_lock, current_chain_height, current_time_lock_timestamp, hf, ) { + Ok(()) + } else { tracing::debug!("Transaction invalid: one or more inputs locked, lock: {time_lock:?}."); Err(TransactionError::OneOrMoreRingMembersLocked) - } else { - Ok(()) } }) } @@ -292,11 +289,11 @@ fn check_all_time_locks( /// /// ref: /// && -pub fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), TransactionError> { - if hf == &HardFork::V15 { +pub fn check_decoy_info(decoy_info: &DecoyInfo, hf: HardFork) -> Result<(), TransactionError> { + if hf == HardFork::V15 { // Hard-fork 15 allows both v14 and v16 rules - return 
check_decoy_info(decoy_info, &HardFork::V14) - .or_else(|_| check_decoy_info(decoy_info, &HardFork::V16)); + return check_decoy_info(decoy_info, HardFork::V14) + .or_else(|_| check_decoy_info(decoy_info, HardFork::V16)); } let current_minimum_decoys = minimum_decoys(hf); @@ -310,13 +307,13 @@ pub fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), Tra if decoy_info.mixable > 1 { return Err(TransactionError::MoreThanOneMixableInputWithUnmixable); } - } else if hf >= &HardFork::V8 && decoy_info.min_decoys != current_minimum_decoys { + } else if hf >= HardFork::V8 && decoy_info.min_decoys != current_minimum_decoys { // From V8 enforce the minimum used number of rings is the default minimum. return Err(TransactionError::InputDoesNotHaveExpectedNumbDecoys); } // From v12 all inputs must have the same number of decoys. - if hf >= &HardFork::V12 && decoy_info.min_decoys != decoy_info.max_decoys { + if hf >= HardFork::V12 && decoy_info.min_decoys != decoy_info.max_decoys { return Err(TransactionError::InputDoesNotHaveExpectedNumbDecoys); } @@ -334,19 +331,19 @@ fn check_key_images(input: &Input) -> Result<(), TransactionError> { return Err(TransactionError::KeyImageIsNotInPrimeSubGroup); } } - _ => Err(TransactionError::IncorrectInputType)?, + Input::Gen(_) => return Err(TransactionError::IncorrectInputType), } Ok(()) } -/// Checks that the input is of type [`Input::ToKey`] aka txin_to_key. +/// Checks that the input is of type [`Input::ToKey`] aka `txin_to_key`. /// /// ref: -fn check_input_type(input: &Input) -> Result<(), TransactionError> { +const fn check_input_type(input: &Input) -> Result<(), TransactionError> { match input { Input::ToKey { .. 
} => Ok(()), - _ => Err(TransactionError::IncorrectInputType)?, + Input::Gen(_) => Err(TransactionError::IncorrectInputType), } } @@ -362,15 +359,15 @@ fn check_input_has_decoys(input: &Input) -> Result<(), TransactionError> { Ok(()) } } - _ => Err(TransactionError::IncorrectInputType)?, + Input::Gen(_) => Err(TransactionError::IncorrectInputType), } } /// Checks that the ring members for the input are unique after hard-fork 6. /// /// ref: -fn check_ring_members_unique(input: &Input, hf: &HardFork) -> Result<(), TransactionError> { - if hf >= &HardFork::V6 { +fn check_ring_members_unique(input: &Input, hf: HardFork) -> Result<(), TransactionError> { + if hf >= HardFork::V6 { match input { Input::ToKey { key_offsets, .. } => key_offsets.iter().skip(1).try_for_each(|offset| { if *offset == 0 { @@ -379,7 +376,7 @@ fn check_ring_members_unique(input: &Input, hf: &HardFork) -> Result<(), Transac Ok(()) } }), - _ => Err(TransactionError::IncorrectInputType)?, + Input::Gen(_) => Err(TransactionError::IncorrectInputType), } } else { Ok(()) @@ -389,23 +386,22 @@ fn check_ring_members_unique(input: &Input, hf: &HardFork) -> Result<(), Transac /// Checks that from hf 7 the inputs are sorted by key image. /// /// ref: -fn check_inputs_sorted(inputs: &[Input], hf: &HardFork) -> Result<(), TransactionError> { +fn check_inputs_sorted(inputs: &[Input], hf: HardFork) -> Result<(), TransactionError> { let get_ki = |inp: &Input| match inp { Input::ToKey { key_image, .. } => Ok(key_image.compress().to_bytes()), - _ => Err(TransactionError::IncorrectInputType), + Input::Gen(_) => Err(TransactionError::IncorrectInputType), }; - if hf >= &HardFork::V7 { + if hf >= HardFork::V7 { for inps in inputs.windows(2) { match get_ki(&inps[0])?.cmp(&get_ki(&inps[1])?) { Ordering::Greater => (), _ => return Err(TransactionError::InputsAreNotOrdered), } } - Ok(()) - } else { - Ok(()) } + + Ok(()) } /// Checks the youngest output is at least 10 blocks old. 
@@ -414,9 +410,9 @@ fn check_inputs_sorted(inputs: &[Input], hf: &HardFork) -> Result<(), Transactio fn check_10_block_lock( youngest_used_out_height: usize, current_chain_height: usize, - hf: &HardFork, + hf: HardFork, ) -> Result<(), TransactionError> { - if hf >= &HardFork::V12 { + if hf >= HardFork::V12 { if youngest_used_out_height + 10 > current_chain_height { tracing::debug!( "Transaction invalid: One or more ring members younger than 10 blocks." @@ -442,7 +438,7 @@ fn sum_inputs_check_overflow(inputs: &[Input]) -> Result .checked_add(amount.unwrap_or(0)) .ok_or(TransactionError::InputsOverflow)?; } - _ => Err(TransactionError::IncorrectInputType)?, + Input::Gen(_) => return Err(TransactionError::IncorrectInputType), } } @@ -454,7 +450,7 @@ fn sum_inputs_check_overflow(inputs: &[Input]) -> Result /// Semantic rules are rules that don't require blockchain context, the hard-fork does not require blockchain context as: /// - The tx-pool will use the current hard-fork /// - When syncing the hard-fork is in the block header. -fn check_inputs_semantics(inputs: &[Input], hf: &HardFork) -> Result { +fn check_inputs_semantics(inputs: &[Input], hf: HardFork) -> Result { // if inputs.is_empty() { return Err(TransactionError::NoInputs); @@ -481,14 +477,14 @@ fn check_inputs_contextual( inputs: &[Input], tx_ring_members_info: &TxRingMembersInfo, current_chain_height: usize, - hf: &HardFork, + hf: HardFork, ) -> Result<(), TransactionError> { // This rule is not contained in monero-core explicitly, but it is enforced by how Monero picks ring members. // When picking ring members monerod will only look in the DB at past blocks so an output has to be younger // than this transaction to be used in this tx. 
if tx_ring_members_info.youngest_used_out_height >= current_chain_height { tracing::debug!("Transaction invalid: One or more ring members too young."); - Err(TransactionError::OneOrMoreRingMembersLocked)?; + return Err(TransactionError::OneOrMoreRingMembersLocked); } check_10_block_lock( @@ -500,7 +496,7 @@ fn check_inputs_contextual( if let Some(decoys_info) = &tx_ring_members_info.decoy_info { check_decoy_info(decoys_info, hf)?; } else { - assert_eq!(hf, &HardFork::V1); + assert_eq!(hf, HardFork::V1); } for input in inputs { @@ -517,22 +513,22 @@ fn check_inputs_contextual( /// fn check_tx_version( decoy_info: &Option, - version: &TxVersion, - hf: &HardFork, + version: TxVersion, + hf: HardFork, ) -> Result<(), TransactionError> { if let Some(decoy_info) = decoy_info { let max = max_tx_version(hf); - if version > &max { + if version > max { return Err(TransactionError::TransactionVersionInvalid); } let min = min_tx_version(hf); - if version < &min && decoy_info.not_mixable == 0 { + if version < min && decoy_info.not_mixable == 0 { return Err(TransactionError::TransactionVersionInvalid); } } else { // This will only happen for hard-fork 1 when only RingSignatures are allowed. - if version != &TxVersion::RingSignatures { + if version != TxVersion::RingSignatures { return Err(TransactionError::TransactionVersionInvalid); } } @@ -541,8 +537,8 @@ fn check_tx_version( } /// Returns the default maximum tx version for the given hard-fork. -fn max_tx_version(hf: &HardFork) -> TxVersion { - if hf <= &HardFork::V3 { +fn max_tx_version(hf: HardFork) -> TxVersion { + if hf <= HardFork::V3 { TxVersion::RingSignatures } else { TxVersion::RingCT @@ -550,15 +546,15 @@ fn max_tx_version(hf: &HardFork) -> TxVersion { } /// Returns the default minimum tx version for the given hard-fork. 
-fn min_tx_version(hf: &HardFork) -> TxVersion { - if hf >= &HardFork::V6 { +fn min_tx_version(hf: HardFork) -> TxVersion { + if hf >= HardFork::V6 { TxVersion::RingCT } else { TxVersion::RingSignatures } } -fn transaction_weight_limit(hf: &HardFork) -> usize { +fn transaction_weight_limit(hf: HardFork) -> usize { penalty_free_zone(hf) / 2 - 600 } @@ -575,14 +571,14 @@ pub fn check_transaction_semantic( tx_blob_size: usize, tx_weight: usize, tx_hash: &[u8; 32], - hf: &HardFork, + hf: HardFork, verifier: impl BatchVerifier, ) -> Result { // if tx_blob_size > MAX_TX_BLOB_SIZE - || (hf >= &HardFork::V8 && tx_weight > transaction_weight_limit(hf)) + || (hf >= HardFork::V8 && tx_weight > transaction_weight_limit(hf)) { - Err(TransactionError::TooBig)?; + return Err(TransactionError::TooBig); } let tx_version = @@ -602,13 +598,13 @@ pub fn check_transaction_semantic( Transaction::V2 { proofs: None, .. } | Transaction::V1 { .. } => false, }; - let outputs_sum = check_outputs_semantics(&tx.prefix().outputs, hf, &tx_version, bp_or_bpp)?; + let outputs_sum = check_outputs_semantics(&tx.prefix().outputs, hf, tx_version, bp_or_bpp)?; let inputs_sum = check_inputs_semantics(&tx.prefix().inputs, hf)?; let fee = match tx { Transaction::V1 { .. } => { if outputs_sum >= inputs_sum { - Err(TransactionError::OutputsTooHigh)?; + return Err(TransactionError::OutputsTooHigh); } inputs_sum - outputs_sum } @@ -633,13 +629,12 @@ pub fn check_transaction_semantic( /// This function also does _not_ check for duplicate key-images: . /// /// `current_time_lock_timestamp` must be: . 
- pub fn check_transaction_contextual( tx: &Transaction, tx_ring_members_info: &TxRingMembersInfo, current_chain_height: usize, current_time_lock_timestamp: u64, - hf: &HardFork, + hf: HardFork, ) -> Result<(), TransactionError> { let tx_version = TxVersion::from_raw(tx.version()).ok_or(TransactionError::TransactionVersionInvalid)?; @@ -650,7 +645,7 @@ pub fn check_transaction_contextual( current_chain_height, hf, )?; - check_tx_version(&tx_ring_members_info.decoy_info, &tx_version, hf)?; + check_tx_version(&tx_ring_members_info.decoy_info, tx_version, hf)?; check_all_time_locks( &tx_ring_members_info.time_locked_outs, diff --git a/consensus/rules/src/transactions/contextual_data.rs b/consensus/rules/src/transactions/contextual_data.rs index 282093d..73bc12e 100644 --- a/consensus/rules/src/transactions/contextual_data.rs +++ b/consensus/rules/src/transactions/contextual_data.rs @@ -26,7 +26,7 @@ pub fn get_absolute_offsets(relative_offsets: &[u64]) -> Result, Transa Ok(offsets) } -/// Inserts the output IDs that are needed to verify the transaction inputs into the provided HashMap. +/// Inserts the output IDs that are needed to verify the transaction inputs into the provided `HashMap`. /// /// This will error if the inputs are empty /// @@ -49,7 +49,7 @@ pub fn insert_ring_member_ids( .entry(amount.unwrap_or(0)) .or_default() .extend(get_absolute_offsets(key_offsets)?), - _ => return Err(TransactionError::IncorrectInputType), + Input::Gen(_) => return Err(TransactionError::IncorrectInputType), } } Ok(()) @@ -60,7 +60,7 @@ pub fn insert_ring_member_ids( pub enum Rings { /// Legacy, pre-ringCT, rings. Legacy(Vec>), - /// RingCT rings, (outkey, amount commitment). + /// `RingCT` rings, (outkey, amount commitment). 
RingCT(Vec>), } @@ -103,15 +103,15 @@ impl DecoyInfo { /// /// So: /// - /// amount_outs_on_chain(inputs`[X]`) == outputs_with_amount`[X]` + /// `amount_outs_on_chain(inputs[X]) == outputs_with_amount[X]` /// /// Do not rely on this function to do consensus checks! /// pub fn new( inputs: &[Input], outputs_with_amount: impl Fn(u64) -> usize, - hf: &HardFork, - ) -> Result { + hf: HardFork, + ) -> Result { let mut min_decoys = usize::MAX; let mut max_decoys = usize::MIN; let mut mixable = 0; @@ -119,7 +119,7 @@ impl DecoyInfo { let minimum_decoys = minimum_decoys(hf); - for inp in inputs.iter() { + for inp in inputs { match inp { Input::ToKey { key_offsets, @@ -149,11 +149,11 @@ impl DecoyInfo { min_decoys = min(min_decoys, numb_decoys); max_decoys = max(max_decoys, numb_decoys); } - _ => return Err(TransactionError::IncorrectInputType), + Input::Gen(_) => return Err(TransactionError::IncorrectInputType), } } - Ok(DecoyInfo { + Ok(Self { mixable, not_mixable, min_decoys, @@ -166,7 +166,7 @@ impl DecoyInfo { /// **There are exceptions to this always being the minimum decoys** /// /// ref: -pub(crate) fn minimum_decoys(hf: &HardFork) -> usize { +pub(crate) fn minimum_decoys(hf: HardFork) -> usize { use HardFork as HF; match hf { HF::V1 => panic!("hard-fork 1 does not use these rules!"), diff --git a/consensus/rules/src/transactions/ring_ct.rs b/consensus/rules/src/transactions/ring_ct.rs index 62f71dd..32cedd4 100644 --- a/consensus/rules/src/transactions/ring_ct.rs +++ b/consensus/rules/src/transactions/ring_ct.rs @@ -40,10 +40,10 @@ pub enum RingCTError { CLSAGError(#[from] ClsagError), } -/// Checks the RingCT type is allowed for the current hard fork. +/// Checks the `RingCT` type is allowed for the current hard fork. 
/// /// -fn check_rct_type(ty: &RctType, hf: HardFork, tx_hash: &[u8; 32]) -> Result<(), RingCTError> { +fn check_rct_type(ty: RctType, hf: HardFork, tx_hash: &[u8; 32]) -> Result<(), RingCTError> { use HardFork as F; use RctType as T; @@ -125,11 +125,11 @@ pub(crate) fn ring_ct_semantic_checks( proofs: &RctProofs, tx_hash: &[u8; 32], verifier: impl BatchVerifier, - hf: &HardFork, + hf: HardFork, ) -> Result<(), RingCTError> { let rct_type = proofs.rct_type(); - check_rct_type(&rct_type, *hf, tx_hash)?; + check_rct_type(rct_type, hf, tx_hash)?; check_output_range_proofs(proofs, verifier)?; if rct_type != RctType::AggregateMlsagBorromean { @@ -154,7 +154,7 @@ pub(crate) fn check_input_signatures( }; if rings.is_empty() { - Err(RingCTError::RingInvalid)?; + return Err(RingCTError::RingInvalid); } let pseudo_outs = match &proofs.prunable { @@ -222,20 +222,20 @@ mod tests { #[test] fn grandfathered_bulletproofs2() { assert!(check_rct_type( - &RctType::MlsagBulletproofsCompactAmount, + RctType::MlsagBulletproofsCompactAmount, HardFork::V14, &[0; 32] ) .is_err()); assert!(check_rct_type( - &RctType::MlsagBulletproofsCompactAmount, + RctType::MlsagBulletproofsCompactAmount, HardFork::V14, &GRANDFATHERED_TRANSACTIONS[0] ) .is_ok()); assert!(check_rct_type( - &RctType::MlsagBulletproofsCompactAmount, + RctType::MlsagBulletproofsCompactAmount, HardFork::V14, &GRANDFATHERED_TRANSACTIONS[1] ) diff --git a/consensus/rules/src/transactions/ring_signatures.rs b/consensus/rules/src/transactions/ring_signatures.rs index 7d4b8f9..a226f5e 100644 --- a/consensus/rules/src/transactions/ring_signatures.rs +++ b/consensus/rules/src/transactions/ring_signatures.rs @@ -17,7 +17,7 @@ use crate::try_par_iter; /// Verifies the ring signature. 
/// /// ref: -pub fn check_input_signatures( +pub(crate) fn check_input_signatures( inputs: &[Input], signatures: &[RingSignature], rings: &Rings, @@ -45,7 +45,7 @@ pub fn check_input_signatures( Ok(()) })?; } - _ => panic!("tried to verify v1 tx with a non v1 ring"), + Rings::RingCT(_) => panic!("tried to verify v1 tx with a non v1 ring"), } Ok(()) } diff --git a/consensus/rules/src/transactions/tests.rs b/consensus/rules/src/transactions/tests.rs index 4da8fd5..936d843 100644 --- a/consensus/rules/src/transactions/tests.rs +++ b/consensus/rules/src/transactions/tests.rs @@ -16,13 +16,13 @@ use crate::decomposed_amount::DECOMPOSED_AMOUNTS; #[test] fn test_check_output_amount_v1() { - for amount in DECOMPOSED_AMOUNTS.iter() { - assert!(check_output_amount_v1(*amount, &HardFork::V2).is_ok()) + for amount in &DECOMPOSED_AMOUNTS { + assert!(check_output_amount_v1(*amount, HardFork::V2).is_ok()); } proptest!(|(amount in any::().prop_filter("value_decomposed", |val| !is_decomposed_amount(val)))| { - prop_assert!(check_output_amount_v1(amount, &HardFork::V2).is_err()); - prop_assert!(check_output_amount_v1(amount, &HardFork::V1).is_ok()) + prop_assert!(check_output_amount_v1(amount, HardFork::V2).is_err()); + prop_assert!(check_output_amount_v1(amount, HardFork::V1).is_ok()); }); } @@ -41,10 +41,10 @@ fn test_sum_outputs() { let outs = [output_10, outputs_20]; - let sum = sum_outputs(&outs, &HardFork::V16, &TxVersion::RingSignatures).unwrap(); + let sum = sum_outputs(&outs, HardFork::V16, TxVersion::RingSignatures).unwrap(); assert_eq!(sum, 30); - assert!(sum_outputs(&outs, &HardFork::V16, &TxVersion::RingCT).is_err()) + assert!(sum_outputs(&outs, HardFork::V16, TxVersion::RingCT).is_err()); } #[test] @@ -52,50 +52,50 @@ fn test_decoy_info() { let decoy_info = DecoyInfo { mixable: 0, not_mixable: 0, - min_decoys: minimum_decoys(&HardFork::V8), - max_decoys: minimum_decoys(&HardFork::V8) + 1, + min_decoys: minimum_decoys(HardFork::V8), + max_decoys: 
minimum_decoys(HardFork::V8) + 1, }; - assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_ok()); - assert!(check_decoy_info(&decoy_info, &HardFork::V16).is_err()); + assert!(check_decoy_info(&decoy_info, HardFork::V8).is_ok()); + assert!(check_decoy_info(&decoy_info, HardFork::V16).is_err()); let mut decoy_info = DecoyInfo { mixable: 0, not_mixable: 0, - min_decoys: minimum_decoys(&HardFork::V8) - 1, - max_decoys: minimum_decoys(&HardFork::V8) + 1, + min_decoys: minimum_decoys(HardFork::V8) - 1, + max_decoys: minimum_decoys(HardFork::V8) + 1, }; - assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_err()); + assert!(check_decoy_info(&decoy_info, HardFork::V8).is_err()); decoy_info.not_mixable = 1; - assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_ok()); + assert!(check_decoy_info(&decoy_info, HardFork::V8).is_ok()); decoy_info.mixable = 2; - assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_err()); + assert!(check_decoy_info(&decoy_info, HardFork::V8).is_err()); let mut decoy_info = DecoyInfo { mixable: 0, not_mixable: 0, - min_decoys: minimum_decoys(&HardFork::V12), - max_decoys: minimum_decoys(&HardFork::V12) + 1, + min_decoys: minimum_decoys(HardFork::V12), + max_decoys: minimum_decoys(HardFork::V12) + 1, }; - assert!(check_decoy_info(&decoy_info, &HardFork::V12).is_err()); + assert!(check_decoy_info(&decoy_info, HardFork::V12).is_err()); decoy_info.max_decoys = decoy_info.min_decoys; - assert!(check_decoy_info(&decoy_info, &HardFork::V12).is_ok()); + assert!(check_decoy_info(&decoy_info, HardFork::V12).is_ok()); } #[test] fn test_torsion_ki() { - for &key_image in EIGHT_TORSION[1..].iter() { + for &key_image in &EIGHT_TORSION[1..] { assert!(check_key_images(&Input::ToKey { key_image, amount: None, key_offsets: vec![], }) - .is_err()) + .is_err()); } } @@ -109,7 +109,7 @@ prop_compose! { prop_compose! { /// Returns a valid torsioned point. 
fn random_torsioned_point()(point in random_point(), torsion in 1..8_usize ) -> EdwardsPoint { - point + curve25519_dalek::constants::EIGHT_TORSION[torsion] + point + EIGHT_TORSION[torsion] } } @@ -175,7 +175,7 @@ prop_compose! { /// Returns a [`Timelock`] that is unlocked given a height and time. fn unlocked_timelock(height: u64, time_for_time_lock: u64)( ty in 0..3, - lock_height in 0..(height+1), + lock_height in 0..=height, time_for_time_lock in 0..(time_for_time_lock+121), ) -> Timelock { match ty { @@ -203,33 +203,33 @@ proptest! { hf_no_view_tags in hf_in_range(1..14), hf_view_tags in hf_in_range(16..17), ) { - prop_assert!(check_output_types(&view_tag_outs, &hf_view_tags).is_ok()); - prop_assert!(check_output_types(&view_tag_outs, &hf_no_view_tags).is_err()); + prop_assert!(check_output_types(&view_tag_outs, hf_view_tags).is_ok()); + prop_assert!(check_output_types(&view_tag_outs, hf_no_view_tags).is_err()); - prop_assert!(check_output_types(&non_view_tag_outs, &hf_no_view_tags).is_ok()); - prop_assert!(check_output_types(&non_view_tag_outs, &hf_view_tags).is_err()); + prop_assert!(check_output_types(&non_view_tag_outs, hf_no_view_tags).is_ok()); + prop_assert!(check_output_types(&non_view_tag_outs, hf_view_tags).is_err()); - prop_assert!(check_output_types(&non_view_tag_outs, &HardFork::V15).is_ok()); - prop_assert!(check_output_types(&view_tag_outs, &HardFork::V15).is_ok()); + prop_assert!(check_output_types(&non_view_tag_outs, HardFork::V15).is_ok()); + prop_assert!(check_output_types(&view_tag_outs, HardFork::V15).is_ok()); view_tag_outs.append(&mut non_view_tag_outs); - prop_assert!(check_output_types(&view_tag_outs, &HardFork::V15).is_err()); + prop_assert!(check_output_types(&view_tag_outs, HardFork::V15).is_err()); } #[test] fn test_valid_number_of_outputs(valid_numb_outs in 2..17_usize) { - prop_assert!(check_number_of_outputs(valid_numb_outs, &HardFork::V16, &TxVersion::RingCT, true).is_ok()); + 
prop_assert!(check_number_of_outputs(valid_numb_outs, HardFork::V16, TxVersion::RingCT, true).is_ok()); } #[test] fn test_invalid_number_of_outputs(numb_outs in 17..usize::MAX) { - prop_assert!(check_number_of_outputs(numb_outs, &HardFork::V16, &TxVersion::RingCT, true).is_err()); + prop_assert!(check_number_of_outputs(numb_outs, HardFork::V16, TxVersion::RingCT, true).is_err()); } #[test] fn test_check_output_amount_v2(amt in 1..u64::MAX) { prop_assert!(check_output_amount_v2(amt).is_err()); - prop_assert!(check_output_amount_v2(0).is_ok()) + prop_assert!(check_output_amount_v2(0).is_ok()); } #[test] @@ -241,9 +241,9 @@ proptest! { #[test] fn test_timestamp_time_lock(timestamp in 500_000_001..u64::MAX) { - prop_assert!(check_timestamp_time_lock(timestamp, timestamp - 120, &HardFork::V16)); - prop_assert!(!check_timestamp_time_lock(timestamp, timestamp - 121, &HardFork::V16)); - prop_assert!(check_timestamp_time_lock(timestamp, timestamp, &HardFork::V16)); + prop_assert!(check_timestamp_time_lock(timestamp, timestamp - 120, HardFork::V16)); + prop_assert!(!check_timestamp_time_lock(timestamp, timestamp - 121, HardFork::V16)); + prop_assert!(check_timestamp_time_lock(timestamp, timestamp, HardFork::V16)); } #[test] @@ -251,11 +251,11 @@ proptest! 
{ mut locked_locks in vec(locked_timelock(5_000, 100_000_000), 1..50), mut unlocked_locks in vec(unlocked_timelock(5_000, 100_000_000), 1..50) ) { - assert!(check_all_time_locks(&locked_locks, 5_000, 100_000_000, &HardFork::V16).is_err()); - assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, &HardFork::V16).is_ok()); + assert!(check_all_time_locks(&locked_locks, 5_000, 100_000_000, HardFork::V16).is_err()); + assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, HardFork::V16).is_ok()); unlocked_locks.append(&mut locked_locks); - assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, &HardFork::V16).is_err()); + assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, HardFork::V16).is_err()); } #[test] diff --git a/consensus/src/batch_verifier.rs b/consensus/src/batch_verifier.rs index 69018ac..101f981 100644 --- a/consensus/src/batch_verifier.rs +++ b/consensus/src/batch_verifier.rs @@ -1,4 +1,4 @@ -use std::{cell::RefCell, ops::DerefMut}; +use std::cell::RefCell; use monero_serai::ringct::bulletproofs::BatchVerifier as InternalBatchVerifier; use rayon::prelude::*; @@ -13,8 +13,8 @@ pub struct MultiThreadedBatchVerifier { impl MultiThreadedBatchVerifier { /// Create a new multithreaded batch verifier, - pub fn new(numb_threads: usize) -> MultiThreadedBatchVerifier { - MultiThreadedBatchVerifier { + pub fn new(numb_threads: usize) -> Self { + Self { internal: ThreadLocal::with_capacity(numb_threads), } } @@ -42,6 +42,6 @@ impl BatchVerifier for &'_ MultiThreadedBatchVerifier { .get_or(|| RefCell::new(InternalBatchVerifier::new())) .borrow_mut(); - stmt(verifier.deref_mut()) + stmt(&mut verifier) } } diff --git a/consensus/src/block.rs b/consensus/src/block.rs index e785a6b..3d0db99 100644 --- a/consensus/src/block.rs +++ b/consensus/src/block.rs @@ -72,17 +72,17 @@ impl PreparedBlockExPow { /// This errors if either the `block`'s: /// - Hard-fork values are invalid /// - Miner transaction is missing a miner input - 
pub fn new(block: Block) -> Result { + pub fn new(block: Block) -> Result { let (hf_version, hf_vote) = HardFork::from_block_header(&block.header) .map_err(|_| BlockError::HardForkError(HardForkError::HardForkUnknown))?; let Some(Input::Gen(height)) = block.miner_transaction.prefix().inputs.first() else { - Err(ConsensusError::Block(BlockError::MinerTxError( + return Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputNotOfTypeGen, - )))? + ))); }; - Ok(PreparedBlockExPow { + Ok(Self { block_blob: block.serialize(), hf_vote, hf_version, @@ -123,20 +123,17 @@ impl PreparedBlock { /// /// The randomX VM must be Some if RX is needed or this will panic. /// The randomX VM must also be initialised with the correct seed. - fn new( - block: Block, - randomx_vm: Option<&R>, - ) -> Result { + fn new(block: Block, randomx_vm: Option<&R>) -> Result { let (hf_version, hf_vote) = HardFork::from_block_header(&block.header) .map_err(|_| BlockError::HardForkError(HardForkError::HardForkUnknown))?; let [Input::Gen(height)] = &block.miner_transaction.prefix().inputs[..] else { - Err(ConsensusError::Block(BlockError::MinerTxError( + return Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputNotOfTypeGen, - )))? + ))); }; - Ok(PreparedBlock { + Ok(Self { block_blob: block.serialize(), hf_vote, hf_version, @@ -156,17 +153,17 @@ impl PreparedBlock { /// Creates a new [`PreparedBlock`] from a [`PreparedBlockExPow`]. /// - /// This function will give an invalid PoW hash if `randomx_vm` is not initialised + /// This function will give an invalid proof-of-work hash if `randomx_vm` is not initialised /// with the correct seed. /// /// # Panics /// This function will panic if `randomx_vm` is - /// [`None`] even though RandomX is needed. + /// [`None`] even though `RandomX` is needed. 
fn new_prepped( block: PreparedBlockExPow, randomx_vm: Option<&R>, - ) -> Result { - Ok(PreparedBlock { + ) -> Result { + Ok(Self { block_blob: block.block_blob, hf_vote: block.hf_vote, hf_version: block.hf_version, @@ -218,7 +215,6 @@ pub enum VerifyBlockRequest { } /// A response from a verify block request. -#[allow(clippy::large_enum_variant)] // The largest variant is most common ([`MainChain`]) pub enum VerifyBlockResponse { /// This block is valid. MainChain(VerifiedBlockInformation), @@ -254,12 +250,8 @@ where D::Future: Send + 'static, { /// Creates a new block verifier. - pub(crate) fn new( - context_svc: C, - tx_verifier_svc: TxV, - database: D, - ) -> BlockVerifierService { - BlockVerifierService { + pub(crate) const fn new(context_svc: C, tx_verifier_svc: TxV, database: D) -> Self { + Self { context_svc, tx_verifier_svc, _database: database, diff --git a/consensus/src/block/alt_block.rs b/consensus/src/block/alt_block.rs index b20b4f2..3a5ea7c 100644 --- a/consensus/src/block/alt_block.rs +++ b/consensus/src/block/alt_block.rs @@ -36,8 +36,8 @@ use crate::{ /// /// Returns [`AltBlockInformation`], which contains the cumulative difficulty of the alt chain. /// -/// This function only checks the block's PoW and its weight. -pub async fn sanity_check_alt_block( +/// This function only checks the block's proof-of-work and its weight. +pub(crate) async fn sanity_check_alt_block( block: Block, txs: HashMap<[u8; 32], TransactionVerificationData>, mut context_svc: C, @@ -66,15 +66,17 @@ where // Check if the block's miner input is formed correctly. let [Input::Gen(height)] = &block.miner_transaction.prefix().inputs[..] else { - Err(ConsensusError::Block(BlockError::MinerTxError( + return Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputNotOfTypeGen, - )))? 
+ )) + .into()); }; if *height != alt_context_cache.chain_height { - Err(ConsensusError::Block(BlockError::MinerTxError( + return Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputsHeightIncorrect, - )))? + )) + .into()); } // prep the alt block. @@ -103,10 +105,10 @@ where if let Some(median_timestamp) = difficulty_cache.median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)) { - check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)? + check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)?; }; - let next_difficulty = difficulty_cache.next_difficulty(&prepped_block.hf_version); + let next_difficulty = difficulty_cache.next_difficulty(prepped_block.hf_version); // make sure the block's PoW is valid for this difficulty. check_block_pow(&prepped_block.pow_hash, next_difficulty).map_err(ConsensusError::Block)?; @@ -127,12 +129,12 @@ where // Check the block weight is below the limit. check_block_weight( block_weight, - alt_weight_cache.median_for_block_reward(&prepped_block.hf_version), + alt_weight_cache.median_for_block_reward(prepped_block.hf_version), ) .map_err(ConsensusError::Block)?; let long_term_weight = weight::calculate_block_long_term_weight( - &prepped_block.hf_version, + prepped_block.hf_version, block_weight, alt_weight_cache.median_long_term_weight(), ); @@ -232,9 +234,9 @@ where } }; - Ok(Some( - alt_chain_context.cached_rx_vm.insert(cached_vm).1.clone(), - )) + Ok(Some(Arc::clone( + &alt_chain_context.cached_rx_vm.insert(cached_vm).1, + ))) } /// Returns the [`DifficultyCache`] for the alt chain. diff --git a/consensus/src/block/batch_prepare.rs b/consensus/src/block/batch_prepare.rs index d32cd76..9c77848 100644 --- a/consensus/src/block/batch_prepare.rs +++ b/consensus/src/block/batch_prepare.rs @@ -68,16 +68,17 @@ where // Make sure no blocks in the batch have a higher hard fork than the last block. 
if block_0.hf_version > top_hf_in_batch { - Err(ConsensusError::Block(BlockError::HardForkError( + return Err(ConsensusError::Block(BlockError::HardForkError( HardForkError::VersionIncorrect, - )))?; + )) + .into()); } if block_0.block_hash != block_1.block.header.previous || block_0.height != block_1.height - 1 { tracing::debug!("Blocks do not follow each other, verification failed."); - Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?; + return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into()); } // Cache any potential RX VM seeds as we may need them for future blocks in the batch. @@ -85,7 +86,7 @@ where new_rx_vm = Some((block_0.height, block_0.block_hash)); } - timestamps_hfs.push((block_0.block.header.timestamp, block_0.hf_version)) + timestamps_hfs.push((block_0.block.header.timestamp, block_0.hf_version)); } // Get the current blockchain context. @@ -117,15 +118,16 @@ where if context.chain_height != blocks[0].height { tracing::debug!("Blocks do not follow main chain, verification failed."); - Err(ConsensusError::Block(BlockError::MinerTxError( + return Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputsHeightIncorrect, - )))?; + )) + .into()); } if context.top_hash != blocks[0].block.header.previous { tracing::debug!("Blocks do not follow main chain, verification failed."); - Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?; + return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into()); } let mut rx_vms = if top_hf_in_batch < HardFork::V12 { @@ -156,7 +158,7 @@ where context_svc .oneshot(BlockChainContextRequest::NewRXVM(( new_vm_seed, - new_vm.clone(), + Arc::clone(&new_vm), ))) .await?; diff --git a/consensus/src/context.rs b/consensus/src/context.rs index 9e71304..5bdb1ce 100644 --- a/consensus/src/context.rs +++ b/consensus/src/context.rs @@ -56,8 +56,8 @@ pub struct ContextConfig { impl ContextConfig { /// Get the config for main-net. 
- pub fn main_net() -> ContextConfig { - ContextConfig { + pub const fn main_net() -> Self { + Self { hard_fork_cfg: HardForkConfig::main_net(), difficulty_cfg: DifficultyCacheConfig::main_net(), weights_config: BlockWeightsCacheConfig::main_net(), @@ -65,8 +65,8 @@ impl ContextConfig { } /// Get the config for stage-net. - pub fn stage_net() -> ContextConfig { - ContextConfig { + pub const fn stage_net() -> Self { + Self { hard_fork_cfg: HardForkConfig::stage_net(), // These 2 have the same config as main-net. difficulty_cfg: DifficultyCacheConfig::main_net(), @@ -75,8 +75,8 @@ impl ContextConfig { } /// Get the config for test-net. - pub fn test_net() -> ContextConfig { - ContextConfig { + pub const fn test_net() -> Self { + Self { hard_fork_cfg: HardForkConfig::test_net(), // These 2 have the same config as main-net. difficulty_cfg: DifficultyCacheConfig::main_net(), @@ -155,7 +155,7 @@ impl RawBlockChainContext { /// Returns the next blocks long term weight from its block weight. pub fn next_block_long_term_weight(&self, block_weight: usize) -> usize { weight::calculate_block_long_term_weight( - &self.current_hf, + self.current_hf, block_weight, self.median_long_term_weight, ) @@ -191,7 +191,7 @@ impl BlockChainContext { } /// Returns the blockchain context without checking the validity token. - pub fn unchecked_blockchain_context(&self) -> &RawBlockChainContext { + pub const fn unchecked_blockchain_context(&self) -> &RawBlockChainContext { &self.raw } } @@ -222,7 +222,7 @@ pub struct NewBlockData { pub enum BlockChainContextRequest { /// Get the current blockchain context. GetContext, - /// Gets the current RandomX VM. + /// Gets the current `RandomX` VM. GetCurrentRxVm, /// Get the next difficulties for these blocks. /// @@ -288,7 +288,7 @@ pub enum BlockChainContextRequest { /// This variant is private and is not callable from outside this crate, the block verifier service will /// handle getting the randomX VM of an alt chain. 
AltChainRxVM { - /// The height the RandomX VM is needed for. + /// The height the `RandomX` VM is needed for. height: usize, /// The chain to look in for the seed. chain: Chain, @@ -312,7 +312,7 @@ pub enum BlockChainContextRequest { pub enum BlockChainContextResponse { /// Blockchain context response. Context(BlockChainContext), - /// A map of seed height to RandomX VMs. + /// A map of seed height to `RandomX` VMs. RxVms(HashMap>), /// A list of difficulties. BatchDifficulties(Vec), diff --git a/consensus/src/context/alt_chains.rs b/consensus/src/context/alt_chains.rs index 937e847..cd945c8 100644 --- a/consensus/src/context/alt_chains.rs +++ b/consensus/src/context/alt_chains.rs @@ -68,29 +68,33 @@ impl AltChainContextCache { } /// A map of top IDs to alt chains. -pub struct AltChainMap { +pub(crate) struct AltChainMap { alt_cache_map: HashMap<[u8; 32], Box>, } impl AltChainMap { - pub fn new() -> Self { + pub(crate) fn new() -> Self { Self { alt_cache_map: HashMap::new(), } } - pub fn clear(&mut self) { + pub(crate) fn clear(&mut self) { self.alt_cache_map.clear(); } /// Add an alt chain cache to the map. - pub fn add_alt_cache(&mut self, prev_id: [u8; 32], alt_cache: Box) { + pub(crate) fn add_alt_cache( + &mut self, + prev_id: [u8; 32], + alt_cache: Box, + ) { self.alt_cache_map.insert(prev_id, alt_cache); } /// Attempts to take an [`AltChainContextCache`] from the map, returning [`None`] if no cache is /// present. - pub async fn get_alt_chain_context( + pub(crate) async fn get_alt_chain_context( &mut self, prev_id: [u8; 32], database: D, @@ -109,7 +113,7 @@ impl AltChainMap { let Some((parent_chain, top_height)) = res else { // Couldn't find prev_id - Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))? + return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into()); }; Ok(Box::new(AltChainContextCache { @@ -125,7 +129,7 @@ impl AltChainMap { } /// Builds a [`DifficultyCache`] for an alt chain. 
-pub async fn get_alt_chain_difficulty_cache( +pub(crate) async fn get_alt_chain_difficulty_cache( prev_id: [u8; 32], main_chain_difficulty_cache: &DifficultyCache, mut database: D, @@ -142,7 +146,7 @@ pub async fn get_alt_chain_difficulty_cache( let Some((chain, top_height)) = res else { // Can't find prev_id - Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))? + return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into()); }; Ok(match chain { @@ -172,7 +176,7 @@ pub async fn get_alt_chain_difficulty_cache( } /// Builds a [`BlockWeightsCache`] for an alt chain. -pub async fn get_alt_chain_weight_cache( +pub(crate) async fn get_alt_chain_weight_cache( prev_id: [u8; 32], main_chain_weight_cache: &BlockWeightsCache, mut database: D, @@ -189,7 +193,7 @@ pub async fn get_alt_chain_weight_cache( let Some((chain, top_height)) = res else { // Can't find prev_id - Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))? + return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into()); }; Ok(match chain { diff --git a/consensus/src/context/difficulty.rs b/consensus/src/context/difficulty.rs index eb67cf5..9316dc5 100644 --- a/consensus/src/context/difficulty.rs +++ b/consensus/src/context/difficulty.rs @@ -43,24 +43,24 @@ impl DifficultyCacheConfig { /// /// # Notes /// You probably do not need this, use [`DifficultyCacheConfig::main_net`] instead. - pub const fn new(window: usize, cut: usize, lag: usize) -> DifficultyCacheConfig { - DifficultyCacheConfig { window, cut, lag } + pub const fn new(window: usize, cut: usize, lag: usize) -> Self { + Self { window, cut, lag } } /// Returns the total amount of blocks we need to track to calculate difficulty - pub fn total_block_count(&self) -> usize { + pub const fn total_block_count(&self) -> usize { self.window + self.lag } /// The amount of blocks we account for after removing the outliers. 
- pub fn accounted_window_len(&self) -> usize { + pub const fn accounted_window_len(&self) -> usize { self.window - 2 * self.cut } /// Returns the config needed for [`Mainnet`](cuprate_helper::network::Network::Mainnet). This is also the /// config for all other current networks. - pub const fn main_net() -> DifficultyCacheConfig { - DifficultyCacheConfig { + pub const fn main_net() -> Self { + Self { window: DIFFICULTY_WINDOW, cut: DIFFICULTY_CUT, lag: DIFFICULTY_LAG, @@ -112,7 +112,7 @@ impl DifficultyCache { timestamps.len() ); - let diff = DifficultyCache { + let diff = Self { timestamps, cumulative_difficulties, last_accounted_height: chain_height - 1, @@ -203,8 +203,8 @@ impl DifficultyCache { /// Returns the required difficulty for the next block. /// - /// See: https://cuprate.github.io/monero-book/consensus_rules/blocks/difficulty.html#calculating-difficulty - pub fn next_difficulty(&self, hf: &HardFork) -> u128 { + /// See: + pub fn next_difficulty(&self, hf: HardFork) -> u128 { next_difficulty( &self.config, &self.timestamps, @@ -223,7 +223,7 @@ impl DifficultyCache { pub fn next_difficulties( &self, blocks: Vec<(u64, HardFork)>, - current_hf: &HardFork, + current_hf: HardFork, ) -> Vec { let mut timestamps = self.timestamps.clone(); let mut cumulative_difficulties = self.cumulative_difficulties.clone(); @@ -232,8 +232,6 @@ impl DifficultyCache { difficulties.push(self.next_difficulty(current_hf)); - let mut diff_info_popped = Vec::new(); - for (new_timestamp, hf) in blocks { timestamps.push_back(new_timestamp); @@ -241,17 +239,15 @@ impl DifficultyCache { cumulative_difficulties.push_back(last_cum_diff + *difficulties.last().unwrap()); if timestamps.len() > self.config.total_block_count() { - diff_info_popped.push(( - timestamps.pop_front().unwrap(), - cumulative_difficulties.pop_front().unwrap(), - )); + timestamps.pop_front().unwrap(); + cumulative_difficulties.pop_front().unwrap(); } difficulties.push(next_difficulty( &self.config, ×tamps, 
&cumulative_difficulties, - &hf, + hf, )); } @@ -295,12 +291,12 @@ impl DifficultyCache { } } -/// Calculates the next difficulty with the inputted config/timestamps/cumulative_difficulties. +/// Calculates the next difficulty with the inputted `config/timestamps/cumulative_difficulties`. fn next_difficulty( config: &DifficultyCacheConfig, timestamps: &VecDeque, cumulative_difficulties: &VecDeque, - hf: &HardFork, + hf: HardFork, ) -> u128 { if timestamps.len() <= 1 { return 1; diff --git a/consensus/src/context/hardforks.rs b/consensus/src/context/hardforks.rs index 682933d..16ae763 100644 --- a/consensus/src/context/hardforks.rs +++ b/consensus/src/context/hardforks.rs @@ -28,7 +28,7 @@ pub struct HardForkConfig { impl HardForkConfig { /// Config for main-net. - pub const fn main_net() -> HardForkConfig { + pub const fn main_net() -> Self { Self { info: HFsInfo::main_net(), window: DEFAULT_WINDOW_SIZE, @@ -36,7 +36,7 @@ impl HardForkConfig { } /// Config for stage-net. - pub const fn stage_net() -> HardForkConfig { + pub const fn stage_net() -> Self { Self { info: HFsInfo::stage_net(), window: DEFAULT_WINDOW_SIZE, @@ -44,7 +44,7 @@ impl HardForkConfig { } /// Config for test-net. - pub const fn test_net() -> HardForkConfig { + pub const fn test_net() -> Self { Self { info: HFsInfo::test_net(), window: DEFAULT_WINDOW_SIZE, @@ -54,7 +54,7 @@ impl HardForkConfig { /// A struct that keeps track of the current hard-fork and current votes. #[derive(Debug, Clone, Eq, PartialEq)] -pub struct HardForkState { +pub(crate) struct HardForkState { /// The current active hard-fork. 
pub(crate) current_hardfork: HardFork, @@ -83,7 +83,7 @@ impl HardForkState { get_votes_in_range(database.clone(), block_start..chain_height, config.window).await?; if chain_height > config.window { - debug_assert_eq!(votes.total_votes(), config.window) + debug_assert_eq!(votes.total_votes(), config.window); } let BlockchainResponse::BlockExtendedHeader(ext_header) = database @@ -97,7 +97,7 @@ impl HardForkState { let current_hardfork = ext_header.version; - let mut hfs = HardForkState { + let mut hfs = Self { config, current_hardfork, votes, @@ -122,7 +122,7 @@ impl HardForkState { /// # Invariant /// /// This _must_ only be used on a main-chain cache. - pub async fn pop_blocks_main_chain( + pub(crate) async fn pop_blocks_main_chain( &mut self, numb_blocks: usize, database: D, @@ -159,7 +159,7 @@ impl HardForkState { } /// Add a new block to the cache. - pub fn new_block(&mut self, vote: HardFork, height: usize) { + pub(crate) fn new_block(&mut self, vote: HardFork, height: usize) { // We don't _need_ to take in `height` but it's for safety, so we don't silently loose track // of blocks. assert_eq!(self.last_height + 1, height); @@ -183,7 +183,7 @@ impl HardForkState { /// Checks if the next hard-fork should be activated and activates it if it should. /// - /// https://cuprate.github.io/monero-docs/consensus_rules/hardforks.html#accepting-a-fork + /// fn check_set_new_hf(&mut self) { self.current_hardfork = self.votes.current_fork( &self.current_hardfork, @@ -194,7 +194,7 @@ impl HardForkState { } /// Returns the current hard-fork. 
- pub fn current_hardfork(&self) -> HardFork { + pub(crate) const fn current_hardfork(&self) -> HardFork { self.current_hardfork } } @@ -218,7 +218,7 @@ async fn get_votes_in_range( panic!("Database sent incorrect response!"); }; - for hf_info in vote_list.into_iter() { + for hf_info in vote_list { votes.add_vote_for_hf(&HardFork::from_vote(hf_info.vote)); } diff --git a/consensus/src/context/rx_vms.rs b/consensus/src/context/rx_vms.rs index b1ab102..c6375fc 100644 --- a/consensus/src/context/rx_vms.rs +++ b/consensus/src/context/rx_vms.rs @@ -1,6 +1,6 @@ -//! RandomX VM Cache +//! `RandomX` VM Cache //! -//! This module keeps track of the RandomX VM to calculate the next blocks PoW, if the block needs a randomX VM and potentially +//! This module keeps track of the `RandomX` VM to calculate the next blocks proof-of-work, if the block needs a randomX VM and potentially //! more VMs around this height. //! use std::{ @@ -34,11 +34,11 @@ const RX_SEEDS_CACHED: usize = 2; /// A multithreaded randomX VM. #[derive(Debug)] pub struct RandomXVm { - /// These RandomX VMs all share the same cache. + /// These `RandomX` VMs all share the same cache. vms: ThreadLocal, - /// The RandomX cache. + /// The `RandomX` cache. cache: RandomXCache, - /// The flags used to start the RandomX VMs. + /// The flags used to start the `RandomX` VMs. flags: RandomXFlag, } @@ -50,7 +50,7 @@ impl RandomXVm { let cache = RandomXCache::new(flags, seed.as_slice())?; - Ok(RandomXVm { + Ok(Self { vms: ThreadLocal::new(), cache, flags, @@ -69,10 +69,10 @@ impl RandomX for RandomXVm { } } -/// The randomX VMs cache, keeps the VM needed to calculate the current block's PoW hash (if a VM is needed) and a +/// The randomX VMs cache, keeps the VM needed to calculate the current block's proof-of-work hash (if a VM is needed) and a /// couple more around this VM. #[derive(Clone, Debug)] -pub struct RandomXVmCache { +pub(crate) struct RandomXVmCache { /// The top [`RX_SEEDS_CACHED`] RX seeds. 
pub(crate) seeds: VecDeque<(usize, [u8; 32])>, /// The VMs for `seeds` (if after hf 12, otherwise this will be empty). @@ -117,7 +117,7 @@ impl RandomXVmCache { HashMap::new() }; - Ok(RandomXVmCache { + Ok(Self { seeds, vms, cached_vm: None, @@ -125,14 +125,14 @@ impl RandomXVmCache { } /// Add a randomX VM to the cache, with the seed it was created with. - pub fn add_vm(&mut self, vm: ([u8; 32], Arc)) { + pub(crate) fn add_vm(&mut self, vm: ([u8; 32], Arc)) { self.cached_vm.replace(vm); } /// Creates a RX VM for an alt chain, looking at the main chain RX VMs to see if we can use one /// of them first. - pub async fn get_alt_vm( - &mut self, + pub(crate) async fn get_alt_vm( + &self, height: usize, chain: Chain, database: D, @@ -152,7 +152,7 @@ impl RandomXVmCache { break; }; - return Ok(vm.clone()); + return Ok(Arc::clone(vm)); } } @@ -161,8 +161,8 @@ impl RandomXVmCache { Ok(alt_vm) } - /// Get the main-chain RandomX VMs. - pub async fn get_vms(&mut self) -> HashMap> { + /// Get the main-chain `RandomX` VMs. + pub(crate) async fn get_vms(&mut self) -> HashMap> { match self.seeds.len().checked_sub(self.vms.len()) { // No difference in the amount of seeds to VMs. Some(0) => (), @@ -206,23 +206,23 @@ impl RandomXVmCache { }) .collect() }) - .await + .await; } } self.vms.clone() } - /// Removes all the RandomX VMs above the `new_height`. - pub fn pop_blocks_main_chain(&mut self, new_height: usize) { + /// Removes all the `RandomX` VMs above the `new_height`. + pub(crate) fn pop_blocks_main_chain(&mut self, new_height: usize) { self.seeds.retain(|(height, _)| *height < new_height); self.vms.retain(|height, _| *height < new_height); } /// Add a new block to the VM cache. /// - /// hash is the block hash not the blocks PoW hash. - pub fn new_block(&mut self, height: usize, hash: &[u8; 32]) { + /// hash is the block hash not the blocks proof-of-work hash. 
+ pub(crate) fn new_block(&mut self, height: usize, hash: &[u8; 32]) { if is_randomx_seed_height(height) { tracing::debug!("Block {height} is a randomX seed height, adding it to the cache.",); @@ -235,7 +235,7 @@ impl RandomXVmCache { self.seeds .iter() .any(|(cached_height, _)| height == cached_height) - }) + }); } } } @@ -258,7 +258,7 @@ pub(crate) fn get_last_rx_seed_heights(mut last_height: usize, mut amount: usize // We don't include the lag as we only want seeds not the specific seed for this height. let seed_height = (last_height - 1) & !(RX_SEEDHASH_EPOCH_BLOCKS - 1); seeds.push(seed_height); - last_height = seed_height + last_height = seed_height; } seeds diff --git a/consensus/src/context/task.rs b/consensus/src/context/task.rs index bc54285..82b466c 100644 --- a/consensus/src/context/task.rs +++ b/consensus/src/context/task.rs @@ -36,7 +36,7 @@ pub(super) struct ContextTaskRequest { } /// The Context task that keeps the blockchain context and handles requests. -pub struct ContextTask { +pub(crate) struct ContextTask { /// A token used to invalidate previous contexts when a new /// block is added to the chain. current_validity_token: ValidityToken, @@ -65,7 +65,7 @@ pub struct ContextTask { impl ContextTask { /// Initialize the [`ContextTask`], this will need to pull a lot of data from the database so may take a /// while to complete. - pub async fn init_context( + pub(crate) async fn init_context( cfg: ContextConfig, mut database: D, ) -> Result { @@ -131,7 +131,7 @@ impl ContextTask { rx_vms::RandomXVmCache::init_from_chain_height(chain_height, ¤t_hf, db).await }); - let context_svc = ContextTask { + let context_svc = Self { current_validity_token: ValidityToken::new(), difficulty_cache: difficulty_cache_handle.await.unwrap()?, weight_cache: weight_cache_handle.await.unwrap()?, @@ -148,7 +148,7 @@ impl ContextTask { } /// Handles a [`BlockChainContextRequest`] and returns a [`BlockChainContextResponse`]. 
- pub async fn handle_req( + pub(crate) async fn handle_req( &mut self, req: BlockChainContextRequest, ) -> Result { @@ -164,17 +164,17 @@ impl ContextTask { context_to_verify_block: ContextToVerifyBlock { median_weight_for_block_reward: self .weight_cache - .median_for_block_reward(¤t_hf), + .median_for_block_reward(current_hf), effective_median_weight: self .weight_cache - .effective_median_block_weight(¤t_hf), + .effective_median_block_weight(current_hf), top_hash: self.top_block_hash, median_block_timestamp: self .difficulty_cache .median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)), chain_height: self.chain_height, current_hf, - next_difficulty: self.difficulty_cache.next_difficulty(¤t_hf), + next_difficulty: self.difficulty_cache.next_difficulty(current_hf), already_generated_coins: self.already_generated_coins, }, cumulative_difficulty: self.difficulty_cache.cumulative_difficulty(), @@ -191,7 +191,7 @@ impl ContextTask { let next_diffs = self .difficulty_cache - .next_difficulties(blocks, &self.hardfork_state.current_hardfork()); + .next_difficulties(blocks, self.hardfork_state.current_hardfork()); BlockChainContextResponse::BatchDifficulties(next_diffs) } BlockChainContextRequest::NewRXVM(vm) => { @@ -330,10 +330,10 @@ impl ContextTask { /// Run the [`ContextTask`], the task will listen for requests on the passed in channel. When the channel closes the /// task will finish. 
- pub async fn run(mut self, mut rx: mpsc::Receiver) { + pub(crate) async fn run(mut self, mut rx: mpsc::Receiver) { while let Some(req) = rx.recv().await { let res = self.handle_req(req.req).instrument(req.span).await; - let _ = req.tx.send(res); + drop(req.tx.send(res)); } tracing::info!("Shutting down blockchain context task."); diff --git a/consensus/src/context/tokens.rs b/consensus/src/context/tokens.rs index 882d3b5..d222303 100644 --- a/consensus/src/context/tokens.rs +++ b/consensus/src/context/tokens.rs @@ -15,8 +15,8 @@ pub struct ValidityToken { impl ValidityToken { /// Creates a new [`ValidityToken`] - pub fn new() -> ValidityToken { - ValidityToken { + pub fn new() -> Self { + Self { token: CancellationToken::new(), } } @@ -28,6 +28,6 @@ impl ValidityToken { /// Sets the data to invalid. pub fn set_data_invalid(self) { - self.token.cancel() + self.token.cancel(); } } diff --git a/consensus/src/context/weight.rs b/consensus/src/context/weight.rs index 4c89139..e95ae60 100644 --- a/consensus/src/context/weight.rs +++ b/consensus/src/context/weight.rs @@ -38,16 +38,16 @@ pub struct BlockWeightsCacheConfig { impl BlockWeightsCacheConfig { /// Creates a new [`BlockWeightsCacheConfig`] - pub const fn new(short_term_window: usize, long_term_window: usize) -> BlockWeightsCacheConfig { - BlockWeightsCacheConfig { + pub const fn new(short_term_window: usize, long_term_window: usize) -> Self { + Self { short_term_window, long_term_window, } } /// Returns the [`BlockWeightsCacheConfig`] for all networks (They are all the same as mainnet). 
- pub fn main_net() -> BlockWeightsCacheConfig { - BlockWeightsCacheConfig { + pub const fn main_net() -> Self { + Self { short_term_window: SHORT_TERM_WINDOW, long_term_window: LONG_TERM_WINDOW, } @@ -99,7 +99,7 @@ impl BlockWeightsCache { tracing::info!("Initialized block weight cache, chain-height: {:?}, long term weights length: {:?}, short term weights length: {:?}", chain_height, long_term_weights.len(), short_term_block_weights.len()); - Ok(BlockWeightsCache { + Ok(Self { short_term_block_weights: rayon_spawn_async(move || { RollingMedian::from_vec(short_term_block_weights, config.short_term_window) }) @@ -178,7 +178,7 @@ impl BlockWeightsCache { /// Add a new block to the cache. /// - /// The block_height **MUST** be one more than the last height the cache has + /// The `block_height` **MUST** be one more than the last height the cache has /// seen. pub fn new_block(&mut self, block_height: usize, block_weight: usize, long_term_weight: usize) { assert_eq!(self.tip_height + 1, block_height); @@ -208,8 +208,8 @@ impl BlockWeightsCache { /// Returns the effective median weight, used for block reward calculations and to calculate /// the block weight limit. /// - /// See: https://cuprate.github.io/monero-book/consensus_rules/blocks/weight_limit.html#calculating-effective-median-weight - pub fn effective_median_block_weight(&self, hf: &HardFork) -> usize { + /// See: + pub fn effective_median_block_weight(&self, hf: HardFork) -> usize { calculate_effective_median_block_weight( hf, self.median_short_term_weight(), @@ -219,9 +219,9 @@ impl BlockWeightsCache { /// Returns the median weight used to calculate block reward punishment. 
/// - /// https://cuprate.github.io/monero-book/consensus_rules/blocks/reward.html#calculating-block-reward - pub fn median_for_block_reward(&self, hf: &HardFork) -> usize { - if hf < &HardFork::V12 { + /// + pub fn median_for_block_reward(&self, hf: HardFork) -> usize { + if hf < HardFork::V12 { self.median_short_term_weight() } else { self.effective_median_block_weight(hf) @@ -232,17 +232,17 @@ impl BlockWeightsCache { /// Calculates the effective median with the long term and short term median. fn calculate_effective_median_block_weight( - hf: &HardFork, + hf: HardFork, median_short_term_weight: usize, median_long_term_weight: usize, ) -> usize { - if hf < &HardFork::V10 { + if hf < HardFork::V10 { return median_short_term_weight.max(penalty_free_zone(hf)); } let long_term_median = median_long_term_weight.max(PENALTY_FREE_ZONE_5); let short_term_median = median_short_term_weight; - let effective_median = if hf >= &HardFork::V10 && hf < &HardFork::V15 { + let effective_median = if hf >= HardFork::V10 && hf < HardFork::V15 { min( max(PENALTY_FREE_ZONE_5, short_term_median), 50 * long_term_median, @@ -258,19 +258,19 @@ fn calculate_effective_median_block_weight( } /// Calculates a blocks long term weight. -pub fn calculate_block_long_term_weight( - hf: &HardFork, +pub(crate) fn calculate_block_long_term_weight( + hf: HardFork, block_weight: usize, long_term_median: usize, ) -> usize { - if hf < &HardFork::V10 { + if hf < HardFork::V10 { return block_weight; } let long_term_median = max(penalty_free_zone(hf), long_term_median); let (short_term_constraint, adjusted_block_weight) = - if hf >= &HardFork::V10 && hf < &HardFork::V15 { + if hf >= HardFork::V10 && hf < HardFork::V15 { let stc = long_term_median + long_term_median * 2 / 5; (stc, block_weight) } else { diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 004285d..e104cec 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -10,6 +10,16 @@ //! 
implement a database you need to have a service which accepts [`BlockchainReadRequest`] and responds //! with [`BlockchainResponse`]. //! + +cfg_if::cfg_if! { + // Used in external `tests/`. + if #[cfg(test)] { + use cuprate_test_utils as _; + use curve25519_dalek as _; + use hex_literal as _; + } +} + use cuprate_consensus_rules::ConsensusError; mod batch_verifier; @@ -34,6 +44,7 @@ pub use cuprate_types::{ /// An Error returned from one of the consensus services. #[derive(Debug, thiserror::Error)] +#[expect(variant_size_differences)] pub enum ExtendedConsensusError { /// A consensus error. #[error("{0}")] @@ -53,7 +64,8 @@ pub enum ExtendedConsensusError { } /// Initialize the 2 verifier [`tower::Service`]s (block and transaction). -pub async fn initialize_verifier( +#[expect(clippy::type_complexity)] +pub fn initialize_verifier( database: D, ctx_svc: Ctx, ) -> Result< @@ -112,7 +124,7 @@ pub mod __private { Response = BlockchainResponse, Error = tower::BoxError, >, - > crate::Database for T + > Database for T where T::Future: Future> + Send + 'static, { diff --git a/consensus/src/tests.rs b/consensus/src/tests.rs index 13598be..0efef82 100644 --- a/consensus/src/tests.rs +++ b/consensus/src/tests.rs @@ -1,2 +1,2 @@ mod context; -pub mod mock_db; +pub(crate) mod mock_db; diff --git a/consensus/src/tests/context/data.rs b/consensus/src/tests/context/data.rs index baa591c..28f61a4 100644 --- a/consensus/src/tests/context/data.rs +++ b/consensus/src/tests/context/data.rs @@ -1,11 +1,12 @@ use cuprate_consensus_rules::HardFork; -pub static HFS_2688888_2689608: [(HardFork, HardFork); 720] = +pub(crate) static HFS_2688888_2689608: [(HardFork, HardFork); 720] = include!("./data/hfs_2688888_2689608"); -pub static HFS_2678808_2688888: [(HardFork, HardFork); 10080] = +pub(crate) static HFS_2678808_2688888: [(HardFork, HardFork); 10080] = include!("./data/hfs_2678808_2688888"); -pub static BW_2850000_3050000: [(usize, usize); 200_000] = 
include!("./data/bw_2850000_3050000"); +pub(crate) static BW_2850000_3050000: [(usize, usize); 200_000] = + include!("./data/bw_2850000_3050000"); -pub static DIF_3000000_3002000: [(u128, u64); 2000] = include!("./data/dif_3000000_3002000"); +pub(crate) static DIF_3000000_3002000: [(u128, u64); 2000] = include!("./data/dif_3000000_3002000"); diff --git a/consensus/src/tests/context/difficulty.rs b/consensus/src/tests/context/difficulty.rs index a79ae9b..d5027f5 100644 --- a/consensus/src/tests/context/difficulty.rs +++ b/consensus/src/tests/context/difficulty.rs @@ -17,7 +17,7 @@ const TEST_LAG: usize = 2; const TEST_TOTAL_ACCOUNTED_BLOCKS: usize = TEST_WINDOW + TEST_LAG; -pub const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig = +pub(crate) const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig = DifficultyCacheConfig::new(TEST_WINDOW, TEST_CUT, TEST_LAG); #[tokio::test] @@ -35,7 +35,7 @@ async fn first_3_blocks_fixed_difficulty() -> Result<(), tower::BoxError> { .await?; for height in 1..3 { - assert_eq!(difficulty_cache.next_difficulty(&HardFork::V1), 1); + assert_eq!(difficulty_cache.next_difficulty(HardFork::V1), 1); difficulty_cache.new_block(height, 0, u128::MAX); } Ok(()) @@ -66,7 +66,7 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> { for (cum_dif, timestamp) in DIF_3000000_3002000.iter().take(cfg.total_block_count()) { db_builder.add_block( DummyBlockExtendedHeader::default().with_difficulty_info(*timestamp, *cum_dif), - ) + ); } let mut diff_cache = DifficultyCache::init_from_chain_height( @@ -84,7 +84,7 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> { { let diff = diff_info[1].0 - diff_info[0].0; - assert_eq!(diff_cache.next_difficulty(&HardFork::V16), diff); + assert_eq!(diff_cache.next_difficulty(HardFork::V16), diff); diff_cache.new_block(3_000_720 + i, diff_info[1].1, diff_info[1].0); } @@ -139,22 +139,22 @@ proptest! 
{ no_lag_cache.cumulative_difficulties.pop_front(); } // get the difficulty - let next_diff_no_lag = no_lag_cache.next_difficulty(&hf); + let next_diff_no_lag = no_lag_cache.next_difficulty(hf); for _ in 0..TEST_LAG { // add new blocks to the lagged cache diff_cache.new_block(diff_cache.last_accounted_height+1, timestamp, cumulative_difficulty); } // they both should now be the same - prop_assert_eq!(diff_cache.next_difficulty(&hf), next_diff_no_lag) + prop_assert_eq!(diff_cache.next_difficulty(hf), next_diff_no_lag); } #[test] fn next_difficulty_consistent(diff_cache in arb_difficulty_cache(TEST_TOTAL_ACCOUNTED_BLOCKS), hf in any::()) { - let first_call = diff_cache.next_difficulty(&hf); - prop_assert_eq!(first_call, diff_cache.next_difficulty(&hf)); - prop_assert_eq!(first_call, diff_cache.next_difficulty(&hf)); - prop_assert_eq!(first_call, diff_cache.next_difficulty(&hf)); + let first_call = diff_cache.next_difficulty(hf); + prop_assert_eq!(first_call, diff_cache.next_difficulty(hf)); + prop_assert_eq!(first_call, diff_cache.next_difficulty(hf)); + prop_assert_eq!(first_call, diff_cache.next_difficulty(hf)); } #[test] @@ -178,7 +178,7 @@ proptest! { #[test] fn window_size_kept_constant(mut diff_cache in arb_difficulty_cache(TEST_TOTAL_ACCOUNTED_BLOCKS), new_blocks in any::>()) { - for (timestamp, cumulative_difficulty) in new_blocks.into_iter() { + for (timestamp, cumulative_difficulty) in new_blocks { diff_cache.new_block(diff_cache.last_accounted_height+1, timestamp, cumulative_difficulty); prop_assert_eq!(diff_cache.timestamps.len(), TEST_TOTAL_ACCOUNTED_BLOCKS); prop_assert_eq!(diff_cache.cumulative_difficulties.len(), TEST_TOTAL_ACCOUNTED_BLOCKS); @@ -193,7 +193,7 @@ proptest! 
{ ) { let cache = diff_cache.clone(); - diff_cache.next_difficulties(timestamps.into_iter().zip([hf].into_iter().cycle()).collect(), &hf); + diff_cache.next_difficulties(timestamps.into_iter().zip(std::iter::once(hf).cycle()).collect(), hf); prop_assert_eq!(diff_cache, cache); } @@ -204,12 +204,12 @@ proptest! { timestamps in any_with::>(size_range(0..1000).lift()), hf in any::(), ) { - let timestamps: Vec<_> = timestamps.into_iter().zip([hf].into_iter().cycle()).collect(); + let timestamps: Vec<_> = timestamps.into_iter().zip(std::iter::once(hf).cycle()).collect(); - let diffs = diff_cache.next_difficulties(timestamps.clone(), &hf); + let diffs = diff_cache.next_difficulties(timestamps.clone(), hf); for (timestamp, diff) in timestamps.into_iter().zip(diffs.into_iter()) { - prop_assert_eq!(diff_cache.next_difficulty(×tamp.1), diff); + prop_assert_eq!(diff_cache.next_difficulty(timestamp.1), diff); diff_cache.new_block(diff_cache.last_accounted_height +1, timestamp.0, diff + diff_cache.cumulative_difficulty()); } @@ -226,7 +226,7 @@ proptest! { let blocks_to_pop = new_blocks.len(); let mut new_cache = old_cache.clone(); - for (timestamp, cumulative_difficulty) in new_blocks.into_iter() { + for (timestamp, cumulative_difficulty) in new_blocks { database.add_block(DummyBlockExtendedHeader::default().with_difficulty_info(timestamp, cumulative_difficulty)); new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty); } @@ -250,7 +250,7 @@ proptest! 
{ let blocks_to_pop = new_blocks.len(); let mut new_cache = old_cache.clone(); - for (timestamp, cumulative_difficulty) in new_blocks.into_iter() { + for (timestamp, cumulative_difficulty) in new_blocks { database.add_block(DummyBlockExtendedHeader::default().with_difficulty_info(timestamp, cumulative_difficulty)); new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty); } diff --git a/consensus/src/tests/context/hardforks.rs b/consensus/src/tests/context/hardforks.rs index ffdff59..17bd47f 100644 --- a/consensus/src/tests/context/hardforks.rs +++ b/consensus/src/tests/context/hardforks.rs @@ -31,7 +31,7 @@ const TEST_HFS: [HFInfo; NUMB_OF_HARD_FORKS] = [ HFInfo::new(150, 0), ]; -pub const TEST_HARD_FORK_CONFIG: HardForkConfig = HardForkConfig { +pub(crate) const TEST_HARD_FORK_CONFIG: HardForkConfig = HardForkConfig { window: TEST_WINDOW_SIZE, info: HFsInfo::new(TEST_HFS), }; diff --git a/consensus/src/tests/context/rx_vms.rs b/consensus/src/tests/context/rx_vms.rs index 5c198cf..b1eba8e 100644 --- a/consensus/src/tests/context/rx_vms.rs +++ b/consensus/src/tests/context/rx_vms.rs @@ -39,6 +39,7 @@ fn rx_heights_consistent() { } #[tokio::test] +#[expect(unused_qualifications, reason = "false positive in tokio macro")] async fn rx_vm_created_on_hf_12() { let db = DummyDatabaseBuilder::default().finish(Some(10)); diff --git a/consensus/src/tests/context/weight.rs b/consensus/src/tests/context/weight.rs index 6706d97..b23f8f8 100644 --- a/consensus/src/tests/context/weight.rs +++ b/consensus/src/tests/context/weight.rs @@ -8,7 +8,8 @@ use crate::{ }; use cuprate_types::Chain; -pub const TEST_WEIGHT_CONFIG: BlockWeightsCacheConfig = BlockWeightsCacheConfig::new(100, 5000); +pub(crate) const TEST_WEIGHT_CONFIG: BlockWeightsCacheConfig = + BlockWeightsCacheConfig::new(100, 5000); #[tokio::test] async fn blocks_out_of_window_not_counted() -> Result<(), tower::BoxError> { @@ -157,7 +158,7 @@ async fn calc_bw_ltw_2850000_3050000() { for (i, 
(weight, ltw)) in BW_2850000_3050000.iter().skip(100_000).enumerate() { let calc_ltw = calculate_block_long_term_weight( - &HardFork::V16, + HardFork::V16, *weight, weight_cache.median_long_term_weight(), ); diff --git a/consensus/src/tests/mock_db.rs b/consensus/src/tests/mock_db.rs index a260cf0..5ca53d8 100644 --- a/consensus/src/tests/mock_db.rs +++ b/consensus/src/tests/mock_db.rs @@ -1,3 +1,5 @@ +#![expect(non_local_definitions, reason = "proptest macro")] + use std::{ future::Future, pin::Pin, @@ -60,7 +62,7 @@ pub struct DummyBlockExtendedHeader { impl From for ExtendedBlockHeader { fn from(value: DummyBlockExtendedHeader) -> Self { - ExtendedBlockHeader { + Self { version: value.version.unwrap_or(HardFork::V1), vote: value.vote.unwrap_or(HardFork::V1).as_u8(), timestamp: value.timestamp.unwrap_or_default(), @@ -72,31 +74,23 @@ impl From for ExtendedBlockHeader { } impl DummyBlockExtendedHeader { - pub fn with_weight_into( - mut self, - weight: usize, - long_term_weight: usize, - ) -> DummyBlockExtendedHeader { + pub const fn with_weight_into(mut self, weight: usize, long_term_weight: usize) -> Self { self.block_weight = Some(weight); self.long_term_weight = Some(long_term_weight); self } - pub fn with_hard_fork_info( - mut self, - version: HardFork, - vote: HardFork, - ) -> DummyBlockExtendedHeader { + pub const fn with_hard_fork_info(mut self, version: HardFork, vote: HardFork) -> Self { self.vote = Some(vote); self.version = Some(version); self } - pub fn with_difficulty_info( + pub const fn with_difficulty_info( mut self, timestamp: u64, cumulative_difficulty: u128, - ) -> DummyBlockExtendedHeader { + ) -> Self { self.timestamp = Some(timestamp); self.cumulative_difficulty = Some(cumulative_difficulty); self @@ -104,16 +98,16 @@ impl DummyBlockExtendedHeader { } #[derive(Debug, Default)] -pub struct DummyDatabaseBuilder { +pub(crate) struct DummyDatabaseBuilder { blocks: Vec, } impl DummyDatabaseBuilder { - pub fn add_block(&mut self, block: 
DummyBlockExtendedHeader) { + pub(crate) fn add_block(&mut self, block: DummyBlockExtendedHeader) { self.blocks.push(block); } - pub fn finish(self, dummy_height: Option) -> DummyDatabase { + pub(crate) fn finish(self, dummy_height: Option) -> DummyDatabase { DummyDatabase { blocks: Arc::new(self.blocks.into()), dummy_height, @@ -122,14 +116,15 @@ impl DummyDatabaseBuilder { } #[derive(Clone, Debug)] -pub struct DummyDatabase { +pub(crate) struct DummyDatabase { blocks: Arc>>, dummy_height: Option, } impl DummyDatabase { - pub fn add_block(&mut self, block: DummyBlockExtendedHeader) { - self.blocks.write().unwrap().push(block) + #[expect(clippy::needless_pass_by_ref_mut)] + pub(crate) fn add_block(&mut self, block: DummyBlockExtendedHeader) { + self.blocks.write().unwrap().push(block); } } @@ -144,7 +139,7 @@ impl Service for DummyDatabase { } fn call(&mut self, req: BlockchainReadRequest) -> Self::Future { - let blocks = self.blocks.clone(); + let blocks = Arc::clone(&self.blocks); let dummy_height = self.dummy_height; async move { diff --git a/consensus/src/transactions.rs b/consensus/src/transactions.rs index 09f6884..f29c852 100644 --- a/consensus/src/transactions.rs +++ b/consensus/src/transactions.rs @@ -5,7 +5,6 @@ use std::{ collections::HashSet, future::Future, - ops::Deref, pin::Pin, sync::Arc, task::{Context, Poll}, @@ -102,8 +101,8 @@ where D::Future: Send + 'static, { /// Creates a new [`TxVerifierService`]. 
- pub fn new(database: D) -> TxVerifierService { - TxVerifierService { database } + pub const fn new(database: D) -> Self { + Self { database } } } @@ -244,7 +243,7 @@ where if kis_spent { tracing::debug!("One or more key images in batch already spent."); - Err(ConsensusError::Transaction(TransactionError::KeyImageSpent))?; + return Err(ConsensusError::Transaction(TransactionError::KeyImageSpent).into()); } let mut verified_at_block_hashes = txs @@ -281,8 +280,8 @@ where let (txs_needing_full_verification, txs_needing_partial_verification) = transactions_needing_verification( txs, - verified_at_block_hashes, - &hf, + &verified_at_block_hashes, + hf, current_chain_height, time_for_time_lock, )?; @@ -302,11 +301,14 @@ where Ok(VerifyTxResponse::Ok) } -#[allow(clippy::type_complexity)] // I don't think the return is too complex +#[expect( + clippy::type_complexity, + reason = "I don't think the return is too complex" +)] fn transactions_needing_verification( txs: &[Arc], - hashes_in_main_chain: HashSet<[u8; 32]>, - current_hf: &HardFork, + hashes_in_main_chain: &HashSet<[u8; 32]>, + current_hf: HardFork, current_chain_height: usize, time_for_time_lock: u64, ) -> Result< @@ -321,27 +323,28 @@ fn transactions_needing_verification( // txs needing partial _contextual_ validation, not semantic. 
let mut partial_validation_transactions = Vec::new(); - for tx in txs.iter() { + for tx in txs { let guard = tx.cached_verification_state.lock().unwrap(); - match guard.deref() { + match &*guard { CachedVerificationState::NotVerified => { drop(guard); full_validation_transactions - .push((tx.clone(), VerificationNeeded::SemanticAndContextual)); + .push((Arc::clone(tx), VerificationNeeded::SemanticAndContextual)); continue; } CachedVerificationState::ValidAtHashAndHF { block_hash, hf } => { - if current_hf != hf { + if current_hf != *hf { drop(guard); full_validation_transactions - .push((tx.clone(), VerificationNeeded::SemanticAndContextual)); + .push((Arc::clone(tx), VerificationNeeded::SemanticAndContextual)); continue; } if !hashes_in_main_chain.contains(block_hash) { drop(guard); - full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual)); + full_validation_transactions + .push((Arc::clone(tx), VerificationNeeded::Contextual)); continue; } } @@ -350,21 +353,22 @@ fn transactions_needing_verification( hf, time_lock, } => { - if current_hf != hf { + if current_hf != *hf { drop(guard); full_validation_transactions - .push((tx.clone(), VerificationNeeded::SemanticAndContextual)); + .push((Arc::clone(tx), VerificationNeeded::SemanticAndContextual)); continue; } if !hashes_in_main_chain.contains(block_hash) { drop(guard); - full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual)); + full_validation_transactions + .push((Arc::clone(tx), VerificationNeeded::Contextual)); continue; } // If the time lock is still locked then the transaction is invalid. 
- if !output_unlocked(time_lock, current_chain_height, time_for_time_lock, hf) { + if !output_unlocked(time_lock, current_chain_height, time_for_time_lock, *hf) { return Err(ConsensusError::Transaction( TransactionError::OneOrMoreRingMembersLocked, )); @@ -374,7 +378,7 @@ fn transactions_needing_verification( if tx.version == TxVersion::RingSignatures { drop(guard); - partial_validation_transactions.push(tx.clone()); + partial_validation_transactions.push(Arc::clone(tx)); continue; } } @@ -400,7 +404,7 @@ where batch_get_decoy_info(&txs, hf, database) .await? - .try_for_each(|decoy_info| decoy_info.and_then(|di| Ok(check_decoy_info(&di, &hf)?)))?; + .try_for_each(|decoy_info| decoy_info.and_then(|di| Ok(check_decoy_info(&di, hf)?)))?; Ok(()) } @@ -417,7 +421,7 @@ where D: Database + Clone + Sync + Send + 'static, { let txs_ring_member_info = - batch_get_ring_member_info(txs.iter().map(|(tx, _)| tx), &hf, database).await?; + batch_get_ring_member_info(txs.iter().map(|(tx, _)| tx), hf, database).await?; rayon_spawn_async(move || { let batch_verifier = MultiThreadedBatchVerifier::new(rayon::current_num_threads()); @@ -432,7 +436,7 @@ where tx.tx_blob.len(), tx.tx_weight, &tx.tx_hash, - &hf, + hf, &batch_verifier, )?; // make sure we calculated the right fee. @@ -445,7 +449,7 @@ where ring, current_chain_height, current_time_lock_timestamp, - &hf, + hf, )?; Ok::<_, ConsensusError>(()) diff --git a/consensus/src/transactions/contextual_data.rs b/consensus/src/transactions/contextual_data.rs index 82f9976..66c53b3 100644 --- a/consensus/src/transactions/contextual_data.rs +++ b/consensus/src/transactions/contextual_data.rs @@ -57,7 +57,7 @@ fn get_ring_members_for_inputs( }) .collect::>()?) } - _ => Err(TransactionError::IncorrectInputType), + Input::Gen(_) => Err(TransactionError::IncorrectInputType), }) .collect::>() } @@ -143,7 +143,7 @@ fn new_rings( /// them. 
pub async fn batch_get_ring_member_info( txs_verification_data: impl Iterator> + Clone, - hf: &HardFork, + hf: HardFork, mut database: D, ) -> Result, ExtendedConsensusError> { let mut output_ids = HashMap::new(); @@ -183,14 +183,14 @@ pub async fn batch_get_ring_member_info( ) .map_err(ConsensusError::Transaction)?; - let decoy_info = if hf != &HardFork::V1 { + let decoy_info = if hf == HardFork::V1 { + None + } else { // this data is only needed after hard-fork 1. Some( DecoyInfo::new(&tx_v_data.tx.prefix().inputs, numb_outputs, hf) .map_err(ConsensusError::Transaction)?, ) - } else { - None }; new_ring_member_info(ring_members_for_tx, decoy_info, tx_v_data.version) @@ -224,7 +224,7 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>( .flat_map(|tx_info| { tx_info.tx.prefix().inputs.iter().map(|input| match input { Input::ToKey { amount, .. } => amount.unwrap_or(0), - _ => 0, + Input::Gen(_) => 0, }) }) .collect::>(); @@ -249,7 +249,7 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>( DecoyInfo::new( &tx_v_data.tx.prefix().inputs, |amt| outputs_with_amount.get(&amt).copied().unwrap_or(0), - &hf, + hf, ) .map_err(ConsensusError::Transaction) })) diff --git a/consensus/src/transactions/free.rs b/consensus/src/transactions/free.rs index 67b675a..3613f29 100644 --- a/consensus/src/transactions/free.rs +++ b/consensus/src/transactions/free.rs @@ -39,7 +39,7 @@ pub fn new_tx_verification_data( /// Calculates the weight of a [`Transaction`]. /// /// This is more efficient that [`Transaction::weight`] if you already have the transaction blob. -pub fn tx_weight(tx: &Transaction, tx_blob: &[u8]) -> usize { +pub(crate) fn tx_weight(tx: &Transaction, tx_blob: &[u8]) -> usize { // the tx weight is only different from the blobs length for bp(+) txs. match &tx { @@ -64,7 +64,7 @@ pub fn tx_weight(tx: &Transaction, tx_blob: &[u8]) -> usize { } /// Calculates the fee of the [`Transaction`]. 
-pub fn tx_fee(tx: &Transaction) -> Result { +pub(crate) fn tx_fee(tx: &Transaction) -> Result { let mut fee = 0_u64; match &tx { diff --git a/consensus/tests/verify_correct_txs.rs b/consensus/tests/verify_correct_txs.rs index 7afb370..4d6c179 100644 --- a/consensus/tests/verify_correct_txs.rs +++ b/consensus/tests/verify_correct_txs.rs @@ -1,3 +1,6 @@ +#![expect(unused_crate_dependencies, reason = "external test module")] +#![expect(clippy::allow_attributes, reason = "usage inside macro")] + use std::{ collections::{BTreeMap, HashMap}, future::ready, @@ -29,7 +32,7 @@ fn dummy_database(outputs: BTreeMap) -> impl Database + Clon BlockchainResponse::NumberOutputsWithAmount(HashMap::new()) } BlockchainReadRequest::Outputs(outs) => { - let idxs = outs.get(&0).unwrap(); + let idxs = &outs[&0]; let mut ret = HashMap::new(); diff --git a/types/src/block_complete_entry.rs b/types/src/block_complete_entry.rs index ba5fc2b..77ed82d 100644 --- a/types/src/block_complete_entry.rs +++ b/types/src/block_complete_entry.rs @@ -1,7 +1,6 @@ //! Contains [`BlockCompleteEntry`] and the related types. 
//---------------------------------------------------------------------------------------------------- Import -#[cfg(feature = "epee")] use bytes::Bytes; #[cfg(feature = "serde")] From f4c88b6f0538f9abe19b4384c3aabcb3fd3deb40 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Fri, 20 Sep 2024 20:36:39 -0400 Subject: [PATCH 068/104] p2p: enable workspace lints (#289) * p2p: enable workspace lints * fmt * fixes * fixes * fixes * review fixes --- p2p/p2p/Cargo.toml | 3 ++ p2p/p2p/src/block_downloader.rs | 29 +++++++++-------- p2p/p2p/src/block_downloader/block_queue.rs | 14 ++++---- p2p/p2p/src/block_downloader/chain_tracker.rs | 25 ++++++++------- .../src/block_downloader/download_batch.rs | 2 ++ p2p/p2p/src/block_downloader/request_chain.rs | 4 +-- p2p/p2p/src/block_downloader/tests.rs | 7 ++-- p2p/p2p/src/broadcast.rs | 21 ++++++------ p2p/p2p/src/client_pool.rs | 17 +++++----- p2p/p2p/src/client_pool/disconnect_monitor.rs | 2 +- p2p/p2p/src/connection_maintainer.rs | 32 ++++++++++++------- p2p/p2p/src/inbound_server.rs | 15 +++++---- p2p/p2p/src/lib.rs | 12 +++---- p2p/p2p/src/sync_states.rs | 24 +++++++------- 14 files changed, 116 insertions(+), 91 deletions(-) diff --git a/p2p/p2p/Cargo.toml b/p2p/p2p/Cargo.toml index 7cbbdcb..ef85277 100644 --- a/p2p/p2p/Cargo.toml +++ b/p2p/p2p/Cargo.toml @@ -39,3 +39,6 @@ cuprate-test-utils = { path = "../../test-utils" } indexmap = { workspace = true } proptest = { workspace = true } tokio-test = { workspace = true } + +[lints] +workspace = true \ No newline at end of file diff --git a/p2p/p2p/src/block_downloader.rs b/p2p/p2p/src/block_downloader.rs index d295016..39980a0 100644 --- a/p2p/p2p/src/block_downloader.rs +++ b/p2p/p2p/src/block_downloader.rs @@ -78,7 +78,7 @@ pub struct BlockDownloaderConfig { /// An error that occurred in the [`BlockDownloader`]. 
#[derive(Debug, thiserror::Error)] -pub enum BlockDownloadError { +pub(crate) enum BlockDownloadError { #[error("A request to a peer timed out.")] TimedOut, #[error("The block buffer was closed.")] @@ -219,7 +219,7 @@ struct BlockDownloader { /// The running chain entry tasks. /// /// Returns a result of the chain entry or an error. - #[allow(clippy::type_complexity)] + #[expect(clippy::type_complexity)] chain_entry_task: JoinSet, ChainEntry), BlockDownloadError>>, /// The current inflight requests. @@ -273,7 +273,7 @@ where } /// Checks if we can make use of any peers that are currently pending requests. - async fn check_pending_peers( + fn check_pending_peers( &mut self, chain_tracker: &mut ChainTracker, pending_peers: &mut BTreeMap>>, @@ -287,7 +287,8 @@ where continue; } - if let Some(peer) = self.try_handle_free_client(chain_tracker, peer).await { + let client = self.try_handle_free_client(chain_tracker, peer); + if let Some(peer) = client { // This peer is ok however it does not have the data we currently need, this will only happen // because of its pruning seed so just skip over all peers with this pruning seed. peers.push(peer); @@ -303,7 +304,7 @@ where /// for them. /// /// Returns the [`ClientPoolDropGuard`] back if it doesn't have the batch according to its pruning seed. - async fn request_inflight_batch_again( + fn request_inflight_batch_again( &mut self, client: ClientPoolDropGuard, ) -> Option> { @@ -354,7 +355,7 @@ where /// /// Returns the [`ClientPoolDropGuard`] back if it doesn't have the data we currently need according /// to its pruning seed. - async fn request_block_batch( + fn request_block_batch( &mut self, chain_tracker: &mut ChainTracker, client: ClientPoolDropGuard, @@ -399,7 +400,7 @@ where // If our ready queue is too large send duplicate requests for the blocks we are waiting on. 
if self.block_queue.size() >= self.config.in_progress_queue_size { - return self.request_inflight_batch_again(client).await; + return self.request_inflight_batch_again(client); } // No failed requests that we can handle, request some new blocks. @@ -434,7 +435,7 @@ where /// /// Returns the [`ClientPoolDropGuard`] back if it doesn't have the data we currently need according /// to its pruning seed. - async fn try_handle_free_client( + fn try_handle_free_client( &mut self, chain_tracker: &mut ChainTracker, client: ClientPoolDropGuard, @@ -472,7 +473,7 @@ where } // Request a batch of blocks instead. - self.request_block_batch(chain_tracker, client).await + self.request_block_batch(chain_tracker, client) } /// Checks the [`ClientPool`] for free peers. @@ -516,7 +517,7 @@ where .push(client); } - self.check_pending_peers(chain_tracker, pending_peers).await; + self.check_pending_peers(chain_tracker, pending_peers); Ok(()) } @@ -574,7 +575,7 @@ where .or_default() .push(client); - self.check_pending_peers(chain_tracker, pending_peers).await; + self.check_pending_peers(chain_tracker, pending_peers); return Ok(()); }; @@ -611,7 +612,7 @@ where .or_default() .push(client); - self.check_pending_peers(chain_tracker, pending_peers).await; + self.check_pending_peers(chain_tracker, pending_peers); Ok(()) } @@ -679,7 +680,7 @@ where .or_default() .push(client); - self.check_pending_peers(&mut chain_tracker, &mut pending_peers).await; + self.check_pending_peers(&mut chain_tracker, &mut pending_peers); } Err(_) => self.amount_of_empty_chain_entries += 1 } @@ -698,7 +699,7 @@ struct BlockDownloadTaskResponse { } /// Returns if a peer has all the blocks in a range, according to its [`PruningSeed`]. 
-fn client_has_block_in_range( +const fn client_has_block_in_range( pruning_seed: &PruningSeed, start_height: usize, length: usize, diff --git a/p2p/p2p/src/block_downloader/block_queue.rs b/p2p/p2p/src/block_downloader/block_queue.rs index 5a92f49..5dd1b0d 100644 --- a/p2p/p2p/src/block_downloader/block_queue.rs +++ b/p2p/p2p/src/block_downloader/block_queue.rs @@ -13,7 +13,7 @@ use super::{BlockBatch, BlockDownloadError}; /// /// Also, the [`Ord`] impl is reversed so older blocks (lower height) come first in a [`BinaryHeap`]. #[derive(Debug, Clone)] -pub struct ReadyQueueBatch { +pub(crate) struct ReadyQueueBatch { /// The start height of the batch. pub start_height: usize, /// The batch of blocks. @@ -43,7 +43,7 @@ impl Ord for ReadyQueueBatch { /// The block queue that holds downloaded block batches, adding them to the [`async_buffer`] when the /// oldest batch has been downloaded. -pub struct BlockQueue { +pub(crate) struct BlockQueue { /// A queue of ready batches. ready_batches: BinaryHeap, /// The size, in bytes, of all the batches in [`Self::ready_batches`]. @@ -55,8 +55,8 @@ pub struct BlockQueue { impl BlockQueue { /// Creates a new [`BlockQueue`]. - pub fn new(buffer_appender: BufferAppender) -> BlockQueue { - BlockQueue { + pub(crate) const fn new(buffer_appender: BufferAppender) -> Self { + Self { ready_batches: BinaryHeap::new(), ready_batches_size: 0, buffer_appender, @@ -64,12 +64,12 @@ impl BlockQueue { } /// Returns the oldest batch that has not been put in the [`async_buffer`] yet. - pub fn oldest_ready_batch(&self) -> Option { + pub(crate) fn oldest_ready_batch(&self) -> Option { self.ready_batches.peek().map(|batch| batch.start_height) } /// Returns the size of all the batches that have not been put into the [`async_buffer`] yet. 
- pub fn size(&self) -> usize { + pub(crate) const fn size(&self) -> usize { self.ready_batches_size } @@ -77,7 +77,7 @@ impl BlockQueue { /// /// `oldest_in_flight_start_height` should be the start height of the oldest batch that is still inflight, if /// there are no batches inflight then this should be [`None`]. - pub async fn add_incoming_batch( + pub(crate) async fn add_incoming_batch( &mut self, new_batch: ReadyQueueBatch, oldest_in_flight_start_height: Option, diff --git a/p2p/p2p/src/block_downloader/chain_tracker.rs b/p2p/p2p/src/block_downloader/chain_tracker.rs index aacb163..a2f03c5 100644 --- a/p2p/p2p/src/block_downloader/chain_tracker.rs +++ b/p2p/p2p/src/block_downloader/chain_tracker.rs @@ -20,7 +20,7 @@ pub(crate) struct ChainEntry { /// A batch of blocks to retrieve. #[derive(Clone)] -pub struct BlocksToRetrieve { +pub(crate) struct BlocksToRetrieve { /// The block IDs to get. pub ids: ByteArrayVec<32>, /// The hash of the last block before this batch. @@ -39,7 +39,7 @@ pub struct BlocksToRetrieve { /// An error returned from the [`ChainTracker`]. #[derive(Debug, Clone)] -pub enum ChainTrackerError { +pub(crate) enum ChainTrackerError { /// The new chain entry is invalid. NewEntryIsInvalid, /// The new chain entry does not follow from the top of our chain tracker. @@ -50,7 +50,7 @@ pub enum ChainTrackerError { /// /// This struct allows following a single chain. It takes in [`ChainEntry`]s and /// allows getting [`BlocksToRetrieve`]. -pub struct ChainTracker { +pub(crate) struct ChainTracker { /// A list of [`ChainEntry`]s, in order. entries: VecDeque>, /// The height of the first block, in the first entry in [`Self::entries`]. @@ -65,7 +65,7 @@ pub struct ChainTracker { impl ChainTracker { /// Creates a new chain tracker. 
- pub fn new( + pub(crate) fn new( new_entry: ChainEntry, first_height: usize, our_genesis: [u8; 32], @@ -76,9 +76,9 @@ impl ChainTracker { entries.push_back(new_entry); Self { - top_seen_hash, entries, first_height, + top_seen_hash, previous_hash, our_genesis, } @@ -86,17 +86,17 @@ impl ChainTracker { /// Returns `true` if the peer is expected to have the next block after our highest seen block /// according to their pruning seed. - pub fn should_ask_for_next_chain_entry(&self, seed: &PruningSeed) -> bool { + pub(crate) fn should_ask_for_next_chain_entry(&self, seed: &PruningSeed) -> bool { seed.has_full_block(self.top_height(), CRYPTONOTE_MAX_BLOCK_HEIGHT) } /// Returns the simple history, the highest seen block and the genesis block. - pub fn get_simple_history(&self) -> [[u8; 32]; 2] { + pub(crate) const fn get_simple_history(&self) -> [[u8; 32]; 2] { [self.top_seen_hash, self.our_genesis] } /// Returns the height of the highest block we are tracking. - pub fn top_height(&self) -> usize { + pub(crate) fn top_height(&self) -> usize { let top_block_idx = self .entries .iter() @@ -110,7 +110,7 @@ impl ChainTracker { /// /// # Panics /// This function panics if `batch_size` is `0`. - pub fn block_requests_queued(&self, batch_size: usize) -> usize { + pub(crate) fn block_requests_queued(&self, batch_size: usize) -> usize { self.entries .iter() .map(|entry| entry.ids.len().div_ceil(batch_size)) @@ -118,7 +118,10 @@ impl ChainTracker { } /// Attempts to add an incoming [`ChainEntry`] to the chain tracker. - pub fn add_entry(&mut self, mut chain_entry: ChainEntry) -> Result<(), ChainTrackerError> { + pub(crate) fn add_entry( + &mut self, + mut chain_entry: ChainEntry, + ) -> Result<(), ChainTrackerError> { if chain_entry.ids.is_empty() { // The peer must send at lest one overlapping block. chain_entry.handle.ban_peer(MEDIUM_BAN); @@ -154,7 +157,7 @@ impl ChainTracker { /// Returns a batch of blocks to request. 
/// /// The returned batches length will be less than or equal to `max_blocks` - pub fn blocks_to_get( + pub(crate) fn blocks_to_get( &mut self, pruning_seed: &PruningSeed, max_blocks: usize, diff --git a/p2p/p2p/src/block_downloader/download_batch.rs b/p2p/p2p/src/block_downloader/download_batch.rs index ea57ead..bbb14b3 100644 --- a/p2p/p2p/src/block_downloader/download_batch.rs +++ b/p2p/p2p/src/block_downloader/download_batch.rs @@ -30,6 +30,7 @@ use crate::{ attempt = _attempt ) )] +#[expect(clippy::used_underscore_binding)] pub async fn download_batch_task( client: ClientPoolDropGuard, ids: ByteArrayVec<32>, @@ -103,6 +104,7 @@ async fn request_batch_from_peer( Ok((client, batch)) } +#[expect(clippy::needless_pass_by_value)] fn deserialize_batch( blocks_response: GetObjectsResponse, expected_start_height: usize, diff --git a/p2p/p2p/src/block_downloader/request_chain.rs b/p2p/p2p/src/block_downloader/request_chain.rs index 4b0b47e..bde40ce 100644 --- a/p2p/p2p/src/block_downloader/request_chain.rs +++ b/p2p/p2p/src/block_downloader/request_chain.rs @@ -30,7 +30,7 @@ use crate::{ /// /// Because the block downloader only follows and downloads one chain we only have to send the block hash of /// top block we have found and the genesis block, this is then called `short_history`. 
-pub async fn request_chain_entry_from_peer( +pub(crate) async fn request_chain_entry_from_peer( mut client: ClientPoolDropGuard, short_history: [[u8; 32]; 2], ) -> Result<(ClientPoolDropGuard, ChainEntry), BlockDownloadError> { @@ -179,7 +179,7 @@ where Some(res) => { // res has already been set, replace it if this peer claims higher cumulative difficulty if res.0.cumulative_difficulty() < task_res.0.cumulative_difficulty() { - let _ = mem::replace(res, task_res); + drop(mem::replace(res, task_res)); } } None => { diff --git a/p2p/p2p/src/block_downloader/tests.rs b/p2p/p2p/src/block_downloader/tests.rs index 86a9a46..a5c5e92 100644 --- a/p2p/p2p/src/block_downloader/tests.rs +++ b/p2p/p2p/src/block_downloader/tests.rs @@ -47,6 +47,7 @@ proptest! { let tokio_pool = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap(); + #[expect(clippy::significant_drop_tightening)] tokio_pool.block_on(async move { timeout(Duration::from_secs(600), async move { let client_pool = ClientPool::new(); @@ -54,7 +55,7 @@ proptest! { let mut peer_ids = Vec::with_capacity(peers); for _ in 0..peers { - let client = mock_block_downloader_client(blockchain.clone()); + let client = mock_block_downloader_client(Arc::clone(&blockchain)); peer_ids.push(client.info.id); @@ -156,7 +157,7 @@ prop_compose! { for (height, mut block) in blocks.into_iter().enumerate() { if let Some(last) = blockchain.last() { block.0.header.previous = *last.0; - block.0.miner_transaction.prefix_mut().inputs = vec![Input::Gen(height)] + block.0.miner_transaction.prefix_mut().inputs = vec![Input::Gen(height)]; } blockchain.insert(block.0.hash(), block); @@ -173,7 +174,7 @@ fn mock_block_downloader_client(blockchain: Arc) -> Client( +pub(crate) fn init_broadcast_channels( config: BroadcastConfig, ) -> ( BroadcastSvc, @@ -193,7 +193,7 @@ impl Service> for BroadcastSvc { }; // An error here means _all_ receivers were dropped which we assume will never happen. 
- let _ = match direction { + drop(match direction { Some(ConnectionDirection::Inbound) => { self.tx_broadcast_channel_inbound.send(nex_tx_info) } @@ -201,10 +201,10 @@ impl Service> for BroadcastSvc { self.tx_broadcast_channel_outbound.send(nex_tx_info) } None => { - let _ = self.tx_broadcast_channel_outbound.send(nex_tx_info.clone()); + drop(self.tx_broadcast_channel_outbound.send(nex_tx_info.clone())); self.tx_broadcast_channel_inbound.send(nex_tx_info) } - }; + }); } } @@ -246,7 +246,7 @@ struct BroadcastTxInfo { /// /// This is given to the connection task to await on for broadcast messages. #[pin_project::pin_project] -pub struct BroadcastMessageStream { +pub(crate) struct BroadcastMessageStream { /// The peer that is holding this stream. addr: InternalPeerID, @@ -336,8 +336,9 @@ impl Stream for BroadcastMessageStream { Poll::Ready(Some(BroadcastMessage::NewTransaction(txs))) } else { tracing::trace!("Diffusion flush timer expired but no txs to diffuse"); - // poll next_flush now to register the waker with it + // poll next_flush now to register the waker with it. // the waker will already be registered with the block broadcast channel. 
+ #[expect(clippy::let_underscore_must_use)] let _ = this.next_flush.poll(cx); Poll::Pending } @@ -458,7 +459,7 @@ mod tests { let match_tx = |mes, txs| match mes { BroadcastMessage::NewTransaction(tx) => assert_eq!(tx.txs.as_slice(), txs), - _ => panic!("Block broadcast?"), + BroadcastMessage::NewFluffyBlock(_) => panic!("Block broadcast?"), }; let next = outbound_stream.next().await.unwrap(); @@ -520,7 +521,7 @@ mod tests { let match_tx = |mes, txs| match mes { BroadcastMessage::NewTransaction(tx) => assert_eq!(tx.txs.as_slice(), txs), - _ => panic!("Block broadcast?"), + BroadcastMessage::NewFluffyBlock(_) => panic!("Block broadcast?"), }; let next = outbound_stream.next().await.unwrap(); @@ -536,6 +537,6 @@ mod tests { futures::future::select(inbound_stream_from.next(), outbound_stream_from.next()) ) .await - .is_err()) + .is_err()); } } diff --git a/p2p/p2p/src/client_pool.rs b/p2p/p2p/src/client_pool.rs index 51f57e9..3405224 100644 --- a/p2p/p2p/src/client_pool.rs +++ b/p2p/p2p/src/client_pool.rs @@ -8,7 +8,7 @@ //! returns the peer to the pool when it is dropped. //! //! Internally the pool is a [`DashMap`] which means care should be taken in `async` code -//! as internally this uses blocking RwLocks. +//! as internally this uses blocking `RwLock`s. use std::sync::Arc; use dashmap::DashMap; @@ -24,7 +24,7 @@ use cuprate_p2p_core::{ pub(crate) mod disconnect_monitor; mod drop_guard_client; -pub use drop_guard_client::ClientPoolDropGuard; +pub(crate) use drop_guard_client::ClientPoolDropGuard; /// The client pool, which holds currently connected free peers. /// @@ -38,16 +38,17 @@ pub struct ClientPool { impl ClientPool { /// Returns a new [`ClientPool`] wrapped in an [`Arc`]. 
- pub fn new() -> Arc> { + pub fn new() -> Arc { let (tx, rx) = mpsc::unbounded_channel(); - let pool = Arc::new(ClientPool { + let pool = Arc::new(Self { clients: DashMap::new(), new_connection_tx: tx, }); tokio::spawn( - disconnect_monitor::disconnect_monitor(rx, pool.clone()).instrument(Span::current()), + disconnect_monitor::disconnect_monitor(rx, Arc::clone(&pool)) + .instrument(Span::current()), ); pool @@ -69,8 +70,7 @@ impl ClientPool { return; } - let res = self.clients.insert(id, client); - assert!(res.is_none()); + assert!(self.clients.insert(id, client).is_none()); // We have to check this again otherwise we could have a race condition where a // peer is disconnected after the first check, the disconnect monitor tries to remove it, @@ -121,7 +121,6 @@ impl ClientPool { /// Note that the returned iterator is not guaranteed to contain every peer asked for. /// /// See [`Self::borrow_client`] for borrowing a single client. - #[allow(private_interfaces)] // TODO: Remove me when 2024 Rust pub fn borrow_clients<'a, 'b>( self: &'a Arc, peers: &'b [InternalPeerID], @@ -133,7 +132,7 @@ impl ClientPool { mod sealed { /// TODO: Remove me when 2024 Rust /// - /// https://rust-lang.github.io/rfcs/3498-lifetime-capture-rules-2024.html#the-captures-trick + /// pub trait Captures {} impl Captures for T {} diff --git a/p2p/p2p/src/client_pool/disconnect_monitor.rs b/p2p/p2p/src/client_pool/disconnect_monitor.rs index f45d5e3..f54b560 100644 --- a/p2p/p2p/src/client_pool/disconnect_monitor.rs +++ b/p2p/p2p/src/client_pool/disconnect_monitor.rs @@ -78,6 +78,6 @@ impl Future for PeerDisconnectFut { this.closed_fut .poll(cx) - .map(|_| this.peer_id.take().unwrap()) + .map(|()| this.peer_id.take().unwrap()) } } diff --git a/p2p/p2p/src/connection_maintainer.rs b/p2p/p2p/src/connection_maintainer.rs index 3dfd5e8..be89973 100644 --- a/p2p/p2p/src/connection_maintainer.rs +++ b/p2p/p2p/src/connection_maintainer.rs @@ -99,12 +99,17 @@ where /// Connects to random seeds to get 
peers and immediately disconnects #[instrument(level = "info", skip(self))] + #[expect( + clippy::significant_drop_in_scrutinee, + clippy::significant_drop_tightening + )] async fn connect_to_random_seeds(&mut self) -> Result<(), OutboundConnectorError> { let seeds = N::SEEDS.choose_multiple(&mut thread_rng(), MAX_SEED_CONNECTIONS); - if seeds.len() == 0 { - panic!("No seed nodes available to get peers from"); - } + assert!( + seeds.len() != 0, + "No seed nodes available to get peers from" + ); let mut allowed_errors = seeds.len(); @@ -129,7 +134,7 @@ where } while let Some(res) = handshake_futs.join_next().await { - if matches!(res, Err(_) | Ok(Err(_)) | Ok(Ok(Err(_)))) { + if matches!(res, Err(_) | Ok(Err(_) | Ok(Err(_)))) { allowed_errors -= 1; } } @@ -144,7 +149,7 @@ where /// Connects to a given outbound peer. #[instrument(level = "info", skip_all)] async fn connect_to_outbound_peer(&mut self, permit: OwnedSemaphorePermit, addr: N::Addr) { - let client_pool = self.client_pool.clone(); + let client_pool = Arc::clone(&self.client_pool); let connection_fut = self .connector_svc .ready() @@ -157,6 +162,7 @@ where tokio::spawn( async move { + #[expect(clippy::significant_drop_in_scrutinee)] if let Ok(Ok(peer)) = timeout(HANDSHAKE_TIMEOUT, connection_fut).await { client_pool.add_new_client(peer); } @@ -166,14 +172,16 @@ where } /// Handles a request from the peer set for more peers. + #[expect( + clippy::significant_drop_tightening, + reason = "we need to hold onto a permit" + )] async fn handle_peer_request( &mut self, req: &MakeConnectionRequest, ) -> Result<(), OutboundConnectorError> { // try to get a permit. - let permit = self - .outbound_semaphore - .clone() + let permit = Arc::clone(&self.outbound_semaphore) .try_acquire_owned() .or_else(|_| { // if we can't get a permit add one if we are below the max number of connections. 
@@ -183,7 +191,9 @@ where } else { self.outbound_semaphore.add_permits(1); self.extra_peers += 1; - Ok(self.outbound_semaphore.clone().try_acquire_owned().unwrap()) + Ok(Arc::clone(&self.outbound_semaphore) + .try_acquire_owned() + .unwrap()) } })?; @@ -272,12 +282,12 @@ where tracing::info!("Shutting down outbound connector, make connection channel closed."); return; }; - // We can't really do much about errors in this function. + #[expect(clippy::let_underscore_must_use, reason = "We can't really do much about errors in this function.")] let _ = self.handle_peer_request(&peer_req).await; }, // This future is not cancellation safe as you will lose your space in the queue but as we are the only place // that actually requires permits that should be ok. - Ok(permit) = self.outbound_semaphore.clone().acquire_owned() => { + Ok(permit) = Arc::clone(&self.outbound_semaphore).acquire_owned() => { if self.handle_free_permit(permit).await.is_err() { // if we got an error then we still have a permit free so to prevent this from just looping // uncontrollably add a timeout. diff --git a/p2p/p2p/src/inbound_server.rs b/p2p/p2p/src/inbound_server.rs index 80ff38e..0d50d54 100644 --- a/p2p/p2p/src/inbound_server.rs +++ b/p2p/p2p/src/inbound_server.rs @@ -100,7 +100,7 @@ where }; // If we're still behind our maximum limit, Initiate handshake. 
- if let Ok(permit) = semaphore.clone().try_acquire_owned() { + if let Ok(permit) = Arc::clone(&semaphore).try_acquire_owned() { tracing::debug!("Permit free for incoming connection, attempting handshake."); let fut = handshaker.ready().await?.call(DoHandshakeRequest { @@ -111,11 +111,12 @@ where permit: Some(permit), }); - let cloned_pool = client_pool.clone(); + let cloned_pool = Arc::clone(&client_pool); tokio::spawn( async move { - if let Ok(Ok(peer)) = timeout(HANDSHAKE_TIMEOUT, fut).await { + let client = timeout(HANDSHAKE_TIMEOUT, fut).await; + if let Ok(Ok(peer)) = client { cloned_pool.add_new_client(peer); } } @@ -133,8 +134,10 @@ where let fut = timeout(PING_REQUEST_TIMEOUT, peer_stream.next()); // Ok if timeout did not elapsed -> Some if there is a message -> Ok if it has been decoded - if let Ok(Some(Ok(Message::Request(AdminRequestMessage::Ping)))) = fut.await - { + if matches!( + fut.await, + Ok(Some(Ok(Message::Request(AdminRequestMessage::Ping)))) + ) { let response = peer_sink .send( Message::Response(AdminResponseMessage::Ping(PingResponse { @@ -148,7 +151,7 @@ where if let Err(err) = response { tracing::debug!( "Unable to respond to ping request from peer ({addr}): {err}" - ) + ); } } } diff --git a/p2p/p2p/src/lib.rs b/p2p/p2p/src/lib.rs index be18c2a..2f51c6c 100644 --- a/p2p/p2p/src/lib.rs +++ b/p2p/p2p/src/lib.rs @@ -103,7 +103,7 @@ where let outbound_connector = Connector::new(outbound_handshaker); let outbound_connection_maintainer = connection_maintainer::OutboundConnectionKeeper::new( config.clone(), - client_pool.clone(), + Arc::clone(&client_pool), make_connection_rx, address_book.clone(), outbound_connector, @@ -118,17 +118,17 @@ where ); background_tasks.spawn( inbound_server::inbound_server( - client_pool.clone(), + Arc::clone(&client_pool), inbound_handshaker, address_book.clone(), config, ) .map(|res| { if let Err(e) = res { - tracing::error!("Error in inbound connection listener: {e}") + tracing::error!("Error in inbound 
connection listener: {e}"); } - tracing::info!("Inbound connection listener shutdown") + tracing::info!("Inbound connection listener shutdown"); }) .instrument(Span::current()), ); @@ -155,7 +155,7 @@ pub struct NetworkInterface { /// on that claimed chain. top_block_watch: watch::Receiver, /// A channel to request extra connections. - #[allow(dead_code)] // will be used eventually + #[expect(dead_code, reason = "will be used eventually")] make_connection_tx: mpsc::Sender, /// The address book service. address_book: BoxCloneService, AddressBookResponse, tower::BoxError>, @@ -184,7 +184,7 @@ impl NetworkInterface { C::Future: Send + 'static, { block_downloader::download_blocks( - self.pool.clone(), + Arc::clone(&self.pool), self.sync_states_svc.clone(), our_chain_service, config, diff --git a/p2p/p2p/src/sync_states.rs b/p2p/p2p/src/sync_states.rs index 70ef6ca..0c03795 100644 --- a/p2p/p2p/src/sync_states.rs +++ b/p2p/p2p/src/sync_states.rs @@ -40,7 +40,7 @@ pub struct NewSyncInfo { /// This is the service that handles: /// 1. Finding out if we need to sync /// 1. Giving the peers that should be synced _from_, to the requester -pub struct PeerSyncSvc { +pub(crate) struct PeerSyncSvc { /// A map of cumulative difficulties to peers. cumulative_difficulties: BTreeMap>>, /// A map of peers to cumulative difficulties. @@ -56,7 +56,7 @@ pub struct PeerSyncSvc { impl PeerSyncSvc { /// Creates a new [`PeerSyncSvc`] with a [`Receiver`](watch::Receiver) that will be updated with /// the highest seen sync data, this makes no guarantees about which peer will be chosen in case of a tie. 
- pub fn new() -> (Self, watch::Receiver) { + pub(crate) fn new() -> (Self, watch::Receiver) { let (watch_tx, mut watch_rx) = watch::channel(NewSyncInfo { chain_height: 0, top_hash: [0; 32], @@ -108,9 +108,7 @@ impl PeerSyncSvc { if let Some(block_needed) = block_needed { // we just use CRYPTONOTE_MAX_BLOCK_HEIGHT as the blockchain height, this only means // we don't take into account the tip blocks which are not pruned. - self.peers - .get(peer) - .unwrap() + self.peers[peer] .1 .has_full_block(block_needed, CRYPTONOTE_MAX_BLOCK_HEIGHT) } else { @@ -126,7 +124,7 @@ impl PeerSyncSvc { &mut self, peer_id: InternalPeerID, handle: ConnectionHandle, - core_sync_data: CoreSyncData, + core_sync_data: &CoreSyncData, ) -> Result<(), tower::BoxError> { tracing::trace!( "Received new core sync data from peer, top hash: {}", @@ -176,7 +174,7 @@ impl PeerSyncSvc { self.closed_connections.push(PeerDisconnectFut { closed_fut: handle.closed(), peer_id: Some(peer_id), - }) + }); } self.cumulative_difficulties @@ -190,11 +188,15 @@ impl PeerSyncSvc { || self .last_peer_in_watcher_handle .as_ref() - .is_some_and(|handle| handle.is_closed()) + .is_some_and(ConnectionHandle::is_closed) { tracing::debug!( "Updating sync watcher channel with new highest seen cumulative difficulty: {new_cumulative_difficulty}" ); + #[expect( + clippy::let_underscore_must_use, + reason = "dropped receivers can be ignored" + )] let _ = self.new_height_watcher.send(NewSyncInfo { top_hash: core_sync_data.top_id, chain_height: core_sync_data.current_height, @@ -228,8 +230,8 @@ impl Service> for PeerSyncSvc { block_needed, ))), PeerSyncRequest::IncomingCoreSyncData(peer_id, handle, sync_data) => self - .update_peer_sync_info(peer_id, handle, sync_data) - .map(|_| PeerSyncResponse::Ok), + .update_peer_sync_info(peer_id, handle, &sync_data) + .map(|()| PeerSyncResponse::Ok), }; ready(res) @@ -413,6 +415,6 @@ mod tests { assert!( peers.contains(&InternalPeerID::Unknown(0)) && 
peers.contains(&InternalPeerID::Unknown(1)) - ) + ); } } From 848a6a71c4164a31b46fc4e69b8d514615df1cf0 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Fri, 20 Sep 2024 20:37:06 -0400 Subject: [PATCH 069/104] p2p/p2p-core: enable workspace lints (#288) * p2p-core: enable workspace lints * fmt * fix tests * fixes * fixes * fixes * expect reason --- Cargo.lock | 2 +- p2p/p2p-core/Cargo.toml | 10 +-- p2p/p2p-core/src/client.rs | 13 ++-- p2p/p2p-core/src/client/connection.rs | 47 ++++++++------ p2p/p2p-core/src/client/connector.rs | 4 +- p2p/p2p-core/src/client/handshaker.rs | 23 ++++--- p2p/p2p-core/src/client/handshaker/builder.rs | 26 ++++---- .../src/client/handshaker/builder/dummy.rs | 16 ++--- p2p/p2p-core/src/client/request_handler.rs | 4 +- p2p/p2p-core/src/client/timeout_monitor.rs | 4 +- p2p/p2p-core/src/error.rs | 2 +- p2p/p2p-core/src/handles.rs | 15 +++-- p2p/p2p-core/src/lib.rs | 14 +++- p2p/p2p-core/src/network_zones/clear.rs | 2 +- p2p/p2p-core/src/protocol.rs | 18 +++--- p2p/p2p-core/src/protocol/try_from.rs | 64 +++++++++---------- p2p/p2p-core/src/services.rs | 6 +- p2p/p2p-core/tests/fragmented_handshake.rs | 19 +++--- p2p/p2p-core/tests/handles.rs | 2 + p2p/p2p-core/tests/handshake.rs | 14 ++-- p2p/p2p-core/tests/sending_receiving.rs | 5 +- 21 files changed, 168 insertions(+), 142 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 72325bb..5481b62 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -776,6 +776,7 @@ version = "0.1.0" dependencies = [ "async-trait", "borsh", + "cfg-if", "cuprate-helper", "cuprate-pruning", "cuprate-test-utils", @@ -790,7 +791,6 @@ dependencies = [ "tokio-util", "tower", "tracing", - "tracing-subscriber", ] [[package]] diff --git a/p2p/p2p-core/Cargo.toml b/p2p/p2p-core/Cargo.toml index 9ef8e24..8341fe9 100644 --- a/p2p/p2p-core/Cargo.toml +++ b/p2p/p2p-core/Cargo.toml @@ -14,13 +14,14 @@ cuprate-helper = { path = "../../helper", features = ["asynch"], default-feature cuprate-wire = { path = "../../net/wire", features = 
["tracing"] } cuprate-pruning = { path = "../../pruning" } -tokio = { workspace = true, features = ["net", "sync", "macros", "time"]} +tokio = { workspace = true, features = ["net", "sync", "macros", "time", "rt", "rt-multi-thread"]} tokio-util = { workspace = true, features = ["codec"] } tokio-stream = { workspace = true, features = ["sync"]} futures = { workspace = true, features = ["std"] } async-trait = { workspace = true } tower = { workspace = true, features = ["util", "tracing"] } +cfg-if = { workspace = true } thiserror = { workspace = true } tracing = { workspace = true, features = ["std", "attributes"] } hex-literal = { workspace = true } @@ -28,9 +29,10 @@ hex-literal = { workspace = true } borsh = { workspace = true, features = ["derive", "std"], optional = true } [dev-dependencies] -cuprate-test-utils = {path = "../../test-utils"} +cuprate-test-utils = { path = "../../test-utils" } hex = { workspace = true, features = ["std"] } -tokio = { workspace = true, features = ["net", "rt-multi-thread", "rt", "macros"]} tokio-test = { workspace = true } -tracing-subscriber = { workspace = true } + +[lints] +workspace = true \ No newline at end of file diff --git a/p2p/p2p-core/src/client.rs b/p2p/p2p-core/src/client.rs index 662a8ee..8685189 100644 --- a/p2p/p2p-core/src/client.rs +++ b/p2p/p2p-core/src/client.rs @@ -43,8 +43,8 @@ pub enum InternalPeerID { impl Display for InternalPeerID { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { - InternalPeerID::KnownAddr(addr) => addr.fmt(f), - InternalPeerID::Unknown(id) => f.write_str(&format!("Unknown, ID: {id}")), + Self::KnownAddr(addr) => addr.fmt(f), + Self::Unknown(id) => f.write_str(&format!("Unknown, ID: {id}")), } } } @@ -113,7 +113,7 @@ impl Client { fn set_err(&self, err: PeerError) -> tower::BoxError { let err_str = err.to_string(); match self.error.try_insert_err(err) { - Ok(_) => err_str, + Ok(()) => err_str, Err(e) => e.to_string(), } .into() @@ -169,9 +169,8 @@ impl Service for 
Client { TrySendError::Closed(req) | TrySendError::Full(req) => { self.set_err(PeerError::ClientChannelClosed); - let _ = req - .response_channel - .send(Err(PeerError::ClientChannelClosed.into())); + let resp = Err(PeerError::ClientChannelClosed.into()); + drop(req.response_channel.send(resp)); } } } @@ -216,7 +215,7 @@ where tracing::debug!("Sending back response"); - let _ = req.response_channel.send(Ok(res)); + drop(req.response_channel.send(Ok(res))); } } .instrument(task_span), diff --git a/p2p/p2p-core/src/client/connection.rs b/p2p/p2p-core/src/client/connection.rs index f3f3f6b..f7b9be5 100644 --- a/p2p/p2p-core/src/client/connection.rs +++ b/p2p/p2p-core/src/client/connection.rs @@ -26,7 +26,7 @@ use crate::{ }; /// A request to the connection task from a [`Client`](crate::client::Client). -pub struct ConnectionTaskRequest { +pub(crate) struct ConnectionTaskRequest { /// The request. pub request: PeerRequest, /// The response channel. @@ -36,7 +36,7 @@ pub struct ConnectionTaskRequest { } /// The connection state. -pub enum State { +pub(crate) enum State { /// Waiting for a request from Cuprate or the connected peer. WaitingForRequest, /// Waiting for a response from the peer. @@ -53,7 +53,7 @@ pub enum State { /// Returns if the [`LevinCommand`] is the correct response message for our request. /// /// e.g. that we didn't get a block for a txs request. -fn levin_command_response(message_id: &MessageID, command: LevinCommand) -> bool { +const fn levin_command_response(message_id: MessageID, command: LevinCommand) -> bool { matches!( (message_id, command), (MessageID::Handshake, LevinCommand::Handshake) @@ -71,7 +71,7 @@ fn levin_command_response(message_id: &MessageID, command: LevinCommand) -> bool } /// This represents a connection to a peer. -pub struct Connection { +pub(crate) struct Connection { /// The peer sink - where we send messages to the peer. 
peer_sink: Z::Sink, @@ -104,15 +104,15 @@ where BrdcstStrm: Stream + Send + 'static, { /// Create a new connection struct. - pub fn new( + pub(crate) fn new( peer_sink: Z::Sink, client_rx: mpsc::Receiver, broadcast_stream: BrdcstStrm, peer_request_handler: PeerRequestHandler, connection_guard: ConnectionGuard, error: SharedError, - ) -> Connection { - Connection { + ) -> Self { + Self { peer_sink, state: State::WaitingForRequest, request_timeout: None, @@ -174,15 +174,14 @@ where if let Err(e) = res { // can't clone the error so turn it to a string first, hacky but oh well. let err_str = e.to_string(); - let _ = req.response_channel.send(Err(err_str.clone().into())); + drop(req.response_channel.send(Err(err_str.into()))); return Err(e); - } else { - // We still need to respond even if the response is this. - let _ = req - .response_channel - .send(Ok(PeerResponse::Protocol(ProtocolResponse::NA))); } + // We still need to respond even if the response is this. + let resp = Ok(PeerResponse::Protocol(ProtocolResponse::NA)); + drop(req.response_channel.send(resp)); + Ok(()) } @@ -215,7 +214,7 @@ where }; // Check if the message is a response to our request. - if levin_command_response(request_id, mes.command()) { + if levin_command_response(*request_id, mes.command()) { // TODO: Do more checks before returning response. let State::WaitingForResponse { tx, .. } = @@ -224,9 +223,11 @@ where panic!("Not in correct state, can't receive response!") }; - let _ = tx.send(Ok(mes + let resp = Ok(mes .try_into() - .map_err(|_| PeerError::PeerSentInvalidMessage)?)); + .map_err(|_| PeerError::PeerSentInvalidMessage)?); + + drop(tx.send(resp)); self.request_timeout = None; @@ -282,7 +283,7 @@ where tokio::select! 
{ biased; - _ = self.request_timeout.as_mut().expect("Request timeout was not set!") => { + () = self.request_timeout.as_mut().expect("Request timeout was not set!") => { Err(PeerError::ClientChannelClosed) } broadcast_req = self.broadcast_stream.next() => { @@ -306,8 +307,11 @@ where /// Runs the Connection handler logic, this should be put in a separate task. /// /// `eager_protocol_messages` are protocol messages that we received during a handshake. - pub async fn run(mut self, mut stream: Str, eager_protocol_messages: Vec) - where + pub(crate) async fn run( + mut self, + mut stream: Str, + eager_protocol_messages: Vec, + ) where Str: FusedStream> + Unpin, { tracing::debug!( @@ -348,6 +352,7 @@ where /// Shutdowns the connection, flushing pending requests and setting the error slot, if it hasn't been /// set already. + #[expect(clippy::significant_drop_tightening)] fn shutdown(mut self, err: PeerError) { tracing::debug!("Connection task shutting down: {}", err); @@ -362,11 +367,11 @@ where if let State::WaitingForResponse { tx, .. } = std::mem::replace(&mut self.state, State::WaitingForRequest) { - let _ = tx.send(Err(err_str.clone().into())); + drop(tx.send(Err(err_str.clone().into()))); } while let Ok(req) = client_rx.try_recv() { - let _ = req.response_channel.send(Err(err_str.clone().into())); + drop(req.response_channel.send(Err(err_str.clone().into()))); } self.connection_guard.connection_closed(); diff --git a/p2p/p2p-core/src/client/connector.rs b/p2p/p2p-core/src/client/connector.rs index d937165..553f5a4 100644 --- a/p2p/p2p-core/src/client/connector.rs +++ b/p2p/p2p-core/src/client/connector.rs @@ -40,7 +40,9 @@ impl Connector { /// Create a new connector from a handshaker. 
- pub fn new(handshaker: HandShaker) -> Self { + pub const fn new( + handshaker: HandShaker, + ) -> Self { Self { handshaker } } } diff --git a/p2p/p2p-core/src/client/handshaker.rs b/p2p/p2p-core/src/client/handshaker.rs index 67a58d4..d6873a8 100644 --- a/p2p/p2p-core/src/client/handshaker.rs +++ b/p2p/p2p-core/src/client/handshaker.rs @@ -113,7 +113,7 @@ impl HandShaker { /// Creates a new handshaker. - fn new( + const fn new( address_book: AdrBook, peer_sync_svc: PSync, core_sync_svc: CSync, @@ -226,11 +226,12 @@ pub async fn ping(addr: N::Addr) -> Result Err(BucketError::IO(std::io::Error::new( std::io::ErrorKind::ConnectionAborted, "The peer stream returned None", - )))? + )) + .into()) } /// This function completes a handshake with the requested peer. -#[allow(clippy::too_many_arguments)] +#[expect(clippy::too_many_arguments)] async fn handshake( req: DoHandshakeRequest, @@ -403,7 +404,10 @@ where break 'check_out_addr None; }; - // u32 does not make sense as a port so just truncate it. + #[expect( + clippy::cast_possible_truncation, + reason = "u32 does not make sense as a port so just truncate it." + )] outbound_address.set_port(peer_node_data.my_port as u16); let Ok(Ok(ping_peer_id)) = timeout( @@ -508,7 +512,7 @@ where info.id, info.handle.clone(), connection_tx.clone(), - semaphore.clone(), + Arc::clone(&semaphore), address_book, core_sync_svc, peer_sync_svc, @@ -671,7 +675,7 @@ async fn wait_for_message( _ => { return Err(HandshakeError::PeerSentInvalidMessage( "Peer sent an admin request before responding to the handshake", - )) + )); } } } @@ -686,16 +690,17 @@ async fn wait_for_message( )); } - _ => Err(HandshakeError::PeerSentInvalidMessage( + Message::Response(_) => Err(HandshakeError::PeerSentInvalidMessage( "Peer sent an incorrect message", )), - }? + }?; } Err(BucketError::IO(std::io::Error::new( std::io::ErrorKind::ConnectionAborted, "The peer stream returned None", - )))? 
+ )) + .into()) } /// Sends a [`AdminResponseMessage::SupportFlags`] down the peer sink. diff --git a/p2p/p2p-core/src/client/handshaker/builder.rs b/p2p/p2p-core/src/client/handshaker/builder.rs index a40f396..069811d 100644 --- a/p2p/p2p-core/src/client/handshaker/builder.rs +++ b/p2p/p2p-core/src/client/handshaker/builder.rs @@ -87,14 +87,13 @@ impl where NAdrBook: AddressBook + Clone, { - let HandshakerBuilder { + let Self { core_sync_svc, peer_sync_svc, protocol_request_svc, our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, .. } = self; @@ -106,7 +105,7 @@ impl our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, + _zone: PhantomData, } } @@ -130,14 +129,13 @@ impl where NCSync: CoreSyncSvc + Clone, { - let HandshakerBuilder { + let Self { address_book, peer_sync_svc, protocol_request_svc, our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, .. } = self; @@ -149,7 +147,7 @@ impl our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, + _zone: PhantomData, } } @@ -167,14 +165,13 @@ impl where NPSync: PeerSyncSvc + Clone, { - let HandshakerBuilder { + let Self { address_book, core_sync_svc, protocol_request_svc, our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, .. } = self; @@ -186,7 +183,7 @@ impl our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, + _zone: PhantomData, } } @@ -204,14 +201,13 @@ impl where NProtoHdlr: ProtocolRequestHandler + Clone, { - let HandshakerBuilder { + let Self { address_book, core_sync_svc, peer_sync_svc, our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, .. 
} = self; @@ -223,7 +219,7 @@ impl our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, + _zone: PhantomData, } } @@ -242,14 +238,13 @@ impl BrdcstStrm: Stream + Send + 'static, NBrdcstStrmMkr: Fn(InternalPeerID) -> BrdcstStrm + Clone + Send + 'static, { - let HandshakerBuilder { + let Self { address_book, core_sync_svc, peer_sync_svc, protocol_request_svc, our_basic_node_data, connection_parent_span, - _zone, .. } = self; @@ -261,7 +256,7 @@ impl our_basic_node_data, broadcast_stream_maker: new_broadcast_stream_maker, connection_parent_span, - _zone, + _zone: PhantomData, } } @@ -270,6 +265,7 @@ impl /// ## Default Connection Parent Span /// /// The default connection span will be [`Span::none`]. + #[must_use] pub fn with_connection_parent_span(self, connection_parent_span: Span) -> Self { Self { connection_parent_span: Some(connection_parent_span), diff --git a/p2p/p2p-core/src/client/handshaker/builder/dummy.rs b/p2p/p2p-core/src/client/handshaker/builder/dummy.rs index ae97cdc..e3c4335 100644 --- a/p2p/p2p-core/src/client/handshaker/builder/dummy.rs +++ b/p2p/p2p-core/src/client/handshaker/builder/dummy.rs @@ -42,8 +42,8 @@ pub struct DummyCoreSyncSvc(CoreSyncData); impl DummyCoreSyncSvc { /// Returns a [`DummyCoreSyncSvc`] that will just return the mainnet genesis [`CoreSyncData`]. - pub fn static_mainnet_genesis() -> DummyCoreSyncSvc { - DummyCoreSyncSvc(CoreSyncData { + pub const fn static_mainnet_genesis() -> Self { + Self(CoreSyncData { cumulative_difficulty: 1, cumulative_difficulty_top64: 0, current_height: 1, @@ -56,8 +56,8 @@ impl DummyCoreSyncSvc { } /// Returns a [`DummyCoreSyncSvc`] that will just return the testnet genesis [`CoreSyncData`]. 
- pub fn static_testnet_genesis() -> DummyCoreSyncSvc { - DummyCoreSyncSvc(CoreSyncData { + pub const fn static_testnet_genesis() -> Self { + Self(CoreSyncData { cumulative_difficulty: 1, cumulative_difficulty_top64: 0, current_height: 1, @@ -70,8 +70,8 @@ impl DummyCoreSyncSvc { } /// Returns a [`DummyCoreSyncSvc`] that will just return the stagenet genesis [`CoreSyncData`]. - pub fn static_stagenet_genesis() -> DummyCoreSyncSvc { - DummyCoreSyncSvc(CoreSyncData { + pub const fn static_stagenet_genesis() -> Self { + Self(CoreSyncData { cumulative_difficulty: 1, cumulative_difficulty_top64: 0, current_height: 1, @@ -84,8 +84,8 @@ impl DummyCoreSyncSvc { } /// Returns a [`DummyCoreSyncSvc`] that will return the provided [`CoreSyncData`]. - pub fn static_custom(data: CoreSyncData) -> DummyCoreSyncSvc { - DummyCoreSyncSvc(data) + pub const fn static_custom(data: CoreSyncData) -> Self { + Self(data) } } diff --git a/p2p/p2p-core/src/client/request_handler.rs b/p2p/p2p-core/src/client/request_handler.rs index 284f954..7059eed 100644 --- a/p2p/p2p-core/src/client/request_handler.rs +++ b/p2p/p2p-core/src/client/request_handler.rs @@ -46,7 +46,7 @@ pub(crate) struct PeerRequestHandler { pub peer_info: PeerInformation, } -impl PeerRequestHandler +impl PeerRequestHandler where Z: NetworkZone, A: AddressBook, @@ -55,7 +55,7 @@ where PR: ProtocolRequestHandler, { /// Handles an incoming [`PeerRequest`] to our node. - pub async fn handle_peer_request( + pub(crate) async fn handle_peer_request( &mut self, req: PeerRequest, ) -> Result { diff --git a/p2p/p2p-core/src/client/timeout_monitor.rs b/p2p/p2p-core/src/client/timeout_monitor.rs index 5228ede..6dbb4a2 100644 --- a/p2p/p2p-core/src/client/timeout_monitor.rs +++ b/p2p/p2p-core/src/client/timeout_monitor.rs @@ -1,6 +1,6 @@ //! Timeout Monitor //! -//! This module holds the task that sends periodic [TimedSync](PeerRequest::TimedSync) requests to a peer to make +//! 
This module holds the task that sends periodic [`TimedSync`](PeerRequest::TimedSync) requests to a peer to make //! sure the connection is still active. use std::sync::Arc; @@ -64,7 +64,7 @@ where return Ok(()); } - let Ok(permit) = semaphore.clone().try_acquire_owned() else { + let Ok(permit) = Arc::clone(&semaphore).try_acquire_owned() else { // If we can't get a permit the connection is currently waiting for a response, so no need to // do a timed sync. continue; diff --git a/p2p/p2p-core/src/error.rs b/p2p/p2p-core/src/error.rs index 65303ad..d0de923 100644 --- a/p2p/p2p-core/src/error.rs +++ b/p2p/p2p-core/src/error.rs @@ -4,7 +4,7 @@ pub struct SharedError(Arc>); impl Clone for SharedError { fn clone(&self) -> Self { - Self(self.0.clone()) + Self(Arc::clone(&self.0)) } } diff --git a/p2p/p2p-core/src/handles.rs b/p2p/p2p-core/src/handles.rs index da47b65..06dc212 100644 --- a/p2p/p2p-core/src/handles.rs +++ b/p2p/p2p-core/src/handles.rs @@ -18,11 +18,12 @@ pub struct HandleBuilder { impl HandleBuilder { /// Create a new builder. - pub fn new() -> Self { + pub const fn new() -> Self { Self { permit: None } } /// Sets the permit for this connection. + #[must_use] pub fn with_permit(mut self, permit: Option) -> Self { self.permit = permit; self @@ -40,7 +41,7 @@ impl HandleBuilder { _permit: self.permit, }, ConnectionHandle { - token: token.clone(), + token, ban: Arc::new(OnceLock::new()), }, ) @@ -66,13 +67,13 @@ impl ConnectionGuard { /// /// This will be called on [`Drop::drop`]. pub fn connection_closed(&self) { - self.token.cancel() + self.token.cancel(); } } impl Drop for ConnectionGuard { fn drop(&mut self) { - self.token.cancel() + self.token.cancel(); } } @@ -90,6 +91,10 @@ impl ConnectionHandle { } /// Bans the peer for the given `duration`. 
pub fn ban_peer(&self, duration: Duration) { + #[expect( + clippy::let_underscore_must_use, + reason = "error means peer is already banned; fine to ignore" + )] let _ = self.ban.set(BanPeer(duration)); self.token.cancel(); } @@ -103,6 +108,6 @@ impl ConnectionHandle { } /// Sends the signal to the connection task to disconnect. pub fn send_close_signal(&self) { - self.token.cancel() + self.token.cancel(); } } diff --git a/p2p/p2p-core/src/lib.rs b/p2p/p2p-core/src/lib.rs index 83cc4d2..04e8676 100644 --- a/p2p/p2p-core/src/lib.rs +++ b/p2p/p2p-core/src/lib.rs @@ -6,7 +6,7 @@ //! //! # Network Zones //! -//! This crate abstracts over network zones, Tor/I2p/clearnet with the [NetworkZone] trait. Currently only clearnet is implemented: [ClearNet]. +//! This crate abstracts over network zones, Tor/I2p/clearnet with the [`NetworkZone`] trait. Currently only clearnet is implemented: [`ClearNet`]. //! //! # Usage //! @@ -56,6 +56,16 @@ //! .unwrap(); //! # }); //! ``` + +cfg_if::cfg_if! { + // Used in `tests/` + if #[cfg(test)] { + use cuprate_test_utils as _; + use tokio_test as _; + use hex as _; + } +} + use std::{fmt::Debug, future::Future, hash::Hash}; use futures::{Sink, Stream}; @@ -102,7 +112,7 @@ pub trait NetZoneAddress: + Unpin + 'static { - /// Cuprate needs to be able to ban peers by IP addresses and not just by SocketAddr as + /// Cuprate needs to be able to ban peers by IP addresses and not just by `SocketAddr` as /// that include the port, to be able to facilitate this network addresses must have a ban ID /// which for hidden services could just be the address it self but for clear net addresses will /// be the IP address. 
diff --git a/p2p/p2p-core/src/network_zones/clear.rs b/p2p/p2p-core/src/network_zones/clear.rs index acde368..261d5ad 100644 --- a/p2p/p2p-core/src/network_zones/clear.rs +++ b/p2p/p2p-core/src/network_zones/clear.rs @@ -19,7 +19,7 @@ impl NetZoneAddress for SocketAddr { type BanID = IpAddr; fn set_port(&mut self, port: u16) { - SocketAddr::set_port(self, port) + Self::set_port(self, port); } fn ban_id(&self) -> Self::BanID { diff --git a/p2p/p2p-core/src/protocol.rs b/p2p/p2p-core/src/protocol.rs index 5e4f4d7..7d8d431 100644 --- a/p2p/p2p-core/src/protocol.rs +++ b/p2p/p2p-core/src/protocol.rs @@ -8,7 +8,7 @@ //! //! Here is every P2P request/response. //! -//! *note admin messages are already request/response so "Handshake" is actually made of a HandshakeRequest & HandshakeResponse +//! *note admin messages are already request/response so "Handshake" is actually made of a `HandshakeRequest` & `HandshakeResponse` //! //! ```md //! Admin: @@ -78,15 +78,15 @@ pub enum PeerRequest { } impl PeerRequest { - pub fn id(&self) -> MessageID { + pub const fn id(&self) -> MessageID { match self { - PeerRequest::Admin(admin_req) => match admin_req { + Self::Admin(admin_req) => match admin_req { AdminRequestMessage::Handshake(_) => MessageID::Handshake, AdminRequestMessage::TimedSync(_) => MessageID::TimedSync, AdminRequestMessage::Ping => MessageID::Ping, AdminRequestMessage::SupportFlags => MessageID::SupportFlags, }, - PeerRequest::Protocol(protocol_request) => match protocol_request { + Self::Protocol(protocol_request) => match protocol_request { ProtocolRequest::GetObjects(_) => MessageID::GetObjects, ProtocolRequest::GetChain(_) => MessageID::GetChain, ProtocolRequest::FluffyMissingTxs(_) => MessageID::FluffyMissingTxs, @@ -98,10 +98,10 @@ impl PeerRequest { } } - pub fn needs_response(&self) -> bool { + pub const fn needs_response(&self) -> bool { !matches!( self, - PeerRequest::Protocol( + Self::Protocol( ProtocolRequest::NewBlock(_) | 
ProtocolRequest::NewFluffyBlock(_) | ProtocolRequest::NewTransactions(_) @@ -126,15 +126,15 @@ pub enum PeerResponse { } impl PeerResponse { - pub fn id(&self) -> Option { + pub const fn id(&self) -> Option { Some(match self { - PeerResponse::Admin(admin_res) => match admin_res { + Self::Admin(admin_res) => match admin_res { AdminResponseMessage::Handshake(_) => MessageID::Handshake, AdminResponseMessage::TimedSync(_) => MessageID::TimedSync, AdminResponseMessage::Ping(_) => MessageID::Ping, AdminResponseMessage::SupportFlags(_) => MessageID::SupportFlags, }, - PeerResponse::Protocol(protocol_res) => match protocol_res { + Self::Protocol(protocol_res) => match protocol_res { ProtocolResponse::GetObjects(_) => MessageID::GetObjects, ProtocolResponse::GetChain(_) => MessageID::GetChain, ProtocolResponse::NewFluffyBlock(_) => MessageID::NewBlock, diff --git a/p2p/p2p-core/src/protocol/try_from.rs b/p2p/p2p-core/src/protocol/try_from.rs index 8a0b67d..d3a7260 100644 --- a/p2p/p2p-core/src/protocol/try_from.rs +++ b/p2p/p2p-core/src/protocol/try_from.rs @@ -11,15 +11,13 @@ pub struct MessageConversionError; impl From for ProtocolMessage { fn from(value: ProtocolRequest) -> Self { match value { - ProtocolRequest::GetObjects(val) => ProtocolMessage::GetObjectsRequest(val), - ProtocolRequest::GetChain(val) => ProtocolMessage::ChainRequest(val), - ProtocolRequest::FluffyMissingTxs(val) => { - ProtocolMessage::FluffyMissingTransactionsRequest(val) - } - ProtocolRequest::GetTxPoolCompliment(val) => ProtocolMessage::GetTxPoolCompliment(val), - ProtocolRequest::NewBlock(val) => ProtocolMessage::NewBlock(val), - ProtocolRequest::NewFluffyBlock(val) => ProtocolMessage::NewFluffyBlock(val), - ProtocolRequest::NewTransactions(val) => ProtocolMessage::NewTransactions(val), + ProtocolRequest::GetObjects(val) => Self::GetObjectsRequest(val), + ProtocolRequest::GetChain(val) => Self::ChainRequest(val), + ProtocolRequest::FluffyMissingTxs(val) => 
Self::FluffyMissingTransactionsRequest(val), + ProtocolRequest::GetTxPoolCompliment(val) => Self::GetTxPoolCompliment(val), + ProtocolRequest::NewBlock(val) => Self::NewBlock(val), + ProtocolRequest::NewFluffyBlock(val) => Self::NewFluffyBlock(val), + ProtocolRequest::NewTransactions(val) => Self::NewTransactions(val), } } } @@ -29,15 +27,13 @@ impl TryFrom for ProtocolRequest { fn try_from(value: ProtocolMessage) -> Result { Ok(match value { - ProtocolMessage::GetObjectsRequest(val) => ProtocolRequest::GetObjects(val), - ProtocolMessage::ChainRequest(val) => ProtocolRequest::GetChain(val), - ProtocolMessage::FluffyMissingTransactionsRequest(val) => { - ProtocolRequest::FluffyMissingTxs(val) - } - ProtocolMessage::GetTxPoolCompliment(val) => ProtocolRequest::GetTxPoolCompliment(val), - ProtocolMessage::NewBlock(val) => ProtocolRequest::NewBlock(val), - ProtocolMessage::NewFluffyBlock(val) => ProtocolRequest::NewFluffyBlock(val), - ProtocolMessage::NewTransactions(val) => ProtocolRequest::NewTransactions(val), + ProtocolMessage::GetObjectsRequest(val) => Self::GetObjects(val), + ProtocolMessage::ChainRequest(val) => Self::GetChain(val), + ProtocolMessage::FluffyMissingTransactionsRequest(val) => Self::FluffyMissingTxs(val), + ProtocolMessage::GetTxPoolCompliment(val) => Self::GetTxPoolCompliment(val), + ProtocolMessage::NewBlock(val) => Self::NewBlock(val), + ProtocolMessage::NewFluffyBlock(val) => Self::NewFluffyBlock(val), + ProtocolMessage::NewTransactions(val) => Self::NewTransactions(val), ProtocolMessage::GetObjectsResponse(_) | ProtocolMessage::ChainEntryResponse(_) => { return Err(MessageConversionError) } @@ -48,8 +44,8 @@ impl TryFrom for ProtocolRequest { impl From for Message { fn from(value: PeerRequest) -> Self { match value { - PeerRequest::Admin(val) => Message::Request(val), - PeerRequest::Protocol(val) => Message::Protocol(val.into()), + PeerRequest::Admin(val) => Self::Request(val), + PeerRequest::Protocol(val) => Self::Protocol(val.into()), } } } 
@@ -59,8 +55,8 @@ impl TryFrom for PeerRequest { fn try_from(value: Message) -> Result { match value { - Message::Request(req) => Ok(PeerRequest::Admin(req)), - Message::Protocol(pro) => Ok(PeerRequest::Protocol(pro.try_into()?)), + Message::Request(req) => Ok(Self::Admin(req)), + Message::Protocol(pro) => Ok(Self::Protocol(pro.try_into()?)), Message::Response(_) => Err(MessageConversionError), } } @@ -71,10 +67,10 @@ impl TryFrom for ProtocolMessage { fn try_from(value: ProtocolResponse) -> Result { Ok(match value { - ProtocolResponse::NewTransactions(val) => ProtocolMessage::NewTransactions(val), - ProtocolResponse::NewFluffyBlock(val) => ProtocolMessage::NewFluffyBlock(val), - ProtocolResponse::GetChain(val) => ProtocolMessage::ChainEntryResponse(val), - ProtocolResponse::GetObjects(val) => ProtocolMessage::GetObjectsResponse(val), + ProtocolResponse::NewTransactions(val) => Self::NewTransactions(val), + ProtocolResponse::NewFluffyBlock(val) => Self::NewFluffyBlock(val), + ProtocolResponse::GetChain(val) => Self::ChainEntryResponse(val), + ProtocolResponse::GetObjects(val) => Self::GetObjectsResponse(val), ProtocolResponse::NA => return Err(MessageConversionError), }) } @@ -85,10 +81,10 @@ impl TryFrom for ProtocolResponse { fn try_from(value: ProtocolMessage) -> Result { Ok(match value { - ProtocolMessage::NewTransactions(val) => ProtocolResponse::NewTransactions(val), - ProtocolMessage::NewFluffyBlock(val) => ProtocolResponse::NewFluffyBlock(val), - ProtocolMessage::ChainEntryResponse(val) => ProtocolResponse::GetChain(val), - ProtocolMessage::GetObjectsResponse(val) => ProtocolResponse::GetObjects(val), + ProtocolMessage::NewTransactions(val) => Self::NewTransactions(val), + ProtocolMessage::NewFluffyBlock(val) => Self::NewFluffyBlock(val), + ProtocolMessage::ChainEntryResponse(val) => Self::GetChain(val), + ProtocolMessage::GetObjectsResponse(val) => Self::GetObjects(val), ProtocolMessage::ChainRequest(_) | 
ProtocolMessage::FluffyMissingTransactionsRequest(_) | ProtocolMessage::GetObjectsRequest(_) @@ -103,8 +99,8 @@ impl TryFrom for PeerResponse { fn try_from(value: Message) -> Result { match value { - Message::Response(res) => Ok(PeerResponse::Admin(res)), - Message::Protocol(pro) => Ok(PeerResponse::Protocol(pro.try_into()?)), + Message::Response(res) => Ok(Self::Admin(res)), + Message::Protocol(pro) => Ok(Self::Protocol(pro.try_into()?)), Message::Request(_) => Err(MessageConversionError), } } @@ -115,8 +111,8 @@ impl TryFrom for Message { fn try_from(value: PeerResponse) -> Result { Ok(match value { - PeerResponse::Admin(val) => Message::Response(val), - PeerResponse::Protocol(val) => Message::Protocol(val.try_into()?), + PeerResponse::Admin(val) => Self::Response(val), + PeerResponse::Protocol(val) => Self::Protocol(val.try_into()?), }) } } diff --git a/p2p/p2p-core/src/services.rs b/p2p/p2p-core/src/services.rs index 6d66cfa..ba87684 100644 --- a/p2p/p2p-core/src/services.rs +++ b/p2p/p2p-core/src/services.rs @@ -52,7 +52,7 @@ pub struct ZoneSpecificPeerListEntryBase { pub rpc_credits_per_hash: u32, } -impl From> for cuprate_wire::PeerListEntryBase { +impl From> for PeerListEntryBase { fn from(value: ZoneSpecificPeerListEntryBase) -> Self { Self { adr: value.adr.into(), @@ -74,9 +74,7 @@ pub enum PeerListConversionError { PruningSeed(#[from] PruningError), } -impl TryFrom - for ZoneSpecificPeerListEntryBase -{ +impl TryFrom for ZoneSpecificPeerListEntryBase { type Error = PeerListConversionError; fn try_from(value: PeerListEntryBase) -> Result { diff --git a/p2p/p2p-core/tests/fragmented_handshake.rs b/p2p/p2p-core/tests/fragmented_handshake.rs index c19a2a6..1235df9 100644 --- a/p2p/p2p-core/tests/fragmented_handshake.rs +++ b/p2p/p2p-core/tests/fragmented_handshake.rs @@ -1,4 +1,7 @@ //! This file contains a test for a handshake with monerod but uses fragmented messages. 
+ +#![expect(unused_crate_dependencies, reason = "external test module")] + use std::{ net::SocketAddr, pin::Pin, @@ -21,6 +24,13 @@ use tokio_util::{ use tower::{Service, ServiceExt}; use cuprate_helper::network::Network; +use cuprate_test_utils::monerod::monerod; +use cuprate_wire::{ + common::PeerSupportFlags, + levin::{message::make_fragmented_messages, LevinMessage, Protocol}, + BasicNodeData, Message, MoneroWireCodec, +}; + use cuprate_p2p_core::{ client::{ handshaker::HandshakerBuilder, ConnectRequest, Connector, DoHandshakeRequest, @@ -28,13 +38,6 @@ use cuprate_p2p_core::{ }, ClearNetServerCfg, ConnectionDirection, NetworkZone, }; -use cuprate_wire::{ - common::PeerSupportFlags, - levin::{message::make_fragmented_messages, LevinMessage, Protocol}, - BasicNodeData, Message, MoneroWireCodec, -}; - -use cuprate_test_utils::monerod::monerod; /// A network zone equal to clear net where every message sent is turned into a fragmented message. /// Does not support sending fragmented or dummy messages manually. 
@@ -184,7 +187,7 @@ async fn fragmented_handshake_monerod_to_cuprate() { let next_connection_fut = timeout(Duration::from_secs(30), listener.next()); if let Some(Ok((addr, stream, sink))) = next_connection_fut.await.unwrap() { - let _ = handshaker + handshaker .ready() .await .unwrap() diff --git a/p2p/p2p-core/tests/handles.rs b/p2p/p2p-core/tests/handles.rs index 47d70b0..2a2e2be 100644 --- a/p2p/p2p-core/tests/handles.rs +++ b/p2p/p2p-core/tests/handles.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "external test module")] + use std::{sync::Arc, time::Duration}; use tokio::sync::Semaphore; diff --git a/p2p/p2p-core/tests/handshake.rs b/p2p/p2p-core/tests/handshake.rs index 5ce6153..86d62ed 100644 --- a/p2p/p2p-core/tests/handshake.rs +++ b/p2p/p2p-core/tests/handshake.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "external test module")] + use std::time::Duration; use futures::StreamExt; @@ -9,6 +11,10 @@ use tokio_util::codec::{FramedRead, FramedWrite}; use tower::{Service, ServiceExt}; use cuprate_helper::network::Network; +use cuprate_test_utils::{ + monerod::monerod, + test_netzone::{TestNetZone, TestNetZoneAddr}, +}; use cuprate_wire::{common::PeerSupportFlags, BasicNodeData, MoneroWireCodec}; use cuprate_p2p_core::{ @@ -19,12 +25,8 @@ use cuprate_p2p_core::{ ClearNet, ClearNetServerCfg, ConnectionDirection, NetworkZone, }; -use cuprate_test_utils::{ - monerod::monerod, - test_netzone::{TestNetZone, TestNetZoneAddr}, -}; - #[tokio::test] +#[expect(clippy::significant_drop_tightening)] async fn handshake_cuprate_to_cuprate() { // Tests a Cuprate <-> Cuprate handshake by making 2 handshake services and making them talk to // each other. 
@@ -147,7 +149,7 @@ async fn handshake_monerod_to_cuprate() { let next_connection_fut = timeout(Duration::from_secs(30), listener.next()); if let Some(Ok((addr, stream, sink))) = next_connection_fut.await.unwrap() { - let _ = handshaker + handshaker .ready() .await .unwrap() diff --git a/p2p/p2p-core/tests/sending_receiving.rs b/p2p/p2p-core/tests/sending_receiving.rs index e035daf..8c90c83 100644 --- a/p2p/p2p-core/tests/sending_receiving.rs +++ b/p2p/p2p-core/tests/sending_receiving.rs @@ -1,6 +1,9 @@ +#![expect(unused_crate_dependencies, reason = "external test module")] + use tower::{Service, ServiceExt}; use cuprate_helper::network::Network; +use cuprate_test_utils::monerod::monerod; use cuprate_wire::{common::PeerSupportFlags, protocol::GetObjectsRequest, BasicNodeData}; use cuprate_p2p_core::{ @@ -9,8 +12,6 @@ use cuprate_p2p_core::{ ClearNet, ProtocolRequest, ProtocolResponse, }; -use cuprate_test_utils::monerod::monerod; - #[tokio::test] async fn get_single_block_from_monerod() { let monerod = monerod(["--out-peers=0"]).await; From 5eb712f4de65da7c86c2d9e498eeb3bb171a4651 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Sun, 22 Sep 2024 14:34:20 -0400 Subject: [PATCH 070/104] `cargo upgrade` (#296) cargo upgrade Co-authored-by: Boog900 --- Cargo.lock | 4 ++-- Cargo.toml | 46 ++++++++++++++++++------------------ consensus/rules/Cargo.toml | 2 +- helper/Cargo.toml | 4 ++-- net/epee-encoding/Cargo.toml | 4 ++-- rpc/interface/Cargo.toml | 4 ++-- storage/database/Cargo.toml | 12 +++++----- 7 files changed, 38 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5481b62..3caf437 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -52,9 +52,9 @@ checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "anyhow" -version = "1.0.87" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" +checksum = 
"86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "async-stream" diff --git a/Cargo.toml b/Cargo.toml index 2554fbd..254d3ce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,48 +48,48 @@ opt-level = 1 opt-level = 3 [workspace.dependencies] -anyhow = { version = "1.0.87", default-features = false } -async-trait = { version = "0.1.74", default-features = false } -bitflags = { version = "2.4.2", default-features = false } -borsh = { version = "1.2.1", default-features = false } -bytemuck = { version = "1.14.3", default-features = false } -bytes = { version = "1.5.0", default-features = false } +anyhow = { version = "1.0.89", default-features = false } +async-trait = { version = "0.1.82", default-features = false } +bitflags = { version = "2.6.0", default-features = false } +borsh = { version = "1.5.1", default-features = false } +bytemuck = { version = "1.18.0", default-features = false } +bytes = { version = "1.7.2", default-features = false } cfg-if = { version = "1.0.0", default-features = false } -clap = { version = "4.4.7", default-features = false } -chrono = { version = "0.4.31", default-features = false } +clap = { version = "4.5.17", default-features = false } +chrono = { version = "0.4.38", default-features = false } crypto-bigint = { version = "0.5.5", default-features = false } crossbeam = { version = "0.8.4", default-features = false } curve25519-dalek = { version = "4.1.3", default-features = false } dashmap = { version = "5.5.3", default-features = false } dirs = { version = "5.0.1", default-features = false } -futures = { version = "0.3.29", default-features = false } +futures = { version = "0.3.30", default-features = false } hex = { version = "0.4.3", default-features = false } hex-literal = { version = "0.4", default-features = false } -indexmap = { version = "2.2.5", default-features = false } +indexmap = { version = "2.5.0", default-features = false } monero-serai = { git = "https://github.com/Cuprate/serai.git", 
rev = "d5205ce", default-features = false } -paste = { version = "1.0.14", default-features = false } -pin-project = { version = "1.1.3", default-features = false } +paste = { version = "1.0.15", default-features = false } +pin-project = { version = "1.1.5", default-features = false } randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false } rand = { version = "0.8.5", default-features = false } rand_distr = { version = "0.4.3", default-features = false } -rayon = { version = "1.9.0", default-features = false } -serde_bytes = { version = "0.11.12", default-features = false } -serde_json = { version = "1.0.108", default-features = false } -serde = { version = "1.0.190", default-features = false } -thiserror = { version = "1.0.50", default-features = false } -thread_local = { version = "1.1.7", default-features = false } -tokio-util = { version = "0.7.10", default-features = false } -tokio-stream = { version = "0.1.14", default-features = false } -tokio = { version = "1.33.0", default-features = false } +rayon = { version = "1.10.0", default-features = false } +serde_bytes = { version = "0.11.15", default-features = false } +serde_json = { version = "1.0.128", default-features = false } +serde = { version = "1.0.210", default-features = false } +thiserror = { version = "1.0.63", default-features = false } +thread_local = { version = "1.1.8", default-features = false } +tokio-util = { version = "0.7.12", default-features = false } +tokio-stream = { version = "0.1.16", default-features = false } +tokio = { version = "1.40.0", default-features = false } tower = { version = "0.4.13", default-features = false } -tracing-subscriber = { version = "0.3.17", default-features = false } +tracing-subscriber = { version = "0.3.18", default-features = false } tracing = { version = "0.1.40", default-features = false } ## workspace.dev-dependencies monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" } 
monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" } tempfile = { version = "3" } -pretty_assertions = { version = "1.4.0" } +pretty_assertions = { version = "1.4.1" } proptest = { version = "1" } proptest-derive = { version = "0.4.0" } tokio-test = { version = "0.4.4" } diff --git a/consensus/rules/Cargo.toml b/consensus/rules/Cargo.toml index 575bed7..ed97d33 100644 --- a/consensus/rules/Cargo.toml +++ b/consensus/rules/Cargo.toml @@ -36,4 +36,4 @@ proptest-derive = { workspace = true } tokio = { version = "1.35.0", features = ["rt-multi-thread", "macros"] } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/helper/Cargo.toml b/helper/Cargo.toml index baa3f23..614bdb2 100644 --- a/helper/Cargo.toml +++ b/helper/Cargo.toml @@ -37,11 +37,11 @@ rayon = { workspace = true, optional = true } [target.'cfg(windows)'.dependencies] target_os_lib = { package = "windows", version = ">=0.51", features = ["Win32_System_Threading", "Win32_Foundation"], optional = true } [target.'cfg(unix)'.dependencies] -target_os_lib = { package = "libc", version = "0.2.151", optional = true } +target_os_lib = { package = "libc", version = "0.2.158", optional = true } [dev-dependencies] tokio = { workspace = true, features = ["full"] } curve25519-dalek = { workspace = true } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/net/epee-encoding/Cargo.toml b/net/epee-encoding/Cargo.toml index 90a339f..c021e42 100644 --- a/net/epee-encoding/Cargo.toml +++ b/net/epee-encoding/Cargo.toml @@ -18,8 +18,8 @@ std = ["dep:thiserror", "bytes/std", "cuprate-fixed-bytes/std"] cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] } cuprate-fixed-bytes = { path = "../fixed-bytes", default-features = false } -paste = "1.0.14" -ref-cast = "1.0.22" +paste = "1.0.15" +ref-cast = "1.0.23" bytes = { workspace = true } thiserror = { workspace = true, optional = true} diff 
--git a/rpc/interface/Cargo.toml b/rpc/interface/Cargo.toml index 42d1055..00f7a22 100644 --- a/rpc/interface/Cargo.toml +++ b/rpc/interface/Cargo.toml @@ -31,7 +31,7 @@ cuprate-test-utils = { path = "../../test-utils" } axum = { version = "0.7.5", features = ["json", "tokio", "http2"] } serde_json = { workspace = true, features = ["std"] } tokio = { workspace = true, features = ["full"] } -ureq = { version = "2.10.0", features = ["json"] } +ureq = { version = "2.10.1", features = ["json"] } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/storage/database/Cargo.toml b/storage/database/Cargo.toml index 0ef4a97..7a2f4ae 100644 --- a/storage/database/Cargo.toml +++ b/storage/database/Cargo.toml @@ -17,7 +17,7 @@ redb = ["dep:redb"] redb-memory = ["redb"] [dependencies] -bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } +bytemuck = { version = "1.18.0", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } bytes = { workspace = true } cfg-if = { workspace = true } page_size = { version = "0.6.0" } # Needed for database resizes, they must be a multiple of the OS page size. @@ -25,14 +25,14 @@ paste = { workspace = true } thiserror = { workspace = true } # Optional features. 
-heed = { version = "0.20.0", features = ["read-txn-no-tls"], optional = true } -redb = { version = "2.1.0", optional = true } +heed = { version = "0.20.5", features = ["read-txn-no-tls"], optional = true } +redb = { version = "2.1.3", optional = true } serde = { workspace = true, optional = true } [dev-dependencies] -bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } +bytemuck = { version = "1.18.0", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } page_size = { version = "0.6.0" } -tempfile = { version = "3.10.0" } +tempfile = { version = "3.12.0" } [lints] -workspace = true \ No newline at end of file +workspace = true From 88605b081f0870c21017fd5487f0f0a08c986e0a Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Tue, 24 Sep 2024 12:23:22 -0400 Subject: [PATCH 071/104] books/architecture: port database design document (#267) * add chapters * add files, intro * db abstraction * backends * abstraction * syncing * serde * issues * common/types * common/ops * common/service * service diagram * service/resize * service/thread-model * service/shutdown * storage/blockchain * update md files * cleanup * fixes * update for https://github.com/Cuprate/cuprate/pull/290 * review fix --- books/architecture/src/SUMMARY.md | 36 +- books/architecture/src/storage/blockchain.md | 1 - .../src/storage/blockchain/intro.md | 3 + .../src/storage/blockchain/schema/intro.md | 2 + .../src/storage/blockchain/schema/multimap.md | 45 ++ .../src/storage/blockchain/schema/tables.md | 39 ++ .../architecture/src/storage/common/intro.md | 9 + books/architecture/src/storage/common/ops.md | 21 + .../storage/common/service/initialization.md | 9 + .../src/storage/common/service/intro.md | 65 ++ .../src/storage/common/service/requests.md | 8 + .../src/storage/common/service/resizing.md | 15 + .../src/storage/common/service/responses.md | 18 + .../src/storage/common/service/shutdown.md | 4 + 
.../storage/common/service/thread-model.md | 23 + .../architecture/src/storage/common/types.md | 21 + .../src/storage/database-abstraction.md | 1 - .../src/storage/db/abstraction/backend.md | 50 ++ .../storage/db/abstraction/concrete_env.md | 15 + .../src/storage/db/abstraction/intro.md | 33 + .../src/storage/db/abstraction/trait.md | 49 ++ books/architecture/src/storage/db/intro.md | 23 + .../src/storage/db/issues/endian.md | 6 + .../src/storage/db/issues/hot-swap.md | 17 + .../src/storage/db/issues/intro.md | 7 + .../src/storage/db/issues/multimap.md | 22 + .../src/storage/db/issues/traits.md | 15 + .../src/storage/db/issues/unaligned.md | 24 + books/architecture/src/storage/db/resizing.md | 8 + books/architecture/src/storage/db/serde.md | 44 ++ books/architecture/src/storage/db/syncing.md | 17 + books/architecture/src/storage/intro.md | 35 +- .../storage/{pruning.md => pruning/intro.md} | 0 .../{transaction-pool.md => txpool/intro.md} | 0 storage/README.md | 11 +- storage/blockchain/DESIGN.md | 600 ------------------ 36 files changed, 685 insertions(+), 611 deletions(-) delete mode 100644 books/architecture/src/storage/blockchain.md create mode 100644 books/architecture/src/storage/blockchain/intro.md create mode 100644 books/architecture/src/storage/blockchain/schema/intro.md create mode 100644 books/architecture/src/storage/blockchain/schema/multimap.md create mode 100644 books/architecture/src/storage/blockchain/schema/tables.md create mode 100644 books/architecture/src/storage/common/intro.md create mode 100644 books/architecture/src/storage/common/ops.md create mode 100644 books/architecture/src/storage/common/service/initialization.md create mode 100644 books/architecture/src/storage/common/service/intro.md create mode 100644 books/architecture/src/storage/common/service/requests.md create mode 100644 books/architecture/src/storage/common/service/resizing.md create mode 100644 books/architecture/src/storage/common/service/responses.md create mode 100644 
books/architecture/src/storage/common/service/shutdown.md create mode 100644 books/architecture/src/storage/common/service/thread-model.md create mode 100644 books/architecture/src/storage/common/types.md delete mode 100644 books/architecture/src/storage/database-abstraction.md create mode 100644 books/architecture/src/storage/db/abstraction/backend.md create mode 100644 books/architecture/src/storage/db/abstraction/concrete_env.md create mode 100644 books/architecture/src/storage/db/abstraction/intro.md create mode 100644 books/architecture/src/storage/db/abstraction/trait.md create mode 100644 books/architecture/src/storage/db/intro.md create mode 100644 books/architecture/src/storage/db/issues/endian.md create mode 100644 books/architecture/src/storage/db/issues/hot-swap.md create mode 100644 books/architecture/src/storage/db/issues/intro.md create mode 100644 books/architecture/src/storage/db/issues/multimap.md create mode 100644 books/architecture/src/storage/db/issues/traits.md create mode 100644 books/architecture/src/storage/db/issues/unaligned.md create mode 100644 books/architecture/src/storage/db/resizing.md create mode 100644 books/architecture/src/storage/db/serde.md create mode 100644 books/architecture/src/storage/db/syncing.md rename books/architecture/src/storage/{pruning.md => pruning/intro.md} (100%) rename books/architecture/src/storage/{transaction-pool.md => txpool/intro.md} (100%) delete mode 100644 storage/blockchain/DESIGN.md diff --git a/books/architecture/src/SUMMARY.md b/books/architecture/src/SUMMARY.md index d97d223..bf66860 100644 --- a/books/architecture/src/SUMMARY.md +++ b/books/architecture/src/SUMMARY.md @@ -27,11 +27,37 @@ --- -- [⚪️ Storage](storage/intro.md) - - [⚪️ Database abstraction](storage/database-abstraction.md) - - [⚪️ Blockchain](storage/blockchain.md) - - [⚪️ Transaction pool](storage/transaction-pool.md) - - [⚪️ Pruning](storage/pruning.md) +- [🟢 Storage](storage/intro.md) + - [🟢 Database 
abstraction](storage/db/intro.md) + - [🟢 Abstraction](storage/db/abstraction/intro.md) + - [🟢 Backend](storage/db/abstraction/backend.md) + - [🟢 ConcreteEnv](storage/db/abstraction/concrete_env.md) + - [🟢 Trait](storage/db/abstraction/trait.md) + - [🟢 Syncing](storage/db/syncing.md) + - [🟢 Resizing](storage/db/resizing.md) + - [🟢 (De)serialization](storage/db/serde.md) + - [🟢 Known issues and tradeoffs](storage/db/issues/intro.md) + - [🟢 Abstracting backends](storage/db/issues/traits.md) + - [🟢 Hot-swap](storage/db/issues/hot-swap.md) + - [🟢 Unaligned bytes](storage/db/issues/unaligned.md) + - [🟢 Endianness](storage/db/issues/endian.md) + - [🟢 Multimap](storage/db/issues/multimap.md) + - [🟢 Common behavior](storage/common/intro.md) + - [🟢 Types](storage/common/types.md) + - [🟢 `ops`](storage/common/ops.md) + - [🟢 `tower::Service`](storage/common/service/intro.md) + - [🟢 Initialization](storage/common/service/initialization.md) + - [🟢 Requests](storage/common/service/requests.md) + - [🟢 Responses](storage/common/service/responses.md) + - [🟢 Resizing](storage/common/service/resizing.md) + - [🟢 Thread model](storage/common/service/thread-model.md) + - [🟢 Shutdown](storage/common/service/shutdown.md) + - [🟢 Blockchain](storage/blockchain/intro.md) + - [🟢 Schema](storage/blockchain/schema/intro.md) + - [🟢 Tables](storage/blockchain/schema/tables.md) + - [🟢 Multimap tables](storage/blockchain/schema/multimap.md) + - [⚪️ Transaction pool](storage/txpool/intro.md) + - [⚪️ Pruning](storage/pruning/intro.md) --- diff --git a/books/architecture/src/storage/blockchain.md b/books/architecture/src/storage/blockchain.md deleted file mode 100644 index 6046687..0000000 --- a/books/architecture/src/storage/blockchain.md +++ /dev/null @@ -1 +0,0 @@ -# ⚪️ Blockchain diff --git a/books/architecture/src/storage/blockchain/intro.md b/books/architecture/src/storage/blockchain/intro.md new file mode 100644 index 0000000..9d35fca --- /dev/null +++ 
b/books/architecture/src/storage/blockchain/intro.md @@ -0,0 +1,3 @@ +# Blockchain +This section contains storage information specific to [`cuprate_blockchain`](https://doc.cuprate.org/cuprate_blockchain), +the database built on-top of [`cuprate_database`](https://doc.cuprate.org/cuprate_database) that stores the blockchain. diff --git a/books/architecture/src/storage/blockchain/schema/intro.md b/books/architecture/src/storage/blockchain/schema/intro.md new file mode 100644 index 0000000..3bd825f --- /dev/null +++ b/books/architecture/src/storage/blockchain/schema/intro.md @@ -0,0 +1,2 @@ +# Schema +This section contains the schema of `cuprate_blockchain`'s database tables. \ No newline at end of file diff --git a/books/architecture/src/storage/blockchain/schema/multimap.md b/books/architecture/src/storage/blockchain/schema/multimap.md new file mode 100644 index 0000000..2a4c6eb --- /dev/null +++ b/books/architecture/src/storage/blockchain/schema/multimap.md @@ -0,0 +1,45 @@ +# Multimap tables +## Outputs +When referencing outputs, Monero will [use the amount and the amount index](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/blockchain_db/lmdb/db_lmdb.cpp#L3447-L3449). This means 2 keys are needed to reach an output. + +With LMDB you can set the `DUP_SORT` flag on a table and then set the key/value to: +```rust +Key = KEY_PART_1 +``` +```rust +Value = { + KEY_PART_2, + VALUE // The actual value we are storing. +} +``` + +Then you can set a custom value sorting function that only takes `KEY_PART_2` into account; this is how `monerod` does it. 
+ +This requires that the underlying database supports: +- multimap tables +- custom sort functions on values +- setting a cursor on a specific key/value + +## How `cuprate_blockchain` does it +Another way to implement this is as follows: +```rust +Key = { KEY_PART_1, KEY_PART_2 } +``` +```rust +Value = VALUE +``` + +Then the key type is simply used to look up the value; this is how `cuprate_blockchain` does it +as [`cuprate_database` does not have a multimap abstraction (yet)](../../db/issues/multimap.md). + +For example, the key/value pair for outputs is: +```rust +PreRctOutputId => Output +``` +where `PreRctOutputId` looks like this: +```rust +struct PreRctOutputId { + amount: u64, + amount_index: u64, +} +``` \ No newline at end of file diff --git a/books/architecture/src/storage/blockchain/schema/tables.md b/books/architecture/src/storage/blockchain/schema/tables.md new file mode 100644 index 0000000..15e0c63 --- /dev/null +++ b/books/architecture/src/storage/blockchain/schema/tables.md @@ -0,0 +1,39 @@ +# Tables + +> See also: & . + +The `CamelCase` names of the table headers documented here (e.g. `TxIds`) are the actual type name of the table within `cuprate_blockchain`. + +Note that words written within `code blocks` mean that it is a real type defined and usable within `cuprate_blockchain`. Other standard types like u64 and type aliases (TxId) are written normally. + +Within `cuprate_blockchain::tables`, the below table is essentially defined as-is with [a macro](https://github.com/Cuprate/cuprate/blob/31ce89412aa174fc33754f22c9a6d9ef5ddeda28/database/src/tables.rs#L369-L470). + +Many of the data types stored are the same data types, although are different semantically, as such, a map of aliases used and their real data types is also provided below. 
+ +| Alias | Real Type | +|----------------------------------------------------|-----------| +| BlockHeight, Amount, AmountIndex, TxId, UnlockTime | u64 +| BlockHash, KeyImage, TxHash, PrunableHash | [u8; 32] + +--- + +| Table | Key | Value | Description | +|--------------------|----------------------|-------------------------|-------------| +| `BlockHeaderBlobs` | BlockHeight | `StorableVec` | Maps a block's height to a serialized byte form of its header +| `BlockTxsHashes` | BlockHeight | `StorableVec<[u8; 32]>` | Maps a block's height to the block's transaction hashes +| `BlockHeights` | BlockHash | BlockHeight | Maps a block's hash to its height +| `BlockInfos` | BlockHeight | `BlockInfo` | Contains metadata of all blocks +| `KeyImages` | KeyImage | () | This table is a set with no value, it stores transaction key images +| `NumOutputs` | Amount | u64 | Maps an output's amount to the number of outputs with that amount +| `Outputs` | `PreRctOutputId` | `Output` | This table contains legacy CryptoNote outputs which have clear amounts. This table will not contain an output with 0 amount. 
+| `PrunedTxBlobs` | TxId | `StorableVec` | Contains pruned transaction blobs (even if the database is not pruned) +| `PrunableTxBlobs` | TxId | `StorableVec` | Contains the prunable part of a transaction +| `PrunableHashes` | TxId | PrunableHash | Contains the hash of the prunable part of a transaction +| `RctOutputs` | AmountIndex | `RctOutput` | Contains RingCT outputs mapped from their global RCT index +| `TxBlobs` | TxId | `StorableVec` | Serialized transaction blobs (bytes) +| `TxIds` | TxHash | TxId | Maps a transaction's hash to its index/ID +| `TxHeights` | TxId | BlockHeight | Maps a transaction's ID to the height of the block it comes from +| `TxOutputs` | TxId | `StorableVec` | Gives the amount indices of a transaction's outputs +| `TxUnlockTime` | TxId | UnlockTime | Stores the unlock time of a transaction (only if it has a non-zero lock time) + + \ No newline at end of file diff --git a/books/architecture/src/storage/common/intro.md b/books/architecture/src/storage/common/intro.md new file mode 100644 index 0000000..a772d87 --- /dev/null +++ b/books/architecture/src/storage/common/intro.md @@ -0,0 +1,9 @@ +# Common behavior +The crates that build on-top of the database abstraction ([`cuprate_database`](https://doc.cuprate.org/cuprate_database)) +share some common behavior including but not limited to: + +- Defining their specific database tables and types +- Having an `ops` module +- Exposing a `tower::Service` API (backed by a threadpool) for public usage + +This section provides more details on these behaviors. 
\ No newline at end of file diff --git a/books/architecture/src/storage/common/ops.md b/books/architecture/src/storage/common/ops.md new file mode 100644 index 0000000..3a4e617 --- /dev/null +++ b/books/architecture/src/storage/common/ops.md @@ -0,0 +1,21 @@ +# `ops` +Both [`cuprate_blockchain`](https://doc.cuprate.org/cuprate_blockchain) +and [`cuprate_txpool`](https://doc.cuprate.org/cuprate_txpool) expose an +`ops` module containing abstracted abstracted Monero-related database operations. + +For example, [`cuprate_blockchain::ops::block::add_block`](https://doc.cuprate.org/cuprate_blockchain/ops/block/fn.add_block.html). + +These functions build on-top of the database traits and allow for more abstracted database operations. + +For example, instead of these signatures: +```rust +fn get(_: &Key) -> Value; +fn put(_: &Key, &Value); +``` +the `ops` module provides much higher-level signatures like such: +```rust +fn add_block(block: &Block) -> Result<_, _>; +``` + +Although these functions are exposed, they are not the main API, that would be next section: +the [`tower::Service`](./service/intro.md) (which uses these functions). \ No newline at end of file diff --git a/books/architecture/src/storage/common/service/initialization.md b/books/architecture/src/storage/common/service/initialization.md new file mode 100644 index 0000000..8350971 --- /dev/null +++ b/books/architecture/src/storage/common/service/initialization.md @@ -0,0 +1,9 @@ +# Initialization +A database service is started simply by calling: [`init()`](https://doc.cuprate.org/cuprate_blockchain/service/fn.init.html). + +This function initializes the database, spawns threads, and returns a: +- Read handle to the database +- Write handle to the database +- The database itself + +These handles implement the `tower::Service` trait, which allows sending requests and receiving responses `async`hronously. 
\ No newline at end of file diff --git a/books/architecture/src/storage/common/service/intro.md b/books/architecture/src/storage/common/service/intro.md new file mode 100644 index 0000000..bba7486 --- /dev/null +++ b/books/architecture/src/storage/common/service/intro.md @@ -0,0 +1,65 @@ +# tower::Service +Both [`cuprate_blockchain`](https://doc.cuprate.org/cuprate_blockchain) +and [`cuprate_txpool`](https://doc.cuprate.org/cuprate_txpool) provide +`async` [`tower::Service`](https://docs.rs/tower)s that define database requests/responses. + +The main API that other Cuprate crates use. + +There are 2 `tower::Service`s: +1. A read service which is backed by a [`rayon::ThreadPool`](https://docs.rs/rayon) +1. A write service which spawns a single thread to handle write requests + +As this behavior is the same across all users of [`cuprate_database`](https://doc.cuprate.org/cuprate_database), +it is extracted into its own crate: [`cuprate_database_service`](https://doc.cuprate.org/cuprate_database_service). + +## Diagram +As a recap, here is how this looks to a user of a higher-level database crate, +`cuprate_blockchain` in this example. Starting from the lowest layer: + +1. `cuprate_database` is used to abstract the database +1. `cuprate_blockchain` builds on-top of that with tables, types, operations +1. `cuprate_blockchain` exposes a `tower::Service` using `cuprate_database_service` +1. The user now interfaces with `cuprate_blockchain` with that `tower::Service` in a request/response fashion + +``` + ┌──────────────────┐ + │ cuprate_database │ + └────────┬─────────┘ +┌─────────────────────────────────┴─────────────────────────────────┐ +│ cuprate_blockchain │ +│ │ +│ ┌──────────────────────┐ ┌─────────────────────────────────────┐ │ +│ │ Tables, types │ │ ops │ │ +│ │ ┌───────────┐┌─────┐ │ │ ┌─────────────┐ ┌──────────┐┌─────┐ │ │ +│ │ │ BlockInfo ││ ... │ ├──┤ │ add_block() │ │ add_tx() ││ ... 
│ │ │ +│ │ └───────────┘└─────┘ │ │ └─────────────┘ └──────────┘└─────┘ │ │ +│ └──────────────────────┘ └─────┬───────────────────────────────┘ │ +│ │ │ +│ ┌─────────┴───────────────────────────────┐ │ +│ │ tower::Service │ │ +│ │ ┌──────────────────────────────┐┌─────┐ │ │ +│ │ │ Blockchain{Read,Write}Handle ││ ... │ │ │ +│ │ └──────────────────────────────┘└─────┘ │ │ +│ └─────────┬───────────────────────────────┘ │ +│ │ │ +└─────────────────────────────────┼─────────────────────────────────┘ + │ + ┌─────┴─────┐ + ┌────────────────────┴────┐ ┌────┴──────────────────────────────────┐ + │ Database requests │ │ Database responses │ + │ ┌─────────────────────┐ │ │ ┌───────────────────────────────────┐ │ + │ │ FindBlock([u8; 32]) │ │ │ │ FindBlock(Option<(Chain, usize)>) │ │ + │ └─────────────────────┘ │ │ └───────────────────────────────────┘ │ + │ ┌─────────────────────┐ │ │ ┌───────────────────────────────────┐ │ + │ │ ChainHeight │ │ │ │ ChainHeight(usize, [u8; 32]) │ │ + │ └─────────────────────┘ │ │ └───────────────────────────────────┘ │ + │ ┌─────────────────────┐ │ │ ┌───────────────────────────────────┐ │ + │ │ ... │ │ │ │ ... │ │ + │ └─────────────────────┘ │ │ └───────────────────────────────────┘ │ + └─────────────────────────┘ └───────────────────────────────────────┘ + ▲ │ + │ ▼ + ┌─────────────────────────┐ + │ cuprate_blockchain user │ + └─────────────────────────┘ +``` \ No newline at end of file diff --git a/books/architecture/src/storage/common/service/requests.md b/books/architecture/src/storage/common/service/requests.md new file mode 100644 index 0000000..9157359 --- /dev/null +++ b/books/architecture/src/storage/common/service/requests.md @@ -0,0 +1,8 @@ +# Requests +Along with the 2 handles, there are 2 types of requests: +- Read requests, e.g. [`BlockchainReadRequest`](https://doc.cuprate.org/cuprate_types/blockchain/enum.BlockchainReadRequest.html) +- Write requests, e.g. 
[`BlockchainWriteRequest`](https://doc.cuprate.org/cuprate_types/blockchain/enum.BlockchainWriteRequest.html) + +Quite obviously: +- Read requests are for retrieving various data from the database +- Write requests are for writing data to the database \ No newline at end of file diff --git a/books/architecture/src/storage/common/service/resizing.md b/books/architecture/src/storage/common/service/resizing.md new file mode 100644 index 0000000..13cd3b4 --- /dev/null +++ b/books/architecture/src/storage/common/service/resizing.md @@ -0,0 +1,15 @@ +# Resizing +As noted in the [`cuprate_database` resizing section](../../db/resizing.md), +builders on-top of `cuprate_database` are responsible for resizing the database. + +In `cuprate_{blockchain,txpool}`'s case, that means the `tower::Service` must know +how to resize. This logic is shared between both crates, defined in `cuprate_database_service`: +. + +By default, this uses a _similar_ algorithm as `monerod`'s: + +- [If there's not enough space to fit a write request's data](https://github.com/Cuprate/cuprate/blob/0941f68efcd7dfe66124ad0c1934277f47da9090/storage/service/src/service/write.rs#L130), start a resize +- Each resize adds around [`1,073,745,920`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L104-L160) bytes to the current map size +- A resize will be [attempted `3` times](https://github.com/Cuprate/cuprate/blob/0941f68efcd7dfe66124ad0c1934277f47da9090/storage/service/src/service/write.rs#L110) before failing + +There are other [resizing algorithms](https://doc.cuprate.org/cuprate_database/resize/enum.ResizeAlgorithm.html) that define how the database's memory map grows, although currently the behavior of `monerod` is closely followed (for no particular reason). 
\ No newline at end of file diff --git a/books/architecture/src/storage/common/service/responses.md b/books/architecture/src/storage/common/service/responses.md new file mode 100644 index 0000000..c03b42f --- /dev/null +++ b/books/architecture/src/storage/common/service/responses.md @@ -0,0 +1,18 @@ +# Responses +After sending a request using the read/write handle, the value returned is _not_ the response, yet an `async`hronous channel that will eventually return the response: +```rust,ignore +// Send a request. +// tower::Service::call() +// V +let response_channel: Channel = read_handle.call(BlockchainReadRequest::ChainHeight)?; + +// Await the response. +let response: BlockchainReadRequest = response_channel.await?; +``` + +After `await`ing the returned channel, a `Response` will eventually be returned when +the `Service` threadpool has fetched the value from the database and sent it off. + +Both read/write requests variants match in name with `Response` variants, i.e. +- `BlockchainReadRequest::ChainHeight` leads to `BlockchainResponse::ChainHeight` +- `BlockchainWriteRequest::WriteBlock` leads to `BlockchainResponse::WriteBlockOk` diff --git a/books/architecture/src/storage/common/service/shutdown.md b/books/architecture/src/storage/common/service/shutdown.md new file mode 100644 index 0000000..4f9890e --- /dev/null +++ b/books/architecture/src/storage/common/service/shutdown.md @@ -0,0 +1,4 @@ +# Shutdown +Once the read/write handles to the `tower::Service` are `Drop`ed, the backing thread(pool) will gracefully exit, automatically. + +Note the writer thread and reader threadpool aren't connected whatsoever; dropping the write handle will make the writer thread exit, however, the reader handle is free to be held onto and can be continued to be read from - and vice-versa for the write handle. 
diff --git a/books/architecture/src/storage/common/service/thread-model.md b/books/architecture/src/storage/common/service/thread-model.md new file mode 100644 index 0000000..b69d62c --- /dev/null +++ b/books/architecture/src/storage/common/service/thread-model.md @@ -0,0 +1,23 @@ +# Thread model +The base database abstractions themselves are not concerned with parallelism, they are mostly functions to be called from a single-thread. + +However, the `cuprate_database_service` API, _does_ have a thread model backing it. + +When a `Service`'s init() function is called, threads will be spawned and +maintained until the user drops (disconnects) the returned handles. + +The current behavior for thread count is: +- [1 writer thread](https://github.com/Cuprate/cuprate/blob/0941f68efcd7dfe66124ad0c1934277f47da9090/storage/service/src/service/write.rs#L48-L52) +- [As many reader threads as there are system threads](https://github.com/Cuprate/cuprate/blob/0941f68efcd7dfe66124ad0c1934277f47da9090/storage/service/src/reader_threads.rs#L44-L49) + +For example, on a system with 32-threads, `cuprate_database_service` will spawn: +- 1 writer thread +- 32 reader threads + +whose sole responsibility is to listen for database requests, access the database (potentially in parallel), and return a response. + +Note that the `1 system thread = 1 reader thread` model is only the default setting, the reader thread count can be configured by the user to be any number between `1 .. amount_of_system_threads`. + +The reader threads are managed by [`rayon`](https://docs.rs/rayon). + +For an example of where multiple reader threads are used: given a request that asks if any key-image within a set already exists, `cuprate_blockchain` will [split that work between the threads with `rayon`](https://github.com/Cuprate/cuprate/blob/0941f68efcd7dfe66124ad0c1934277f47da9090/storage/blockchain/src/service/read.rs#L400). 
\ No newline at end of file diff --git a/books/architecture/src/storage/common/types.md b/books/architecture/src/storage/common/types.md new file mode 100644 index 0000000..b6f2c6f --- /dev/null +++ b/books/architecture/src/storage/common/types.md @@ -0,0 +1,21 @@ +# Types +## POD types +Since [all types in the database are POD types](../db/serde.md), we must often +provide mappings between outside types and the types actually stored in the database. + +A common case is mapping infallible types to and from [`bitflags`](https://docs.rs/bitflags) and/or their raw integer representation. +For example, the [`OutputFlags`](https://doc.cuprate.org/cuprate_blockchain/types/struct.OutputFlags.html) type or `bool` types. + +As types like `enum`s, `bool`s and `char`s cannot be cast from an integer infallibly, +`bytemuck::Pod` cannot be implemented on them safely. Thus, we store some infallible version +of them inside the database with a custom type and map them when fetching the data. + +## Lean types +Another reason why database crates define their own types is +to cut any unneeded data from the type. + +Many of the types used in normal operation (e.g. [`cuprate_types::VerifiedBlockInformation`](https://doc.cuprate.org/cuprate_types/struct.VerifiedBlockInformation.html)) contain lots of extra pre-processed data for convenience. + +This would be a waste to store in the database, so in this example, the much leaner +"raw" [`BlockInfo`](https://doc.cuprate.org/cuprate_blockchain/types/struct.BlockInfo.html) +type is stored. 
diff --git a/books/architecture/src/storage/database-abstraction.md b/books/architecture/src/storage/database-abstraction.md deleted file mode 100644 index b21a192..0000000 --- a/books/architecture/src/storage/database-abstraction.md +++ /dev/null @@ -1 +0,0 @@ -# ⚪️ Database abstraction diff --git a/books/architecture/src/storage/db/abstraction/backend.md b/books/architecture/src/storage/db/abstraction/backend.md new file mode 100644 index 0000000..02e796a --- /dev/null +++ b/books/architecture/src/storage/db/abstraction/backend.md @@ -0,0 +1,50 @@ +# Backend +First, we need an actual database implementation. + +`cuprate-database`'s `trait`s allow abstracting over the actual database, such that any backend in particular could be used. + +This page is an enumeration of all the backends Cuprate has, has tried, and may try in the future. + +## `heed` +The default database used is [`heed`](https://github.com/meilisearch/heed) (LMDB). The upstream versions from [`crates.io`](https://crates.io/crates/heed) are used. `LMDB` should not need to be installed as `heed` has a build script that pulls it in automatically. + +`heed`'s filenames inside Cuprate's data folder are: + +| Filename | Purpose | +|------------|---------| +| `data.mdb` | Main data file +| `lock.mdb` | Database lock file + +`heed`-specific notes: +- [There is a maximum reader limit](https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1372). Other potential processes (e.g. `xmrblocks`) that are also reading the `data.mdb` file need to be accounted for +- [LMDB does not work on remote filesystem](https://github.com/LMDB/lmdb/blob/b8e54b4c31378932b69f1298972de54a565185b1/libraries/liblmdb/lmdb.h#L129) + +## `redb` +The 2nd database backend is the 100% Rust [`redb`](https://github.com/cberner/redb). + +The upstream versions from [`crates.io`](https://crates.io/crates/redb) are used. 
+ +`redb`'s filenames inside Cuprate's data folder are: + +| Filename | Purpose | +|-------------|---------| +| `data.redb` | Main data file + + + +## `redb-memory` +This backend is 100% the same as `redb`, although it uses [`redb::backend::InMemoryBackend`](https://docs.rs/redb/2.1.2/redb/backends/struct.InMemoryBackend.html) which is a database that completely resides in memory instead of a file. + +All other details about this should be the same as the normal `redb` backend. + +## `sanakirja` +[`sanakirja`](https://docs.rs/sanakirja) was a candidate as a backend, however there were problems with maximum value sizes. + +The default maximum value size is [1012 bytes](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.Storable.html) which was too small for our requirements. Using [`sanakirja::Slice`](https://docs.rs/sanakirja/1.4.1/sanakirja/union.Slice.html) and [`sanakirja::UnsizedStorable`](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.UnsizedStorable.html) was attempted, but there were bugs found when inserting a value in-between `512..=4096` bytes. + +As such, it is not implemented. + +## `MDBX` +[`MDBX`](https://erthink.github.io/libmdbx) was a candidate as a backend, however MDBX deprecated the custom key/value comparison functions, this makes it a bit trickier to implement multimap tables. It is also quite similar to the main backend LMDB (of which it was originally a fork). + +As such, it is not implemented (yet). diff --git a/books/architecture/src/storage/db/abstraction/concrete_env.md b/books/architecture/src/storage/db/abstraction/concrete_env.md new file mode 100644 index 0000000..059358e --- /dev/null +++ b/books/architecture/src/storage/db/abstraction/concrete_env.md @@ -0,0 +1,15 @@ +# `ConcreteEnv` +After a backend is selected, the main database environment struct is "abstracted" by putting it in the non-generic, concrete [`struct ConcreteEnv`](https://doc.cuprate.org/cuprate_database/struct.ConcreteEnv.html). 
+ +This is the main object used when handling the database directly. + +This struct contains all the data necessary to operate the database. +The actual database backend `ConcreteEnv` will use internally [depends on which backend feature is used](https://github.com/Cuprate/cuprate/blob/0941f68efcd7dfe66124ad0c1934277f47da9090/storage/database/src/backend/mod.rs#L3-L13). + +`ConcreteEnv` itself is not too important, what is important is that: +1. It allows callers to not directly reference any particular backend environment +1. It implements [`trait Env`](https://doc.cuprate.org/cuprate_database/trait.Env.html) which opens the door to all the other database traits + +The equivalent "database environment" objects in the backends themselves are: +- [`heed::Env`](https://docs.rs/heed/0.20.0/heed/struct.Env.html) +- [`redb::Database`](https://docs.rs/redb/2.1.0/redb/struct.Database.html) \ No newline at end of file diff --git a/books/architecture/src/storage/db/abstraction/intro.md b/books/architecture/src/storage/db/abstraction/intro.md new file mode 100644 index 0000000..34a4320 --- /dev/null +++ b/books/architecture/src/storage/db/abstraction/intro.md @@ -0,0 +1,33 @@ +# Abstraction +This next section details how `cuprate_database` abstracts multiple database backends into 1 API. + +## Diagram +A simple diagram describing the responsibilities/relationship of `cuprate_database`. + +```text +┌───────────────────────────────────────────────────────────────────────┐ +│ cuprate_database │ +│ │ +│ ┌───────────────────────────┐ ┌─────────────────────────────────┐ │ +│ │ Database traits │ │ Backends │ │ +│ │ ┌─────┐┌──────┐┌────────┐ │ │ ┌─────────────┐ ┌─────────────┐ │ │ +│ │ │ Env ││ TxRw ││ ... 
│ ├─────┤ │ heed (LMDB) │ │ redb │ │ │ +│ │ └─────┘└──────┘└────────┘ │ │ └─────────────┘ └─────────────┘ │ │ +│ └──────────┬─────────────┬──┘ └──┬──────────────────────────────┘ │ +│ │ └─────┬─────┘ │ +│ │ ┌─────────┴──────────────┐ │ +│ │ │ Database types │ │ +│ │ │ ┌─────────────┐┌─────┐ │ │ +│ │ │ │ ConcreteEnv ││ ... │ │ │ +│ │ │ └─────────────┘└─────┘ │ │ +│ │ └─────────┬──────────────┘ │ +│ │ │ │ +└────────────┼───────────────────┼──────────────────────────────────────┘ + │ │ + └───────────────────┤ + │ + ▼ + ┌───────────────────────┐ + │ cuprate_database user │ + └───────────────────────┘ +``` \ No newline at end of file diff --git a/books/architecture/src/storage/db/abstraction/trait.md b/books/architecture/src/storage/db/abstraction/trait.md new file mode 100644 index 0000000..e7b25d2 --- /dev/null +++ b/books/architecture/src/storage/db/abstraction/trait.md @@ -0,0 +1,49 @@ +# Trait +`cuprate_database` provides a set of `trait`s that abstract over the various database backends. + +This allows the function signatures and behavior to stay the same but allows for swapping out databases in an easier fashion. + +All common behavior of the backend's are encapsulated here and used instead of using the backend directly. + +Examples: +- [`trait Env`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/env.rs) +- [`trait {TxRo, TxRw}`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/transaction.rs) +- [`trait {DatabaseRo, DatabaseRw}`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/database.rs) + +For example, instead of calling `heed` or `redb`'s `get()` function directly, `DatabaseRo::get()` is called. + +## Usage +With a `ConcreteEnv` and a particular backend selected, +we can now start using it alongside these traits to start +doing database operations in a generic manner. 
+ +An example: + +```rust +use cuprate_database::{ + ConcreteEnv, + config::ConfigBuilder, + Env, EnvInner, + DatabaseRo, DatabaseRw, TxRo, TxRw, +}; + +// Initialize the database environment. +let env = ConcreteEnv::open(config)?; + +// Open up a transaction + tables for writing. +let env_inner = env.env_inner(); +let tx_rw = env_inner.tx_rw()?; +env_inner.create_db::
(&tx_rw)?; + +// Write data to the table. +{ + let mut table = env_inner.open_db_rw::
(&tx_rw)?; + table.put(&0, &1)?; +} + +// Commit the transaction. +TxRw::commit(tx_rw)?; +``` + +As seen above, there is no direct call to `heed` or `redb`. +Their functionality is abstracted behind `ConcreteEnv` and the `trait`s. \ No newline at end of file diff --git a/books/architecture/src/storage/db/intro.md b/books/architecture/src/storage/db/intro.md new file mode 100644 index 0000000..5973fbe --- /dev/null +++ b/books/architecture/src/storage/db/intro.md @@ -0,0 +1,23 @@ +# Database abstraction +[`cuprate_database`](https://doc.cuprate.org/cuprate_database) is Cuprate’s database abstraction. + +This crate abstracts various database backends with `trait`s. + +All backends have the following attributes: + +- [Embedded](https://en.wikipedia.org/wiki/Embedded_database) +- [Multiversion concurrency control](https://en.wikipedia.org/wiki/Multiversion_concurrency_control) +- [ACID](https://en.wikipedia.org/wiki/ACID) +- Are `(key, value)` oriented and have the expected API (`get()`, `insert()`, `delete()`) +- Are table oriented (`"table_name" -> (key, value)`) +- Allow concurrent readers + +The currently implemented backends are: +- [`heed`](https://github.com/meilisearch/heed) (LMDB) +- [`redb`](https://github.com/cberner/redb) + +Said precisely, `cuprate_database` is the embedded database other Cuprate +crates interact with instead of using any particular backend implementation. +This allows the backend to be swapped and/or future backends to be implemented. + +This section will go over `cuprate_database` details. \ No newline at end of file diff --git a/books/architecture/src/storage/db/issues/endian.md b/books/architecture/src/storage/db/issues/endian.md new file mode 100644 index 0000000..577e586 --- /dev/null +++ b/books/architecture/src/storage/db/issues/endian.md @@ -0,0 +1,6 @@ +# Endianness +`cuprate_database`'s (de)serialization and storage of bytes are native-endian, as in, byte storage order will depend on the machine it is running on. 
+ +As Cuprate's build-targets are all little-endian ([big-endian by default machines barely exist](https://en.wikipedia.org/wiki/Endianness#Hardware)), this doesn't matter much and the byte ordering can be seen as a constant. + +Practically, this means `cuprated`'s database files can be transferred across computers, as can `monerod`'s. \ No newline at end of file diff --git a/books/architecture/src/storage/db/issues/hot-swap.md b/books/architecture/src/storage/db/issues/hot-swap.md new file mode 100644 index 0000000..aebfe20 --- /dev/null +++ b/books/architecture/src/storage/db/issues/hot-swap.md @@ -0,0 +1,17 @@ +# Hot-swappable backends +> See also: . + +Using a different backend is really as simple as re-building `cuprate_database` with a different feature flag: +```bash +# Use LMDB. +cargo build --package cuprate-database --features heed + +# Use redb. +cargo build --package cuprate-database --features redb +``` + +This is "good enough" for now, however ideally, this hot-swapping of backends would be able to be done at _runtime_. + +As it is now, `cuprate_database` cannot compile both backends and swap based on user input at runtime; it must be compiled with a certain backend, which will produce a binary with only that backend. + +This also means things like [CI testing multiple backends is awkward](https://github.com/Cuprate/cuprate/blob/main/.github/workflows/ci.yml#L132-L136), as we must re-compile with different feature flags instead. 
\ No newline at end of file diff --git a/books/architecture/src/storage/db/issues/intro.md b/books/architecture/src/storage/db/issues/intro.md new file mode 100644 index 0000000..eee4981 --- /dev/null +++ b/books/architecture/src/storage/db/issues/intro.md @@ -0,0 +1,7 @@ +# Known issues and tradeoffs +`cuprate_database` takes many tradeoffs, whether due to: +- Prioritizing certain values over others +- Not having a better solution +- Being "good enough" + +This section is a list of the larger ones, along with issues that don't have answers yet. \ No newline at end of file diff --git a/books/architecture/src/storage/db/issues/multimap.md b/books/architecture/src/storage/db/issues/multimap.md new file mode 100644 index 0000000..7e43ce1 --- /dev/null +++ b/books/architecture/src/storage/db/issues/multimap.md @@ -0,0 +1,22 @@ +# Multimap +`cuprate_database` does not currently have an abstraction for [multimap tables](https://en.wikipedia.org/wiki/Multimap). + +All tables are single maps of keys to values. + +This matters as this means some of `cuprate_blockchain`'s tables differ from `monerod`'s tables - the primary key is stored _for all_ entries, compared to `monerod` only needing to store it once: + +```rust +// `monerod` only stores `amount: 1` once, +// `cuprated` stores it each time it appears. +struct PreRctOutputId { amount: 1, amount_index: 0 } +struct PreRctOutputId { amount: 1, amount_index: 1 } +``` + +This means `cuprated`'s database will be slightly larger than `monerod`'s. + +The current method `cuprate_blockchain` uses will be "good enough" as the multimap +keys needed for now are fixed, e.g. pre-RCT outputs are no longer being produced. + +This may need to change in the future when multimap is all but required, e.g. for FCMP++. + +Until then, multimap tables are not implemented as they are tricky to implement across all backends. 
\ No newline at end of file diff --git a/books/architecture/src/storage/db/issues/traits.md b/books/architecture/src/storage/db/issues/traits.md new file mode 100644 index 0000000..9cf66e4 --- /dev/null +++ b/books/architecture/src/storage/db/issues/traits.md @@ -0,0 +1,15 @@ +# Traits abstracting backends +Although all database backends used are very similar, they have some crucial differences in small implementation details that must be worked around when conforming them to `cuprate_database`'s traits. + +Put simply: using `cuprate_database`'s traits is less efficient and more awkward than using the backend directly. + +For example: +- [Data types must be wrapped in compatibility layers when they otherwise wouldn't be](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/backend/heed/env.rs#L101-L116) +- [There are types that only apply to a specific backend, but are visible to all](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/error.rs#L86-L89) +- [There are extra layers of abstraction to smoothen the differences between all backends](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/env.rs#L62-L68) +- [Existing functionality of backends must be taken away, as it isn't supported in the others](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/database.rs#L27-L34) + +This is a _tradeoff_ that `cuprate_database` takes, as: +- The backend itself is usually not the source of bottlenecks in the greater system, as such, small inefficiencies are OK +- None of the lost functionality is crucial for operation +- The ability to use, test, and swap between multiple database backends is [worth it](https://github.com/Cuprate/cuprate/pull/35#issuecomment-1952804393) diff --git a/books/architecture/src/storage/db/issues/unaligned.md b/books/architecture/src/storage/db/issues/unaligned.md new file mode 
100644 index 0000000..3c45c19 --- /dev/null +++ b/books/architecture/src/storage/db/issues/unaligned.md @@ -0,0 +1,24 @@ +# Copying unaligned bytes +As mentioned in [`(De)serialization`](../serde.md), bytes are _copied_ when they are turned into a type `T` due to unaligned bytes being returned from database backends. + +Using a regular reference cast results in an improperly aligned type `T`; [such a type even existing causes undefined behavior](https://doc.rust-lang.org/reference/behavior-considered-undefined.html). In our case, `bytemuck` saves us by panicking before this occurs. + +Thus, when using `cuprate_database`'s database traits, an _owned_ `T` is returned. + +This is doubly unfortunate for `&[u8]` as this does not even need deserialization. + +For example, `StorableVec` could have been this: +```rust +enum StorableBytes<'a, T: Storable> { + Owned(T), + Ref(&'a T), +} +``` +but this would require supporting types that must be copied regardless with the occasional `&[u8]` that can be returned without casting. This was hard to do in a generic way, thus all `[u8]`'s are copied and returned as owned `StorableVec`s. + +This is a _tradeoff_ `cuprate_database` takes as: +- `bytemuck::pod_read_unaligned` is cheap enough +- The main API, `service`, needs to return owned values anyway +- Having no references removes a lot of lifetime complexity + +The alternative is somehow fixing the alignment issues in the backends mentioned previously. \ No newline at end of file diff --git a/books/architecture/src/storage/db/resizing.md b/books/architecture/src/storage/db/resizing.md new file mode 100644 index 0000000..ebf989e --- /dev/null +++ b/books/architecture/src/storage/db/resizing.md @@ -0,0 +1,8 @@ +# Resizing +`cuprate_database` itself does not handle memory map resizes automatically +(for database backends that need resizing, i.e. heed/LMDB). + +When a user is directly using `cuprate_database`, it is up to them how to resize. 
The database will return [`RuntimeError::ResizeNeeded`](https://doc.cuprate.org/cuprate_database/enum.RuntimeError.html#variant.ResizeNeeded) when it needs resizing. + +However, `cuprate_database` exposes some [resizing algorithms](https://doc.cuprate.org/cuprate_database/resize/index.html) +that define how the database's memory map grows. \ No newline at end of file diff --git a/books/architecture/src/storage/db/serde.md b/books/architecture/src/storage/db/serde.md new file mode 100644 index 0000000..de17f30 --- /dev/null +++ b/books/architecture/src/storage/db/serde.md @@ -0,0 +1,44 @@ +# (De)serialization +All types stored inside the database are either bytes already or are perfectly bitcast-able. + +As such, they do not incur heavy (de)serialization costs when storing/fetching them from the database. The main (de)serialization used is [`bytemuck`](https://docs.rs/bytemuck)'s traits and casting functions. + +## Size and layout +The size & layout of types is stable across compiler versions, as they are set and determined with [`#[repr(C)]`](https://doc.rust-lang.org/nomicon/other-reprs.html#reprc) and `bytemuck`'s derive macros such as [`bytemuck::Pod`](https://docs.rs/bytemuck/latest/bytemuck/derive.Pod.html). + +Note that the data stored in the tables are still type-safe; we still refer to the key and values within our tables by the type. + +## How +The main deserialization `trait` for database storage is [`Storable`](https://doc.cuprate.org/cuprate_database/trait.Storable.html). + +- Before storage, the type is [simply cast into bytes](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L125) +- When fetching, the bytes are [simply cast into the type](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L130) + +When a type is casted into bytes, [the reference is casted](https://docs.rs/bytemuck/latest/bytemuck/fn.bytes_of.html), i.e. 
this is zero-cost serialization. + +However, it is worth noting that when bytes are casted into the type, [it is copied](https://docs.rs/bytemuck/latest/bytemuck/fn.pod_read_unaligned.html). This is due to byte alignment guarantee issues with both backends, see: +- +- + +Without this, `bytemuck` will panic with [`TargetAlignmentGreaterAndInputNotAligned`](https://docs.rs/bytemuck/latest/bytemuck/enum.PodCastError.html#variant.TargetAlignmentGreaterAndInputNotAligned) when casting. + +Copying the bytes fixes this problem, although it is more costly than necessary. However, in the main use-case for `cuprate_database` (`tower::Service` API) the bytes would need to be owned regardless as the `Request/Response` API uses owned data types (`T`, `Vec`, `HashMap`, etc). + +Practically speaking, this means lower-level database functions that normally look like such: +```rust +fn get(key: &Key) -> &Value; +``` +end up looking like this in `cuprate_database`: +```rust +fn get(key: &Key) -> Value; +``` + +Since each backend has its own (de)serialization methods, our types are wrapped in compatibility types that map our `Storable` functions into whatever is required for the backend, e.g: +- [`StorableHeed`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/backend/heed/storable.rs#L11-L45) +- [`StorableRedb`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/backend/redb/storable.rs#L11-L30) + +Compatibility structs also exist for any `Storable` containers: +- [`StorableVec`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L135-L191) +- [`StorableBytes`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L208-L241) + +Again, it's unfortunate that these must be owned, although in the `tower::Service` use-case, they would have to be owned anyway. 
\ No newline at end of file diff --git a/books/architecture/src/storage/db/syncing.md b/books/architecture/src/storage/db/syncing.md new file mode 100644 index 0000000..3f3444e --- /dev/null +++ b/books/architecture/src/storage/db/syncing.md @@ -0,0 +1,17 @@ +# Syncing +`cuprate_database`'s database has 5 disk syncing modes. + +1. `FastThenSafe` +1. `Safe` +1. `Async` +1. `Threshold` +1. `Fast` + +The default mode is `Safe`. + +This means that upon each transaction commit, all the data that was written will be fully synced to disk. +This is the slowest, but safest mode of operation. + +Note that upon any database `Drop`, the current implementation will sync to disk regardless of any configuration. + +For more information on the other modes, read the documentation [here](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/config/sync_mode.rs#L63-L144). diff --git a/books/architecture/src/storage/intro.md b/books/architecture/src/storage/intro.md index 214cf15..a28a017 100644 --- a/books/architecture/src/storage/intro.md +++ b/books/architecture/src/storage/intro.md @@ -1 +1,34 @@ -# ⚪️ Storage +# Storage +This section covers all things related to the on-disk storage of data within Cuprate. + +## Overview +The quick overview is that Cuprate has a [database abstraction crate](./database-abstraction.md) +that handles "low-level" database details such as key and value (de)serialization, tables, transactions, etc. + +This database abstraction crate is then used by all crates that need on-disk storage, i.e. the +- [Blockchain database](./blockchain/intro.md) +- [Transaction pool database](./txpool/intro.md) + +## Service +The interface provided by all crates building on-top of the +database abstraction is a [`tower::Service`](https://docs.rs/tower), i.e. +database requests/responses are sent/received asynchronously. 
+ +As the interface details are similar across crates (threadpool, read operations, write operations), +the interface itself is abstracted in the [`cuprate_database_service`](./common/service/intro.md) crate, +which is then used by the crates. + +## Diagram +This is roughly how database crates are set up. + +```text + ┌─────────────────┐ +┌──────────────────────────────────┐ │ │ +│ Some crate that needs a database │ ┌────────────────┐ │ │ +│ │ │ Public │ │ │ +│ ┌──────────────────────────────┐ │─►│ tower::Service │◄─►│ Rest of Cuprate │ +│ │ Database abstraction │ │ │ API │ │ │ +│ └──────────────────────────────┘ │ └────────────────┘ │ │ +└──────────────────────────────────┘ │ │ + └─────────────────┘ +``` diff --git a/books/architecture/src/storage/pruning.md b/books/architecture/src/storage/pruning/intro.md similarity index 100% rename from books/architecture/src/storage/pruning.md rename to books/architecture/src/storage/pruning/intro.md diff --git a/books/architecture/src/storage/transaction-pool.md b/books/architecture/src/storage/txpool/intro.md similarity index 100% rename from books/architecture/src/storage/transaction-pool.md rename to books/architecture/src/storage/txpool/intro.md diff --git a/storage/README.md b/storage/README.md index b04d8e7..77a8bcb 100644 --- a/storage/README.md +++ b/storage/README.md @@ -1,5 +1,10 @@ -# storage +# Storage +This subdirectory contains all things related to the on-disk storage of data within Cuprate. -TODO: This subdirectory used to be `database/` and is in the middle of being shifted around. +See for design documentation +and the following links for user documentation: -The old `database/` design document is in `cuprate-blockchain/` which will eventually be ported Cuprate's architecture book. 
+- +- +- +- \ No newline at end of file diff --git a/storage/blockchain/DESIGN.md b/storage/blockchain/DESIGN.md deleted file mode 100644 index 22f729f..0000000 --- a/storage/blockchain/DESIGN.md +++ /dev/null @@ -1,600 +0,0 @@ -# Database -FIXME: This documentation must be updated and moved to the architecture book. - -Cuprate's blockchain implementation. - -- [1. Documentation](#1-documentation) -- [2. File structure](#2-file-structure) - - [2.1 `src/`](#21-src) - - [2.2 `src/backend/`](#22-srcbackend) - - [2.3 `src/config/`](#23-srcconfig) - - [2.4 `src/ops/`](#24-srcops) - - [2.5 `src/service/`](#25-srcservice) -- [3. Backends](#3-backends) - - [3.1 heed](#31-heed) - - [3.2 redb](#32-redb) - - [3.3 redb-memory](#33-redb-memory) - - [3.4 sanakirja](#34-sanakirja) - - [3.5 MDBX](#35-mdbx) -- [4. Layers](#4-layers) - - [4.1 Backend](#41-backend) - - [4.2 Trait](#42-trait) - - [4.3 ConcreteEnv](#43-concreteenv) - - [4.4 ops](#44-ops) - - [4.5 service](#45-service) -- [5. The service](#5-the-service) - - [5.1 Initialization](#51-initialization) - - [5.2 Requests](#53-requests) - - [5.3 Responses](#54-responses) - - [5.4 Thread model](#52-thread-model) - - [5.5 Shutdown](#55-shutdown) -- [6. Syncing](#6-Syncing) -- [7. Resizing](#7-resizing) -- [8. (De)serialization](#8-deserialization) -- [9. Schema](#9-schema) - - [9.1 Tables](#91-tables) - - [9.2 Multimap tables](#92-multimap-tables) -- [10. Known issues and tradeoffs](#10-known-issues-and-tradeoffs) - - [10.1 Traits abstracting backends](#101-traits-abstracting-backends) - - [10.2 Hot-swappable backends](#102-hot-swappable-backends) - - [10.3 Copying unaligned bytes](#103-copying-unaligned-bytes) - - [10.4 Endianness](#104-endianness) - - [10.5 Extra table data](#105-extra-table-data) - ---- - -## 1. 
Documentation -Documentation for `database/` is split into 3 locations: - -| Documentation location | Purpose | -|---------------------------|---------| -| `database/README.md` | High level design of `cuprate-database` -| `cuprate-database` | Practical usage documentation/warnings/notes/etc -| Source file `// comments` | Implementation-specific details (e.g, how many reader threads to spawn?) - -This README serves as the implementation design document. - -For actual practical usage, `cuprate-database`'s types and general usage are documented via standard Rust tooling. - -Run: -```bash -cargo doc --package cuprate-database --open -``` -at the root of the repo to open/read the documentation. - -If this documentation is too abstract, refer to any of the source files, they are heavily commented. There are many `// Regular comments` that explain more implementation specific details that aren't present here or in the docs. Use the file reference below to find what you're looking for. - -The code within `src/` is also littered with some `grep`-able comments containing some keywords: - -| Word | Meaning | -|-------------|---------| -| `INVARIANT` | This code makes an _assumption_ that must be upheld for correctness -| `SAFETY` | This `unsafe` code is okay, for `x,y,z` reasons -| `FIXME` | This code works but isn't ideal -| `HACK` | This code is a brittle workaround -| `PERF` | This code is weird for performance reasons -| `TODO` | This must be implemented; There should be 0 of these in production code -| `SOMEDAY` | This should be implemented... someday - -## 2. File structure -A quick reference of the structure of the folders & files in `cuprate-database`. - -Note that `lib.rs/mod.rs` files are purely for re-exporting/visibility/lints, and contain no code. Each sub-directory has a corresponding `mod.rs`. - -### 2.1 `src/` -The top-level `src/` files. 
- -| File | Purpose | -|------------------------|---------| -| `constants.rs` | General constants used throughout `cuprate-database` -| `database.rs` | Abstracted database; `trait DatabaseR{o,w}` -| `env.rs` | Abstracted database environment; `trait Env` -| `error.rs` | Database error types -| `free.rs` | General free functions (related to the database) -| `key.rs` | Abstracted database keys; `trait Key` -| `resize.rs` | Database resizing algorithms -| `storable.rs` | Data (de)serialization; `trait Storable` -| `table.rs` | Database table abstraction; `trait Table` -| `tables.rs` | All the table definitions used by `cuprate-database` -| `tests.rs` | Utilities for `cuprate_database` testing -| `transaction.rs` | Database transaction abstraction; `trait TxR{o,w}` -| `types.rs` | Database-specific types -| `unsafe_unsendable.rs` | Marker type to impl `Send` for objects not `Send` - -### 2.2 `src/backend/` -This folder contains the implementation for actual databases used as the backend for `cuprate-database`. - -Each backend has its own folder. - -| Folder/File | Purpose | -|-------------|---------| -| `heed/` | Backend using using [`heed`](https://github.com/meilisearch/heed) (LMDB) -| `redb/` | Backend using [`redb`](https://github.com/cberner/redb) -| `tests.rs` | Backend-agnostic tests - -All backends follow the same file structure: - -| File | Purpose | -|------------------|---------| -| `database.rs` | Implementation of `trait DatabaseR{o,w}` -| `env.rs` | Implementation of `trait Env` -| `error.rs` | Implementation of backend's errors to `cuprate_database`'s error types -| `storable.rs` | Compatibility layer between `cuprate_database::Storable` and backend-specific (de)serialization -| `transaction.rs` | Implementation of `trait TxR{o,w}` -| `types.rs` | Type aliases for long backend-specific types - -### 2.3 `src/config/` -This folder contains the `cupate_database::config` module; configuration options for the database. 
- -| File | Purpose | -|---------------------|---------| -| `config.rs` | Main database `Config` struct -| `reader_threads.rs` | Reader thread configuration for `service` thread-pool -| `sync_mode.rs` | Disk sync configuration for backends - -### 2.4 `src/ops/` -This folder contains the `cuprate_database::ops` module. - -These are higher-level functions abstracted over the database, that are Monero-related. - -| File | Purpose | -|-----------------|---------| -| `block.rs` | Block related (main functions) -| `blockchain.rs` | Blockchain related (height, cumulative values, etc) -| `key_image.rs` | Key image related -| `macros.rs` | Macros specific to `ops/` -| `output.rs` | Output related -| `property.rs` | Database properties (pruned, version, etc) -| `tx.rs` | Transaction related - -### 2.5 `src/service/` -This folder contains the `cuprate_database::service` module. - -The `async`hronous request/response API other Cuprate crates use instead of managing the database directly themselves. - -| File | Purpose | -|----------------|---------| -| `free.rs` | General free functions used (related to `cuprate_database::service`) -| `read.rs` | Read thread-pool definitions and logic -| `tests.rs` | Thread-pool tests and test helper functions -| `types.rs` | `cuprate_database::service`-related type aliases -| `write.rs` | Writer thread definitions and logic - -## 3. Backends -`cuprate-database`'s `trait`s allow abstracting over the actual database, such that any backend in particular could be used. - -Each database's implementation for those `trait`s is located in its respective folder in `src/backend/${DATABASE_NAME}/`. - -### 3.1 heed -The default database used is [`heed`](https://github.com/meilisearch/heed) (LMDB). The upstream versions from [`crates.io`](https://crates.io/crates/heed) are used. `LMDB` should not need to be installed as `heed` has a build script that pulls it in automatically.
- -`heed`'s filenames inside Cuprate's database folder (`~/.local/share/cuprate/database/`) are: - -| Filename | Purpose | -|------------|---------| -| `data.mdb` | Main data file -| `lock.mdb` | Database lock file - -`heed`-specific notes: -- [There is a maximum reader limit](https://github.com/monero-project/monero/blob/059028a30a8ae9752338a7897329fe8012a310d5/src/blockchain_db/lmdb/db_lmdb.cpp#L1372). Other potential processes (e.g. `xmrblocks`) that are also reading the `data.mdb` file need to be accounted for -- [LMDB does not work on remote filesystem](https://github.com/LMDB/lmdb/blob/b8e54b4c31378932b69f1298972de54a565185b1/libraries/liblmdb/lmdb.h#L129) - -### 3.2 redb -The 2nd database backend is the 100% Rust [`redb`](https://github.com/cberner/redb). - -The upstream versions from [`crates.io`](https://crates.io/crates/redb) are used. - -`redb`'s filenames inside Cuprate's database folder (`~/.local/share/cuprate/database/`) are: - -| Filename | Purpose | -|-------------|---------| -| `data.redb` | Main data file - - - -### 3.3 redb-memory -This backend is 100% the same as `redb`, although, it uses `redb::backend::InMemoryBackend` which is a database that completely resides in memory instead of a file. - -All other details about this should be the same as the normal `redb` backend. - -### 3.4 sanakirja -[`sanakirja`](https://docs.rs/sanakirja) was a candidate as a backend, however there were problems with maximum value sizes. - -The default maximum value size is [1012 bytes](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.Storable.html) which was too small for our requirements. Using [`sanakirja::Slice`](https://docs.rs/sanakirja/1.4.1/sanakirja/union.Slice.html) and [sanakirja::UnsizedStorage](https://docs.rs/sanakirja/1.4.1/sanakirja/trait.UnsizedStorable.html) was attempted, but there were bugs found when inserting a value in-between `512..=4096` bytes. - -As such, it is not implemented. 
- -### 3.5 MDBX -[`MDBX`](https://erthink.github.io/libmdbx) was a candidate as a backend, however MDBX deprecated the custom key/value comparison functions, this makes it a bit trickier to implement [`9.2 Multimap tables`](#92-multimap-tables). It is also quite similar to the main backend LMDB (of which it was originally a fork of). - -As such, it is not implemented (yet). - -## 4. Layers -`cuprate_database` is logically abstracted into 5 layers, with each layer being built upon the last. - -Starting from the lowest: -1. Backend -2. Trait -3. ConcreteEnv -4. `ops` -5. `service` - - - -### 4.1 Backend -This is the actual database backend implementation (or a Rust shim over one). - -Examples: -- `heed` (LMDB) -- `redb` - -`cuprate_database` itself just uses a backend, it does not implement one. - -All backends have the following attributes: -- [Embedded](https://en.wikipedia.org/wiki/Embedded_database) -- [Multiversion concurrency control](https://en.wikipedia.org/wiki/Multiversion_concurrency_control) -- [ACID](https://en.wikipedia.org/wiki/ACID) -- Are `(key, value)` oriented and have the expected API (`get()`, `insert()`, `delete()`) -- Are table oriented (`"table_name" -> (key, value)`) -- Allows concurrent readers - -### 4.2 Trait -`cuprate_database` provides a set of `trait`s that abstract over the various database backends. - -This allows the function signatures and behavior to stay the same but allows for swapping out databases in an easier fashion. - -All common behavior of the backend's are encapsulated here and used instead of using the backend directly. 
- -Examples: -- [`trait Env`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/env.rs) -- [`trait {TxRo, TxRw}`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/transaction.rs) -- [`trait {DatabaseRo, DatabaseRw}`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/database.rs) - -For example, instead of calling `LMDB` or `redb`'s `get()` function directly, `DatabaseRo::get()` is called. - -### 4.3 ConcreteEnv -This is the non-generic, concrete `struct` provided by `cuprate_database` that contains all the data necessary to operate the database. The actual database backend `ConcreteEnv` will use internally depends on which backend feature is used. - -`ConcreteEnv` implements `trait Env`, which opens the door to all the other traits. - -The equivalent objects in the backends themselves are: -- [`heed::Env`](https://docs.rs/heed/0.20.0/heed/struct.Env.html) -- [`redb::Database`](https://docs.rs/redb/2.1.0/redb/struct.Database.html) - -This is the main object used when handling the database directly, although that is not strictly necessary as a user if the [`4.5 service`](#45-service) layer is used. - -### 4.4 ops -These are Monero-specific functions that use the abstracted `trait` forms of the database. - -Instead of dealing with the database directly: -- `get()` -- `delete()` - -the `ops` layer provides more abstract functions that deal with commonly used Monero operations: -- `add_block()` -- `pop_block()` - -### 4.5 service -The final layer abstracts the database completely into a [Monero-specific `async` request/response API](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/types/src/service.rs#L18-L78) using [`tower::Service`](https://docs.rs/tower/latest/tower/trait.Service.html). - -For more information on this layer, see the next section: [`5. The service`](#5-the-service). - -## 5. 
The service -The main API `cuprate_database` exposes for other crates to use is the `cuprate_database::service` module. - -This module exposes an `async` request/response API with `tower::Service`, backed by a threadpool, that allows reading/writing Monero-related data from/to the database. - -`cuprate_database::service` itself manages the database using a separate writer thread & reader thread-pool, and uses the previously mentioned [`4.4 ops`](#44-ops) functions when responding to requests. - -### 5.1 Initialization -The service is started simply by calling: [`cuprate_database::service::init()`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/service/free.rs#L23). - -This function initializes the database, spawns threads, and returns a: -- Read handle to the database (cloneable) -- Write handle to the database (not cloneable) - -These "handles" implement the `tower::Service` trait, which allows sending requests and receiving responses `async`hronously. - -### 5.2 Requests -Along with the 2 handles, there are 2 types of requests: -- [`ReadRequest`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/types/src/service.rs#L23-L90) -- [`WriteRequest`](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/types/src/service.rs#L93-L105) - -`ReadRequest` is for retrieving various types of information from the database. - -`WriteRequest` currently only has 1 variant: to write a block to the database. - -### 5.3 Responses -After sending one of the above requests using the read/write handle, the value returned is _not_ the response, but rather an `async`hronous channel that will eventually return the response: -```rust,ignore -// Send a request. -// tower::Service::call() -// V -let response_channel: Channel = read_handle.call(ReadRequest::ChainHeight)?; - -// Await the response.
 -let response: ReadResponse = response_channel.await?; - -// Assert the response is what we expected. -assert!(matches!(response, Response::ChainHeight(_))); -``` - -After `await`ing the returned channel, a `Response` will eventually be returned when the `service` threadpool has fetched the value from the database and sent it off. - -Both read/write request variants match in name with `Response` variants, i.e. -- `ReadRequest::ChainHeight` leads to `Response::ChainHeight` -- `WriteRequest::WriteBlock` leads to `Response::WriteBlockOk` - -### 5.4 Thread model -As mentioned in the [`4. Layers`](#4-layers) section, the base database abstractions themselves are not concerned with parallelism, they are mostly functions to be called from a single thread. - -However, the `cuprate_database::service` API, _does_ have a thread model backing it. - -When [`cuprate_database::service`'s initialization function](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/free.rs#L33-L44) is called, threads will be spawned and maintained until the user drops (disconnects) the returned handles. - -The current behavior for thread count is: -- [1 writer thread](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/write.rs#L52-L66) -- [As many reader threads as there are system threads](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/read.rs#L104-L126) - -For example, on a system with 32-threads, `cuprate_database` will spawn: -- 1 writer thread -- 32 reader threads - -whose sole responsibility is to listen for database requests, access the database (potentially in parallel), and return a response. - -Note that the `1 system thread = 1 reader thread` model is only the default setting, the reader thread count can be configured by the user to be any number between `1 .. amount_of_system_threads`.
- -The reader threads are managed by [`rayon`](https://docs.rs/rayon). - -For an example of where multiple reader threads are used: given a request that asks if any key-image within a set already exists, `cuprate_database` will [split that work between the threads with `rayon`](https://github.com/Cuprate/cuprate/blob/9c27ba5791377d639cb5d30d0f692c228568c122/database/src/service/read.rs#L490-L503). - -### 5.5 Shutdown -Once the read/write handles are `Drop`ed, the backing thread(pool) will gracefully exit, automatically. - -Note the writer thread and reader threadpool aren't connected whatsoever; dropping the write handle will make the writer thread exit, however, the reader handle is free to be held onto and can be continued to be read from - and vice-versa for the write handle. - -## 6. Syncing -`cuprate_database`'s database has 5 disk syncing modes. - -1. FastThenSafe -1. Safe -1. Async -1. Threshold -1. Fast - -The default mode is `Safe`. - -This means that upon each transaction commit, all the data that was written will be fully synced to disk. This is the slowest, but safest mode of operation. - -Note that upon any database `Drop`, whether via `service` or dropping the database directly, the current implementation will sync to disk regardless of any configuration. - -For more information on the other modes, read the documentation [here](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/config/sync_mode.rs#L63-L144). - -## 7. Resizing -Database backends that require manually resizing will, by default, use a similar algorithm as `monerod`'s. - -Note that this only relates to the `service` module, where the database is handled by `cuprate_database` itself, not the user. In the case of a user directly using `cuprate_database`, it is up to them on how to resize. 
- -Within `service`, the resizing logic defined [here](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/service/write.rs#L139-L201) does the following: - -- If there's not enough space to fit a write request's data, start a resize -- Each resize adds around [`1_073_745_920`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L104-L160) bytes to the current map size -- A resize will be attempted `3` times before failing - -There are other [resizing algorithms](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L38-L47) that define how the database's memory map grows, although currently the behavior of [`monerod`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/resize.rs#L104-L160) is closely followed. - -## 8. (De)serialization -All types stored inside the database are either bytes already, or are perfectly bitcast-able. - -As such, they do not incur heavy (de)serialization costs when storing/fetching them from the database. The main (de)serialization used is [`bytemuck`](https://docs.rs/bytemuck)'s traits and casting functions. - -The size & layout of types is stable across compiler versions, as they are set and determined with [`#[repr(C)]`](https://doc.rust-lang.org/nomicon/other-reprs.html#reprc) and `bytemuck`'s derive macros such as [`bytemuck::Pod`](https://docs.rs/bytemuck/latest/bytemuck/derive.Pod.html). - -Note that the data stored in the tables are still type-safe; we still refer to the key and values within our tables by the type. - -The main deserialization `trait` for database storage is: [`cuprate_database::Storable`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L16-L115). 
- -- Before storage, the type is [simply cast into bytes](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L125) -- When fetching, the bytes are [simply cast into the type](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L130) - -When a type is casted into bytes, [the reference is casted](https://docs.rs/bytemuck/latest/bytemuck/fn.bytes_of.html), i.e. this is zero-cost serialization. - -However, it is worth noting that when bytes are casted into the type, [it is copied](https://docs.rs/bytemuck/latest/bytemuck/fn.pod_read_unaligned.html). This is due to byte alignment guarantee issues with both backends, see: -- https://github.com/AltSysrq/lmdb-zero/issues/8 -- https://github.com/cberner/redb/issues/360 - -Without this, `bytemuck` will panic with [`TargetAlignmentGreaterAndInputNotAligned`](https://docs.rs/bytemuck/latest/bytemuck/enum.PodCastError.html#variant.TargetAlignmentGreaterAndInputNotAligned) when casting. - -Copying the bytes fixes this problem, although it is more costly than necessary. However, in the main use-case for `cuprate_database` (the `service` module) the bytes would need to be owned regardless as the `Request/Response` API uses owned data types (`T`, `Vec`, `HashMap`, etc). 
- -Practically speaking, this means lower-level database functions that normally look like such: -```rust -fn get(key: &Key) -> &Value; -``` -end up looking like this in `cuprate_database`: -```rust -fn get(key: &Key) -> Value; -``` - -Since each backend has its own (de)serialization methods, our types are wrapped in compatibility types that map our `Storable` functions into whatever is required for the backend, e.g: -- [`StorableHeed`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/backend/heed/storable.rs#L11-L45) -- [`StorableRedb`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/backend/redb/storable.rs#L11-L30) - -Compatibility structs also exist for any `Storable` containers: -- [`StorableVec`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L135-L191) -- [`StorableBytes`](https://github.com/Cuprate/cuprate/blob/2ac90420c658663564a71b7ecb52d74f3c2c9d0f/database/src/storable.rs#L208-L241) - -Again, it's unfortunate that these must be owned, although in `service`'s use-case, they would have to be owned anyway. - -## 9. Schema -This following section contains Cuprate's database schema, it may change throughout the development of Cuprate, as such, nothing here is final. - -### 9.1 Tables -The `CamelCase` names of the table headers documented here (e.g. `TxIds`) are the actual type name of the table within `cuprate_database`. - -Note that words written within `code blocks` mean that it is a real type defined and usable within `cuprate_database`. Other standard types like u64 and type aliases (TxId) are written normally. - -Within `cuprate_database::tables`, the below table is essentially defined as-is with [a macro](https://github.com/Cuprate/cuprate/blob/31ce89412aa174fc33754f22c9a6d9ef5ddeda28/database/src/tables.rs#L369-L470). 
- -Many of the data types stored are the same data types, although are different semantically, as such, a map of aliases used and their real data types is also provided below. - -| Alias | Real Type | -|----------------------------------------------------|-----------| -| BlockHeight, Amount, AmountIndex, TxId, UnlockTime | u64 -| BlockHash, KeyImage, TxHash, PrunableHash | [u8; 32] - -| Table | Key | Value | Description | -|-------------------|----------------------|--------------------|-------------| -| `BlockBlobs` | BlockHeight | `StorableVec` | Maps a block's height to a serialized byte form of a block -| `BlockHeights` | BlockHash | BlockHeight | Maps a block's hash to its height -| `BlockInfos` | BlockHeight | `BlockInfo` | Contains metadata of all blocks -| `KeyImages` | KeyImage | () | This table is a set with no value, it stores transaction key images -| `NumOutputs` | Amount | u64 | Maps an output's amount to the number of outputs with that amount -| `Outputs` | `PreRctOutputId` | `Output` | This table contains legacy CryptoNote outputs which have clear amounts. This table will not contain an output with 0 amount. 
-| `PrunedTxBlobs` | TxId | `StorableVec` | Contains pruned transaction blobs (even if the database is not pruned) -| `PrunableTxBlobs` | TxId | `StorableVec` | Contains the prunable part of a transaction -| `PrunableHashes` | TxId | PrunableHash | Contains the hash of the prunable part of a transaction -| `RctOutputs` | AmountIndex | `RctOutput` | Contains RingCT outputs mapped from their global RCT index -| `TxBlobs` | TxId | `StorableVec` | Serialized transaction blobs (bytes) -| `TxIds` | TxHash | TxId | Maps a transaction's hash to its index/ID -| `TxHeights` | TxId | BlockHeight | Maps a transaction's ID to the height of the block it comes from -| `TxOutputs` | TxId | `StorableVec` | Gives the amount indices of a transaction's outputs -| `TxUnlockTime` | TxId | UnlockTime | Stores the unlock time of a transaction (only if it has a non-zero lock time) - -The definitions for aliases and types (e.g. `RctOutput`) are within the [`cuprate_database::types`](https://github.com/Cuprate/cuprate/blob/31ce89412aa174fc33754f22c9a6d9ef5ddeda28/database/src/types.rs#L51) module. - - - -### 9.2 Multimap tables -When referencing outputs, Monero will [use the amount and the amount index](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/blockchain_db/lmdb/db_lmdb.cpp#L3447-L3449). This means 2 keys are needed to reach an output. - -With LMDB you can set the `DUP_SORT` flag on a table and then set the key/value to: -```rust -Key = KEY_PART_1 -``` -```rust -Value = { - KEY_PART_2, - VALUE // The actual value we are storing. -} -``` - -Then you can set a custom value sorting function that only takes `KEY_PART_2` into account; this is how `monerod` does it. 
- -This requires that the underlying database supports: -- multimap tables -- custom sort functions on values -- setting a cursor on a specific key/value - ---- - -Another way to implement this is as follows: -```rust -Key = { KEY_PART_1, KEY_PART_2 } -``` -```rust -Value = VALUE -``` - -Then the key type is simply used to look up the value; this is how `cuprate_database` does it. - -For example, the key/value pair for outputs is: -```rust -PreRctOutputId => Output -``` -where `PreRctOutputId` looks like this: -```rust -struct PreRctOutputId { - amount: u64, - amount_index: u64, -} -``` - -## 10. Known issues and tradeoffs -`cuprate_database` takes many tradeoffs, whether due to: -- Prioritizing certain values over others -- Not having a better solution -- Being "good enough" - -This is a list of the larger ones, along with issues that don't have answers yet. - -### 10.1 Traits abstracting backends -Although all database backends used are very similar, they have some crucial differences in small implementation details that must be worked around when conforming them to `cuprate_database`'s traits. - -Put simply: using `cuprate_database`'s traits is less efficient and more awkward than using the backend directly. 
- -For example: -- [Data types must be wrapped in compatibility layers when they otherwise wouldn't be](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/backend/heed/env.rs#L101-L116) -- [There are types that only apply to a specific backend, but are visible to all](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/error.rs#L86-L89) -- [There are extra layers of abstraction to smoothen the differences between all backends](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/env.rs#L62-L68) -- [Existing functionality of backends must be taken away, as it isn't supported in the others](https://github.com/Cuprate/cuprate/blob/d0ac94a813e4cd8e0ed8da5e85a53b1d1ace2463/database/src/database.rs#L27-L34) - -This is a _tradeoff_ that `cuprate_database` takes, as: -- The backend itself is usually not the source of bottlenecks in the greater system, as such, small inefficiencies are OK -- None of the lost functionality is crucial for operation -- The ability to use, test, and swap between multiple database backends is [worth it](https://github.com/Cuprate/cuprate/pull/35#issuecomment-1952804393) - -### 10.2 Hot-swappable backends -Using a different backend is really as simple as re-building `cuprate_database` with a different feature flag: -```bash -# Use LMDB. -cargo build --package cuprate-database --features heed - -# Use redb. -cargo build --package cuprate-database --features redb -``` - -This is "good enough" for now, however ideally, this hot-swapping of backends would be able to be done at _runtime_. - -As it is now, `cuprate_database` cannot compile both backends and swap based on user input at runtime; it must be compiled with a certain backend, which will produce a binary with only that backend. 
 - -This also means things like [CI testing multiple backends is awkward](https://github.com/Cuprate/cuprate/blob/main/.github/workflows/ci.yml#L132-L136), as we must re-compile with different feature flags instead. - -### 10.3 Copying unaligned bytes -As mentioned in [`8. (De)serialization`](#8-deserialization), bytes are _copied_ when they are turned into a type `T` due to unaligned bytes being returned from database backends. - -Using a regular reference cast results in an improperly aligned type `T`; [such a type even existing causes undefined behavior](https://doc.rust-lang.org/reference/behavior-considered-undefined.html). In our case, `bytemuck` saves us by panicking before this occurs. - -Thus, when using `cuprate_database`'s database traits, an _owned_ `T` is returned. - -This is doubly unfortunate for `&[u8]` as this does not even need deserialization. - -For example, `StorableVec` could have been this: -```rust -enum StorableBytes<'a, T: Storable> { - Owned(T), - Ref(&'a T), -} -``` -but this would require supporting types that must be copied regardless with the occasional `&[u8]` that can be returned without casting. This was hard to do in a generic way, thus all `[u8]`'s are copied and returned as owned `StorableVec`s. - -This is a _tradeoff_ `cuprate_database` takes as: -- `bytemuck::pod_read_unaligned` is cheap enough -- The main API, `service`, needs to return owned values anyway -- Having no references removes a lot of lifetime complexity - -The alternative is either: -- Using proper (de)serialization instead of casting (which comes with its own costs) -- Somehow fixing the alignment issues in the backends mentioned previously - -### 10.4 Endianness -`cuprate_database`'s (de)serialization and storage of bytes are native-endian, as in, byte storage order will depend on the machine it is running on.
- -As Cuprate's build-targets are all little-endian ([big-endian by default machines barely exist](https://en.wikipedia.org/wiki/Endianness#Hardware)), this doesn't matter much and the byte ordering can be seen as a constant. - -Practically, this means `cuprated`'s database files can be transferred across computers, as can `monerod`'s. - -### 10.5 Extra table data -Some of `cuprate_database`'s tables differ from `monerod`'s tables, for example, the way [`9.2 Multimap tables`](#92-multimap-tables) tables are done requires that the primary key is stored _for all_ entries, compared to `monerod` only needing to store it once. - -For example: -```rust -// `monerod` only stores `amount: 1` once, -// `cuprated` stores it each time it appears. -struct PreRctOutputId { amount: 1, amount_index: 0 } -struct PreRctOutputId { amount: 1, amount_index: 1 } -``` - -This means `cuprated`'s database will be slightly larger than `monerod`'s. - -The current method `cuprate_database` uses will be "good enough" until usage shows that it must be optimized as multimap tables are tricky to implement across all backends. 
From a072d44a0d21274b9b3d66e3d0856a94c8770823 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Wed, 25 Sep 2024 20:56:57 +0100 Subject: [PATCH 072/104] P2P: fix connection disconnect on `Client` drop (#298) fix connection disconnect on `Client` drop --- p2p/p2p-core/src/client/timeout_monitor.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/p2p/p2p-core/src/client/timeout_monitor.rs b/p2p/p2p-core/src/client/timeout_monitor.rs index 6dbb4a2..b736966 100644 --- a/p2p/p2p-core/src/client/timeout_monitor.rs +++ b/p2p/p2p-core/src/client/timeout_monitor.rs @@ -45,6 +45,9 @@ where CSync: CoreSyncSvc, PSync: PeerSyncSvc, { + let connection_tx_weak = connection_tx.downgrade(); + drop(connection_tx); + // Instead of tracking the time from last message from the peer and sending a timed sync if this value is too high, // we just send a timed sync every [TIMEOUT_INTERVAL] seconds. let mut interval = interval(TIMEOUT_INTERVAL); @@ -59,10 +62,10 @@ where tracing::trace!("timeout monitor tick."); - if connection_tx.is_closed() { + let Some(connection_tx) = connection_tx_weak.upgrade() else { tracing::debug!("Closing timeout monitor, connection disconnected."); return Ok(()); - } + }; let Ok(permit) = Arc::clone(&semaphore).try_acquire_owned() else { // If we can't get a permit the connection is currently waiting for a response, so no need to From 12bbadd749d17571afd4c0d8d8281ff6daa0b332 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Fri, 27 Sep 2024 20:41:34 -0400 Subject: [PATCH 073/104] cuprated: add `constants` & `statics` modules (#301) * add modules * docs * test * rename * tabs -> spaces --- Cargo.lock | 29 +++++++++++++++- Cargo.toml | 1 + binaries/cuprated/Cargo.toml | 5 +-- binaries/cuprated/src/constants.rs | 34 +++++++++++++++++++ binaries/cuprated/src/main.rs | 6 ++++ binaries/cuprated/src/statics.rs | 53 ++++++++++++++++++++++++++++++ 6 files changed, 125 insertions(+), 3 deletions(-) create mode 100644 binaries/cuprated/src/constants.rs 
create mode 100644 binaries/cuprated/src/statics.rs diff --git a/Cargo.lock b/Cargo.lock index 3caf437..fe9d1ed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -383,6 +383,26 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +[[package]] +name = "const_format" +version = "0.2.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50c655d81ff1114fb0dcdea9225ea9f0cc712a6f8d189378e82bdf62a473a64b" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eff1a44b93f47b1bac19a27932f5c591e43d1ba357ee4f61526c8a25603f0eb1" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -913,7 +933,7 @@ dependencies = [ [[package]] name = "cuprated" -version = "0.1.0" +version = "0.0.1" dependencies = [ "anyhow", "async-trait", @@ -924,6 +944,7 @@ dependencies = [ "cfg-if", "chrono", "clap", + "const_format", "crossbeam", "crypto-bigint", "cuprate-address-book", @@ -2899,6 +2920,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + [[package]] name = "untrusted" version = "0.9.0" diff --git a/Cargo.toml b/Cargo.toml index 254d3ce..0aa5875 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,6 +59,7 @@ clap = { version = "4.5.17", default-features = false } chrono = { version = "0.4.38", default-features = false } crypto-bigint = { version = "0.5.5", default-features = false } crossbeam = { version = "0.8.4", default-features = false } +const_format = { version = "0.2.33", default-features = false } curve25519-dalek = { version = "4.1.3", default-features = false 
} dashmap = { version = "5.5.3", default-features = false } dirs = { version = "5.0.1", default-features = false } diff --git a/binaries/cuprated/Cargo.toml b/binaries/cuprated/Cargo.toml index a886c12..c8ccd5a 100644 --- a/binaries/cuprated/Cargo.toml +++ b/binaries/cuprated/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "cuprated" -version = "0.1.0" +version = "0.0.1" edition = "2021" description = "The Cuprate Monero Rust node." license = "AGPL-3.0-only" @@ -42,11 +42,12 @@ borsh = { workspace = true } bytemuck = { workspace = true } bytes = { workspace = true } cfg-if = { workspace = true } -clap = { workspace = true } +clap = { workspace = true, features = ["cargo"] } chrono = { workspace = true } crypto-bigint = { workspace = true } crossbeam = { workspace = true } curve25519-dalek = { workspace = true } +const_format = { workspace = true } dashmap = { workspace = true } dirs = { workspace = true } futures = { workspace = true } diff --git a/binaries/cuprated/src/constants.rs b/binaries/cuprated/src/constants.rs new file mode 100644 index 0000000..9463d47 --- /dev/null +++ b/binaries/cuprated/src/constants.rs @@ -0,0 +1,34 @@ +//! General constants used throughout `cuprated`. + +use const_format::formatcp; + +/// `cuprated`'s semantic version (`MAJOR.MINOR.PATCH`) as string. +pub const VERSION: &str = clap::crate_version!(); + +/// [`VERSION`] + the build type. +/// +/// If a debug build, the suffix is `-debug`, else it is `-release`. 
+pub const VERSION_BUILD: &str = if cfg!(debug_assertions) { + formatcp!("{VERSION}-debug") +} else { + formatcp!("{VERSION}-release") +}; + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn version() { + assert_eq!(VERSION, "0.0.1"); + } + + #[test] + fn version_build() { + if cfg!(debug_assertions) { + assert_eq!(VERSION_BUILD, "0.0.1-debug"); + } else { + assert_eq!(VERSION_BUILD, "0.0.1-release"); + } + } +} diff --git a/binaries/cuprated/src/main.rs b/binaries/cuprated/src/main.rs index 76eb85e..775843d 100644 --- a/binaries/cuprated/src/main.rs +++ b/binaries/cuprated/src/main.rs @@ -13,10 +13,16 @@ mod blockchain; mod config; +mod constants; mod p2p; mod rpc; +mod statics; mod txpool; fn main() { + // Initialize global static `LazyLock` data. + statics::init_lazylock_statics(); + + // TODO: everything else. todo!() } diff --git a/binaries/cuprated/src/statics.rs b/binaries/cuprated/src/statics.rs new file mode 100644 index 0000000..8aab1c9 --- /dev/null +++ b/binaries/cuprated/src/statics.rs @@ -0,0 +1,53 @@ +//! Global `static`s used throughout `cuprated`. + +use std::{ + sync::{atomic::AtomicU64, LazyLock}, + time::{SystemTime, UNIX_EPOCH}, +}; + +/// Define all the `static`s that should always be initialized early on. +/// +/// This wraps all `static`s inside a `LazyLock` and generates +/// a [`init_lazylock_statics`] function that must/should be +/// used by `main()` early on. +macro_rules! define_init_lazylock_statics { +    ($( +        $( #[$attr:meta] )* +        $name:ident: $t:ty = $init_fn:expr; +    )*) => { +        /// Initialize global static `LazyLock` data. +        pub fn init_lazylock_statics() { +            $( +                LazyLock::force(&$name); +            )* +        } + +        $( +            $(#[$attr])* +            pub static $name: LazyLock<$t> = LazyLock::new(|| $init_fn); +        )* +    }; +} + +define_init_lazylock_statics! { +    /// The start time of `cuprated`. +    START_INSTANT: SystemTime = SystemTime::now(); + +    /// Start time of `cuprated` as a UNIX timestamp. 
+ START_INSTANT_UNIX: u64 = START_INSTANT + .duration_since(UNIX_EPOCH) + .expect("Failed to set `cuprated` startup time.") + .as_secs(); +} + +#[cfg(test)] +mod test { + use super::*; + + /// Sanity check for startup UNIX time. + #[test] + fn start_instant_unix() { + // Fri Sep 27 01:07:13 AM UTC 2024 + assert!(*START_INSTANT_UNIX > 1727399233); + } +} From 6da9d2d734da8d7c3b2a03695e2dd286a4ec2321 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Mon, 30 Sep 2024 22:15:48 +0100 Subject: [PATCH 074/104] P2P: remove peer sync service (#299) * remove peer sync service * change `p2p` to not use the peer sync service * fmt & clippy * doc updates * review fixes * add a little more detail to comment --- Cargo.lock | 1 - p2p/p2p-core/src/client.rs | 14 +- p2p/p2p-core/src/client/connection.rs | 11 +- p2p/p2p-core/src/client/connector.rs | 19 +- p2p/p2p-core/src/client/handshaker.rs | 47 +- p2p/p2p-core/src/client/handshaker/builder.rs | 69 +-- .../src/client/handshaker/builder/dummy.rs | 22 - p2p/p2p-core/src/client/request_handler.rs | 24 +- p2p/p2p-core/src/client/timeout_monitor.rs | 27 +- p2p/p2p-core/src/lib.rs | 24 - p2p/p2p-core/src/services.rs | 22 - p2p/p2p/Cargo.toml | 1 - p2p/p2p/src/block_downloader.rs | 58 +-- p2p/p2p/src/block_downloader/request_chain.rs | 38 +- p2p/p2p/src/block_downloader/tests.rs | 44 +- p2p/p2p/src/client_pool.rs | 26 ++ p2p/p2p/src/constants.rs | 1 + p2p/p2p/src/lib.rs | 35 +- p2p/p2p/src/sync_states.rs | 420 ------------------ 19 files changed, 118 insertions(+), 785 deletions(-) delete mode 100644 p2p/p2p/src/sync_states.rs diff --git a/Cargo.lock b/Cargo.lock index fe9d1ed..8522255 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -773,7 +773,6 @@ dependencies = [ "cuprate-wire", "dashmap", "futures", - "hex", "indexmap", "monero-serai", "pin-project", diff --git a/p2p/p2p-core/src/client.rs b/p2p/p2p-core/src/client.rs index 8685189..73b33ba 100644 --- a/p2p/p2p-core/src/client.rs +++ b/p2p/p2p-core/src/client.rs @@ -1,6 +1,6 @@ use std::{ 
fmt::{Debug, Display, Formatter}, - sync::Arc, + sync::{Arc, Mutex}, task::{ready, Context, Poll}, }; @@ -15,6 +15,7 @@ use tracing::Instrument; use cuprate_helper::asynch::InfallibleOneshotReceiver; use cuprate_pruning::PruningSeed; +use cuprate_wire::CoreSyncData; use crate::{ handles::{ConnectionGuard, ConnectionHandle}, @@ -59,8 +60,17 @@ pub struct PeerInformation { pub handle: ConnectionHandle, /// The direction of this connection (inbound|outbound). pub direction: ConnectionDirection, - /// The peers pruning seed. + /// The peer's [`PruningSeed`]. pub pruning_seed: PruningSeed, + /// The [`CoreSyncData`] of this peer. + /// + /// Data across fields are not necessarily related, so [`CoreSyncData::top_id`] is not always the + /// block hash for the block at height one below [`CoreSyncData::current_height`]. + /// + /// This value is behind a [`Mutex`] and is updated whenever the peer sends new information related + /// to their sync state. It is publicly accessible to anyone who has a peer's [`Client`] handle. You + /// probably should not mutate this value unless you are creating a custom [`ProtocolRequestHandler`](crate::ProtocolRequestHandler). + pub core_sync_data: Arc>, } /// This represents a connection to a peer. diff --git a/p2p/p2p-core/src/client/connection.rs b/p2p/p2p-core/src/client/connection.rs index f7b9be5..892fa64 100644 --- a/p2p/p2p-core/src/client/connection.rs +++ b/p2p/p2p-core/src/client/connection.rs @@ -22,7 +22,7 @@ use crate::{ constants::{REQUEST_TIMEOUT, SENDING_TIMEOUT}, handles::ConnectionGuard, AddressBook, BroadcastMessage, CoreSyncSvc, MessageID, NetworkZone, PeerError, PeerRequest, - PeerResponse, PeerSyncSvc, ProtocolRequestHandler, ProtocolResponse, SharedError, + PeerResponse, ProtocolRequestHandler, ProtocolResponse, SharedError, }; /// A request to the connection task from a [`Client`](crate::client::Client). 
@@ -71,7 +71,7 @@ const fn levin_command_response(message_id: MessageID, command: LevinCommand) -> } /// This represents a connection to a peer. -pub(crate) struct Connection { +pub(crate) struct Connection { /// The peer sink - where we send messages to the peer. peer_sink: Z::Sink, @@ -86,7 +86,7 @@ pub(crate) struct Connection { broadcast_stream: Pin>, /// The inner handler for any requests that come from the requested peer. - peer_request_handler: PeerRequestHandler, + peer_request_handler: PeerRequestHandler, /// The connection guard which will send signals to other parts of Cuprate when this connection is dropped. connection_guard: ConnectionGuard, @@ -94,12 +94,11 @@ pub(crate) struct Connection { error: SharedError, } -impl Connection +impl Connection where Z: NetworkZone, A: AddressBook, CS: CoreSyncSvc, - PS: PeerSyncSvc, PR: ProtocolRequestHandler, BrdcstStrm: Stream + Send + 'static, { @@ -108,7 +107,7 @@ where peer_sink: Z::Sink, client_rx: mpsc::Receiver, broadcast_stream: BrdcstStrm, - peer_request_handler: PeerRequestHandler, + peer_request_handler: PeerRequestHandler, connection_guard: ConnectionGuard, error: SharedError, ) -> Self { diff --git a/p2p/p2p-core/src/client/connector.rs b/p2p/p2p-core/src/client/connector.rs index 553f5a4..b378075 100644 --- a/p2p/p2p-core/src/client/connector.rs +++ b/p2p/p2p-core/src/client/connector.rs @@ -16,7 +16,7 @@ use tower::{Service, ServiceExt}; use crate::{ client::{handshaker::HandShaker, Client, DoHandshakeRequest, HandshakeError, InternalPeerID}, - AddressBook, BroadcastMessage, ConnectionDirection, CoreSyncSvc, NetworkZone, PeerSyncSvc, + AddressBook, BroadcastMessage, ConnectionDirection, CoreSyncSvc, NetworkZone, ProtocolRequestHandler, }; @@ -32,27 +32,24 @@ pub struct ConnectRequest { } /// The connector service, this service connects to peer and returns the [`Client`]. 
-pub struct Connector { - handshaker: HandShaker, +pub struct Connector { + handshaker: HandShaker, } -impl - Connector +impl + Connector { /// Create a new connector from a handshaker. - pub const fn new( - handshaker: HandShaker, - ) -> Self { + pub const fn new(handshaker: HandShaker) -> Self { Self { handshaker } } } -impl - Service> for Connector +impl + Service> for Connector where AdrBook: AddressBook + Clone, CSync: CoreSyncSvc + Clone, - PSync: PeerSyncSvc + Clone, ProtoHdlr: ProtocolRequestHandler + Clone, BrdcstStrm: Stream + Send + 'static, BrdcstStrmMkr: Fn(InternalPeerID) -> BrdcstStrm + Clone + Send + 'static, diff --git a/p2p/p2p-core/src/client/handshaker.rs b/p2p/p2p-core/src/client/handshaker.rs index d6873a8..bf5165e 100644 --- a/p2p/p2p-core/src/client/handshaker.rs +++ b/p2p/p2p-core/src/client/handshaker.rs @@ -8,7 +8,7 @@ use std::{ future::Future, marker::PhantomData, pin::Pin, - sync::Arc, + sync::{Arc, Mutex}, task::{Context, Poll}, }; @@ -40,10 +40,9 @@ use crate::{ PING_TIMEOUT, }, handles::HandleBuilder, - services::PeerSyncRequest, AddressBook, AddressBookRequest, AddressBookResponse, BroadcastMessage, ConnectionDirection, CoreSyncDataRequest, CoreSyncDataResponse, CoreSyncSvc, NetZoneAddress, NetworkZone, - PeerSyncSvc, ProtocolRequestHandler, SharedError, + ProtocolRequestHandler, SharedError, }; pub mod builder; @@ -87,13 +86,11 @@ pub struct DoHandshakeRequest { /// The peer handshaking service. #[derive(Debug, Clone)] -pub struct HandShaker { +pub struct HandShaker { /// The address book service. address_book: AdrBook, /// The core sync data service. core_sync_svc: CSync, - /// The peer sync service. - peer_sync_svc: PSync, /// The protocol request handler service. protocol_request_svc: ProtoHdlr, @@ -109,13 +106,12 @@ pub struct HandShaker, } -impl - HandShaker +impl + HandShaker { /// Creates a new handshaker. 
const fn new( address_book: AdrBook, - peer_sync_svc: PSync, core_sync_svc: CSync, protocol_request_svc: ProtoHdlr, broadcast_stream_maker: BrdcstStrmMkr, @@ -124,7 +120,6 @@ impl ) -> Self { Self { address_book, - peer_sync_svc, core_sync_svc, protocol_request_svc, broadcast_stream_maker, @@ -135,13 +130,11 @@ impl } } -impl - Service> - for HandShaker +impl + Service> for HandShaker where AdrBook: AddressBook + Clone, CSync: CoreSyncSvc + Clone, - PSync: PeerSyncSvc + Clone, ProtoHdlr: ProtocolRequestHandler + Clone, BrdcstStrm: Stream + Send + 'static, BrdcstStrmMkr: Fn(InternalPeerID) -> BrdcstStrm + Clone + Send + 'static, @@ -161,7 +154,6 @@ where let address_book = self.address_book.clone(); let protocol_request_svc = self.protocol_request_svc.clone(); let core_sync_svc = self.core_sync_svc.clone(); - let peer_sync_svc = self.peer_sync_svc.clone(); let our_basic_node_data = self.our_basic_node_data.clone(); let connection_parent_span = self.connection_parent_span.clone(); @@ -176,7 +168,6 @@ where broadcast_stream_maker, address_book, core_sync_svc, - peer_sync_svc, protocol_request_svc, our_basic_node_data, connection_parent_span, @@ -231,15 +222,13 @@ pub async fn ping(addr: N::Addr) -> Result } /// This function completes a handshake with the requested peer. -#[expect(clippy::too_many_arguments)] -async fn handshake( +async fn handshake( req: DoHandshakeRequest, broadcast_stream_maker: BrdcstStrmMkr, mut address_book: AdrBook, mut core_sync_svc: CSync, - mut peer_sync_svc: PSync, protocol_request_handler: ProtoHdlr, our_basic_node_data: BasicNodeData, connection_parent_span: Span, @@ -247,7 +236,6 @@ async fn handshake + Clone, CSync: CoreSyncSvc + Clone, - PSync: PeerSyncSvc + Clone, ProtoHdlr: ProtocolRequestHandler, BrdcstStrm: Stream + Send + 'static, BrdcstStrmMkr: Fn(InternalPeerID) -> BrdcstStrm + Send + 'static, @@ -458,17 +446,6 @@ where }) .await?; - // Tell the core sync service about the new peer. - peer_sync_svc - .ready() - .await? 
- .call(PeerSyncRequest::IncomingCoreSyncData( - addr, - handle.clone(), - peer_core_sync, - )) - .await?; - // Set up the connection data. let error_slot = SharedError::new(); let (connection_tx, client_rx) = mpsc::channel(1); @@ -478,18 +455,18 @@ where handle, direction, pruning_seed, + core_sync_data: Arc::new(Mutex::new(peer_core_sync)), }; let request_handler = PeerRequestHandler { address_book_svc: address_book.clone(), our_sync_svc: core_sync_svc.clone(), - peer_sync_svc: peer_sync_svc.clone(), protocol_request_handler, our_basic_node_data, peer_info: info.clone(), }; - let connection = Connection::::new( + let connection = Connection::::new( peer_sink, client_rx, broadcast_stream_maker(addr), @@ -509,13 +486,11 @@ where let semaphore = Arc::new(Semaphore::new(1)); let timeout_handle = tokio::spawn(connection_timeout_monitor_task( - info.id, - info.handle.clone(), + info.clone(), connection_tx.clone(), Arc::clone(&semaphore), address_book, core_sync_svc, - peer_sync_svc, )); let client = Client::::new( diff --git a/p2p/p2p-core/src/client/handshaker/builder.rs b/p2p/p2p-core/src/client/handshaker/builder.rs index 069811d..c7109ed 100644 --- a/p2p/p2p-core/src/client/handshaker/builder.rs +++ b/p2p/p2p-core/src/client/handshaker/builder.rs @@ -7,13 +7,11 @@ use cuprate_wire::BasicNodeData; use crate::{ client::{handshaker::HandShaker, InternalPeerID}, - AddressBook, BroadcastMessage, CoreSyncSvc, NetworkZone, PeerSyncSvc, ProtocolRequestHandler, + AddressBook, BroadcastMessage, CoreSyncSvc, NetworkZone, ProtocolRequestHandler, }; mod dummy; -pub use dummy::{ - DummyAddressBook, DummyCoreSyncSvc, DummyPeerSyncSvc, DummyProtocolRequestHandler, -}; +pub use dummy::{DummyAddressBook, DummyCoreSyncSvc, DummyProtocolRequestHandler}; /// A [`HandShaker`] [`Service`](tower::Service) builder. 
/// @@ -28,7 +26,6 @@ pub struct HandshakerBuilder< N: NetworkZone, AdrBook = DummyAddressBook, CSync = DummyCoreSyncSvc, - PSync = DummyPeerSyncSvc, ProtoHdlr = DummyProtocolRequestHandler, BrdcstStrmMkr = fn( InternalPeerID<::Addr>, @@ -38,8 +35,6 @@ pub struct HandshakerBuilder< address_book: AdrBook, /// The core sync data service. core_sync_svc: CSync, - /// The peer sync service. - peer_sync_svc: PSync, /// The protocol request service. protocol_request_svc: ProtoHdlr, /// Our [`BasicNodeData`] @@ -59,7 +54,6 @@ impl HandshakerBuilder { Self { address_book: DummyAddressBook, core_sync_svc: DummyCoreSyncSvc::static_mainnet_genesis(), - peer_sync_svc: DummyPeerSyncSvc, protocol_request_svc: DummyProtocolRequestHandler, our_basic_node_data, broadcast_stream_maker: |_| stream::pending(), @@ -69,8 +63,8 @@ impl HandshakerBuilder { } } -impl - HandshakerBuilder +impl + HandshakerBuilder { /// Changes the address book to the provided one. /// @@ -83,13 +77,12 @@ impl pub fn with_address_book( self, new_address_book: NAdrBook, - ) -> HandshakerBuilder + ) -> HandshakerBuilder where NAdrBook: AddressBook + Clone, { let Self { core_sync_svc, - peer_sync_svc, protocol_request_svc, our_basic_node_data, broadcast_stream_maker, @@ -100,7 +93,6 @@ impl HandshakerBuilder { address_book: new_address_book, core_sync_svc, - peer_sync_svc, protocol_request_svc, our_basic_node_data, broadcast_stream_maker, @@ -125,13 +117,12 @@ impl pub fn with_core_sync_svc( self, new_core_sync_svc: NCSync, - ) -> HandshakerBuilder + ) -> HandshakerBuilder where NCSync: CoreSyncSvc + Clone, { let Self { address_book, - peer_sync_svc, protocol_request_svc, our_basic_node_data, broadcast_stream_maker, @@ -142,43 +133,6 @@ impl HandshakerBuilder { address_book, core_sync_svc: new_core_sync_svc, - peer_sync_svc, - protocol_request_svc, - our_basic_node_data, - broadcast_stream_maker, - connection_parent_span, - _zone: PhantomData, - } - } - - /// Changes the peer sync service, which keeps track of 
peers sync states. - /// - /// ## Default Peer Sync Service - /// - /// The default peer sync service will be used if this method is not called. - /// - /// The default peer sync service will not keep track of peers sync states. - pub fn with_peer_sync_svc( - self, - new_peer_sync_svc: NPSync, - ) -> HandshakerBuilder - where - NPSync: PeerSyncSvc + Clone, - { - let Self { - address_book, - core_sync_svc, - protocol_request_svc, - our_basic_node_data, - broadcast_stream_maker, - connection_parent_span, - .. - } = self; - - HandshakerBuilder { - address_book, - core_sync_svc, - peer_sync_svc: new_peer_sync_svc, protocol_request_svc, our_basic_node_data, broadcast_stream_maker, @@ -197,14 +151,13 @@ impl pub fn with_protocol_request_handler( self, new_protocol_handler: NProtoHdlr, - ) -> HandshakerBuilder + ) -> HandshakerBuilder where NProtoHdlr: ProtocolRequestHandler + Clone, { let Self { address_book, core_sync_svc, - peer_sync_svc, our_basic_node_data, broadcast_stream_maker, connection_parent_span, @@ -214,7 +167,6 @@ impl HandshakerBuilder { address_book, core_sync_svc, - peer_sync_svc, protocol_request_svc: new_protocol_handler, our_basic_node_data, broadcast_stream_maker, @@ -233,7 +185,7 @@ impl pub fn with_broadcast_stream_maker( self, new_broadcast_stream_maker: NBrdcstStrmMkr, - ) -> HandshakerBuilder + ) -> HandshakerBuilder where BrdcstStrm: Stream + Send + 'static, NBrdcstStrmMkr: Fn(InternalPeerID) -> BrdcstStrm + Clone + Send + 'static, @@ -241,7 +193,6 @@ impl let Self { address_book, core_sync_svc, - peer_sync_svc, protocol_request_svc, our_basic_node_data, connection_parent_span, @@ -251,7 +202,6 @@ impl HandshakerBuilder { address_book, core_sync_svc, - peer_sync_svc, protocol_request_svc, our_basic_node_data, broadcast_stream_maker: new_broadcast_stream_maker, @@ -274,10 +224,9 @@ impl } /// Builds the [`HandShaker`]. 
- pub fn build(self) -> HandShaker { + pub fn build(self) -> HandShaker { HandShaker::new( self.address_book, - self.peer_sync_svc, self.core_sync_svc, self.protocol_request_svc, self.broadcast_stream_maker, diff --git a/p2p/p2p-core/src/client/handshaker/builder/dummy.rs b/p2p/p2p-core/src/client/handshaker/builder/dummy.rs index e3c4335..1dcc2be 100644 --- a/p2p/p2p-core/src/client/handshaker/builder/dummy.rs +++ b/p2p/p2p-core/src/client/handshaker/builder/dummy.rs @@ -10,32 +10,10 @@ use cuprate_wire::CoreSyncData; use crate::{ services::{ AddressBookRequest, AddressBookResponse, CoreSyncDataRequest, CoreSyncDataResponse, - PeerSyncRequest, PeerSyncResponse, }, NetworkZone, ProtocolRequest, ProtocolResponse, }; -/// A dummy peer sync service, that doesn't actually keep track of peers sync states. -#[derive(Debug, Clone)] -pub struct DummyPeerSyncSvc; - -impl Service> for DummyPeerSyncSvc { - type Response = PeerSyncResponse; - type Error = tower::BoxError; - type Future = Ready>; - - fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, req: PeerSyncRequest) -> Self::Future { - ready(Ok(match req { - PeerSyncRequest::PeersToSyncFrom { .. } => PeerSyncResponse::PeersToSyncFrom(vec![]), - PeerSyncRequest::IncomingCoreSyncData(_, _, _) => PeerSyncResponse::Ok, - })) - } -} - /// A dummy core sync service that just returns static [`CoreSyncData`]. 
#[derive(Debug, Clone)] pub struct DummyCoreSyncSvc(CoreSyncData); diff --git a/p2p/p2p-core/src/client/request_handler.rs b/p2p/p2p-core/src/client/request_handler.rs index 7059eed..c2f3b8e 100644 --- a/p2p/p2p-core/src/client/request_handler.rs +++ b/p2p/p2p-core/src/client/request_handler.rs @@ -14,10 +14,8 @@ use crate::{ constants::MAX_PEERS_IN_PEER_LIST_MESSAGE, services::{ AddressBookRequest, AddressBookResponse, CoreSyncDataRequest, CoreSyncDataResponse, - PeerSyncRequest, }, - AddressBook, CoreSyncSvc, NetworkZone, PeerRequest, PeerResponse, PeerSyncSvc, - ProtocolRequestHandler, + AddressBook, CoreSyncSvc, NetworkZone, PeerRequest, PeerResponse, ProtocolRequestHandler, }; #[derive(thiserror::Error, Debug, Copy, Clone, Eq, PartialEq)] @@ -28,13 +26,11 @@ enum PeerRequestHandlerError { /// The peer request handler, handles incoming [`PeerRequest`]s to our node. #[derive(Debug, Clone)] -pub(crate) struct PeerRequestHandler { +pub(crate) struct PeerRequestHandler { /// The address book service. pub address_book_svc: A, /// Our core sync service. pub our_sync_svc: CS, - /// The peer sync service. - pub peer_sync_svc: PS, /// The handler for [`ProtocolRequest`](crate::ProtocolRequest)s to our node. pub protocol_request_handler: PR, @@ -46,12 +42,11 @@ pub(crate) struct PeerRequestHandler { pub peer_info: PeerInformation, } -impl PeerRequestHandler +impl PeerRequestHandler where Z: NetworkZone, A: AddressBook, CS: CoreSyncSvc, - PS: PeerSyncSvc, PR: ProtocolRequestHandler, { /// Handles an incoming [`PeerRequest`] to our node. @@ -104,18 +99,7 @@ where ) -> Result { // TODO: add a limit on the amount of these requests in a certain time period. - let peer_id = self.peer_info.id; - let handle = self.peer_info.handle.clone(); - - self.peer_sync_svc - .ready() - .await? 
- .call(PeerSyncRequest::IncomingCoreSyncData( - peer_id, - handle, - req.payload_data, - )) - .await?; + *self.peer_info.core_sync_data.lock().unwrap() = req.payload_data; let AddressBookResponse::Peers(peers) = self .address_book_svc diff --git a/p2p/p2p-core/src/client/timeout_monitor.rs b/p2p/p2p-core/src/client/timeout_monitor.rs index b736966..d9703d6 100644 --- a/p2p/p2p-core/src/client/timeout_monitor.rs +++ b/p2p/p2p-core/src/client/timeout_monitor.rs @@ -15,35 +15,31 @@ use tracing::instrument; use cuprate_wire::{admin::TimedSyncRequest, AdminRequestMessage, AdminResponseMessage}; use crate::{ - client::{connection::ConnectionTaskRequest, InternalPeerID}, + client::{connection::ConnectionTaskRequest, PeerInformation}, constants::{MAX_PEERS_IN_PEER_LIST_MESSAGE, TIMEOUT_INTERVAL}, - handles::ConnectionHandle, - services::{AddressBookRequest, CoreSyncDataRequest, CoreSyncDataResponse, PeerSyncRequest}, - AddressBook, CoreSyncSvc, NetworkZone, PeerRequest, PeerResponse, PeerSyncSvc, + services::{AddressBookRequest, CoreSyncDataRequest, CoreSyncDataResponse}, + AddressBook, CoreSyncSvc, NetworkZone, PeerRequest, PeerResponse, }; /// The timeout monitor task, this task will send periodic timed sync requests to the peer to make sure it is still active. 
#[instrument( name = "timeout_monitor", level = "debug", - fields(addr = %id), + fields(addr = %peer_information.id), skip_all, )] -pub async fn connection_timeout_monitor_task( - id: InternalPeerID, - handle: ConnectionHandle, +pub async fn connection_timeout_monitor_task( + peer_information: PeerInformation, connection_tx: mpsc::Sender, semaphore: Arc, mut address_book_svc: AdrBook, mut core_sync_svc: CSync, - mut peer_core_sync_svc: PSync, ) -> Result<(), tower::BoxError> where AdrBook: AddressBook, CSync: CoreSyncSvc, - PSync: PeerSyncSvc, { let connection_tx_weak = connection_tx.downgrade(); drop(connection_tx); @@ -125,15 +121,6 @@ where )) .await?; - // Tell the peer sync service about the peers core sync data - peer_core_sync_svc - .ready() - .await? - .call(PeerSyncRequest::IncomingCoreSyncData( - id, - handle.clone(), - timed_sync.payload_data, - )) - .await?; + *peer_information.core_sync_data.lock().unwrap() = timed_sync.payload_data; } } diff --git a/p2p/p2p-core/src/lib.rs b/p2p/p2p-core/src/lib.rs index 04e8676..ca83f8f 100644 --- a/p2p/p2p-core/src/lib.rs +++ b/p2p/p2p-core/src/lib.rs @@ -192,30 +192,6 @@ pub trait NetworkZone: Clone + Copy + Send + 'static { // Below here is just helper traits, so we don't have to type out tower::Service bounds // everywhere but still get to use tower. -pub trait PeerSyncSvc: - tower::Service< - PeerSyncRequest, - Response = PeerSyncResponse, - Error = tower::BoxError, - Future = Self::Future2, - > + Send - + 'static -{ - // This allows us to put more restrictive bounds on the future without defining the future here - // explicitly. 
- type Future2: Future> + Send + 'static; -} - -impl PeerSyncSvc for T -where - T: tower::Service, Response = PeerSyncResponse, Error = tower::BoxError> - + Send - + 'static, - T::Future: Future> + Send + 'static, -{ - type Future2 = T::Future; -} - pub trait AddressBook: tower::Service< AddressBookRequest, diff --git a/p2p/p2p-core/src/services.rs b/p2p/p2p-core/src/services.rs index ba87684..b858f33 100644 --- a/p2p/p2p-core/src/services.rs +++ b/p2p/p2p-core/src/services.rs @@ -6,28 +6,6 @@ use crate::{ NetworkZone, }; -/// A request to the service that keeps track of peers sync states. -pub enum PeerSyncRequest { - /// Request some peers to sync from. - /// - /// This takes in the current cumulative difficulty of our chain and will return peers that - /// claim to have a higher cumulative difficulty. - PeersToSyncFrom { - current_cumulative_difficulty: u128, - block_needed: Option, - }, - /// Add/update a peer's core sync data. - IncomingCoreSyncData(InternalPeerID, ConnectionHandle, CoreSyncData), -} - -/// A response from the service that keeps track of peers sync states. -pub enum PeerSyncResponse { - /// The return value of [`PeerSyncRequest::PeersToSyncFrom`]. - PeersToSyncFrom(Vec>), - /// A generic ok response. - Ok, -} - /// A request to the core sync service for our node's [`CoreSyncData`]. 
pub struct CoreSyncDataRequest; diff --git a/p2p/p2p/Cargo.toml b/p2p/p2p/Cargo.toml index ef85277..b53baaa 100644 --- a/p2p/p2p/Cargo.toml +++ b/p2p/p2p/Cargo.toml @@ -30,7 +30,6 @@ thiserror = { workspace = true } bytes = { workspace = true, features = ["std"] } rand = { workspace = true, features = ["std", "std_rng"] } rand_distr = { workspace = true, features = ["std"] } -hex = { workspace = true, features = ["std"] } tracing = { workspace = true, features = ["std", "attributes"] } borsh = { workspace = true, features = ["derive", "std"] } diff --git a/p2p/p2p/src/block_downloader.rs b/p2p/p2p/src/block_downloader.rs index 39980a0..eccb385 100644 --- a/p2p/p2p/src/block_downloader.rs +++ b/p2p/p2p/src/block_downloader.rs @@ -22,11 +22,7 @@ use tower::{Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; use cuprate_async_buffer::{BufferAppender, BufferStream}; -use cuprate_p2p_core::{ - handles::ConnectionHandle, - services::{PeerSyncRequest, PeerSyncResponse}, - NetworkZone, PeerSyncSvc, -}; +use cuprate_p2p_core::{handles::ConnectionHandle, NetworkZone}; use cuprate_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; use crate::{ @@ -137,14 +133,12 @@ pub enum ChainSvcResponse { /// The block downloader may fail before the whole chain is downloaded. If this is the case you can /// call this function again, so it can start the search again. 
#[instrument(level = "error", skip_all, name = "block_downloader")] -pub fn download_blocks( +pub fn download_blocks( client_pool: Arc>, - peer_sync_svc: S, our_chain_svc: C, config: BlockDownloaderConfig, ) -> BufferStream where - S: PeerSyncSvc + Clone, C: Service + Send + 'static, @@ -152,13 +146,8 @@ where { let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_size); - let block_downloader = BlockDownloader::new( - client_pool, - peer_sync_svc, - our_chain_svc, - buffer_appender, - config, - ); + let block_downloader = + BlockDownloader::new(client_pool, our_chain_svc, buffer_appender, config); tokio::spawn( block_downloader @@ -195,12 +184,10 @@ where /// - request the next chain entry /// - download an already requested batch of blocks (this might happen due to an error in the previous request /// or because the queue of ready blocks is too large, so we need the oldest block to clear it). -struct BlockDownloader { +struct BlockDownloader { /// The client pool. client_pool: Arc>, - /// The service that holds the peer's sync states. - peer_sync_svc: S, /// The service that holds our current chain state. our_chain_svc: C, @@ -238,9 +225,8 @@ struct BlockDownloader { config: BlockDownloaderConfig, } -impl BlockDownloader +impl BlockDownloader where - S: PeerSyncSvc + Clone, C: Service + Send + 'static, @@ -249,16 +235,12 @@ where /// Creates a new [`BlockDownloader`] fn new( client_pool: Arc>, - - peer_sync_svc: S, our_chain_svc: C, buffer_appender: BufferAppender, - config: BlockDownloaderConfig, ) -> Self { Self { client_pool, - peer_sync_svc, our_chain_svc, amount_of_blocks_to_request: config.initial_batch_size, amount_of_blocks_to_request_updated_at: 0, @@ -495,22 +477,10 @@ where panic!("Chain service returned wrong response."); }; - let PeerSyncResponse::PeersToSyncFrom(peers) = self - .peer_sync_svc - .ready() - .await? 
- .call(PeerSyncRequest::PeersToSyncFrom { - current_cumulative_difficulty, - block_needed: None, - }) - .await? - else { - panic!("Peer sync service returned wrong response."); - }; - - tracing::debug!("Response received from peer sync service"); - - for client in self.client_pool.borrow_clients(&peers) { + for client in self + .client_pool + .clients_with_more_cumulative_difficulty(current_cumulative_difficulty) + { pending_peers .entry(client.info.pruning_seed) .or_default() @@ -621,12 +591,8 @@ where /// Starts the main loop of the block downloader. async fn run(mut self) -> Result<(), BlockDownloadError> { - let mut chain_tracker = initial_chain_search( - &self.client_pool, - self.peer_sync_svc.clone(), - &mut self.our_chain_svc, - ) - .await?; + let mut chain_tracker = + initial_chain_search(&self.client_pool, &mut self.our_chain_svc).await?; let mut pending_peers = BTreeMap::new(); diff --git a/p2p/p2p/src/block_downloader/request_chain.rs b/p2p/p2p/src/block_downloader/request_chain.rs index bde40ce..d6a2a0a 100644 --- a/p2p/p2p/src/block_downloader/request_chain.rs +++ b/p2p/p2p/src/block_downloader/request_chain.rs @@ -1,16 +1,12 @@ use std::{mem, sync::Arc}; -use rand::prelude::SliceRandom; -use rand::thread_rng; use tokio::{task::JoinSet, time::timeout}; use tower::{Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; use cuprate_p2p_core::{ - client::InternalPeerID, - handles::ConnectionHandle, - services::{PeerSyncRequest, PeerSyncResponse}, - NetworkZone, PeerRequest, PeerResponse, PeerSyncSvc, ProtocolRequest, ProtocolResponse, + client::InternalPeerID, handles::ConnectionHandle, NetworkZone, PeerRequest, PeerResponse, + ProtocolRequest, ProtocolResponse, }; use cuprate_wire::protocol::{ChainRequest, ChainResponse}; @@ -83,13 +79,11 @@ pub(crate) async fn request_chain_entry_from_peer( /// /// We then wait for their response and choose the peer who claims the highest cumulative difficulty. 
#[instrument(level = "error", skip_all)] -pub async fn initial_chain_search( +pub async fn initial_chain_search( client_pool: &Arc>, - mut peer_sync_svc: S, mut our_chain_svc: C, ) -> Result, BlockDownloadError> where - S: PeerSyncSvc, C: Service, { tracing::debug!("Getting our chain history"); @@ -108,29 +102,9 @@ where let our_genesis = *block_ids.last().expect("Blockchain had no genesis block."); - tracing::debug!("Getting a list of peers with higher cumulative difficulty"); - - let PeerSyncResponse::PeersToSyncFrom(mut peers) = peer_sync_svc - .ready() - .await? - .call(PeerSyncRequest::PeersToSyncFrom { - block_needed: None, - current_cumulative_difficulty: cumulative_difficulty, - }) - .await? - else { - panic!("peer sync service sent wrong response."); - }; - - tracing::debug!( - "{} peers claim they have a higher cumulative difficulty", - peers.len() - ); - - // Shuffle the list to remove any possibility of peers being able to prioritize getting picked. - peers.shuffle(&mut thread_rng()); - - let mut peers = client_pool.borrow_clients(&peers); + let mut peers = client_pool + .clients_with_more_cumulative_difficulty(cumulative_difficulty) + .into_iter(); let mut futs = JoinSet::new(); diff --git a/p2p/p2p/src/block_downloader/tests.rs b/p2p/p2p/src/block_downloader/tests.rs index a5c5e92..83dd417 100644 --- a/p2p/p2p/src/block_downloader/tests.rs +++ b/p2p/p2p/src/block_downloader/tests.rs @@ -2,7 +2,7 @@ use std::{ fmt::{Debug, Formatter}, future::Future, pin::Pin, - sync::Arc, + sync::{Arc, Mutex}, task::{Context, Poll}, time::Duration, }; @@ -20,13 +20,14 @@ use tower::{service_fn, Service}; use cuprate_fixed_bytes::ByteArrayVec; use cuprate_p2p_core::{ client::{mock_client, Client, InternalPeerID, PeerInformation}, - services::{PeerSyncRequest, PeerSyncResponse}, - ClearNet, ConnectionDirection, NetworkZone, PeerRequest, PeerResponse, ProtocolRequest, - ProtocolResponse, + ClearNet, ConnectionDirection, PeerRequest, PeerResponse, ProtocolRequest, 
ProtocolResponse, }; use cuprate_pruning::PruningSeed; use cuprate_types::{BlockCompleteEntry, TransactionBlobs}; -use cuprate_wire::protocol::{ChainResponse, GetObjectsResponse}; +use cuprate_wire::{ + protocol::{ChainResponse, GetObjectsResponse}, + CoreSyncData, +}; use crate::{ block_downloader::{download_blocks, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse}, @@ -52,19 +53,14 @@ proptest! { timeout(Duration::from_secs(600), async move { let client_pool = ClientPool::new(); - let mut peer_ids = Vec::with_capacity(peers); - for _ in 0..peers { let client = mock_block_downloader_client(Arc::clone(&blockchain)); - peer_ids.push(client.info.id); - client_pool.add_new_client(client); } let stream = download_blocks( client_pool, - SyncStateSvc(peer_ids) , OurChainSvc { genesis: *blockchain.blocks.first().unwrap().0 }, @@ -255,31 +251,19 @@ fn mock_block_downloader_client(blockchain: Arc) -> Client(Vec>); - -impl Service> for SyncStateSvc { - type Response = PeerSyncResponse; - type Error = tower::BoxError; - type Future = - Pin> + Send + 'static>>; - - fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, _: PeerSyncRequest) -> Self::Future { - let peers = self.0.clone(); - - async move { Ok(PeerSyncResponse::PeersToSyncFrom(peers)) }.boxed() - } -} - struct OurChainSvc { genesis: [u8; 32], } diff --git a/p2p/p2p/src/client_pool.rs b/p2p/p2p/src/client_pool.rs index 3405224..77d3b6e 100644 --- a/p2p/p2p/src/client_pool.rs +++ b/p2p/p2p/src/client_pool.rs @@ -127,6 +127,32 @@ impl ClientPool { ) -> impl Iterator> + sealed::Captures<(&'a (), &'b ())> { peers.iter().filter_map(|peer| self.borrow_client(peer)) } + + /// Borrows all [`Client`]s from the pool that have claimed a higher cumulative difficulty than + /// the amount passed in. + /// + /// The [`Client`]s are wrapped in [`ClientPoolDropGuard`] which + /// will return the clients to the pool when they are dropped. 
+ pub fn clients_with_more_cumulative_difficulty( + self: &Arc, + cumulative_difficulty: u128, + ) -> Vec> { + let peers = self + .clients + .iter() + .filter_map(|element| { + let peer_sync_info = element.value().info.core_sync_data.lock().unwrap(); + + if peer_sync_info.cumulative_difficulty() > cumulative_difficulty { + Some(*element.key()) + } else { + None + } + }) + .collect::>(); + + self.borrow_clients(&peers).collect() + } } mod sealed { diff --git a/p2p/p2p/src/constants.rs b/p2p/p2p/src/constants.rs index 4c08eb8..f234960 100644 --- a/p2p/p2p/src/constants.rs +++ b/p2p/p2p/src/constants.rs @@ -16,6 +16,7 @@ pub(crate) const MAX_SEED_CONNECTIONS: usize = 3; pub(crate) const OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT: Duration = Duration::from_secs(5); /// The durations of a short ban. +#[cfg_attr(not(test), expect(dead_code))] pub(crate) const SHORT_BAN: Duration = Duration::from_secs(60 * 10); /// The durations of a medium ban. diff --git a/p2p/p2p/src/lib.rs b/p2p/p2p/src/lib.rs index 2f51c6c..4a35ace 100644 --- a/p2p/p2p/src/lib.rs +++ b/p2p/p2p/src/lib.rs @@ -5,11 +5,7 @@ use std::sync::Arc; use futures::FutureExt; -use tokio::{ - sync::{mpsc, watch}, - task::JoinSet, -}; -use tokio_stream::wrappers::WatchStream; +use tokio::{sync::mpsc, task::JoinSet}; use tower::{buffer::Buffer, util::BoxCloneService, Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; @@ -17,7 +13,7 @@ use cuprate_async_buffer::BufferStream; use cuprate_p2p_core::{ client::Connector, client::InternalPeerID, - services::{AddressBookRequest, AddressBookResponse, PeerSyncRequest}, + services::{AddressBookRequest, AddressBookResponse}, CoreSyncSvc, NetworkZone, ProtocolRequestHandler, }; @@ -28,7 +24,6 @@ pub mod config; pub mod connection_maintainer; mod constants; mod inbound_server; -mod sync_states; use block_downloader::{BlockBatch, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse}; pub use broadcast::{BroadcastRequest, BroadcastSvc}; @@ -63,12 +58,6 @@ where 
config.max_inbound_connections + config.outbound_connections, ); - let (sync_states_svc, top_block_watch) = sync_states::PeerSyncSvc::new(); - let sync_states_svc = Buffer::new( - sync_states_svc, - config.max_inbound_connections + config.outbound_connections, - ); - // Use the default config. Changing the defaults affects tx fluff times, which could affect D++ so for now don't allow changing // this. let (broadcast_svc, outbound_mkr, inbound_mkr) = @@ -83,7 +72,6 @@ where let outbound_handshaker_builder = cuprate_p2p_core::client::HandshakerBuilder::new(basic_node_data) .with_address_book(address_book.clone()) - .with_peer_sync_svc(sync_states_svc.clone()) .with_core_sync_svc(core_sync_svc) .with_protocol_request_handler(protocol_request_handler) .with_broadcast_stream_maker(outbound_mkr) @@ -136,9 +124,7 @@ where Ok(NetworkInterface { pool: client_pool, broadcast_svc, - top_block_watch, make_connection_tx, - sync_states_svc, address_book: address_book.boxed_clone(), _background_tasks: Arc::new(background_tasks), }) @@ -151,16 +137,11 @@ pub struct NetworkInterface { pool: Arc>, /// A [`Service`] that allows broadcasting to all connected peers. broadcast_svc: BroadcastSvc, - /// A [`watch`] channel that contains the highest seen cumulative difficulty and other info - /// on that claimed chain. - top_block_watch: watch::Receiver, /// A channel to request extra connections. #[expect(dead_code, reason = "will be used eventually")] make_connection_tx: mpsc::Sender, /// The address book service. address_book: BoxCloneService, AddressBookResponse, tower::BoxError>, - /// The peer's sync states service. - sync_states_svc: Buffer, PeerSyncRequest>, /// Background tasks that will be aborted when this interface is dropped. 
_background_tasks: Arc>, } @@ -183,17 +164,7 @@ impl NetworkInterface { + 'static, C::Future: Send + 'static, { - block_downloader::download_blocks( - Arc::clone(&self.pool), - self.sync_states_svc.clone(), - our_chain_service, - config, - ) - } - - /// Returns a stream which yields the highest seen sync state from a connected peer. - pub fn top_sync_stream(&self) -> WatchStream { - WatchStream::from_changes(self.top_block_watch.clone()) + block_downloader::download_blocks(Arc::clone(&self.pool), our_chain_service, config) } /// Returns the address book service. diff --git a/p2p/p2p/src/sync_states.rs b/p2p/p2p/src/sync_states.rs deleted file mode 100644 index 0c03795..0000000 --- a/p2p/p2p/src/sync_states.rs +++ /dev/null @@ -1,420 +0,0 @@ -//! # Sync States -//! -//! This module contains a [`PeerSyncSvc`], which keeps track of the claimed chain states of connected peers. -//! This allows checking if we are behind and getting a list of peers who claim they are ahead. -use std::{ - cmp::Ordering, - collections::{BTreeMap, HashMap, HashSet}, - future::{ready, Ready}, - task::{Context, Poll}, -}; - -use futures::{stream::FuturesUnordered, StreamExt}; -use tokio::sync::watch; -use tower::Service; - -use cuprate_p2p_core::{ - client::InternalPeerID, - handles::ConnectionHandle, - services::{PeerSyncRequest, PeerSyncResponse}, - NetworkZone, -}; -use cuprate_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; -use cuprate_wire::CoreSyncData; - -use crate::{client_pool::disconnect_monitor::PeerDisconnectFut, constants::SHORT_BAN}; - -/// The highest claimed sync info from our connected peers. -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct NewSyncInfo { - /// The peers chain height. - pub chain_height: u64, - /// The peers top block's hash. - pub top_hash: [u8; 32], - /// The peers cumulative difficulty. - pub cumulative_difficulty: u128, -} - -/// A service that keeps track of our peers blockchains. 
-/// -/// This is the service that handles: -/// 1. Finding out if we need to sync -/// 1. Giving the peers that should be synced _from_, to the requester -pub(crate) struct PeerSyncSvc { - /// A map of cumulative difficulties to peers. - cumulative_difficulties: BTreeMap>>, - /// A map of peers to cumulative difficulties. - peers: HashMap, (u128, PruningSeed)>, - /// A watch channel for *a* top synced peer info. - new_height_watcher: watch::Sender, - /// The handle to the peer that has data in `new_height_watcher`. - last_peer_in_watcher_handle: Option, - /// A [`FuturesUnordered`] that resolves when a peer disconnects. - closed_connections: FuturesUnordered>, -} - -impl PeerSyncSvc { - /// Creates a new [`PeerSyncSvc`] with a [`Receiver`](watch::Receiver) that will be updated with - /// the highest seen sync data, this makes no guarantees about which peer will be chosen in case of a tie. - pub(crate) fn new() -> (Self, watch::Receiver) { - let (watch_tx, mut watch_rx) = watch::channel(NewSyncInfo { - chain_height: 0, - top_hash: [0; 32], - cumulative_difficulty: 0, - }); - - watch_rx.mark_unchanged(); - - ( - Self { - cumulative_difficulties: BTreeMap::new(), - peers: HashMap::new(), - new_height_watcher: watch_tx, - last_peer_in_watcher_handle: None, - closed_connections: FuturesUnordered::new(), - }, - watch_rx, - ) - } - - /// This function checks if any peers have disconnected, removing them if they have. - fn poll_disconnected(&mut self, cx: &mut Context<'_>) { - while let Poll::Ready(Some(peer_id)) = self.closed_connections.poll_next_unpin(cx) { - tracing::trace!("Peer {peer_id} disconnected, removing from peers sync info service."); - let (peer_cum_diff, _) = self.peers.remove(&peer_id).unwrap(); - - let cum_diff_peers = self - .cumulative_difficulties - .get_mut(&peer_cum_diff) - .unwrap(); - cum_diff_peers.remove(&peer_id); - if cum_diff_peers.is_empty() { - // If this was the last peer remove the whole entry for this cumulative difficulty. 
- self.cumulative_difficulties.remove(&peer_cum_diff); - } - } - } - - /// Returns a list of peers that claim to have a higher cumulative difficulty than `current_cum_diff`. - fn peers_to_sync_from( - &self, - current_cum_diff: u128, - block_needed: Option, - ) -> Vec> { - self.cumulative_difficulties - .range((current_cum_diff + 1)..) - .flat_map(|(_, peers)| peers) - .filter(|peer| { - if let Some(block_needed) = block_needed { - // we just use CRYPTONOTE_MAX_BLOCK_HEIGHT as the blockchain height, this only means - // we don't take into account the tip blocks which are not pruned. - self.peers[peer] - .1 - .has_full_block(block_needed, CRYPTONOTE_MAX_BLOCK_HEIGHT) - } else { - true - } - }) - .copied() - .collect() - } - - /// Updates a peers sync state. - fn update_peer_sync_info( - &mut self, - peer_id: InternalPeerID, - handle: ConnectionHandle, - core_sync_data: &CoreSyncData, - ) -> Result<(), tower::BoxError> { - tracing::trace!( - "Received new core sync data from peer, top hash: {}", - hex::encode(core_sync_data.top_id) - ); - - let new_cumulative_difficulty = core_sync_data.cumulative_difficulty(); - - if let Some((old_cum_diff, _)) = self.peers.get_mut(&peer_id) { - match (*old_cum_diff).cmp(&new_cumulative_difficulty) { - Ordering::Equal => { - // If the cumulative difficulty of the peers chain hasn't changed then no need to update anything. - return Ok(()); - } - Ordering::Greater => { - // This will only happen if a peer lowers its cumulative difficulty during the connection. - // This won't happen if a peer re-syncs their blockchain as then the connection would have closed. 
- tracing::debug!( - "Peer's claimed cumulative difficulty has dropped, closing connection and banning peer for: {} seconds.", SHORT_BAN.as_secs() - ); - handle.ban_peer(SHORT_BAN); - return Err("Peers cumulative difficulty dropped".into()); - } - Ordering::Less => (), - } - - // Remove the old cumulative difficulty entry for this peer - let old_cum_diff_peers = self.cumulative_difficulties.get_mut(old_cum_diff).unwrap(); - old_cum_diff_peers.remove(&peer_id); - if old_cum_diff_peers.is_empty() { - // If this was the last peer remove the whole entry for this cumulative difficulty. - self.cumulative_difficulties.remove(old_cum_diff); - } - // update the cumulative difficulty - *old_cum_diff = new_cumulative_difficulty; - } else { - // The peer is new so add it the list of peers. - self.peers.insert( - peer_id, - ( - new_cumulative_difficulty, - PruningSeed::decompress_p2p_rules(core_sync_data.pruning_seed)?, - ), - ); - - // add it to the list of peers to watch for disconnection. - self.closed_connections.push(PeerDisconnectFut { - closed_fut: handle.closed(), - peer_id: Some(peer_id), - }); - } - - self.cumulative_difficulties - .entry(new_cumulative_difficulty) - .or_default() - .insert(peer_id); - - // If the claimed cumulative difficulty is higher than the current one in the watcher - // or if the peer in the watch has disconnected, update it. 
- if self.new_height_watcher.borrow().cumulative_difficulty < new_cumulative_difficulty - || self - .last_peer_in_watcher_handle - .as_ref() - .is_some_and(ConnectionHandle::is_closed) - { - tracing::debug!( - "Updating sync watcher channel with new highest seen cumulative difficulty: {new_cumulative_difficulty}" - ); - #[expect( - clippy::let_underscore_must_use, - reason = "dropped receivers can be ignored" - )] - let _ = self.new_height_watcher.send(NewSyncInfo { - top_hash: core_sync_data.top_id, - chain_height: core_sync_data.current_height, - cumulative_difficulty: new_cumulative_difficulty, - }); - self.last_peer_in_watcher_handle.replace(handle); - } - - Ok(()) - } -} - -impl Service> for PeerSyncSvc { - type Response = PeerSyncResponse; - type Error = tower::BoxError; - type Future = Ready>; - - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - self.poll_disconnected(cx); - - Poll::Ready(Ok(())) - } - - fn call(&mut self, req: PeerSyncRequest) -> Self::Future { - let res = match req { - PeerSyncRequest::PeersToSyncFrom { - current_cumulative_difficulty, - block_needed, - } => Ok(PeerSyncResponse::PeersToSyncFrom(self.peers_to_sync_from( - current_cumulative_difficulty, - block_needed, - ))), - PeerSyncRequest::IncomingCoreSyncData(peer_id, handle, sync_data) => self - .update_peer_sync_info(peer_id, handle, &sync_data) - .map(|()| PeerSyncResponse::Ok), - }; - - ready(res) - } -} - -#[cfg(test)] -mod tests { - use tower::{Service, ServiceExt}; - - use cuprate_p2p_core::{ - client::InternalPeerID, handles::HandleBuilder, services::PeerSyncRequest, - }; - use cuprate_wire::CoreSyncData; - - use cuprate_p2p_core::services::PeerSyncResponse; - use cuprate_test_utils::test_netzone::TestNetZone; - - use super::PeerSyncSvc; - - #[tokio::test] - async fn top_sync_channel_updates() { - let (_g, handle) = HandleBuilder::new().build(); - - let (mut svc, mut watch) = PeerSyncSvc::>::new(); - - assert!(!watch.has_changed().unwrap()); - - svc.ready() - .await 
- .unwrap() - .call(PeerSyncRequest::IncomingCoreSyncData( - InternalPeerID::Unknown(0), - handle.clone(), - CoreSyncData { - cumulative_difficulty: 1_000, - cumulative_difficulty_top64: 0, - current_height: 0, - pruning_seed: 0, - top_id: [0; 32], - top_version: 0, - }, - )) - .await - .unwrap(); - - assert!(watch.has_changed().unwrap()); - - assert_eq!(watch.borrow().top_hash, [0; 32]); - assert_eq!(watch.borrow().cumulative_difficulty, 1000); - assert_eq!(watch.borrow_and_update().chain_height, 0); - - svc.ready() - .await - .unwrap() - .call(PeerSyncRequest::IncomingCoreSyncData( - InternalPeerID::Unknown(1), - handle.clone(), - CoreSyncData { - cumulative_difficulty: 1_000, - cumulative_difficulty_top64: 0, - current_height: 0, - pruning_seed: 0, - top_id: [0; 32], - top_version: 0, - }, - )) - .await - .unwrap(); - - assert!(!watch.has_changed().unwrap()); - - svc.ready() - .await - .unwrap() - .call(PeerSyncRequest::IncomingCoreSyncData( - InternalPeerID::Unknown(2), - handle.clone(), - CoreSyncData { - cumulative_difficulty: 1_001, - cumulative_difficulty_top64: 0, - current_height: 0, - pruning_seed: 0, - top_id: [1; 32], - top_version: 0, - }, - )) - .await - .unwrap(); - - assert!(watch.has_changed().unwrap()); - - assert_eq!(watch.borrow().top_hash, [1; 32]); - assert_eq!(watch.borrow().cumulative_difficulty, 1001); - assert_eq!(watch.borrow_and_update().chain_height, 0); - } - - #[tokio::test] - async fn peer_sync_info_updates() { - let (_g, handle) = HandleBuilder::new().build(); - - let (mut svc, _watch) = PeerSyncSvc::>::new(); - - svc.ready() - .await - .unwrap() - .call(PeerSyncRequest::IncomingCoreSyncData( - InternalPeerID::Unknown(0), - handle.clone(), - CoreSyncData { - cumulative_difficulty: 1_000, - cumulative_difficulty_top64: 0, - current_height: 0, - pruning_seed: 0, - top_id: [0; 32], - top_version: 0, - }, - )) - .await - .unwrap(); - - assert_eq!(svc.peers.len(), 1); - assert_eq!(svc.cumulative_difficulties.len(), 1); - - svc.ready() - 
.await - .unwrap() - .call(PeerSyncRequest::IncomingCoreSyncData( - InternalPeerID::Unknown(0), - handle.clone(), - CoreSyncData { - cumulative_difficulty: 1_001, - cumulative_difficulty_top64: 0, - current_height: 0, - pruning_seed: 0, - top_id: [0; 32], - top_version: 0, - }, - )) - .await - .unwrap(); - - assert_eq!(svc.peers.len(), 1); - assert_eq!(svc.cumulative_difficulties.len(), 1); - - svc.ready() - .await - .unwrap() - .call(PeerSyncRequest::IncomingCoreSyncData( - InternalPeerID::Unknown(1), - handle.clone(), - CoreSyncData { - cumulative_difficulty: 10, - cumulative_difficulty_top64: 0, - current_height: 0, - pruning_seed: 0, - top_id: [0; 32], - top_version: 0, - }, - )) - .await - .unwrap(); - - assert_eq!(svc.peers.len(), 2); - assert_eq!(svc.cumulative_difficulties.len(), 2); - - let PeerSyncResponse::PeersToSyncFrom(peers) = svc - .ready() - .await - .unwrap() - .call(PeerSyncRequest::PeersToSyncFrom { - block_needed: None, - current_cumulative_difficulty: 0, - }) - .await - .unwrap() - else { - panic!("Wrong response for request.") - }; - - assert!( - peers.contains(&InternalPeerID::Unknown(0)) - && peers.contains(&InternalPeerID::Unknown(1)) - ); - } -} From 521bf877dbad175f8c872db253de99978e9e91a7 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Mon, 30 Sep 2024 23:19:53 +0100 Subject: [PATCH 075/104] P2P: give the protocol handler access to the peer info (#302) * give the protocol handler access to the peer info * add trait alias * clippy + fmt * update doc * simplify trait aliases * use tower `Shared` * clean import * fmt * Update Cargo.toml Co-authored-by: hinto-janai * fix merge --------- Co-authored-by: hinto-janai --- Cargo.lock | 69 ++++++++++++------ Cargo.toml | 2 +- p2p/p2p-core/Cargo.toml | 2 +- p2p/p2p-core/src/client/connector.rs | 20 +++--- p2p/p2p-core/src/client/handshaker.rs | 37 ++++++---- p2p/p2p-core/src/client/handshaker/builder.rs | 45 ++++++------ p2p/p2p-core/src/lib.rs | 72 +++++++++++-------- p2p/p2p/src/lib.rs | 8 +-- 8 
files changed, 153 insertions(+), 102 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8522255..ca5c154 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -129,9 +129,9 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower", - "tower-layer", - "tower-service", + "tower 0.4.13", + "tower-layer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tower-service 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tracing", ] @@ -151,8 +151,8 @@ dependencies = [ "pin-project-lite", "rustversion", "sync_wrapper 0.1.2", - "tower-layer", - "tower-service", + "tower-layer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tower-service 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tracing", ] @@ -527,7 +527,7 @@ dependencies = [ "thiserror", "tokio", "tokio-util", - "tower", + "tower 0.5.1", "tracing", ] @@ -565,7 +565,7 @@ dependencies = [ "tempfile", "thread_local", "tokio", - "tower", + "tower 0.5.1", ] [[package]] @@ -592,7 +592,7 @@ dependencies = [ "tokio", "tokio-test", "tokio-util", - "tower", + "tower 0.5.1", "tracing", ] @@ -638,7 +638,7 @@ dependencies = [ "thiserror", "tokio", "tokio-util", - "tower", + "tower 0.5.1", "tracing", ] @@ -668,7 +668,7 @@ dependencies = [ "futures", "rayon", "serde", - "tower", + "tower 0.5.1", ] [[package]] @@ -700,7 +700,7 @@ dependencies = [ "sha3", "thiserror", "tokio", - "tower", + "tower 0.5.1", ] [[package]] @@ -785,7 +785,7 @@ dependencies = [ "tokio-stream", "tokio-test", "tokio-util", - "tower", + "tower 0.5.1", "tracing", ] @@ -808,7 +808,7 @@ dependencies = [ "tokio-stream", "tokio-test", "tokio-util", - "tower", + "tower 0.5.1", "tracing", ] @@ -836,7 +836,7 @@ dependencies = [ "serde", "serde_json", "tokio", - "tower", + "tower 0.5.1", "ureq", ] @@ -897,7 +897,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", - "tower", + "tower 0.5.1", ] [[package]] @@ -992,7 +992,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - 
"tower", + "tower 0.5.1", "tracing", "tracing-subscriber", ] @@ -1491,7 +1491,7 @@ dependencies = [ "rustls-pki-types", "tokio", "tokio-rustls", - "tower-service", + "tower-service 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1509,8 +1509,8 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower", - "tower-service", + "tower 0.4.13", + "tower-service 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tracing", ] @@ -2539,7 +2539,7 @@ dependencies = [ "hyper-rustls", "hyper-util", "tokio", - "tower-service", + "tower-service 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2821,9 +2821,24 @@ dependencies = [ "pin-project", "pin-project-lite", "tokio", + "tower-layer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tower-service 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tracing", +] + +[[package]] +name = "tower" +version = "0.5.1" +source = "git+https://github.com/Cuprate/tower.git?rev=6c7faf0#6c7faf0e9dbc74aef5d3110313324bc7e1f997cf" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tokio", "tokio-util", - "tower-layer", - "tower-service", + "tower-layer 0.3.3 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", + "tower-service 0.3.3 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", ] @@ -2833,12 +2848,22 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "git+https://github.com/Cuprate/tower.git?rev=6c7faf0#6c7faf0e9dbc74aef5d3110313324bc7e1f997cf" + [[package]] name = "tower-service" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" +[[package]] +name = "tower-service" 
+version = "0.3.3" +source = "git+https://github.com/Cuprate/tower.git?rev=6c7faf0#6c7faf0e9dbc74aef5d3110313324bc7e1f997cf" + [[package]] name = "tracing" version = "0.1.40" diff --git a/Cargo.toml b/Cargo.toml index 0aa5875..31e9285 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,7 +82,7 @@ thread_local = { version = "1.1.8", default-features = false } tokio-util = { version = "0.7.12", default-features = false } tokio-stream = { version = "0.1.16", default-features = false } tokio = { version = "1.40.0", default-features = false } -tower = { version = "0.4.13", default-features = false } +tower = { git = "https://github.com/Cuprate/tower.git", rev = "6c7faf0", default-features = false } # tracing-subscriber = { version = "0.3.18", default-features = false } tracing = { version = "0.1.40", default-features = false } diff --git a/p2p/p2p-core/Cargo.toml b/p2p/p2p-core/Cargo.toml index 8341fe9..a30590f 100644 --- a/p2p/p2p-core/Cargo.toml +++ b/p2p/p2p-core/Cargo.toml @@ -19,7 +19,7 @@ tokio-util = { workspace = true, features = ["codec"] } tokio-stream = { workspace = true, features = ["sync"]} futures = { workspace = true, features = ["std"] } async-trait = { workspace = true } -tower = { workspace = true, features = ["util", "tracing"] } +tower = { workspace = true, features = ["util", "tracing", "make"] } cfg-if = { workspace = true } thiserror = { workspace = true } diff --git a/p2p/p2p-core/src/client/connector.rs b/p2p/p2p-core/src/client/connector.rs index b378075..abe7e13 100644 --- a/p2p/p2p-core/src/client/connector.rs +++ b/p2p/p2p-core/src/client/connector.rs @@ -17,7 +17,7 @@ use tower::{Service, ServiceExt}; use crate::{ client::{handshaker::HandShaker, Client, DoHandshakeRequest, HandshakeError, InternalPeerID}, AddressBook, BroadcastMessage, ConnectionDirection, CoreSyncSvc, NetworkZone, - ProtocolRequestHandler, + ProtocolRequestHandlerMaker, }; /// A request to connect to a peer. 
@@ -32,25 +32,27 @@ pub struct ConnectRequest { } /// The connector service, this service connects to peer and returns the [`Client`]. -pub struct Connector { - handshaker: HandShaker, +pub struct Connector { + handshaker: HandShaker, } -impl - Connector +impl + Connector { /// Create a new connector from a handshaker. - pub const fn new(handshaker: HandShaker) -> Self { + pub const fn new( + handshaker: HandShaker, + ) -> Self { Self { handshaker } } } -impl - Service> for Connector +impl + Service> for Connector where AdrBook: AddressBook + Clone, CSync: CoreSyncSvc + Clone, - ProtoHdlr: ProtocolRequestHandler + Clone, + ProtoHdlrMkr: ProtocolRequestHandlerMaker + Clone, BrdcstStrm: Stream + Send + 'static, BrdcstStrmMkr: Fn(InternalPeerID) -> BrdcstStrm + Clone + Send + 'static, { diff --git a/p2p/p2p-core/src/client/handshaker.rs b/p2p/p2p-core/src/client/handshaker.rs index bf5165e..66acb5b 100644 --- a/p2p/p2p-core/src/client/handshaker.rs +++ b/p2p/p2p-core/src/client/handshaker.rs @@ -42,7 +42,7 @@ use crate::{ handles::HandleBuilder, AddressBook, AddressBookRequest, AddressBookResponse, BroadcastMessage, ConnectionDirection, CoreSyncDataRequest, CoreSyncDataResponse, CoreSyncSvc, NetZoneAddress, NetworkZone, - ProtocolRequestHandler, SharedError, + ProtocolRequestHandlerMaker, SharedError, }; pub mod builder; @@ -86,13 +86,13 @@ pub struct DoHandshakeRequest { /// The peer handshaking service. #[derive(Debug, Clone)] -pub struct HandShaker { +pub struct HandShaker { /// The address book service. address_book: AdrBook, /// The core sync data service. core_sync_svc: CSync, /// The protocol request handler service. - protocol_request_svc: ProtoHdlr, + protocol_request_svc_maker: ProtoHdlrMkr, /// Our [`BasicNodeData`] our_basic_node_data: BasicNodeData, @@ -106,14 +106,14 @@ pub struct HandShaker _zone: PhantomData, } -impl - HandShaker +impl + HandShaker { /// Creates a new handshaker. 
const fn new( address_book: AdrBook, core_sync_svc: CSync, - protocol_request_svc: ProtoHdlr, + protocol_request_svc_maker: ProtoHdlrMkr, broadcast_stream_maker: BrdcstStrmMkr, our_basic_node_data: BasicNodeData, connection_parent_span: Span, @@ -121,7 +121,7 @@ impl Self { address_book, core_sync_svc, - protocol_request_svc, + protocol_request_svc_maker, broadcast_stream_maker, our_basic_node_data, connection_parent_span, @@ -130,12 +130,12 @@ impl } } -impl - Service> for HandShaker +impl + Service> for HandShaker where AdrBook: AddressBook + Clone, CSync: CoreSyncSvc + Clone, - ProtoHdlr: ProtocolRequestHandler + Clone, + ProtoHdlrMkr: ProtocolRequestHandlerMaker + Clone, BrdcstStrm: Stream + Send + 'static, BrdcstStrmMkr: Fn(InternalPeerID) -> BrdcstStrm + Clone + Send + 'static, { @@ -152,7 +152,7 @@ where let broadcast_stream_maker = self.broadcast_stream_maker.clone(); let address_book = self.address_book.clone(); - let protocol_request_svc = self.protocol_request_svc.clone(); + let protocol_request_svc_maker = self.protocol_request_svc_maker.clone(); let core_sync_svc = self.core_sync_svc.clone(); let our_basic_node_data = self.our_basic_node_data.clone(); @@ -168,7 +168,7 @@ where broadcast_stream_maker, address_book, core_sync_svc, - protocol_request_svc, + protocol_request_svc_maker, our_basic_node_data, connection_parent_span, ), @@ -222,21 +222,21 @@ pub async fn ping(addr: N::Addr) -> Result } /// This function completes a handshake with the requested peer. 
-async fn handshake( +async fn handshake( req: DoHandshakeRequest, broadcast_stream_maker: BrdcstStrmMkr, mut address_book: AdrBook, mut core_sync_svc: CSync, - protocol_request_handler: ProtoHdlr, + mut protocol_request_svc_maker: ProtoHdlrMkr, our_basic_node_data: BasicNodeData, connection_parent_span: Span, ) -> Result, HandshakeError> where AdrBook: AddressBook + Clone, CSync: CoreSyncSvc + Clone, - ProtoHdlr: ProtocolRequestHandler, + ProtoHdlrMkr: ProtocolRequestHandlerMaker, BrdcstStrm: Stream + Send + 'static, BrdcstStrmMkr: Fn(InternalPeerID) -> BrdcstStrm + Send + 'static, { @@ -458,6 +458,13 @@ where core_sync_data: Arc::new(Mutex::new(peer_core_sync)), }; + let protocol_request_handler = protocol_request_svc_maker + .as_service() + .ready() + .await? + .call(info.clone()) + .await?; + let request_handler = PeerRequestHandler { address_book_svc: address_book.clone(), our_sync_svc: core_sync_svc.clone(), diff --git a/p2p/p2p-core/src/client/handshaker/builder.rs b/p2p/p2p-core/src/client/handshaker/builder.rs index c7109ed..c1c3f3f 100644 --- a/p2p/p2p-core/src/client/handshaker/builder.rs +++ b/p2p/p2p-core/src/client/handshaker/builder.rs @@ -1,13 +1,14 @@ -use std::marker::PhantomData; +use std::{convert::Infallible, marker::PhantomData}; use futures::{stream, Stream}; +use tower::{make::Shared, util::MapErr}; use tracing::Span; use cuprate_wire::BasicNodeData; use crate::{ client::{handshaker::HandShaker, InternalPeerID}, - AddressBook, BroadcastMessage, CoreSyncSvc, NetworkZone, ProtocolRequestHandler, + AddressBook, BroadcastMessage, CoreSyncSvc, NetworkZone, ProtocolRequestHandlerMaker, }; mod dummy; @@ -16,7 +17,7 @@ pub use dummy::{DummyAddressBook, DummyCoreSyncSvc, DummyProtocolRequestHandler} /// A [`HandShaker`] [`Service`](tower::Service) builder. 
/// /// This builder applies default values to make usage easier, behaviour and drawbacks of the defaults are documented -/// on the `with_*` method to change it, for example [`HandshakerBuilder::with_protocol_request_handler`]. +/// on the `with_*` method to change it, for example [`HandshakerBuilder::with_protocol_request_handler_maker`]. /// /// If you want to use any network other than [`Mainnet`](crate::Network::Mainnet) /// you will need to change the core sync service with [`HandshakerBuilder::with_core_sync_svc`], @@ -26,7 +27,7 @@ pub struct HandshakerBuilder< N: NetworkZone, AdrBook = DummyAddressBook, CSync = DummyCoreSyncSvc, - ProtoHdlr = DummyProtocolRequestHandler, + ProtoHdlrMkr = MapErr, fn(Infallible) -> tower::BoxError>, BrdcstStrmMkr = fn( InternalPeerID<::Addr>, ) -> stream::Pending, @@ -36,7 +37,7 @@ pub struct HandshakerBuilder< /// The core sync data service. core_sync_svc: CSync, /// The protocol request service. - protocol_request_svc: ProtoHdlr, + protocol_request_svc_maker: ProtoHdlrMkr, /// Our [`BasicNodeData`] our_basic_node_data: BasicNodeData, /// A function that returns a stream that will give items to be broadcast by a connection. 
@@ -54,7 +55,10 @@ impl HandshakerBuilder { Self { address_book: DummyAddressBook, core_sync_svc: DummyCoreSyncSvc::static_mainnet_genesis(), - protocol_request_svc: DummyProtocolRequestHandler, + protocol_request_svc_maker: MapErr::new( + Shared::new(DummyProtocolRequestHandler), + tower::BoxError::from, + ), our_basic_node_data, broadcast_stream_maker: |_| stream::pending(), connection_parent_span: None, @@ -83,7 +87,7 @@ impl { let Self { core_sync_svc, - protocol_request_svc, + protocol_request_svc_maker, our_basic_node_data, broadcast_stream_maker, connection_parent_span, @@ -93,7 +97,7 @@ impl HandshakerBuilder { address_book: new_address_book, core_sync_svc, - protocol_request_svc, + protocol_request_svc_maker, our_basic_node_data, broadcast_stream_maker, connection_parent_span, @@ -123,7 +127,7 @@ impl { let Self { address_book, - protocol_request_svc, + protocol_request_svc_maker, our_basic_node_data, broadcast_stream_maker, connection_parent_span, @@ -133,7 +137,7 @@ impl HandshakerBuilder { address_book, core_sync_svc: new_core_sync_svc, - protocol_request_svc, + protocol_request_svc_maker, our_basic_node_data, broadcast_stream_maker, connection_parent_span, @@ -141,19 +145,20 @@ impl } } - /// Changes the protocol request handler, which handles [`ProtocolRequest`](crate::ProtocolRequest)s to our node. + /// Changes the protocol request handler maker, which creates the service that handles [`ProtocolRequest`](crate::ProtocolRequest)s + /// to our node. /// /// ## Default Protocol Request Handler /// - /// The default protocol request handler will not respond to any protocol requests, this should not + /// The default service maker will create services that will not respond to any protocol requests, this should not /// be an issue as long as peers do not think we are ahead of them, if they do they will send requests /// for our blocks, and we won't respond which will cause them to disconnect. 
- pub fn with_protocol_request_handler( + pub fn with_protocol_request_handler_maker( self, - new_protocol_handler: NProtoHdlr, - ) -> HandshakerBuilder + new_protocol_request_svc_maker: NProtoHdlrMkr, + ) -> HandshakerBuilder where - NProtoHdlr: ProtocolRequestHandler + Clone, + NProtoHdlrMkr: ProtocolRequestHandlerMaker + Clone, { let Self { address_book, @@ -167,7 +172,7 @@ impl HandshakerBuilder { address_book, core_sync_svc, - protocol_request_svc: new_protocol_handler, + protocol_request_svc_maker: new_protocol_request_svc_maker, our_basic_node_data, broadcast_stream_maker, connection_parent_span, @@ -193,7 +198,7 @@ impl let Self { address_book, core_sync_svc, - protocol_request_svc, + protocol_request_svc_maker, our_basic_node_data, connection_parent_span, .. @@ -202,7 +207,7 @@ impl HandshakerBuilder { address_book, core_sync_svc, - protocol_request_svc, + protocol_request_svc_maker, our_basic_node_data, broadcast_stream_maker: new_broadcast_stream_maker, connection_parent_span, @@ -228,7 +233,7 @@ impl HandShaker::new( self.address_book, self.core_sync_svc, - self.protocol_request_svc, + self.protocol_request_svc_maker, self.broadcast_stream_maker, self.our_basic_node_data, self.connection_parent_span.unwrap_or(Span::none()), diff --git a/p2p/p2p-core/src/lib.rs b/p2p/p2p-core/src/lib.rs index ca83f8f..c9a58f5 100644 --- a/p2p/p2p-core/src/lib.rs +++ b/p2p/p2p-core/src/lib.rs @@ -66,7 +66,7 @@ cfg_if::cfg_if! { } } -use std::{fmt::Debug, future::Future, hash::Hash}; +use std::{fmt::Debug, hash::Hash}; use futures::{Sink, Stream}; @@ -197,26 +197,21 @@ pub trait AddressBook: AddressBookRequest, Response = AddressBookResponse, Error = tower::BoxError, - Future = Self::Future2, + Future: Send + 'static, > + Send + 'static { - // This allows us to put more restrictive bounds on the future without defining the future here - // explicitly. 
- type Future2: Future> + Send + 'static; } -impl AddressBook for T -where +impl AddressBook for T where T: tower::Service< AddressBookRequest, Response = AddressBookResponse, Error = tower::BoxError, + Future: Send + 'static, > + Send - + 'static, - T::Future: Future> + Send + 'static, + + 'static { - type Future2 = T::Future; } pub trait CoreSyncSvc: @@ -224,26 +219,21 @@ pub trait CoreSyncSvc: CoreSyncDataRequest, Response = CoreSyncDataResponse, Error = tower::BoxError, - Future = Self::Future2, + Future: Send + 'static, > + Send + 'static { - // This allows us to put more restrictive bounds on the future without defining the future here - // explicitly. - type Future2: Future> + Send + 'static; } -impl CoreSyncSvc for T -where +impl CoreSyncSvc for T where T: tower::Service< CoreSyncDataRequest, Response = CoreSyncDataResponse, Error = tower::BoxError, + Future: Send + 'static, > + Send - + 'static, - T::Future: Future> + Send + 'static, + + 'static { - type Future2 = T::Future; } pub trait ProtocolRequestHandler: @@ -251,21 +241,43 @@ pub trait ProtocolRequestHandler: ProtocolRequest, Response = ProtocolResponse, Error = tower::BoxError, - Future = Self::Future2, + Future: Send + 'static, > + Send + 'static { - // This allows us to put more restrictive bounds on the future without defining the future here - // explicitly. 
- type Future2: Future> + Send + 'static; } -impl ProtocolRequestHandler for T -where - T: tower::Service - + Send - + 'static, - T::Future: Future> + Send + 'static, +impl ProtocolRequestHandler for T where + T: tower::Service< + ProtocolRequest, + Response = ProtocolResponse, + Error = tower::BoxError, + Future: Send + 'static, + > + Send + + 'static +{ +} + +pub trait ProtocolRequestHandlerMaker: + tower::MakeService< + client::PeerInformation, + ProtocolRequest, + MakeError = tower::BoxError, + Service: ProtocolRequestHandler, + Future: Send + 'static, + > + Send + + 'static +{ +} + +impl ProtocolRequestHandlerMaker for T where + T: tower::MakeService< + client::PeerInformation, + ProtocolRequest, + MakeError = tower::BoxError, + Service: ProtocolRequestHandler, + Future: Send + 'static, + > + Send + + 'static { - type Future2 = T::Future; } diff --git a/p2p/p2p/src/lib.rs b/p2p/p2p/src/lib.rs index 4a35ace..2431158 100644 --- a/p2p/p2p/src/lib.rs +++ b/p2p/p2p/src/lib.rs @@ -14,7 +14,7 @@ use cuprate_p2p_core::{ client::Connector, client::InternalPeerID, services::{AddressBookRequest, AddressBookResponse}, - CoreSyncSvc, NetworkZone, ProtocolRequestHandler, + CoreSyncSvc, NetworkZone, ProtocolRequestHandlerMaker, }; mod block_downloader; @@ -41,14 +41,14 @@ use connection_maintainer::MakeConnectionRequest; /// - A core sync service, which keeps track of the sync state of our node #[instrument(level = "debug", name = "net", skip_all, fields(zone = N::NAME))] pub async fn initialize_network( - protocol_request_handler: PR, + protocol_request_handler_maker: PR, core_sync_svc: CS, config: P2PConfig, ) -> Result, tower::BoxError> where N: NetworkZone, N::Addr: borsh::BorshDeserialize + borsh::BorshSerialize, - PR: ProtocolRequestHandler + Clone, + PR: ProtocolRequestHandlerMaker + Clone, CS: CoreSyncSvc + Clone, { let address_book = @@ -73,7 +73,7 @@ where cuprate_p2p_core::client::HandshakerBuilder::new(basic_node_data) .with_address_book(address_book.clone()) 
.with_core_sync_svc(core_sync_svc) - .with_protocol_request_handler(protocol_request_handler) + .with_protocol_request_handler_maker(protocol_request_handler_maker) .with_broadcast_stream_maker(outbound_mkr) .with_connection_parent_span(Span::current()); From a003e0588d0515c62b42762730ee88cf9d406014 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 2 Oct 2024 13:51:58 -0400 Subject: [PATCH 076/104] Add `constants/` crate (#280) * add `constants/` * ci: add `A-constants` labeler * add modules, move `cuprate_helper::constants` * add `genesis.rs` * `rpc.rs` docs * remove todos * `CRYPTONOTE_MAX_BLOCK_HEIGHT` * add genesis data for all networks * features * fix feature cfgs * test fixes * add to architecture book * fix comment * remove `genesis` add other constants * fixes * revert * fix --- .github/labeler.yml | 4 + Cargo.lock | 10 ++ Cargo.toml | 1 + books/architecture/src/appendix/crates.md | 1 + consensus/rules/Cargo.toml | 1 + consensus/rules/src/miner_tx.rs | 3 +- consensus/rules/src/transactions/tests.rs | 7 +- constants/Cargo.toml | 22 ++++ constants/README.md | 3 + {helper => constants}/build.rs | 2 - constants/src/block.rs | 11 ++ constants/src/build.rs | 22 ++++ constants/src/lib.rs | 12 +++ constants/src/macros.rs | 35 ++++++ constants/src/rpc.rs | 101 ++++++++++++++++++ helper/Cargo.toml | 4 +- helper/src/constants.rs | 22 ---- helper/src/lib.rs | 3 - helper/src/map.rs | 16 +-- p2p/address-book/Cargo.toml | 1 + p2p/address-book/src/peer_list.rs | 5 +- p2p/p2p/Cargo.toml | 1 + p2p/p2p/src/block_downloader.rs | 7 +- p2p/p2p/src/block_downloader/block_queue.rs | 3 +- p2p/p2p/src/block_downloader/chain_tracker.rs | 11 +- pruning/Cargo.toml | 2 + pruning/src/lib.rs | 19 ++-- storage/blockchain/Cargo.toml | 1 + storage/blockchain/src/service/free.rs | 4 +- storage/database/README.md | 2 +- 30 files changed, 276 insertions(+), 60 deletions(-) create mode 100644 constants/Cargo.toml create mode 100644 constants/README.md rename {helper => constants}/build.rs 
(93%) create mode 100644 constants/src/block.rs create mode 100644 constants/src/build.rs create mode 100644 constants/src/lib.rs create mode 100644 constants/src/macros.rs create mode 100644 constants/src/rpc.rs delete mode 100644 helper/src/constants.rs diff --git a/.github/labeler.yml b/.github/labeler.yml index cf48df1..85a15b9 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -56,6 +56,10 @@ A-cryptonight: - changed-files: - any-glob-to-any-file: cryptonight/** +A-constants: +- changed-files: + - any-glob-to-any-file: constants/** + A-storage: - changed-files: - any-glob-to-any-file: storage/** diff --git a/Cargo.lock b/Cargo.lock index ca5c154..0fc5da0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -518,6 +518,7 @@ name = "cuprate-address-book" version = "0.1.0" dependencies = [ "borsh", + "cuprate-constants", "cuprate-p2p-core", "cuprate-pruning", "cuprate-test-utils", @@ -547,6 +548,7 @@ version = "0.0.0" dependencies = [ "bitflags 2.6.0", "bytemuck", + "cuprate-constants", "cuprate-database", "cuprate-database-service", "cuprate-helper", @@ -602,6 +604,7 @@ version = "0.1.0" dependencies = [ "cfg-if", "crypto-bigint", + "cuprate-constants", "cuprate-cryptonight", "cuprate-helper", "cuprate-types", @@ -618,6 +621,10 @@ dependencies = [ "tracing", ] +[[package]] +name = "cuprate-constants" +version = "0.1.0" + [[package]] name = "cuprate-cryptonight" version = "0.1.0" @@ -719,6 +726,7 @@ version = "0.1.0" dependencies = [ "chrono", "crossbeam", + "cuprate-constants", "curve25519-dalek", "dirs", "futures", @@ -764,6 +772,7 @@ dependencies = [ "bytes", "cuprate-address-book", "cuprate-async-buffer", + "cuprate-constants", "cuprate-fixed-bytes", "cuprate-helper", "cuprate-p2p-core", @@ -817,6 +826,7 @@ name = "cuprate-pruning" version = "0.1.0" dependencies = [ "borsh", + "cuprate-constants", "thiserror", ] diff --git a/Cargo.toml b/Cargo.toml index 31e9285..fa348cc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ resolver = "2" members = [ 
"binaries/cuprated", + "constants", "consensus", "consensus/fast-sync", "consensus/rules", diff --git a/books/architecture/src/appendix/crates.md b/books/architecture/src/appendix/crates.md index e5311a8..1993c47 100644 --- a/books/architecture/src/appendix/crates.md +++ b/books/architecture/src/appendix/crates.md @@ -55,6 +55,7 @@ cargo doc --open --package cuprate-blockchain ## 1-off crates | Crate | In-tree path | Purpose | |-------|--------------|---------| +| [`cuprate-constants`](https://doc.cuprate.org/cuprate_constants) | [`constants/`](https://github.com/Cuprate/cuprate/tree/main/constants) | Shared `const/static` data across Cuprate | [`cuprate-cryptonight`](https://doc.cuprate.org/cuprate_cryptonight) | [`cryptonight/`](https://github.com/Cuprate/cuprate/tree/main/cryptonight) | CryptoNight hash functions | [`cuprate-pruning`](https://doc.cuprate.org/cuprate_pruning) | [`pruning/`](https://github.com/Cuprate/cuprate/tree/main/pruning) | Monero pruning logic/types | [`cuprate-helper`](https://doc.cuprate.org/cuprate_helper) | [`helper/`](https://github.com/Cuprate/cuprate/tree/main/helper) | Kitchen-sink helper crate for Cuprate diff --git a/consensus/rules/Cargo.toml b/consensus/rules/Cargo.toml index ed97d33..50117ac 100644 --- a/consensus/rules/Cargo.toml +++ b/consensus/rules/Cargo.toml @@ -11,6 +11,7 @@ proptest = ["cuprate-types/proptest"] rayon = ["dep:rayon"] [dependencies] +cuprate-constants = { path = "../../constants", default-features = false } cuprate-helper = { path = "../../helper", default-features = false, features = ["std", "cast"] } cuprate-types = { path = "../../types", default-features = false } cuprate-cryptonight = {path = "../../cryptonight"} diff --git a/consensus/rules/src/miner_tx.rs b/consensus/rules/src/miner_tx.rs index e6b51d2..5221ee5 100644 --- a/consensus/rules/src/miner_tx.rs +++ b/consensus/rules/src/miner_tx.rs @@ -1,5 +1,6 @@ use monero_serai::transaction::{Input, Output, Timelock, Transaction}; +use 
cuprate_constants::block::MAX_BLOCK_HEIGHT_USIZE; use cuprate_types::TxVersion; use crate::{is_decomposed_amount, transactions::check_output_types, HardFork}; @@ -112,7 +113,7 @@ const fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<() &Timelock::Block(till_height) => { // Lock times above this amount are timestamps not blocks. // This is just for safety though and shouldn't actually be hit. - if till_height > 500_000_000 { + if till_height > MAX_BLOCK_HEIGHT_USIZE { return Err(MinerTxError::InvalidLockTime); } if till_height == chain_height + MINER_TX_TIME_LOCKED_BLOCKS { diff --git a/consensus/rules/src/transactions/tests.rs b/consensus/rules/src/transactions/tests.rs index 936d843..e154396 100644 --- a/consensus/rules/src/transactions/tests.rs +++ b/consensus/rules/src/transactions/tests.rs @@ -9,6 +9,7 @@ use proptest::{collection::vec, prelude::*}; use monero_serai::transaction::Output; +use cuprate_constants::block::MAX_BLOCK_HEIGHT; use cuprate_helper::cast::u64_to_usize; use super::*; @@ -160,10 +161,10 @@ prop_compose! { /// Returns a [`Timelock`] that is locked given a height and time. fn locked_timelock(height: u64, time_for_time_lock: u64)( timebased in any::(), - lock_height in (height+1)..500_000_001, + lock_height in (height+1)..=MAX_BLOCK_HEIGHT, time_for_time_lock in (time_for_time_lock+121).., ) -> Timelock { - if timebased || lock_height > 500_000_000 { + if timebased || lock_height > MAX_BLOCK_HEIGHT { Timelock::Time(time_for_time_lock) } else { Timelock::Block(u64_to_usize(lock_height)) @@ -240,7 +241,7 @@ proptest! 
{ } #[test] - fn test_timestamp_time_lock(timestamp in 500_000_001..u64::MAX) { + fn test_timestamp_time_lock(timestamp in MAX_BLOCK_HEIGHT+1..u64::MAX) { prop_assert!(check_timestamp_time_lock(timestamp, timestamp - 120, HardFork::V16)); prop_assert!(!check_timestamp_time_lock(timestamp, timestamp - 121, HardFork::V16)); prop_assert!(check_timestamp_time_lock(timestamp, timestamp, HardFork::V16)); diff --git a/constants/Cargo.toml b/constants/Cargo.toml new file mode 100644 index 0000000..6d3e031 --- /dev/null +++ b/constants/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "cuprate-constants" +version = "0.1.0" +edition = "2021" +description = "Constant/static data used throughout Cuprate" +license = "MIT" +authors = ["hinto-janai"] +repository = "https://github.com/Cuprate/cuprate/tree/main/constants" +keywords = ["cuprate", "constants"] + +[features] +default = [] +block = [] +build = [] +rpc = [] + +[dependencies] + +[dev-dependencies] + +[lints] +workspace = true \ No newline at end of file diff --git a/constants/README.md b/constants/README.md new file mode 100644 index 0000000..b045447 --- /dev/null +++ b/constants/README.md @@ -0,0 +1,3 @@ +# cuprate-constants +This crate contains general constants that are not specific to any particular +part of the codebase yet are used in multiple places such as the maximum block height. diff --git a/helper/build.rs b/constants/build.rs similarity index 93% rename from helper/build.rs rename to constants/build.rs index 709db42..a680714 100644 --- a/helper/build.rs +++ b/constants/build.rs @@ -1,9 +1,7 @@ fn main() { - #[cfg(feature = "constants")] set_commit_env(); } -#[cfg(feature = "constants")] /// This sets the git `COMMIT` environment variable. fn set_commit_env() { const PATH: &str = "../.git/refs/heads/"; diff --git a/constants/src/block.rs b/constants/src/block.rs new file mode 100644 index 0000000..9ddaff6 --- /dev/null +++ b/constants/src/block.rs @@ -0,0 +1,11 @@ +//! Block related. 
+ +use crate::macros::monero_definition_link; + +/// The maximum block height possible. +#[doc = monero_definition_link!(a1dc85c5373a30f14aaf7dcfdd95f5a7375d3623, "/src/cryptonote_config.h", 40)] +pub const MAX_BLOCK_HEIGHT: u64 = 500_000_000; + +/// [`MAX_BLOCK_HEIGHT`] as a [`usize`]. +#[expect(clippy::cast_possible_truncation, reason = "will not be truncated")] +pub const MAX_BLOCK_HEIGHT_USIZE: usize = MAX_BLOCK_HEIGHT as usize; diff --git a/constants/src/build.rs b/constants/src/build.rs new file mode 100644 index 0000000..12236ad --- /dev/null +++ b/constants/src/build.rs @@ -0,0 +1,22 @@ +//! Build related metadata. + +/// The current commit hash of the root Cuprate repository. +/// +/// # Case & length +/// It is guaranteed that `COMMIT` will be: +/// - Lowercase ASCII +/// - 40 characters long (no newline) +/// +/// ```rust +/// # use cuprate_constants::build::*; +/// assert!(COMMIT.is_ascii()); +/// assert_eq!(COMMIT.as_bytes().len(), 40); +/// assert_eq!(COMMIT.to_lowercase(), COMMIT); +/// ``` +pub const COMMIT: &str = core::env!("COMMIT"); // Set in `constants/build.rs`. + +/// `true` if debug build, else `false`. +pub const DEBUG: bool = cfg!(debug_assertions); + +/// `true` if release build, else `false`. +pub const RELEASE: bool = !DEBUG; diff --git a/constants/src/lib.rs b/constants/src/lib.rs new file mode 100644 index 0000000..f1b29fb --- /dev/null +++ b/constants/src/lib.rs @@ -0,0 +1,12 @@ +#![doc = include_str!("../README.md")] +#![deny(missing_docs, reason = "all constants should document what they are")] +#![no_std] // This can be removed if we eventually need `std`. + +mod macros; + +#[cfg(feature = "block")] +pub mod block; +#[cfg(feature = "build")] +pub mod build; +#[cfg(feature = "rpc")] +pub mod rpc; diff --git a/constants/src/macros.rs b/constants/src/macros.rs new file mode 100644 index 0000000..f41ae7b --- /dev/null +++ b/constants/src/macros.rs @@ -0,0 +1,35 @@ +/// Output a string link to `monerod` source code. 
+#[allow( + clippy::allow_attributes, + unused_macros, + reason = "used in feature gated modules" +)] +macro_rules! monero_definition_link { + ( + $commit:ident, // Git commit hash + $file_path:literal, // File path within `monerod`'s `src/`, e.g. `rpc/core_rpc_server_commands_defs.h` + $start:literal$(..=$end:literal)? // File lines, e.g. `0..=123` or `0` + ) => { + concat!( + "", + "[Original definition](https://github.com/monero-project/monero/blob/", + stringify!($commit), + "/src/", + $file_path, + "#L", + stringify!($start), + $( + "-L", + stringify!($end), + )? + ")." + ) + }; +} + +#[allow( + clippy::allow_attributes, + unused_imports, + reason = "used in feature gated modules" +)] +pub(crate) use monero_definition_link; diff --git a/constants/src/rpc.rs b/constants/src/rpc.rs new file mode 100644 index 0000000..1130eb7 --- /dev/null +++ b/constants/src/rpc.rs @@ -0,0 +1,101 @@ +//! RPC related. + +use core::time::Duration; + +use crate::macros::monero_definition_link; + +/// Maximum requestable block header range. +#[doc = monero_definition_link!(a1dc85c5373a30f14aaf7dcfdd95f5a7375d3623, "/src/rpc/core_rpc_server.cpp", 74)] +/// +/// This is the maximum amount of blocks that can be requested +/// per invocation of `get_block_headers` if the RPC server is +/// in restricted mode. +/// +/// Used at: +/// - +pub const RESTRICTED_BLOCK_HEADER_RANGE: u64 = 1000; + +/// Maximum requestable transaction count. +#[doc = monero_definition_link!(a1dc85c5373a30f14aaf7dcfdd95f5a7375d3623, "/src/rpc/core_rpc_server.cpp", 75)] +/// +/// This is the maximum amount of transactions that can be requested +/// per invocation of `get_transactions` and `get_indexes` if the +/// RPC server is in restricted mode. +/// +/// Used at: +/// - +/// - +pub const RESTRICTED_TRANSACTIONS_COUNT: usize = 100; + +/// Maximum amount of requestable key image checks. 
+#[doc = monero_definition_link!(a1dc85c5373a30f14aaf7dcfdd95f5a7375d3623, "/src/rpc/core_rpc_server.cpp", 76)] +/// +/// This is the maximum amount of key images that can be requested +/// to be checked per `/is_key_image_spent` call if the RPC server +/// is in restricted mode. +/// +/// Used at: +/// - +/// - +pub const RESTRICTED_SPENT_KEY_IMAGES_COUNT: usize = 5000; + +/// Maximum amount of requestable blocks. +#[doc = monero_definition_link!(a1dc85c5373a30f14aaf7dcfdd95f5a7375d3623, "/src/rpc/core_rpc_server.cpp", 77)] +/// +/// This is the maximum amount of blocks that can be +/// requested if the RPC server is in restricted mode. +/// +/// Used at: +/// - +/// - +pub const RESTRICTED_BLOCK_COUNT: usize = 1000; + +/// Maximum amount of fake outputs. +#[doc = monero_definition_link!(a1dc85c5373a30f14aaf7dcfdd95f5a7375d3623, "/src/rpc/core_rpc_server.cpp", 67)] +/// +/// This is the maximum amount of outputs that can be +/// requested if the RPC server is in restricted mode. +/// +/// Used at: +/// - +/// - +pub const MAX_RESTRICTED_GLOBAL_FAKE_OUTS_COUNT: usize = 5000; + +/// Maximum output histogram cutoff. +#[doc = monero_definition_link!(a1dc85c5373a30f14aaf7dcfdd95f5a7375d3623, "/src/rpc/core_rpc_server.cpp", 69)] +/// +/// This is the maximum cutoff duration allowed in `get_output_histogram` (3 days). +/// +/// ```rust +/// # use cuprate_constants::rpc::*; +/// assert_eq!(OUTPUT_HISTOGRAM_RECENT_CUTOFF_RESTRICTION.as_secs(), 86_400 * 3); +/// ``` +/// +/// Used at: +/// +pub const OUTPUT_HISTOGRAM_RECENT_CUTOFF_RESTRICTION: Duration = Duration::from_secs(86400 * 3); + +/// Maximum amount of requestable blocks in `/get_blocks.bin`. +#[doc = monero_definition_link!(a1dc85c5373a30f14aaf7dcfdd95f5a7375d3623, "/src/cryptonote_config.h", 128)] +pub const GET_BLOCKS_BIN_MAX_BLOCK_COUNT: u64 = 1000; + +/// Maximum amount of requestable transactions in `/get_blocks.bin`.
+#[doc = monero_definition_link!(a1dc85c5373a30f14aaf7dcfdd95f5a7375d3623, "/src/cryptonote_config.h", 129)] +pub const GET_BLOCKS_BIN_MAX_TX_COUNT: u64 = 20_000; + +/// Max message content length in the RPC server. +#[doc = monero_definition_link!(a1dc85c5373a30f14aaf7dcfdd95f5a7375d3623, "/src/cryptonote_config.h", 130)] +/// +/// This is the maximum amount of bytes an HTTP request +/// body can be before the RPC server rejects it (1 megabyte). +pub const MAX_RPC_CONTENT_LENGTH: u64 = 1_048_576; + +/// Amount of fails before blocking a remote RPC server. +#[doc = monero_definition_link!(a1dc85c5373a30f14aaf7dcfdd95f5a7375d3623, "/src/cryptonote_config.h", 159)] +/// +/// This is the amount of times an RPC will attempt to +/// connect to another remote IP before blocking it. +/// +/// RPC servers connect to nodes when they themselves +/// lack the data to fulfill the response. +pub const RPC_IP_FAILS_BEFORE_BLOCK: u64 = 3; diff --git a/helper/Cargo.toml b/helper/Cargo.toml index 614bdb2..111c6f0 100644 --- a/helper/Cargo.toml +++ b/helper/Cargo.toml @@ -18,12 +18,14 @@ cast = [] constants = [] fs = ["dep:dirs"] num = [] -map = ["cast", "dep:monero-serai"] +map = ["cast", "dep:monero-serai", "dep:cuprate-constants"] time = ["dep:chrono", "std"] thread = ["std", "dep:target_os_lib"] tx = ["dep:monero-serai"] [dependencies] +cuprate-constants = { path = "../constants", optional = true, features = ["block"] } + crossbeam = { workspace = true, optional = true } chrono = { workspace = true, optional = true, features = ["std", "clock"] } dirs = { workspace = true, optional = true } diff --git a/helper/src/constants.rs b/helper/src/constants.rs deleted file mode 100644 index b77fad1..0000000 --- a/helper/src/constants.rs +++ /dev/null @@ -1,22 +0,0 @@ -//! General `const`ants and `static`s. -//! -//! `#[no_std]` compatible. 
- -//---------------------------------------------------------------------------------------------------- Commit -/// The current commit hash of the root Cuprate repository. -/// -/// # Case & length -/// It is guaranteed that `COMMIT` will be: -/// - Lowercase -/// - 40 characters long (no newline) -/// -/// ```rust -/// # use cuprate_helper::constants::*; -/// assert_eq!(COMMIT.as_bytes().len(), 40); -/// assert_eq!(COMMIT.to_lowercase(), COMMIT); -/// ``` -pub const COMMIT: &str = core::env!("COMMIT"); // Set in `helper/build.rs`. - -//---------------------------------------------------------------------------------------------------- Tests -#[cfg(test)] -mod test {} diff --git a/helper/src/lib.rs b/helper/src/lib.rs index f29c499..bfd2fd6 100644 --- a/helper/src/lib.rs +++ b/helper/src/lib.rs @@ -11,9 +11,6 @@ pub mod atomic; #[cfg(feature = "cast")] pub mod cast; -#[cfg(feature = "constants")] -pub mod constants; - #[cfg(feature = "fs")] pub mod fs; diff --git a/helper/src/map.rs b/helper/src/map.rs index 8cf0978..b719f8f 100644 --- a/helper/src/map.rs +++ b/helper/src/map.rs @@ -7,6 +7,8 @@ //---------------------------------------------------------------------------------------------------- Use use monero_serai::transaction::Timelock; +use cuprate_constants::block::MAX_BLOCK_HEIGHT; + use crate::cast::{u64_to_usize, usize_to_u64}; //---------------------------------------------------------------------------------------------------- `(u64, u64) <-> u128` @@ -61,7 +63,7 @@ pub const fn combine_low_high_bits_to_u128(low_bits: u64, high_bits: u64) -> u12 /// Map a [`u64`] to a [`Timelock`]. /// /// Height/time is not differentiated via type, but rather: -/// "height is any value less than `500_000_000` and timestamp is any value above" +/// "height is any value less than [`MAX_BLOCK_HEIGHT`] and timestamp is any value above" /// so the `u64/usize` is stored without any tag. /// /// See [`timelock_to_u64`] for the inverse function. 
@@ -72,14 +74,15 @@ pub const fn combine_low_high_bits_to_u128(low_bits: u64, high_bits: u64) -> u12 /// ```rust /// # use cuprate_helper::map::*; /// # use monero_serai::transaction::*; +/// use cuprate_constants::block::{MAX_BLOCK_HEIGHT, MAX_BLOCK_HEIGHT_USIZE}; /// assert_eq!(u64_to_timelock(0), Timelock::None); -/// assert_eq!(u64_to_timelock(499_999_999), Timelock::Block(499_999_999)); -/// assert_eq!(u64_to_timelock(500_000_000), Timelock::Time(500_000_000)); +/// assert_eq!(u64_to_timelock(MAX_BLOCK_HEIGHT-1), Timelock::Block(MAX_BLOCK_HEIGHT_USIZE-1)); +/// assert_eq!(u64_to_timelock(MAX_BLOCK_HEIGHT), Timelock::Time(MAX_BLOCK_HEIGHT)); /// ``` pub const fn u64_to_timelock(u: u64) -> Timelock { if u == 0 { Timelock::None - } else if u < 500_000_000 { + } else if u < MAX_BLOCK_HEIGHT { Timelock::Block(u64_to_usize(u)) } else { Timelock::Time(u) @@ -93,9 +96,10 @@ pub const fn u64_to_timelock(u: u64) -> Timelock { /// ```rust /// # use cuprate_helper::map::*; /// # use monero_serai::transaction::*; +/// use cuprate_constants::block::{MAX_BLOCK_HEIGHT, MAX_BLOCK_HEIGHT_USIZE}; /// assert_eq!(timelock_to_u64(Timelock::None), 0); -/// assert_eq!(timelock_to_u64(Timelock::Block(499_999_999)), 499_999_999); -/// assert_eq!(timelock_to_u64(Timelock::Time(500_000_000)), 500_000_000); +/// assert_eq!(timelock_to_u64(Timelock::Block(MAX_BLOCK_HEIGHT_USIZE-1)), MAX_BLOCK_HEIGHT-1); +/// assert_eq!(timelock_to_u64(Timelock::Time(MAX_BLOCK_HEIGHT)), MAX_BLOCK_HEIGHT); /// ``` pub const fn timelock_to_u64(timelock: Timelock) -> u64 { match timelock { diff --git a/p2p/address-book/Cargo.toml b/p2p/address-book/Cargo.toml index 0871163..9afc255 100644 --- a/p2p/address-book/Cargo.toml +++ b/p2p/address-book/Cargo.toml @@ -7,6 +7,7 @@ authors = ["Boog900"] [dependencies] +cuprate-constants = { path = "../../constants" } cuprate-pruning = { path = "../../pruning" } cuprate-p2p-core = { path = "../p2p-core" } diff --git a/p2p/address-book/src/peer_list.rs 
b/p2p/address-book/src/peer_list.rs index 9b98a8a..fdaf336 100644 --- a/p2p/address-book/src/peer_list.rs +++ b/p2p/address-book/src/peer_list.rs @@ -3,8 +3,9 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use indexmap::IndexMap; use rand::prelude::*; +use cuprate_constants::block::MAX_BLOCK_HEIGHT_USIZE; use cuprate_p2p_core::{services::ZoneSpecificPeerListEntryBase, NetZoneAddress, NetworkZone}; -use cuprate_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; +use cuprate_pruning::PruningSeed; #[cfg(test)] pub(crate) mod tests; @@ -97,7 +98,7 @@ impl PeerList { if let Some(needed_height) = block_needed { let (_, addresses_with_block) = self.pruning_seeds.iter().find(|(seed, _)| { // TODO: factor in peer blockchain height? - seed.get_next_unpruned_block(needed_height, CRYPTONOTE_MAX_BLOCK_HEIGHT) + seed.get_next_unpruned_block(needed_height, MAX_BLOCK_HEIGHT_USIZE) .expect("Block needed is higher than max block allowed.") == needed_height })?; diff --git a/p2p/p2p/Cargo.toml b/p2p/p2p/Cargo.toml index b53baaa..3444b5e 100644 --- a/p2p/p2p/Cargo.toml +++ b/p2p/p2p/Cargo.toml @@ -6,6 +6,7 @@ license = "MIT" authors = ["Boog900"] [dependencies] +cuprate-constants = { path = "../../constants" } cuprate-fixed-bytes = { path = "../../net/fixed-bytes" } cuprate-wire = { path = "../../net/wire" } cuprate-p2p-core = { path = "../p2p-core", features = ["borsh"] } diff --git a/p2p/p2p/src/block_downloader.rs b/p2p/p2p/src/block_downloader.rs index eccb385..72eac28 100644 --- a/p2p/p2p/src/block_downloader.rs +++ b/p2p/p2p/src/block_downloader.rs @@ -22,8 +22,9 @@ use tower::{Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; use cuprate_async_buffer::{BufferAppender, BufferStream}; +use cuprate_constants::block::MAX_BLOCK_HEIGHT_USIZE; use cuprate_p2p_core::{handles::ConnectionHandle, NetworkZone}; -use cuprate_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; +use cuprate_pruning::PruningSeed; use crate::{ client_pool::{ClientPool, 
ClientPoolDropGuard}, @@ -670,8 +671,8 @@ const fn client_has_block_in_range( start_height: usize, length: usize, ) -> bool { - pruning_seed.has_full_block(start_height, CRYPTONOTE_MAX_BLOCK_HEIGHT) - && pruning_seed.has_full_block(start_height + length, CRYPTONOTE_MAX_BLOCK_HEIGHT) + pruning_seed.has_full_block(start_height, MAX_BLOCK_HEIGHT_USIZE) + && pruning_seed.has_full_block(start_height + length, MAX_BLOCK_HEIGHT_USIZE) } /// Calculates the next amount of blocks to request in a batch. diff --git a/p2p/p2p/src/block_downloader/block_queue.rs b/p2p/p2p/src/block_downloader/block_queue.rs index 5dd1b0d..ba7c02b 100644 --- a/p2p/p2p/src/block_downloader/block_queue.rs +++ b/p2p/p2p/src/block_downloader/block_queue.rs @@ -119,12 +119,13 @@ mod tests { use proptest::{collection::vec, prelude::*}; use tokio_test::block_on; + use cuprate_constants::block::MAX_BLOCK_HEIGHT_USIZE; use cuprate_p2p_core::handles::HandleBuilder; use super::*; prop_compose! { - fn ready_batch_strategy()(start_height in 0_usize..500_000_000) -> ReadyQueueBatch { + fn ready_batch_strategy()(start_height in 0..MAX_BLOCK_HEIGHT_USIZE) -> ReadyQueueBatch { let (_, peer_handle) = HandleBuilder::new().build(); ReadyQueueBatch { diff --git a/p2p/p2p/src/block_downloader/chain_tracker.rs b/p2p/p2p/src/block_downloader/chain_tracker.rs index a2f03c5..df5aebb 100644 --- a/p2p/p2p/src/block_downloader/chain_tracker.rs +++ b/p2p/p2p/src/block_downloader/chain_tracker.rs @@ -2,8 +2,9 @@ use std::{cmp::min, collections::VecDeque}; use cuprate_fixed_bytes::ByteArrayVec; +use cuprate_constants::block::MAX_BLOCK_HEIGHT_USIZE; use cuprate_p2p_core::{client::InternalPeerID, handles::ConnectionHandle, NetworkZone}; -use cuprate_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; +use cuprate_pruning::PruningSeed; use crate::constants::MEDIUM_BAN; @@ -87,7 +88,7 @@ impl ChainTracker { /// Returns `true` if the peer is expected to have the next block after our highest seen block /// according to their pruning 
seed. pub(crate) fn should_ask_for_next_chain_entry(&self, seed: &PruningSeed) -> bool { - seed.has_full_block(self.top_height(), CRYPTONOTE_MAX_BLOCK_HEIGHT) + seed.has_full_block(self.top_height(), MAX_BLOCK_HEIGHT_USIZE) } /// Returns the simple history, the highest seen block and the genesis block. @@ -162,7 +163,7 @@ impl ChainTracker { pruning_seed: &PruningSeed, max_blocks: usize, ) -> Option> { - if !pruning_seed.has_full_block(self.first_height, CRYPTONOTE_MAX_BLOCK_HEIGHT) { + if !pruning_seed.has_full_block(self.first_height, MAX_BLOCK_HEIGHT_USIZE) { return None; } @@ -175,10 +176,10 @@ impl ChainTracker { let end_idx = min( min(entry.ids.len(), max_blocks), pruning_seed - .get_next_pruned_block(self.first_height, CRYPTONOTE_MAX_BLOCK_HEIGHT) + .get_next_pruned_block(self.first_height, MAX_BLOCK_HEIGHT_USIZE) .expect("We use local values to calculate height which should be below the sanity limit") // Use a big value as a fallback if the seed does no pruning. - .unwrap_or(CRYPTONOTE_MAX_BLOCK_HEIGHT) + .unwrap_or(MAX_BLOCK_HEIGHT_USIZE) - self.first_height, ); diff --git a/pruning/Cargo.toml b/pruning/Cargo.toml index 497c04b..e898fd5 100644 --- a/pruning/Cargo.toml +++ b/pruning/Cargo.toml @@ -10,6 +10,8 @@ default = [] borsh = ["dep:borsh"] [dependencies] +cuprate-constants = { path = "../constants" } + thiserror = { workspace = true } borsh = { workspace = true, features = ["derive", "std"], optional = true } diff --git a/pruning/src/lib.rs b/pruning/src/lib.rs index 1f5ee2a..cd31598 100644 --- a/pruning/src/lib.rs +++ b/pruning/src/lib.rs @@ -20,9 +20,10 @@ use std::cmp::Ordering; +use cuprate_constants::block::MAX_BLOCK_HEIGHT_USIZE; + use thiserror::Error; -pub const CRYPTONOTE_MAX_BLOCK_HEIGHT: usize = 500000000; /// The default log stripes for Monero pruning. pub const CRYPTONOTE_PRUNING_LOG_STRIPES: u32 = 3; /// The amount of blocks that peers keep before another stripe starts storing blocks. 
@@ -41,9 +42,9 @@ pub enum PruningError { LogStripesOutOfRange, #[error("Stripe is out of range")] StripeOutOfRange, - #[error("The block height is greater than `CRYPTONOTE_MAX_BLOCK_HEIGHT`")] + #[error("The block height is greater than `MAX_BLOCK_HEIGHT_USIZE`")] BlockHeightTooLarge, - #[error("The blockchain height is greater than `CRYPTONOTE_MAX_BLOCK_HEIGHT`")] + #[error("The blockchain height is greater than `MAX_BLOCK_HEIGHT_USIZE`")] BlockChainHeightTooLarge, #[error("The calculated height is smaller than the block height entered")] CalculatedHeightSmallerThanEnteredBlock, @@ -144,7 +145,7 @@ impl PruningSeed { /// ### Errors /// /// This function will return an Error if the inputted `block_height` or - /// `blockchain_height` is greater than [`CRYPTONOTE_MAX_BLOCK_HEIGHT`]. + /// `blockchain_height` is greater than [`MAX_BLOCK_HEIGHT_USIZE`]. /// /// This function will also error if `block_height` > `blockchain_height` pub fn get_next_pruned_block( @@ -167,7 +168,7 @@ impl PruningSeed { /// ### Errors /// /// This function will return an Error if the inputted `block_height` or - /// `blockchain_height` is greater than [`CRYPTONOTE_MAX_BLOCK_HEIGHT`]. + /// `blockchain_height` is greater than [`MAX_BLOCK_HEIGHT_USIZE`]. /// /// This function will also error if `block_height` > `blockchain_height` /// @@ -322,7 +323,7 @@ impl DecompressedPruningSeed { /// ### Errors /// /// This function will return an Error if the inputted `block_height` or - /// `blockchain_height` is greater than [`CRYPTONOTE_MAX_BLOCK_HEIGHT`]. + /// `blockchain_height` is greater than [`MAX_BLOCK_HEIGHT_USIZE`]. 
/// /// This function will also error if `block_height` > `blockchain_height` /// @@ -331,11 +332,11 @@ impl DecompressedPruningSeed { block_height: usize, blockchain_height: usize, ) -> Result { - if block_height > CRYPTONOTE_MAX_BLOCK_HEIGHT || block_height > blockchain_height { + if block_height > MAX_BLOCK_HEIGHT_USIZE || block_height > blockchain_height { return Err(PruningError::BlockHeightTooLarge); } - if blockchain_height > CRYPTONOTE_MAX_BLOCK_HEIGHT { + if blockchain_height > MAX_BLOCK_HEIGHT_USIZE { return Err(PruningError::BlockChainHeightTooLarge); } @@ -388,7 +389,7 @@ impl DecompressedPruningSeed { /// ### Errors /// /// This function will return an Error if the inputted `block_height` or - /// `blockchain_height` is greater than [`CRYPTONOTE_MAX_BLOCK_HEIGHT`]. + /// `blockchain_height` is greater than [`MAX_BLOCK_HEIGHT_USIZE`]. /// /// This function will also error if `block_height` > `blockchain_height` /// diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index 6eecb89..0057911 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -37,6 +37,7 @@ thread_local = { workspace = true, optional = true } rayon = { workspace = true, optional = true } [dev-dependencies] +cuprate-constants = { path = "../../constants" } cuprate-helper = { path = "../../helper", features = ["thread", "cast"] } cuprate-test-utils = { path = "../../test-utils" } diff --git a/storage/blockchain/src/service/free.rs b/storage/blockchain/src/service/free.rs index d8a878c..7cc8da8 100644 --- a/storage/blockchain/src/service/free.rs +++ b/storage/blockchain/src/service/free.rs @@ -128,11 +128,13 @@ pub(super) fn map_valid_alt_block_to_verified_block( mod tests { use proptest::prelude::*; + use cuprate_constants::block::MAX_BLOCK_HEIGHT_USIZE; + use super::*; proptest! { #[test] - fn compact_history(top_height in 0_usize..500_000_000) { + fn compact_history(top_height in 0..MAX_BLOCK_HEIGHT_USIZE) { let mut heights = (0..) 
.map(compact_history_index_to_height_offset::<11>) .map_while(|i| top_height.checked_sub(i)) diff --git a/storage/database/README.md b/storage/database/README.md index c805ab0..fe22247 100644 --- a/storage/database/README.md +++ b/storage/database/README.md @@ -76,7 +76,7 @@ and should not be relied upon. This extends to any `struct/enum` that contains ` - It implements [`Env`] - Upon [`Drop::drop`], all database data will sync to disk -Note that `ConcreteEnv` itself is not a clonable type, +Note that `ConcreteEnv` itself is not a cloneable type, it should be wrapped in [`std::sync::Arc`]. use bytemuck::{Pod, Zeroable}; - use monero_serai::transaction::Timelock; use cuprate_types::{CachedVerificationState, HardFork}; @@ -17,6 +16,9 @@ pub type KeyImage = [u8; 32]; /// A transaction hash. pub type TransactionHash = [u8; 32]; +/// A transaction blob hash. +pub type TransactionBlobHash = [u8; 32]; + bitflags::bitflags! { /// Flags representing the state of the transaction in the pool. #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] From 44981f2b2490daf7f4d2467d4ebed7cdff1bd707 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Fri, 1 Nov 2024 20:22:14 +0000 Subject: [PATCH 089/104] CI: add cargo hack (#170) * add workflow * fix errors * fix workflow * install dependencies * fix more errors * Update CONTRIBUTING.md * Update CONTRIBUTING.md Co-authored-by: hinto-janai * fix hack + enable it for cuprate-database * move hack to main CI * fix docs * fix ci formatting * fix txpool tests * fix CONTRIBUTING.md formatting * service -> tower::Service * review fixes * review fixes * fix CI --------- Co-authored-by: hinto-janai --- .github/workflows/ci.yml | 7 ++++++- CONTRIBUTING.md | 15 +++++++++------ binaries/cuprated/Cargo.toml | 2 +- helper/Cargo.toml | 2 +- helper/src/lib.rs | 2 +- net/epee-encoding/src/container_as_blob.rs | 2 ++ net/epee-encoding/src/error.rs | 1 + net/epee-encoding/src/lib.rs | 1 + net/epee-encoding/src/value.rs | 2 +- 
p2p/address-book/Cargo.toml | 2 +- pruning/Cargo.toml | 2 +- rpc/interface/Cargo.toml | 8 ++++---- rpc/types/src/bin.rs | 8 ++++++-- rpc/types/src/json.rs | 10 ++++++---- rpc/types/src/lib.rs | 1 + rpc/types/src/misc/distribution.rs | 14 +++++--------- rpc/types/src/misc/misc.rs | 10 +++++----- rpc/types/src/other.rs | 4 +++- storage/blockchain/Cargo.toml | 13 ++++++------- storage/blockchain/README.md | 5 +---- storage/blockchain/src/lib.rs | 6 +----- storage/blockchain/src/service/mod.rs | 2 -- storage/database/Cargo.toml | 6 +++--- storage/database/src/backend/mod.rs | 2 ++ storage/service/Cargo.toml | 10 ++++++++-- storage/txpool/Cargo.toml | 11 +++++------ storage/txpool/README.md | 4 ---- storage/txpool/src/lib.rs | 6 +++--- storage/txpool/src/service.rs | 4 +--- types/Cargo.toml | 10 ++++++---- types/src/hex.rs | 1 + types/src/json/block.rs | 6 +++--- types/src/json/output.rs | 2 +- types/src/json/tx.rs | 6 +++--- 34 files changed, 99 insertions(+), 88 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8c2271d..367e8e1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -133,7 +133,12 @@ jobs: - name: Test run: | cargo test --all-features --workspace - cargo test --package cuprate-blockchain --no-default-features --features redb --features service + cargo test --package cuprate-blockchain --no-default-features --features redb + + - name: Hack Check + run: | + cargo install cargo-hack --locked + cargo hack --workspace check --feature-powerset --no-dev-deps # TODO: upload binaries with `actions/upload-artifact@v3` - name: Build diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1b66a58..2d99060 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -120,12 +120,15 @@ Before pushing your code, please run the following at the root of the repository After that, ensure all other CI passes by running: -| Command | Does what | -|------------------------------------------------------------------------|-----------| 
-| `RUSTDOCFLAGS='-D warnings' cargo doc --workspace --all-features` | Checks documentation is OK -| `cargo clippy --workspace --all-features --all-targets -- -D warnings` | Checks clippy lints are satisfied -| `cargo test --all-features --workspace` | Runs all tests -| `cargo build --all-features --all-targets --workspace` | Builds all code +| Command | Does what | +|------------------------------------------------------------------------|-------------------------------------------------------------------------| +| `RUSTDOCFLAGS='-D warnings' cargo doc --workspace --all-features` | Checks documentation is OK | +| `cargo clippy --workspace --all-features --all-targets -- -D warnings` | Checks clippy lints are satisfied | +| `cargo test --all-features --workspace` | Runs all tests | +| `cargo build --all-features --all-targets --workspace` | Builds all code | +| `cargo hack --workspace check --feature-powerset --no-dev-deps` | Uses `cargo hack` to check our crates build with different features set | + +`cargo hack` can be installed with `cargo` from: https://github.com/taiki-e/cargo-hack. 
**Note: in order for some tests to work, you will need to place a [`monerod`](https://www.getmonero.org/downloads/) binary at the root of the repository.** diff --git a/binaries/cuprated/Cargo.toml b/binaries/cuprated/Cargo.toml index 880c205..d59b4c3 100644 --- a/binaries/cuprated/Cargo.toml +++ b/binaries/cuprated/Cargo.toml @@ -24,7 +24,7 @@ cuprate-p2p-core = { workspace = true } cuprate-dandelion-tower = { workspace = true, features = ["txpool"] } cuprate-async-buffer = { workspace = true } cuprate-address-book = { workspace = true } -cuprate-blockchain = { workspace = true, features = ["service"] } +cuprate-blockchain = { workspace = true } cuprate-database-service = { workspace = true } cuprate-txpool = { workspace = true } cuprate-database = { workspace = true } diff --git a/helper/Cargo.toml b/helper/Cargo.toml index ad78a44..1b3158f 100644 --- a/helper/Cargo.toml +++ b/helper/Cargo.toml @@ -17,7 +17,7 @@ asynch = ["dep:futures", "dep:rayon"] cast = [] constants = [] crypto = ["dep:curve25519-dalek", "dep:monero-serai", "std"] -fs = ["dep:dirs"] +fs = ["dep:dirs", "std"] num = [] map = ["cast", "dep:monero-serai", "dep:cuprate-constants"] time = ["dep:chrono", "std"] diff --git a/helper/src/lib.rs b/helper/src/lib.rs index 47d47a2..9bd64fa 100644 --- a/helper/src/lib.rs +++ b/helper/src/lib.rs @@ -11,7 +11,7 @@ pub mod atomic; #[cfg(feature = "cast")] pub mod cast; -#[cfg(feature = "fs")] +#[cfg(all(feature = "fs", feature = "std"))] pub mod fs; pub mod network; diff --git a/net/epee-encoding/src/container_as_blob.rs b/net/epee-encoding/src/container_as_blob.rs index 83078c2..363e157 100644 --- a/net/epee-encoding/src/container_as_blob.rs +++ b/net/epee-encoding/src/container_as_blob.rs @@ -1,3 +1,5 @@ +use alloc::{string::ToString, vec, vec::Vec}; + use bytes::{Buf, BufMut, Bytes, BytesMut}; use ref_cast::RefCast; diff --git a/net/epee-encoding/src/error.rs b/net/epee-encoding/src/error.rs index 756cd13..7206189 100644 --- a/net/epee-encoding/src/error.rs 
+++ b/net/epee-encoding/src/error.rs @@ -1,3 +1,4 @@ +use alloc::string::{String, ToString}; use core::{ fmt::{Debug, Formatter}, num::TryFromIntError, diff --git a/net/epee-encoding/src/lib.rs b/net/epee-encoding/src/lib.rs index d55a546..a6ff1b0 100644 --- a/net/epee-encoding/src/lib.rs +++ b/net/epee-encoding/src/lib.rs @@ -64,6 +64,7 @@ use hex as _; extern crate alloc; +use alloc::string::ToString; use core::str::from_utf8 as str_from_utf8; use bytes::{Buf, BufMut, Bytes, BytesMut}; diff --git a/net/epee-encoding/src/value.rs b/net/epee-encoding/src/value.rs index 816203e..4762c96 100644 --- a/net/epee-encoding/src/value.rs +++ b/net/epee-encoding/src/value.rs @@ -1,7 +1,7 @@ //! This module contains a [`EpeeValue`] trait and //! impls for some possible base epee values. -use alloc::{string::String, vec::Vec}; +use alloc::{string::String, vec, vec::Vec}; use core::fmt::Debug; use bytes::{Buf, BufMut, Bytes, BytesMut}; diff --git a/p2p/address-book/Cargo.toml b/p2p/address-book/Cargo.toml index 9cbba71..a88819f 100644 --- a/p2p/address-book/Cargo.toml +++ b/p2p/address-book/Cargo.toml @@ -9,7 +9,7 @@ authors = ["Boog900"] [dependencies] cuprate-constants = { workspace = true } cuprate-pruning = { workspace = true } -cuprate-p2p-core = { workspace = true } +cuprate-p2p-core = { workspace = true, features = ["borsh"] } tower = { workspace = true, features = ["util"] } tokio = { workspace = true, features = ["time", "fs", "rt"]} diff --git a/pruning/Cargo.toml b/pruning/Cargo.toml index 6fcc74e..4b03551 100644 --- a/pruning/Cargo.toml +++ b/pruning/Cargo.toml @@ -10,7 +10,7 @@ default = [] borsh = ["dep:borsh"] [dependencies] -cuprate-constants = { workspace = true } +cuprate-constants = { workspace = true, features = ["block"] } thiserror = { workspace = true } diff --git a/rpc/interface/Cargo.toml b/rpc/interface/Cargo.toml index ef62d34..c5d4db7 100644 --- a/rpc/interface/Cargo.toml +++ b/rpc/interface/Cargo.toml @@ -10,20 +10,20 @@ keywords = ["cuprate", 
"rpc", "interface"] [features] default = ["dummy", "serde"] -dummy = [] +dummy = ["dep:cuprate-helper", "dep:futures"] [dependencies] cuprate-epee-encoding = { workspace = true, default-features = false } cuprate-json-rpc = { workspace = true, default-features = false } cuprate-rpc-types = { workspace = true, features = ["serde", "epee"], default-features = false } -cuprate-helper = { workspace = true, features = ["asynch"], default-features = false } +cuprate-helper = { workspace = true, features = ["asynch"], default-features = false, optional = true } anyhow = { workspace = true } axum = { version = "0.7.5", features = ["json"], default-features = false } serde = { workspace = true, optional = true } -tower = { workspace = true } +tower = { workspace = true, features = ["util"] } paste = { workspace = true } -futures = { workspace = true } +futures = { workspace = true, optional = true } [dev-dependencies] cuprate-test-utils = { workspace = true } diff --git a/rpc/types/src/bin.rs b/rpc/types/src/bin.rs index a68d3e1..7b94191 100644 --- a/rpc/types/src/bin.rs +++ b/rpc/types/src/bin.rs @@ -20,12 +20,16 @@ use cuprate_types::BlockCompleteEntry; use crate::{ base::AccessResponseBase, - defaults::{default_false, default_zero}, macros::{define_request, define_request_and_response, define_request_and_response_doc}, - misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolInfoExtent, PoolTxInfo, Status}, + misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolTxInfo, Status}, rpc_call::RpcCallValue, }; +#[cfg(any(feature = "epee", feature = "serde"))] +use crate::defaults::{default_false, default_zero}; +#[cfg(feature = "epee")] +use crate::misc::PoolInfoExtent; + //---------------------------------------------------------------------------------------------------- Definitions define_request_and_response! 
{ get_blocks_by_heightbin, diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index fd9ffa3..6fb538c 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -8,10 +8,6 @@ use serde::{Deserialize, Serialize}; use crate::{ base::{AccessResponseBase, ResponseBase}, - defaults::{ - default_false, default_height, default_one, default_string, default_true, default_vec, - default_zero, - }, macros::define_request_and_response, misc::{ AuxPow, BlockHeader, ChainInfo, ConnectionInfo, Distribution, GetBan, @@ -21,6 +17,12 @@ use crate::{ rpc_call::RpcCallValue, }; +#[cfg(any(feature = "epee", feature = "serde"))] +use crate::defaults::{ + default_false, default_height, default_one, default_string, default_true, default_vec, + default_zero, +}; + //---------------------------------------------------------------------------------------------------- Macro /// Adds a (de)serialization doc-test to a type in `json.rs`. /// diff --git a/rpc/types/src/lib.rs b/rpc/types/src/lib.rs index be1069e..403a3ea 100644 --- a/rpc/types/src/lib.rs +++ b/rpc/types/src/lib.rs @@ -6,6 +6,7 @@ )] mod constants; +#[cfg(any(feature = "serde", feature = "epee"))] mod defaults; mod free; mod macros; diff --git a/rpc/types/src/misc/distribution.rs b/rpc/types/src/misc/distribution.rs index faac7ad..e920d12 100644 --- a/rpc/types/src/misc/distribution.rs +++ b/rpc/types/src/misc/distribution.rs @@ -20,8 +20,8 @@ use cuprate_epee_encoding::{ "rpc/core_rpc_server_commands_defs.h", 45..=55 )] -#[cfg(feature = "epee")] -fn compress_integer_array(_: &[u64]) -> error::Result> { +#[cfg(any(feature = "epee", feature = "serde"))] +fn compress_integer_array(_: &[u64]) -> Vec { todo!() } @@ -33,6 +33,7 @@ fn compress_integer_array(_: &[u64]) -> error::Result> { "rpc/core_rpc_server_commands_defs.h", 57..=72 )] +#[cfg(any(feature = "epee", feature = "serde"))] fn decompress_integer_array(_: &[u8]) -> Vec { todo!() } @@ -135,12 +136,7 @@ fn serialize_distribution_as_compressed_data(v: &Vec, s: S) 
-> Result compressed_data.serialize(s), - Err(_) => Err(serde::ser::Error::custom( - "error compressing distribution array", - )), - } + compress_integer_array(v).serialize(s) } /// Deserializer function for [`DistributionCompressedBinary::distribution`]. @@ -256,7 +252,7 @@ impl EpeeObject for Distribution { distribution, amount, }) => { - let compressed_data = compress_integer_array(&distribution)?; + let compressed_data = compress_integer_array(&distribution); start_height.write(w)?; base.write(w)?; diff --git a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs index 842997b..4430dbe 100644 --- a/rpc/types/src/misc/misc.rs +++ b/rpc/types/src/misc/misc.rs @@ -11,10 +11,10 @@ use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] use cuprate_epee_encoding::epee_object; -use crate::{ - defaults::{default_string, default_zero}, - macros::monero_definition_link, -}; +use crate::macros::monero_definition_link; + +#[cfg(any(feature = "epee", feature = "serde"))] +use crate::defaults::default_zero; //---------------------------------------------------------------------------------------------------- Macros /// This macro (local to this file) defines all the misc types. @@ -148,7 +148,7 @@ define_struct_and_impl_epee! { )] /// Used in [`crate::json::SetBansRequest`]. 
SetBan { - #[cfg_attr(feature = "serde", serde(default = "default_string"))] + #[cfg_attr(feature = "serde", serde(default = "crate::defaults::default_string"))] host: String, #[cfg_attr(feature = "serde", serde(default = "default_zero"))] ip: u32, diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index e7f3394..f743392 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -8,7 +8,6 @@ use serde::{Deserialize, Serialize}; use crate::{ base::{AccessResponseBase, ResponseBase}, - defaults::{default_false, default_string, default_true, default_vec, default_zero}, macros::define_request_and_response, misc::{ GetOutputsOut, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status, TxEntry, TxInfo, @@ -17,6 +16,9 @@ use crate::{ RpcCallValue, }; +#[cfg(any(feature = "serde", feature = "epee"))] +use crate::defaults::{default_false, default_string, default_true, default_vec, default_zero}; + //---------------------------------------------------------------------------------------------------- Macro /// Adds a (de)serialization doc-test to a type in `other.rs`. 
/// diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index d0a43b3..6fd973c 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -9,32 +9,31 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/storage/cuprate-bloc keywords = ["cuprate", "blockchain", "database"] [features] -default = ["heed", "service"] +default = ["heed"] # default = ["redb", "service"] # default = ["redb-memory", "service"] heed = ["cuprate-database/heed"] redb = ["cuprate-database/redb"] redb-memory = ["cuprate-database/redb-memory"] -service = ["dep:thread_local", "dep:rayon", "cuprate-helper/thread"] +serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"] [dependencies] cuprate-database = { workspace = true } cuprate-database-service = { workspace = true } -cuprate-helper = { workspace = true, features = ["fs", "map", "crypto"] } +cuprate-helper = { workspace = true, features = ["fs", "map", "crypto", "tx", "thread"] } cuprate-types = { workspace = true, features = ["blockchain"] } cuprate-pruning = { workspace = true } bitflags = { workspace = true, features = ["std", "serde", "bytemuck"] } bytemuck = { workspace = true, features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } curve25519-dalek = { workspace = true } -rand = { workspace = true } +rand = { workspace = true, features = ["std", "std_rng"] } monero-serai = { workspace = true, features = ["std"] } serde = { workspace = true, optional = true } -# `service` feature. 
tower = { workspace = true } -thread_local = { workspace = true, optional = true } -rayon = { workspace = true, optional = true } +thread_local = { workspace = true } +rayon = { workspace = true } [dev-dependencies] cuprate-constants = { workspace = true } diff --git a/storage/blockchain/README.md b/storage/blockchain/README.md index 4800546..3f97a3d 100644 --- a/storage/blockchain/README.md +++ b/storage/blockchain/README.md @@ -32,9 +32,6 @@ use cuprate_blockchain::{ This ensures the types/traits used from `cuprate_database` are the same ones used by `cuprate_blockchain` internally. # Feature flags -The `service` module requires the `service` feature to be enabled. -See the module for more documentation. - Different database backends are enabled by the feature flags: - `heed` (LMDB) - `redb` @@ -45,7 +42,7 @@ The default is `heed`. # Invariants when not using `service` -`cuprate_blockchain` can be used without the `service` feature enabled but +`cuprate_blockchain` can be used without the `service` module but there are some things that must be kept in mind when doing so. Failing to uphold these invariants may cause panics. 
diff --git a/storage/blockchain/src/lib.rs b/storage/blockchain/src/lib.rs index f66cd99..7db8cc6 100644 --- a/storage/blockchain/src/lib.rs +++ b/storage/blockchain/src/lib.rs @@ -29,16 +29,12 @@ pub use free::open; pub mod config; pub mod ops; +pub mod service; pub mod tables; pub mod types; -//---------------------------------------------------------------------------------------------------- Feature-gated -#[cfg(feature = "service")] -pub mod service; - //---------------------------------------------------------------------------------------------------- Private #[cfg(test)] pub(crate) mod tests; -#[cfg(feature = "service")] // only needed in `service` for now pub(crate) mod unsafe_sendable; diff --git a/storage/blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs index 53bf1df..c5eb80c 100644 --- a/storage/blockchain/src/service/mod.rs +++ b/storage/blockchain/src/service/mod.rs @@ -10,8 +10,6 @@ //! //! The system is managed by this crate, and only requires [`init`] by the user. //! -//! This module must be enabled with the `service` feature. -//! //! ## Handles //! The 2 handles to the database are: //! - [`BlockchainReadHandle`] diff --git a/storage/database/Cargo.toml b/storage/database/Cargo.toml index 7a2f4ae..feeaf87 100644 --- a/storage/database/Cargo.toml +++ b/storage/database/Cargo.toml @@ -9,10 +9,10 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/storage/database" keywords = ["cuprate", "database"] [features] -# default = ["heed"] +default = ["heed"] # default = ["redb"] # default = ["redb-memory"] -heed = ["dep:heed"] +heed = [] redb = ["dep:redb"] redb-memory = ["redb"] @@ -25,7 +25,7 @@ paste = { workspace = true } thiserror = { workspace = true } # Optional features. 
-heed = { version = "0.20.5", features = ["read-txn-no-tls"], optional = true } +heed = { version = "0.20.5", features = ["read-txn-no-tls"] } redb = { version = "2.1.3", optional = true } serde = { workspace = true, optional = true } diff --git a/storage/database/src/backend/mod.rs b/storage/database/src/backend/mod.rs index 11ae40b..ebe12d8 100644 --- a/storage/database/src/backend/mod.rs +++ b/storage/database/src/backend/mod.rs @@ -4,6 +4,8 @@ cfg_if::cfg_if! { // If both backends are enabled, fallback to `heed`. // This is useful when using `--all-features`. if #[cfg(all(feature = "redb", not(feature = "heed")))] { + use heed as _; + mod redb; pub use redb::ConcreteEnv; } else { diff --git a/storage/service/Cargo.toml b/storage/service/Cargo.toml index fa6971c..ebdb13e 100644 --- a/storage/service/Cargo.toml +++ b/storage/service/Cargo.toml @@ -8,14 +8,20 @@ authors = ["Boog900"] repository = "https://github.com/Cuprate/cuprate/tree/main/storage/service" keywords = ["cuprate", "service", "database"] +[features] +default = ["heed"] +heed = ["cuprate-database/heed"] +redb = ["cuprate-database/redb"] +redb-memory = ["cuprate-database/redb-memory"] + [dependencies] cuprate-database = { workspace = true } -cuprate-helper = { workspace = true, features = ["fs", "thread", "map"] } +cuprate-helper = { workspace = true, features = ["fs", "thread", "map", "asynch"] } serde = { workspace = true, optional = true } rayon = { workspace = true } tower = { workspace = true } -futures = { workspace = true } +futures = { workspace = true, features = ["std"] } crossbeam = { workspace = true, features = ["std"] } [lints] diff --git a/storage/txpool/Cargo.toml b/storage/txpool/Cargo.toml index c301166..c908265 100644 --- a/storage/txpool/Cargo.toml +++ b/storage/txpool/Cargo.toml @@ -9,18 +9,17 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/storage/txpool" keywords = ["cuprate", "txpool", "transaction", "pool", "database"]  [features] -default = ["heed", "service"]
+default = ["heed"] # default = ["redb", "service"] # default = ["redb-memory", "service"] heed = ["cuprate-database/heed"] redb = ["cuprate-database/redb"] redb-memory = ["cuprate-database/redb-memory"] -service = ["dep:tower", "dep:rayon", "dep:cuprate-database-service"] serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"] [dependencies] cuprate-database = { workspace = true, features = ["heed"] } -cuprate-database-service = { workspace = true, optional = true } +cuprate-database-service = { workspace = true } cuprate-types = { workspace = true } cuprate-helper = { workspace = true, default-features = false, features = ["constants"] } @@ -28,11 +27,11 @@ monero-serai = { workspace = true, features = ["std"] } bytemuck = { workspace = true, features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } bitflags = { workspace = true, features = ["std", "serde", "bytemuck"] } thiserror = { workspace = true } -hex = { workspace = true } +hex = { workspace = true, features = ["std"] } blake3 = { workspace = true, features = ["std"] } -tower = { workspace = true, optional = true } -rayon = { workspace = true, optional = true } +tower = { workspace = true } +rayon = { workspace = true } serde = { workspace = true, optional = true } diff --git a/storage/txpool/README.md b/storage/txpool/README.md index 80d3b25..d14f445 100644 --- a/storage/txpool/README.md +++ b/storage/txpool/README.md @@ -37,10 +37,6 @@ use cuprate_txpool::{ This ensures the types/traits used from `cuprate_database` are the same ones used by `cuprate_txpool` internally. # Feature flags - -The `service` module requires the `service` feature to be enabled. -See the module for more documentation. 
- Different database backends are enabled by the feature flags: - `heed` (LMDB) diff --git a/storage/txpool/src/lib.rs b/storage/txpool/src/lib.rs index 8a57c72..53e53ec 100644 --- a/storage/txpool/src/lib.rs +++ b/storage/txpool/src/lib.rs @@ -4,10 +4,12 @@ clippy::significant_drop_tightening )] +// Used in docs: . +use tower as _; + pub mod config; mod free; pub mod ops; -#[cfg(feature = "service")] pub mod service; pub mod tables; mod tx; @@ -20,8 +22,6 @@ pub use tx::TxEntry; //re-exports pub use cuprate_database; -// TODO: remove when used. -use tower as _; #[cfg(test)] mod test { use cuprate_test_utils as _; diff --git a/storage/txpool/src/service.rs b/storage/txpool/src/service.rs index 91a7060..a82de5b 100644 --- a/storage/txpool/src/service.rs +++ b/storage/txpool/src/service.rs @@ -10,8 +10,6 @@ //! //! The system is managed by this crate, and only requires [`init`] by the user. //! -//! This module must be enabled with the `service` feature. -//! //! ## Handles //! The 2 handles to the database are: //! - [`TxpoolReadHandle`] @@ -42,7 +40,7 @@ //! To interact with the database (whether reading or writing data), //! a `Request` can be sent using one of the above handles. //! -//! Both the handles implement `tower::Service`, so they can be [`tower::Service::call`]ed. +//! Both the handles implement [`tower::Service`], so they can be [`tower::Service::call`]ed. //! //! An `async`hronous channel will be returned from the call. //! 
This channel can be `.await`ed upon to (eventually) receive diff --git a/types/Cargo.toml b/types/Cargo.toml index 29887bd..e1ffb19 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -12,21 +12,23 @@ keywords = ["cuprate", "types"] default = ["blockchain", "epee", "serde", "json", "hex"] blockchain = [] epee = ["dep:cuprate-epee-encoding"] -serde = ["dep:serde"] +serde = ["dep:serde", "hex"] proptest = ["dep:proptest", "dep:proptest-derive"] json = ["hex", "dep:cuprate-helper"] -hex = ["dep:hex"] +# We sadly have no choice but to enable serde here as otherwise we will get warnings from the `hex` dep being unused. +# This isn't too bad as `HexBytes` only makes sense with serde anyway. +hex = ["serde", "dep:hex"] [dependencies] cuprate-epee-encoding = { workspace = true, optional = true, features = ["std"] } cuprate-helper = { workspace = true, optional = true, features = ["cast"] } -cuprate-fixed-bytes = { workspace = true } +cuprate-fixed-bytes = { workspace = true, features = ["std", "serde"] } bytes = { workspace = true } curve25519-dalek = { workspace = true } monero-serai = { workspace = true } hex = { workspace = true, features = ["serde", "alloc"], optional = true } -serde = { workspace = true, features = ["derive"], optional = true } +serde = { workspace = true, features = ["std", "derive"], optional = true } strum = { workspace = true, features = ["derive"] } thiserror = { workspace = true } diff --git a/types/src/hex.rs b/types/src/hex.rs index 34da09d..de4fc81 100644 --- a/types/src/hex.rs +++ b/types/src/hex.rs @@ -22,6 +22,7 @@ pub struct HexBytes( #[cfg_attr(feature = "serde", serde(with = "hex::serde"))] pub [u8; N], ); +#[cfg(feature = "serde")] impl<'de, const N: usize> Deserialize<'de> for HexBytes where [u8; N]: hex::FromHex, diff --git a/types/src/json/block.rs b/types/src/json/block.rs index 1397f6f..88f134d 100644 --- a/types/src/json/block.rs +++ b/types/src/json/block.rs @@ -51,17 +51,17 @@ impl From for Block { /// [`Block::miner_tx`]. 
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[serde(untagged)] +#[cfg_attr(feature = "serde", serde(untagged))] pub enum MinerTransaction { V1 { /// This field is [flattened](https://serde.rs/field-attrs.html#flatten). - #[serde(flatten)] + #[cfg_attr(feature = "serde", serde(flatten))] prefix: MinerTransactionPrefix, signatures: [(); 0], }, V2 { /// This field is [flattened](https://serde.rs/field-attrs.html#flatten). - #[serde(flatten)] + #[cfg_attr(feature = "serde", serde(flatten))] prefix: MinerTransactionPrefix, rct_signatures: MinerTransactionRctSignatures, }, diff --git a/types/src/json/output.rs b/types/src/json/output.rs index 050132a..182618c 100644 --- a/types/src/json/output.rs +++ b/types/src/json/output.rs @@ -20,7 +20,7 @@ pub struct Output { /// [`Output::target`]. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[serde(untagged)] +#[cfg_attr(feature = "serde", serde(untagged))] pub enum Target { Key { key: HexBytes<32> }, TaggedKey { tagged_key: TaggedKey }, diff --git a/types/src/json/tx.rs b/types/src/json/tx.rs index 46ec827..a18dc89 100644 --- a/types/src/json/tx.rs +++ b/types/src/json/tx.rs @@ -24,17 +24,17 @@ use crate::{ /// - [`/get_transaction_pool` -> `tx_json`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_transaction_pool) #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[serde(untagged)] +#[cfg_attr(feature = "serde", serde(untagged))] pub enum Transaction { V1 { /// This field is [flattened](https://serde.rs/field-attrs.html#flatten). - #[serde(flatten)] + #[cfg_attr(feature = "serde", serde(flatten))] prefix: TransactionPrefix, signatures: Vec>, }, V2 { /// This field is [flattened](https://serde.rs/field-attrs.html#flatten). 
- #[serde(flatten)] + #[cfg_attr(feature = "serde", serde(flatten))] prefix: TransactionPrefix, rct_signatures: RctSignatures, /// This field is [`Some`] if [`Self::V2::rct_signatures`] From 372cab24d72bd61a51b55421e1730f57b12ea4f6 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Fri, 1 Nov 2024 18:25:55 -0400 Subject: [PATCH 090/104] cuprated: internal signatures required for RPC pt. 2 (#320) * apply diffs * clippy * fix tests * rpc: fix tests * remove `BlockchainManagerRequest::Overview` * cuprated/p2p: fix `ConnectionInfo` * move `CalculatePow` * remove `AddAuxPow` * move `Spans` and `NextNeededPruningSeed` * factor types into `cuprate-types` * scope cargo features * fix/doc type serde * Update binaries/cuprated/src/rpc/request/address_book.rs Co-authored-by: Boog900 * Update binaries/cuprated/src/rpc/request/blockchain_context.rs Co-authored-by: Boog900 * Update binaries/cuprated/src/rpc/request/blockchain_manager.rs Co-authored-by: Boog900 * fmt * txpool: collapse `TxEntry` * `ConnectionId` * fix import * fix bin --------- Co-authored-by: Boog900 --- Cargo.lock | 2 + binaries/cuprated/Cargo.toml | 49 +++--- binaries/cuprated/src/main.rs | 4 + binaries/cuprated/src/rpc.rs | 1 + binaries/cuprated/src/rpc/constants.rs | 5 + binaries/cuprated/src/rpc/handler.rs | 50 ++++++ .../cuprated/src/rpc/request/address_book.rs | 104 +++++++++--- .../cuprated/src/rpc/request/blockchain.rs | 141 ++++++++++++----- .../src/rpc/request/blockchain_context.rs | 64 ++++++-- .../src/rpc/request/blockchain_manager.rs | 104 ++++++++++-- binaries/cuprated/src/rpc/request/txpool.rs | 37 +++-- consensus/context/src/lib.rs | 19 +++ consensus/context/src/task.rs | 3 +- helper/src/cast.rs | 1 - p2p/address-book/src/book.rs | 3 +- p2p/p2p-core/Cargo.toml | 5 +- p2p/p2p-core/src/ban.rs | 23 --- .../src/client/handshaker/builder/dummy.rs | 3 +- p2p/p2p-core/src/lib.rs | 2 +- p2p/p2p-core/src/services.rs | 8 +- p2p/p2p-core/src/types.rs | 96 ++++++++++++ rpc/types/Cargo.toml | 10 +- 
rpc/types/src/base.rs | 90 ++++------- rpc/types/src/json.rs | 74 ++++----- rpc/types/src/misc/misc.rs | 4 +- rpc/types/src/other.rs | 42 ++--- storage/blockchain/src/service/read.rs | 12 ++ storage/txpool/src/service/interface.rs | 8 +- storage/txpool/src/service/read.rs | 6 +- storage/txpool/src/tx.rs | 2 + types/src/address_type.rs | 147 +++++++++++++++++ types/src/blockchain.rs | 15 +- types/src/connection_state.rs | 148 ++++++++++++++++++ types/src/lib.rs | 11 +- types/src/types.rs | 20 ++- 35 files changed, 1028 insertions(+), 285 deletions(-) create mode 100644 binaries/cuprated/src/rpc/constants.rs delete mode 100644 p2p/p2p-core/src/ban.rs create mode 100644 p2p/p2p-core/src/types.rs create mode 100644 types/src/address_type.rs create mode 100644 types/src/connection_state.rs diff --git a/Cargo.lock b/Cargo.lock index 0f851dc..7ad2f2a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -865,6 +865,7 @@ dependencies = [ "cuprate-helper", "cuprate-pruning", "cuprate-test-utils", + "cuprate-types", "cuprate-wire", "futures", "hex", @@ -1026,6 +1027,7 @@ dependencies = [ "cuprate-consensus", "cuprate-consensus-context", "cuprate-consensus-rules", + "cuprate-constants", "cuprate-cryptonight", "cuprate-dandelion-tower", "cuprate-database", diff --git a/binaries/cuprated/Cargo.toml b/binaries/cuprated/Cargo.toml index d59b4c3..9ebdd78 100644 --- a/binaries/cuprated/Cargo.toml +++ b/binaries/cuprated/Cargo.toml @@ -9,31 +9,32 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated" [dependencies] # TODO: after v1.0.0, remove unneeded dependencies. 
-cuprate-consensus = { workspace = true } -cuprate-fast-sync = { workspace = true } +cuprate-consensus = { workspace = true } +cuprate-fast-sync = { workspace = true } cuprate-consensus-context = { workspace = true } -cuprate-consensus-rules = { workspace = true } -cuprate-cryptonight = { workspace = true } -cuprate-helper = { workspace = true } -cuprate-epee-encoding = { workspace = true } -cuprate-fixed-bytes = { workspace = true } -cuprate-levin = { workspace = true } -cuprate-wire = { workspace = true } -cuprate-p2p = { workspace = true } -cuprate-p2p-core = { workspace = true } -cuprate-dandelion-tower = { workspace = true, features = ["txpool"] } -cuprate-async-buffer = { workspace = true } -cuprate-address-book = { workspace = true } -cuprate-blockchain = { workspace = true } -cuprate-database-service = { workspace = true } -cuprate-txpool = { workspace = true } -cuprate-database = { workspace = true } -cuprate-pruning = { workspace = true } -cuprate-test-utils = { workspace = true } -cuprate-types = { workspace = true } -cuprate-json-rpc = { workspace = true } -cuprate-rpc-interface = { workspace = true } -cuprate-rpc-types = { workspace = true } +cuprate-consensus-rules = { workspace = true } +cuprate-constants = { workspace = true } +cuprate-cryptonight = { workspace = true } +cuprate-helper = { workspace = true } +cuprate-epee-encoding = { workspace = true } +cuprate-fixed-bytes = { workspace = true } +cuprate-levin = { workspace = true } +cuprate-wire = { workspace = true } +cuprate-p2p = { workspace = true } +cuprate-p2p-core = { workspace = true } +cuprate-dandelion-tower = { workspace = true, features = ["txpool"] } +cuprate-async-buffer = { workspace = true } +cuprate-address-book = { workspace = true } +cuprate-blockchain = { workspace = true } +cuprate-database-service = { workspace = true } +cuprate-txpool = { workspace = true } +cuprate-database = { workspace = true } +cuprate-pruning = { workspace = true } +cuprate-test-utils = { workspace = 
true } +cuprate-types = { workspace = true } +cuprate-json-rpc = { workspace = true } +cuprate-rpc-interface = { workspace = true } +cuprate-rpc-types = { workspace = true } # TODO: after v1.0.0, remove unneeded dependencies. anyhow = { workspace = true } diff --git a/binaries/cuprated/src/main.rs b/binaries/cuprated/src/main.rs index d3fe1f5..d5c832e 100644 --- a/binaries/cuprated/src/main.rs +++ b/binaries/cuprated/src/main.rs @@ -9,6 +9,10 @@ unused_variables, clippy::needless_pass_by_value, clippy::unused_async, + clippy::diverging_sub_expression, + unused_mut, + clippy::let_unit_value, + clippy::needless_pass_by_ref_mut, reason = "TODO: remove after v1.0.0" )] diff --git a/binaries/cuprated/src/rpc.rs b/binaries/cuprated/src/rpc.rs index fe8e5f2..255d90d 100644 --- a/binaries/cuprated/src/rpc.rs +++ b/binaries/cuprated/src/rpc.rs @@ -3,6 +3,7 @@ //! Will contain the code to initiate the RPC and a request handler. mod bin; +mod constants; mod handler; mod json; mod other; diff --git a/binaries/cuprated/src/rpc/constants.rs b/binaries/cuprated/src/rpc/constants.rs new file mode 100644 index 0000000..1236269 --- /dev/null +++ b/binaries/cuprated/src/rpc/constants.rs @@ -0,0 +1,5 @@ +//! Constants used within RPC. + +/// The string message used in RPC response fields for when +/// `cuprated` does not support a field that `monerod` has. 
+pub(super) const FIELD_NOT_SUPPORTED: &str = "`cuprated` does not support this field."; diff --git a/binaries/cuprated/src/rpc/handler.rs b/binaries/cuprated/src/rpc/handler.rs index af2e3f2..1f73403 100644 --- a/binaries/cuprated/src/rpc/handler.rs +++ b/binaries/cuprated/src/rpc/handler.rs @@ -8,6 +8,8 @@ use monero_serai::block::Block; use tower::Service; use cuprate_blockchain::service::{BlockchainReadHandle, BlockchainWriteHandle}; +use cuprate_consensus::BlockChainContextService; +use cuprate_pruning::PruningSeed; use cuprate_rpc_interface::RpcHandler; use cuprate_rpc_types::{ bin::{BinRequest, BinResponse}, @@ -15,6 +17,7 @@ use cuprate_rpc_types::{ other::{OtherRequest, OtherResponse}, }; use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle}; +use cuprate_types::{AddAuxPow, AuxPow, HardFork}; use crate::rpc::{bin, json, other}; @@ -54,6 +57,32 @@ pub enum BlockchainManagerRequest { /// The height of the next block in the chain. TargetHeight, + + /// Generate new blocks. + /// + /// This request is only for regtest, see RPC's `generateblocks`. + GenerateBlocks { + /// Number of the blocks to be generated. + amount_of_blocks: u64, + /// The previous block's hash. + prev_block: [u8; 32], + /// The starting value for the nonce. + starting_nonce: u32, + /// The address that will receive the coinbase reward. + wallet_address: String, + }, + + // // TODO: the below requests actually belong to the block downloader/syncer: + // // + // /// Get [`Span`] data. + // /// + // /// This is data that describes an active downloading process, + // /// if we are fully synced, this will return an empty [`Vec`]. + // Spans, + + // + /// Get the next [`PruningSeed`] needed for a pruned sync. + NextNeededPruningSeed, } /// TODO: use real type when public. 
@@ -69,6 +98,9 @@ pub enum BlockchainManagerResponse { /// Response to [`BlockchainManagerRequest::PopBlocks`] PopBlocks { new_height: usize }, + /// Response to [`BlockchainManagerRequest::Prune`] + Prune(PruningSeed), + /// Response to [`BlockchainManagerRequest::Pruned`] Pruned(bool), @@ -83,6 +115,19 @@ pub enum BlockchainManagerResponse { /// Response to [`BlockchainManagerRequest::TargetHeight`] TargetHeight { height: usize }, + + /// Response to [`BlockchainManagerRequest::GenerateBlocks`] + GenerateBlocks { + /// Hashes of the blocks generated. + blocks: Vec<[u8; 32]>, + /// The new top height. (TODO: is this correct?) + height: usize, + }, + + // /// Response to [`BlockchainManagerRequest::Spans`]. + // Spans(Vec>), + /// Response to [`BlockchainManagerRequest::NextNeededPruningSeed`]. + NextNeededPruningSeed(PruningSeed), } /// TODO: use real type when public. @@ -102,6 +147,9 @@ pub struct CupratedRpcHandler { /// Read handle to the blockchain database. pub blockchain_read: BlockchainReadHandle, + /// Handle to the blockchain context service. + pub blockchain_context: BlockChainContextService, + /// Handle to the blockchain manager. 
pub blockchain_manager: BlockchainManagerHandle, @@ -117,6 +165,7 @@ impl CupratedRpcHandler { pub const fn new( restricted: bool, blockchain_read: BlockchainReadHandle, + blockchain_context: BlockChainContextService, blockchain_manager: BlockchainManagerHandle, txpool_read: TxpoolReadHandle, txpool_manager: std::convert::Infallible, @@ -124,6 +173,7 @@ impl CupratedRpcHandler { Self { restricted, blockchain_read, + blockchain_context, blockchain_manager, txpool_read, txpool_manager, diff --git a/binaries/cuprated/src/rpc/request/address_book.rs b/binaries/cuprated/src/rpc/request/address_book.rs index 2aa58e8..6760a6c 100644 --- a/binaries/cuprated/src/rpc/request/address_book.rs +++ b/binaries/cuprated/src/rpc/request/address_book.rs @@ -2,26 +2,33 @@ use std::convert::Infallible; -use anyhow::Error; +use anyhow::{anyhow, Error}; use tower::ServiceExt; use cuprate_helper::cast::usize_to_u64; use cuprate_p2p_core::{ services::{AddressBookRequest, AddressBookResponse}, + types::{BanState, ConnectionId}, AddressBook, NetworkZone, }; +use cuprate_pruning::PruningSeed; +use cuprate_rpc_types::misc::{ConnectionInfo, Span}; + +use crate::rpc::constants::FIELD_NOT_SUPPORTED; + +// FIXME: use `anyhow::Error` over `tower::BoxError` in address book. /// [`AddressBookRequest::PeerlistSize`] -pub(super) async fn peerlist_size( +pub(crate) async fn peerlist_size( address_book: &mut impl AddressBook, ) -> Result<(u64, u64), Error> { let AddressBookResponse::PeerlistSize { white, grey } = address_book .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? .call(AddressBookRequest::PeerlistSize) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? 
else { unreachable!(); }; @@ -29,17 +36,74 @@ pub(super) async fn peerlist_size( Ok((usize_to_u64(white), usize_to_u64(grey))) } +/// [`AddressBookRequest::ConnectionInfo`] +pub(crate) async fn connection_info( + address_book: &mut impl AddressBook, +) -> Result, Error> { + let AddressBookResponse::ConnectionInfo(vec) = address_book + .ready() + .await + .map_err(|e| anyhow!(e))? + .call(AddressBookRequest::ConnectionInfo) + .await + .map_err(|e| anyhow!(e))? + else { + unreachable!(); + }; + + // FIXME: impl this map somewhere instead of inline. + let vec = vec + .into_iter() + .map(|info| { + let (ip, port) = match info.socket_addr { + Some(socket) => (socket.ip().to_string(), socket.port().to_string()), + None => (String::new(), String::new()), + }; + + ConnectionInfo { + address: info.address.to_string(), + address_type: info.address_type, + avg_download: info.avg_download, + avg_upload: info.avg_upload, + connection_id: String::from(ConnectionId::DEFAULT_STR), + current_download: info.current_download, + current_upload: info.current_upload, + height: info.height, + host: info.host, + incoming: info.incoming, + ip, + live_time: info.live_time, + localhost: info.localhost, + local_ip: info.local_ip, + peer_id: hex::encode(info.peer_id.to_ne_bytes()), + port, + pruning_seed: info.pruning_seed.compress(), + recv_count: info.recv_count, + recv_idle_time: info.recv_idle_time, + rpc_credits_per_hash: info.rpc_credits_per_hash, + rpc_port: info.rpc_port, + send_count: info.send_count, + send_idle_time: info.send_idle_time, + state: info.state, + support_flags: info.support_flags, + } + }) + .collect(); + + Ok(vec) +} + /// [`AddressBookRequest::ConnectionCount`] -pub(super) async fn connection_count( +pub(crate) async fn connection_count( address_book: &mut impl AddressBook, ) -> Result<(u64, u64), Error> { let AddressBookResponse::ConnectionCount { incoming, outgoing } = address_book .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? 
.call(AddressBookRequest::ConnectionCount) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; @@ -48,17 +112,17 @@ pub(super) async fn connection_count( } /// [`AddressBookRequest::SetBan`] -pub(super) async fn set_ban( +pub(crate) async fn set_ban( address_book: &mut impl AddressBook, - peer: cuprate_p2p_core::ban::SetBan, + set_ban: cuprate_p2p_core::types::SetBan, ) -> Result<(), Error> { let AddressBookResponse::Ok = address_book .ready() .await - .expect("TODO") - .call(AddressBookRequest::SetBan(peer)) + .map_err(|e| anyhow!(e))? + .call(AddressBookRequest::SetBan(set_ban)) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; @@ -67,17 +131,17 @@ pub(super) async fn set_ban( } /// [`AddressBookRequest::GetBan`] -pub(super) async fn get_ban( +pub(crate) async fn get_ban( address_book: &mut impl AddressBook, peer: Z::Addr, ) -> Result, Error> { let AddressBookResponse::GetBan { unban_instant } = address_book .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? .call(AddressBookRequest::GetBan(peer)) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; @@ -86,19 +150,19 @@ pub(super) async fn get_ban( } /// [`AddressBookRequest::GetBans`] -pub(super) async fn get_bans( +pub(crate) async fn get_bans( address_book: &mut impl AddressBook, -) -> Result<(), Error> { +) -> Result>, Error> { let AddressBookResponse::GetBans(bans) = address_book .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? .call(AddressBookRequest::GetBans) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; - Ok(todo!()) + Ok(bans) } diff --git a/binaries/cuprated/src/rpc/request/blockchain.rs b/binaries/cuprated/src/rpc/request/blockchain.rs index 8af80e5..97c7f48 100644 --- a/binaries/cuprated/src/rpc/request/blockchain.rs +++ b/binaries/cuprated/src/rpc/request/blockchain.rs @@ -1,24 +1,61 @@ //! Functions for [`BlockchainReadRequest`]. 
use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, ops::Range, }; use anyhow::Error; -use cuprate_blockchain::service::BlockchainReadHandle; +use monero_serai::block::Block; use tower::{Service, ServiceExt}; +use cuprate_blockchain::{service::BlockchainReadHandle, types::AltChainInfo}; use cuprate_helper::cast::{u64_to_usize, usize_to_u64}; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse}, - Chain, CoinbaseTxSum, ExtendedBlockHeader, MinerData, OutputHistogramEntry, - OutputHistogramInput, OutputOnChain, + Chain, ChainInfo, CoinbaseTxSum, ExtendedBlockHeader, HardFork, MinerData, + OutputHistogramEntry, OutputHistogramInput, OutputOnChain, }; +/// [`BlockchainReadRequest::Block`]. +pub(crate) async fn block( + blockchain_read: &mut BlockchainReadHandle, + height: u64, +) -> Result { + let BlockchainResponse::Block(block) = blockchain_read + .ready() + .await? + .call(BlockchainReadRequest::Block { + height: u64_to_usize(height), + }) + .await? + else { + unreachable!(); + }; + + Ok(block) +} + +/// [`BlockchainReadRequest::BlockByHash`]. +pub(crate) async fn block_by_hash( + blockchain_read: &mut BlockchainReadHandle, + hash: [u8; 32], +) -> Result { + let BlockchainResponse::Block(block) = blockchain_read + .ready() + .await? + .call(BlockchainReadRequest::BlockByHash(hash)) + .await? + else { + unreachable!(); + }; + + Ok(block) +} + /// [`BlockchainReadRequest::BlockExtendedHeader`]. -pub(super) async fn block_extended_header( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn block_extended_header( + blockchain_read: &mut BlockchainReadHandle, height: u64, ) -> Result { let BlockchainResponse::BlockExtendedHeader(header) = blockchain_read @@ -36,8 +73,8 @@ pub(super) async fn block_extended_header( } /// [`BlockchainReadRequest::BlockHash`]. 
-pub(super) async fn block_hash( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn block_hash( + blockchain_read: &mut BlockchainReadHandle, height: u64, chain: Chain, ) -> Result<[u8; 32], Error> { @@ -57,8 +94,8 @@ pub(super) async fn block_hash( } /// [`BlockchainReadRequest::FindBlock`]. -pub(super) async fn find_block( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn find_block( + blockchain_read: &mut BlockchainReadHandle, block_hash: [u8; 32], ) -> Result, Error> { let BlockchainResponse::FindBlock(option) = blockchain_read @@ -74,8 +111,8 @@ pub(super) async fn find_block( } /// [`BlockchainReadRequest::FilterUnknownHashes`]. -pub(super) async fn filter_unknown_hashes( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn filter_unknown_hashes( + blockchain_read: &mut BlockchainReadHandle, block_hashes: HashSet<[u8; 32]>, ) -> Result, Error> { let BlockchainResponse::FilterUnknownHashes(output) = blockchain_read @@ -91,8 +128,8 @@ pub(super) async fn filter_unknown_hashes( } /// [`BlockchainReadRequest::BlockExtendedHeaderInRange`] -pub(super) async fn block_extended_header_in_range( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn block_extended_header_in_range( + blockchain_read: &mut BlockchainReadHandle, range: Range, chain: Chain, ) -> Result, Error> { @@ -111,8 +148,8 @@ pub(super) async fn block_extended_header_in_range( } /// [`BlockchainReadRequest::ChainHeight`]. -pub(super) async fn chain_height( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn chain_height( + blockchain_read: &mut BlockchainReadHandle, ) -> Result<(u64, [u8; 32]), Error> { let BlockchainResponse::ChainHeight(height, hash) = blockchain_read .ready() @@ -127,8 +164,8 @@ pub(super) async fn chain_height( } /// [`BlockchainReadRequest::GeneratedCoins`]. 
-pub(super) async fn generated_coins( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn generated_coins( + blockchain_read: &mut BlockchainReadHandle, block_height: u64, ) -> Result { let BlockchainResponse::GeneratedCoins(generated_coins) = blockchain_read @@ -146,8 +183,8 @@ pub(super) async fn generated_coins( } /// [`BlockchainReadRequest::Outputs`] -pub(super) async fn outputs( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn outputs( + blockchain_read: &mut BlockchainReadHandle, outputs: HashMap>, ) -> Result>, Error> { let BlockchainResponse::Outputs(outputs) = blockchain_read @@ -163,8 +200,8 @@ pub(super) async fn outputs( } /// [`BlockchainReadRequest::NumberOutputsWithAmount`] -pub(super) async fn number_outputs_with_amount( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn number_outputs_with_amount( + blockchain_read: &mut BlockchainReadHandle, output_amounts: Vec, ) -> Result, Error> { let BlockchainResponse::NumberOutputsWithAmount(map) = blockchain_read @@ -182,8 +219,8 @@ pub(super) async fn number_outputs_with_amount( } /// [`BlockchainReadRequest::KeyImagesSpent`] -pub(super) async fn key_images_spent( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn key_images_spent( + blockchain_read: &mut BlockchainReadHandle, key_images: HashSet<[u8; 32]>, ) -> Result { let BlockchainResponse::KeyImagesSpent(is_spent) = blockchain_read @@ -199,8 +236,8 @@ pub(super) async fn key_images_spent( } /// [`BlockchainReadRequest::CompactChainHistory`] -pub(super) async fn compact_chain_history( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn compact_chain_history( + blockchain_read: &mut BlockchainReadHandle, ) -> Result<(Vec<[u8; 32]>, u128), Error> { let BlockchainResponse::CompactChainHistory { block_ids, @@ -218,8 +255,8 @@ pub(super) async fn compact_chain_history( } /// [`BlockchainReadRequest::FindFirstUnknown`] -pub(super) async fn find_first_unknown( - mut blockchain_read: 
BlockchainReadHandle, +pub(crate) async fn find_first_unknown( + blockchain_read: &mut BlockchainReadHandle, hashes: Vec<[u8; 32]>, ) -> Result, Error> { let BlockchainResponse::FindFirstUnknown(resp) = blockchain_read @@ -235,8 +272,8 @@ pub(super) async fn find_first_unknown( } /// [`BlockchainReadRequest::TotalTxCount`] -pub(super) async fn total_tx_count( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn total_tx_count( + blockchain_read: &mut BlockchainReadHandle, ) -> Result { let BlockchainResponse::TotalTxCount(tx_count) = blockchain_read .ready() @@ -251,8 +288,8 @@ pub(super) async fn total_tx_count( } /// [`BlockchainReadRequest::DatabaseSize`] -pub(super) async fn database_size( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn database_size( + blockchain_read: &mut BlockchainReadHandle, ) -> Result<(u64, u64), Error> { let BlockchainResponse::DatabaseSize { database_size, @@ -270,8 +307,8 @@ pub(super) async fn database_size( } /// [`BlockchainReadRequest::OutputHistogram`] -pub(super) async fn output_histogram( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn output_histogram( + blockchain_read: &mut BlockchainReadHandle, input: OutputHistogramInput, ) -> Result, Error> { let BlockchainResponse::OutputHistogram(histogram) = blockchain_read @@ -287,8 +324,8 @@ pub(super) async fn output_histogram( } /// [`BlockchainReadRequest::CoinbaseTxSum`] -pub(super) async fn coinbase_tx_sum( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn coinbase_tx_sum( + blockchain_read: &mut BlockchainReadHandle, height: u64, count: u64, ) -> Result { @@ -306,3 +343,35 @@ pub(super) async fn coinbase_tx_sum( Ok(sum) } + +/// [`BlockchainReadRequest::AltChains`] +pub(crate) async fn alt_chains( + blockchain_read: &mut BlockchainReadHandle, +) -> Result, Error> { + let BlockchainResponse::AltChains(vec) = blockchain_read + .ready() + .await? + .call(BlockchainReadRequest::AltChains) + .await? 
+ else { + unreachable!(); + }; + + Ok(vec) +} + +/// [`BlockchainReadRequest::AltChainCount`] +pub(crate) async fn alt_chain_count( + blockchain_read: &mut BlockchainReadHandle, +) -> Result { + let BlockchainResponse::AltChainCount(count) = blockchain_read + .ready() + .await? + .call(BlockchainReadRequest::AltChainCount) + .await? + else { + unreachable!(); + }; + + Ok(usize_to_u64(count)) +} diff --git a/binaries/cuprated/src/rpc/request/blockchain_context.rs b/binaries/cuprated/src/rpc/request/blockchain_context.rs index 2b14d46..c6f0f22 100644 --- a/binaries/cuprated/src/rpc/request/blockchain_context.rs +++ b/binaries/cuprated/src/rpc/request/blockchain_context.rs @@ -2,27 +2,30 @@ use std::convert::Infallible; -use anyhow::Error; +use anyhow::{anyhow, Error}; +use monero_serai::block::Block; use tower::{Service, ServiceExt}; use cuprate_consensus_context::{ BlockChainContext, BlockChainContextRequest, BlockChainContextResponse, BlockChainContextService, }; +use cuprate_helper::cast::u64_to_usize; use cuprate_types::{FeeEstimate, HardFork, HardForkInfo}; +// FIXME: use `anyhow::Error` over `tower::BoxError` in blockchain context. + /// [`BlockChainContextRequest::Context`]. -pub(super) async fn context( - service: &mut BlockChainContextService, - height: u64, +pub(crate) async fn context( + blockchain_context: &mut BlockChainContextService, ) -> Result { - let BlockChainContextResponse::Context(context) = service + let BlockChainContextResponse::Context(context) = blockchain_context .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? .call(BlockChainContextRequest::Context) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; @@ -31,17 +34,17 @@ pub(super) async fn context( } /// [`BlockChainContextRequest::HardForkInfo`]. 
-pub(super) async fn hard_fork_info( - service: &mut BlockChainContextService, +pub(crate) async fn hard_fork_info( + blockchain_context: &mut BlockChainContextService, hard_fork: HardFork, ) -> Result { - let BlockChainContextResponse::HardForkInfo(hf_info) = service + let BlockChainContextResponse::HardForkInfo(hf_info) = blockchain_context .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? .call(BlockChainContextRequest::HardForkInfo(hard_fork)) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; @@ -50,20 +53,47 @@ pub(super) async fn hard_fork_info( } /// [`BlockChainContextRequest::FeeEstimate`]. -pub(super) async fn fee_estimate( - service: &mut BlockChainContextService, +pub(crate) async fn fee_estimate( + blockchain_context: &mut BlockChainContextService, grace_blocks: u64, ) -> Result { - let BlockChainContextResponse::FeeEstimate(fee) = service + let BlockChainContextResponse::FeeEstimate(fee) = blockchain_context .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? .call(BlockChainContextRequest::FeeEstimate { grace_blocks }) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; Ok(fee) } + +/// [`BlockChainContextRequest::CalculatePow`] +pub(crate) async fn calculate_pow( + blockchain_context: &mut BlockChainContextService, + hardfork: HardFork, + height: u64, + block: Box, + seed_hash: [u8; 32], +) -> Result<[u8; 32], Error> { + let BlockChainContextResponse::CalculatePow(hash) = blockchain_context + .ready() + .await + .map_err(|e| anyhow!(e))? + .call(BlockChainContextRequest::CalculatePow { + hardfork, + height: u64_to_usize(height), + block, + seed_hash, + }) + .await + .map_err(|e| anyhow!(e))? 
+ else { + unreachable!(); + }; + + Ok(hash) +} diff --git a/binaries/cuprated/src/rpc/request/blockchain_manager.rs b/binaries/cuprated/src/rpc/request/blockchain_manager.rs index 4dc91c8..18b75de 100644 --- a/binaries/cuprated/src/rpc/request/blockchain_manager.rs +++ b/binaries/cuprated/src/rpc/request/blockchain_manager.rs @@ -5,13 +5,18 @@ use monero_serai::block::Block; use tower::{Service, ServiceExt}; use cuprate_helper::cast::{u64_to_usize, usize_to_u64}; +use cuprate_p2p_core::{types::ConnectionId, NetworkZone}; +use cuprate_pruning::PruningSeed; +use cuprate_rpc_types::misc::Span; +use cuprate_types::{AddAuxPow, AuxPow, HardFork}; -use crate::rpc::handler::{ - BlockchainManagerHandle, BlockchainManagerRequest, BlockchainManagerResponse, +use crate::rpc::{ + constants::FIELD_NOT_SUPPORTED, + handler::{BlockchainManagerHandle, BlockchainManagerRequest, BlockchainManagerResponse}, }; /// [`BlockchainManagerRequest::PopBlocks`] -pub(super) async fn pop_blocks( +pub(crate) async fn pop_blocks( blockchain_manager: &mut BlockchainManagerHandle, amount: u64, ) -> Result { @@ -30,8 +35,10 @@ pub(super) async fn pop_blocks( } /// [`BlockchainManagerRequest::Prune`] -pub(super) async fn prune(blockchain_manager: &mut BlockchainManagerHandle) -> Result<(), Error> { - let BlockchainManagerResponse::Ok = blockchain_manager +pub(crate) async fn prune( + blockchain_manager: &mut BlockchainManagerHandle, +) -> Result { + let BlockchainManagerResponse::Prune(seed) = blockchain_manager .ready() .await? 
.call(BlockchainManagerRequest::Prune) @@ -40,11 +47,11 @@ pub(super) async fn prune(blockchain_manager: &mut BlockchainManagerHandle) -> R unreachable!(); }; - Ok(()) + Ok(seed) } /// [`BlockchainManagerRequest::Pruned`] -pub(super) async fn pruned( +pub(crate) async fn pruned( blockchain_manager: &mut BlockchainManagerHandle, ) -> Result { let BlockchainManagerResponse::Pruned(pruned) = blockchain_manager @@ -60,7 +67,7 @@ pub(super) async fn pruned( } /// [`BlockchainManagerRequest::RelayBlock`] -pub(super) async fn relay_block( +pub(crate) async fn relay_block( blockchain_manager: &mut BlockchainManagerHandle, block: Block, ) -> Result<(), Error> { @@ -77,7 +84,7 @@ pub(super) async fn relay_block( } /// [`BlockchainManagerRequest::Syncing`] -pub(super) async fn syncing( +pub(crate) async fn syncing( blockchain_manager: &mut BlockchainManagerHandle, ) -> Result { let BlockchainManagerResponse::Syncing(syncing) = blockchain_manager @@ -93,7 +100,7 @@ pub(super) async fn syncing( } /// [`BlockchainManagerRequest::Synced`] -pub(super) async fn synced( +pub(crate) async fn synced( blockchain_manager: &mut BlockchainManagerHandle, ) -> Result { let BlockchainManagerResponse::Synced(syncing) = blockchain_manager @@ -109,7 +116,7 @@ pub(super) async fn synced( } /// [`BlockchainManagerRequest::Target`] -pub(super) async fn target( +pub(crate) async fn target( blockchain_manager: &mut BlockchainManagerHandle, ) -> Result { let BlockchainManagerResponse::Target(target) = blockchain_manager @@ -125,7 +132,7 @@ pub(super) async fn target( } /// [`BlockchainManagerRequest::TargetHeight`] -pub(super) async fn target_height( +pub(crate) async fn target_height( blockchain_manager: &mut BlockchainManagerHandle, ) -> Result { let BlockchainManagerResponse::TargetHeight { height } = blockchain_manager @@ -139,3 +146,76 @@ pub(super) async fn target_height( Ok(usize_to_u64(height)) } + +/// [`BlockchainManagerRequest::GenerateBlocks`] +pub(crate) async fn generate_blocks( + 
blockchain_manager: &mut BlockchainManagerHandle, + amount_of_blocks: u64, + prev_block: [u8; 32], + starting_nonce: u32, + wallet_address: String, +) -> Result<(Vec<[u8; 32]>, u64), Error> { + let BlockchainManagerResponse::GenerateBlocks { blocks, height } = blockchain_manager + .ready() + .await? + .call(BlockchainManagerRequest::GenerateBlocks { + amount_of_blocks, + prev_block, + starting_nonce, + wallet_address, + }) + .await? + else { + unreachable!(); + }; + + Ok((blocks, usize_to_u64(height))) +} + +// [`BlockchainManagerRequest::Spans`] +pub(crate) async fn spans( + blockchain_manager: &mut BlockchainManagerHandle, +) -> Result, Error> { + // let BlockchainManagerResponse::Spans(vec) = blockchain_manager + // .ready() + // .await? + // .call(BlockchainManagerRequest::Spans) + // .await? + // else { + // unreachable!(); + // }; + + let vec: Vec> = todo!(); + + // FIXME: impl this map somewhere instead of inline. + let vec = vec + .into_iter() + .map(|span| Span { + connection_id: String::from(ConnectionId::DEFAULT_STR), + nblocks: span.nblocks, + rate: span.rate, + remote_address: span.remote_address.to_string(), + size: span.size, + speed: span.speed, + start_block_height: span.start_block_height, + }) + .collect(); + + Ok(vec) +} + +/// [`BlockchainManagerRequest::NextNeededPruningSeed`] +pub(crate) async fn next_needed_pruning_seed( + blockchain_manager: &mut BlockchainManagerHandle, +) -> Result { + let BlockchainManagerResponse::NextNeededPruningSeed(seed) = blockchain_manager + .ready() + .await? + .call(BlockchainManagerRequest::NextNeededPruningSeed) + .await? 
+ else { + unreachable!(); + }; + + Ok(seed) +} diff --git a/binaries/cuprated/src/rpc/request/txpool.rs b/binaries/cuprated/src/rpc/request/txpool.rs index a36778e..eadbb23 100644 --- a/binaries/cuprated/src/rpc/request/txpool.rs +++ b/binaries/cuprated/src/rpc/request/txpool.rs @@ -2,7 +2,7 @@ use std::convert::Infallible; -use anyhow::Error; +use anyhow::{anyhow, Error}; use tower::{Service, ServiceExt}; use cuprate_helper::cast::usize_to_u64; @@ -14,15 +14,17 @@ use cuprate_txpool::{ TxEntry, }; +// FIXME: use `anyhow::Error` over `tower::BoxError` in txpool. + /// [`TxpoolReadRequest::Backlog`] -pub(super) async fn backlog(txpool_read: &mut TxpoolReadHandle) -> Result, Error> { +pub(crate) async fn backlog(txpool_read: &mut TxpoolReadHandle) -> Result, Error> { let TxpoolReadResponse::Backlog(tx_entries) = txpool_read .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? .call(TxpoolReadRequest::Backlog) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; @@ -31,14 +33,19 @@ pub(super) async fn backlog(txpool_read: &mut TxpoolReadHandle) -> Result Result { +pub(crate) async fn size( + txpool_read: &mut TxpoolReadHandle, + include_sensitive_txs: bool, +) -> Result { let TxpoolReadResponse::Size(size) = txpool_read .ready() .await - .expect("TODO") - .call(TxpoolReadRequest::Size) + .map_err(|e| anyhow!(e))? + .call(TxpoolReadRequest::Size { + include_sensitive_txs, + }) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? 
else { unreachable!(); }; @@ -47,9 +54,17 @@ pub(super) async fn size(txpool_read: &mut TxpoolReadHandle) -> Result, +) -> Result<(), Error> { + todo!(); + Ok(()) +} + +/// TODO +pub(crate) async fn relay( + txpool_manager: &mut Infallible, tx_hashes: Vec<[u8; 32]>, ) -> Result<(), Error> { todo!(); diff --git a/consensus/context/src/lib.rs b/consensus/context/src/lib.rs index 198d5a1..acc4d23 100644 --- a/consensus/context/src/lib.rs +++ b/consensus/context/src/lib.rs @@ -18,6 +18,7 @@ use std::{ }; use futures::{channel::oneshot, FutureExt}; +use monero_serai::block::Block; use tokio::sync::mpsc; use tokio_util::sync::PollSender; use tower::Service; @@ -267,6 +268,21 @@ pub enum BlockChainContextRequest { grace_blocks: u64, }, + /// Calculate proof-of-work for this block. + CalculatePow { + /// The hardfork of the protocol at this block height. + hardfork: HardFork, + /// The height of the block. + height: usize, + /// The block data. + /// + /// This is boxed because [`Block`] causes this enum to be 1200 bytes, + /// where the 2nd variant is only 96 bytes. + block: Box, + /// The seed hash for the proof-of-work. + seed_hash: [u8; 32], + }, + /// Clear the alt chain context caches. ClearAltCache, @@ -364,6 +380,9 @@ pub enum BlockChainContextResponse { /// Response to [`BlockChainContextRequest::FeeEstimate`] FeeEstimate(FeeEstimate), + /// Response to [`BlockChainContextRequest::CalculatePow`] + CalculatePow([u8; 32]), + /// Response to [`BlockChainContextRequest::AltChains`] /// /// If the inner [`Vec::is_empty`], there were no alternate chains. diff --git a/consensus/context/src/task.rs b/consensus/context/src/task.rs index 65cfea9..b075995 100644 --- a/consensus/context/src/task.rs +++ b/consensus/context/src/task.rs @@ -324,7 +324,8 @@ impl ContextTask { } BlockChainContextRequest::HardForkInfo(_) | BlockChainContextRequest::FeeEstimate { .. 
} - | BlockChainContextRequest::AltChains => { + | BlockChainContextRequest::AltChains + | BlockChainContextRequest::CalculatePow { .. } => { todo!("finish https://github.com/Cuprate/cuprate/pull/297") } }) diff --git a/helper/src/cast.rs b/helper/src/cast.rs index 99b7f53..5628d7d 100644 --- a/helper/src/cast.rs +++ b/helper/src/cast.rs @@ -18,7 +18,6 @@ // // //============================ SAFETY: DO NOT REMOVE ===========================// -//---------------------------------------------------------------------------------------------------- Free functions /// Cast [`u64`] to [`usize`]. #[inline(always)] pub const fn u64_to_usize(u: u64) -> usize { diff --git a/p2p/address-book/src/book.rs b/p2p/address-book/src/book.rs index 907d691..3e5269f 100644 --- a/p2p/address-book/src/book.rs +++ b/p2p/address-book/src/book.rs @@ -423,7 +423,8 @@ impl Service> for AddressBook { AddressBookRequest::PeerlistSize | AddressBookRequest::ConnectionCount | AddressBookRequest::SetBan(_) - | AddressBookRequest::GetBans => { + | AddressBookRequest::GetBans + | AddressBookRequest::ConnectionInfo => { todo!("finish https://github.com/Cuprate/cuprate/pull/297") } }; diff --git a/p2p/p2p-core/Cargo.toml b/p2p/p2p-core/Cargo.toml index 0a6aaf3..bc6c833 100644 --- a/p2p/p2p-core/Cargo.toml +++ b/p2p/p2p-core/Cargo.toml @@ -10,9 +10,10 @@ default = ["borsh"] borsh = ["dep:borsh", "cuprate-pruning/borsh"] [dependencies] -cuprate-helper = { workspace = true, features = ["asynch"], default-features = false } -cuprate-wire = { workspace = true, features = ["tracing"] } +cuprate-helper = { workspace = true, features = ["asynch"], default-features = false } +cuprate-wire = { workspace = true, features = ["tracing"] } cuprate-pruning = { workspace = true } +cuprate-types = { workspace = true } tokio = { workspace = true, features = ["net", "sync", "macros", "time", "rt", "rt-multi-thread"]} tokio-util = { workspace = true, features = ["codec"] } diff --git a/p2p/p2p-core/src/ban.rs 
b/p2p/p2p-core/src/ban.rs deleted file mode 100644 index 76fd3eb..0000000 --- a/p2p/p2p-core/src/ban.rs +++ /dev/null @@ -1,23 +0,0 @@ -//! Data structures related to bans. - -use std::time::{Duration, Instant}; - -use crate::NetZoneAddress; - -/// Data within [`crate::services::AddressBookRequest::SetBan`]. -pub struct SetBan { - /// Address of the peer. - pub address: A, - /// - If [`Some`], how long this peer should be banned for - /// - If [`None`], the peer will be unbanned - pub ban: Option, -} - -/// Data within [`crate::services::AddressBookResponse::GetBans`]. -pub struct BanState { - /// Address of the peer. - pub address: A, - /// - If [`Some`], the peer is banned until this [`Instant`] - /// - If [`None`], the peer is not currently banned - pub unban_instant: Option, -} diff --git a/p2p/p2p-core/src/client/handshaker/builder/dummy.rs b/p2p/p2p-core/src/client/handshaker/builder/dummy.rs index 8bb966d..48b3daf 100644 --- a/p2p/p2p-core/src/client/handshaker/builder/dummy.rs +++ b/p2p/p2p-core/src/client/handshaker/builder/dummy.rs @@ -111,7 +111,8 @@ impl Service> for DummyAddressBook { AddressBookRequest::PeerlistSize | AddressBookRequest::ConnectionCount | AddressBookRequest::SetBan(_) - | AddressBookRequest::GetBans => { + | AddressBookRequest::GetBans + | AddressBookRequest::ConnectionInfo => { todo!("finish https://github.com/Cuprate/cuprate/pull/297") } })) diff --git a/p2p/p2p-core/src/lib.rs b/p2p/p2p-core/src/lib.rs index 5b93b59..26e1068 100644 --- a/p2p/p2p-core/src/lib.rs +++ b/p2p/p2p-core/src/lib.rs @@ -75,7 +75,6 @@ use cuprate_wire::{ NetworkAddress, }; -pub mod ban; pub mod client; mod constants; pub mod error; @@ -83,6 +82,7 @@ pub mod handles; mod network_zones; pub mod protocol; pub mod services; +pub mod types; pub use error::*; pub use network_zones::{ClearNet, ClearNetServerCfg}; diff --git a/p2p/p2p-core/src/services.rs b/p2p/p2p-core/src/services.rs index 495b719..6d1089c 100644 --- a/p2p/p2p-core/src/services.rs +++ 
b/p2p/p2p-core/src/services.rs @@ -4,9 +4,9 @@ use cuprate_pruning::{PruningError, PruningSeed}; use cuprate_wire::{CoreSyncData, PeerListEntryBase}; use crate::{ - ban::{BanState, SetBan}, client::InternalPeerID, handles::ConnectionHandle, + types::{BanState, ConnectionInfo, SetBan}, NetZoneAddress, NetworkAddressIncorrectZone, NetworkZone, }; @@ -118,6 +118,9 @@ pub enum AddressBookRequest { /// Get the amount of white & grey peers. PeerlistSize, + /// Get information on all connections. + ConnectionInfo, + /// Get the amount of incoming & outgoing connections. ConnectionCount, @@ -152,6 +155,9 @@ pub enum AddressBookResponse { /// Response to [`AddressBookRequest::PeerlistSize`]. PeerlistSize { white: usize, grey: usize }, + /// Response to [`AddressBookRequest::ConnectionInfo`]. + ConnectionInfo(Vec>), + /// Response to [`AddressBookRequest::ConnectionCount`]. ConnectionCount { incoming: usize, outgoing: usize }, diff --git a/p2p/p2p-core/src/types.rs b/p2p/p2p-core/src/types.rs new file mode 100644 index 0000000..ca56055 --- /dev/null +++ b/p2p/p2p-core/src/types.rs @@ -0,0 +1,96 @@ +//! General data structures. + +use std::time::{Duration, Instant}; + +use cuprate_pruning::PruningSeed; +use cuprate_types::{AddressType, ConnectionState}; + +use crate::NetZoneAddress; + +/// Data within [`crate::services::AddressBookRequest::SetBan`]. +pub struct SetBan { + /// Address of the peer. + pub address: A, + /// - If [`Some`], how long this peer should be banned for + /// - If [`None`], the peer will be unbanned + pub ban: Option, +} + +/// Data within [`crate::services::AddressBookResponse::GetBans`]. +pub struct BanState { + /// Address of the peer. + pub address: A, + /// - If [`Some`], the peer is banned until this [`Instant`] + /// - If [`None`], the peer is not currently banned + pub unban_instant: Option, +} + +/// Data within [`crate::services::AddressBookResponse::ConnectionInfo`]. 
+pub struct ConnectionInfo { + // The following fields are mostly the same as `monerod`. + pub address: A, + pub address_type: AddressType, + pub avg_download: u64, + pub avg_upload: u64, + pub current_download: u64, + pub current_upload: u64, + pub height: u64, + /// Either a domain or an IP without the port. + pub host: String, + pub incoming: bool, + pub live_time: u64, + pub localhost: bool, + pub local_ip: bool, + pub peer_id: u64, + pub pruning_seed: PruningSeed, + pub recv_count: u64, + pub recv_idle_time: u64, + pub rpc_credits_per_hash: u32, + pub rpc_port: u16, + pub send_count: u64, + pub send_idle_time: u64, + pub state: ConnectionState, + pub support_flags: u32, + + // The following fields are slightly different than `monerod`. + + // + /// [`None`] if Tor/i2p or unknown. + pub socket_addr: Option, + + /// This field does not exist for `cuprated`'s RPC, this is just a marker type: + /// - + /// - + /// + /// [`ConnectionId::DEFAULT_STR`] is used when mapping to the RPC type. + pub connection_id: ConnectionId, +} + +/// Marker type for `monerod`'s connection ID. +/// +/// `connection_id` is a 128-bit `uuid` in `monerod`. +/// `cuprated` does not support this field so it returns +/// the default value in the RPC interface, an all 0-bit UUID. +/// +/// This default value in string form is [`ConnectionId::DEFAULT_STR`]. +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ConnectionId; + +impl ConnectionId { + /// [`str`] representation of a default connection ID. + pub const DEFAULT_STR: &str = "00000000000000000000000000000000"; +} + +/// Used in RPC's `sync_info`. +/// +// TODO: fix docs after +// Data within [`crate::services::AddressBookResponse::Spans`]. 
+#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Span { + pub nblocks: u64, + pub rate: u32, + pub remote_address: A, + pub size: u64, + pub speed: u32, + pub start_block_height: u64, +} diff --git a/rpc/types/Cargo.toml b/rpc/types/Cargo.toml index e9ca529..6d8797b 100644 --- a/rpc/types/Cargo.toml +++ b/rpc/types/Cargo.toml @@ -10,16 +10,16 @@ keywords = ["cuprate", "rpc", "types", "monero"] [features] default = ["serde", "epee"] -serde = ["dep:serde", "cuprate-fixed-bytes/serde"] -epee = ["dep:cuprate-epee-encoding"] +serde = ["dep:serde", "cuprate-fixed-bytes/serde", "cuprate-types/serde"] +epee = ["dep:cuprate-epee-encoding", "cuprate-types/epee"] [dependencies] cuprate-epee-encoding = { workspace = true, optional = true } cuprate-fixed-bytes = { workspace = true } -cuprate-types = { workspace = true, default-features = false, features = ["epee", "serde"] } +cuprate-types = { workspace = true, default-features = false } -paste = { workspace = true } -serde = { workspace = true, optional = true } +paste = { workspace = true } +serde = { workspace = true, optional = true } [dev-dependencies] cuprate-test-utils = { workspace = true } diff --git a/rpc/types/src/base.rs b/rpc/types/src/base.rs index c131e41..89eafc5 100644 --- a/rpc/types/src/base.rs +++ b/rpc/types/src/base.rs @@ -58,61 +58,37 @@ pub struct ResponseBase { } impl ResponseBase { - /// `const` version of [`Default::default`]. - /// - /// ```rust - /// use cuprate_rpc_types::{misc::*, base::*}; - /// - /// let new = ResponseBase::new(); - /// assert_eq!(new, ResponseBase { - /// status: Status::Ok, - /// untrusted: false, - /// }); - /// ``` - pub const fn new() -> Self { - Self { - status: Status::Ok, - untrusted: false, - } - } - - /// Returns OK and trusted [`Self`]. + /// [`Status::Ok`] and trusted [`Self`]. /// /// This is the most common version of [`Self`]. 
/// /// ```rust /// use cuprate_rpc_types::{misc::*, base::*}; /// - /// let ok = ResponseBase::ok(); - /// assert_eq!(ok, ResponseBase { + /// assert_eq!(ResponseBase::OK, ResponseBase { /// status: Status::Ok, /// untrusted: false, /// }); /// ``` - pub const fn ok() -> Self { - Self { - status: Status::Ok, - untrusted: false, - } - } + pub const OK: Self = Self { + status: Status::Ok, + untrusted: false, + }; - /// Same as [`Self::ok`] but with [`Self::untrusted`] set to `true`. + /// Same as [`Self::OK`] but with [`Self::untrusted`] set to `true`. /// /// ```rust /// use cuprate_rpc_types::{misc::*, base::*}; /// - /// let ok_untrusted = ResponseBase::ok_untrusted(); - /// assert_eq!(ok_untrusted, ResponseBase { + /// assert_eq!(ResponseBase::OK_UNTRUSTED, ResponseBase { /// status: Status::Ok, /// untrusted: true, /// }); /// ``` - pub const fn ok_untrusted() -> Self { - Self { - status: Status::Ok, - untrusted: true, - } - } + pub const OK_UNTRUSTED: Self = Self { + status: Status::Ok, + untrusted: true, + }; } #[cfg(feature = "epee")] @@ -148,9 +124,9 @@ impl AccessResponseBase { /// ```rust /// use cuprate_rpc_types::{misc::*, base::*}; /// - /// let new = AccessResponseBase::new(ResponseBase::ok()); + /// let new = AccessResponseBase::new(ResponseBase::OK); /// assert_eq!(new, AccessResponseBase { - /// response_base: ResponseBase::ok(), + /// response_base: ResponseBase::OK, /// credits: 0, /// top_hash: "".into(), /// }); @@ -163,47 +139,41 @@ impl AccessResponseBase { } } - /// Returns OK and trusted [`Self`]. + /// [`Status::Ok`] and trusted [`Self`]. /// /// This is the most common version of [`Self`]. 
/// /// ```rust /// use cuprate_rpc_types::{misc::*, base::*}; /// - /// let ok = AccessResponseBase::ok(); - /// assert_eq!(ok, AccessResponseBase { - /// response_base: ResponseBase::ok(), + /// assert_eq!(AccessResponseBase::OK, AccessResponseBase { + /// response_base: ResponseBase::OK, /// credits: 0, /// top_hash: "".into(), /// }); /// ``` - pub const fn ok() -> Self { - Self { - response_base: ResponseBase::ok(), - credits: 0, - top_hash: String::new(), - } - } + pub const OK: Self = Self { + response_base: ResponseBase::OK, + credits: 0, + top_hash: String::new(), + }; - /// Same as [`Self::ok`] but with `untrusted` set to `true`. + /// Same as [`Self::OK`] but with `untrusted` set to `true`. /// /// ```rust /// use cuprate_rpc_types::{misc::*, base::*}; /// - /// let ok_untrusted = AccessResponseBase::ok_untrusted(); - /// assert_eq!(ok_untrusted, AccessResponseBase { - /// response_base: ResponseBase::ok_untrusted(), + /// assert_eq!(AccessResponseBase::OK_UNTRUSTED, AccessResponseBase { + /// response_base: ResponseBase::OK_UNTRUSTED, /// credits: 0, /// top_hash: "".into(), /// }); /// ``` - pub const fn ok_untrusted() -> Self { - Self { - response_base: ResponseBase::ok_untrusted(), - credits: 0, - top_hash: String::new(), - } - } + pub const OK_UNTRUSTED: Self = Self { + response_base: ResponseBase::OK_UNTRUSTED, + credits: 0, + top_hash: String::new(), + }; } #[cfg(feature = "epee")] diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index 6fb538c..cb55e64 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -186,7 +186,7 @@ define_request_and_response! { // . 
#[doc = serde_doc_test!( GET_BLOCK_TEMPLATE_RESPONSE => GetBlockTemplateResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, blockhashing_blob: "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a00000000e0c20372be23d356347091025c5b5e8f2abf83ab618378565cce2b703491523401".into(), blocktemplate_blob: "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a0000000002c681c30101ff8a81c3010180e0a596bb11033b7eedf47baf878f3490cb20b696079c34bd017fe59b0d070e74d73ffabc4bb0e05f011decb630f3148d0163b3bd39690dde4078e4cfb69fecf020d6278a27bad10c58023c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(), difficulty_top64: 0, @@ -242,7 +242,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_BLOCK_COUNT_RESPONSE => GetBlockCountResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, count: 3195019, } )] @@ -334,7 +334,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GENERATE_BLOCKS_RESPONSE => GenerateBlocksResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, blocks: vec!["49b712db7760e3728586f8434ee8bc8d7b3d410dac6bb6e98bf5845c83b917e4".into()], height: 9783, } @@ -359,7 +359,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_LAST_BLOCK_HEADER_RESPONSE => GetLastBlockHeaderResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, block_header: BlockHeader { block_size: 200419, block_weight: 200419, @@ -411,7 +411,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_BLOCK_HEADER_BY_HASH_RESPONSE => GetBlockHeaderByHashResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, block_headers: vec![], block_header: BlockHeader { block_size: 210, @@ -466,7 +466,7 @@ define_request_and_response! 
{ #[doc = serde_doc_test!( GET_BLOCK_HEADER_BY_HEIGHT_RESPONSE => GetBlockHeaderByHeightResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, block_header: BlockHeader { block_size: 210, block_weight: 210, @@ -521,7 +521,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_BLOCK_HEADERS_RANGE_RESPONSE => GetBlockHeadersRangeResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, headers: vec![ BlockHeader { block_size: 301413, @@ -603,7 +603,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_BLOCK_RESPONSE => GetBlockResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, blob: "1010c58bab9b06b27bdecfc6cd0a46172d136c08831cf67660377ba992332363228b1b722781e7807e07f502cef8a70101ff92f8a7010180e0a596bb1103d7cbf826b665d7a532c316982dc8dbc24f285cbc18bbcc27c7164cd9b3277a85d034019f629d8b36bd16a2bfce3ea80c31dc4d8762c67165aec21845494e32b7582fe00211000000297a787a000000000000000000000000".into(), block_header: BlockHeader { block_size: 106, @@ -656,11 +656,11 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_CONNECTIONS_RESPONSE => GetConnectionsResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, connections: vec![ ConnectionInfo { address: "3evk3kezfjg44ma6tvesy7rbxwwpgpympj45xar5fo4qajrsmkoaqdqd.onion:18083".into(), - address_type: 4, + address_type: cuprate_types::AddressType::Tor, avg_download: 0, avg_upload: 0, connection_id: "22ef856d0f1d44cc95e84fecfd065fe2".into(), @@ -682,12 +682,12 @@ define_request_and_response! 
{ rpc_port: 0, send_count: 3406572, send_idle_time: 30, - state: "normal".into(), + state: cuprate_types::ConnectionState::Normal, support_flags: 0 }, ConnectionInfo { address: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion:18083".into(), - address_type: 4, + address_type: cuprate_types::AddressType::Tor, avg_download: 0, avg_upload: 0, connection_id: "c7734e15936f485a86d2b0534f87e499".into(), @@ -709,7 +709,7 @@ define_request_and_response! { rpc_port: 0, send_count: 3370566, send_idle_time: 120, - state: "normal".into(), + state: cuprate_types::ConnectionState::Normal, support_flags: 0 } ], @@ -730,7 +730,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_INFO_RESPONSE => GetInfoResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, adjusted_time: 1721245289, alt_blocks_count: 16, block_size_limit: 600000, @@ -833,7 +833,7 @@ define_request_and_response! { #[doc = serde_doc_test!( HARD_FORK_INFO_RESPONSE => HardForkInfoResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, earliest_height: 2689608, enabled: true, state: 0, @@ -879,7 +879,7 @@ define_request_and_response! { #[doc = serde_doc_test!( SET_BANS_RESPONSE => SetBansResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, } )] ResponseBase {} @@ -894,7 +894,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_BANS_RESPONSE => GetBansResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, bans: vec![ GetBan { host: "104.248.206.131".into(), @@ -996,7 +996,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_OUTPUT_HISTOGRAM_RESPONSE => GetOutputHistogramResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, histogram: vec![HistogramEntry { amount: 20000000000, recent_instances: 0, @@ -1030,7 +1030,7 @@ define_request_and_response! 
{ #[doc = serde_doc_test!( GET_COINBASE_TX_SUM_RESPONSE => GetCoinbaseTxSumResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, emission_amount: 9387854817320, emission_amount_top64: 0, fee_amount: 83981380000, @@ -1059,7 +1059,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_VERSION_RESPONSE => GetVersionResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, current_height: 3195051, hard_forks: vec![ HardforkEntry { @@ -1145,12 +1145,16 @@ define_request_and_response! { get_fee_estimate, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2250..=2277, - GetFeeEstimate (empty), - Request {}, + + GetFeeEstimate, + + Request { + grace_blocks: u64 = default_zero::(), "default_zero", + }, #[doc = serde_doc_test!( GET_FEE_ESTIMATE_RESPONSE => GetFeeEstimateResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, fee: 20000, fees: vec![20000,80000,320000,4000000], quantization_mask: 10000, @@ -1172,7 +1176,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_ALTERNATE_CHAINS_RESPONSE => GetAlternateChainsResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, chains: vec![ ChainInfo { block_hash: "4826c7d45d7cf4f02985b5c405b0e5d7f92c8d25e015492ce19aa3b209295dce".into(), @@ -1240,7 +1244,7 @@ define_request_and_response! { #[doc = serde_doc_test!( SYNC_INFO_RESPONSE => SyncInfoResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, height: 3195157, next_needed_pruning_seed: 0, overview: "[]".into(), @@ -1249,7 +1253,7 @@ define_request_and_response! { SyncInfoPeer { info: ConnectionInfo { address: "142.93.128.65:44986".into(), - address_type: 1, + address_type: cuprate_types::AddressType::Ipv4, avg_download: 1, avg_upload: 1, connection_id: "a5803c4c2dac49e7b201dccdef54c862".into(), @@ -1271,14 +1275,14 @@ define_request_and_response! 
{ rpc_port: 18089, send_count: 32235, send_idle_time: 6, - state: "normal".into(), + state: cuprate_types::ConnectionState::Normal, support_flags: 1 } }, SyncInfoPeer { info: ConnectionInfo { address: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion:18083".into(), - address_type: 4, + address_type: cuprate_types::AddressType::Tor, avg_download: 0, avg_upload: 0, connection_id: "277f7c821bc546878c8bd29977e780f5".into(), @@ -1300,7 +1304,7 @@ define_request_and_response! { rpc_port: 0, send_count: 99120, send_idle_time: 15, - state: "normal".into(), + state: cuprate_types::ConnectionState::Normal, support_flags: 0 } } @@ -1330,7 +1334,7 @@ define_request_and_response! { // TODO: enable test after binary string impl. // #[doc = serde_doc_test!( // GET_TRANSACTION_POOL_BACKLOG_RESPONSE => GetTransactionPoolBacklogResponse { - // base: ResponseBase::ok(), + // base: ResponseBase::OK, // backlog: "...Binary...".into(), // } // )] @@ -1372,7 +1376,7 @@ define_request_and_response! { // TODO: enable test after binary string impl. // #[doc = serde_doc_test!( // GET_OUTPUT_DISTRIBUTION_RESPONSE => GetOutputDistributionResponse { - // base: AccessResponseBase::ok(), + // base: AccessResponseBase::OK, // distributions: vec![Distribution::Uncompressed(DistributionUncompressed { // start_height: 1462078, // base: 0, @@ -1396,7 +1400,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_MINER_DATA_RESPONSE => GetMinerDataResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, already_generated_coins: 18186022843595960691, difficulty: "0x48afae42de".into(), height: 2731375, @@ -1449,7 +1453,7 @@ define_request_and_response! { #[doc = serde_doc_test!( PRUNE_BLOCKCHAIN_RESPONSE => PruneBlockchainResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, pruned: true, pruning_seed: 387, } @@ -1515,7 +1519,7 @@ define_request_and_response! 
{ #[doc = serde_doc_test!( FLUSH_CACHE_RESPONSE => FlushCacheResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, } )] ResponseBase {} @@ -1544,7 +1548,7 @@ define_request_and_response! { #[doc = serde_doc_test!( ADD_AUX_POW_RESPONSE => AddAuxPowResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, aux_pow: vec![AuxPow { hash: "7b35762de164b20885e15dbe656b1138db06bb402fa1796f5765a23933d8859a".into(), id: "3200b4ea97c3b2081cd4190b58e49572b2319fed00d030ad51809dff06b5d8c8".into(), diff --git a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs index 4430dbe..8f7467b 100644 --- a/rpc/types/src/misc/misc.rs +++ b/rpc/types/src/misc/misc.rs @@ -110,7 +110,7 @@ define_struct_and_impl_epee! { /// Used in [`crate::json::GetConnectionsResponse`]. ConnectionInfo { address: String, - address_type: u8, + address_type: cuprate_types::AddressType, avg_download: u64, avg_upload: u64, connection_id: String, @@ -135,7 +135,7 @@ define_struct_and_impl_epee! { // Exists in the original definition, but isn't // used or (de)serialized for RPC purposes. // ssl: bool, - state: String, + state: cuprate_types::ConnectionState, support_flags: u32, } } diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index f743392..3694041 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -104,7 +104,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_HEIGHT_RESPONSE => GetHeightResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, hash: "68bb1a1cff8e2a44c3221e8e1aff80bc6ca45d06fa8eff4d2a3a7ac31d4efe3f".into(), height: 3195160, } @@ -159,7 +159,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_ALT_BLOCKS_HASHES_RESPONSE => GetAltBlocksHashesResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, blks_hashes: vec!["8ee10db35b1baf943f201b303890a29e7d45437bd76c2bd4df0d2f2ee34be109".into()], } )] @@ -189,7 +189,7 @@ define_request_and_response! 
{ #[doc = serde_doc_test!( IS_KEY_IMAGE_SPENT_RESPONSE => IsKeyImageSpentResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, spent_status: vec![1, 1], } )] @@ -285,7 +285,7 @@ define_request_and_response! { #[doc = serde_doc_test!( START_MINING_RESPONSE => StartMiningResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, } )] ResponseBase {} @@ -300,7 +300,7 @@ define_request_and_response! { #[doc = serde_doc_test!( STOP_MINING_RESPONSE => StopMiningResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, } )] ResponseBase {} @@ -315,7 +315,7 @@ define_request_and_response! { #[doc = serde_doc_test!( MINING_STATUS_RESPONSE => MiningStatusResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, active: false, address: "".into(), bg_idle_threshold: 0, @@ -361,7 +361,7 @@ define_request_and_response! { #[doc = serde_doc_test!( SAVE_BC_RESPONSE => SaveBcResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, } )] ResponseBase {} @@ -387,7 +387,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_PEER_LIST_RESPONSE => GetPeerListResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, gray_list: vec![ Peer { host: "161.97.193.0".into(), @@ -469,7 +469,7 @@ define_request_and_response! { #[doc = serde_doc_test!( SET_LOG_HASH_RATE_RESPONSE => SetLogHashRateResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, } )] ResponseBase {} @@ -494,7 +494,7 @@ define_request_and_response! { #[doc = serde_doc_test!( SET_LOG_LEVEL_RESPONSE => SetLogLevelResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, } )] ResponseBase {} @@ -518,7 +518,7 @@ define_request_and_response! { #[doc = serde_doc_test!( SET_LOG_CATEGORIES_RESPONSE => SetLogCategoriesResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, categories: "*:INFO".into(), } )] @@ -584,7 +584,7 @@ define_request_and_response! 
{ #[doc = serde_doc_test!( GET_TRANSACTION_POOL_STATS_RESPONSE => GetTransactionPoolStatsResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, pool_stats: TxpoolStats { bytes_max: 11843, bytes_med: 2219, @@ -646,7 +646,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_LIMIT_RESPONSE => GetLimitResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, limit_down: 1280000, limit_up: 1280000, } @@ -678,7 +678,7 @@ define_request_and_response! { #[doc = serde_doc_test!( SET_LIMIT_RESPONSE => SetLimitResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, limit_down: 1024, limit_up: 128, } @@ -709,7 +709,7 @@ define_request_and_response! { #[doc = serde_doc_test!( OUT_PEERS_RESPONSE => OutPeersResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, out_peers: 3232235535, } )] @@ -742,7 +742,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_NET_STATS_RESPONSE => GetNetStatsResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, start_time: 1721251858, total_bytes_in: 16283817214, total_bytes_out: 34225244079, @@ -781,7 +781,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_OUTS_RESPONSE => GetOutsResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, outs: vec![ OutKey { height: 51941, @@ -825,7 +825,7 @@ define_request_and_response! { #[doc = serde_doc_test!( UPDATE_RESPONSE => UpdateResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, auto_uri: "".into(), hash: "".into(), path: "".into(), @@ -862,7 +862,7 @@ define_request_and_response! { #[doc = serde_doc_test!( POP_BLOCKS_RESPONSE => PopBlocksResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, height: 76482, } )] @@ -881,7 +881,7 @@ define_request_and_response! 
{ #[doc = serde_doc_test!( GET_TRANSACTION_POOL_HASHES_RESPONSE => GetTransactionPoolHashesResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, tx_hashes: vec![ "aa928aed888acd6152c60194d50a4df29b0b851be6169acf11b6a8e304dd6c03".into(), "794345f321a98f3135151f3056c0fdf8188646a8dab27de971428acf3551dd11".into(), @@ -931,7 +931,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_PUBLIC_NODES_RESPONSE => GetPublicNodesResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, gray: vec![], white: vec![ PublicNode { diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index a3b82bd..e3c0180 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -121,6 +121,8 @@ fn map_request( R::DatabaseSize => database_size(env), R::OutputHistogram(input) => output_histogram(env, input), R::CoinbaseTxSum { height, count } => coinbase_tx_sum(env, height, count), + R::AltChains => alt_chains(env), + R::AltChainCount => alt_chain_count(env), } /* SOMEDAY: post-request handling, run some code for each request? */ @@ -648,3 +650,13 @@ fn output_histogram(env: &ConcreteEnv, input: OutputHistogramInput) -> ResponseR fn coinbase_tx_sum(env: &ConcreteEnv, height: usize, count: u64) -> ResponseResult { Ok(BlockchainResponse::CoinbaseTxSum(todo!())) } + +/// [`BlockchainReadRequest::AltChains`] +fn alt_chains(env: &ConcreteEnv) -> ResponseResult { + Ok(BlockchainResponse::AltChains(todo!())) +} + +/// [`BlockchainReadRequest::AltChainCount`] +fn alt_chain_count(env: &ConcreteEnv) -> ResponseResult { + Ok(BlockchainResponse::AltChainCount(todo!())) +} diff --git a/storage/txpool/src/service/interface.rs b/storage/txpool/src/service/interface.rs index 5cd518f..a27c630 100644 --- a/storage/txpool/src/service/interface.rs +++ b/storage/txpool/src/service/interface.rs @@ -35,7 +35,11 @@ pub enum TxpoolReadRequest { Backlog, /// Get the number of transactions in the pool. 
- Size, + Size { + /// If this is [`true`], the size returned will + /// include private transactions in the pool. + include_sensitive_txs: bool, + }, } //---------------------------------------------------------------------------------------------------- TxpoolReadResponse @@ -66,7 +70,7 @@ pub enum TxpoolReadResponse { /// Response to [`TxpoolReadRequest::Backlog`]. /// - /// The inner `Vec` contains information on all + /// The inner [`Vec`] contains information on all /// the transactions currently in the pool. Backlog(Vec), diff --git a/storage/txpool/src/service/read.rs b/storage/txpool/src/service/read.rs index 257fe8e..0de1e7d 100644 --- a/storage/txpool/src/service/read.rs +++ b/storage/txpool/src/service/read.rs @@ -71,7 +71,9 @@ fn map_request( } TxpoolReadRequest::TxsForBlock(txs_needed) => txs_for_block(env, txs_needed), TxpoolReadRequest::Backlog => backlog(env), - TxpoolReadRequest::Size => size(env), + TxpoolReadRequest::Size { + include_sensitive_txs, + } => size(env, include_sensitive_txs), } } @@ -201,6 +203,6 @@ fn backlog(env: &ConcreteEnv) -> ReadResponseResult { /// [`TxpoolReadRequest::Size`]. #[inline] -fn size(env: &ConcreteEnv) -> ReadResponseResult { +fn size(env: &ConcreteEnv, include_sensitive_txs: bool) -> ReadResponseResult { Ok(TxpoolReadResponse::Size(todo!())) } diff --git a/storage/txpool/src/tx.rs b/storage/txpool/src/tx.rs index 6425326..29afae8 100644 --- a/storage/txpool/src/tx.rs +++ b/storage/txpool/src/tx.rs @@ -5,6 +5,8 @@ /// Used in [`TxpoolReadResponse::Backlog`](crate::service::interface::TxpoolReadResponse::Backlog). #[derive(Copy, Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Hash)] pub struct TxEntry { + /// The transaction's ID (hash). + pub id: [u8; 32], /// The transaction's weight. pub weight: u64, /// The transaction's fee. diff --git a/types/src/address_type.rs b/types/src/address_type.rs new file mode 100644 index 0000000..743902d --- /dev/null +++ b/types/src/address_type.rs @@ -0,0 +1,147 @@ +//! 
Types of network addresses; used in P2P. + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "epee")] +use cuprate_epee_encoding::{ + error, + macros::bytes::{Buf, BufMut}, + EpeeValue, Marker, +}; + +use strum::{ + AsRefStr, Display, EnumCount, EnumIs, EnumString, FromRepr, IntoStaticStr, VariantArray, +}; + +/// An enumeration of address types. +/// +/// Used in `cuprate_p2p` and `cuprate_types` +/// +/// Original definition: +/// - +/// +/// # Serde +/// This type's `serde` implementation (de)serializes from a [`u8`]. +/// +/// ```rust +/// use cuprate_types::AddressType as A; +/// use serde_json::{to_string, from_str}; +/// +/// assert_eq!(from_str::(&"0").unwrap(), A::Invalid); +/// assert_eq!(from_str::(&"1").unwrap(), A::Ipv4); +/// assert_eq!(from_str::(&"2").unwrap(), A::Ipv6); +/// assert_eq!(from_str::(&"3").unwrap(), A::I2p); +/// assert_eq!(from_str::(&"4").unwrap(), A::Tor); +/// +/// assert_eq!(to_string(&A::Invalid).unwrap(), "0"); +/// assert_eq!(to_string(&A::Ipv4).unwrap(), "1"); +/// assert_eq!(to_string(&A::Ipv6).unwrap(), "2"); +/// assert_eq!(to_string(&A::I2p).unwrap(), "3"); +/// assert_eq!(to_string(&A::Tor).unwrap(), "4"); +/// ``` +#[derive( + Copy, + Clone, + Default, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + AsRefStr, + Display, + EnumCount, + EnumIs, + EnumString, + FromRepr, + IntoStaticStr, + VariantArray, +)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", serde(untagged, try_from = "u8", into = "u8"))] +#[repr(u8)] +pub enum AddressType { + #[default] + Invalid, + Ipv4, + Ipv6, + I2p, + Tor, +} + +impl AddressType { + /// Convert [`Self`] to a [`u8`]. 
+ /// + /// ```rust + /// use cuprate_types::AddressType as A; + /// + /// assert_eq!(A::Invalid.to_u8(), 0); + /// assert_eq!(A::Ipv4.to_u8(), 1); + /// assert_eq!(A::Ipv6.to_u8(), 2); + /// assert_eq!(A::I2p.to_u8(), 3); + /// assert_eq!(A::Tor.to_u8(), 4); + /// ``` + pub const fn to_u8(self) -> u8 { + self as u8 + } + + /// Convert a [`u8`] to a [`Self`]. + /// + /// # Errors + /// This returns [`None`] if `u > 4`. + /// + /// ```rust + /// use cuprate_types::AddressType as A; + /// + /// assert_eq!(A::from_u8(0), Some(A::Invalid)); + /// assert_eq!(A::from_u8(1), Some(A::Ipv4)); + /// assert_eq!(A::from_u8(2), Some(A::Ipv6)); + /// assert_eq!(A::from_u8(3), Some(A::I2p)); + /// assert_eq!(A::from_u8(4), Some(A::Tor)); + /// assert_eq!(A::from_u8(5), None); + /// ``` + pub const fn from_u8(u: u8) -> Option { + Some(match u { + 0 => Self::Invalid, + 1 => Self::Ipv4, + 2 => Self::Ipv6, + 3 => Self::I2p, + 4 => Self::Tor, + _ => return None, + }) + } +} + +impl From for u8 { + fn from(value: AddressType) -> Self { + value.to_u8() + } +} + +impl TryFrom for AddressType { + type Error = u8; + fn try_from(value: u8) -> Result { + match Self::from_u8(value) { + Some(s) => Ok(s), + None => Err(value), + } + } +} + +#[cfg(feature = "epee")] +impl EpeeValue for AddressType { + const MARKER: Marker = u8::MARKER; + + fn read(r: &mut B, marker: &Marker) -> error::Result { + let u = u8::read(r, marker)?; + Self::from_u8(u).ok_or(error::Error::Format("u8 was greater than 4")) + } + + fn write(self, w: &mut B) -> error::Result<()> { + let u = self.to_u8(); + u8::write(u, w)?; + Ok(()) + } +} diff --git a/types/src/blockchain.rs b/types/src/blockchain.rs index b7436f0..c39c0bd 100644 --- a/types/src/blockchain.rs +++ b/types/src/blockchain.rs @@ -12,7 +12,8 @@ use monero_serai::block::Block; use crate::{ types::{Chain, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation}, - AltBlockInformation, ChainId, CoinbaseTxSum, OutputHistogramEntry, OutputHistogramInput, + 
AltBlockInformation, ChainId, ChainInfo, CoinbaseTxSum, OutputHistogramEntry, + OutputHistogramInput, }; //---------------------------------------------------------------------------------------------------- ReadRequest @@ -128,6 +129,12 @@ pub enum BlockchainReadRequest { /// /// TODO: document fields after impl. CoinbaseTxSum { height: usize, count: u64 }, + + /// Get information on all alternative chains. + AltChains, + + /// Get the amount of alternative chains that exist. + AltChainCount, } //---------------------------------------------------------------------------------------------------- WriteRequest @@ -276,6 +283,12 @@ pub enum BlockchainResponse { /// Response to [`BlockchainReadRequest::CoinbaseTxSum`]. CoinbaseTxSum(CoinbaseTxSum), + /// Response to [`BlockchainReadRequest::AltChains`]. + AltChains(Vec), + + /// Response to [`BlockchainReadRequest::AltChainCount`]. + AltChainCount(usize), + //------------------------------------------------------ Writes /// A generic Ok response to indicate a request was successfully handled. /// diff --git a/types/src/connection_state.rs b/types/src/connection_state.rs new file mode 100644 index 0000000..69b8ed6 --- /dev/null +++ b/types/src/connection_state.rs @@ -0,0 +1,148 @@ +//! [`ConnectionState`]. + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "epee")] +use cuprate_epee_encoding::{ + error, + macros::bytes::{Buf, BufMut}, + EpeeValue, Marker, +}; + +use strum::{ + AsRefStr, Display, EnumCount, EnumIs, EnumString, FromRepr, IntoStaticStr, VariantArray, +}; + +/// An enumeration of P2P connection states. +/// +/// Used in `cuprate_p2p` and `cuprate_rpc_types`. +/// +/// Original definition: +/// - +/// +/// # Serde +/// This type's `serde` implementation depends on `snake_case`. 
+/// +/// ```rust +/// use cuprate_types::ConnectionState as C; +/// use serde_json::to_string; +/// +/// assert_eq!(to_string(&C::BeforeHandshake).unwrap(), r#""before_handshake""#); +/// assert_eq!(to_string(&C::Synchronizing).unwrap(), r#""synchronizing""#); +/// assert_eq!(to_string(&C::Standby).unwrap(), r#""standby""#); +/// assert_eq!(to_string(&C::Idle).unwrap(), r#""idle""#); +/// assert_eq!(to_string(&C::Normal).unwrap(), r#""normal""#); +/// +/// assert_eq!(C::BeforeHandshake.to_string(), "before_handshake"); +/// assert_eq!(C::Synchronizing.to_string(), "synchronizing"); +/// assert_eq!(C::Standby.to_string(), "standby"); +/// assert_eq!(C::Idle.to_string(), "idle"); +/// assert_eq!(C::Normal.to_string(), "normal"); +/// ``` +#[derive( + Copy, + Clone, + Default, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + AsRefStr, + Display, + EnumCount, + EnumIs, + EnumString, + FromRepr, + IntoStaticStr, + VariantArray, +)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "snake_case"))] // cuprate-rpc-types depends on snake_case +#[strum(serialize_all = "snake_case")] +#[repr(u8)] +pub enum ConnectionState { + BeforeHandshake, + Synchronizing, + Standby, + Idle, + #[default] + Normal, +} + +impl ConnectionState { + /// Convert [`Self`] to a [`u8`]. + /// + /// ```rust + /// use cuprate_types::ConnectionState as C; + /// + /// assert_eq!(C::BeforeHandshake.to_u8(), 0); + /// assert_eq!(C::Synchronizing.to_u8(), 1); + /// assert_eq!(C::Standby.to_u8(), 2); + /// assert_eq!(C::Idle.to_u8(), 3); + /// assert_eq!(C::Normal.to_u8(), 4); + /// ``` + pub const fn to_u8(self) -> u8 { + self as u8 + } + + /// Convert a [`u8`] to a [`Self`]. + /// + /// # Errors + /// This returns [`None`] if `u > 4`. 
+ /// + /// ```rust + /// use cuprate_types::ConnectionState as C; + /// + /// assert_eq!(C::from_u8(0), Some(C::BeforeHandshake)); + /// assert_eq!(C::from_u8(1), Some(C::Synchronizing)); + /// assert_eq!(C::from_u8(2), Some(C::Standby)); + /// assert_eq!(C::from_u8(3), Some(C::Idle)); + /// assert_eq!(C::from_u8(4), Some(C::Normal)); + /// assert_eq!(C::from_u8(5), None); + /// ``` + pub const fn from_u8(u: u8) -> Option { + Some(match u { + 0 => Self::BeforeHandshake, + 1 => Self::Synchronizing, + 2 => Self::Standby, + 3 => Self::Idle, + 4 => Self::Normal, + _ => return None, + }) + } +} + +impl From for u8 { + fn from(value: ConnectionState) -> Self { + value.to_u8() + } +} + +impl TryFrom for ConnectionState { + type Error = u8; + fn try_from(value: u8) -> Result { + match Self::from_u8(value) { + Some(s) => Ok(s), + None => Err(value), + } + } +} + +#[cfg(feature = "epee")] +impl EpeeValue for ConnectionState { + const MARKER: Marker = u8::MARKER; + + fn read(r: &mut B, marker: &Marker) -> error::Result { + let u = u8::read(r, marker)?; + Self::from_u8(u).ok_or(error::Error::Format("u8 was greater than 4")) + } + + fn write(self, w: &mut B) -> error::Result<()> { + let u = self.to_u8(); + u8::write(u, w)?; + Ok(()) + } +} diff --git a/types/src/lib.rs b/types/src/lib.rs index 0fd1ec7..a5a04f9 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -9,20 +9,25 @@ // // Documentation for each module is located in the respective file. 
+mod address_type; mod block_complete_entry; +mod connection_state; mod hard_fork; mod transaction_verification_data; mod types; +pub use address_type::AddressType; pub use block_complete_entry::{BlockCompleteEntry, PrunedTxBlobEntry, TransactionBlobs}; +pub use connection_state::ConnectionState; pub use hard_fork::{HardFork, HardForkError}; pub use transaction_verification_data::{ CachedVerificationState, TransactionVerificationData, TxVersion, }; pub use types::{ - AltBlockInformation, Chain, ChainId, ChainInfo, CoinbaseTxSum, ExtendedBlockHeader, - FeeEstimate, HardForkInfo, MinerData, MinerDataTxBacklogEntry, OutputHistogramEntry, - OutputHistogramInput, OutputOnChain, VerifiedBlockInformation, VerifiedTransactionInformation, + AddAuxPow, AltBlockInformation, AuxPow, Chain, ChainId, ChainInfo, CoinbaseTxSum, + ExtendedBlockHeader, FeeEstimate, HardForkInfo, MinerData, MinerDataTxBacklogEntry, + OutputHistogramEntry, OutputHistogramInput, OutputOnChain, VerifiedBlockInformation, + VerifiedTransactionInformation, }; //---------------------------------------------------------------------------------------------------- Feature-gated diff --git a/types/src/types.rs b/types/src/types.rs index 7d5c377..720ad0a 100644 --- a/types/src/types.rs +++ b/types/src/types.rs @@ -177,8 +177,6 @@ pub struct OutputHistogramEntry { pub struct CoinbaseTxSum { pub emission_amount: u128, pub fee_amount: u128, - pub wide_emission_amount: u128, - pub wide_fee_amount: u128, } /// Data to create a custom block template. @@ -242,7 +240,23 @@ pub struct ChainInfo { pub height: u64, pub length: u64, pub main_chain_parent_block: [u8; 32], - pub wide_difficulty: u128, +} + +/// Used in RPC's `add_aux_pow`. +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct AuxPow { + pub id: [u8; 32], + pub hash: [u8; 32], +} + +/// Used in RPC's `add_aux_pow`. 
+#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct AddAuxPow { + pub blocktemplate_blob: Vec, + pub blockhashing_blob: Vec, + pub merkle_root: [u8; 32], + pub merkle_tree_depth: u64, + pub aux_pow: Vec, } //---------------------------------------------------------------------------------------------------- Tests From b6c4adc83a199886d6f932c1321857fb8a535af5 Mon Sep 17 00:00:00 2001 From: SyntheticBird <118022351+SyntheticBird45@users.noreply.github.com> Date: Sat, 2 Nov 2024 00:45:56 +0000 Subject: [PATCH 091/104] p2p: Implement P2P Bucket data structure (#329) Implements P2P Bucket data structure This commit implements a "Bucket" data structure that is a collection of data that discriminates its items into "buckets" (vector of size N) following a defined function. - Implements Bucket data structure and Bucketable trait - Implements Bucketable for Ipv4Addr - Added the crate to the workspace dependencies - Added arrayvec as a dependency --- Cargo.lock | 8 + Cargo.toml | 3 + books/architecture/src/appendix/crates.md | 1 + p2p/bucket/Cargo.toml | 13 ++ p2p/bucket/src/lib.rs | 172 ++++++++++++++++++++++ 5 files changed, 197 insertions(+) create mode 100644 p2p/bucket/Cargo.toml create mode 100644 p2p/bucket/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 7ad2f2a..9a0ebd5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -855,6 +855,14 @@ dependencies = [ "tracing", ] +[[package]] +name = "cuprate-p2p-bucket" +version = "0.1.0" +dependencies = [ + "arrayvec", + "rand", +] + [[package]] name = "cuprate-p2p-core" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index d5aca71..614788d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,7 @@ members = [ "net/wire", "p2p/p2p", "p2p/p2p-core", + "p2p/bucket", "p2p/dandelion-tower", "p2p/async-buffer", "p2p/address-book", @@ -64,6 +65,7 @@ cuprate-levin = { path = "net/levin" ,default-feature cuprate-wire = { path = "net/wire" ,default-features = false} cuprate-p2p = { path = "p2p/p2p" 
,default-features = false} cuprate-p2p-core = { path = "p2p/p2p-core" ,default-features = false} +cuprate-p2p-bucket = { path = "p2p/p2p-bucket" ,default-features = false} cuprate-dandelion-tower = { path = "p2p/dandelion-tower" ,default-features = false} cuprate-async-buffer = { path = "p2p/async-buffer" ,default-features = false} cuprate-address-book = { path = "p2p/address-book" ,default-features = false} @@ -80,6 +82,7 @@ cuprate-rpc-interface = { path = "rpc/interface" ,default-feature # External dependencies anyhow = { version = "1.0.89", default-features = false } +arrayvec = { version = "0.7", default-features = false } async-trait = { version = "0.1.82", default-features = false } bitflags = { version = "2.6.0", default-features = false } blake3 = { version = "1", default-features = false } diff --git a/books/architecture/src/appendix/crates.md b/books/architecture/src/appendix/crates.md index fe8f1f0..ac2780e 100644 --- a/books/architecture/src/appendix/crates.md +++ b/books/architecture/src/appendix/crates.md @@ -35,6 +35,7 @@ cargo doc --open --package cuprate-blockchain | [`cuprate-async-buffer`](https://doc.cuprate.org/cuprate_async_buffer) | [`p2p/async-buffer/`](https://github.com/Cuprate/cuprate/tree/main/p2p/async-buffer) | A bounded SPSC, FIFO, asynchronous buffer that supports arbitrary weights for values | [`cuprate-dandelion-tower`](https://doc.cuprate.org/cuprate_dandelion_tower) | [`p2p/dandelion-tower/`](https://github.com/Cuprate/cuprate/tree/main/p2p/dandelion-tower) | TODO | [`cuprate-p2p`](https://doc.cuprate.org/cuprate_p2p) | [`p2p/p2p/`](https://github.com/Cuprate/cuprate/tree/main/p2p/p2p) | TODO +| [`cuprate-p2p-bucket`](https://doc.cuprate.org/cuprate_p2p_bucket) | [`p2p/bucket/`](https://github.com/Cuprate/cuprate/tree/main/p2p/bucket) | A collection data structure discriminating its items into "buckets" of limited size. 
| [`cuprate-p2p-core`](https://doc.cuprate.org/cuprate_p2p_core) | [`p2p/p2p-core/`](https://github.com/Cuprate/cuprate/tree/main/p2p/p2p-core) | TODO ## Storage diff --git a/p2p/bucket/Cargo.toml b/p2p/bucket/Cargo.toml new file mode 100644 index 0000000..1a53e85 --- /dev/null +++ b/p2p/bucket/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "cuprate-p2p-bucket" +version = "0.1.0" +edition = "2021" +license = "MIT" +authors = ["SyntheticBird"] + +[dependencies] +arrayvec = { workspace = true } +rand = { workspace = true, features = ["std", "std_rng"]} + +[lints] +workspace = true diff --git a/p2p/bucket/src/lib.rs b/p2p/bucket/src/lib.rs new file mode 100644 index 0000000..0f73eea --- /dev/null +++ b/p2p/bucket/src/lib.rs @@ -0,0 +1,172 @@ +//! Bucket data structure +//! +//! A collection data structure that discriminates its unique items and place them into "buckets". +//! +//! The item must implement the [`Bucketable`] trait that defines how to create the discriminant +//! from the item type. The data structure will internally contain any item into "buckets" or vectors +//! of sized capacity `N` that regroup all the stored items with this specific discriminant. +//! +//! A practical example of this data structure is for storing `N` amount of IP discriminated by their subnets. +//! You can store in each "buckets" corresponding to a `/16` subnet up to `N` IPs of that subnet. +//! +//! # Example +//! +//! ``` +//! use cuprate_p2p_bucket::Bucket; +//! use std::net::Ipv4Addr; +//! +//! // Create a new bucket that can store at most 2 IPs in a particular `/16` subnet. +//! let mut bucket = Bucket::<2,Ipv4Addr>::new(); +//! +//! // Fulfill the `96.96.0.0/16` bucket. +//! bucket.push("96.96.0.1".parse().unwrap()); +//! bucket.push("96.96.0.2".parse().unwrap()); +//! assert_eq!(2, bucket.len()); +//! assert_eq!(2, bucket.len_bucket(&[96_u8,96_u8]).unwrap()); +//! +//! // Push a new IP from another subnet +//! bucket.push("127.0.0.1".parse().unwrap()); +//! 
assert_eq!(3, bucket.len()); +//! assert_eq!(2, bucket.len_bucket(&[96_u8,96_u8]).unwrap()); +//! assert_eq!(1, bucket.len_bucket(&[127_u8,0_u8]).unwrap()); +//! +//! // Attempting to push a new IP within `96.96.0.0/16` bucket will return the IP back +//! // as this subnet is already full. +//! let pushed = bucket.push("96.96.0.3".parse().unwrap()); +//! assert!(pushed.is_some()); +//! assert_eq!(2, bucket.len_bucket(&[96_u8,96_u8]).unwrap()); +//! +//! ``` + +use arrayvec::{ArrayVec, CapacityError}; +use rand::random; + +use std::{collections::BTreeMap, net::Ipv4Addr}; + +/// A discriminant that can be computed from the type. +pub trait Bucketable: Sized + Eq + Clone { + /// The type of the discriminant being used in the Binary tree. + type Discriminant: Ord + AsRef<[u8]>; + + /// Method that can compute the discriminant from the item. + fn discriminant(&self) -> Self::Discriminant; +} + +/// A collection data structure discriminating its unique items +/// with a specified method. Limiting the amount of items stored +/// with that discriminant to the const `N`. +pub struct Bucket { + /// The storage of the bucket + storage: BTreeMap>, +} + +impl Bucket { + /// Create a new Bucket + pub const fn new() -> Self { + Self { + storage: BTreeMap::new(), + } + } + + /// Push a new element into the Bucket + /// + /// Will internally create a new vector for each new discriminant being + /// generated from an item. + /// + /// This function WILL NOT push the element if it already exists. + /// + /// Return `None` if the item has been pushed or ignored. `Some(I)` if + /// the vector is full. + /// + /// # Example + /// + /// ``` + /// use cuprate_p2p_bucket::Bucket; + /// use std::net::Ipv4Addr; + /// + /// let mut bucket = Bucket::<8,Ipv4Addr>::new(); + /// + /// // Push a first IP address. + /// bucket.push("127.0.0.1".parse().unwrap()); + /// assert_eq!(1, bucket.len()); + /// + /// // Push the same IP address a second time. 
+ /// bucket.push("127.0.0.1".parse().unwrap()); + /// assert_eq!(1, bucket.len()); + /// ``` + pub fn push(&mut self, item: I) -> Option { + let discriminant = item.discriminant(); + + if let Some(vec) = self.storage.get_mut(&discriminant) { + // Push the item if it doesn't exist. + if !vec.contains(&item) { + return vec.try_push(item).err().map(CapacityError::element); + } + } else { + // Initialize the vector if not found. + let mut vec = ArrayVec::::new(); + vec.push(item); + self.storage.insert(discriminant, vec); + } + + None + } + + /// Will attempt to remove an item from the bucket. + pub fn remove(&mut self, item: &I) -> Option { + self.storage.get_mut(&item.discriminant()).and_then(|vec| { + vec.iter() + .enumerate() + .find_map(|(i, v)| (item == v).then_some(i)) + .map(|index| vec.swap_remove(index)) + }) + } + + /// Return the number of item stored within the storage + pub fn len(&self) -> usize { + self.storage.values().map(ArrayVec::len).sum() + } + + /// Return the number of item stored with a specific discriminant. + /// + /// This method returns None if the bucket with this discriminant + /// doesn't exist. + pub fn len_bucket(&self, discriminant: &I::Discriminant) -> Option { + self.storage.get(discriminant).map(ArrayVec::len) + } + + /// Return `true` if the storage contains no items + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Return a reference to an item chosen at random. + /// + /// Repeated use of this function will provide a normal distribution of + /// items based on their discriminants. + pub fn get_random(&mut self) -> Option<&I> { + // Get the total amount of discriminants to explore. + let len = self.storage.len(); + + // Get a random bucket. + let (_, vec) = self.storage.iter().nth(random::() / len).unwrap(); + + // Return a reference chose at random. 
+ vec.get(random::() / vec.len()) + } +} + +impl Default for Bucket { + fn default() -> Self { + Self::new() + } +} + +impl Bucketable for Ipv4Addr { + /// We are discriminating by `/16` subnets. + type Discriminant = [u8; 2]; + + fn discriminant(&self) -> Self::Discriminant { + [self.octets()[0], self.octets()[1]] + } +} From 525e20e841cd6db0422d30d5bde85277c26a947f Mon Sep 17 00:00:00 2001 From: Boog900 Date: Mon, 4 Nov 2024 15:22:43 +0000 Subject: [PATCH 092/104] Fix ci and loosen version requirements (#335) * add deny exception + loosen version requirements * add a comment * remove `expect` --- .github/workflows/audit.yml | 34 --- Cargo.lock | 378 ++++++++++++++++----------------- Cargo.toml | 74 +++---- consensus/src/tests/mock_db.rs | 2 - deny.toml | 4 + 5 files changed, 227 insertions(+), 265 deletions(-) delete mode 100644 .github/workflows/audit.yml diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml deleted file mode 100644 index 84b1995..0000000 --- a/.github/workflows/audit.yml +++ /dev/null @@ -1,34 +0,0 @@ -# This runs `cargo audit` on all dependencies (only if Cargo deps changed) - -name: Audit - -on: - push: - paths: - - '**/Cargo.toml' - - '**/Cargo.lock' - workflow_dispatch: - -env: - CARGO_TERM_COLOR: always - -jobs: - audit: - - runs-on: ubuntu-latest - - steps: - - name: Cache - uses: actions/cache@v4 - with: - path: | - ~/.cargo - target - key: audit - - uses: actions/checkout@v4 - with: - submodules: recursive - - name: Install dependencies - run: cargo install cargo-audit --locked - - name: Audit - run: cargo audit diff --git a/Cargo.lock b/Cargo.lock index 9a0ebd5..b446bf6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" +checksum = 
"dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] @@ -46,15 +46,15 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anyhow" -version = "1.0.89" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +checksum = "74f37166d7d48a0284b99dd824694c26119c700b53bf0d1540cdb147dbdaaf13" [[package]] name = "arrayref" @@ -70,9 +70,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -81,24 +81,24 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] name = "async-trait" -version = "0.1.82" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -109,15 +109,15 @@ 
checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.7.5" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" dependencies = [ "async-trait", "axum-core", @@ -141,7 +141,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower 0.4.13", + "tower 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "tower-layer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tower-service 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tracing", @@ -149,9 +149,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", @@ -162,7 +162,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.1", "tower-layer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tower-service 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tracing", @@ -292,7 +292,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.77", + "syn", "syn_derive", ] @@ -304,22 +304,22 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytemuck" -version = "1.18.0" +version = 
"1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" +checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cc8b54b395f2fcfbb3d90c47b01c7f444d94d05bdeb775811dec868ac3bbc26" +checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -330,18 +330,18 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" dependencies = [ "serde", ] [[package]] name = "cc" -version = "1.1.21" +version = "1.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" +checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" dependencies = [ "shlex", ] @@ -372,9 +372,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.17" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -382,9 +382,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.17" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstyle", "clap_lex", @@ -392,14 +392,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -559,7 +559,7 @@ dependencies = [ "thiserror", "tokio", "tokio-util", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", ] @@ -598,7 +598,7 @@ dependencies = [ "tempfile", "thread_local", "tokio", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", ] [[package]] @@ -624,7 +624,7 @@ dependencies = [ "thread_local", "tokio", "tokio-test", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", ] @@ -644,7 +644,7 @@ dependencies = [ "thread_local", "tokio", "tokio-util", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", ] @@ -701,7 +701,7 @@ dependencies = [ "thiserror", "tokio", "tokio-util", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", ] @@ -731,7 +731,7 @@ dependencies = [ "futures", "rayon", "serde", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", ] [[package]] @@ -764,7 +764,7 @@ dependencies = [ "sha3", "thiserror", "tokio", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", ] [[package]] @@ -851,7 +851,7 @@ dependencies = [ "tokio-stream", "tokio-test", "tokio-util", - "tower 0.5.1", + "tower 0.5.1 
(git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", ] @@ -883,7 +883,7 @@ dependencies = [ "tokio-stream", "tokio-test", "tokio-util", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", ] @@ -912,7 +912,7 @@ dependencies = [ "serde", "serde_json", "tokio", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "ureq", ] @@ -974,7 +974,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", ] [[package]] @@ -1077,7 +1077,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", "tracing-subscriber", ] @@ -1108,7 +1108,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -1129,12 +1129,13 @@ dependencies = [ [[package]] name = "dashmap" -version = "5.5.3" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" dependencies = [ "cfg-if", - "hashbrown", + "crossbeam-utils", + "hashbrown 0.14.5", "lock_api", "once_cell", "parking_lot_core", @@ -1247,9 +1248,9 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "flate2" -version = "1.0.33" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" +checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", "miniz_oxide", @@ -1291,9 +1292,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = 
"futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1305,9 +1306,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1315,44 +1316,44 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = 
"e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -1389,9 +1390,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "groestl" @@ -1441,6 +1442,12 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" + [[package]] name = "heck" version = "0.5.0" @@ -1542,9 +1549,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -1554,9 +1561,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", "futures-channel", @@ -1593,9 +1600,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", @@ -1606,7 +1613,6 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower 0.4.13", "tower-service 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tracing", ] @@ -1646,12 +1652,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.15.0", ] [[package]] @@ -1673,9 +1679,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] @@ -1697,15 +1703,15 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.158" +version = "0.2.161" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" [[package]] name = "libm" -version = "0.2.8" +version = "0.2.11" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libredox" @@ -1992,18 +1998,18 @@ dependencies = [ [[package]] name = "object" -version = "0.36.4" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "openssl-probe" @@ -2098,7 +2104,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -2112,29 +2118,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -2195,9 +2201,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" dependencies = [ "unicode-ident", ] @@ -2224,13 +2230,13 @@ dependencies = [ [[package]] name = "proptest-derive" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf16337405ca084e9c78985114633b6827711d22b9e6ef6c6c0d665eb3f0b6e" +checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn", ] [[package]] @@ -2335,18 +2341,18 @@ dependencies = [ [[package]] name = "redb" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4760ad04a88ef77075ba86ba9ea79b919e6bab29c1764c5747237cd6eaedcaa" +checksum = "84b1de48a7cf7ba193e81e078d17ee2b786236eed1d3f7c60f8a09545efc4925" dependencies = [ "libc", ] [[package]] name = "redox_syscall" -version = "0.5.4" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] @@ -2379,14 +2385,14 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "ring" @@ -2420,9 +2426,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" dependencies = [ "bitflags 2.6.0", "errno", @@ -2433,9 +2439,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.13" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "log", "once_cell", @@ -2461,19 +2467,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" [[package]] name = "rustls-webpki" @@ -2488,9 +2493,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = 
"rusty-fork" @@ -2512,9 +2517,9 @@ checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "schannel" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" +checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ "windows-sys 0.59.0", ] @@ -2540,9 +2545,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -2562,9 +2567,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.210" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" dependencies = [ "serde_derive", ] @@ -2580,20 +2585,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "itoa", 
"memchr", @@ -2733,7 +2738,7 @@ name = "std-shims" version = "0.1.1" source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ - "hashbrown", + "hashbrown 0.14.5", "spin", ] @@ -2756,7 +2761,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.77", + "syn", ] [[package]] @@ -2767,20 +2772,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "1.0.109" +version = "2.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.77" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" +checksum = "e89275301d38033efb81a6e60e3497e734dfcc62571f2854bf4b16690398824c" dependencies = [ "proc-macro2", "quote", @@ -2796,7 +2790,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -2828,9 +2822,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", "fastrand", @@ -2841,22 +2835,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "5d171f59dbaa811dbbb1aee1e73db92ec2b122911a48e1390dfe327a821ddede" dependencies = [ "thiserror-impl", ] [[package]] name = 
"thiserror-impl" -version = "1.0.63" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "b08be0f17bd307950653ce45db00cd31200d82b624b36e181337d9c7d92765b5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -2892,9 +2886,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.40.0" +version = "1.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" +checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" dependencies = [ "backtrace", "bytes", @@ -2916,7 +2910,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -2978,9 +2972,9 @@ checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" [[package]] name = "toml_edit" -version = "0.22.21" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap", "toml_datetime", @@ -2989,14 +2983,14 @@ dependencies = [ [[package]] name = "tower" -version = "0.4.13" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" dependencies = [ "futures-core", "futures-util", - "pin-project", "pin-project-lite", + "sync_wrapper 0.1.2", "tokio", "tower-layer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tower-service 0.3.3 
(registry+https://github.com/rust-lang/crates.io-index)", @@ -3061,7 +3055,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -3119,9 +3113,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" @@ -3217,9 +3211,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if", "once_cell", @@ -3228,24 +3222,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.77", + "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3253,28 +3247,28 @@ dependencies = [ [[package]] name = 
"wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "webpki-roots" -version = "0.26.5" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ "rustls-pki-types", ] @@ -3341,7 +3335,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -3352,7 +3346,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -3524,9 +3518,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -3564,7 +3558,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] 
[[package]] @@ -3584,5 +3578,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] diff --git a/Cargo.toml b/Cargo.toml index 614788d..ccc5513 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,55 +81,55 @@ cuprate-rpc-types = { path = "rpc/types" ,default-feature cuprate-rpc-interface = { path = "rpc/interface" ,default-features = false} # External dependencies -anyhow = { version = "1.0.89", default-features = false } +anyhow = { version = "1", default-features = false } arrayvec = { version = "0.7", default-features = false } -async-trait = { version = "0.1.82", default-features = false } -bitflags = { version = "2.6.0", default-features = false } +async-trait = { version = "0.1", default-features = false } +bitflags = { version = "2", default-features = false } blake3 = { version = "1", default-features = false } -borsh = { version = "1.5.1", default-features = false } -bytemuck = { version = "1.18.0", default-features = false } -bytes = { version = "1.7.2", default-features = false } -cfg-if = { version = "1.0.0", default-features = false } -clap = { version = "4.5.17", default-features = false } -chrono = { version = "0.4.38", default-features = false } -crypto-bigint = { version = "0.5.5", default-features = false } -crossbeam = { version = "0.8.4", default-features = false } -const_format = { version = "0.2.33", default-features = false } -curve25519-dalek = { version = "4.1.3", default-features = false } -dashmap = { version = "5.5.3", default-features = false } -dirs = { version = "5.0.1", default-features = false } -futures = { version = "0.3.30", default-features = false } -hex = { version = "0.4.3", default-features = false } +borsh = { version = "1", default-features = false } +bytemuck = { version = "1", default-features = false } +bytes = { version = "1", default-features = false } +cfg-if = { version = "1", default-features = false } +clap = { version = 
"4", default-features = false } +chrono = { version = "0.4", default-features = false } +crypto-bigint = { version = "0.5", default-features = false } +crossbeam = { version = "0.8", default-features = false } +const_format = { version = "0.2", default-features = false } +curve25519-dalek = { version = "4", default-features = false } +dashmap = { version = "6", default-features = false } +dirs = { version = "5", default-features = false } +futures = { version = "0.3", default-features = false } +hex = { version = "0.4", default-features = false } hex-literal = { version = "0.4", default-features = false } -indexmap = { version = "2.5.0", default-features = false } +indexmap = { version = "2", default-features = false } monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce", default-features = false } -paste = { version = "1.0.15", default-features = false } -pin-project = { version = "1.1.5", default-features = false } +paste = { version = "1", default-features = false } +pin-project = { version = "1", default-features = false } randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false } -rand = { version = "0.8.5", default-features = false } -rand_distr = { version = "0.4.3", default-features = false } -rayon = { version = "1.10.0", default-features = false } -serde_bytes = { version = "0.11.15", default-features = false } -serde_json = { version = "1.0.128", default-features = false } -serde = { version = "1.0.210", default-features = false } -strum = { version = "0.26.3", default-features = false } -thiserror = { version = "1.0.63", default-features = false } -thread_local = { version = "1.1.8", default-features = false } -tokio-util = { version = "0.7.12", default-features = false } -tokio-stream = { version = "0.1.16", default-features = false } -tokio = { version = "1.40.0", default-features = false } +rand = { version = "0.8", default-features = false } +rand_distr = { version = "0.4", 
default-features = false } +rayon = { version = "1", default-features = false } +serde_bytes = { version = "0.11", default-features = false } +serde_json = { version = "1", default-features = false } +serde = { version = "1", default-features = false } +strum = { version = "0.26", default-features = false } +thiserror = { version = "1", default-features = false } +thread_local = { version = "1", default-features = false } +tokio-util = { version = "0.7", default-features = false } +tokio-stream = { version = "0.1", default-features = false } +tokio = { version = "1", default-features = false } tower = { git = "https://github.com/Cuprate/tower.git", rev = "6c7faf0", default-features = false } # -tracing-subscriber = { version = "0.3.18", default-features = false } -tracing = { version = "0.1.40", default-features = false } +tracing-subscriber = { version = "0.3", default-features = false } +tracing = { version = "0.1", default-features = false } ## workspace.dev-dependencies monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" } monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" } tempfile = { version = "3" } -pretty_assertions = { version = "1.4.1" } +pretty_assertions = { version = "1" } proptest = { version = "1" } -proptest-derive = { version = "0.4.0" } -tokio-test = { version = "0.4.4" } +proptest-derive = { version = "0.5" } +tokio-test = { version = "0.4" } ## TODO: ## Potential dependencies. 
diff --git a/consensus/src/tests/mock_db.rs b/consensus/src/tests/mock_db.rs index 5ca53d8..bf005d2 100644 --- a/consensus/src/tests/mock_db.rs +++ b/consensus/src/tests/mock_db.rs @@ -1,5 +1,3 @@ -#![expect(non_local_definitions, reason = "proptest macro")] - use std::{ future::Future, pin::Pin, diff --git a/deny.toml b/deny.toml index f469d06..e54d116 100644 --- a/deny.toml +++ b/deny.toml @@ -81,6 +81,9 @@ ignore = [ #{ id = "RUSTSEC-0000-0000", reason = "you can specify a reason the advisory is ignored" }, #"a-crate-that-is-yanked@0.1.1", # you can also ignore yanked crate versions if you wish #{ crate = "a-crate-that-is-yanked@0.1.1", reason = "you can specify why you are ignoring the yanked crate" }, + + # TODO: check this is sorted before a beta release. + { id = "RUSTSEC-2024-0370", reason = "unmaintained crate, not necessarily vulnerable yet." } ] # If this is true, then cargo deny will use the git executable to fetch advisory database. # If this is false, then it uses a built-in git library. 
@@ -110,6 +113,7 @@ allow = [ "Apache-2.0", # https://tldrlegal.com/license/apache-license-2.0-(apache-2.0) "MPL-2.0", # https://www.mozilla.org/en-US/MPL/2.0/FAQ/ "BSL-1.0", # https://tldrlegal.com/license/boost-software-license-1.0-explained + "Zlib", # https://spdx.org/licenses/Zlib.html # OpenSSL 3.0+ uses Apache-2.0 # OpenSSL 1.x.x uses https://www.openssl.org/source/license-openssl-ssleay.txt From 5a5f88cb139c9e64b486060e34d59e94c0d8a433 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Mon, 11 Nov 2024 23:16:08 +0000 Subject: [PATCH 093/104] types: fix pruned `BlockCompleteEntry` (#338) fix pruned `BlockCompleteEntry` --- types/src/block_complete_entry.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/types/src/block_complete_entry.rs b/types/src/block_complete_entry.rs index 77ed82d..af5fa88 100644 --- a/types/src/block_complete_entry.rs +++ b/types/src/block_complete_entry.rs @@ -136,7 +136,7 @@ impl TransactionBlobs { #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct PrunedTxBlobEntry { /// The transaction. - pub tx: Bytes, + pub blob: Bytes, /// The prunable transaction hash. 
pub prunable_hash: ByteArray<32>, } @@ -144,7 +144,7 @@ pub struct PrunedTxBlobEntry { #[cfg(feature = "epee")] epee_object!( PrunedTxBlobEntry, - tx: Bytes, + blob: Bytes, prunable_hash: ByteArray<32>, ); From 0f1ad6db1b1a3c5b3c66086ca1592a28d2012f3e Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 13 Nov 2024 06:01:15 -0500 Subject: [PATCH 094/104] Cargo.toml: move commas (#340) cargo.toml: move comma --- Cargo.toml | 54 +++++++++++++++++++++++++++--------------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ccc5513..0f460e8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,33 +52,33 @@ opt-level = 3 [workspace.dependencies] # Cuprate members -cuprate-fast-sync = { path = "consensus/fast-sync" ,default-features = false} -cuprate-consensus-rules = { path = "consensus/rules" ,default-features = false} -cuprate-constants = { path = "constants" ,default-features = false} -cuprate-consensus = { path = "consensus" ,default-features = false} -cuprate-consensus-context = { path = "consensus/context" ,default-features = false} -cuprate-cryptonight = { path = "cryptonight" ,default-features = false} -cuprate-helper = { path = "helper" ,default-features = false} -cuprate-epee-encoding = { path = "net/epee-encoding" ,default-features = false} -cuprate-fixed-bytes = { path = "net/fixed-bytes" ,default-features = false} -cuprate-levin = { path = "net/levin" ,default-features = false} -cuprate-wire = { path = "net/wire" ,default-features = false} -cuprate-p2p = { path = "p2p/p2p" ,default-features = false} -cuprate-p2p-core = { path = "p2p/p2p-core" ,default-features = false} -cuprate-p2p-bucket = { path = "p2p/p2p-bucket" ,default-features = false} -cuprate-dandelion-tower = { path = "p2p/dandelion-tower" ,default-features = false} -cuprate-async-buffer = { path = "p2p/async-buffer" ,default-features = false} -cuprate-address-book = { path = "p2p/address-book" ,default-features = false} -cuprate-blockchain = { path 
= "storage/blockchain" ,default-features = false} -cuprate-database = { path = "storage/database" ,default-features = false} -cuprate-database-service = { path = "storage/service" ,default-features = false} -cuprate-txpool = { path = "storage/txpool" ,default-features = false} -cuprate-pruning = { path = "pruning" ,default-features = false} -cuprate-test-utils = { path = "test-utils" ,default-features = false} -cuprate-types = { path = "types" ,default-features = false} -cuprate-json-rpc = { path = "rpc/json-rpc" ,default-features = false} -cuprate-rpc-types = { path = "rpc/types" ,default-features = false} -cuprate-rpc-interface = { path = "rpc/interface" ,default-features = false} +cuprate-fast-sync = { path = "consensus/fast-sync", default-features = false } +cuprate-consensus-rules = { path = "consensus/rules", default-features = false } +cuprate-constants = { path = "constants", default-features = false } +cuprate-consensus = { path = "consensus", default-features = false } +cuprate-consensus-context = { path = "consensus/context", default-features = false } +cuprate-cryptonight = { path = "cryptonight", default-features = false } +cuprate-helper = { path = "helper", default-features = false } +cuprate-epee-encoding = { path = "net/epee-encoding", default-features = false } +cuprate-fixed-bytes = { path = "net/fixed-bytes", default-features = false } +cuprate-levin = { path = "net/levin", default-features = false } +cuprate-wire = { path = "net/wire", default-features = false } +cuprate-p2p = { path = "p2p/p2p", default-features = false } +cuprate-p2p-core = { path = "p2p/p2p-core", default-features = false } +cuprate-p2p-bucket = { path = "p2p/p2p-bucket", default-features = false } +cuprate-dandelion-tower = { path = "p2p/dandelion-tower", default-features = false } +cuprate-async-buffer = { path = "p2p/async-buffer", default-features = false } +cuprate-address-book = { path = "p2p/address-book", default-features = false } +cuprate-blockchain = { path = 
"storage/blockchain", default-features = false } +cuprate-database = { path = "storage/database", default-features = false } +cuprate-database-service = { path = "storage/service", default-features = false } +cuprate-txpool = { path = "storage/txpool", default-features = false } +cuprate-pruning = { path = "pruning", default-features = false } +cuprate-test-utils = { path = "test-utils", default-features = false } +cuprate-types = { path = "types", default-features = false } +cuprate-json-rpc = { path = "rpc/json-rpc", default-features = false } +cuprate-rpc-types = { path = "rpc/types", default-features = false } +cuprate-rpc-interface = { path = "rpc/interface", default-features = false } # External dependencies anyhow = { version = "1", default-features = false } From 241088e2736e8b5f29d2703a6f1e846f6f437867 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Sun, 17 Nov 2024 20:32:41 +0000 Subject: [PATCH 095/104] Wire: fix IPv4 Endianness (#342) * fix IPv4 Endianness * fix import order --- net/wire/src/network_address.rs | 6 ++++-- net/wire/src/network_address/epee_builder.rs | 12 ++++++++---- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/net/wire/src/network_address.rs b/net/wire/src/network_address.rs index ad599b7..3e15c46 100644 --- a/net/wire/src/network_address.rs +++ b/net/wire/src/network_address.rs @@ -17,10 +17,12 @@ //! Monero network. Core Monero has 4 main addresses: IPv4, IPv6, Tor, //! I2p. Currently this module only has IPv(4/6). //! 
-use bytes::BufMut; -use cuprate_epee_encoding::EpeeObject; use std::{hash::Hash, net, net::SocketAddr}; +use bytes::BufMut; + +use cuprate_epee_encoding::EpeeObject; + mod epee_builder; use epee_builder::*; diff --git a/net/wire/src/network_address/epee_builder.rs b/net/wire/src/network_address/epee_builder.rs index c1d1742..bd481a5 100644 --- a/net/wire/src/network_address/epee_builder.rs +++ b/net/wire/src/network_address/epee_builder.rs @@ -1,9 +1,10 @@ -use bytes::Buf; use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; -use cuprate_epee_encoding::{epee_object, EpeeObjectBuilder}; +use bytes::Buf; use thiserror::Error; +use cuprate_epee_encoding::{epee_object, EpeeObjectBuilder}; + use crate::NetworkAddress; #[derive(Default)] @@ -77,7 +78,7 @@ impl From for TaggedNetworkAddress { SocketAddr::V4(addr) => Self { ty: Some(1), addr: Some(AllFieldsNetworkAddress { - m_ip: Some(u32::from_be_bytes(addr.ip().octets())), + m_ip: Some(u32::from_le_bytes(addr.ip().octets())), m_port: Some(addr.port()), addr: None, }), @@ -112,7 +113,10 @@ epee_object!( impl AllFieldsNetworkAddress { fn try_into_network_address(self, ty: u8) -> Option { Some(match ty { - 1 => NetworkAddress::from(SocketAddrV4::new(Ipv4Addr::from(self.m_ip?), self.m_port?)), + 1 => NetworkAddress::from(SocketAddrV4::new( + Ipv4Addr::from(self.m_ip?.to_le_bytes()), + self.m_port?, + )), 2 => NetworkAddress::from(SocketAddrV6::new( Ipv6Addr::from(self.addr?), self.m_port?, From e8598a082d5df0660c5f52a2375c68777408b9f4 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Mon, 18 Nov 2024 10:21:52 -0500 Subject: [PATCH 096/104] books/architecture: add `Monero oddities` (#343) * add `oddities/` * swap `Expected`, `Why` --- books/architecture/src/SUMMARY.md | 5 +++ books/architecture/src/oddities/intro.md | 37 ++++++++++++++++++++++ books/architecture/src/oddities/le-ipv4.md | 24 ++++++++++++++ 3 files changed, 66 insertions(+) create mode 100644 books/architecture/src/oddities/intro.md 
create mode 100644 books/architecture/src/oddities/le-ipv4.md diff --git a/books/architecture/src/SUMMARY.md b/books/architecture/src/SUMMARY.md index bf66860..0961d8f 100644 --- a/books/architecture/src/SUMMARY.md +++ b/books/architecture/src/SUMMARY.md @@ -157,6 +157,11 @@ --- +- [🟢 Monero oddities](oddities/intro.md) + - [🟡 Little-endian IPv4 addresses](oddities/le-ipv4.md) + +--- + - [⚪️ Appendix](appendix/intro.md) - [🟢 Crates](appendix/crates.md) - [🔴 Contributing](appendix/contributing.md) diff --git a/books/architecture/src/oddities/intro.md b/books/architecture/src/oddities/intro.md new file mode 100644 index 0000000..c0275b3 --- /dev/null +++ b/books/architecture/src/oddities/intro.md @@ -0,0 +1,37 @@ +# Monero oddities +This section is a list of any peculiar, interesting, +or non-standard behavior that Monero has that is not +planned on being changed or deprecated. + +This section exists to hold all the small yet noteworthy knowledge in one place, +instead of in any single contributor's mind. + +These are usually behaviors stemming from implementation rather than protocol/cryptography. + +## Formatting +This is the markdown formatting for each entry in this section. + +If applicable, consider using this formatting when adding to this section. + +```md +# + +## What +A detailed description of the behavior. + +## Expected +The norm or standard behavior that is usually expected. + +## Why +The reasoning behind why this behavior exists and/or +any links to more detailed discussion on the behavior. + +## Affects +A (potentially non-exhaustive) list of places that this behavior can/does affect. + +## Example +An example link or section of code where the behavior occurs. + +## Source +A link to original `monerod` code that defines the behavior. 
+``` \ No newline at end of file diff --git a/books/architecture/src/oddities/le-ipv4.md b/books/architecture/src/oddities/le-ipv4.md new file mode 100644 index 0000000..f64c1d7 --- /dev/null +++ b/books/architecture/src/oddities/le-ipv4.md @@ -0,0 +1,24 @@ +# Little-endian IPv4 addresses + +## What +Monero encodes IPv4 addresses in [little-endian](https://en.wikipedia.org/wiki/Endianness) byte order. + +## Expected +In general, [networking-related protocols/code use _networking order_ (big-endian)](https://en.wikipedia.org/wiki/Endianness#Networking). + +## Why +TODO + +- +- + +## Affects +Any representation and (de)serialization of IPv4 addresses must keep little +endian in-mind, e.g. the P2P wire format or `int` encoded IPv4 addresses in RPC. + +For example, [the `ip` field in `set_bans`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#set_bans). + +For Cuprate, this means Rust's [`Ipv4Addr::from_bits/from`](https://doc.rust-lang.org/1.82.0/src/core/net/ip_addr.rs.html#1182) cannot be used in these cases as [it assumes big-endian encoding](https://doc.rust-lang.org/1.82.0/src/core/net/ip_addr.rs.html#540). 
+ +## Source +- From c54bb0c8b20b209c8e841ab1187c0bf772ecc33c Mon Sep 17 00:00:00 2001 From: Boog900 Date: Wed, 20 Nov 2024 01:37:52 +0000 Subject: [PATCH 097/104] P2P: Change `ClientPool` to `PeerSet` (#337) * add WeakClient * todo * client pool -> peer set * more peer set changes * fix cuprated builds * add docs * more docs + better disconnect handling * more docs * fix imports * review fixes --- Cargo.lock | 1 - binaries/cuprated/src/blockchain/syncer.rs | 27 ++- binaries/cuprated/src/txpool/dandelion.rs | 2 +- .../src/txpool/dandelion/stem_service.rs | 71 ++++-- p2p/p2p-core/src/client.rs | 13 ++ p2p/p2p-core/src/client/weak.rs | 114 +++++++++ p2p/p2p/Cargo.toml | 4 +- p2p/p2p/src/block_downloader.rs | 68 +++--- .../src/block_downloader/download_batch.rs | 8 +- p2p/p2p/src/block_downloader/request_chain.rs | 24 +- p2p/p2p/src/block_downloader/tests.rs | 15 +- p2p/p2p/src/client_pool.rs | 188 --------------- p2p/p2p/src/client_pool/disconnect_monitor.rs | 83 ------- p2p/p2p/src/client_pool/drop_guard_client.rs | 41 ---- p2p/p2p/src/connection_maintainer.rs | 11 +- p2p/p2p/src/inbound_server.rs | 9 +- p2p/p2p/src/lib.rs | 38 +-- p2p/p2p/src/peer_set.rs | 217 ++++++++++++++++++ p2p/p2p/src/peer_set/client_wrappers.rs | 86 +++++++ 19 files changed, 602 insertions(+), 418 deletions(-) create mode 100644 p2p/p2p-core/src/client/weak.rs delete mode 100644 p2p/p2p/src/client_pool.rs delete mode 100644 p2p/p2p/src/client_pool/disconnect_monitor.rs delete mode 100644 p2p/p2p/src/client_pool/drop_guard_client.rs create mode 100644 p2p/p2p/src/peer_set.rs create mode 100644 p2p/p2p/src/peer_set/client_wrappers.rs diff --git a/Cargo.lock b/Cargo.lock index b446bf6..a947a15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -837,7 +837,6 @@ dependencies = [ "cuprate-test-utils", "cuprate-types", "cuprate-wire", - "dashmap", "futures", "indexmap", "monero-serai", diff --git a/binaries/cuprated/src/blockchain/syncer.rs b/binaries/cuprated/src/blockchain/syncer.rs index 
913c983..69ad330 100644 --- a/binaries/cuprated/src/blockchain/syncer.rs +++ b/binaries/cuprated/src/blockchain/syncer.rs @@ -12,7 +12,7 @@ use tracing::instrument; use cuprate_consensus::{BlockChainContext, BlockChainContextRequest, BlockChainContextResponse}; use cuprate_p2p::{ block_downloader::{BlockBatch, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse}, - NetworkInterface, + NetworkInterface, PeerSetRequest, PeerSetResponse, }; use cuprate_p2p_core::ClearNet; @@ -28,15 +28,11 @@ pub enum SyncerError { } /// The syncer tasks that makes sure we are fully synchronised with our connected peers. -#[expect( - clippy::significant_drop_tightening, - reason = "Client pool which will be removed" -)] #[instrument(level = "debug", skip_all)] pub async fn syncer( mut context_svc: C, our_chain: CN, - clearnet_interface: NetworkInterface, + mut clearnet_interface: NetworkInterface, incoming_block_batch_tx: mpsc::Sender, stop_current_block_downloader: Arc, block_downloader_config: BlockDownloaderConfig, @@ -67,8 +63,6 @@ where unreachable!(); }; - let client_pool = clearnet_interface.client_pool(); - tracing::debug!("Waiting for new sync info in top sync channel"); loop { @@ -79,9 +73,20 @@ where check_update_blockchain_context(&mut context_svc, &mut blockchain_ctx).await?; let raw_blockchain_context = blockchain_ctx.unchecked_blockchain_context(); - if !client_pool.contains_client_with_more_cumulative_difficulty( - raw_blockchain_context.cumulative_difficulty, - ) { + let PeerSetResponse::MostPoWSeen { + cumulative_difficulty, + .. + } = clearnet_interface + .peer_set() + .ready() + .await? + .call(PeerSetRequest::MostPoWSeen) + .await? 
+ else { + unreachable!(); + }; + + if cumulative_difficulty <= raw_blockchain_context.cumulative_difficulty { continue; } diff --git a/binaries/cuprated/src/txpool/dandelion.rs b/binaries/cuprated/src/txpool/dandelion.rs index d791b62..00d9f5a 100644 --- a/binaries/cuprated/src/txpool/dandelion.rs +++ b/binaries/cuprated/src/txpool/dandelion.rs @@ -59,7 +59,7 @@ pub fn dandelion_router(clear_net: NetworkInterface) -> ConcreteDandel diffuse_service::DiffuseService { clear_net_broadcast_service: clear_net.broadcast_svc(), }, - stem_service::OutboundPeerStream { clear_net }, + stem_service::OutboundPeerStream::new(clear_net), DANDELION_CONFIG, ) } diff --git a/binaries/cuprated/src/txpool/dandelion/stem_service.rs b/binaries/cuprated/src/txpool/dandelion/stem_service.rs index 5c0ba65..2debfd4 100644 --- a/binaries/cuprated/src/txpool/dandelion/stem_service.rs +++ b/binaries/cuprated/src/txpool/dandelion/stem_service.rs @@ -1,14 +1,15 @@ use std::{ + future::Future, pin::Pin, - task::{Context, Poll}, + task::{ready, Context, Poll}, }; use bytes::Bytes; -use futures::Stream; +use futures::{future::BoxFuture, FutureExt, Stream}; use tower::Service; use cuprate_dandelion_tower::{traits::StemRequest, OutboundPeer}; -use cuprate_p2p::{ClientPoolDropGuard, NetworkInterface}; +use cuprate_p2p::{ClientDropGuard, NetworkInterface, PeerSetRequest, PeerSetResponse}; use cuprate_p2p_core::{ client::{Client, InternalPeerID}, ClearNet, NetworkZone, PeerRequest, ProtocolRequest, @@ -19,7 +20,17 @@ use crate::{p2p::CrossNetworkInternalPeerId, txpool::dandelion::DandelionTx}; /// The dandelion outbound peer stream. 
pub struct OutboundPeerStream { - pub clear_net: NetworkInterface, + clear_net: NetworkInterface, + state: OutboundPeerStreamState, +} + +impl OutboundPeerStream { + pub const fn new(clear_net: NetworkInterface) -> Self { + Self { + clear_net, + state: OutboundPeerStreamState::Standby, + } + } } impl Stream for OutboundPeerStream { @@ -28,23 +39,49 @@ impl Stream for OutboundPeerStream { tower::BoxError, >; - fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // TODO: make the outbound peer choice random. - Poll::Ready(Some(Ok(self - .clear_net - .client_pool() - .outbound_client() - .map_or(OutboundPeer::Exhausted, |client| { - OutboundPeer::Peer( - CrossNetworkInternalPeerId::ClearNet(client.info.id), - StemPeerService(client), - ) - })))) + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + match &mut self.state { + OutboundPeerStreamState::Standby => { + let peer_set = self.clear_net.peer_set(); + let res = ready!(peer_set.poll_ready(cx)); + + self.state = OutboundPeerStreamState::AwaitingPeer( + peer_set.call(PeerSetRequest::StemPeer).boxed(), + ); + } + OutboundPeerStreamState::AwaitingPeer(fut) => { + let res = ready!(fut.poll_unpin(cx)); + + return Poll::Ready(Some(res.map(|res| { + let PeerSetResponse::StemPeer(stem_peer) = res else { + unreachable!() + }; + + match stem_peer { + Some(peer) => OutboundPeer::Peer( + CrossNetworkInternalPeerId::ClearNet(peer.info.id), + StemPeerService(peer), + ), + None => OutboundPeer::Exhausted, + } + }))); + } + } + } } } +/// The state of the [`OutboundPeerStream`]. +enum OutboundPeerStreamState { + /// Standby state. + Standby, + /// Awaiting a response from the peer-set. + AwaitingPeer(BoxFuture<'static, Result, tower::BoxError>>), +} + /// The stem service, used to send stem txs. 
-pub struct StemPeerService(ClientPoolDropGuard); +pub struct StemPeerService(ClientDropGuard); impl Service> for StemPeerService { type Response = as Service>::Response; diff --git a/p2p/p2p-core/src/client.rs b/p2p/p2p-core/src/client.rs index 73b33ba..f2fde67 100644 --- a/p2p/p2p-core/src/client.rs +++ b/p2p/p2p-core/src/client.rs @@ -27,9 +27,11 @@ mod connector; pub mod handshaker; mod request_handler; mod timeout_monitor; +mod weak; pub use connector::{ConnectRequest, Connector}; pub use handshaker::{DoHandshakeRequest, HandshakeError, HandshakerBuilder}; +pub use weak::WeakClient; /// An internal identifier for a given peer, will be their address if known /// or a random u128 if not. @@ -128,6 +130,17 @@ impl Client { } .into() } + + /// Create a [`WeakClient`] for this [`Client`]. + pub fn downgrade(&self) -> WeakClient { + WeakClient { + info: self.info.clone(), + connection_tx: self.connection_tx.downgrade(), + semaphore: self.semaphore.clone(), + permit: None, + error: self.error.clone(), + } + } } impl Service for Client { diff --git a/p2p/p2p-core/src/client/weak.rs b/p2p/p2p-core/src/client/weak.rs new file mode 100644 index 0000000..90f25dd --- /dev/null +++ b/p2p/p2p-core/src/client/weak.rs @@ -0,0 +1,114 @@ +use std::task::{ready, Context, Poll}; + +use futures::channel::oneshot; +use tokio::sync::{mpsc, OwnedSemaphorePermit}; +use tokio_util::sync::PollSemaphore; +use tower::Service; + +use cuprate_helper::asynch::InfallibleOneshotReceiver; + +use crate::{ + client::{connection, PeerInformation}, + NetworkZone, PeerError, PeerRequest, PeerResponse, SharedError, +}; + +/// A weak handle to a [`Client`](super::Client). +/// +/// When this is dropped the peer will not be disconnected. +pub struct WeakClient { + /// Information on the connected peer. + pub info: PeerInformation, + + /// The channel to the [`Connection`](connection::Connection) task. 
+ pub(super) connection_tx: mpsc::WeakSender, + + /// The semaphore that limits the requests sent to the peer. + pub(super) semaphore: PollSemaphore, + /// A permit for the semaphore, will be [`Some`] after `poll_ready` returns ready. + pub(super) permit: Option, + + /// The error slot shared between the [`Client`] and [`Connection`](connection::Connection). + pub(super) error: SharedError, +} + +impl WeakClient { + /// Internal function to set an error on the [`SharedError`]. + fn set_err(&self, err: PeerError) -> tower::BoxError { + let err_str = err.to_string(); + match self.error.try_insert_err(err) { + Ok(()) => err_str, + Err(e) => e.to_string(), + } + .into() + } +} + +impl Service for WeakClient { + type Response = PeerResponse; + type Error = tower::BoxError; + type Future = InfallibleOneshotReceiver>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + if let Some(err) = self.error.try_get_err() { + return Poll::Ready(Err(err.to_string().into())); + } + + if self.connection_tx.strong_count() == 0 { + let err = self.set_err(PeerError::ClientChannelClosed); + return Poll::Ready(Err(err)); + } + + if self.permit.is_some() { + return Poll::Ready(Ok(())); + } + + let permit = ready!(self.semaphore.poll_acquire(cx)) + .expect("Client semaphore should not be closed!"); + + self.permit = Some(permit); + + Poll::Ready(Ok(())) + } + + #[expect(clippy::significant_drop_tightening)] + fn call(&mut self, request: PeerRequest) -> Self::Future { + let permit = self + .permit + .take() + .expect("poll_ready did not return ready before call to call"); + + let (tx, rx) = oneshot::channel(); + let req = connection::ConnectionTaskRequest { + response_channel: tx, + request, + permit: Some(permit), + }; + + match self.connection_tx.upgrade() { + None => { + self.set_err(PeerError::ClientChannelClosed); + + let resp = Err(PeerError::ClientChannelClosed.into()); + drop(req.response_channel.send(resp)); + } + Some(sender) => { + if let Err(e) = sender.try_send(req) { 
+ // The connection task could have closed between a call to `poll_ready` and the call to + // `call`, which means if we don't handle the error here the receiver would panic. + use mpsc::error::TrySendError; + + match e { + TrySendError::Closed(req) | TrySendError::Full(req) => { + self.set_err(PeerError::ClientChannelClosed); + + let resp = Err(PeerError::ClientChannelClosed.into()); + drop(req.response_channel.send(resp)); + } + } + } + } + } + + rx.into() + } +} diff --git a/p2p/p2p/Cargo.toml b/p2p/p2p/Cargo.toml index 866fb91..e6ebccb 100644 --- a/p2p/p2p/Cargo.toml +++ b/p2p/p2p/Cargo.toml @@ -20,12 +20,12 @@ monero-serai = { workspace = true, features = ["std"] } tower = { workspace = true, features = ["buffer"] } tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } -rayon = { workspace = true } tokio-util = { workspace = true } +rayon = { workspace = true } tokio-stream = { workspace = true, features = ["sync", "time"] } futures = { workspace = true, features = ["std"] } pin-project = { workspace = true } -dashmap = { workspace = true } +indexmap = { workspace = true, features = ["std"] } thiserror = { workspace = true } bytes = { workspace = true, features = ["std"] } diff --git a/p2p/p2p/src/block_downloader.rs b/p2p/p2p/src/block_downloader.rs index fcc9eb6..faac4d5 100644 --- a/p2p/p2p/src/block_downloader.rs +++ b/p2p/p2p/src/block_downloader.rs @@ -8,7 +8,6 @@ use std::{ cmp::{max, min, Reverse}, collections::{BTreeMap, BinaryHeap}, - sync::Arc, time::Duration, }; @@ -18,7 +17,7 @@ use tokio::{ task::JoinSet, time::{interval, timeout, MissedTickBehavior}, }; -use tower::{Service, ServiceExt}; +use tower::{util::BoxCloneService, Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; use cuprate_async_buffer::{BufferAppender, BufferStream}; @@ -27,11 +26,11 @@ use cuprate_p2p_core::{handles::ConnectionHandle, NetworkZone}; use cuprate_pruning::PruningSeed; use crate::{ - client_pool::{ClientPool, ClientPoolDropGuard}, 
constants::{ BLOCK_DOWNLOADER_REQUEST_TIMEOUT, EMPTY_CHAIN_ENTRIES_BEFORE_TOP_ASSUMED, LONG_BAN, MAX_BLOCK_BATCH_LEN, MAX_DOWNLOAD_FAILURES, }, + peer_set::ClientDropGuard, }; mod block_queue; @@ -41,6 +40,7 @@ mod request_chain; #[cfg(test)] mod tests; +use crate::peer_set::{PeerSetRequest, PeerSetResponse}; use block_queue::{BlockQueue, ReadyQueueBatch}; use chain_tracker::{BlocksToRetrieve, ChainEntry, ChainTracker}; use download_batch::download_batch_task; @@ -135,7 +135,7 @@ pub enum ChainSvcResponse { /// call this function again, so it can start the search again. #[instrument(level = "error", skip_all, name = "block_downloader")] pub fn download_blocks( - client_pool: Arc>, + peer_set: BoxCloneService, tower::BoxError>, our_chain_svc: C, config: BlockDownloaderConfig, ) -> BufferStream @@ -147,8 +147,7 @@ where { let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_size); - let block_downloader = - BlockDownloader::new(client_pool, our_chain_svc, buffer_appender, config); + let block_downloader = BlockDownloader::new(peer_set, our_chain_svc, buffer_appender, config); tokio::spawn( block_downloader @@ -186,8 +185,8 @@ where /// - download an already requested batch of blocks (this might happen due to an error in the previous request /// or because the queue of ready blocks is too large, so we need the oldest block to clear it). struct BlockDownloader { - /// The client pool. - client_pool: Arc>, + /// The peer set. + peer_set: BoxCloneService, tower::BoxError>, /// The service that holds our current chain state. our_chain_svc: C, @@ -208,7 +207,7 @@ struct BlockDownloader { /// /// Returns a result of the chain entry or an error. #[expect(clippy::type_complexity)] - chain_entry_task: JoinSet, ChainEntry), BlockDownloadError>>, + chain_entry_task: JoinSet, ChainEntry), BlockDownloadError>>, /// The current inflight requests. 
/// @@ -235,13 +234,13 @@ where { /// Creates a new [`BlockDownloader`] fn new( - client_pool: Arc>, + peer_set: BoxCloneService, tower::BoxError>, our_chain_svc: C, buffer_appender: BufferAppender, config: BlockDownloaderConfig, ) -> Self { Self { - client_pool, + peer_set, our_chain_svc, amount_of_blocks_to_request: config.initial_batch_size, amount_of_blocks_to_request_updated_at: 0, @@ -259,7 +258,7 @@ where fn check_pending_peers( &mut self, chain_tracker: &mut ChainTracker, - pending_peers: &mut BTreeMap>>, + pending_peers: &mut BTreeMap>>, ) { tracing::debug!("Checking if we can give any work to pending peers."); @@ -286,11 +285,11 @@ where /// This function will find the batch(es) that we are waiting on to clear our ready queue and sends another request /// for them. /// - /// Returns the [`ClientPoolDropGuard`] back if it doesn't have the batch according to its pruning seed. + /// Returns the [`ClientDropGuard`] back if it doesn't have the batch according to its pruning seed. fn request_inflight_batch_again( &mut self, - client: ClientPoolDropGuard, - ) -> Option> { + client: ClientDropGuard, + ) -> Option> { tracing::debug!( "Requesting an inflight batch, current ready queue size: {}", self.block_queue.size() @@ -336,13 +335,13 @@ where /// /// The batch requested will depend on our current state, failed batches will be prioritised. /// - /// Returns the [`ClientPoolDropGuard`] back if it doesn't have the data we currently need according + /// Returns the [`ClientDropGuard`] back if it doesn't have the data we currently need according /// to its pruning seed. fn request_block_batch( &mut self, chain_tracker: &mut ChainTracker, - client: ClientPoolDropGuard, - ) -> Option> { + client: ClientDropGuard, + ) -> Option> { tracing::trace!("Using peer to request a batch of blocks."); // First look to see if we have any failed requests. 
while let Some(failed_request) = self.failed_batches.peek() { @@ -416,13 +415,13 @@ where /// This function will use our current state to decide if we should send a request for a chain entry /// or if we should request a batch of blocks. /// - /// Returns the [`ClientPoolDropGuard`] back if it doesn't have the data we currently need according + /// Returns the [`ClientDropGuard`] back if it doesn't have the data we currently need according /// to its pruning seed. fn try_handle_free_client( &mut self, chain_tracker: &mut ChainTracker, - client: ClientPoolDropGuard, - ) -> Option> { + client: ClientDropGuard, + ) -> Option> { // We send 2 requests, so if one of them is slow or doesn't have the next chain, we still have a backup. if self.chain_entry_task.len() < 2 // If we have had too many failures then assume the tip has been found so no more chain entries. @@ -463,7 +462,7 @@ where async fn check_for_free_clients( &mut self, chain_tracker: &mut ChainTracker, - pending_peers: &mut BTreeMap>>, + pending_peers: &mut BTreeMap>>, ) -> Result<(), BlockDownloadError> { tracing::debug!("Checking for free peers"); @@ -478,10 +477,19 @@ where panic!("Chain service returned wrong response."); }; - for client in self - .client_pool - .clients_with_more_cumulative_difficulty(current_cumulative_difficulty) - { + let PeerSetResponse::PeersWithMorePoW(clients) = self + .peer_set + .ready() + .await? + .call(PeerSetRequest::PeersWithMorePoW( + current_cumulative_difficulty, + )) + .await? 
+ else { + unreachable!(); + }; + + for client in clients { pending_peers .entry(client.info.pruning_seed) .or_default() @@ -497,9 +505,9 @@ where async fn handle_download_batch_res( &mut self, start_height: usize, - res: Result<(ClientPoolDropGuard, BlockBatch), BlockDownloadError>, + res: Result<(ClientDropGuard, BlockBatch), BlockDownloadError>, chain_tracker: &mut ChainTracker, - pending_peers: &mut BTreeMap>>, + pending_peers: &mut BTreeMap>>, ) -> Result<(), BlockDownloadError> { tracing::debug!("Handling block download response"); @@ -593,7 +601,7 @@ where /// Starts the main loop of the block downloader. async fn run(mut self) -> Result<(), BlockDownloadError> { let mut chain_tracker = - initial_chain_search(&self.client_pool, &mut self.our_chain_svc).await?; + initial_chain_search(&mut self.peer_set, &mut self.our_chain_svc).await?; let mut pending_peers = BTreeMap::new(); @@ -662,7 +670,7 @@ struct BlockDownloadTaskResponse { /// The start height of the batch. start_height: usize, /// A result containing the batch or an error. - result: Result<(ClientPoolDropGuard, BlockBatch), BlockDownloadError>, + result: Result<(ClientDropGuard, BlockBatch), BlockDownloadError>, } /// Returns if a peer has all the blocks in a range, according to its [`PruningSeed`]. diff --git a/p2p/p2p/src/block_downloader/download_batch.rs b/p2p/p2p/src/block_downloader/download_batch.rs index bbb14b3..ef621ce 100644 --- a/p2p/p2p/src/block_downloader/download_batch.rs +++ b/p2p/p2p/src/block_downloader/download_batch.rs @@ -16,8 +16,8 @@ use cuprate_wire::protocol::{GetObjectsRequest, GetObjectsResponse}; use crate::{ block_downloader::{BlockBatch, BlockDownloadError, BlockDownloadTaskResponse}, - client_pool::ClientPoolDropGuard, constants::{BLOCK_DOWNLOADER_REQUEST_TIMEOUT, MAX_TRANSACTION_BLOB_SIZE, MEDIUM_BAN}, + peer_set::ClientDropGuard, }; /// Attempts to request a batch of blocks from a peer, returning [`BlockDownloadTaskResponse`]. 
@@ -32,7 +32,7 @@ use crate::{ )] #[expect(clippy::used_underscore_binding)] pub async fn download_batch_task( - client: ClientPoolDropGuard, + client: ClientDropGuard, ids: ByteArrayVec<32>, previous_id: [u8; 32], expected_start_height: usize, @@ -49,11 +49,11 @@ pub async fn download_batch_task( /// This function will validate the blocks that were downloaded were the ones asked for and that they match /// the expected height. async fn request_batch_from_peer( - mut client: ClientPoolDropGuard, + mut client: ClientDropGuard, ids: ByteArrayVec<32>, previous_id: [u8; 32], expected_start_height: usize, -) -> Result<(ClientPoolDropGuard, BlockBatch), BlockDownloadError> { +) -> Result<(ClientDropGuard, BlockBatch), BlockDownloadError> { let request = PeerRequest::Protocol(ProtocolRequest::GetObjects(GetObjectsRequest { blocks: ids.clone(), pruned: false, diff --git a/p2p/p2p/src/block_downloader/request_chain.rs b/p2p/p2p/src/block_downloader/request_chain.rs index d6a2a0a..4e0f855 100644 --- a/p2p/p2p/src/block_downloader/request_chain.rs +++ b/p2p/p2p/src/block_downloader/request_chain.rs @@ -1,7 +1,7 @@ -use std::{mem, sync::Arc}; +use std::mem; use tokio::{task::JoinSet, time::timeout}; -use tower::{Service, ServiceExt}; +use tower::{util::BoxCloneService, Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; use cuprate_p2p_core::{ @@ -15,11 +15,11 @@ use crate::{ chain_tracker::{ChainEntry, ChainTracker}, BlockDownloadError, ChainSvcRequest, ChainSvcResponse, }, - client_pool::{ClientPool, ClientPoolDropGuard}, constants::{ BLOCK_DOWNLOADER_REQUEST_TIMEOUT, INITIAL_CHAIN_REQUESTS_TO_SEND, MAX_BLOCKS_IDS_IN_CHAIN_ENTRY, MEDIUM_BAN, }, + peer_set::{ClientDropGuard, PeerSetRequest, PeerSetResponse}, }; /// Request a chain entry from a peer. 
@@ -27,9 +27,9 @@ use crate::{ /// Because the block downloader only follows and downloads one chain we only have to send the block hash of /// top block we have found and the genesis block, this is then called `short_history`. pub(crate) async fn request_chain_entry_from_peer( - mut client: ClientPoolDropGuard, + mut client: ClientDropGuard, short_history: [[u8; 32]; 2], -) -> Result<(ClientPoolDropGuard, ChainEntry), BlockDownloadError> { +) -> Result<(ClientDropGuard, ChainEntry), BlockDownloadError> { let PeerResponse::Protocol(ProtocolResponse::GetChain(chain_res)) = client .ready() .await? @@ -80,7 +80,7 @@ pub(crate) async fn request_chain_entry_from_peer( /// We then wait for their response and choose the peer who claims the highest cumulative difficulty. #[instrument(level = "error", skip_all)] pub async fn initial_chain_search( - client_pool: &Arc>, + peer_set: &mut BoxCloneService, tower::BoxError>, mut our_chain_svc: C, ) -> Result, BlockDownloadError> where @@ -102,9 +102,15 @@ where let our_genesis = *block_ids.last().expect("Blockchain had no genesis block."); - let mut peers = client_pool - .clients_with_more_cumulative_difficulty(cumulative_difficulty) - .into_iter(); + let PeerSetResponse::PeersWithMorePoW(clients) = peer_set + .ready() + .await? + .call(PeerSetRequest::PeersWithMorePoW(cumulative_difficulty)) + .await? 
+ else { + unreachable!(); + }; + let mut peers = clients.into_iter(); let mut futs = JoinSet::new(); diff --git a/p2p/p2p/src/block_downloader/tests.rs b/p2p/p2p/src/block_downloader/tests.rs index 83dd417..6799482 100644 --- a/p2p/p2p/src/block_downloader/tests.rs +++ b/p2p/p2p/src/block_downloader/tests.rs @@ -14,8 +14,8 @@ use monero_serai::{ transaction::{Input, Timelock, Transaction, TransactionPrefix}, }; use proptest::{collection::vec, prelude::*}; -use tokio::time::timeout; -use tower::{service_fn, Service}; +use tokio::{sync::mpsc, time::timeout}; +use tower::{buffer::Buffer, service_fn, Service, ServiceExt}; use cuprate_fixed_bytes::ByteArrayVec; use cuprate_p2p_core::{ @@ -31,7 +31,7 @@ use cuprate_wire::{ use crate::{ block_downloader::{download_blocks, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse}, - client_pool::ClientPool, + peer_set::PeerSet, }; proptest! { @@ -48,19 +48,20 @@ proptest! { let tokio_pool = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap(); - #[expect(clippy::significant_drop_tightening)] tokio_pool.block_on(async move { timeout(Duration::from_secs(600), async move { - let client_pool = ClientPool::new(); + let (new_connection_tx, new_connection_rx) = mpsc::channel(peers); + + let peer_set = PeerSet::new(new_connection_rx); for _ in 0..peers { let client = mock_block_downloader_client(Arc::clone(&blockchain)); - client_pool.add_new_client(client); + new_connection_tx.try_send(client).unwrap(); } let stream = download_blocks( - client_pool, + Buffer::new(peer_set, 10).boxed_clone(), OurChainSvc { genesis: *blockchain.blocks.first().unwrap().0 }, diff --git a/p2p/p2p/src/client_pool.rs b/p2p/p2p/src/client_pool.rs deleted file mode 100644 index 67c8f11..0000000 --- a/p2p/p2p/src/client_pool.rs +++ /dev/null @@ -1,188 +0,0 @@ -//! # Client Pool. -//! -//! The [`ClientPool`], is a pool of currently connected peers that can be pulled from. -//! 
It does _not_ necessarily contain every connected peer as another place could have -//! taken a peer from the pool. -//! -//! When taking peers from the pool they are wrapped in [`ClientPoolDropGuard`], which -//! returns the peer to the pool when it is dropped. -//! -//! Internally the pool is a [`DashMap`] which means care should be taken in `async` code -//! as internally this uses blocking `RwLock`s. -use std::sync::Arc; - -use dashmap::DashMap; -use tokio::sync::mpsc; -use tracing::{Instrument, Span}; - -use cuprate_p2p_core::{ - client::{Client, InternalPeerID}, - handles::ConnectionHandle, - ConnectionDirection, NetworkZone, -}; - -pub(crate) mod disconnect_monitor; -mod drop_guard_client; - -pub use drop_guard_client::ClientPoolDropGuard; - -/// The client pool, which holds currently connected free peers. -/// -/// See the [module docs](self) for more. -pub struct ClientPool { - /// The connected [`Client`]s. - clients: DashMap, Client>, - /// A channel to send new peer ids down to monitor for disconnect. - new_connection_tx: mpsc::UnboundedSender<(ConnectionHandle, InternalPeerID)>, -} - -impl ClientPool { - /// Returns a new [`ClientPool`] wrapped in an [`Arc`]. - pub fn new() -> Arc { - let (tx, rx) = mpsc::unbounded_channel(); - - let pool = Arc::new(Self { - clients: DashMap::new(), - new_connection_tx: tx, - }); - - tokio::spawn( - disconnect_monitor::disconnect_monitor(rx, Arc::clone(&pool)) - .instrument(Span::current()), - ); - - pool - } - - /// Adds a [`Client`] to the pool, the client must have previously been taken from the - /// pool. - /// - /// See [`ClientPool::add_new_client`] to add a [`Client`] which was not taken from the pool before. - /// - /// # Panics - /// This function panics if `client` already exists in the pool. - fn add_client(&self, client: Client) { - let handle = client.info.handle.clone(); - let id = client.info.id; - - // Fast path: if the client is disconnected don't add it to the peer set. 
- if handle.is_closed() { - return; - } - - assert!(self.clients.insert(id, client).is_none()); - - // We have to check this again otherwise we could have a race condition where a - // peer is disconnected after the first check, the disconnect monitor tries to remove it, - // and then it is added to the pool. - if handle.is_closed() { - self.remove_client(&id); - } - } - - /// Adds a _new_ [`Client`] to the pool, this client should be a new connection, and not already - /// from the pool. - /// - /// # Panics - /// This function panics if `client` already exists in the pool. - pub fn add_new_client(&self, client: Client) { - self.new_connection_tx - .send((client.info.handle.clone(), client.info.id)) - .unwrap(); - - self.add_client(client); - } - - /// Remove a [`Client`] from the pool. - /// - /// [`None`] is returned if the client did not exist in the pool. - fn remove_client(&self, peer: &InternalPeerID) -> Option> { - self.clients.remove(peer).map(|(_, client)| client) - } - - /// Borrows a [`Client`] from the pool. - /// - /// The [`Client`] is wrapped in [`ClientPoolDropGuard`] which - /// will return the client to the pool when it's dropped. - /// - /// See [`Self::borrow_clients`] for borrowing multiple clients. - pub fn borrow_client( - self: &Arc, - peer: &InternalPeerID, - ) -> Option> { - self.remove_client(peer).map(|client| ClientPoolDropGuard { - pool: Arc::clone(self), - client: Some(client), - }) - } - - /// Borrows multiple [`Client`]s from the pool. - /// - /// Note that the returned iterator is not guaranteed to contain every peer asked for. - /// - /// See [`Self::borrow_client`] for borrowing a single client. - pub fn borrow_clients<'a, 'b>( - self: &'a Arc, - peers: &'b [InternalPeerID], - ) -> impl Iterator> + sealed::Captures<(&'a (), &'b ())> { - peers.iter().filter_map(|peer| self.borrow_client(peer)) - } - - /// Borrows all [`Client`]s from the pool that have claimed a higher cumulative difficulty than - /// the amount passed in. 
- /// - /// The [`Client`]s are wrapped in [`ClientPoolDropGuard`] which - /// will return the clients to the pool when they are dropped. - pub fn clients_with_more_cumulative_difficulty( - self: &Arc, - cumulative_difficulty: u128, - ) -> Vec> { - let peers = self - .clients - .iter() - .filter_map(|element| { - let peer_sync_info = element.value().info.core_sync_data.lock().unwrap(); - - if peer_sync_info.cumulative_difficulty() > cumulative_difficulty { - Some(*element.key()) - } else { - None - } - }) - .collect::>(); - - self.borrow_clients(&peers).collect() - } - - /// Checks all clients in the pool checking if any claim a higher cumulative difficulty than the - /// amount specified. - pub fn contains_client_with_more_cumulative_difficulty( - &self, - cumulative_difficulty: u128, - ) -> bool { - self.clients.iter().any(|element| { - let sync_data = element.value().info.core_sync_data.lock().unwrap(); - sync_data.cumulative_difficulty() > cumulative_difficulty - }) - } - - /// Returns the first outbound peer when iterating over the peers. - pub fn outbound_client(self: &Arc) -> Option> { - let client = self - .clients - .iter() - .find(|element| element.value().info.direction == ConnectionDirection::Outbound)?; - let id = *client.key(); - - Some(self.borrow_client(&id).unwrap()) - } -} - -mod sealed { - /// TODO: Remove me when 2024 Rust - /// - /// - pub trait Captures {} - - impl Captures for T {} -} diff --git a/p2p/p2p/src/client_pool/disconnect_monitor.rs b/p2p/p2p/src/client_pool/disconnect_monitor.rs deleted file mode 100644 index f54b560..0000000 --- a/p2p/p2p/src/client_pool/disconnect_monitor.rs +++ /dev/null @@ -1,83 +0,0 @@ -//! # Disconnect Monitor -//! -//! This module contains the [`disconnect_monitor`] task, which monitors connected peers for disconnection -//! and then removes them from the [`ClientPool`] if they do. 
-use std::{ - future::Future, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; - -use futures::{stream::FuturesUnordered, StreamExt}; -use tokio::sync::mpsc; -use tokio_util::sync::WaitForCancellationFutureOwned; -use tracing::instrument; - -use cuprate_p2p_core::{client::InternalPeerID, handles::ConnectionHandle, NetworkZone}; - -use super::ClientPool; - -/// The disconnect monitor task. -#[instrument(level = "info", skip_all)] -pub async fn disconnect_monitor( - mut new_connection_rx: mpsc::UnboundedReceiver<(ConnectionHandle, InternalPeerID)>, - client_pool: Arc>, -) { - // We need to hold a weak reference otherwise the client pool and this would hold a reference to - // each other causing the pool to be leaked. - let weak_client_pool = Arc::downgrade(&client_pool); - drop(client_pool); - - tracing::info!("Starting peer disconnect monitor."); - - let mut futs: FuturesUnordered> = FuturesUnordered::new(); - - loop { - tokio::select! { - Some((con_handle, peer_id)) = new_connection_rx.recv() => { - tracing::debug!("Monitoring {peer_id} for disconnect"); - futs.push(PeerDisconnectFut { - closed_fut: con_handle.closed(), - peer_id: Some(peer_id), - }); - } - Some(peer_id) = futs.next() => { - tracing::debug!("{peer_id} has disconnected, removing from client pool."); - let Some(pool) = weak_client_pool.upgrade() else { - tracing::info!("Peer disconnect monitor shutting down."); - return; - }; - - pool.remove_client(&peer_id); - drop(pool); - } - else => { - tracing::info!("Peer disconnect monitor shutting down."); - return; - } - } - } -} - -/// A [`Future`] that resolves when a peer disconnects. -#[pin_project::pin_project] -pub(crate) struct PeerDisconnectFut { - /// The inner [`Future`] that resolves when a peer disconnects. - #[pin] - pub(crate) closed_fut: WaitForCancellationFutureOwned, - /// The peers ID. 
- pub(crate) peer_id: Option>, -} - -impl Future for PeerDisconnectFut { - type Output = InternalPeerID; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.project(); - - this.closed_fut - .poll(cx) - .map(|()| this.peer_id.take().unwrap()) - } -} diff --git a/p2p/p2p/src/client_pool/drop_guard_client.rs b/p2p/p2p/src/client_pool/drop_guard_client.rs deleted file mode 100644 index b10c4e9..0000000 --- a/p2p/p2p/src/client_pool/drop_guard_client.rs +++ /dev/null @@ -1,41 +0,0 @@ -use std::{ - ops::{Deref, DerefMut}, - sync::Arc, -}; - -use cuprate_p2p_core::{client::Client, NetworkZone}; - -use crate::client_pool::ClientPool; - -/// A wrapper around [`Client`] which returns the client to the [`ClientPool`] when dropped. -pub struct ClientPoolDropGuard { - /// The [`ClientPool`] to return the peer to. - pub(super) pool: Arc>, - /// The [`Client`]. - /// - /// This is set to [`Some`] when this guard is created, then - /// [`take`](Option::take)n and returned to the pool when dropped. 
- pub(super) client: Option>, -} - -impl Deref for ClientPoolDropGuard { - type Target = Client; - - fn deref(&self) -> &Self::Target { - self.client.as_ref().unwrap() - } -} - -impl DerefMut for ClientPoolDropGuard { - fn deref_mut(&mut self) -> &mut Self::Target { - self.client.as_mut().unwrap() - } -} - -impl Drop for ClientPoolDropGuard { - fn drop(&mut self) { - let client = self.client.take().unwrap(); - - self.pool.add_client(client); - } -} diff --git a/p2p/p2p/src/connection_maintainer.rs b/p2p/p2p/src/connection_maintainer.rs index cd9d931..245fbf1 100644 --- a/p2p/p2p/src/connection_maintainer.rs +++ b/p2p/p2p/src/connection_maintainer.rs @@ -21,7 +21,6 @@ use cuprate_p2p_core::{ }; use crate::{ - client_pool::ClientPool, config::P2PConfig, constants::{HANDSHAKE_TIMEOUT, MAX_SEED_CONNECTIONS, OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT}, }; @@ -46,7 +45,7 @@ pub struct MakeConnectionRequest { /// This handles maintaining a minimum number of connections and making extra connections when needed, upto a maximum. pub struct OutboundConnectionKeeper { /// The pool of currently connected peers. - pub client_pool: Arc>, + pub new_peers_tx: mpsc::Sender>, /// The channel that tells us to make new _extra_ outbound connections. pub make_connection_rx: mpsc::Receiver, /// The address book service @@ -77,7 +76,7 @@ where { pub fn new( config: P2PConfig, - client_pool: Arc>, + new_peers_tx: mpsc::Sender>, make_connection_rx: mpsc::Receiver, address_book_svc: A, connector_svc: C, @@ -86,7 +85,7 @@ where .expect("Gray peer percent is incorrect should be 0..=1"); Self { - client_pool, + new_peers_tx, make_connection_rx, address_book_svc, connector_svc, @@ -149,7 +148,7 @@ where /// Connects to a given outbound peer. 
#[instrument(level = "info", skip_all)] async fn connect_to_outbound_peer(&mut self, permit: OwnedSemaphorePermit, addr: N::Addr) { - let client_pool = Arc::clone(&self.client_pool); + let new_peers_tx = self.new_peers_tx.clone(); let connection_fut = self .connector_svc .ready() @@ -164,7 +163,7 @@ where async move { #[expect(clippy::significant_drop_in_scrutinee)] if let Ok(Ok(peer)) = timeout(HANDSHAKE_TIMEOUT, connection_fut).await { - client_pool.add_new_client(peer); + drop(new_peers_tx.send(peer).await); } } .instrument(Span::current()), diff --git a/p2p/p2p/src/inbound_server.rs b/p2p/p2p/src/inbound_server.rs index 6e793bd..0479560 100644 --- a/p2p/p2p/src/inbound_server.rs +++ b/p2p/p2p/src/inbound_server.rs @@ -6,7 +6,7 @@ use std::{pin::pin, sync::Arc}; use futures::{SinkExt, StreamExt}; use tokio::{ - sync::Semaphore, + sync::{mpsc, Semaphore}, task::JoinSet, time::{sleep, timeout}, }; @@ -24,7 +24,6 @@ use cuprate_wire::{ }; use crate::{ - client_pool::ClientPool, constants::{ HANDSHAKE_TIMEOUT, INBOUND_CONNECTION_COOL_DOWN, PING_REQUEST_CONCURRENCY, PING_REQUEST_TIMEOUT, @@ -36,7 +35,7 @@ use crate::{ /// and initiate handshake if needed, after verifying the address isn't banned. 
#[instrument(level = "warn", skip_all)] pub async fn inbound_server( - client_pool: Arc>, + new_connection_tx: mpsc::Sender>, mut handshaker: HS, mut address_book: A, config: P2PConfig, @@ -111,13 +110,13 @@ where permit: Some(permit), }); - let cloned_pool = Arc::clone(&client_pool); + let new_connection_tx = new_connection_tx.clone(); tokio::spawn( async move { let client = timeout(HANDSHAKE_TIMEOUT, fut).await; if let Ok(Ok(peer)) = client { - cloned_pool.add_new_client(peer); + drop(new_connection_tx.send(peer).await); } } .instrument(Span::current()), diff --git a/p2p/p2p/src/lib.rs b/p2p/p2p/src/lib.rs index 541784c..fb50658 100644 --- a/p2p/p2p/src/lib.rs +++ b/p2p/p2p/src/lib.rs @@ -18,17 +18,18 @@ use cuprate_p2p_core::{ pub mod block_downloader; mod broadcast; -pub mod client_pool; pub mod config; pub mod connection_maintainer; pub mod constants; mod inbound_server; +mod peer_set; use block_downloader::{BlockBatch, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse}; pub use broadcast::{BroadcastRequest, BroadcastSvc}; -pub use client_pool::{ClientPool, ClientPoolDropGuard}; pub use config::{AddressBookConfig, P2PConfig}; use connection_maintainer::MakeConnectionRequest; +use peer_set::PeerSet; +pub use peer_set::{ClientDropGuard, PeerSetRequest, PeerSetResponse}; /// Initializes the P2P [`NetworkInterface`] for a specific [`NetworkZone`]. /// @@ -54,7 +55,10 @@ where cuprate_address_book::init_address_book(config.address_book_config.clone()).await?; let address_book = Buffer::new( address_book, - config.max_inbound_connections + config.outbound_connections, + config + .max_inbound_connections + .checked_add(config.outbound_connections) + .unwrap(), ); // Use the default config. 
Changing the defaults affects tx fluff times, which could affect D++ so for now don't allow changing @@ -83,19 +87,25 @@ where let outbound_handshaker = outbound_handshaker_builder.build(); - let client_pool = ClientPool::new(); - + let (new_connection_tx, new_connection_rx) = mpsc::channel( + config + .outbound_connections + .checked_add(config.max_inbound_connections) + .unwrap(), + ); let (make_connection_tx, make_connection_rx) = mpsc::channel(3); let outbound_connector = Connector::new(outbound_handshaker); let outbound_connection_maintainer = connection_maintainer::OutboundConnectionKeeper::new( config.clone(), - Arc::clone(&client_pool), + new_connection_tx.clone(), make_connection_rx, address_book.clone(), outbound_connector, ); + let peer_set = PeerSet::new(new_connection_rx); + let mut background_tasks = JoinSet::new(); background_tasks.spawn( @@ -105,7 +115,7 @@ where ); background_tasks.spawn( inbound_server::inbound_server( - Arc::clone(&client_pool), + new_connection_tx, inbound_handshaker, address_book.clone(), config, @@ -121,7 +131,7 @@ where ); Ok(NetworkInterface { - pool: client_pool, + peer_set: Buffer::new(peer_set, 10).boxed_clone(), broadcast_svc, make_connection_tx, address_book: address_book.boxed_clone(), @@ -133,7 +143,7 @@ where #[derive(Clone)] pub struct NetworkInterface { /// A pool of free connected peers. - pool: Arc>, + peer_set: BoxCloneService, tower::BoxError>, /// A [`Service`] that allows broadcasting to all connected peers. broadcast_svc: BroadcastSvc, /// A channel to request extra connections. @@ -163,7 +173,7 @@ impl NetworkInterface { + 'static, C::Future: Send + 'static, { - block_downloader::download_blocks(Arc::clone(&self.pool), our_chain_service, config) + block_downloader::download_blocks(self.peer_set.clone(), our_chain_service, config) } /// Returns the address book service. @@ -173,8 +183,10 @@ impl NetworkInterface { self.address_book.clone() } - /// Borrows the `ClientPool`, for access to connected peers. 
- pub const fn client_pool(&self) -> &Arc> { - &self.pool + /// Borrows the `PeerSet`, for access to connected peers. + pub fn peer_set( + &mut self, + ) -> &mut BoxCloneService, tower::BoxError> { + &mut self.peer_set } } diff --git a/p2p/p2p/src/peer_set.rs b/p2p/p2p/src/peer_set.rs new file mode 100644 index 0000000..498eaaf --- /dev/null +++ b/p2p/p2p/src/peer_set.rs @@ -0,0 +1,217 @@ +use std::{ + future::{ready, Future, Ready}, + pin::{pin, Pin}, + task::{Context, Poll}, +}; + +use futures::{stream::FuturesUnordered, StreamExt}; +use indexmap::{IndexMap, IndexSet}; +use rand::{seq::index::sample, thread_rng}; +use tokio::sync::mpsc::Receiver; +use tokio_util::sync::WaitForCancellationFutureOwned; +use tower::Service; + +use cuprate_helper::cast::u64_to_usize; +use cuprate_p2p_core::{ + client::{Client, InternalPeerID}, + ConnectionDirection, NetworkZone, +}; + +mod client_wrappers; + +pub use client_wrappers::ClientDropGuard; +use client_wrappers::StoredClient; + +/// A request to the peer-set. +pub enum PeerSetRequest { + /// The most claimed proof-of-work from a peer in the peer-set. + MostPoWSeen, + /// Peers with more cumulative difficulty than the given cumulative difficulty. + /// + /// Returned peers will be remembered and won't be returned from subsequent calls until the guard is dropped. + PeersWithMorePoW(u128), + /// A random outbound peer. + /// + /// The returned peer will be remembered and won't be returned from subsequent calls until the guard is dropped. + StemPeer, +} + +/// A response from the peer-set. +pub enum PeerSetResponse { + /// [`PeerSetRequest::MostPoWSeen`] + MostPoWSeen { + /// The cumulative difficulty claimed. + cumulative_difficulty: u128, + /// The height claimed. + height: usize, + /// The claimed hash of the top block. + top_hash: [u8; 32], + }, + /// [`PeerSetRequest::PeersWithMorePoW`] + /// + /// Returned peers will be remembered and won't be returned from subsequent calls until the guard is dropped. 
+ PeersWithMorePoW(Vec>), + /// [`PeerSetRequest::StemPeer`] + /// + /// The returned peer will be remembered and won't be returned from subsequent calls until the guard is dropped. + StemPeer(Option>), +} + +/// A [`Future`] that completes when a peer disconnects. +#[pin_project::pin_project] +struct ClosedConnectionFuture { + #[pin] + fut: WaitForCancellationFutureOwned, + id: Option>, +} + +impl Future for ClosedConnectionFuture { + type Output = InternalPeerID; + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + + this.fut.poll(cx).map(|()| this.id.take().unwrap()) + } +} + +/// A collection of all connected peers on a [`NetworkZone`]. +pub(crate) struct PeerSet { + /// The connected peers. + peers: IndexMap, StoredClient>, + /// A [`FuturesUnordered`] that resolves when a peer disconnects. + closed_connections: FuturesUnordered>, + /// The [`InternalPeerID`]s of all outbound peers. + outbound_peers: IndexSet>, + /// A channel of new peers from the inbound server or outbound connector. + new_peers: Receiver>, +} + +impl PeerSet { + pub(crate) fn new(new_peers: Receiver>) -> Self { + Self { + peers: IndexMap::new(), + closed_connections: FuturesUnordered::new(), + outbound_peers: IndexSet::new(), + new_peers, + } + } + + /// Polls the new peers channel for newly connected peers. + fn poll_new_peers(&mut self, cx: &mut Context<'_>) { + while let Poll::Ready(Some(new_peer)) = self.new_peers.poll_recv(cx) { + if new_peer.info.direction == ConnectionDirection::Outbound { + self.outbound_peers.insert(new_peer.info.id); + } + + self.closed_connections.push(ClosedConnectionFuture { + fut: new_peer.info.handle.closed(), + id: Some(new_peer.info.id), + }); + + self.peers + .insert(new_peer.info.id, StoredClient::new(new_peer)); + } + } + + /// Remove disconnected peers from the peer set. 
+ fn remove_dead_peers(&mut self, cx: &mut Context<'_>) { + while let Poll::Ready(Some(dead_peer)) = self.closed_connections.poll_next_unpin(cx) { + let Some(peer) = self.peers.swap_remove(&dead_peer) else { + continue; + }; + + if peer.client.info.direction == ConnectionDirection::Outbound { + self.outbound_peers.swap_remove(&peer.client.info.id); + } + + self.peers.swap_remove(&dead_peer); + } + } + + /// [`PeerSetRequest::MostPoWSeen`] + fn most_pow_seen(&self) -> PeerSetResponse { + let most_pow_chain = self + .peers + .values() + .map(|peer| { + let core_sync_data = peer.client.info.core_sync_data.lock().unwrap(); + + ( + core_sync_data.cumulative_difficulty(), + u64_to_usize(core_sync_data.current_height), + core_sync_data.top_id, + ) + }) + .max_by_key(|(cumulative_difficulty, ..)| *cumulative_difficulty) + .unwrap_or_default(); + + PeerSetResponse::MostPoWSeen { + cumulative_difficulty: most_pow_chain.0, + height: most_pow_chain.1, + top_hash: most_pow_chain.2, + } + } + + /// [`PeerSetRequest::PeersWithMorePoW`] + fn peers_with_more_pow(&self, cumulative_difficulty: u128) -> PeerSetResponse { + PeerSetResponse::PeersWithMorePoW( + self.peers + .values() + .filter(|&client| { + !client.is_downloading_blocks() + && client + .client + .info + .core_sync_data + .lock() + .unwrap() + .cumulative_difficulty() + > cumulative_difficulty + }) + .map(StoredClient::downloading_blocks_guard) + .collect(), + ) + } + + /// [`PeerSetRequest::StemPeer`] + fn random_peer_for_stem(&self) -> PeerSetResponse { + PeerSetResponse::StemPeer( + sample( + &mut thread_rng(), + self.outbound_peers.len(), + self.outbound_peers.len(), + ) + .into_iter() + .find_map(|i| { + let peer = self.outbound_peers.get_index(i).unwrap(); + let client = self.peers.get(peer).unwrap(); + (!client.is_a_stem_peer()).then(|| client.stem_peer_guard()) + }), + ) + } +} + +impl Service for PeerSet { + type Response = PeerSetResponse; + type Error = tower::BoxError; + type Future = Ready>; + + fn 
poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.poll_new_peers(cx); + self.remove_dead_peers(cx); + + // TODO: should we return `Pending` if we don't have any peers? + + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: PeerSetRequest) -> Self::Future { + ready(match req { + PeerSetRequest::MostPoWSeen => Ok(self.most_pow_seen()), + PeerSetRequest::PeersWithMorePoW(cumulative_difficulty) => { + Ok(self.peers_with_more_pow(cumulative_difficulty)) + } + PeerSetRequest::StemPeer => Ok(self.random_peer_for_stem()), + }) + } +} diff --git a/p2p/p2p/src/peer_set/client_wrappers.rs b/p2p/p2p/src/peer_set/client_wrappers.rs new file mode 100644 index 0000000..97d7493 --- /dev/null +++ b/p2p/p2p/src/peer_set/client_wrappers.rs @@ -0,0 +1,86 @@ +use std::{ + ops::{Deref, DerefMut}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; + +use cuprate_p2p_core::{ + client::{Client, WeakClient}, + NetworkZone, +}; + +/// A client stored in the peer-set. +pub(super) struct StoredClient { + pub client: Client, + /// An [`AtomicBool`] for if the peer is currently downloading blocks. + downloading_blocks: Arc, + /// An [`AtomicBool`] for if the peer is currently being used to stem txs. + stem_peer: Arc, +} + +impl StoredClient { + pub(super) fn new(client: Client) -> Self { + Self { + client, + downloading_blocks: Arc::new(AtomicBool::new(false)), + stem_peer: Arc::new(AtomicBool::new(false)), + } + } + + /// Returns [`true`] if the [`StoredClient`] is currently downloading blocks. + pub(super) fn is_downloading_blocks(&self) -> bool { + self.downloading_blocks.load(Ordering::Relaxed) + } + + /// Returns [`true`] if the [`StoredClient`] is currently being used to stem txs. + pub(super) fn is_a_stem_peer(&self) -> bool { + self.stem_peer.load(Ordering::Relaxed) + } + + /// Returns a [`ClientDropGuard`] that while it is alive keeps the [`StoredClient`] in the downloading blocks state. 
+ pub(super) fn downloading_blocks_guard(&self) -> ClientDropGuard { + self.downloading_blocks.store(true, Ordering::Relaxed); + + ClientDropGuard { + client: self.client.downgrade(), + bool: Arc::clone(&self.downloading_blocks), + } + } + + /// Returns a [`ClientDropGuard`] that while it is alive keeps the [`StoredClient`] in the stemming peers state. + pub(super) fn stem_peer_guard(&self) -> ClientDropGuard { + self.stem_peer.store(true, Ordering::Relaxed); + + ClientDropGuard { + client: self.client.downgrade(), + bool: Arc::clone(&self.stem_peer), + } + } +} + +/// A [`Drop`] guard for a client returned from the peer-set. +pub struct ClientDropGuard { + client: WeakClient, + bool: Arc, +} + +impl Deref for ClientDropGuard { + type Target = WeakClient; + fn deref(&self) -> &Self::Target { + &self.client + } +} + +impl DerefMut for ClientDropGuard { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.client + } +} + +impl Drop for ClientDropGuard { + fn drop(&mut self) { + self.bool.store(false, Ordering::Relaxed); + } +} From 4b925b8c78bfce9089a74f12f07ace94cb8b57f7 Mon Sep 17 00:00:00 2001 From: Dmitry Holodov Date: Wed, 20 Nov 2024 19:08:24 -0600 Subject: [PATCH 098/104] ZMQ PUB/SUB JSON Types (#330) --- Cargo.lock | 21 + Cargo.toml | 2 + books/architecture/src/appendix/crates.md | 5 + zmq/types/Cargo.toml | 20 + zmq/types/src/json_message_types.rs | 646 ++++++++++++++++++++++ zmq/types/src/lib.rs | 1 + 6 files changed, 695 insertions(+) create mode 100644 zmq/types/Cargo.toml create mode 100644 zmq/types/src/json_message_types.rs create mode 100644 zmq/types/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index a947a15..08c017c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -68,6 +68,16 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +[[package]] +name = "assert-json-diff" +version = "2.0.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "async-stream" version = "0.3.6" @@ -1012,6 +1022,17 @@ dependencies = [ "thiserror", ] +[[package]] +name = "cuprate-zmq-types" +version = "0.1.0" +dependencies = [ + "assert-json-diff", + "cuprate-types", + "hex", + "serde", + "serde_json", +] + [[package]] name = "cuprated" version = "0.0.1" diff --git a/Cargo.toml b/Cargo.toml index 0f460e8..1bfd680 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,6 +30,7 @@ members = [ "rpc/json-rpc", "rpc/types", "rpc/interface", + "zmq/types", ] [profile.release] @@ -79,6 +80,7 @@ cuprate-types = { path = "types", default-features = cuprate-json-rpc = { path = "rpc/json-rpc", default-features = false } cuprate-rpc-types = { path = "rpc/types", default-features = false } cuprate-rpc-interface = { path = "rpc/interface", default-features = false } +cuprate-zmq-types = { path = "zmq/types", default-features = false } # External dependencies anyhow = { version = "1", default-features = false } diff --git a/books/architecture/src/appendix/crates.md b/books/architecture/src/appendix/crates.md index ac2780e..a0dff48 100644 --- a/books/architecture/src/appendix/crates.md +++ b/books/architecture/src/appendix/crates.md @@ -54,6 +54,11 @@ cargo doc --open --package cuprate-blockchain | [`cuprate-rpc-interface`](https://doc.cuprate.org/cuprate_rpc_interface) | [`rpc/interface/`](https://github.com/Cuprate/cuprate/tree/main/rpc/interface) | RPC interface & routing | [`cuprate-rpc-handler`](https://doc.cuprate.org/cuprate_rpc_handler) | [`rpc/handler/`](https://github.com/Cuprate/cuprate/tree/main/rpc/handler) | RPC inner handlers +## ZMQ +| Crate | In-tree path | Purpose | +|-------|--------------|---------| +| [`cuprate-zmq-types`](https://doc.cuprate.org/cuprate_zmq_types) | 
[`zmq/types/`](https://github.com/Cuprate/cuprate/tree/main/zmq/types) | Message types for ZMQ Pub/Sub interface + ## 1-off crates | Crate | In-tree path | Purpose | |-------|--------------|---------| diff --git a/zmq/types/Cargo.toml b/zmq/types/Cargo.toml new file mode 100644 index 0000000..78e7d00 --- /dev/null +++ b/zmq/types/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "cuprate-zmq-types" +version = "0.1.0" +edition = "2021" +description = "Types for the ZMQ Pub/Sub API" +license = "MIT" +authors = ["dimalinux"] +repository = "https://github.com/Cuprate/cuprate/tree/main/zmq/types" + +[dependencies] +serde = { workspace = true, features = ["derive"] } +hex = { workspace = true, features = ["std", "serde"] } +cuprate-types = { workspace = true, features = ["hex"] } + +[dev-dependencies] +serde_json = { workspace = true, features = ["std"] } +assert-json-diff = "2.0.2" + +[lints] +workspace = true diff --git a/zmq/types/src/json_message_types.rs b/zmq/types/src/json_message_types.rs new file mode 100644 index 0000000..2699600 --- /dev/null +++ b/zmq/types/src/json_message_types.rs @@ -0,0 +1,646 @@ +//! Objects for JSON serialization and deserialization in message bodies of +//! the ZMQ pub/sub interface. Handles JSON for the following subscriptions: +//! * `json-full-txpool_add` (`Vec`) +//! * `json-minimal-txpool_add` (`Vec`) +//! * `json-full-chain_main` (`Vec`) +//! * `json-minimal-chain_main` (`ChainMainMin`) +//! * `json-full-miner_data` (`MinerData`) +use cuprate_types::hex::HexBytes; +use serde::{Deserialize, Serialize}; + +/// ZMQ `json-full-txpool_add` packets contain an array of `TxPoolAdd`. +/// +/// Each `TxPoolAdd` object represents a new transaction in the mempool that was +/// not previously seen in a block. Miner coinbase transactions *are not* +/// included. `do-not-relay` transactions *are* included. Values are not +/// republished during a re-org. 
+#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct TxPoolAdd { + /// transaction version number. `2` indicates Ring CT (all sub-variants). + pub version: u8, + /// if not `0` and less than `500_000_000`, this is the block height when + /// transaction output(s) are spendable; if >= `500_000_000` this is roughly + /// the unix epoch block timestamp when the output(s) are spendable. + pub unlock_time: u64, + /// transaction inputs (key images) with separate rings for each input + pub inputs: Vec, + /// transaction outputs + pub outputs: Vec, + /// extra data for the transaction with variable size, but limited to `1060` + /// bytes (`2120` hex nibbles). + #[serde(with = "hex::serde")] + pub extra: Vec, + /// obsolete, empty array in JSON + signatures: [Obsolete; 0], + /// ring confidential transaction data + pub ringct: PoolRingCt, +} + +/// ZMQ `json-minimal-txpool_add` subscriber messages contain an array of +/// `TxPoolAddMin` JSON objects. See `TxPoolAdd` for information on which +/// transactions are published to subscribers. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct TxPoolAddMin { + /// transaction ID + pub id: HexBytes<32>, + /// size of the full transaction blob + pub blob_size: u64, + /// metric used to calculate transaction fee + pub weight: u64, + /// mining fee included in the transaction in piconeros + pub fee: u64, +} + +/// ZMQ `json-full-chain_main` subscriber messages contain an array of +/// `ChainMain` JSON objects. Each `ChainMain` object represents a new block. +/// Push messages only contain more than one block if a re-org occurred. 
+#[derive(Debug, Serialize, Deserialize)] +pub struct ChainMain { + /// major version of the monero protocol at this block's height + pub major_version: u8, + /// minor version of the monero protocol at this block's height + pub minor_version: u8, + /// epoch time, decided by the miner, at which the block was mined + pub timestamp: u64, + /// block id of the previous block + pub prev_id: HexBytes<32>, + /// cryptographic random one-time number used in mining a Monero block + pub nonce: u32, + /// coinbase transaction information + pub miner_tx: MinerTx, + /// non-coinbase transaction IDs in the block (can be empty) + pub tx_hashes: Vec>, +} + +/// ZMQ `json-minimal-chain_main` subscriber messages contain a single +/// `ChainMainMin` JSON object. Unlike the full version, only the topmost +/// block is sent in the case of a re-org. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct ChainMainMin { + /// height of the block + pub first_height: u64, + /// block id of the previous block + pub first_prev_id: HexBytes<32>, + /// block ID of the current block is the 0th entry; additional block IDs + /// will only be included if this is the topmost block of a re-org. + pub ids: Vec>, +} + +/// ZMQ `json-full-miner_data` subscriber messages contain a single +/// `MinerData` object that provides the necessary data to create a +/// custom block template. There is no min version of this object. 
+#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct MinerData { + /// major version of the monero protocol for the next mined block + pub major_version: u8, + /// height on which to mine + pub height: u64, + /// block id of the most recent block on which to mine the next block + pub prev_id: HexBytes<32>, + /// hash of block to use as seed for Random-X proof-of-work + pub seed_hash: HexBytes<32>, + /// least-significant 64 bits of the 128-bit network difficulty + #[serde(with = "hex_difficulty")] + pub difficulty: u64, + /// median adjusted block size of the latest 100000 blocks + pub median_weight: u64, + /// fixed at `u64::MAX` in perpetuity as Monero has already reached tail emission + pub already_generated_coins: u64, + /// mineable mempool transactions + pub tx_backlog: Vec, +} + +/// Holds a single input for the `TxPoolAdd` `inputs` array. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct PoolInput { + pub to_key: ToKey, +} + +/// Same as `PoolInput` (adds an extra JSON name layer) +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct ToKey { + /// obsolete field (always 0), non-coinbase TX amounts are now encrypted + amount: u64, + /// integer offsets for ring members + pub key_offsets: Vec, + /// key image for the given input + pub key_image: HexBytes<32>, +} + +/// Holds the block height of the coinbase transaction. 
+#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct MinerInput { + /// namespace layer around the block height + pub r#gen: Gen, +} + +/// Additional namespace layer around the block height in `ChainMain`; gen is +/// another name for a coinbase transaction +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Gen { + /// block height when the coinbase transaction was created + pub height: u64, +} + +/// Transaction output data used by both `TxPoolAdd` and `MinerTx` +#[derive(Debug, Default, Clone, Copy, Serialize, Deserialize)] +pub struct Output { + /// zero for non-coinbase transactions which use encrypted amounts or + /// an amount in piconeros for coinbase transactions + pub amount: u64, + /// public key of the output destination + pub to_tagged_key: ToTaggedKey, +} + +/// Holds the public key of an output destination with its view tag. +#[derive(Debug, Default, Clone, Copy, Serialize, Deserialize)] +pub struct ToTaggedKey { + /// public key used to indicate the destination of a transaction output + pub key: HexBytes<32>, + /// 1st byte of a shared secret used to reduce wallet synchronization time + pub view_tag: HexBytes<1>, +} + +/// Ring CT information used inside `TxPoolAdd` +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct PoolRingCt { + /// ring CT type; `6` is CLSAG Bulletproof Plus + pub r#type: u8, + /// encrypted amount values of the transaction outputs + pub encrypted: Vec, + /// Ring CT commitments, 1 per transaction input + pub commitments: Vec>, + /// mining fee in piconeros + pub fee: u64, + /// data to validate the transaction that can be pruned from older blocks + pub prunable: Prunable, +} + +/// Ring CT information used inside `MinerTx`. Miner coinbase transactions don't +/// use Ring CT, so this only holds a block height. 
+#[derive(Debug, Default, Clone, Serialize, Deserialize)] +struct MinerRingCt { + /// always zero to indicate that Ring CT is not used + r#type: u8, +} + +/// Holds the encrypted amount of a non-coinbase transaction output. +#[derive(Debug, Default, Clone, Copy, Serialize, Deserialize)] +pub struct Encrypted { + /// obsolete field, but present as zeros in JSON; this does not represent + /// the newer deterministically derived mask + mask: HexBytes<32>, + /// encrypted amount of the transaction output + pub amount: HexBytes<32>, +} + +/// Data needed to validate a transaction that can optionally be pruned from +/// older blocks. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Prunable { + /// obsolete, empty array in JSON + range_proofs: [Obsolete; 0], + /// obsolete, empty array in JSON + bulletproofs: [Obsolete; 0], + /// Bulletproofs+ data used to validate a Ring CT transaction + pub bulletproofs_plus: [BulletproofPlus; 1], + /// obsolete, empty array in JSON + mlsags: [Obsolete; 0], + /// CLSAG signatures; 1 per transaction input + pub clsags: Vec, + /// Ring CT pseudo output commitments; 1 per transaction input (*not* + /// output) + pub pseudo_outs: Vec>, +} + +/// Bulletproofs+ data used to validate the legitimacy of a Ring CT transaction. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[expect(non_snake_case)] +pub struct BulletproofPlus { + pub V: Vec>, + pub A: HexBytes<32>, + pub A1: HexBytes<32>, + pub B: HexBytes<32>, + pub r1: HexBytes<32>, + pub s1: HexBytes<32>, + pub d1: HexBytes<32>, + pub L: Vec>, + pub R: Vec>, +} + +/// Placeholder element type so obsolete fields can be deserialized +/// to the empty vector for backwards compatibility. 
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +struct Obsolete; + +/// CLSAG signature fields +#[expect(non_snake_case)] +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Clsag { + pub s: Vec>, + pub c1: HexBytes<32>, + pub D: HexBytes<32>, +} + +/// Part of the new block information in `ChainMain` +#[derive(Debug, Serialize, Deserialize)] +pub struct MinerTx { + /// transaction version number + pub version: u8, + /// block height when the coinbase transaction becomes spendable (currently + /// 60 blocks above the coinbase transaction height) + pub unlock_time: u64, + /// contains the block height in `inputs[0].gen.height` and nothing else as + /// coinbase transactions have no inputs + pub inputs: [MinerInput; 1], + /// transaction outputs + pub outputs: Vec, + /// extra data for the transaction with variable size; not limited to `1060` + /// bytes like the extra field of non-coinbase transactions + #[serde(with = "hex::serde")] + pub extra: Vec, + /// obsolete, empty array in JSON + signatures: [Obsolete; 0], + /// only for JSON compatibility; miners' don't use Ring CT + ringct: MinerRingCt, +} + +/// Holds a transaction entry in the `MinerData` `tx_backlog` field. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct TxBacklog { + /// transaction ID + pub id: HexBytes<32>, + /// metric used to calculate transaction fee + pub weight: u64, + /// mining fee in piconeros + pub fee: u64, +} + +mod hex_difficulty { + //! Serializes the u64 difficulty field of `MinerData` in the same ways as + //! monerod. The difficulty value is inside a string, in big-endian hex, and + //! has a 0x prefix with no leading zeros. 
+ use serde::{Deserialize, Deserializer, Serializer}; + + #[expect(clippy::trivially_copy_pass_by_ref)] + pub(super) fn serialize(difficulty: &u64, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&format!("0x{difficulty:x}")) + } + + pub(super) fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + let s = s.strip_prefix("0x").unwrap_or(&s); + u64::from_str_radix(s, 16).map_err(serde::de::Error::custom) + } +} + +#[cfg(test)] +mod tests { + use assert_json_diff::assert_json_eq; + use serde_json::{self, json}; + + use super::*; + + #[test] + fn test_txpooladd_json() { + let json1 = json!([ + { + "version": 2, + "unlock_time": 0, + "inputs": [ + { + "to_key": { + "amount": 0, + "key_offsets": [ + 82773133, + 30793552, + 578803, + 620532, + 114291, + 291870, + 111275, + 86455, + 19769, + 1238, + 15164, + 11374, + 5240, + 3547, + 7423, + 4198 + ], + "key_image": "89c060b57bba20c0b795bda4b618749e04eba5b40b30062b071dff6e8dd9071d" + } + } + ], + "outputs": [ + { + "amount": 0, + "to_tagged_key": { + "key": "05b4ff4c3ced6ba078a078af8fee5916512a1893f2b6d9373fb90e0eb4040095", + "view_tag": "7a" + } + }, + { + "amount": 0, + "to_tagged_key": { + "key": "60250376bca49bf24cef45c12738b86347df10954cd35630e81b90bf01e922af", + "view_tag": "b8" + } + } + ], + "extra": "01154b87b3334ce9f99d04635eae4e31252a20ba22acb96ff0764a03dc91d203ed020901be80cbce0723d0b4", + "signatures": [], + "ringct": { + "type": 6, + "encrypted": [ + { + "mask": "0000000000000000000000000000000000000000000000000000000000000000", + "amount": "a956be1858615454000000000000000000000000000000000000000000000000" + }, + { + "mask": "0000000000000000000000000000000000000000000000000000000000000000", + "amount": "72972be61af1210b000000000000000000000000000000000000000000000000" + } + ], + "commitments": [ + "cc2a17e43f0b183235a06e8582fcaaa7c21a07732077e66d4dcfaa0db691ea20", + 
"04e3cd1d3430bb7a1d9ede5ce9ec0ef2f6f9dd9fd31fb95c9e0b3148f1a660c8" + ], + "fee": 30660000, + "prunable": { + "range_proofs": [], + "bulletproofs": [], + "bulletproofs_plus": [ + { + "V": [ + "0196c1e9ba57ae053ae19c1bfd49e13146bd4b6e49401582f8a5a6f65ae560d0", + "aecd14b0e2d788315023601947c12d7e9227d8a1a0aee41f0b34fe196d96119f" + ], + "A": "8011fb75ba56d16b1ef1193e1fdfdb81e6b83afd726087427163857e8fcdf08e", + "A1": "ab91ab6863fbdee1fb71791e5297d007269f1b2cc050df40628ee7d0a1a5f3cb", + "B": "df1d082111b51d479b7fa72f6363bb731207c9343a528dc05b5798af56702521", + "r1": "2e212ae9ad704611a39b9b242453d2408045b303738b51d6f88f9dba06233401", + "s1": "36be53973fd971edff1f43cc5d04dda78d2b01f4caeaf38bbe195b04e309b30d", + "d1": "592116ca54b2d3ca0e9f222ffcc5fd63d3c992470473911fc70822f37672350a", + "L": [ + "98f1e11d62b90c665a8a96fb1b10332e37a790ea1e01a9e8ec8de74b7b27b0df", + "3a14689f3d743a3be719df9af28ca2f0f398e3a2731d5d6f342d0485bf81a525", + "bcb9e389fd494db66e4c796ff03795daa131426c0776ded6d37bfae51f29623d", + "5aa7e1f2bfcfcd74ac8305ce59a7baf5a901f84f8fbdd3a2d639e4058f35e98b", + "5939aa7ea012f88a26bab20270ce5b164c1880f793dc249ec215a0783b4d4ca7", + "08286f78d1bb0d7fc2efc7a3ac314707a4a1ac9656656d496180e131c1748496", + "7fc1de780305601aab95fda4b005927a4643f222e28407c31ad46cc935b7a27c" + ], + "R": [ + "69b4f329c0a5f8ae05891ac5ac35b947a7442b66e5b5693c99435deac3a62662", + "a193038cb8dc9d22abe6577fe44271c1693176cb636f9d101723670fb5ca5cda", + "90670e7083e503c2989b6548500234740dabf3451b0bd376979e03ca0cb5e50c", + "6ab149089f73799811f631eab272bd6c8f190f38efff4d49577364956d0148bf", + "62f2178cbdc760a0d3787b5fd42161c3c98394c2ff2b88efc039df59d2116e5d", + "536f91da278f730f2524260d2778dc5959d40a5c724dd789d35bbd309eabd933", + "e47c5c8181e692f3ad91733e7d9a52f8b7e3f5016c5e65f789eea367a13f16cd" + ] + } + ], + "mlsags": [], + "clsags": [ + { + "s": [ + "f70840a8d65da85e962d2ce5ed1293ae3de83318b464363db85505d99e317b01", + "b7c1125be139b4ed201ce85b8453920306cac7c5da11e0f8c0fd7702f15c6a06", + 
"5a04335699f5a816eed1cab79085814dbcf3be5cef51b078b1c3e0210bbba606", + "e4743e114fd6352ea29e0b48ac96688edaba1d5d0634c34301756902eeb1fb0e", + "34aae87ab091082356d2815a7c8e973124245ebc6d163b9f01fbfeb360edcf04", + "d2d0b6ddb44ed42096affec08ea9cd77d2c7cdc5b2e1e964f836d3717640ec00", + "79b34258c8be04ddd955389f7ee3b912286c23492c519a5687b81d770619620e", + "3c889c19693463160d6c7e642c46f5d41db052ee3358c7dcb4826f48bca26607", + "da04927a438fd0d9674e64f0c016f30fde27f251d3466f29dcd5b3d757fec90c", + "f3e08d83b11ca6529bc18748d3f732c325fca8ff79f69f0ed754bcd529898102", + "f00d7125909a9a8cc5283ffc7727fce945e85828459eecb836c7aedca414350e", + "0a635a193af37be1c9519309f25eaf9f37b7bc5892864646d8d2a2187fcec601", + "0c4154d575dff3699bd41f0c354601de6535161755bd2164526076f37e2c6908", + "f7b21e2698333285ea10a95edbe80fe0bb8740c30b35c25bd2002e3693867e02", + "a637f338ff2ed65fa96e5529abc575fc2a35ed1a3f62a9e7be495069d8438800", + "f7c355f1c3a663978c5fe1c9337aabd4085ee537a61eec2c5c1e837cb3728c09" + ], + "c1": "c5dd25e0e32dbefa6ac1d0dc9072620eb97a99224462cdd163287f2b60b9810b", + "D": "c4fa3f939ccf02e4c8842cbd417cf3690421986e558734a0a029f8a86d2791a8" + } + ], + "pseudo_outs": [ + "bcb08920f5476d74294aeb89c8001123bffd2f2ab84e105d553b807674c595ce" + ] + } + } + } + ]); + + let tx_pool_adds: Vec = serde_json::from_value(json1.clone()).unwrap(); + let json2 = serde_json::to_value(&tx_pool_adds).unwrap(); + assert_json_eq!(json1, json2); + } + + #[test] + fn test_txpooladd_min_json() { + let json1 = json!([ + { + "id": "b5086746e805d875cbbbbb49e19aac29d9b75019f656fab8516cdf64ac5cd346", + "blob_size": 1533, + "weight": 1533, + "fee": 30660000 + } + ]); + + let tx_pool_adds: Vec = serde_json::from_value(json1.clone()).unwrap(); + let json2 = serde_json::to_value(&tx_pool_adds).unwrap(); + assert_json_eq!(json1, json2); + } + + #[test] + fn test_chain_main_json() { + let json1 = json!([ + { + "major_version": 16, + "minor_version": 16, + "timestamp": 1726973843, + "prev_id": 
"ce3731311b7e4c1e58a2fe902dbb5c60bb2c0decc163d5397fa52a260d7f09c1", + "nonce": 537273946, + "miner_tx": { + "version": 2, + "unlock_time": 3242818, + "inputs": [ + { + "gen": { + "height": 3242758 + } + } + ], + "outputs": [ + { + "amount": 618188180000_u64, + "to_tagged_key": { + "key": "83faf44df7e9fb4cf54a8dd6a63868507d1a1896bdb35ea9110d739d5da6cf21", + "view_tag": "38" + } + } + ], + "extra": "010e3356a86dbb339354afbc693408dfe8648bffd0b276e6a431861eb73643d88d02115162e362c98e2d00000000000000000000", + "signatures": [], + "ringct": { + "type": 0 + } + }, + "tx_hashes": [ + "2c1b67d3f10b21270cac116e6d5278dc4024ee2d727e4ad56d6dedb1abc0270c", + "c2cfec0de23229a2ab80ca464cef66fc1cad53647a444f048834ec236c38c867", + "03c7649af2373c0f739d3c2eff9ee1580986b460d2abdd5e2aa332281e52da7e", + "1e0834cc658599e786040bdcd9b589a5e8d975233b72279d04ece1a3dd5572b0", + "ba65c30150e906a8799ee99bb2e6481873e42ed8b025cf967c5798528ddc81b4", + "6fc7b1da1cf433edafb142173e9ac13fe05142a36d8a72e9efdf7a3b94da11d6", + "847c06dcda4540d45cae868d4d031781bd87d9bfa4b2186a611428f52e68ccee", + "79f87a1b2fc17295d2cf25b6a65dd17fd8630829ee50f9c48f15e4a24e72d872", + "32b4f7ce6d864006b274dbd73fc8058151d0fd2dd0bb4b423120e32451fd59eb", + "430fe7fa00b63b68b301a4e4810bef2b5be1f651dba8c360e86eac61227382e7", + "9f8d2bf5e39071abccb336404ea72ab85cb731500a1d386a3bf537b9046df29d", + "f63893b8226ca28c290cb65541d60c1675dda1e2a77a629b6b2d7c3886240b23", + "ee8608b6e80cce14beaf95f747f4da8e40e40a49ad1adc20038843a6da3df3c6", + "05783765c150ed1e46d6380726e7ca1f788305754e553f5f2d49b9f09aaaf88d", + "20b4b95e62f45b72014d6ab14edb0b31e273cdc8c8d106068dd32ef6e92fc0a2", + "9230fb0a9dce8e2ca7e109ebf3480838251691de8ed73ea91f74723c5cf19bac", + "d59cf84a25f56ec0f1352bb05645efe9b9326598c4f7c5bc39a87eb7a20c48fc", + "465deb73c48a460df71861d61666dabb906648035a1fecfd0e988ee37616c655", + "5767bc633729ba4555561510f3db739431b16744234dcd549a0d346eaa6685b1", + "2c8d9af5d5774de96e67835ac5adbc6ca5579125b08bc907b395645eea6410ec", + 
"d385c884a0687c3360725dd3a3f6acf6f64bf38d8eeea1644d80bc23b13ee870", + "b2bc7e9fa9c1da08a8b6ee58505611c05bc388fd30aece00e9a0041470f7e950", + "69a4a79b50d42d372e91c6608c2652d1d5ddd343526c387ef6cf1e3c158b1765", + "ef508dfa79bbedd226835c42a9d000a64cc4abe0250c9aa55fd968224e2b45c3", + "0413c3b3fc621c472e10a102d77456db506f0df10a909833aed0c6738fb31eeb", + "e0c52d6d649c2f1abce4c6ffce4dd75a23308afbb6abe33af53da232c40caf5f", + "cd1fd68d2a15002ca6236083ff6ae165c8fd922f410da79640a4342fd8ebd1c8", + "ba746f80ca4ee496f4188ba278f1ed69a913238450d52bd2e2f3d3bf6fdd43d3", + "13c964bc13a55621b7bbbfe9a6d703536d951bfa19eedee93dd1286020959021", + "41a6f8d0df227a401a9bd6f5c0fbc21ed89f515ea5c8434a087e8b880080ee1f", + "41c2b5994284790b1ee158f7b87aa1231c14975d6456a91ff6f93c6f81277965", + "7e6b7f169cc6cab88e652771157cf8c2eb6f69dffb6939a79b34c6554fe6c00b", + "619517d9d138bf95c6b77eb801526b8419616de2b8618ccfd3b6d1c10364bc64", + "52cca64fb20fc2f6d06034a1a2d9b5665972ebc2569ec69f8d473caada309add", + "219c106d09da5a27b339ea0f070da090779b31ef9ccfa90d6d25e7388341eff9", + "e07ce6e96e73cff80c9cc4c1b349ad1ef53cff210b876d4e7afd89fcc8b2e5dd", + "e98f2a00b2892cd65c0252d956d88a4bb8024c7db98ca003c127b097f097f276", + "ed81aa398071fe495e37095e51ff50053e132bd11f27ba9c06ac4bf4063b756f", + "667d29a0cefa311e06fcfc22c98ef75edf81deb6c8a812492eb255a049c826db", + "8b16e8cbc1765247456bd67a3106498f686401b7529dc0f6b03360caf8671135", + "013e443e63259748f6d1a5653374826618ba066b7febcf55c829333f0dd9a6c3", + "517a05d82de59a973eb4d343c45558841c9165ccd75ca7c9d2e1a35f80c26c15", + "af74d5dd44cfed8f40f853a6fc405dae23d547482296f8dbbc13c1aed2c3d8c5", + "b5086746e805d875cbbbbb49e19aac29d9b75019f656fab8516cdf64ac5cd346", + "cfcda18d058656797a1272b384774dcfc26a504a24298aa49ba060eb6b4a19e0", + "1f380660a99030cc45f85ba8ee0e0541035c0fde719c84aa692796328974c9dd", + "53127181a0301a27b3a2749dc997556b211d949a99aa34d1c52d5c54220f49d2", + "5d50a66df97f4decc4ecc3f5030589ef966d5af84a995f7fb14f1c02ae9704db", + 
"cdab9628acdb57c460e292660e7a07caf2ddbcffdfff92f3e5e4fb12119a11ca", + "e740a098a74d7a66a821c4ac3c5f913a82fc7445b5593cc5fa3e48ad1b4589b1", + "760549176fec210cfe0ff58eabbf2670cf33b4cd3942a3b60a98bf8f328a6d01", + "961b0956aa6303ed8ca1687d93ed46b9aa8a0203ec4ce0cbc2e86b364fbfb613", + "b9db041b2c3bfc6b5b0facb638b0b4643eec76b060039a6b11fb43682ed77a97", + "1011c321eb386b9975e8124bdb130790dcf4ac0021da3103cabbf7dfa18ccea7", + "6a9d3d15be4b25bd544d96bb1d7685e53f9484735bb22994feffb9037009aeeb", + "bf20d6193890cf7fdead9e3b60197564c663b5a62eda782a49d4aa7819bb9665", + "472d28f9d25a95e625eb808ff3827e7f6792009e1ba0b3b21951f3058b65a75d", + "e3931b2b66da07f983d2235d9d0b3a3098008458bdc0c1ad4370fae73e1eaa9e", + "e18a0dea6382c95aa4089a971190683b171e9405c06fd4111924144600f3bcf3", + "1a336bcf24026307821b76b9ca18b178c285c591c5df9906e3ffbd2050ccd356", + "8ca2d0e5ae9b9981bb8b76ba0da383c585664b2a2f4e861d58aab00c9b0cc808", + "e1866c27023ccea276034c4d572eab42713132e4fdb2aafa9488f6d74cd49303", + "3674cfafba4cdea5775a72a82e5d553bf180beab456b3cbaa7b41a1574fe1948", + "9bb400dd317425f40176c3094a5573037b0217e0b60761cb66a8fa15b63b36c3", + "c078048028aca3e9bc40f68f4d42ef25c6af2cef4da20bf3be70dd6a23b82d52", + "c28cc85f945085e70259ed02131ae3f8c5992e789c9c75c2c6e257306beaf26e", + "4c2b121795fe2b90fda84813543952382daa29c7b96edd9f96040df13e48e347", + "63c6fba30b5471fd60e715cbaf4448badafde68dbc42c54d96b56dd2c4bf2d15", + "a4240138ecfe736113581f318f261a01992eaa8fa5b7bd6938d9dbeb65aa85d7", + "b9d088a7b21f655d0cf50f8404e874f4d1655fb5565a354d2c0dd6d113619c66", + "9133e7e98a83f6e10a7fd44c104d9124d93e0d3e920f5c160873b394dd3a2fcb", + "953985dbd0ea6f86746e83be144ec2ff2897ef1f3506eede083b893e98dd63ea", + "83af840c4cad46de96c86fcf700ade32e73260d4a16cefa330cb5a722ef59fdf", + "eea3c0c2b016ea0c269f954fd8172c3d118f08103c9842b81b05290c9faf3780", + "ac43a363fdb81fa4f6df1cb06ba49a5f4eeef411957cf2afad55cbc1e79bc4d1", + "ca72cf7bda22aed15c16ca67e7b6cc57109cdc86d4ffe38fd71210a5380fcada", + 
"477dc1cd62106d9df6b37f8515579a48d01b310387087c08ce7062a8eb5df98d", + "d47b6dcd3b13288825c954df6c6e30eb683d1f79434beaee7172082f8ae74280", + "9c64ef20c69589c56fcc5f3a0d10f6957ecea248e44acb432aaf16a88eeef946", + "d2aa256bfd61bdb64ac38da6cbc3e77fb315bb9fbaf422087c10345377df44f6", + "8b9623e4513594a6eaeb3475ea7d0eb585dd8f6e20e21c316db0b942fada2336", + "860725ed0bd18c744e6b8b02888ad88be1cf23d7153131b220a0f9fbb76976bf", + "387cc6e807efc263a0ad6a30e6313a27d16abef038264d0afa0e6ad943be55da" + ] + } + ]); + + let chain_main: Vec = serde_json::from_value(json1.clone()).unwrap(); + let json2 = serde_json::to_value(&chain_main).unwrap(); + assert_json_eq!(json1, json2); + } + + #[test] + fn test_chain_main_min_json() { + let json1 = json!({ + "first_height": 3242758, + "first_prev_id": "ce3731311b7e4c1e58a2fe902dbb5c60bb2c0decc163d5397fa52a260d7f09c1", + "ids": [ + "ee1238b884e64f7e438223aa8d42d0efc15e7640f1a432448fbad116dc72f1b2" + ] + }); + + let chain_main_min: ChainMainMin = serde_json::from_value(json1.clone()).unwrap(); + let json2 = serde_json::to_value(&chain_main_min).unwrap(); + assert_json_eq!(json1, json2); + } + + #[test] + fn test_miner_data_json() { + let json1 = json!({ + "major_version": 16, + "height": 3242764, + "prev_id": "dc53c24683dca14586fb2909b9aa4a44adb524e010d438e2491e7d8cc1c80831", + "seed_hash": "526577d6e6689ba8736c16ccc76e6ce4ada3b0ceeaa3a2260b96ba188a17d705", + "difficulty": "0x526f2623ce", + "median_weight": 300000, + "already_generated_coins": 18446744073709551615_u64, + "tx_backlog": [ + { + "id": "dbec64651bb4e83d0e9a05c2826bde605a940f12179fab0ab5dc8bc4392c776b", + "weight": 2905, + "fee": 929600000 + }, + { + "id": "ec5728dd1fbd98db1f93d612826e73b95f52cca49f247a6dbc35390f45766a7d", + "weight": 2222, + "fee": 44440000 + }, + { + "id": "41f613b1a470af494e0a705993e305dfaad3e365fcc0b0db0118256fc54559aa", + "weight": 2221, + "fee": 44420000 + }, + { + "id": "34fa33bf96dc2f825fe870e8f5402be6225c1623b345224e0dbc38b6407873de", + "weight": 2217, + 
"fee": 709440000 + } + ] + }); + + let miner_data: MinerData = serde_json::from_value(json1.clone()).unwrap(); + let json2 = serde_json::to_value(&miner_data).unwrap(); + assert_json_eq!(json1, json2); + } +} diff --git a/zmq/types/src/lib.rs b/zmq/types/src/lib.rs new file mode 100644 index 0000000..3f9562b --- /dev/null +++ b/zmq/types/src/lib.rs @@ -0,0 +1 @@ +pub mod json_message_types; From f3c1a5c2aa4629bf69b75268de21fc9112f09405 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Thu, 21 Nov 2024 17:32:48 -0500 Subject: [PATCH 099/104] Fix #346 (#347) fixes --- cryptonight/src/util.rs | 2 +- storage/blockchain/src/ops/block.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cryptonight/src/util.rs b/cryptonight/src/util.rs index 7fbf5cb..de8b70b 100644 --- a/cryptonight/src/util.rs +++ b/cryptonight/src/util.rs @@ -49,7 +49,7 @@ pub(crate) fn subarray_copy + ?Sized, U: Copy, const LEN: usize>( /// A mutable reference to a fixed-size subarray of type `[U; LEN]`. /// /// # Panics -/// Panics if `start + LEN > array.as_ref().len()`. +/// Panics if `start + LEN > array.as_mut().len()`. #[inline] pub(crate) fn subarray_mut + ?Sized, U, const LEN: usize>( array: &mut T, diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index 6d32fd8..cc5cb80 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -42,7 +42,7 @@ use crate::{ /// # Panics /// This function will panic if: /// - `block.height > u32::MAX` (not normally possible) -/// - `block.height` is not != [`chain_height`] +/// - `block.height` is != [`chain_height`] // no inline, too big. 
pub fn add_block( block: &VerifiedBlockInformation, From caa08d5eaa1063919797a830eb0d9358d2a72b80 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Mon, 25 Nov 2024 15:10:42 -0500 Subject: [PATCH 100/104] benches: initial implementation (#196) * add readme * readme, basic examples * name changes, bin impl * example, docs * book * add `cuprate-criterion-example` * docs, tracing * fix clippy * docs * lib readme * json-rpc benchmarks * add to crates.md * add `fixme` * fix `cargo b` failing this `cfg()` existing makes a regular workspace `cargo b` fail * fix cargo.toml --- Cargo.lock | 313 +++++++++++++++++- Cargo.toml | 95 ++++-- benches/README.md | 6 +- benches/benchmark/bin/Cargo.toml | 43 +++ benches/benchmark/bin/README.md | 27 ++ benches/benchmark/bin/src/log.rs | 29 ++ benches/benchmark/bin/src/main.rs | 49 +++ benches/benchmark/bin/src/print.rs | 38 +++ benches/benchmark/bin/src/run.rs | 36 ++ benches/benchmark/bin/src/timings.rs | 5 + benches/benchmark/example/Cargo.toml | 17 + benches/benchmark/example/README.md | 3 + benches/benchmark/example/src/lib.rs | 42 +++ benches/benchmark/lib/Cargo.toml | 18 + benches/benchmark/lib/README.md | 15 + benches/benchmark/lib/src/benchmark.rs | 45 +++ benches/benchmark/lib/src/lib.rs | 5 + benches/criterion/cuprate-json-rpc/Cargo.toml | 23 ++ .../cuprate-json-rpc/benches/main.rs | 8 + .../cuprate-json-rpc/benches/response.rs | 110 ++++++ benches/criterion/cuprate-json-rpc/src/lib.rs | 2 + benches/criterion/example/Cargo.toml | 21 ++ benches/criterion/example/README.md | 14 + benches/criterion/example/benches/example.rs | 48 +++ benches/criterion/example/benches/main.rs | 10 + benches/criterion/example/src/lib.rs | 13 + books/architecture/src/SUMMARY.md | 13 +- books/architecture/src/appendix/crates.md | 8 + .../src/benchmarking/criterion.md | 1 - .../src/benchmarking/criterion/creating.md | 21 ++ .../src/benchmarking/criterion/intro.md | 4 + .../src/benchmarking/criterion/running.md | 15 + 
.../src/benchmarking/cuprate/creating.md | 57 ++++ .../src/benchmarking/cuprate/intro.md | 37 +++ .../src/benchmarking/cuprate/running.md | 16 + .../architecture/src/benchmarking/harness.md | 1 - books/architecture/src/benchmarking/intro.md | 23 +- 37 files changed, 1188 insertions(+), 43 deletions(-) create mode 100644 benches/benchmark/bin/Cargo.toml create mode 100644 benches/benchmark/bin/README.md create mode 100644 benches/benchmark/bin/src/log.rs create mode 100644 benches/benchmark/bin/src/main.rs create mode 100644 benches/benchmark/bin/src/print.rs create mode 100644 benches/benchmark/bin/src/run.rs create mode 100644 benches/benchmark/bin/src/timings.rs create mode 100644 benches/benchmark/example/Cargo.toml create mode 100644 benches/benchmark/example/README.md create mode 100644 benches/benchmark/example/src/lib.rs create mode 100644 benches/benchmark/lib/Cargo.toml create mode 100644 benches/benchmark/lib/README.md create mode 100644 benches/benchmark/lib/src/benchmark.rs create mode 100644 benches/benchmark/lib/src/lib.rs create mode 100644 benches/criterion/cuprate-json-rpc/Cargo.toml create mode 100644 benches/criterion/cuprate-json-rpc/benches/main.rs create mode 100644 benches/criterion/cuprate-json-rpc/benches/response.rs create mode 100644 benches/criterion/cuprate-json-rpc/src/lib.rs create mode 100644 benches/criterion/example/Cargo.toml create mode 100644 benches/criterion/example/README.md create mode 100644 benches/criterion/example/benches/example.rs create mode 100644 benches/criterion/example/benches/main.rs create mode 100644 benches/criterion/example/src/lib.rs delete mode 100644 books/architecture/src/benchmarking/criterion.md create mode 100644 books/architecture/src/benchmarking/criterion/creating.md create mode 100644 books/architecture/src/benchmarking/criterion/intro.md create mode 100644 books/architecture/src/benchmarking/criterion/running.md create mode 100644 books/architecture/src/benchmarking/cuprate/creating.md create 
mode 100644 books/architecture/src/benchmarking/cuprate/intro.md create mode 100644 books/architecture/src/benchmarking/cuprate/running.md delete mode 100644 books/architecture/src/benchmarking/harness.md diff --git a/Cargo.lock b/Cargo.lock index 08c017c..ac36c56 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -29,6 +29,15 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -44,6 +53,12 @@ dependencies = [ "libc", ] +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "anstyle" version = "1.0.10" @@ -347,6 +362,12 @@ dependencies = [ "serde", ] +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "cc" version = "1.1.31" @@ -380,6 +401,33 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + 
[[package]] name = "clap" version = "4.5.20" @@ -478,6 +526,42 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "is-terminal", + "itertools", + "num-traits", + "once_cell", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools", +] + [[package]] name = "crossbeam" version = "0.8.4" @@ -534,6 +618,12 @@ version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + [[package]] name = "crypto-bigint" version = "0.5.5" @@ -583,6 +673,30 @@ dependencies = [ "tokio", ] +[[package]] +name = "cuprate-benchmark" +version = "0.0.0" +dependencies = [ + "cfg-if", + "cuprate-benchmark-example", + "cuprate-benchmark-lib", + "serde", + "serde_json", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "cuprate-benchmark-example" +version = "0.0.0" +dependencies = [ + "cuprate-benchmark-lib", +] + +[[package]] +name = "cuprate-benchmark-lib" +version = "0.0.0" + [[package]] name = "cuprate-blockchain" version = "0.0.0" @@ -685,6 +799,25 @@ dependencies = [ name = "cuprate-constants" version = "0.1.0" +[[package]] +name = "cuprate-criterion-example" +version = "0.0.0" +dependencies = [ + "criterion", + 
"function_name", + "serde_json", +] + +[[package]] +name = "cuprate-criterion-json-rpc" +version = "0.0.0" +dependencies = [ + "criterion", + "cuprate-json-rpc", + "function_name", + "serde_json", +] + [[package]] name = "cuprate-cryptonight" version = "0.1.0" @@ -1304,6 +1437,21 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "function_name" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1ab577a896d09940b5fe12ec5ae71f9d8211fff62c919c03a3750a9901e98a7" +dependencies = [ + "function_name-proc-macro", +] + +[[package]] +name = "function_name-proc-macro" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673464e1e314dd67a0fd9544abc99e8eb28d0c7e3b69b033bcff9b2d00b87333" + [[package]] name = "funty" version = "2.0.0" @@ -1453,6 +1601,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] + [[package]] name = "hashbrown" version = "0.14.5" @@ -1680,6 +1838,26 @@ dependencies = [ "hashbrown 0.15.0", ] +[[package]] +name = "is-terminal" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.11" @@ -1776,6 +1954,15 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +[[package]] +name = 
"matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + [[package]] name = "matchit" version = "0.7.3" @@ -2031,6 +2218,12 @@ version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + [[package]] name = "openssl-probe" version = "0.1.5" @@ -2168,6 +2361,34 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "plotters" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" + +[[package]] +name = "plotters-svg" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" +dependencies = [ + "plotters-backend", +] + [[package]] name = "ppv-lite86" version = "0.2.20" @@ -2242,7 +2463,7 @@ dependencies = [ "rand", "rand_chacha", "rand_xorshift", - "regex-syntax", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -2408,6 +2629,44 @@ dependencies = [ "syn", ] +[[package]] +name = "regex" +version = "1.10.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.7", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + [[package]] name = "regex-syntax" version = "0.8.5" @@ -2535,6 +2794,15 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + [[package]] name = "schannel" version = "0.1.26" @@ -2889,6 +3157,16 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a693d0c8cf16973fac5a93fbe47b8c6452e7097d4fcac49f3d7a18e39c76e62e" +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version = "1.8.0" @@ -3105,10 +3383,14 @@ 
version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ + "matchers", "nu-ansi-term", + "once_cell", + "regex", "sharded-slab", "smallvec", "thread_local", + "tracing", "tracing-core", "tracing-log", ] @@ -3214,6 +3496,16 @@ dependencies = [ "libc", ] +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "want" version = "0.3.1" @@ -3284,6 +3576,16 @@ version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +[[package]] +name = "web-sys" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webpki-roots" version = "0.26.6" @@ -3309,6 +3611,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +dependencies = [ + "windows-sys 0.52.0", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index 1bfd680..a507631 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,36 +1,57 @@ [workspace] resolver = "2" - members = [ + # Binaries "binaries/cuprated", - "constants", + + # Benchmarks + "benches/benchmark/bin", + "benches/benchmark/lib", + "benches/benchmark/example", + "benches/criterion/example", + 
"benches/criterion/cuprate-json-rpc", + + # Consensus "consensus", "consensus/context", "consensus/fast-sync", "consensus/rules", - "cryptonight", - "helper", + + # Net "net/epee-encoding", "net/fixed-bytes", "net/levin", "net/wire", + + # P2P "p2p/p2p", "p2p/p2p-core", "p2p/bucket", "p2p/dandelion-tower", "p2p/async-buffer", "p2p/address-book", + + # Storage "storage/blockchain", "storage/service", "storage/txpool", "storage/database", - "pruning", - "test-utils", - "types", + + # RPC "rpc/json-rpc", "rpc/types", "rpc/interface", + + # ZMQ "zmq/types", + + # Misc + "constants", + "cryptonight", + "helper", + "pruning", + "test-utils", + "types", ] [profile.release] @@ -53,34 +74,36 @@ opt-level = 3 [workspace.dependencies] # Cuprate members -cuprate-fast-sync = { path = "consensus/fast-sync", default-features = false } -cuprate-consensus-rules = { path = "consensus/rules", default-features = false } -cuprate-constants = { path = "constants", default-features = false } -cuprate-consensus = { path = "consensus", default-features = false } -cuprate-consensus-context = { path = "consensus/context", default-features = false } -cuprate-cryptonight = { path = "cryptonight", default-features = false } -cuprate-helper = { path = "helper", default-features = false } -cuprate-epee-encoding = { path = "net/epee-encoding", default-features = false } -cuprate-fixed-bytes = { path = "net/fixed-bytes", default-features = false } -cuprate-levin = { path = "net/levin", default-features = false } -cuprate-wire = { path = "net/wire", default-features = false } -cuprate-p2p = { path = "p2p/p2p", default-features = false } -cuprate-p2p-core = { path = "p2p/p2p-core", default-features = false } -cuprate-p2p-bucket = { path = "p2p/p2p-bucket", default-features = false } -cuprate-dandelion-tower = { path = "p2p/dandelion-tower", default-features = false } -cuprate-async-buffer = { path = "p2p/async-buffer", default-features = false } -cuprate-address-book = { path = "p2p/address-book", 
default-features = false } -cuprate-blockchain = { path = "storage/blockchain", default-features = false } -cuprate-database = { path = "storage/database", default-features = false } -cuprate-database-service = { path = "storage/service", default-features = false } -cuprate-txpool = { path = "storage/txpool", default-features = false } -cuprate-pruning = { path = "pruning", default-features = false } -cuprate-test-utils = { path = "test-utils", default-features = false } -cuprate-types = { path = "types", default-features = false } -cuprate-json-rpc = { path = "rpc/json-rpc", default-features = false } -cuprate-rpc-types = { path = "rpc/types", default-features = false } -cuprate-rpc-interface = { path = "rpc/interface", default-features = false } -cuprate-zmq-types = { path = "zmq/types", default-features = false } +cuprate-benchmark-lib = { path = "benches/benchmark/lib", default-features = false } +cuprate-benchmark-example = { path = "benches/benchmark/example", default-features = false } +cuprate-fast-sync = { path = "consensus/fast-sync", default-features = false } +cuprate-consensus-rules = { path = "consensus/rules", default-features = false } +cuprate-constants = { path = "constants", default-features = false } +cuprate-consensus = { path = "consensus", default-features = false } +cuprate-consensus-context = { path = "consensus/context", default-features = false } +cuprate-cryptonight = { path = "cryptonight", default-features = false } +cuprate-helper = { path = "helper", default-features = false } +cuprate-epee-encoding = { path = "net/epee-encoding", default-features = false } +cuprate-fixed-bytes = { path = "net/fixed-bytes", default-features = false } +cuprate-levin = { path = "net/levin", default-features = false } +cuprate-wire = { path = "net/wire", default-features = false } +cuprate-p2p = { path = "p2p/p2p", default-features = false } +cuprate-p2p-core = { path = "p2p/p2p-core", default-features = false } +cuprate-p2p-bucket = { path = 
"p2p/p2p-bucket", default-features = false } +cuprate-dandelion-tower = { path = "p2p/dandelion-tower", default-features = false } +cuprate-async-buffer = { path = "p2p/async-buffer", default-features = false } +cuprate-address-book = { path = "p2p/address-book", default-features = false } +cuprate-blockchain = { path = "storage/blockchain", default-features = false } +cuprate-database = { path = "storage/database", default-features = false } +cuprate-database-service = { path = "storage/service", default-features = false } +cuprate-txpool = { path = "storage/txpool", default-features = false } +cuprate-pruning = { path = "pruning", default-features = false } +cuprate-test-utils = { path = "test-utils", default-features = false } +cuprate-types = { path = "types", default-features = false } +cuprate-json-rpc = { path = "rpc/json-rpc", default-features = false } +cuprate-rpc-types = { path = "rpc/types", default-features = false } +cuprate-rpc-interface = { path = "rpc/interface", default-features = false } +cuprate-zmq-types = { path = "zmq/types", default-features = false } # External dependencies anyhow = { version = "1", default-features = false } @@ -125,6 +148,8 @@ tracing-subscriber = { version = "0.3", default-features = false } tracing = { version = "0.1", default-features = false } ## workspace.dev-dependencies +criterion = { version = "0.5" } +function_name = { version = "0.3" } monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" } monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" } tempfile = { version = "3" } diff --git a/benches/README.md b/benches/README.md index 4640904..af6bb93 100644 --- a/benches/README.md +++ b/benches/README.md @@ -1 +1,5 @@ -# TODO +# Benches +This directory contains Cuprate's benchmarks and benchmarking utilities. 
+ +See the [`Benchmarking` section in the Architecture book](https://architecture.cuprate.org/benchmarking/intro.html) +to see how to create and run these benchmarks. \ No newline at end of file diff --git a/benches/benchmark/bin/Cargo.toml b/benches/benchmark/bin/Cargo.toml new file mode 100644 index 0000000..36d0b2c --- /dev/null +++ b/benches/benchmark/bin/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "cuprate-benchmark" +version = "0.0.0" +edition = "2021" +description = "Cuprate's benchmarking binary" +license = "MIT" +authors = ["hinto-janai"] +repository = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin" +keywords = ["cuprate", "benchmarking", "binary"] + +[features] +# All new benchmarks should be added here! +all = ["example"] + +# Non-benchmark features. +default = [] +json = [] +trace = [] +debug = [] +warn = [] +info = [] +error = [] + +# Benchmark features. +# New benchmarks should be added here! +example = [ + "dep:cuprate-benchmark-example" +] + +[dependencies] +cuprate-benchmark-lib = { workspace = true } +cuprate-benchmark-example = { workspace = true, optional = true } + +cfg-if = { workspace = true } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true, features = ["std"] } +tracing = { workspace = true, features = ["std", "attributes"] } +tracing-subscriber = { workspace = true, features = ["fmt", "std", "env-filter"] } + +[dev-dependencies] + +[lints] +workspace = true \ No newline at end of file diff --git a/benches/benchmark/bin/README.md b/benches/benchmark/bin/README.md new file mode 100644 index 0000000..ad0700f --- /dev/null +++ b/benches/benchmark/bin/README.md @@ -0,0 +1,27 @@ +## `cuprate-benchmark` +This crate links all benchmarks together into a single binary that can be run as: `cuprate-benchmark`. + +`cuprate-benchmark` will run all enabled benchmarks sequentially and print data at the end. + +## Benchmarks +Benchmarks are opt-in and enabled via features. 
+ +| Feature | Enables which benchmark crate? | +|----------|--------------------------------| +| example | cuprate-benchmark-example | +| database | cuprate-benchmark-database | + +## Features +These are features that aren't for enabling benchmarks, but rather for other things. + +Since `cuprate-benchmark` is built right before it is ran, +these features almost act like command line arguments. + +| Features | Does what | +|----------|-----------| +| json | Prints JSON timings instead of a markdown table +| trace | Use the `trace` log-level +| debug | Use the `debug` log-level +| warn | Use the `warn` log-level +| info | Use the `info` log-level (default) +| error | Use the `error` log-level \ No newline at end of file diff --git a/benches/benchmark/bin/src/log.rs b/benches/benchmark/bin/src/log.rs new file mode 100644 index 0000000..455f130 --- /dev/null +++ b/benches/benchmark/bin/src/log.rs @@ -0,0 +1,29 @@ +use cfg_if::cfg_if; +use tracing::{info, instrument, Level}; +use tracing_subscriber::FmtSubscriber; + +/// Initializes the `tracing` logger. +#[instrument] +pub(crate) fn init_logger() { + const LOG_LEVEL: Level = { + cfg_if! 
{ + if #[cfg(feature = "trace")] { + Level::TRACE + } else if #[cfg(feature = "debug")] { + Level::DEBUG + } else if #[cfg(feature = "warn")] { + Level::WARN + } else if #[cfg(feature = "info")] { + Level::INFO + } else if #[cfg(feature = "error")] { + Level::ERROR + } else { + Level::INFO + } + } + }; + + FmtSubscriber::builder().with_max_level(LOG_LEVEL).init(); + + info!("Log level: {LOG_LEVEL}"); +} diff --git a/benches/benchmark/bin/src/main.rs b/benches/benchmark/bin/src/main.rs new file mode 100644 index 0000000..02c480a --- /dev/null +++ b/benches/benchmark/bin/src/main.rs @@ -0,0 +1,49 @@ +#![doc = include_str!("../README.md")] +#![allow( + unused_crate_dependencies, + reason = "this crate imports many potentially unused dependencies" +)] + +mod log; +mod print; +mod run; +mod timings; + +use cfg_if::cfg_if; + +/// What `main()` does: +/// 1. Run all enabled benchmarks +/// 2. Record benchmark timings +/// 3. Print timing data +/// +/// To add a new benchmark to be ran here: +/// 1. Copy + paste a `cfg_if` block +/// 2. Change it to your benchmark's feature flag +/// 3. Change it to your benchmark's type +#[allow( + clippy::allow_attributes, + unused_variables, + unused_mut, + unreachable_code, + reason = "clippy does not account for all cfg()s" +)] +fn main() { + log::init_logger(); + + let mut timings = timings::Timings::new(); + + cfg_if! { + if #[cfg(not(any(feature = "example")))] { + println!("No feature specified. Use `--features $BENCHMARK_FEATURE` when building."); + return; + } + } + + cfg_if! 
{ + if #[cfg(feature = "example")] { + run::run_benchmark::(&mut timings); + } + } + + print::print_timings(&timings); +} diff --git a/benches/benchmark/bin/src/print.rs b/benches/benchmark/bin/src/print.rs new file mode 100644 index 0000000..36a5f05 --- /dev/null +++ b/benches/benchmark/bin/src/print.rs @@ -0,0 +1,38 @@ +#![expect(dead_code, reason = "code hidden behind feature flags")] + +use cfg_if::cfg_if; + +use crate::timings::Timings; + +/// Print the final the final markdown table of benchmark timings. +pub(crate) fn print_timings(timings: &Timings) { + println!("\nFinished all benchmarks, printing results:"); + + cfg_if! { + if #[cfg(feature = "json")] { + print_timings_json(timings); + } else { + print_timings_markdown(timings); + } + } +} + +/// Default timing formatting. +pub(crate) fn print_timings_markdown(timings: &Timings) { + let mut s = String::new(); + s.push_str("| Benchmark | Time (seconds) |\n"); + s.push_str("|------------------------------------|----------------|"); + + #[expect(clippy::iter_over_hash_type)] + for (k, v) in timings { + s += &format!("\n| {k:<34} | {v:<14} |"); + } + + println!("\n{s}"); +} + +/// Enabled via `json` feature. +pub(crate) fn print_timings_json(timings: &Timings) { + let json = serde_json::to_string_pretty(timings).unwrap(); + println!("\n{json}"); +} diff --git a/benches/benchmark/bin/src/run.rs b/benches/benchmark/bin/src/run.rs new file mode 100644 index 0000000..05a220f --- /dev/null +++ b/benches/benchmark/bin/src/run.rs @@ -0,0 +1,36 @@ +use tracing::{info, instrument, trace}; + +use cuprate_benchmark_lib::Benchmark; + +use crate::timings::Timings; + +/// Run a [`Benchmark`] and record its timing. +#[instrument(skip_all)] +pub(crate) fn run_benchmark(timings: &mut Timings) { + // Get the benchmark name. + let name = B::name(); + trace!("Running benchmark: {name}"); + + // Setup the benchmark input. + let input = B::SETUP(); + + // Sleep before running the benchmark. 
+ trace!("Pre-benchmark, sleeping for: {:?}", B::POST_SLEEP_DURATION); + std::thread::sleep(B::PRE_SLEEP_DURATION); + + // Run/time the benchmark. + let now = std::time::Instant::now(); + B::MAIN(input); + let time = now.elapsed().as_secs_f32(); + + // Print the benchmark timings. + info!("{name:>34} ... {time}"); + assert!( + timings.insert(name, time).is_none(), + "There were 2 benchmarks with the same name - this collides the final output: {name}", + ); + + // Sleep for a cooldown period after the benchmark run. + trace!("Post-benchmark, sleeping for: {:?}", B::POST_SLEEP_DURATION); + std::thread::sleep(B::POST_SLEEP_DURATION); +} diff --git a/benches/benchmark/bin/src/timings.rs b/benches/benchmark/bin/src/timings.rs new file mode 100644 index 0000000..34a0795 --- /dev/null +++ b/benches/benchmark/bin/src/timings.rs @@ -0,0 +1,5 @@ +/// Benchmark timing data. +/// +/// - Key = benchmark name +/// - Value = benchmark time in seconds +pub(crate) type Timings = std::collections::HashMap<&'static str, f32>; diff --git a/benches/benchmark/example/Cargo.toml b/benches/benchmark/example/Cargo.toml new file mode 100644 index 0000000..5728bcd --- /dev/null +++ b/benches/benchmark/example/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "cuprate-benchmark-example" +version = "0.0.0" +edition = "2021" +description = "Example showcasing Cuprate's benchmarking harness" +license = "MIT" +authors = ["hinto-janai"] +repository = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/example" +keywords = ["cuprate", "benchmarking", "example"] + +[dependencies] +cuprate-benchmark-lib = { path = "../lib" } + +[dev-dependencies] + +[lints] +workspace = true \ No newline at end of file diff --git a/benches/benchmark/example/README.md b/benches/benchmark/example/README.md new file mode 100644 index 0000000..be6b716 --- /dev/null +++ b/benches/benchmark/example/README.md @@ -0,0 +1,3 @@ +## `cuprate-benchmark-example` +This crate contains a short example benchmark that shows 
how to implement and use +`cuprate-benchmark-lib` so that it can be ran by `cuprate-benchmark`. \ No newline at end of file diff --git a/benches/benchmark/example/src/lib.rs b/benches/benchmark/example/src/lib.rs new file mode 100644 index 0000000..cc704a7 --- /dev/null +++ b/benches/benchmark/example/src/lib.rs @@ -0,0 +1,42 @@ +#![doc = include_str!("../README.md")] + +use std::hint::black_box; + +use cuprate_benchmark_lib::Benchmark; + +/// Marker struct that implements [`Benchmark`] +pub struct Example; + +/// The input to our benchmark function. +pub type ExampleBenchmarkInput = u64; + +/// The setup function that creates the input. +pub const fn example_benchmark_setup() -> ExampleBenchmarkInput { + 1 +} + +/// The main benchmarking function. +#[expect(clippy::unit_arg)] +pub fn example_benchmark_main(input: ExampleBenchmarkInput) { + // In this case, we're simply benchmarking the + // performance of simple arithmetic on the input data. + + fn math(input: ExampleBenchmarkInput, number: u64) { + let x = input; + let x = black_box(x * number); + let x = black_box(x / number); + let x = black_box(x + number); + let _ = black_box(x - number); + } + + for number in 1..100_000_000 { + black_box(math(input, number)); + } +} + +// This implementation will be run by `cuprate-benchmark`. 
+impl Benchmark for Example { + type Input = ExampleBenchmarkInput; + const SETUP: fn() -> Self::Input = example_benchmark_setup; + const MAIN: fn(Self::Input) = example_benchmark_main; +} diff --git a/benches/benchmark/lib/Cargo.toml b/benches/benchmark/lib/Cargo.toml new file mode 100644 index 0000000..b0771f0 --- /dev/null +++ b/benches/benchmark/lib/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "cuprate-benchmark-lib" +version = "0.0.0" +edition = "2021" +description = "Cuprate's benchmarking library" +license = "MIT" +authors = ["hinto-janai"] +repository = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib" +keywords = ["cuprate", "benchmarking", "library"] + +[features] + +[dependencies] + +[dev-dependencies] + +[lints] +workspace = true \ No newline at end of file diff --git a/benches/benchmark/lib/README.md b/benches/benchmark/lib/README.md new file mode 100644 index 0000000..9ea79ae --- /dev/null +++ b/benches/benchmark/lib/README.md @@ -0,0 +1,15 @@ +## `cuprate-benchmark-lib` +This crate is the glue between +[`cuprate-benchmark`](https://github.com/Cuprate/cuprate/tree/benches/benches/benchmark/bin) +and all the benchmark crates. + +It defines the [`crate::Benchmark`] trait, which is the behavior of all benchmarks. + +See the [`cuprate-benchmark-example`](https://github.com/Cuprate/cuprate/tree/benches/benches/benchmark/example) +crate to see an example implementation of this trait. + +After implementing this trait, a few steps must +be done such that the `cuprate-benchmark` binary +can actually run your benchmark crate; see the +[`Benchmarking` section in the Architecture book](https://architecture.cuprate.org/benchmarking/intro.html) +to see how to do this. \ No newline at end of file diff --git a/benches/benchmark/lib/src/benchmark.rs b/benches/benchmark/lib/src/benchmark.rs new file mode 100644 index 0000000..4dca550 --- /dev/null +++ b/benches/benchmark/lib/src/benchmark.rs @@ -0,0 +1,45 @@ +//! Benchmarking trait. 
+ +use std::time::Duration; + +/// A benchmarking function and its inputs. +pub trait Benchmark { + /// The benchmark's name. + /// + /// This is automatically implemented + /// as the name of the [`Self`] type. + // + // FIXME: use `const` instead of `fn` when stable + // + fn name() -> &'static str { + std::any::type_name::() + } + + /// Input to the main benchmarking function. + /// + /// This is passed to [`Self::MAIN`]. + type Input; + + /// Setup function to generate the input. + /// + /// This function is not timed. + const SETUP: fn() -> Self::Input; + + /// The main function to benchmark. + /// + /// The start of the timer begins right before + /// this function is called and ends after the + /// function returns. + const MAIN: fn(Self::Input); + + /// `cuprate-benchmark` will sleep for this [`Duration`] after + /// creating the [`Self::Input`], but before starting [`Self::MAIN`]. + /// + /// 1 second by default. + const PRE_SLEEP_DURATION: Duration = Duration::from_secs(1); + + /// `cuprate-benchmark` will sleep for this [`Duration`] after [`Self::MAIN`]. + /// + /// 1 second by default. 
+ const POST_SLEEP_DURATION: Duration = Duration::from_secs(1); +} diff --git a/benches/benchmark/lib/src/lib.rs b/benches/benchmark/lib/src/lib.rs new file mode 100644 index 0000000..a6bec82 --- /dev/null +++ b/benches/benchmark/lib/src/lib.rs @@ -0,0 +1,5 @@ +#![doc = include_str!("../README.md")] + +mod benchmark; + +pub use benchmark::Benchmark; diff --git a/benches/criterion/cuprate-json-rpc/Cargo.toml b/benches/criterion/cuprate-json-rpc/Cargo.toml new file mode 100644 index 0000000..a0cae64 --- /dev/null +++ b/benches/criterion/cuprate-json-rpc/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "cuprate-criterion-json-rpc" +version = "0.0.0" +edition = "2021" +description = "Criterion benchmarking for cuprate-json-rpc" +license = "MIT" +authors = ["hinto-janai"] +repository = "https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cuprate-json-rpc" +keywords = ["cuprate", "json-rpc", "criterion", "benchmark"] + +[dependencies] +cuprate-json-rpc = { workspace = true } + +criterion = { workspace = true } +function_name = { workspace = true } +serde_json = { workspace = true, features = ["default"] } + +[[bench]] +name = "main" +harness = false + +[lints] +workspace = true \ No newline at end of file diff --git a/benches/criterion/cuprate-json-rpc/benches/main.rs b/benches/criterion/cuprate-json-rpc/benches/main.rs new file mode 100644 index 0000000..a724943 --- /dev/null +++ b/benches/criterion/cuprate-json-rpc/benches/main.rs @@ -0,0 +1,8 @@ +//! Benchmarks for `cuprate-json-rpc`. +#![allow(unused_crate_dependencies)] + +mod response; + +criterion::criterion_main! { + response::serde, +} diff --git a/benches/criterion/cuprate-json-rpc/benches/response.rs b/benches/criterion/cuprate-json-rpc/benches/response.rs new file mode 100644 index 0000000..908a9f4 --- /dev/null +++ b/benches/criterion/cuprate-json-rpc/benches/response.rs @@ -0,0 +1,110 @@ +//! Benchmarks for [`Response`]. 
+#![allow(unused_attributes, unused_crate_dependencies)] + +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use function_name::named; +use serde_json::{from_str, to_string_pretty}; + +use cuprate_json_rpc::{Id, Response}; + +// `serde` benchmarks on `Response`. +// +// These are benchmarked as `Response` has a custom serde implementation. +criterion_group! { + name = serde; + config = Criterion::default(); + targets = + response_from_str_u8, + response_from_str_u64, + response_from_str_string_5_len, + response_from_str_string_10_len, + response_from_str_string_100_len, + response_from_str_string_500_len, + response_to_string_pretty_u8, + response_to_string_pretty_u64, + response_to_string_pretty_string_5_len, + response_to_string_pretty_string_10_len, + response_to_string_pretty_string_100_len, + response_to_string_pretty_string_500_len, + response_from_str_bad_field_1, + response_from_str_bad_field_5, + response_from_str_bad_field_10, + response_from_str_bad_field_100, + response_from_str_missing_field, +} +criterion_main!(serde); + +/// Generate `from_str` deserialization benchmark functions for [`Response`]. +macro_rules! impl_from_str_benchmark { + ( + $( + $fn_name:ident => $request_type:ty => $request_string:literal, + )* + ) => { + $( + #[named] + fn $fn_name(c: &mut Criterion) { + let request_string = $request_string; + + c.bench_function(function_name!(), |b| { + b.iter(|| { + let _r = from_str::>( + black_box(request_string) + ); + }); + }); + } + )* + }; +} + +impl_from_str_benchmark! 
{ + response_from_str_u8 => u8 => r#"{"jsonrpc":"2.0","id":123,"result":0}"#, + response_from_str_u64 => u64 => r#"{"jsonrpc":"2.0","id":123,"result":0}"#, + response_from_str_string_5_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"hello"}"#, + response_from_str_string_10_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"hellohello"}"#, + response_from_str_string_100_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld"}"#, + response_from_str_string_500_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld"}"#, + + // The custom serde currently looks at all fields. + // These are for testing the performance if the serde + // has to parse through a bunch of unrelated fields. 
+ response_from_str_bad_field_1 => u8 => r#"{"bad_field":0,"jsonrpc":"2.0","id":123,"result":0}"#, + response_from_str_bad_field_5 => u8 => r#"{"bad_field_1":0,"bad_field_2":0,"bad_field_3":0,"bad_field_4":0,"bad_field_5":0,"jsonrpc":"2.0","id":123,"result":0}"#, + response_from_str_bad_field_10 => u8 => r#"{"bad_field_1":0,"bad_field_2":0,"bad_field_3":0,"bad_field_4":0,"bad_field_5":0,"bad_field_6":0,"bad_field_7":0,"bad_field_8":0,"bad_field_9":0,"bad_field_10":0,"jsonrpc":"2.0","id":123,"result":0}"#, + response_from_str_bad_field_100 => u8 => r#"{"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0,"38":0,"39":0,"40":0,"41":0,"42":0,"43":0,"44":0,"45":0,"46":0,"47":0,"48":0,"49":0,"50":0,"51":0,"52":0,"53":0,"54":0,"55":0,"56":0,"57":0,"58":0,"59":0,"60":0,"61":0,"62":0,"63":0,"64":0,"65":0,"66":0,"67":0,"68":0,"69":0,"70":0,"71":0,"72":0,"73":0,"74":0,"75":0,"76":0,"77":0,"78":0,"79":0,"80":0,"81":0,"82":0,"83":0,"84":0,"85":0,"86":0,"87":0,"88":0,"89":0,"90":0,"91":0,"92":0,"93":0,"94":0,"95":0,"96":0,"97":0,"98":0,"99":0,"100":0,"jsonrpc":"2.0","id":123,"result":0}"#, + + // These are missing the `jsonrpc` field. + response_from_str_missing_field => u8 => r#"{"id":123,"result":0}"#, +} + +/// Generate `to_string_pretty` serialization benchmark functions for [`Response`]. +macro_rules! impl_to_string_pretty_benchmark { + ( + $( + $fn_name:ident => $request_constructor:expr, + )* + ) => { + $( + #[named] + fn $fn_name(c: &mut Criterion) { + let request = $request_constructor; + + c.bench_function(function_name!(), |b| { + b.iter(|| { + let _s = to_string_pretty(black_box(&request)).unwrap(); + }); + }); + } + )* + }; +} + +impl_to_string_pretty_benchmark! 
{ + response_to_string_pretty_u8 => Response::::ok(Id::Null, 0), + response_to_string_pretty_u64 => Response::::ok(Id::Null, 0), + response_to_string_pretty_string_5_len => Response::ok(Id::Null, String::from("hello")), + response_to_string_pretty_string_10_len => Response::ok(Id::Null, String::from("hellohello")), + response_to_string_pretty_string_100_len => Response::ok(Id::Null, String::from("helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld")), + response_to_string_pretty_string_500_len => Response::ok(Id::Null, String::from("helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld")), +} diff --git a/benches/criterion/cuprate-json-rpc/src/lib.rs b/benches/criterion/cuprate-json-rpc/src/lib.rs new file mode 100644 index 0000000..b29887a --- /dev/null +++ b/benches/criterion/cuprate-json-rpc/src/lib.rs @@ -0,0 +1,2 @@ +//! Benchmark lib for `cuprate-json-rpc`. 
+#![allow(unused_crate_dependencies, reason = "used in benchmarks")] diff --git a/benches/criterion/example/Cargo.toml b/benches/criterion/example/Cargo.toml new file mode 100644 index 0000000..43e6011 --- /dev/null +++ b/benches/criterion/example/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "cuprate-criterion-example" +version = "0.0.0" +edition = "2021" +description = "Criterion benchmarking example for Cuprate" +license = "MIT" +authors = ["hinto-janai"] +repository = "https://github.com/Cuprate/cuprate/tree/main/benches/criterion/example" +keywords = ["cuprate", "criterion", "benchmark", "example"] + +[dependencies] +criterion = { workspace = true } +function_name = { workspace = true } +serde_json = { workspace = true, features = ["default"] } + +[[bench]] +name = "main" +harness = false + +[lints] +workspace = true \ No newline at end of file diff --git a/benches/criterion/example/README.md b/benches/criterion/example/README.md new file mode 100644 index 0000000..cf1983f --- /dev/null +++ b/benches/criterion/example/README.md @@ -0,0 +1,14 @@ +## `cuprate-criterion-example` +An example of using Criterion for benchmarking Cuprate crates. + +Consider copy+pasting this crate to use as a base when creating new Criterion benchmark crates. + +## `src/` +Benchmark crates have a `benches/` ran by `cargo bench`, but they are also crates themselves, +as in, they have a `src` folder that `benches/` can pull code from. + +The `src` directories in these benchmarking crates are usually filled with +helper functions, types, etc, that are used repeatedly in the benchmarks. + +## `benches/` +These are the actual benchmarks ran by `cargo bench`. diff --git a/benches/criterion/example/benches/example.rs b/benches/criterion/example/benches/example.rs new file mode 100644 index 0000000..7ea8e9a --- /dev/null +++ b/benches/criterion/example/benches/example.rs @@ -0,0 +1,48 @@ +//! Benchmarks. 
+#![allow(unused_attributes, unused_crate_dependencies)] + +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; +use function_name::named; + +use cuprate_criterion_example::SomeHardToCreateObject; + +// This is how you register criterion benchmarks. +criterion_group! { + name = benches; + config = Criterion::default(); + targets = benchmark_1, benchmark_range, +} +criterion_main!(benches); + +/// Benchmark a single input. +/// +/// +#[named] +fn benchmark_1(c: &mut Criterion) { + // It is recommended to use `function_name!()` as a benchmark + // identifier instead of manually re-typing the function name. + c.bench_function(function_name!(), |b| { + b.iter(|| { + black_box(SomeHardToCreateObject::from(1)); + }); + }); +} + +/// Benchmark a range of inputs. +/// +/// +#[named] +fn benchmark_range(c: &mut Criterion) { + let mut group = c.benchmark_group(function_name!()); + + for i in 0..4 { + group.throughput(Throughput::Elements(i)); + group.bench_with_input(BenchmarkId::from_parameter(i), &i, |b, &i| { + b.iter(|| { + black_box(SomeHardToCreateObject::from(i)); + }); + }); + } + + group.finish(); +} diff --git a/benches/criterion/example/benches/main.rs b/benches/criterion/example/benches/main.rs new file mode 100644 index 0000000..d4f0bf8 --- /dev/null +++ b/benches/criterion/example/benches/main.rs @@ -0,0 +1,10 @@ +//! Benchmarks examples. +#![allow(unused_crate_dependencies)] + +// All modules within `benches/` are `mod`ed here. +mod example; + +// And all the Criterion benchmarks are registered like so: +criterion::criterion_main! { + example::benches, +} diff --git a/benches/criterion/example/src/lib.rs b/benches/criterion/example/src/lib.rs new file mode 100644 index 0000000..0f732a4 --- /dev/null +++ b/benches/criterion/example/src/lib.rs @@ -0,0 +1,13 @@ +#![doc = include_str!("../README.md")] // See the README for crate documentation. 
+#![allow(unused_crate_dependencies, reason = "used in benchmarks")] + +/// Shared type that all benchmarks can use. +#[expect(dead_code)] +pub struct SomeHardToCreateObject(u64); + +impl From for SomeHardToCreateObject { + /// Shared function that all benchmarks can use. + fn from(value: u64) -> Self { + Self(value) + } +} diff --git a/books/architecture/src/SUMMARY.md b/books/architecture/src/SUMMARY.md index 0961d8f..a99d099 100644 --- a/books/architecture/src/SUMMARY.md +++ b/books/architecture/src/SUMMARY.md @@ -143,9 +143,16 @@ --- -- [⚪️ Benchmarking](benchmarking/intro.md) - - [⚪️ Criterion](benchmarking/criterion.md) - - [⚪️ Harness](benchmarking/harness.md) +- [🟢 Benchmarking](benchmarking/intro.md) + - [🟢 Criterion](benchmarking/criterion/intro.md) + - [🟢 Creating](benchmarking/criterion/creating.md) + - [🟢 Running](benchmarking/criterion/running.md) + - [🟢 `cuprate-benchmark`](benchmarking/cuprate/intro.md) + - [🟢 Creating](benchmarking/cuprate/creating.md) + - [🟢 Running](benchmarking/cuprate/running.md) + +--- + - [⚪️ Testing](testing/intro.md) - [⚪️ Monero data](testing/monero-data.md) - [⚪️ RPC client](testing/rpc-client.md) diff --git a/books/architecture/src/appendix/crates.md b/books/architecture/src/appendix/crates.md index a0dff48..5124180 100644 --- a/books/architecture/src/appendix/crates.md +++ b/books/architecture/src/appendix/crates.md @@ -68,3 +68,11 @@ cargo doc --open --package cuprate-blockchain | [`cuprate-helper`](https://doc.cuprate.org/cuprate_helper) | [`helper/`](https://github.com/Cuprate/cuprate/tree/main/helper) | Kitchen-sink helper crate for Cuprate | [`cuprate-test-utils`](https://doc.cuprate.org/cuprate_test_utils) | [`test-utils/`](https://github.com/Cuprate/cuprate/tree/main/test-utils) | Testing utilities for Cuprate | [`cuprate-types`](https://doc.cuprate.org/cuprate_types) | [`types/`](https://github.com/Cuprate/cuprate/tree/main/types) | Shared types across Cuprate + +## Benchmarks +| Crate | In-tree path | Purpose | 
+|-------|--------------|---------| +| [`cuprate-benchmark`](https://doc.cuprate.org/cuprate_benchmark) | [`benches/benchmark/bin/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin) | Cuprate benchmarking binary +| [`cuprate-benchmark-lib`](https://doc.cuprate.org/cuprate_benchmark_lib) | [`benches/benchmark/lib/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib) | Cuprate benchmarking library +| `cuprate-benchmark-*` | [`benches/benchmark/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/) | Benchmark for a Cuprate crate that uses `cuprate-benchmark` +| `cuprate-criterion-*` | [`benches/criterion/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) | Benchmark for a Cuprate crate that uses [Criterion](https://bheisler.github.io/criterion.rs/book) \ No newline at end of file diff --git a/books/architecture/src/benchmarking/criterion.md b/books/architecture/src/benchmarking/criterion.md deleted file mode 100644 index e9d61e6..0000000 --- a/books/architecture/src/benchmarking/criterion.md +++ /dev/null @@ -1 +0,0 @@ -# ⚪️ Criterion diff --git a/books/architecture/src/benchmarking/criterion/creating.md b/books/architecture/src/benchmarking/criterion/creating.md new file mode 100644 index 0000000..0100904 --- /dev/null +++ b/books/architecture/src/benchmarking/criterion/creating.md @@ -0,0 +1,21 @@ +# Creating +Creating a new Criterion-based benchmarking crate for one of Cuprate's crates is relatively simple, +although, it requires knowledge of how to use Criterion first: + +1. Read the `Getting Started` section of +2. Copy [`benches/criterion/example`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/example) as base +3. 
Get started + +## Naming +New benchmark crates using Criterion should: +- Be in [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/) +- Be in the `cuprate-criterion-$CRATE_NAME` format + +For a real example, see: +[`cuprate-criterion-json-rpc`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cuprate-json-rpc). + +## Workspace +Finally, make sure to add the benchmark crate to the workspace +[`Cargo.toml`](https://github.com/Cuprate/cuprate/blob/main/Cargo.toml) file. + +Your benchmark is now ready to be ran. \ No newline at end of file diff --git a/books/architecture/src/benchmarking/criterion/intro.md b/books/architecture/src/benchmarking/criterion/intro.md new file mode 100644 index 0000000..b7a79b2 --- /dev/null +++ b/books/architecture/src/benchmarking/criterion/intro.md @@ -0,0 +1,4 @@ +# Criterion +Each sub-directory in [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) is a crate that uses [Criterion](https://bheisler.github.io/criterion.rs/book) for timing single functions and/or groups of functions. + +They are generally be small in scope. 
\ No newline at end of file diff --git a/books/architecture/src/benchmarking/criterion/running.md b/books/architecture/src/benchmarking/criterion/running.md new file mode 100644 index 0000000..14067f6 --- /dev/null +++ b/books/architecture/src/benchmarking/criterion/running.md @@ -0,0 +1,15 @@ +# Running +To run all Criterion benchmarks, run this from the repository root: +```bash +cargo bench +``` + +To run specific package(s), use: +```bash +cargo bench --package $CRITERION_BENCHMARK_CRATE_NAME +``` + +For example: +```bash +cargo bench --package cuprate-criterion-json-rpc +``` \ No newline at end of file diff --git a/books/architecture/src/benchmarking/cuprate/creating.md b/books/architecture/src/benchmarking/cuprate/creating.md new file mode 100644 index 0000000..76eab78 --- /dev/null +++ b/books/architecture/src/benchmarking/cuprate/creating.md @@ -0,0 +1,57 @@ +# Creating +New benchmarks are plugged into `cuprate-benchmark` by: +1. Implementing `cuprate_benchmark_lib::Benchmark` +1. Registering the benchmark in the `cuprate_benchmark` binary + +See [`benches/benchmark/example`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/example) +for an example. + +## Creating the benchmark crate +Before plugging into `cuprate-benchmark`, your actual benchmark crate must be created: + +1. Create a new crate inside `benches/benchmark` (consider copying `benches/benchmark/example` as a base) +1. Pull in `cuprate_benchmark_lib` as a dependency +1. Create a benchmark +1. Implement `cuprate_benchmark_lib::Benchmark` + +New benchmark crates using `cuprate-database` should: +- Be in [`benches/benchmark/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/) +- Be in the `cuprate-benchmark-$CRATE_NAME` format + +For a real example, see: +[`cuprate-benchmark-database`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/cuprate-database). 
+ +## `cuprate_benchmark_lib::Benchmark` +This is the trait that standardizes all benchmarks ran under `cuprate-benchmark`. + +It must be implemented by your benchmarking crate. + +See `cuprate-benchmark-lib` crate documentation for a user-guide: . + +## Adding a feature to `cuprate-benchmark` +After your benchmark's behavior is defined, it must be registered +in the binary that is actually ran: `cuprate-benchmark`. + +If your benchmark is new, add a new crate feature to [`cuprate-benchmark`'s Cargo.toml file](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin/Cargo.toml) with an optional dependency to your benchmarking crate. + +Please remember to edit the feature table in the +[`README.md`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin/README.md) as well! + +## Adding to `cuprate-benchmark`'s `main()` +After adding your crate's feature, add a conditional line that run the benchmark +if the feature is enabled to the `main()` function: + +For example, if your crate's name is `egg`: +```rust +cfg_if! { + if #[cfg(feature = "egg")] { + run::run_benchmark::(&mut timings); + } +} +``` + +## Workspace +Finally, make sure to add the benchmark crate to the workspace +[`Cargo.toml`](https://github.com/Cuprate/cuprate/blob/main/Cargo.toml) file. + +Your benchmark is now ready to be ran. \ No newline at end of file diff --git a/books/architecture/src/benchmarking/cuprate/intro.md b/books/architecture/src/benchmarking/cuprate/intro.md new file mode 100644 index 0000000..25efb46 --- /dev/null +++ b/books/architecture/src/benchmarking/cuprate/intro.md @@ -0,0 +1,37 @@ +# cuprate-benchmark +Cuprate has 2 custom crates for general benchmarking: +- `cuprate-benchmark`; the actual binary crate ran +- `cuprate-benchmark-lib`; the library that other crates hook into + +The abstract purpose of `cuprate-benchmark` is very simple: +1. Set-up the benchmark +1. Start timer +1. Run benchmark +1. 
Output data + +`cuprate-benchmark` runs the benchmarks found in [`benches/benchmark/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark). + +`cuprate-benchmark-lib` defines the `Benchmark` trait that all +benchmark crates implement to "plug-in" to the benchmarking harness. + +## Diagram +A diagram displaying the relation between `cuprate-benchmark` and related crates. + +``` + ┌─────────────────────┐ + │ cuprate_benchmark │ + │ (actual binary ran) │ + └──────────┬──────────┘ + ┌──────────────────┴───────────────────┐ + │ cuprate_benchmark_lib │ + │ ┌───────────────────────────────────┐│ + │ │ trait Benchmark ││ + │ └───────────────────────────────────┘│ + └──────────────────┬───────────────────┘ +┌───────────────────────────┐ │ ┌───────────────────────────┐ +│ cuprate_benchmark_example ├──┼───┤ cuprate_benchmark_* │ +└───────────────────────────┘ │ └───────────────────────────┘ +┌───────────────────────────┐ │ ┌───────────────────────────┐ +│ cuprate_benchmark_* ├──┴───┤ cuprate_benchmark_* │ +└───────────────────────────┘ └───────────────────────────┘ +``` \ No newline at end of file diff --git a/books/architecture/src/benchmarking/cuprate/running.md b/books/architecture/src/benchmarking/cuprate/running.md new file mode 100644 index 0000000..b776163 --- /dev/null +++ b/books/architecture/src/benchmarking/cuprate/running.md @@ -0,0 +1,16 @@ +# Running +`cuprate-benchmark` benchmarks are ran with this command: +```bash +cargo run --release --package cuprate-benchmark --features $BENCHMARK_CRATE_FEATURE +``` + +For example, to run the example benchmark: +```bash +cargo run --release --package cuprate-benchmark --features example +``` + +Use the `all` feature to run all benchmarks: +```bash +# Run all benchmarks +cargo run --release --package cuprate-benchmark --features all +``` diff --git a/books/architecture/src/benchmarking/harness.md b/books/architecture/src/benchmarking/harness.md deleted file mode 100644 index 6f82b52..0000000 --- 
a/books/architecture/src/benchmarking/harness.md +++ /dev/null @@ -1 +0,0 @@ -# ⚪️ Harness diff --git a/books/architecture/src/benchmarking/intro.md b/books/architecture/src/benchmarking/intro.md index f043a0b..e6ab6b1 100644 --- a/books/architecture/src/benchmarking/intro.md +++ b/books/architecture/src/benchmarking/intro.md @@ -1 +1,22 @@ -# ⚪️ Benchmarking +# Benchmarking +Cuprate has 2 types of benchmarks: +- [Criterion](https://bheisler.github.io/criterion.rs/book/user_guide/advanced_configuration.html) benchmarks +- `cuprate-benchmark` benchmarks + +Criterion is used for micro benchmarks; they time single functions, groups of functions, and generally are small in scope. + +`cuprate-benchmark` and [`cuprate-benchmark-lib`](https://doc.cuprate.org/cuprate_benchmark_lib) are custom in-house crates Cuprate uses for macro benchmarks; these test sub-systems, sections of a sub-system, or otherwise larger or more complicated code that isn't well-suited for micro benchmarks. + +## File layout and purpose +All benchmarking related files are in the [`benches/`](https://github.com/Cuprate/cuprate/tree/main/benches) folder. 
+ +This directory is organized like such: + +| Directory | Purpose | +|-------------------------------|---------| +| [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) | Criterion (micro) benchmarks +| `benches/criterion/cuprate-*` | Criterion benchmarks for the crate with the same name +| [`benches/benchmark/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark) | Cuprate's custom benchmarking files +| [`benches/benchmark/bin`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin) | The `cuprate-benchmark` crate; the actual binary run that links all benchmarks +| [`benches/benchmark/lib`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib) | The `cuprate-benchmark-lib` crate; the benchmarking framework all benchmarks plug into +| `benches/benchmark/cuprate-*` | `cuprate-benchmark` benchmarks for the crate with the same name From 01150ab84c1d24147bb45c61dffc45a70956aba3 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 27 Nov 2024 18:04:58 -0500 Subject: [PATCH 101/104] rpc/types: fix epee deserialization for `GetBlocksResponse` (#345) * header + flatten * fix optional values * `fn error() -> String` -> `error!() -> &'static str` * extract out `PoolInfo` * fix cargo hack --- rpc/types/src/bin.rs | 292 +++----------------------------- rpc/types/src/misc/misc.rs | 4 +- rpc/types/src/misc/mod.rs | 2 + rpc/types/src/misc/pool_info.rs | 171 +++++++++++++++++++ rpc/types/src/misc/tx_entry.rs | 5 +- 5 files changed, 199 insertions(+), 275 deletions(-) create mode 100644 rpc/types/src/misc/pool_info.rs diff --git a/rpc/types/src/bin.rs b/rpc/types/src/bin.rs index 7b94191..414214c 100644 --- a/rpc/types/src/bin.rs +++ b/rpc/types/src/bin.rs @@ -9,26 +9,19 @@ use cuprate_fixed_bytes::ByteArrayVec; use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] -use cuprate_epee_encoding::{ - container_as_blob::ContainerAsBlob, - epee_object, error, - macros::bytes::{Buf, BufMut}, - 
read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, -}; +use cuprate_epee_encoding::container_as_blob::ContainerAsBlob; use cuprate_types::BlockCompleteEntry; use crate::{ base::AccessResponseBase, - macros::{define_request, define_request_and_response, define_request_and_response_doc}, - misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolTxInfo, Status}, + macros::define_request_and_response, + misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolInfo}, rpc_call::RpcCallValue, }; #[cfg(any(feature = "epee", feature = "serde"))] use crate::defaults::{default_false, default_zero}; -#[cfg(feature = "epee")] -use crate::misc::PoolInfoExtent; //---------------------------------------------------------------------------------------------------- Definitions define_request_and_response! { @@ -115,15 +108,14 @@ define_request_and_response! { } } -//---------------------------------------------------------------------------------------------------- GetBlocks -define_request! { - #[doc = define_request_and_response_doc!( - "response" => GetBlocksResponse, - get_blocksbin, - cc73fe71162d564ffda8e549b79a350bca53c454, - core_rpc_server_commands_defs, h, 162, 262, - )] - GetBlocksRequest { +define_request_and_response! { + get_blocksbin, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 162..=262, + + GetBlocks, + + Request { requested_info: u8 = default_zero::(), "default_zero", // FIXME: This is a `std::list` in `monerod` because...? block_ids: ByteArrayVec<32>, @@ -131,259 +123,17 @@ define_request! { prune: bool, no_miner_tx: bool = default_false(), "default_false", pool_info_since: u64 = default_zero::(), "default_zero", - } -} + }, -#[doc = define_request_and_response_doc!( - "request" => GetBlocksRequest, - get_blocksbin, - cc73fe71162d564ffda8e549b79a350bca53c454, - core_rpc_server_commands_defs, h, 162, 262, -)] -/// -/// This response's variant depends upon [`PoolInfoExtent`]. 
-#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum GetBlocksResponse { - /// Will always serialize a [`PoolInfoExtent::None`] field. - PoolInfoNone(GetBlocksResponsePoolInfoNone), - /// Will always serialize a [`PoolInfoExtent::Incremental`] field. - PoolInfoIncremental(GetBlocksResponsePoolInfoIncremental), - /// Will always serialize a [`PoolInfoExtent::Full`] field. - PoolInfoFull(GetBlocksResponsePoolInfoFull), -} - -impl Default for GetBlocksResponse { - fn default() -> Self { - Self::PoolInfoNone(GetBlocksResponsePoolInfoNone::default()) - } -} - -/// Data within [`GetBlocksResponse::PoolInfoNone`]. -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct GetBlocksResponsePoolInfoNone { - pub status: Status, - pub untrusted: bool, - pub blocks: Vec, - pub start_height: u64, - pub current_height: u64, - pub output_indices: Vec, - pub daemon_time: u64, -} - -#[cfg(feature = "epee")] -epee_object! { - GetBlocksResponsePoolInfoNone, - status: Status, - untrusted: bool, - blocks: Vec, - start_height: u64, - current_height: u64, - output_indices: Vec, - daemon_time: u64, -} - -/// Data within [`GetBlocksResponse::PoolInfoIncremental`]. -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct GetBlocksResponsePoolInfoIncremental { - pub status: Status, - pub untrusted: bool, - pub blocks: Vec, - pub start_height: u64, - pub current_height: u64, - pub output_indices: Vec, - pub daemon_time: u64, - pub added_pool_txs: Vec, - pub remaining_added_pool_txids: ByteArrayVec<32>, - pub removed_pool_txids: ByteArrayVec<32>, -} - -#[cfg(feature = "epee")] -epee_object! 
{ - GetBlocksResponsePoolInfoIncremental, - status: Status, - untrusted: bool, - blocks: Vec, - start_height: u64, - current_height: u64, - output_indices: Vec, - daemon_time: u64, - added_pool_txs: Vec, - remaining_added_pool_txids: ByteArrayVec<32>, - removed_pool_txids: ByteArrayVec<32>, -} - -/// Data within [`GetBlocksResponse::PoolInfoFull`]. -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct GetBlocksResponsePoolInfoFull { - pub status: Status, - pub untrusted: bool, - pub blocks: Vec, - pub start_height: u64, - pub current_height: u64, - pub output_indices: Vec, - pub daemon_time: u64, - pub added_pool_txs: Vec, - pub remaining_added_pool_txids: ByteArrayVec<32>, -} - -#[cfg(feature = "epee")] -epee_object! { - GetBlocksResponsePoolInfoFull, - status: Status, - untrusted: bool, - blocks: Vec, - start_height: u64, - current_height: u64, - output_indices: Vec, - daemon_time: u64, - added_pool_txs: Vec, - remaining_added_pool_txids: ByteArrayVec<32>, -} - -#[cfg(feature = "epee")] -/// [`EpeeObjectBuilder`] for [`GetBlocksResponse`]. -/// -/// Not for public usage. -#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct __GetBlocksResponseEpeeBuilder { - pub status: Option, - pub untrusted: Option, - pub blocks: Option>, - pub start_height: Option, - pub current_height: Option, - pub output_indices: Option>, - pub daemon_time: Option, - pub pool_info_extent: Option, - pub added_pool_txs: Option>, - pub remaining_added_pool_txids: Option>, - pub removed_pool_txids: Option>, -} - -#[cfg(feature = "epee")] -impl EpeeObjectBuilder for __GetBlocksResponseEpeeBuilder { - fn add_field(&mut self, name: &str, r: &mut B) -> error::Result { - macro_rules! 
read_epee_field { - ($($field:ident),*) => { - match name { - $( - stringify!($field) => { self.$field = Some(read_epee_value(r)?); }, - )* - _ => return Ok(false), - } - }; - } - - read_epee_field! { - status, - untrusted, - blocks, - start_height, - current_height, - output_indices, - daemon_time, - pool_info_extent, - added_pool_txs, - remaining_added_pool_txids, - removed_pool_txids - } - - Ok(true) - } - - fn finish(self) -> error::Result { - const ELSE: error::Error = error::Error::Format("Required field was not found!"); - - let status = self.status.ok_or(ELSE)?; - let untrusted = self.untrusted.ok_or(ELSE)?; - let blocks = self.blocks.ok_or(ELSE)?; - let start_height = self.start_height.ok_or(ELSE)?; - let current_height = self.current_height.ok_or(ELSE)?; - let output_indices = self.output_indices.ok_or(ELSE)?; - let daemon_time = self.daemon_time.ok_or(ELSE)?; - let pool_info_extent = self.pool_info_extent.ok_or(ELSE)?; - - let this = match pool_info_extent { - PoolInfoExtent::None => { - GetBlocksResponse::PoolInfoNone(GetBlocksResponsePoolInfoNone { - status, - untrusted, - blocks, - start_height, - current_height, - output_indices, - daemon_time, - }) - } - PoolInfoExtent::Incremental => { - GetBlocksResponse::PoolInfoIncremental(GetBlocksResponsePoolInfoIncremental { - status, - untrusted, - blocks, - start_height, - current_height, - output_indices, - daemon_time, - added_pool_txs: self.added_pool_txs.ok_or(ELSE)?, - remaining_added_pool_txids: self.remaining_added_pool_txids.ok_or(ELSE)?, - removed_pool_txids: self.removed_pool_txids.ok_or(ELSE)?, - }) - } - PoolInfoExtent::Full => { - GetBlocksResponse::PoolInfoFull(GetBlocksResponsePoolInfoFull { - status, - untrusted, - blocks, - start_height, - current_height, - output_indices, - daemon_time, - added_pool_txs: self.added_pool_txs.ok_or(ELSE)?, - remaining_added_pool_txids: self.remaining_added_pool_txids.ok_or(ELSE)?, - }) - } - }; - - Ok(this) - } -} - -#[cfg(feature = "epee")] -impl EpeeObject 
for GetBlocksResponse { - type Builder = __GetBlocksResponseEpeeBuilder; - - fn number_of_fields(&self) -> u64 { - // [`PoolInfoExtent`] + inner struct fields. - let inner_fields = match self { - Self::PoolInfoNone(s) => s.number_of_fields(), - Self::PoolInfoIncremental(s) => s.number_of_fields(), - Self::PoolInfoFull(s) => s.number_of_fields(), - }; - - 1 + inner_fields - } - - fn write_fields(self, w: &mut B) -> error::Result<()> { - match self { - Self::PoolInfoNone(s) => { - s.write_fields(w)?; - write_field(PoolInfoExtent::None.to_u8(), "pool_info_extent", w)?; - } - Self::PoolInfoIncremental(s) => { - s.write_fields(w)?; - write_field(PoolInfoExtent::Incremental.to_u8(), "pool_info_extent", w)?; - } - Self::PoolInfoFull(s) => { - s.write_fields(w)?; - write_field(PoolInfoExtent::Full.to_u8(), "pool_info_extent", w)?; - } - } - - Ok(()) + // TODO: add `top_block_hash` field + // + AccessResponseBase { + blocks: Vec, + start_height: u64, + current_height: u64, + output_indices: Vec, + daemon_time: u64, + pool_info: PoolInfo, } } diff --git a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs index 8f7467b..67ec756 100644 --- a/rpc/types/src/misc/misc.rs +++ b/rpc/types/src/misc/misc.rs @@ -11,11 +11,11 @@ use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] use cuprate_epee_encoding::epee_object; -use crate::macros::monero_definition_link; - #[cfg(any(feature = "epee", feature = "serde"))] use crate::defaults::default_zero; +use crate::macros::monero_definition_link; + //---------------------------------------------------------------------------------------------------- Macros /// This macro (local to this file) defines all the misc types. 
/// diff --git a/rpc/types/src/misc/mod.rs b/rpc/types/src/misc/mod.rs index e09f847..4976756 100644 --- a/rpc/types/src/misc/mod.rs +++ b/rpc/types/src/misc/mod.rs @@ -17,6 +17,7 @@ mod distribution; mod key_image_spent_status; #[expect(clippy::module_inception)] mod misc; +mod pool_info; mod pool_info_extent; mod status; mod tx_entry; @@ -30,6 +31,7 @@ pub use misc::{ OutputDistributionData, Peer, PoolTxInfo, PublicNode, SetBan, Span, SpentKeyImageInfo, SyncInfoPeer, TxBacklogEntry, TxInfo, TxOutputIndices, TxpoolHisto, TxpoolStats, }; +pub use pool_info::PoolInfo; pub use pool_info_extent::PoolInfoExtent; pub use status::Status; pub use tx_entry::TxEntry; diff --git a/rpc/types/src/misc/pool_info.rs b/rpc/types/src/misc/pool_info.rs new file mode 100644 index 0000000..e9ba875 --- /dev/null +++ b/rpc/types/src/misc/pool_info.rs @@ -0,0 +1,171 @@ +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "epee")] +use crate::misc::PoolInfoExtent; +#[cfg(feature = "epee")] +use cuprate_epee_encoding::{ + epee_object, error, + macros::bytes::{Buf, BufMut}, + read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, +}; + +use cuprate_fixed_bytes::ByteArrayVec; + +use crate::misc::PoolTxInfo; + +//---------------------------------------------------------------------------------------------------- PoolInfo +#[doc = crate::macros::monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 223..=228 +)] +/// Used in [`crate::bin::GetBlocksResponse`]. +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[repr(u8)] +pub enum PoolInfo { + #[default] + None, + Incremental(PoolInfoIncremental), + Full(PoolInfoFull), +} + +//---------------------------------------------------------------------------------------------------- Internal data +/// Data within [`PoolInfo::Incremental`]. 
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct PoolInfoIncremental { + pub added_pool_txs: Vec, + pub remaining_added_pool_txids: ByteArrayVec<32>, + pub removed_pool_txids: ByteArrayVec<32>, +} + +#[cfg(feature = "epee")] +epee_object! { + PoolInfoIncremental, + added_pool_txs: Vec, + remaining_added_pool_txids: ByteArrayVec<32>, + removed_pool_txids: ByteArrayVec<32>, +} + +/// Data within [`PoolInfo::Full`]. +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct PoolInfoFull { + pub added_pool_txs: Vec, + pub remaining_added_pool_txids: ByteArrayVec<32>, +} + +#[cfg(feature = "epee")] +epee_object! { + PoolInfoFull, + added_pool_txs: Vec, + remaining_added_pool_txids: ByteArrayVec<32>, +} + +//---------------------------------------------------------------------------------------------------- PoolInfo epee impl +#[cfg(feature = "epee")] +/// [`EpeeObjectBuilder`] for [`GetBlocksResponse`]. +/// +/// Not for public usage. +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct __PoolInfoEpeeBuilder { + /// This is a distinct field in `monerod`, + /// which as represented in this library with [`PoolInfo`]'s `u8` tag. + pub pool_info_extent: Option, + + pub added_pool_txs: Option>, + pub remaining_added_pool_txids: Option>, + pub removed_pool_txids: Option>, +} + +// Custom epee implementation. +// +// HACK/INVARIANT: +// If any data within [`PoolInfo`] changes, the below code should be changed as well. +#[cfg(feature = "epee")] +impl EpeeObjectBuilder for __PoolInfoEpeeBuilder { + fn add_field(&mut self, name: &str, r: &mut B) -> error::Result { + macro_rules! 
read_epee_field { + ($($field:ident),*) => { + match name { + $( + stringify!($field) => { self.$field = Some(read_epee_value(r)?); }, + )* + _ => return Ok(false), + } + }; + } + + read_epee_field! { + pool_info_extent, + added_pool_txs, + remaining_added_pool_txids, + removed_pool_txids + } + + Ok(true) + } + + fn finish(self) -> error::Result { + // INVARIANT: + // `monerod` omits serializing the field itself when a container is empty, + // `unwrap_or_default()` is used over `error()` in these cases. + // Some of the uses are when values have default fallbacks: `pool_info_extent`. + + let pool_info_extent = self.pool_info_extent.unwrap_or_default(); + let this = match pool_info_extent { + PoolInfoExtent::None => PoolInfo::None, + PoolInfoExtent::Incremental => PoolInfo::Incremental(PoolInfoIncremental { + added_pool_txs: self.added_pool_txs.unwrap_or_default(), + remaining_added_pool_txids: self.remaining_added_pool_txids.unwrap_or_default(), + removed_pool_txids: self.removed_pool_txids.unwrap_or_default(), + }), + PoolInfoExtent::Full => PoolInfo::Full(PoolInfoFull { + added_pool_txs: self.added_pool_txs.unwrap_or_default(), + remaining_added_pool_txids: self.remaining_added_pool_txids.unwrap_or_default(), + }), + }; + + Ok(this) + } +} + +#[cfg(feature = "epee")] +impl EpeeObject for PoolInfo { + type Builder = __PoolInfoEpeeBuilder; + + fn number_of_fields(&self) -> u64 { + // Inner struct fields. 
+ let inner_fields = match self { + Self::None => 0, + Self::Incremental(s) => s.number_of_fields(), + Self::Full(s) => s.number_of_fields(), + }; + + // [`PoolInfoExtent`] + inner struct fields + 1 + inner_fields + } + + fn write_fields(self, w: &mut B) -> error::Result<()> { + const FIELD: &str = "pool_info_extent"; + + match self { + Self::None => { + write_field(PoolInfoExtent::None.to_u8(), FIELD, w)?; + } + Self::Incremental(s) => { + s.write_fields(w)?; + write_field(PoolInfoExtent::Incremental.to_u8(), FIELD, w)?; + } + Self::Full(s) => { + s.write_fields(w)?; + write_field(PoolInfoExtent::Full.to_u8(), FIELD, w)?; + } + } + + Ok(()) + } +} diff --git a/rpc/types/src/misc/tx_entry.rs b/rpc/types/src/misc/tx_entry.rs index 86d0207..59dd460 100644 --- a/rpc/types/src/misc/tx_entry.rs +++ b/rpc/types/src/misc/tx_entry.rs @@ -2,8 +2,6 @@ //---------------------------------------------------------------------------------------------------- Use #[cfg(feature = "serde")] -use crate::serde::{serde_false, serde_true}; -#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] @@ -13,6 +11,9 @@ use cuprate_epee_encoding::{ EpeeObject, EpeeObjectBuilder, }; +#[cfg(feature = "serde")] +use crate::serde::{serde_false, serde_true}; + //---------------------------------------------------------------------------------------------------- TxEntry #[doc = crate::macros::monero_definition_link!( cc73fe71162d564ffda8e549b79a350bca53c454, From 38541dbfda781277d48dd218bb676aede2f8b8a2 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Thu, 28 Nov 2024 14:53:59 -0500 Subject: [PATCH 102/104] workspace: add/fix 1.83 lints (#353) * 1.83 `cargo clippy --fix` * fix type complexity, add `DbResult` * clippy fix * redb fix * Update consensus/context/src/difficulty.rs Co-authored-by: hinto-janai --------- Co-authored-by: Boog900 --- Cargo.toml | 4 ++ .../cuprate-json-rpc/benches/response.rs | 2 +- binaries/cuprated/src/statics.rs | 2 +- 
consensus/context/src/difficulty.rs | 4 +- consensus/fast-sync/src/create.rs | 4 +- net/epee-encoding/src/macros.rs | 6 +- p2p/async-buffer/src/lib.rs | 4 +- p2p/dandelion-tower/src/tests/mod.rs | 1 + p2p/p2p-core/src/lib.rs | 1 - .../src/block_downloader/download_batch.rs | 4 +- p2p/p2p/src/broadcast.rs | 1 + pruning/src/lib.rs | 2 +- rpc/interface/src/route/bin.rs | 2 +- rpc/interface/src/route/other.rs | 2 +- rpc/types/src/json.rs | 2 +- rpc/types/src/macros.rs | 10 +-- rpc/types/src/misc/misc.rs | 2 +- rpc/types/src/other.rs | 2 +- storage/blockchain/src/ops/alt_block/block.rs | 17 +++-- storage/blockchain/src/ops/alt_block/chain.rs | 6 +- storage/blockchain/src/ops/alt_block/tx.rs | 6 +- storage/blockchain/src/ops/block.rs | 23 +++---- storage/blockchain/src/ops/blockchain.rs | 10 ++- storage/blockchain/src/ops/key_image.rs | 8 +-- storage/blockchain/src/ops/macros.rs | 2 +- storage/blockchain/src/ops/output.rs | 29 ++++----- storage/blockchain/src/ops/property.rs | 7 +-- storage/blockchain/src/ops/tx.rs | 22 +++---- storage/blockchain/src/service/read.rs | 14 ++--- storage/blockchain/src/service/types.rs | 4 +- storage/blockchain/src/service/write.rs | 4 +- storage/database/src/backend/heed/database.rs | 63 ++++++++----------- storage/database/src/backend/heed/env.rs | 19 +++--- .../database/src/backend/heed/transaction.rs | 10 +-- storage/database/src/backend/redb/database.rs | 57 ++++++++--------- storage/database/src/backend/redb/env.rs | 19 +++--- storage/database/src/backend/redb/storable.rs | 10 ++- .../database/src/backend/redb/transaction.rs | 8 +-- storage/database/src/config/sync_mode.rs | 1 - storage/database/src/database.rs | 44 +++++++------ storage/database/src/env.rs | 39 ++++++------ storage/database/src/error.rs | 3 + storage/database/src/lib.rs | 2 +- storage/database/src/table.rs | 1 - storage/database/src/tables.rs | 16 ++--- storage/database/src/transaction.rs | 10 +-- storage/service/src/service/read.rs | 10 +-- 
storage/service/src/service/write.rs | 15 +++-- storage/txpool/src/ops/key_images.rs | 4 +- storage/txpool/src/ops/tx_read.rs | 6 +- storage/txpool/src/ops/tx_write.rs | 7 +-- storage/txpool/src/service/read.rs | 4 +- storage/txpool/src/service/types.rs | 4 +- storage/txpool/src/service/write.rs | 20 +++--- test-utils/src/data/constants.rs | 2 +- test-utils/src/rpc/data/macros.rs | 4 +- 56 files changed, 269 insertions(+), 316 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a507631..3cc3ab1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -279,6 +279,9 @@ rest_pat_in_fully_bound_structs = "deny" redundant_type_annotations = "deny" infinite_loop = "deny" zero_repeat_side_effects = "deny" +non_zero_suggestions = "deny" +manual_is_power_of_two = "deny" +used_underscore_items = "deny" # Warm cast_possible_truncation = "deny" @@ -371,6 +374,7 @@ unused_lifetimes = "deny" unused_macro_rules = "deny" ambiguous_glob_imports = "deny" unused_unsafe = "deny" +rust_2024_compatibility = "deny" # Warm let_underscore = { level = "deny", priority = -1 } diff --git a/benches/criterion/cuprate-json-rpc/benches/response.rs b/benches/criterion/cuprate-json-rpc/benches/response.rs index 908a9f4..890958e 100644 --- a/benches/criterion/cuprate-json-rpc/benches/response.rs +++ b/benches/criterion/cuprate-json-rpc/benches/response.rs @@ -82,7 +82,7 @@ impl_from_str_benchmark! { macro_rules! impl_to_string_pretty_benchmark { ( $( - $fn_name:ident => $request_constructor:expr, + $fn_name:ident => $request_constructor:expr_2021, )* ) => { $( diff --git a/binaries/cuprated/src/statics.rs b/binaries/cuprated/src/statics.rs index 9839608..2d7338d 100644 --- a/binaries/cuprated/src/statics.rs +++ b/binaries/cuprated/src/statics.rs @@ -13,7 +13,7 @@ use std::{ macro_rules! define_init_lazylock_statics { ($( $( #[$attr:meta] )* - $name:ident: $t:ty = $init_fn:expr; + $name:ident: $t:ty = $init_fn:expr_2021; )*) => { /// Initialize global static `LazyLock` data. 
pub fn init_lazylock_statics() { diff --git a/consensus/context/src/difficulty.rs b/consensus/context/src/difficulty.rs index 1b61eb9..3bbcb05 100644 --- a/consensus/context/src/difficulty.rs +++ b/consensus/context/src/difficulty.rs @@ -328,8 +328,8 @@ fn next_difficulty( time_span = 1; } - // TODO: do checked operations here and unwrap so we don't silently overflow? - (windowed_work * u128::from(hf.block_time().as_secs()) + time_span - 1) / time_span + // TODO: do `checked_mul` here and unwrap so we don't silently overflow? + (windowed_work * u128::from(hf.block_time().as_secs())).div_ceil(time_span) } /// Get the start and end of the window to calculate difficulty. diff --git a/consensus/fast-sync/src/create.rs b/consensus/fast-sync/src/create.rs index 8c47b8e..9410f60 100644 --- a/consensus/fast-sync/src/create.rs +++ b/consensus/fast-sync/src/create.rs @@ -9,7 +9,7 @@ use clap::Parser; use tower::{Service, ServiceExt}; use cuprate_blockchain::{ - config::ConfigBuilder, cuprate_database::RuntimeError, service::BlockchainReadHandle, + config::ConfigBuilder, cuprate_database::DbResult, service::BlockchainReadHandle, }; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse}, @@ -23,7 +23,7 @@ const BATCH_SIZE: usize = 512; async fn read_batch( handle: &mut BlockchainReadHandle, height_from: usize, -) -> Result, RuntimeError> { +) -> DbResult> { let mut block_ids = Vec::::with_capacity(BATCH_SIZE); for height in height_from..(height_from + BATCH_SIZE) { diff --git a/net/epee-encoding/src/macros.rs b/net/epee-encoding/src/macros.rs index 38dcc45..bb1afef 100644 --- a/net/epee-encoding/src/macros.rs +++ b/net/epee-encoding/src/macros.rs @@ -76,14 +76,14 @@ macro_rules! epee_object { // All this does is return the second (right) arg if present otherwise the left is returned. 
( @internal_try_right_then_left - $a:expr, $b:expr + $a:expr_2021, $b:expr_2021 ) => { $b }; ( @internal_try_right_then_left - $a:expr, + $a:expr_2021, ) => { $a }; @@ -122,7 +122,7 @@ macro_rules! epee_object { // ------------------------------------------------------------------------ Entry Point ( $obj:ident, - $($field: ident $(($alt_name: literal))?: $ty:ty $(as $ty_as:ty )? $(= $default:expr)? $(=> $read_fn:expr, $write_fn:expr, $should_write_fn:expr)?, )* + $($field: ident $(($alt_name: literal))?: $ty:ty $(as $ty_as:ty )? $(= $default:expr_2021)? $(=> $read_fn:expr_2021, $write_fn:expr_2021, $should_write_fn:expr_2021)?, )* $(!flatten: $flat_field: ident: $flat_ty:ty ,)* ) => { diff --git a/p2p/async-buffer/src/lib.rs b/p2p/async-buffer/src/lib.rs index 0e2ced2..8174481 100644 --- a/p2p/async-buffer/src/lib.rs +++ b/p2p/async-buffer/src/lib.rs @@ -157,7 +157,7 @@ pub struct BufferSinkSend<'a, T> { item: Option, } -impl<'a, T> Future for BufferSinkSend<'a, T> { +impl Future for BufferSinkSend<'_, T> { type Output = Result<(), BufferError>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { @@ -183,7 +183,7 @@ pub struct BufferSinkReady<'a, T> { size_needed: usize, } -impl<'a, T> Future for BufferSinkReady<'a, T> { +impl Future for BufferSinkReady<'_, T> { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/p2p/dandelion-tower/src/tests/mod.rs b/p2p/dandelion-tower/src/tests/mod.rs index 601ee25..ae9bee1 100644 --- a/p2p/dandelion-tower/src/tests/mod.rs +++ b/p2p/dandelion-tower/src/tests/mod.rs @@ -12,6 +12,7 @@ use crate::{ OutboundPeer, State, }; +#[expect(clippy::type_complexity)] pub(crate) fn mock_discover_svc() -> ( impl Stream< Item = Result< diff --git a/p2p/p2p-core/src/lib.rs b/p2p/p2p-core/src/lib.rs index 26e1068..e574693 100644 --- a/p2p/p2p-core/src/lib.rs +++ b/p2p/p2p-core/src/lib.rs @@ -121,7 +121,6 @@ pub trait NetZoneAddress: /// /// - TODO: IP zone banning? 
/// - TODO: rename this to Host. - type BanID: Debug + Hash + Eq + Clone + Copy + Send + 'static; /// Changes the port of this address to `port`. diff --git a/p2p/p2p/src/block_downloader/download_batch.rs b/p2p/p2p/src/block_downloader/download_batch.rs index ef621ce..7b6e4c9 100644 --- a/p2p/p2p/src/block_downloader/download_batch.rs +++ b/p2p/p2p/src/block_downloader/download_batch.rs @@ -146,9 +146,9 @@ fn deserialize_batch( // Check the height lines up as expected. // This must happen after the hash check. - if !block + if block .number() - .is_some_and(|height| height == expected_height) + .is_none_or(|height| height != expected_height) { tracing::warn!( "Invalid chain, expected height: {expected_height}, got height: {:?}", diff --git a/p2p/p2p/src/broadcast.rs b/p2p/p2p/src/broadcast.rs index fc73efb..38aba32 100644 --- a/p2p/p2p/src/broadcast.rs +++ b/p2p/p2p/src/broadcast.rs @@ -57,6 +57,7 @@ impl Default for BroadcastConfig { /// - The [`BroadcastSvc`] /// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **outbound** peers. /// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **inbound** peers. +#[expect(clippy::type_complexity)] pub(crate) fn init_broadcast_channels( config: BroadcastConfig, ) -> ( diff --git a/pruning/src/lib.rs b/pruning/src/lib.rs index cd31598..e49aedb 100644 --- a/pruning/src/lib.rs +++ b/pruning/src/lib.rs @@ -327,7 +327,7 @@ impl DecompressedPruningSeed { /// /// This function will also error if `block_height` > `blockchain_height` /// - pub fn get_next_unpruned_block( + pub const fn get_next_unpruned_block( &self, block_height: usize, blockchain_height: usize, diff --git a/rpc/interface/src/route/bin.rs b/rpc/interface/src/route/bin.rs index f7e3a01..2fd9963 100644 --- a/rpc/interface/src/route/bin.rs +++ b/rpc/interface/src/route/bin.rs @@ -68,7 +68,7 @@ macro_rules! 
generate_endpoints_with_no_input { /// - [`generate_endpoints_with_input`] /// - [`generate_endpoints_with_no_input`] macro_rules! generate_endpoints_inner { - ($variant:ident, $handler:ident, $request:expr) => { + ($variant:ident, $handler:ident, $request:expr_2021) => { paste::paste! { { // Check if restricted. diff --git a/rpc/interface/src/route/other.rs b/rpc/interface/src/route/other.rs index 3ff8448..19a58d9 100644 --- a/rpc/interface/src/route/other.rs +++ b/rpc/interface/src/route/other.rs @@ -71,7 +71,7 @@ macro_rules! generate_endpoints_with_no_input { /// - [`generate_endpoints_with_input`] /// - [`generate_endpoints_with_no_input`] macro_rules! generate_endpoints_inner { - ($variant:ident, $handler:ident, $request:expr) => { + ($variant:ident, $handler:ident, $request:expr_2021) => { paste::paste! { { // Check if restricted. diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index cb55e64..a454cab 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -37,7 +37,7 @@ macro_rules! serde_doc_test { ( // `const` string from `cuprate_test_utils::rpc::data` // v - $cuprate_test_utils_rpc_const:ident => $expected:expr + $cuprate_test_utils_rpc_const:ident => $expected:expr_2021 // ^ // Expected value as an expression ) => { diff --git a/rpc/types/src/macros.rs b/rpc/types/src/macros.rs index 85f4272..db1b5d8 100644 --- a/rpc/types/src/macros.rs +++ b/rpc/types/src/macros.rs @@ -77,7 +77,7 @@ macro_rules! define_request_and_response { $( #[$request_field_attr:meta] )* // Field attribute. $request_field:ident: $request_field_type:ty // field_name: field type $(as $request_field_type_as:ty)? // (optional) alternative type (de)serialization - $(= $request_field_type_default:expr, $request_field_type_default_string:literal)?, // (optional) default value + $(= $request_field_type_default:expr_2021, $request_field_type_default_string:literal)?, // (optional) default value )* }, @@ -89,7 +89,7 @@ macro_rules! 
define_request_and_response { $( #[$response_field_attr:meta] )* $response_field:ident: $response_field_type:ty $(as $response_field_type_as:ty)? - $(= $response_field_type_default:expr, $response_field_type_default_string:literal)?, + $(= $response_field_type_default:expr_2021, $response_field_type_default_string:literal)?, )* } ) => { paste::paste! { @@ -229,7 +229,7 @@ macro_rules! define_request { // field_name: FieldType $field:ident: $field_type:ty $(as $field_as:ty)? - $(= $field_default:expr, $field_default_string:literal)?, + $(= $field_default:expr_2021, $field_default_string:literal)?, // The $field_default is an optional extra token that represents // a default value to pass to [`cuprate_epee_encoding::epee_object`], // see it for usage. @@ -286,7 +286,7 @@ macro_rules! define_response { $( #[$field_attr:meta] )* $field:ident: $field_type:ty $(as $field_as:ty)? - $(= $field_default:expr, $field_default_string:literal)?, + $(= $field_default:expr_2021, $field_default_string:literal)?, )* } ) => { @@ -323,7 +323,7 @@ macro_rules! define_response { $( #[$field_attr:meta] )* $field:ident: $field_type:ty $(as $field_as:ty)? - $(= $field_default:expr, $field_default_string:literal)?, + $(= $field_default:expr_2021, $field_default_string:literal)?, )* } ) => { diff --git a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs index 67ec756..2d88f2a 100644 --- a/rpc/types/src/misc/misc.rs +++ b/rpc/types/src/misc/misc.rs @@ -37,7 +37,7 @@ macro_rules! define_struct_and_impl_epee { $( $( #[$field_attr:meta] )* // Field attributes // Field name => the type => optional `epee_object` default value. - $field_name:ident: $field_type:ty $(= $field_default:expr)?, + $field_name:ident: $field_type:ty $(= $field_default:expr_2021)?, )* } ) => { diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index 3694041..d5cbe82 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -65,7 +65,7 @@ macro_rules! 
serde_doc_test { ( // `const` string from `cuprate_test_utils::rpc::data` // v - $cuprate_test_utils_rpc_const:ident => $expected:expr + $cuprate_test_utils_rpc_const:ident => $expected:expr_2021 // ^ // Expected value as an expression ) => { diff --git a/storage/blockchain/src/ops/alt_block/block.rs b/storage/blockchain/src/ops/alt_block/block.rs index 6bd01cb..480bd7d 100644 --- a/storage/blockchain/src/ops/alt_block/block.rs +++ b/storage/blockchain/src/ops/alt_block/block.rs @@ -1,7 +1,7 @@ use bytemuck::TransparentWrapper; use monero_serai::block::{Block, BlockHeader}; -use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec}; +use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, StorableVec}; use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits}; use cuprate_types::{AltBlockInformation, Chain, ChainId, ExtendedBlockHeader, HardFork}; @@ -21,7 +21,7 @@ use crate::{ pub fn flush_alt_blocks<'a, E: cuprate_database::EnvInner<'a>>( env_inner: &E, tx_rw: &mut E::Rw<'_>, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { use crate::tables::{ AltBlockBlobs, AltBlockHeights, AltBlocksInfo, AltChainInfos, AltTransactionBlobs, AltTransactionInfos, @@ -47,10 +47,7 @@ pub fn flush_alt_blocks<'a, E: cuprate_database::EnvInner<'a>>( /// - `alt_block.height` is == `0` /// - `alt_block.txs.len()` != `alt_block.block.transactions.len()` /// -pub fn add_alt_block( - alt_block: &AltBlockInformation, - tables: &mut impl TablesMut, -) -> Result<(), RuntimeError> { +pub fn add_alt_block(alt_block: &AltBlockInformation, tables: &mut impl TablesMut) -> DbResult<()> { let alt_block_height = AltBlockHeight { chain_id: alt_block.chain_id.into(), height: alt_block.height, @@ -100,7 +97,7 @@ pub fn add_alt_block( pub fn get_alt_block( alt_block_height: &AltBlockHeight, tables: &impl Tables, -) -> Result { +) -> DbResult { let block_info = tables.alt_blocks_info().get(alt_block_height)?; let block_blob = 
tables.alt_block_blobs().get(alt_block_height)?.0; @@ -111,7 +108,7 @@ pub fn get_alt_block( .transactions .iter() .map(|tx_hash| get_alt_transaction(tx_hash, tables)) - .collect::>()?; + .collect::>()?; Ok(AltBlockInformation { block, @@ -141,7 +138,7 @@ pub fn get_alt_block_hash( block_height: &BlockHeight, alt_chain: ChainId, tables: &impl Tables, -) -> Result { +) -> DbResult { let alt_chains = tables.alt_chain_infos(); // First find what [`ChainId`] this block would be stored under. @@ -188,7 +185,7 @@ pub fn get_alt_block_hash( pub fn get_alt_block_extended_header_from_height( height: &AltBlockHeight, table: &impl Tables, -) -> Result { +) -> DbResult { let block_info = table.alt_blocks_info().get(height)?; let block_blob = table.alt_block_blobs().get(height)?.0; diff --git a/storage/blockchain/src/ops/alt_block/chain.rs b/storage/blockchain/src/ops/alt_block/chain.rs index 5b5f3cb..676fd7f 100644 --- a/storage/blockchain/src/ops/alt_block/chain.rs +++ b/storage/blockchain/src/ops/alt_block/chain.rs @@ -1,6 +1,6 @@ use std::cmp::{max, min}; -use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError}; +use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, RuntimeError}; use cuprate_types::{Chain, ChainId}; use crate::{ @@ -21,7 +21,7 @@ pub fn update_alt_chain_info( alt_block_height: &AltBlockHeight, prev_hash: &BlockHash, tables: &mut impl TablesMut, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { let parent_chain = match tables.alt_block_heights().get(prev_hash) { Ok(alt_parent_height) => Chain::Alt(alt_parent_height.chain_id.into()), Err(RuntimeError::KeyNotFound) => Chain::Main, @@ -74,7 +74,7 @@ pub fn get_alt_chain_history_ranges( range: std::ops::Range, alt_chain: ChainId, alt_chain_infos: &impl DatabaseRo, -) -> Result)>, RuntimeError> { +) -> DbResult)>> { let mut ranges = Vec::with_capacity(5); let mut i = range.end; diff --git a/storage/blockchain/src/ops/alt_block/tx.rs b/storage/blockchain/src/ops/alt_block/tx.rs index 4185c6c..b410fed 
100644 --- a/storage/blockchain/src/ops/alt_block/tx.rs +++ b/storage/blockchain/src/ops/alt_block/tx.rs @@ -1,7 +1,7 @@ use bytemuck::TransparentWrapper; use monero_serai::transaction::Transaction; -use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec}; +use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, RuntimeError, StorableVec}; use cuprate_types::VerifiedTransactionInformation; use crate::{ @@ -22,7 +22,7 @@ use crate::{ pub fn add_alt_transaction_blob( tx: &VerifiedTransactionInformation, tables: &mut impl TablesMut, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { tables.alt_transaction_infos_mut().put( &tx.tx_hash, &AltTransactionInfo { @@ -51,7 +51,7 @@ pub fn add_alt_transaction_blob( pub fn get_alt_transaction( tx_hash: &TxHash, tables: &impl Tables, -) -> Result { +) -> DbResult { let tx_info = tables.alt_transaction_infos().get(tx_hash)?; let tx_blob = match tables.alt_transaction_blobs().get(tx_hash) { diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index cc5cb80..5e54187 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -8,7 +8,7 @@ use monero_serai::{ }; use cuprate_database::{ - RuntimeError, StorableVec, {DatabaseRo, DatabaseRw}, + DbResult, RuntimeError, StorableVec, {DatabaseRo, DatabaseRw}, }; use cuprate_helper::{ map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits}, @@ -44,10 +44,7 @@ use crate::{ /// - `block.height > u32::MAX` (not normally possible) /// - `block.height` is != [`chain_height`] // no inline, too big. -pub fn add_block( - block: &VerifiedBlockInformation, - tables: &mut impl TablesMut, -) -> Result<(), RuntimeError> { +pub fn add_block(block: &VerifiedBlockInformation, tables: &mut impl TablesMut) -> DbResult<()> { //------------------------------------------------------ Check preconditions first // Cast height to `u32` for storage (handled at top of function). 
@@ -153,7 +150,7 @@ pub fn add_block( pub fn pop_block( move_to_alt_chain: Option, tables: &mut impl TablesMut, -) -> Result<(BlockHeight, BlockHash, Block), RuntimeError> { +) -> DbResult<(BlockHeight, BlockHash, Block)> { //------------------------------------------------------ Block Info // Remove block data from tables. let (block_height, block_info) = tables.block_infos_mut().pop_last()?; @@ -195,7 +192,7 @@ pub fn pop_block( tx, }) }) - .collect::, RuntimeError>>()?; + .collect::>>()?; alt_block::add_alt_block( &AltBlockInformation { @@ -239,7 +236,7 @@ pub fn pop_block( pub fn get_block_extended_header( block_hash: &BlockHash, tables: &impl Tables, -) -> Result { +) -> DbResult { get_block_extended_header_from_height(&tables.block_heights().get(block_hash)?, tables) } @@ -253,7 +250,7 @@ pub fn get_block_extended_header( pub fn get_block_extended_header_from_height( block_height: &BlockHeight, tables: &impl Tables, -) -> Result { +) -> DbResult { let block_info = tables.block_infos().get(block_height)?; let block_header_blob = tables.block_header_blobs().get(block_height)?.0; let block_header = BlockHeader::read(&mut block_header_blob.as_slice())?; @@ -279,7 +276,7 @@ pub fn get_block_extended_header_from_height( #[inline] pub fn get_block_extended_header_top( tables: &impl Tables, -) -> Result<(ExtendedBlockHeader, BlockHeight), RuntimeError> { +) -> DbResult<(ExtendedBlockHeader, BlockHeight)> { let height = chain_height(tables.block_heights())?.saturating_sub(1); let header = get_block_extended_header_from_height(&height, tables)?; Ok((header, height)) @@ -292,7 +289,7 @@ pub fn get_block_extended_header_top( pub fn get_block_info( block_height: &BlockHeight, table_block_infos: &impl DatabaseRo, -) -> Result { +) -> DbResult { table_block_infos.get(block_height) } @@ -302,7 +299,7 @@ pub fn get_block_info( pub fn get_block_height( block_hash: &BlockHash, table_block_heights: &impl DatabaseRo, -) -> Result { +) -> DbResult { 
table_block_heights.get(block_hash) } @@ -317,7 +314,7 @@ pub fn get_block_height( pub fn block_exists( block_hash: &BlockHash, table_block_heights: &impl DatabaseRo, -) -> Result { +) -> DbResult { table_block_heights.contains(block_hash) } diff --git a/storage/blockchain/src/ops/blockchain.rs b/storage/blockchain/src/ops/blockchain.rs index 04f8b26..7163363 100644 --- a/storage/blockchain/src/ops/blockchain.rs +++ b/storage/blockchain/src/ops/blockchain.rs @@ -1,7 +1,7 @@ //! Blockchain functions - chain height, generated coins, etc. //---------------------------------------------------------------------------------------------------- Import -use cuprate_database::{DatabaseRo, RuntimeError}; +use cuprate_database::{DatabaseRo, DbResult, RuntimeError}; use crate::{ ops::macros::doc_error, @@ -22,9 +22,7 @@ use crate::{ /// So the height of a new block would be `chain_height()`. #[doc = doc_error!()] #[inline] -pub fn chain_height( - table_block_heights: &impl DatabaseRo, -) -> Result { +pub fn chain_height(table_block_heights: &impl DatabaseRo) -> DbResult { #[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")] table_block_heights.len().map(|height| height as usize) } @@ -45,7 +43,7 @@ pub fn chain_height( #[inline] pub fn top_block_height( table_block_heights: &impl DatabaseRo, -) -> Result { +) -> DbResult { match table_block_heights.len()? 
{ 0 => Err(RuntimeError::KeyNotFound), #[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")] @@ -70,7 +68,7 @@ pub fn top_block_height( pub fn cumulative_generated_coins( block_height: &BlockHeight, table_block_infos: &impl DatabaseRo, -) -> Result { +) -> DbResult { match table_block_infos.get(block_height) { Ok(block_info) => Ok(block_info.cumulative_generated_coins), Err(RuntimeError::KeyNotFound) if block_height == &0 => Ok(0), diff --git a/storage/blockchain/src/ops/key_image.rs b/storage/blockchain/src/ops/key_image.rs index 19444d6..5f17912 100644 --- a/storage/blockchain/src/ops/key_image.rs +++ b/storage/blockchain/src/ops/key_image.rs @@ -1,7 +1,7 @@ //! Key image functions. //---------------------------------------------------------------------------------------------------- Import -use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError}; +use cuprate_database::{DatabaseRo, DatabaseRw, DbResult}; use crate::{ ops::macros::{doc_add_block_inner_invariant, doc_error}, @@ -17,7 +17,7 @@ use crate::{ pub fn add_key_image( key_image: &KeyImage, table_key_images: &mut impl DatabaseRw, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { table_key_images.put(key_image, &()) } @@ -28,7 +28,7 @@ pub fn add_key_image( pub fn remove_key_image( key_image: &KeyImage, table_key_images: &mut impl DatabaseRw, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { table_key_images.delete(key_image) } @@ -38,7 +38,7 @@ pub fn remove_key_image( pub fn key_image_exists( key_image: &KeyImage, table_key_images: &impl DatabaseRo, -) -> Result { +) -> DbResult { table_key_images.contains(key_image) } diff --git a/storage/blockchain/src/ops/macros.rs b/storage/blockchain/src/ops/macros.rs index 18ec506..9c6ef7d 100644 --- a/storage/blockchain/src/ops/macros.rs +++ b/storage/blockchain/src/ops/macros.rs @@ -8,7 +8,7 @@ macro_rules! 
doc_error { () => { r#"# Errors -This function returns [`RuntimeError::KeyNotFound`] if the input (if applicable) doesn't exist or other `RuntimeError`'s on database errors."# +This function returns [`cuprate_database::RuntimeError::KeyNotFound`] if the input (if applicable) doesn't exist or other `RuntimeError`'s on database errors."# }; } pub(super) use doc_error; diff --git a/storage/blockchain/src/ops/output.rs b/storage/blockchain/src/ops/output.rs index 14c209a..96d94bb 100644 --- a/storage/blockchain/src/ops/output.rs +++ b/storage/blockchain/src/ops/output.rs @@ -5,7 +5,7 @@ use curve25519_dalek::edwards::CompressedEdwardsY; use monero_serai::transaction::Timelock; use cuprate_database::{ - RuntimeError, {DatabaseRo, DatabaseRw}, + DbResult, RuntimeError, {DatabaseRo, DatabaseRw}, }; use cuprate_helper::crypto::compute_zero_commitment; use cuprate_helper::map::u64_to_timelock; @@ -30,7 +30,7 @@ pub fn add_output( amount: Amount, output: &Output, tables: &mut impl TablesMut, -) -> Result { +) -> DbResult { // FIXME: this would be much better expressed with a // `btree_map::Entry`-like API, fix `trait DatabaseRw`. let num_outputs = match tables.num_outputs().get(&amount) { @@ -61,7 +61,7 @@ pub fn add_output( pub fn remove_output( pre_rct_output_id: &PreRctOutputId, tables: &mut impl TablesMut, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { // Decrement the amount index by 1, or delete the entry out-right. // FIXME: this would be much better expressed with a // `btree_map::Entry`-like API, fix `trait DatabaseRw`. @@ -86,7 +86,7 @@ pub fn remove_output( pub fn get_output( pre_rct_output_id: &PreRctOutputId, table_outputs: &impl DatabaseRo, -) -> Result { +) -> DbResult { table_outputs.get(pre_rct_output_id) } @@ -95,7 +95,7 @@ pub fn get_output( /// This returns the amount of pre-RCT outputs currently stored. 
#[doc = doc_error!()] #[inline] -pub fn get_num_outputs(table_outputs: &impl DatabaseRo) -> Result { +pub fn get_num_outputs(table_outputs: &impl DatabaseRo) -> DbResult { table_outputs.len() } @@ -110,7 +110,7 @@ pub fn get_num_outputs(table_outputs: &impl DatabaseRo) -> Result, -) -> Result { +) -> DbResult { let amount_index = get_rct_num_outputs(table_rct_outputs)?; table_rct_outputs.put(&amount_index, rct_output)?; Ok(amount_index) @@ -123,7 +123,7 @@ pub fn add_rct_output( pub fn remove_rct_output( amount_index: &AmountIndex, table_rct_outputs: &mut impl DatabaseRw, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { table_rct_outputs.delete(amount_index) } @@ -133,7 +133,7 @@ pub fn remove_rct_output( pub fn get_rct_output( amount_index: &AmountIndex, table_rct_outputs: &impl DatabaseRo, -) -> Result { +) -> DbResult { table_rct_outputs.get(amount_index) } @@ -142,9 +142,7 @@ pub fn get_rct_output( /// This returns the amount of RCT outputs currently stored. #[doc = doc_error!()] #[inline] -pub fn get_rct_num_outputs( - table_rct_outputs: &impl DatabaseRo, -) -> Result { +pub fn get_rct_num_outputs(table_rct_outputs: &impl DatabaseRo) -> DbResult { table_rct_outputs.len() } @@ -155,7 +153,7 @@ pub fn output_to_output_on_chain( output: &Output, amount: Amount, table_tx_unlock_time: &impl DatabaseRo, -) -> Result { +) -> DbResult { let commitment = compute_zero_commitment(amount); let time_lock = if output @@ -191,7 +189,7 @@ pub fn output_to_output_on_chain( pub fn rct_output_to_output_on_chain( rct_output: &RctOutput, table_tx_unlock_time: &impl DatabaseRo, -) -> Result { +) -> DbResult { // INVARIANT: Commitments stored are valid when stored by the database. let commitment = CompressedEdwardsY::from_slice(&rct_output.commitment) .unwrap() @@ -223,10 +221,7 @@ pub fn rct_output_to_output_on_chain( /// /// Note that this still support RCT outputs, in that case, [`PreRctOutputId::amount`] should be `0`. 
#[doc = doc_error!()] -pub fn id_to_output_on_chain( - id: &PreRctOutputId, - tables: &impl Tables, -) -> Result { +pub fn id_to_output_on_chain(id: &PreRctOutputId, tables: &impl Tables) -> DbResult { // v2 transactions. if id.amount == 0 { let rct_output = get_rct_output(&id.amount_index, tables.rct_outputs())?; diff --git a/storage/blockchain/src/ops/property.rs b/storage/blockchain/src/ops/property.rs index 7810000..3dbb950 100644 --- a/storage/blockchain/src/ops/property.rs +++ b/storage/blockchain/src/ops/property.rs @@ -3,10 +3,9 @@ //! SOMEDAY: the database `properties` table is not yet implemented. //---------------------------------------------------------------------------------------------------- Import +use cuprate_database::DbResult; use cuprate_pruning::PruningSeed; -use cuprate_database::RuntimeError; - use crate::ops::macros::doc_error; //---------------------------------------------------------------------------------------------------- Free Functions @@ -20,7 +19,7 @@ use crate::ops::macros::doc_error; /// // SOMEDAY /// ``` #[inline] -pub const fn get_blockchain_pruning_seed() -> Result { +pub const fn get_blockchain_pruning_seed() -> DbResult { // SOMEDAY: impl pruning. // We need a DB properties table. Ok(PruningSeed::NotPruned) @@ -36,7 +35,7 @@ pub const fn get_blockchain_pruning_seed() -> Result /// // SOMEDAY /// ``` #[inline] -pub const fn db_version() -> Result { +pub const fn db_version() -> DbResult { // SOMEDAY: We need a DB properties table. 
Ok(crate::constants::DATABASE_VERSION) } diff --git a/storage/blockchain/src/ops/tx.rs b/storage/blockchain/src/ops/tx.rs index 5a60ad5..0312f21 100644 --- a/storage/blockchain/src/ops/tx.rs +++ b/storage/blockchain/src/ops/tx.rs @@ -4,7 +4,7 @@ use bytemuck::TransparentWrapper; use monero_serai::transaction::{Input, Timelock, Transaction}; -use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec}; +use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, RuntimeError, StorableVec}; use cuprate_helper::crypto::compute_zero_commitment; use crate::{ @@ -52,7 +52,7 @@ pub fn add_tx( tx_hash: &TxHash, block_height: &BlockHeight, tables: &mut impl TablesMut, -) -> Result { +) -> DbResult { let tx_id = get_num_tx(tables.tx_ids_mut())?; //------------------------------------------------------ Transaction data @@ -129,7 +129,7 @@ pub fn add_tx( )? .amount_index) }) - .collect::, RuntimeError>>()?, + .collect::>>()?, Transaction::V2 { prefix, proofs } => prefix .outputs .iter() @@ -186,10 +186,7 @@ pub fn add_tx( /// #[doc = doc_error!()] #[inline] -pub fn remove_tx( - tx_hash: &TxHash, - tables: &mut impl TablesMut, -) -> Result<(TxId, Transaction), RuntimeError> { +pub fn remove_tx(tx_hash: &TxHash, tables: &mut impl TablesMut) -> DbResult<(TxId, Transaction)> { //------------------------------------------------------ Transaction data let tx_id = tables.tx_ids_mut().take(tx_hash)?; let tx_blob = tables.tx_blobs_mut().take(&tx_id)?; @@ -267,7 +264,7 @@ pub fn get_tx( tx_hash: &TxHash, table_tx_ids: &impl DatabaseRo, table_tx_blobs: &impl DatabaseRo, -) -> Result { +) -> DbResult { get_tx_from_id(&table_tx_ids.get(tx_hash)?, table_tx_blobs) } @@ -277,7 +274,7 @@ pub fn get_tx( pub fn get_tx_from_id( tx_id: &TxId, table_tx_blobs: &impl DatabaseRo, -) -> Result { +) -> DbResult { let tx_blob = table_tx_blobs.get(tx_id)?.0; Ok(Transaction::read(&mut tx_blob.as_slice())?) 
} @@ -294,7 +291,7 @@ pub fn get_tx_from_id( /// - etc #[doc = doc_error!()] #[inline] -pub fn get_num_tx(table_tx_ids: &impl DatabaseRo) -> Result { +pub fn get_num_tx(table_tx_ids: &impl DatabaseRo) -> DbResult { table_tx_ids.len() } @@ -304,10 +301,7 @@ pub fn get_num_tx(table_tx_ids: &impl DatabaseRo) -> Result, -) -> Result { +pub fn tx_exists(tx_hash: &TxHash, table_tx_ids: &impl DatabaseRo) -> DbResult { table_tx_ids.contains(tx_hash) } diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index e3c0180..7657759 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -21,7 +21,7 @@ use rayon::{ }; use thread_local::ThreadLocal; -use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError}; +use cuprate_database::{ConcreteEnv, DatabaseRo, DbResult, Env, EnvInner, RuntimeError}; use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads}; use cuprate_helper::map::combine_low_high_bits_to_u128; use cuprate_types::{ @@ -305,7 +305,7 @@ fn block_extended_header_in_range( let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); get_block_extended_header_from_height(&block_height, tables) }) - .collect::, RuntimeError>>()?, + .collect::>>()?, Chain::Alt(chain_id) => { let ranges = { let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; @@ -381,7 +381,7 @@ fn outputs(env: &ConcreteEnv, outputs: HashMap>) -> // The 2nd mapping function. // This is pulled out from the below `map()` for readability. 
- let inner_map = |amount, amount_index| -> Result<(AmountIndex, OutputOnChain), RuntimeError> { + let inner_map = |amount, amount_index| -> DbResult<(AmountIndex, OutputOnChain)> { let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); @@ -404,10 +404,10 @@ fn outputs(env: &ConcreteEnv, outputs: HashMap>) -> amount_index_set .into_par_iter() .map(|amount_index| inner_map(amount, amount_index)) - .collect::, RuntimeError>>()?, + .collect::>>()?, )) }) - .collect::>, RuntimeError>>()?; + .collect::>>>()?; Ok(BlockchainResponse::Outputs(map)) } @@ -456,7 +456,7 @@ fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec) -> Respon } } }) - .collect::, RuntimeError>>()?; + .collect::>>()?; Ok(BlockchainResponse::NumberOutputsWithAmount(map)) } @@ -522,7 +522,7 @@ fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult { .map(compact_history_index_to_height_offset::) .map_while(|i| top_block_height.checked_sub(i)) .map(|height| Ok(get_block_info(&height, &table_block_infos)?.block_hash)) - .collect::, RuntimeError>>()?; + .collect::>>()?; if compact_history_genesis_not_included::(top_block_height) { block_ids.push(get_block_info(&0, &table_block_infos)?.block_hash); diff --git a/storage/blockchain/src/service/types.rs b/storage/blockchain/src/service/types.rs index 9cd86e9..190e9f6 100644 --- a/storage/blockchain/src/service/types.rs +++ b/storage/blockchain/src/service/types.rs @@ -1,7 +1,7 @@ //! Database service type aliases. //---------------------------------------------------------------------------------------------------- Use -use cuprate_database::RuntimeError; +use cuprate_database::DbResult; use cuprate_database_service::{DatabaseReadService, DatabaseWriteHandle}; use cuprate_types::blockchain::{ BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest, @@ -11,7 +11,7 @@ use cuprate_types::blockchain::{ /// The actual type of the response. 
/// /// Either our [`BlockchainResponse`], or a database error occurred. -pub(super) type ResponseResult = Result; +pub(super) type ResponseResult = DbResult; /// The blockchain database write service. pub type BlockchainWriteHandle = DatabaseWriteHandle; diff --git a/storage/blockchain/src/service/write.rs b/storage/blockchain/src/service/write.rs index 07162d2..84c2538 100644 --- a/storage/blockchain/src/service/write.rs +++ b/storage/blockchain/src/service/write.rs @@ -2,7 +2,7 @@ //---------------------------------------------------------------------------------------------------- Import use std::sync::Arc; -use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError, TxRw}; +use cuprate_database::{ConcreteEnv, DatabaseRo, DbResult, Env, EnvInner, TxRw}; use cuprate_database_service::DatabaseWriteHandle; use cuprate_types::{ blockchain::{BlockchainResponse, BlockchainWriteRequest}, @@ -36,7 +36,7 @@ pub fn init_write_service(env: Arc) -> BlockchainWriteHandle { fn handle_blockchain_request( env: &ConcreteEnv, req: &BlockchainWriteRequest, -) -> Result { +) -> DbResult { match req { BlockchainWriteRequest::WriteBlock(block) => write_block(env, block), BlockchainWriteRequest::WriteAltBlock(alt_block) => write_alt_block(env, alt_block), diff --git a/storage/database/src/backend/heed/database.rs b/storage/database/src/backend/heed/database.rs index c985d0d..15f16b4 100644 --- a/storage/database/src/backend/heed/database.rs +++ b/storage/database/src/backend/heed/database.rs @@ -6,7 +6,7 @@ use std::{cell::RefCell, ops::RangeBounds}; use crate::{ backend::heed::types::HeedDb, database::{DatabaseIter, DatabaseRo, DatabaseRw}, - error::RuntimeError, + error::{DbResult, RuntimeError}, table::Table, }; @@ -54,16 +54,13 @@ fn get( db: &HeedDb, tx_ro: &heed::RoTxn<'_>, key: &T::Key, -) -> Result { +) -> DbResult { db.get(tx_ro, key)?.ok_or(RuntimeError::KeyNotFound) } /// Shared [`DatabaseRo::len()`]. 
#[inline] -fn len( - db: &HeedDb, - tx_ro: &heed::RoTxn<'_>, -) -> Result { +fn len(db: &HeedDb, tx_ro: &heed::RoTxn<'_>) -> DbResult { Ok(db.len(tx_ro)?) } @@ -72,7 +69,7 @@ fn len( fn first( db: &HeedDb, tx_ro: &heed::RoTxn<'_>, -) -> Result<(T::Key, T::Value), RuntimeError> { +) -> DbResult<(T::Key, T::Value)> { db.first(tx_ro)?.ok_or(RuntimeError::KeyNotFound) } @@ -81,16 +78,13 @@ fn first( fn last( db: &HeedDb, tx_ro: &heed::RoTxn<'_>, -) -> Result<(T::Key, T::Value), RuntimeError> { +) -> DbResult<(T::Key, T::Value)> { db.last(tx_ro)?.ok_or(RuntimeError::KeyNotFound) } /// Shared [`DatabaseRo::is_empty()`]. #[inline] -fn is_empty( - db: &HeedDb, - tx_ro: &heed::RoTxn<'_>, -) -> Result { +fn is_empty(db: &HeedDb, tx_ro: &heed::RoTxn<'_>) -> DbResult { Ok(db.is_empty(tx_ro)?) } @@ -100,7 +94,7 @@ impl DatabaseIter for HeedTableRo<'_, T> { fn get_range<'a, Range>( &'a self, range: Range, - ) -> Result> + 'a, RuntimeError> + ) -> DbResult> + 'a> where Range: RangeBounds + 'a, { @@ -108,24 +102,17 @@ impl DatabaseIter for HeedTableRo<'_, T> { } #[inline] - fn iter( - &self, - ) -> Result> + '_, RuntimeError> - { + fn iter(&self) -> DbResult> + '_> { Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?))) } #[inline] - fn keys( - &self, - ) -> Result> + '_, RuntimeError> { + fn keys(&self) -> DbResult> + '_> { Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?.0))) } #[inline] - fn values( - &self, - ) -> Result> + '_, RuntimeError> { + fn values(&self) -> DbResult> + '_> { Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?.1))) } } @@ -134,27 +121,27 @@ impl DatabaseIter for HeedTableRo<'_, T> { // SAFETY: `HeedTableRo: !Send` as it holds a reference to `heed::RoTxn: Send + !Sync`. 
unsafe impl DatabaseRo for HeedTableRo<'_, T> { #[inline] - fn get(&self, key: &T::Key) -> Result { + fn get(&self, key: &T::Key) -> DbResult { get::(&self.db, self.tx_ro, key) } #[inline] - fn len(&self) -> Result { + fn len(&self) -> DbResult { len::(&self.db, self.tx_ro) } #[inline] - fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn first(&self) -> DbResult<(T::Key, T::Value)> { first::(&self.db, self.tx_ro) } #[inline] - fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn last(&self) -> DbResult<(T::Key, T::Value)> { last::(&self.db, self.tx_ro) } #[inline] - fn is_empty(&self) -> Result { + fn is_empty(&self) -> DbResult { is_empty::(&self.db, self.tx_ro) } } @@ -164,45 +151,45 @@ unsafe impl DatabaseRo for HeedTableRo<'_, T> { // `HeedTableRw`'s write transaction is `!Send`. unsafe impl DatabaseRo for HeedTableRw<'_, '_, T> { #[inline] - fn get(&self, key: &T::Key) -> Result { + fn get(&self, key: &T::Key) -> DbResult { get::(&self.db, &self.tx_rw.borrow(), key) } #[inline] - fn len(&self) -> Result { + fn len(&self) -> DbResult { len::(&self.db, &self.tx_rw.borrow()) } #[inline] - fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn first(&self) -> DbResult<(T::Key, T::Value)> { first::(&self.db, &self.tx_rw.borrow()) } #[inline] - fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn last(&self) -> DbResult<(T::Key, T::Value)> { last::(&self.db, &self.tx_rw.borrow()) } #[inline] - fn is_empty(&self) -> Result { + fn is_empty(&self) -> DbResult { is_empty::(&self.db, &self.tx_rw.borrow()) } } impl DatabaseRw for HeedTableRw<'_, '_, T> { #[inline] - fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError> { + fn put(&mut self, key: &T::Key, value: &T::Value) -> DbResult<()> { Ok(self.db.put(&mut self.tx_rw.borrow_mut(), key, value)?) 
} #[inline] - fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError> { + fn delete(&mut self, key: &T::Key) -> DbResult<()> { self.db.delete(&mut self.tx_rw.borrow_mut(), key)?; Ok(()) } #[inline] - fn take(&mut self, key: &T::Key) -> Result { + fn take(&mut self, key: &T::Key) -> DbResult { // LMDB/heed does not return the value on deletion. // So, fetch it first - then delete. let value = get::(&self.db, &self.tx_rw.borrow(), key)?; @@ -216,7 +203,7 @@ impl DatabaseRw for HeedTableRw<'_, '_, T> { } #[inline] - fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError> { + fn pop_first(&mut self) -> DbResult<(T::Key, T::Value)> { let tx_rw = &mut self.tx_rw.borrow_mut(); // Get the value first... @@ -235,7 +222,7 @@ impl DatabaseRw for HeedTableRw<'_, '_, T> { } #[inline] - fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError> { + fn pop_last(&mut self) -> DbResult<(T::Key, T::Value)> { let tx_rw = &mut self.tx_rw.borrow_mut(); // Get the value first... diff --git a/storage/database/src/backend/heed/env.rs b/storage/database/src/backend/heed/env.rs index 568379e..b603013 100644 --- a/storage/database/src/backend/heed/env.rs +++ b/storage/database/src/backend/heed/env.rs @@ -18,7 +18,7 @@ use crate::{ config::{Config, SyncMode}, database::{DatabaseIter, DatabaseRo, DatabaseRw}, env::{Env, EnvInner}, - error::{InitError, RuntimeError}, + error::{DbResult, InitError, RuntimeError}, key::{Key, KeyCompare}, resize::ResizeAlgorithm, table::Table, @@ -203,7 +203,7 @@ impl Env for ConcreteEnv { &self.config } - fn sync(&self) -> Result<(), RuntimeError> { + fn sync(&self) -> DbResult<()> { Ok(self.env.read().unwrap().force_sync()?) } @@ -253,12 +253,12 @@ where type Rw<'a> = RefCell>; #[inline] - fn tx_ro(&self) -> Result, RuntimeError> { + fn tx_ro(&self) -> DbResult> { Ok(self.read_txn()?) 
} #[inline] - fn tx_rw(&self) -> Result, RuntimeError> { + fn tx_rw(&self) -> DbResult> { Ok(RefCell::new(self.write_txn()?)) } @@ -266,7 +266,7 @@ where fn open_db_ro( &self, tx_ro: &Self::Ro<'_>, - ) -> Result + DatabaseIter, RuntimeError> { + ) -> DbResult + DatabaseIter> { // Open up a read-only database using our table's const metadata. // // INVARIANT: LMDB caches the ordering / comparison function from [`EnvInner::create_db`], @@ -281,10 +281,7 @@ where } #[inline] - fn open_db_rw( - &self, - tx_rw: &Self::Rw<'_>, - ) -> Result, RuntimeError> { + fn open_db_rw(&self, tx_rw: &Self::Rw<'_>) -> DbResult> { // Open up a read/write database using our table's const metadata. // // INVARIANT: LMDB caches the ordering / comparison function from [`EnvInner::create_db`], @@ -296,7 +293,7 @@ where }) } - fn create_db(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError> { + fn create_db(&self, tx_rw: &Self::Rw<'_>) -> DbResult<()> { // Create a database using our: // - [`Table`]'s const metadata. // - (potentially) our [`Key`] comparison function @@ -328,7 +325,7 @@ where } #[inline] - fn clear_db(&self, tx_rw: &mut Self::Rw<'_>) -> Result<(), RuntimeError> { + fn clear_db(&self, tx_rw: &mut Self::Rw<'_>) -> DbResult<()> { let tx_rw = tx_rw.get_mut(); // Open the table. 
We don't care about flags or key diff --git a/storage/database/src/backend/heed/transaction.rs b/storage/database/src/backend/heed/transaction.rs index d32f370..b7c0f54 100644 --- a/storage/database/src/backend/heed/transaction.rs +++ b/storage/database/src/backend/heed/transaction.rs @@ -4,31 +4,31 @@ use std::cell::RefCell; //---------------------------------------------------------------------------------------------------- Import use crate::{ - error::RuntimeError, + error::DbResult, transaction::{TxRo, TxRw}, }; //---------------------------------------------------------------------------------------------------- TxRo impl TxRo<'_> for heed::RoTxn<'_> { - fn commit(self) -> Result<(), RuntimeError> { + fn commit(self) -> DbResult<()> { Ok(heed::RoTxn::commit(self)?) } } //---------------------------------------------------------------------------------------------------- TxRw impl TxRo<'_> for RefCell> { - fn commit(self) -> Result<(), RuntimeError> { + fn commit(self) -> DbResult<()> { TxRw::commit(self) } } impl TxRw<'_> for RefCell> { - fn commit(self) -> Result<(), RuntimeError> { + fn commit(self) -> DbResult<()> { Ok(heed::RwTxn::commit(self.into_inner())?) } /// This function is infallible. 
- fn abort(self) -> Result<(), RuntimeError> { + fn abort(self) -> DbResult<()> { heed::RwTxn::abort(self.into_inner()); Ok(()) } diff --git a/storage/database/src/backend/redb/database.rs b/storage/database/src/backend/redb/database.rs index dafb241..0be58ef 100644 --- a/storage/database/src/backend/redb/database.rs +++ b/storage/database/src/backend/redb/database.rs @@ -11,7 +11,7 @@ use crate::{ types::{RedbTableRo, RedbTableRw}, }, database::{DatabaseIter, DatabaseRo, DatabaseRw}, - error::RuntimeError, + error::{DbResult, RuntimeError}, table::Table, }; @@ -25,7 +25,7 @@ use crate::{ fn get( db: &impl ReadableTable, StorableRedb>, key: &T::Key, -) -> Result { +) -> DbResult { Ok(db.get(key)?.ok_or(RuntimeError::KeyNotFound)?.value()) } @@ -33,7 +33,7 @@ fn get( #[inline] fn len( db: &impl ReadableTable, StorableRedb>, -) -> Result { +) -> DbResult { Ok(db.len()?) } @@ -41,7 +41,7 @@ fn len( #[inline] fn first( db: &impl ReadableTable, StorableRedb>, -) -> Result<(T::Key, T::Value), RuntimeError> { +) -> DbResult<(T::Key, T::Value)> { let (key, value) = db.first()?.ok_or(RuntimeError::KeyNotFound)?; Ok((key.value(), value.value())) } @@ -50,7 +50,7 @@ fn first( #[inline] fn last( db: &impl ReadableTable, StorableRedb>, -) -> Result<(T::Key, T::Value), RuntimeError> { +) -> DbResult<(T::Key, T::Value)> { let (key, value) = db.last()?.ok_or(RuntimeError::KeyNotFound)?; Ok((key.value(), value.value())) } @@ -59,7 +59,7 @@ fn last( #[inline] fn is_empty( db: &impl ReadableTable, StorableRedb>, -) -> Result { +) -> DbResult { Ok(db.is_empty()?) 
} @@ -69,7 +69,7 @@ impl DatabaseIter for RedbTableRo { fn get_range<'a, Range>( &'a self, range: Range, - ) -> Result> + 'a, RuntimeError> + ) -> DbResult> + 'a> where Range: RangeBounds + 'a, { @@ -80,10 +80,7 @@ impl DatabaseIter for RedbTableRo { } #[inline] - fn iter( - &self, - ) -> Result> + '_, RuntimeError> - { + fn iter(&self) -> DbResult> + '_> { Ok(ReadableTable::iter(self)?.map(|result| { let (key, value) = result?; Ok((key.value(), value.value())) @@ -91,9 +88,7 @@ impl DatabaseIter for RedbTableRo { } #[inline] - fn keys( - &self, - ) -> Result> + '_, RuntimeError> { + fn keys(&self) -> DbResult> + '_> { Ok(ReadableTable::iter(self)?.map(|result| { let (key, _value) = result?; Ok(key.value()) @@ -101,9 +96,7 @@ impl DatabaseIter for RedbTableRo { } #[inline] - fn values( - &self, - ) -> Result> + '_, RuntimeError> { + fn values(&self) -> DbResult> + '_> { Ok(ReadableTable::iter(self)?.map(|result| { let (_key, value) = result?; Ok(value.value()) @@ -115,27 +108,27 @@ impl DatabaseIter for RedbTableRo { // SAFETY: Both `redb`'s transaction and table types are `Send + Sync`. unsafe impl DatabaseRo for RedbTableRo { #[inline] - fn get(&self, key: &T::Key) -> Result { + fn get(&self, key: &T::Key) -> DbResult { get::(self, key) } #[inline] - fn len(&self) -> Result { + fn len(&self) -> DbResult { len::(self) } #[inline] - fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn first(&self) -> DbResult<(T::Key, T::Value)> { first::(self) } #[inline] - fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn last(&self) -> DbResult<(T::Key, T::Value)> { last::(self) } #[inline] - fn is_empty(&self) -> Result { + fn is_empty(&self) -> DbResult { is_empty::(self) } } @@ -144,27 +137,27 @@ unsafe impl DatabaseRo for RedbTableRo // SAFETY: Both `redb`'s transaction and table types are `Send + Sync`. 
unsafe impl DatabaseRo for RedbTableRw<'_, T::Key, T::Value> { #[inline] - fn get(&self, key: &T::Key) -> Result { + fn get(&self, key: &T::Key) -> DbResult { get::(self, key) } #[inline] - fn len(&self) -> Result { + fn len(&self) -> DbResult { len::(self) } #[inline] - fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn first(&self) -> DbResult<(T::Key, T::Value)> { first::(self) } #[inline] - fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn last(&self) -> DbResult<(T::Key, T::Value)> { last::(self) } #[inline] - fn is_empty(&self) -> Result { + fn is_empty(&self) -> DbResult { is_empty::(self) } } @@ -173,19 +166,19 @@ impl DatabaseRw for RedbTableRw<'_, T::Key, T::Value> { // `redb` returns the value after function calls so we end with Ok(()) instead. #[inline] - fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError> { + fn put(&mut self, key: &T::Key, value: &T::Value) -> DbResult<()> { redb::Table::insert(self, key, value)?; Ok(()) } #[inline] - fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError> { + fn delete(&mut self, key: &T::Key) -> DbResult<()> { redb::Table::remove(self, key)?; Ok(()) } #[inline] - fn take(&mut self, key: &T::Key) -> Result { + fn take(&mut self, key: &T::Key) -> DbResult { if let Some(value) = redb::Table::remove(self, key)? 
{ Ok(value.value()) } else { @@ -194,13 +187,13 @@ impl DatabaseRw for RedbTableRw<'_, T::Key, T::Value> { } #[inline] - fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError> { + fn pop_first(&mut self) -> DbResult<(T::Key, T::Value)> { let (key, value) = redb::Table::pop_first(self)?.ok_or(RuntimeError::KeyNotFound)?; Ok((key.value(), value.value())) } #[inline] - fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError> { + fn pop_last(&mut self) -> DbResult<(T::Key, T::Value)> { let (key, value) = redb::Table::pop_last(self)?.ok_or(RuntimeError::KeyNotFound)?; Ok((key.value(), value.value())) } diff --git a/storage/database/src/backend/redb/env.rs b/storage/database/src/backend/redb/env.rs index a405ea7..4bd49d6 100644 --- a/storage/database/src/backend/redb/env.rs +++ b/storage/database/src/backend/redb/env.rs @@ -6,7 +6,7 @@ use crate::{ config::{Config, SyncMode}, database::{DatabaseIter, DatabaseRo, DatabaseRw}, env::{Env, EnvInner}, - error::{InitError, RuntimeError}, + error::{DbResult, InitError, RuntimeError}, table::Table, TxRw, }; @@ -105,7 +105,7 @@ impl Env for ConcreteEnv { &self.config } - fn sync(&self) -> Result<(), RuntimeError> { + fn sync(&self) -> DbResult<()> { // `redb`'s syncs are tied with write transactions, // so just create one, don't do anything and commit. let mut tx_rw = self.env.begin_write()?; @@ -127,12 +127,12 @@ where type Rw<'a> = redb::WriteTransaction; #[inline] - fn tx_ro(&self) -> Result { + fn tx_ro(&self) -> DbResult { Ok(self.0.begin_read()?) } #[inline] - fn tx_rw(&self) -> Result { + fn tx_rw(&self) -> DbResult { // `redb` has sync modes on the TX level, unlike heed, // which sets it at the Environment level. // @@ -146,7 +146,7 @@ where fn open_db_ro( &self, tx_ro: &Self::Ro<'_>, - ) -> Result + DatabaseIter, RuntimeError> { + ) -> DbResult + DatabaseIter> { // Open up a read-only database using our `T: Table`'s const metadata. 
let table: redb::TableDefinition<'static, StorableRedb, StorableRedb> = redb::TableDefinition::new(T::NAME); @@ -155,10 +155,7 @@ where } #[inline] - fn open_db_rw( - &self, - tx_rw: &Self::Rw<'_>, - ) -> Result, RuntimeError> { + fn open_db_rw(&self, tx_rw: &Self::Rw<'_>) -> DbResult> { // Open up a read/write database using our `T: Table`'s const metadata. let table: redb::TableDefinition<'static, StorableRedb, StorableRedb> = redb::TableDefinition::new(T::NAME); @@ -168,14 +165,14 @@ where Ok(tx_rw.open_table(table)?) } - fn create_db(&self, tx_rw: &redb::WriteTransaction) -> Result<(), RuntimeError> { + fn create_db(&self, tx_rw: &redb::WriteTransaction) -> DbResult<()> { // INVARIANT: `redb` creates tables if they don't exist. self.open_db_rw::(tx_rw)?; Ok(()) } #[inline] - fn clear_db(&self, tx_rw: &mut redb::WriteTransaction) -> Result<(), RuntimeError> { + fn clear_db(&self, tx_rw: &mut redb::WriteTransaction) -> DbResult<()> { let table: redb::TableDefinition< 'static, StorableRedb<::Key>, diff --git a/storage/database/src/backend/redb/storable.rs b/storage/database/src/backend/redb/storable.rs index abf2e71..f0412ef 100644 --- a/storage/database/src/backend/redb/storable.rs +++ b/storage/database/src/backend/redb/storable.rs @@ -34,8 +34,14 @@ impl redb::Value for StorableRedb where T: Storable + 'static, { - type SelfType<'a> = T where Self: 'a; - type AsBytes<'a> = &'a [u8] where Self: 'a; + type SelfType<'a> + = T + where + Self: 'a; + type AsBytes<'a> + = &'a [u8] + where + Self: 'a; #[inline] fn fixed_width() -> Option { diff --git a/storage/database/src/backend/redb/transaction.rs b/storage/database/src/backend/redb/transaction.rs index 5048851..8d93986 100644 --- a/storage/database/src/backend/redb/transaction.rs +++ b/storage/database/src/backend/redb/transaction.rs @@ -2,14 +2,14 @@ //---------------------------------------------------------------------------------------------------- Import use crate::{ - error::RuntimeError, + error::DbResult, 
transaction::{TxRo, TxRw}, }; //---------------------------------------------------------------------------------------------------- TxRo impl TxRo<'_> for redb::ReadTransaction { /// This function is infallible. - fn commit(self) -> Result<(), RuntimeError> { + fn commit(self) -> DbResult<()> { // `redb`'s read transactions cleanup automatically when all references are dropped. // // There is `close()`: @@ -22,11 +22,11 @@ impl TxRo<'_> for redb::ReadTransaction { //---------------------------------------------------------------------------------------------------- TxRw impl TxRw<'_> for redb::WriteTransaction { - fn commit(self) -> Result<(), RuntimeError> { + fn commit(self) -> DbResult<()> { Ok(self.commit()?) } - fn abort(self) -> Result<(), RuntimeError> { + fn abort(self) -> DbResult<()> { Ok(self.abort()?) } } diff --git a/storage/database/src/config/sync_mode.rs b/storage/database/src/config/sync_mode.rs index 5a0cba5..dbb34e7 100644 --- a/storage/database/src/config/sync_mode.rs +++ b/storage/database/src/config/sync_mode.rs @@ -9,7 +9,6 @@ //! based on these values. //---------------------------------------------------------------------------------------------------- Import - #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/storage/database/src/database.rs b/storage/database/src/database.rs index 6fbb7aa..c019972 100644 --- a/storage/database/src/database.rs +++ b/storage/database/src/database.rs @@ -3,7 +3,10 @@ //---------------------------------------------------------------------------------------------------- Import use std::ops::RangeBounds; -use crate::{error::RuntimeError, table::Table}; +use crate::{ + error::{DbResult, RuntimeError}, + table::Table, +}; //---------------------------------------------------------------------------------------------------- DatabaseIter /// Generic post-fix documentation for `DatabaseIter` methods. 
@@ -48,27 +51,22 @@ pub trait DatabaseIter { fn get_range<'a, Range>( &'a self, range: Range, - ) -> Result> + 'a, RuntimeError> + ) -> DbResult> + 'a> where Range: RangeBounds + 'a; /// Get an [`Iterator`] that returns the `(key, value)` types for this database. #[doc = doc_iter!()] #[expect(clippy::iter_not_returning_iterator)] - fn iter( - &self, - ) -> Result> + '_, RuntimeError>; + fn iter(&self) -> DbResult> + '_>; /// Get an [`Iterator`] that returns _only_ the `key` type for this database. #[doc = doc_iter!()] - fn keys(&self) - -> Result> + '_, RuntimeError>; + fn keys(&self) -> DbResult> + '_>; /// Get an [`Iterator`] that returns _only_ the `value` type for this database. #[doc = doc_iter!()] - fn values( - &self, - ) -> Result> + '_, RuntimeError>; + fn values(&self) -> DbResult> + '_>; } //---------------------------------------------------------------------------------------------------- DatabaseRo @@ -76,7 +74,7 @@ pub trait DatabaseIter { macro_rules! doc_database { () => { r"# Errors -This will return [`RuntimeError::KeyNotFound`] if: +This will return [`crate::RuntimeError::KeyNotFound`] if: - Input does not exist OR - Database is empty" }; @@ -111,7 +109,7 @@ This will return [`RuntimeError::KeyNotFound`] if: pub unsafe trait DatabaseRo { /// Get the value corresponding to a key. #[doc = doc_database!()] - fn get(&self, key: &T::Key) -> Result; + fn get(&self, key: &T::Key) -> DbResult; /// Returns `true` if the database contains a value for the specified key. /// @@ -120,7 +118,7 @@ pub unsafe trait DatabaseRo { /// as in that case, `Ok(false)` will be returned. /// /// Other errors may still occur. - fn contains(&self, key: &T::Key) -> Result { + fn contains(&self, key: &T::Key) -> DbResult { match self.get(key) { Ok(_) => Ok(true), Err(RuntimeError::KeyNotFound) => Ok(false), @@ -132,21 +130,21 @@ pub unsafe trait DatabaseRo { /// /// # Errors /// This will never return [`RuntimeError::KeyNotFound`]. 
- fn len(&self) -> Result; + fn len(&self) -> DbResult; /// Returns the first `(key, value)` pair in the database. #[doc = doc_database!()] - fn first(&self) -> Result<(T::Key, T::Value), RuntimeError>; + fn first(&self) -> DbResult<(T::Key, T::Value)>; /// Returns the last `(key, value)` pair in the database. #[doc = doc_database!()] - fn last(&self) -> Result<(T::Key, T::Value), RuntimeError>; + fn last(&self) -> DbResult<(T::Key, T::Value)>; /// Returns `true` if the database contains no `(key, value)` pairs. /// /// # Errors /// This can only return [`RuntimeError::Io`] on errors. - fn is_empty(&self) -> Result; + fn is_empty(&self) -> DbResult; } //---------------------------------------------------------------------------------------------------- DatabaseRw @@ -161,7 +159,7 @@ pub trait DatabaseRw: DatabaseRo { #[doc = doc_database!()] /// /// This will never [`RuntimeError::KeyExists`]. - fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError>; + fn put(&mut self, key: &T::Key, value: &T::Value) -> DbResult<()>; /// Delete a key-value pair in the database. /// @@ -170,7 +168,7 @@ pub trait DatabaseRw: DatabaseRo { #[doc = doc_database!()] /// /// This will never [`RuntimeError::KeyExists`]. - fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError>; + fn delete(&mut self, key: &T::Key) -> DbResult<()>; /// Delete and return a key-value pair in the database. /// @@ -178,7 +176,7 @@ pub trait DatabaseRw: DatabaseRo { /// it will serialize the `T::Value` and return it. /// #[doc = doc_database!()] - fn take(&mut self, key: &T::Key) -> Result; + fn take(&mut self, key: &T::Key) -> DbResult; /// Fetch the value, and apply a function to it - or delete the entry. 
/// @@ -192,7 +190,7 @@ pub trait DatabaseRw: DatabaseRo { /// - If `f` returns `None`, the entry will be [`DatabaseRw::delete`]d /// #[doc = doc_database!()] - fn update(&mut self, key: &T::Key, mut f: F) -> Result<(), RuntimeError> + fn update(&mut self, key: &T::Key, mut f: F) -> DbResult<()> where F: FnMut(T::Value) -> Option, { @@ -207,10 +205,10 @@ pub trait DatabaseRw: DatabaseRo { /// Removes and returns the first `(key, value)` pair in the database. /// #[doc = doc_database!()] - fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError>; + fn pop_first(&mut self) -> DbResult<(T::Key, T::Value)>; /// Removes and returns the last `(key, value)` pair in the database. /// #[doc = doc_database!()] - fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError>; + fn pop_last(&mut self) -> DbResult<(T::Key, T::Value)>; } diff --git a/storage/database/src/env.rs b/storage/database/src/env.rs index 1ae6aa1..56b92cb 100644 --- a/storage/database/src/env.rs +++ b/storage/database/src/env.rs @@ -6,7 +6,7 @@ use std::num::NonZeroUsize; use crate::{ config::Config, database::{DatabaseIter, DatabaseRo, DatabaseRw}, - error::{InitError, RuntimeError}, + error::{DbResult, InitError}, resize::ResizeAlgorithm, table::Table, transaction::{TxRo, TxRw}, @@ -39,7 +39,7 @@ pub trait Env: Sized { /// /// # Invariant /// If this is `false`, that means this [`Env`] - /// must _never_ return a [`RuntimeError::ResizeNeeded`]. + /// must _never_ return a [`crate::RuntimeError::ResizeNeeded`]. /// /// If this is `true`, [`Env::resize_map`] & [`Env::current_map_size`] /// _must_ be re-implemented, as it just panics by default. @@ -88,7 +88,7 @@ pub trait Env: Sized { /// This will error if the database file could not be opened. /// /// This is the only [`Env`] function that will return - /// an [`InitError`] instead of a [`RuntimeError`]. + /// an [`InitError`] instead of a [`crate::RuntimeError`]. 
fn open(config: Config) -> Result; /// Return the [`Config`] that this database was [`Env::open`]ed with. @@ -107,7 +107,7 @@ pub trait Env: Sized { /// /// # Errors /// If there is a synchronization error, this should return an error. - fn sync(&self) -> Result<(), RuntimeError>; + fn sync(&self) -> DbResult<()>; /// Resize the database's memory map to a /// new (bigger) size using a [`ResizeAlgorithm`]. @@ -218,14 +218,14 @@ pub trait EnvInner<'env> { /// Create a read-only transaction. /// /// # Errors - /// This will only return [`RuntimeError::Io`] if it errors. - fn tx_ro(&self) -> Result, RuntimeError>; + /// This will only return [`crate::RuntimeError::Io`] if it errors. + fn tx_ro(&self) -> DbResult>; /// Create a read/write transaction. /// /// # Errors - /// This will only return [`RuntimeError::Io`] if it errors. - fn tx_rw(&self) -> Result, RuntimeError>; + /// This will only return [`crate::RuntimeError::Io`] if it errors. + fn tx_rw(&self) -> DbResult>; /// Open a database in read-only mode. /// @@ -269,17 +269,17 @@ pub trait EnvInner<'env> { /// ``` /// /// # Errors - /// This will only return [`RuntimeError::Io`] on normal errors. + /// This will only return [`crate::RuntimeError::Io`] on normal errors. /// /// If the specified table is not created upon before this function is called, - /// this will return [`RuntimeError::TableNotFound`]. + /// this will return [`crate::RuntimeError::TableNotFound`]. /// /// # Invariant #[doc = doc_heed_create_db_invariant!()] fn open_db_ro( &self, tx_ro: &Self::Ro<'_>, - ) -> Result + DatabaseIter, RuntimeError>; + ) -> DbResult + DatabaseIter>; /// Open a database in read/write mode. /// @@ -293,25 +293,22 @@ pub trait EnvInner<'env> { /// passed as a generic to this function. /// /// # Errors - /// This will only return [`RuntimeError::Io`] on errors. + /// This will only return [`crate::RuntimeError::Io`] on errors. 
/// /// # Invariant #[doc = doc_heed_create_db_invariant!()] - fn open_db_rw( - &self, - tx_rw: &Self::Rw<'_>, - ) -> Result, RuntimeError>; + fn open_db_rw(&self, tx_rw: &Self::Rw<'_>) -> DbResult>; /// Create a database table. /// /// This will create the database [`Table`] passed as a generic to this function. /// /// # Errors - /// This will only return [`RuntimeError::Io`] on errors. + /// This will only return [`crate::RuntimeError::Io`] on errors. /// /// # Invariant #[doc = doc_heed_create_db_invariant!()] - fn create_db(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError>; + fn create_db(&self, tx_rw: &Self::Rw<'_>) -> DbResult<()>; /// Clear all `(key, value)`'s from a database table. /// @@ -322,9 +319,9 @@ pub trait EnvInner<'env> { /// function's effects can be aborted using [`TxRw::abort`]. /// /// # Errors - /// This will return [`RuntimeError::Io`] on normal errors. + /// This will return [`crate::RuntimeError::Io`] on normal errors. /// /// If the specified table is not created upon before this function is called, - /// this will return [`RuntimeError::TableNotFound`]. - fn clear_db(&self, tx_rw: &mut Self::Rw<'_>) -> Result<(), RuntimeError>; + /// this will return [`crate::RuntimeError::TableNotFound`]. + fn clear_db(&self, tx_rw: &mut Self::Rw<'_>) -> DbResult<()>; } diff --git a/storage/database/src/error.rs b/storage/database/src/error.rs index 3471ac7..82f80b9 100644 --- a/storage/database/src/error.rs +++ b/storage/database/src/error.rs @@ -7,6 +7,9 @@ use std::fmt::Debug; /// Alias for a thread-safe boxed error. type BoxError = Box; +/// [`Result`] with [`RuntimeError`] as the error. +pub type DbResult = Result; + //---------------------------------------------------------------------------------------------------- InitError /// Errors that occur during ([`Env::open`](crate::env::Env::open)). 
/// diff --git a/storage/database/src/lib.rs b/storage/database/src/lib.rs index 45bfc53..8e48fca 100644 --- a/storage/database/src/lib.rs +++ b/storage/database/src/lib.rs @@ -50,7 +50,7 @@ pub use constants::{ }; pub use database::{DatabaseIter, DatabaseRo, DatabaseRw}; pub use env::{Env, EnvInner}; -pub use error::{InitError, RuntimeError}; +pub use error::{DbResult, InitError, RuntimeError}; pub use key::{Key, KeyCompare}; pub use storable::{Storable, StorableBytes, StorableStr, StorableVec}; pub use table::Table; diff --git a/storage/database/src/table.rs b/storage/database/src/table.rs index 3ad0e79..6d0daa2 100644 --- a/storage/database/src/table.rs +++ b/storage/database/src/table.rs @@ -1,7 +1,6 @@ //! Database table abstraction; `trait Table`. //---------------------------------------------------------------------------------------------------- Import - use crate::{key::Key, storable::Storable}; //---------------------------------------------------------------------------------------------------- Table diff --git a/storage/database/src/tables.rs b/storage/database/src/tables.rs index 83a00e1..56203ad 100644 --- a/storage/database/src/tables.rs +++ b/storage/database/src/tables.rs @@ -211,7 +211,7 @@ macro_rules! define_tables { /// /// # Errors /// This returns errors on regular database errors. - fn all_tables_empty(&self) -> Result; + fn all_tables_empty(&self) -> $crate::DbResult; } /// Object containing all opened [`Table`](cuprate_database::Table)s in read + iter mode. @@ -293,7 +293,7 @@ macro_rules! define_tables { } )* - fn all_tables_empty(&self) -> Result { + fn all_tables_empty(&self) -> $crate::DbResult { $( if !$crate::DatabaseRo::is_empty(&self.$index)? { return Ok(false); @@ -369,7 +369,7 @@ macro_rules! define_tables { /// /// # Errors /// This will only return [`cuprate_database::RuntimeError::Io`] if it errors. 
- fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result; + fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> $crate::DbResult; /// Open all tables in read-write mode. /// @@ -378,7 +378,7 @@ macro_rules! define_tables { /// /// # Errors /// This will only return [`cuprate_database::RuntimeError::Io`] on errors. - fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result; + fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult; /// Create all database tables. /// @@ -386,7 +386,7 @@ macro_rules! define_tables { /// /// # Errors /// This will only return [`cuprate_database::RuntimeError::Io`] on errors. - fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), $crate::RuntimeError>; + fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<()>; } impl<'env, Ei> OpenTables<'env> for Ei @@ -396,19 +396,19 @@ macro_rules! define_tables { type Ro<'tx> = >::Ro<'tx>; type Rw<'tx> = >::Rw<'tx>; - fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result { + fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> $crate::DbResult { Ok(($( Self::open_db_ro::<[<$table:camel>]>(self, tx_ro)?, )*)) } - fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result { + fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult { Ok(($( Self::open_db_rw::<[<$table:camel>]>(self, tx_rw)?, )*)) } - fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), $crate::RuntimeError> { + fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<()> { let result = Ok(($( Self::create_db::<[<$table:camel>]>(self, tx_rw), )*)); diff --git a/storage/database/src/transaction.rs b/storage/database/src/transaction.rs index 8f33983..16d1c51 100644 --- a/storage/database/src/transaction.rs +++ b/storage/database/src/transaction.rs @@ -1,7 +1,7 @@ //! Database transaction abstraction; `trait TxRo`, `trait TxRw`. 
//---------------------------------------------------------------------------------------------------- Import -use crate::error::RuntimeError; +use crate::error::DbResult; //---------------------------------------------------------------------------------------------------- TxRo /// Read-only database transaction. @@ -16,7 +16,7 @@ pub trait TxRo<'tx> { /// /// # Errors /// This operation will always return `Ok(())` with the `redb` backend. - fn commit(self) -> Result<(), RuntimeError>; + fn commit(self) -> DbResult<()>; } //---------------------------------------------------------------------------------------------------- TxRw @@ -32,12 +32,12 @@ pub trait TxRw<'tx> { /// This operation will always return `Ok(())` with the `redb` backend. /// /// If `Env::MANUAL_RESIZE == true`, - /// [`RuntimeError::ResizeNeeded`] may be returned. - fn commit(self) -> Result<(), RuntimeError>; + /// [`crate::RuntimeError::ResizeNeeded`] may be returned. + fn commit(self) -> DbResult<()>; /// Abort the transaction, erasing any writes that have occurred. /// /// # Errors /// This operation will always return `Ok(())` with the `heed` backend. - fn abort(self) -> Result<(), RuntimeError>; + fn abort(self) -> DbResult<()>; } diff --git a/storage/service/src/service/read.rs b/storage/service/src/service/read.rs index 0ab6853..187ffa4 100644 --- a/storage/service/src/service/read.rs +++ b/storage/service/src/service/read.rs @@ -7,7 +7,7 @@ use futures::channel::oneshot; use rayon::ThreadPool; use tower::Service; -use cuprate_database::{ConcreteEnv, RuntimeError}; +use cuprate_database::{ConcreteEnv, DbResult, RuntimeError}; use cuprate_helper::asynch::InfallibleOneshotReceiver; /// The [`rayon::ThreadPool`] service. @@ -24,7 +24,7 @@ pub struct DatabaseReadService { pool: Arc, /// The function used to handle request. 
- inner_handler: Arc Result + Send + Sync + 'static>, + inner_handler: Arc DbResult + Send + Sync + 'static>, } // Deriving [`Clone`] means `Req` & `Res` need to be `Clone`, even if they aren't. @@ -51,7 +51,7 @@ where pub fn new( env: Arc, pool: Arc, - req_handler: impl Fn(&ConcreteEnv, Req) -> Result + Send + Sync + 'static, + req_handler: impl Fn(&ConcreteEnv, Req) -> DbResult + Send + Sync + 'static, ) -> Self { let inner_handler = Arc::new(move |req| req_handler(&env, req)); @@ -69,9 +69,9 @@ where { type Response = Res; type Error = RuntimeError; - type Future = InfallibleOneshotReceiver>; + type Future = InfallibleOneshotReceiver>; - fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } diff --git a/storage/service/src/service/write.rs b/storage/service/src/service/write.rs index 607c4aa..6bcd725 100644 --- a/storage/service/src/service/write.rs +++ b/storage/service/src/service/write.rs @@ -6,7 +6,7 @@ use std::{ use futures::channel::oneshot; -use cuprate_database::{ConcreteEnv, Env, RuntimeError}; +use cuprate_database::{ConcreteEnv, DbResult, Env, RuntimeError}; use cuprate_helper::asynch::InfallibleOneshotReceiver; //---------------------------------------------------------------------------------------------------- Constants @@ -26,8 +26,7 @@ pub struct DatabaseWriteHandle { /// Sender channel to the database write thread-pool. /// /// We provide the response channel for the thread-pool. - pub(super) sender: - crossbeam::channel::Sender<(Req, oneshot::Sender>)>, + pub(super) sender: crossbeam::channel::Sender<(Req, oneshot::Sender>)>, } impl Clone for DatabaseWriteHandle { @@ -48,7 +47,7 @@ where #[inline(never)] // Only called once. pub fn init( env: Arc, - inner_handler: impl Fn(&ConcreteEnv, &Req) -> Result + Send + 'static, + inner_handler: impl Fn(&ConcreteEnv, &Req) -> DbResult + Send + 'static, ) -> Self { // Initialize `Request/Response` channels. 
let (sender, receiver) = crossbeam::channel::unbounded(); @@ -66,10 +65,10 @@ where impl tower::Service for DatabaseWriteHandle { type Response = Res; type Error = RuntimeError; - type Future = InfallibleOneshotReceiver>; + type Future = InfallibleOneshotReceiver>; #[inline] - fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } @@ -89,8 +88,8 @@ impl tower::Service for DatabaseWriteHandle { /// The main function of the writer thread. fn database_writer( env: &ConcreteEnv, - receiver: &crossbeam::channel::Receiver<(Req, oneshot::Sender>)>, - inner_handler: impl Fn(&ConcreteEnv, &Req) -> Result, + receiver: &crossbeam::channel::Receiver<(Req, oneshot::Sender>)>, + inner_handler: impl Fn(&ConcreteEnv, &Req) -> DbResult, ) where Req: Send + 'static, Res: Debug + Send + 'static, diff --git a/storage/txpool/src/ops/key_images.rs b/storage/txpool/src/ops/key_images.rs index 04aa1b4..76cae14 100644 --- a/storage/txpool/src/ops/key_images.rs +++ b/storage/txpool/src/ops/key_images.rs @@ -1,7 +1,7 @@ //! Tx-pool key image ops. 
use monero_serai::transaction::Input; -use cuprate_database::{DatabaseRw, RuntimeError}; +use cuprate_database::{DatabaseRw, DbResult}; use crate::{ops::TxPoolWriteError, tables::SpentKeyImages, types::TransactionHash}; @@ -34,7 +34,7 @@ pub(super) fn add_tx_key_images( pub(super) fn remove_tx_key_images( inputs: &[Input], kis_table: &mut impl DatabaseRw, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { for ki in inputs.iter().map(ki_from_input) { kis_table.delete(&ki)?; } diff --git a/storage/txpool/src/ops/tx_read.rs b/storage/txpool/src/ops/tx_read.rs index 5569075..24101f7 100644 --- a/storage/txpool/src/ops/tx_read.rs +++ b/storage/txpool/src/ops/tx_read.rs @@ -5,7 +5,7 @@ use std::sync::Mutex; use monero_serai::transaction::Transaction; -use cuprate_database::{DatabaseRo, RuntimeError}; +use cuprate_database::{DatabaseRo, DbResult}; use cuprate_types::{TransactionVerificationData, TxVersion}; use crate::{ @@ -17,7 +17,7 @@ use crate::{ pub fn get_transaction_verification_data( tx_hash: &TransactionHash, tables: &impl Tables, -) -> Result { +) -> DbResult { let tx_blob = tables.transaction_blobs().get(tx_hash)?.0; let tx_info = tables.transaction_infos().get(tx_hash)?; @@ -45,7 +45,7 @@ pub fn get_transaction_verification_data( pub fn in_stem_pool( tx_hash: &TransactionHash, tx_infos: &impl DatabaseRo, -) -> Result { +) -> DbResult { Ok(tx_infos .get(tx_hash)? .flags diff --git a/storage/txpool/src/ops/tx_write.rs b/storage/txpool/src/ops/tx_write.rs index dc5ab46..8f426fb 100644 --- a/storage/txpool/src/ops/tx_write.rs +++ b/storage/txpool/src/ops/tx_write.rs @@ -4,7 +4,7 @@ use bytemuck::TransparentWrapper; use monero_serai::transaction::{NotPruned, Transaction}; -use cuprate_database::{DatabaseRw, RuntimeError, StorableVec}; +use cuprate_database::{DatabaseRw, DbResult, StorableVec}; use cuprate_types::TransactionVerificationData; use crate::{ @@ -67,10 +67,7 @@ pub fn add_transaction( } /// Removes a transaction from the transaction pool. 
-pub fn remove_transaction( - tx_hash: &TransactionHash, - tables: &mut impl TablesMut, -) -> Result<(), RuntimeError> { +pub fn remove_transaction(tx_hash: &TransactionHash, tables: &mut impl TablesMut) -> DbResult<()> { // Remove the tx blob from table 0. let tx_blob = tables.transaction_blobs_mut().take(tx_hash)?.0; diff --git a/storage/txpool/src/service/read.rs b/storage/txpool/src/service/read.rs index 0de1e7d..44a29b3 100644 --- a/storage/txpool/src/service/read.rs +++ b/storage/txpool/src/service/read.rs @@ -11,7 +11,7 @@ use std::{ use rayon::ThreadPool; -use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError}; +use cuprate_database::{ConcreteEnv, DatabaseRo, DbResult, Env, EnvInner, RuntimeError}; use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads}; use crate::{ @@ -137,7 +137,7 @@ fn filter_known_tx_blob_hashes( // A closure that returns `true` if a tx with a certain blob hash is unknown. // This also fills in `stem_tx_hashes`. - let mut tx_unknown = |blob_hash| -> Result { + let mut tx_unknown = |blob_hash| -> DbResult { match tx_blob_hashes.get(&blob_hash) { Ok(tx_hash) => { if in_stem_pool(&tx_hash, &tx_infos)? { diff --git a/storage/txpool/src/service/types.rs b/storage/txpool/src/service/types.rs index 5c6b97c..af1ca98 100644 --- a/storage/txpool/src/service/types.rs +++ b/storage/txpool/src/service/types.rs @@ -2,7 +2,7 @@ //! //! Only used internally for our [`tower::Service`] impls. -use cuprate_database::RuntimeError; +use cuprate_database::DbResult; use cuprate_database_service::{DatabaseReadService, DatabaseWriteHandle}; use crate::service::interface::{ @@ -12,7 +12,7 @@ use crate::service::interface::{ /// The actual type of the response. /// /// Either our [`TxpoolReadResponse`], or a database error occurred. -pub(super) type ReadResponseResult = Result; +pub(super) type ReadResponseResult = DbResult; /// The transaction pool database write service. 
pub type TxpoolWriteHandle = DatabaseWriteHandle; diff --git a/storage/txpool/src/service/write.rs b/storage/txpool/src/service/write.rs index 13ab81f..23c5a8a 100644 --- a/storage/txpool/src/service/write.rs +++ b/storage/txpool/src/service/write.rs @@ -1,6 +1,8 @@ use std::sync::Arc; -use cuprate_database::{ConcreteEnv, DatabaseRo, DatabaseRw, Env, EnvInner, RuntimeError, TxRw}; +use cuprate_database::{ + ConcreteEnv, DatabaseRo, DatabaseRw, DbResult, Env, EnvInner, RuntimeError, TxRw, +}; use cuprate_database_service::DatabaseWriteHandle; use cuprate_types::TransactionVerificationData; @@ -25,7 +27,7 @@ pub(super) fn init_write_service(env: Arc) -> TxpoolWriteHandle { fn handle_txpool_request( env: &ConcreteEnv, req: &TxpoolWriteRequest, -) -> Result { +) -> DbResult { match req { TxpoolWriteRequest::AddTransaction { tx, state_stem } => { add_transaction(env, tx, *state_stem) @@ -50,7 +52,7 @@ fn add_transaction( env: &ConcreteEnv, tx: &TransactionVerificationData, state_stem: bool, -) -> Result { +) -> DbResult { let env_inner = env.env_inner(); let tx_rw = env_inner.tx_rw()?; @@ -83,7 +85,7 @@ fn add_transaction( fn remove_transaction( env: &ConcreteEnv, tx_hash: &TransactionHash, -) -> Result { +) -> DbResult { let env_inner = env.env_inner(); let tx_rw = env_inner.tx_rw()?; @@ -105,10 +107,7 @@ fn remove_transaction( } /// [`TxpoolWriteRequest::Promote`] -fn promote( - env: &ConcreteEnv, - tx_hash: &TransactionHash, -) -> Result { +fn promote(env: &ConcreteEnv, tx_hash: &TransactionHash) -> DbResult { let env_inner = env.env_inner(); let tx_rw = env_inner.tx_rw()?; @@ -134,10 +133,7 @@ fn promote( } /// [`TxpoolWriteRequest::NewBlock`] -fn new_block( - env: &ConcreteEnv, - spent_key_images: &[KeyImage], -) -> Result { +fn new_block(env: &ConcreteEnv, spent_key_images: &[KeyImage]) -> DbResult { let env_inner = env.env_inner(); let tx_rw = env_inner.tx_rw()?; diff --git a/test-utils/src/data/constants.rs b/test-utils/src/data/constants.rs index 
fff0441..78413ed 100644 --- a/test-utils/src/data/constants.rs +++ b/test-utils/src/data/constants.rs @@ -104,7 +104,7 @@ macro_rules! const_tx_blob { hash: $hash:literal, // Transaction hash data_path: $data_path:literal, // Path to the transaction blob version: $version:literal, // Transaction version - timelock: $timelock:expr, // Transaction's timelock (use the real type `Timelock`) + timelock: $timelock:expr_2021, // Transaction's timelock (use the real type `Timelock`) input_len: $input_len:literal, // Amount of inputs output_len: $output_len:literal, // Amount of outputs ) => { diff --git a/test-utils/src/rpc/data/macros.rs b/test-utils/src/rpc/data/macros.rs index 63a214c..5f87c53 100644 --- a/test-utils/src/rpc/data/macros.rs +++ b/test-utils/src/rpc/data/macros.rs @@ -25,11 +25,11 @@ macro_rules! define_request_and_response { // The request type (and any doc comments, derives, etc). $( #[$request_attr:meta] )* - Request = $request:expr; + Request = $request:expr_2021; // The response type (and any doc comments, derives, etc). $( #[$response_attr:meta] )* - Response = $response:expr; + Response = $response:expr_2021; ) => { paste::paste! { #[doc = $crate::rpc::data::macros::define_request_and_response_doc!( "response" => [<$name:upper _RESPONSE>], From ecd077b402cd907e9ffc23eb5904a3cab7e479de Mon Sep 17 00:00:00 2001 From: Boog900 Date: Tue, 3 Dec 2024 15:17:21 +0000 Subject: [PATCH 103/104] cuprated: config & args (#304) * init config * split sections * finish initial config. * fix clap * misc changes * fix doc * fix test & clippy * fix test 2 * try fix windows * testing * testing 2 * fix windows test * fix windows: the remix. 
* review comments * fix imports * rename & fix default config file * fix cargo hack * enable serde on `cuprate-helper` * changes from matrix chats * fix ci * fix doc * fix doc test * move Cuprated.toml * remove default.rs * `size` -> `bytes` * `addressbook_path` -> `address_book_path` * fix config output * fix ci * Update binaries/cuprated/src/config/args.rs Co-authored-by: hinto-janai --------- Co-authored-by: hinto-janai --- Cargo.lock | 40 +++- Cargo.toml | 2 + binaries/cuprated/Cargo.toml | 59 +++--- binaries/cuprated/Cuprated.toml | 67 +++++++ binaries/cuprated/src/config.rs | 158 ++++++++++++++++ binaries/cuprated/src/config/args.rs | 55 ++++++ binaries/cuprated/src/config/fs.rs | 21 +++ binaries/cuprated/src/config/p2p.rs | 178 ++++++++++++++++++ binaries/cuprated/src/config/storage.rs | 67 +++++++ .../cuprated/src/config/tracing_config.rs | 42 +++++ binaries/cuprated/src/constants.rs | 8 + binaries/cuprated/src/main.rs | 2 + helper/Cargo.toml | 2 + helper/src/fs.rs | 95 ++++++---- helper/src/network.rs | 34 +++- p2p/address-book/Cargo.toml | 2 +- p2p/address-book/src/book/tests.rs | 2 +- p2p/address-book/src/lib.rs | 9 +- p2p/address-book/src/store.rs | 11 +- p2p/p2p/src/block_downloader.rs | 16 +- p2p/p2p/src/block_downloader/tests.rs | 8 +- storage/blockchain/Cargo.toml | 2 +- storage/blockchain/README.md | 2 +- storage/blockchain/src/config.rs | 74 +++++--- storage/blockchain/src/ops/mod.rs | 2 +- storage/blockchain/src/service/mod.rs | 2 +- storage/blockchain/src/service/tests.rs | 3 +- storage/blockchain/src/tests.rs | 4 +- storage/txpool/Cargo.toml | 2 +- storage/txpool/README.md | 2 +- storage/txpool/src/config.rs | 84 +++++---- storage/txpool/src/ops.rs | 2 +- storage/txpool/src/service.rs | 2 +- 33 files changed, 888 insertions(+), 171 deletions(-) create mode 100644 binaries/cuprated/Cuprated.toml create mode 100644 binaries/cuprated/src/config/args.rs create mode 100644 binaries/cuprated/src/config/fs.rs create mode 100644 
binaries/cuprated/src/config/p2p.rs create mode 100644 binaries/cuprated/src/config/storage.rs create mode 100644 binaries/cuprated/src/config/tracing_config.rs diff --git a/Cargo.lock b/Cargo.lock index ac36c56..0d55c8a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -446,6 +446,7 @@ checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstyle", "clap_lex", + "terminal_size", ] [[package]] @@ -933,6 +934,7 @@ dependencies = [ "libc", "monero-serai", "rayon", + "serde", "tokio", "windows", ] @@ -1188,7 +1190,6 @@ dependencies = [ "cuprate-consensus", "cuprate-consensus-context", "cuprate-consensus-rules", - "cuprate-constants", "cuprate-cryptonight", "cuprate-dandelion-tower", "cuprate-database", @@ -1230,6 +1231,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", + "toml", "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", "tracing-subscriber", @@ -2904,6 +2906,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -3121,6 +3132,16 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "terminal_size" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" +dependencies = [ + "rustix", + "windows-sys 0.59.0", +] + [[package]] name = "thiserror" version = "1.0.66" @@ -3262,11 +3283,26 @@ dependencies = [ "tracing", ] +[[package]] +name = "toml" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + [[package]] 
name = "toml_datetime" version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +dependencies = [ + "serde", +] [[package]] name = "toml_edit" @@ -3275,6 +3311,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap", + "serde", + "serde_spanned", "toml_datetime", "winnow", ] diff --git a/Cargo.toml b/Cargo.toml index 3cc3ab1..9be1528 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,6 +55,7 @@ members = [ ] [profile.release] +panic = "abort" lto = true # Build with LTO strip = "none" # Keep panic stack traces codegen-units = 1 # Optimize for binary speed over compile times @@ -144,6 +145,7 @@ tokio-util = { version = "0.7", default-features = false } tokio-stream = { version = "0.1", default-features = false } tokio = { version = "1", default-features = false } tower = { git = "https://github.com/Cuprate/tower.git", rev = "6c7faf0", default-features = false } # +toml = { version = "0.8", default-features = false } tracing-subscriber = { version = "0.3", default-features = false } tracing = { version = "0.1", default-features = false } diff --git a/binaries/cuprated/Cargo.toml b/binaries/cuprated/Cargo.toml index 9ebdd78..acf8827 100644 --- a/binaries/cuprated/Cargo.toml +++ b/binaries/cuprated/Cargo.toml @@ -2,7 +2,7 @@ name = "cuprated" version = "0.0.1" edition = "2021" -description = "The Cuprate Monero Rust node." +description = "The Cuprate Rust Monero node." 
license = "AGPL-3.0-only" authors = ["Boog900", "hinto-janai", "SyntheticBird45"] repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated" @@ -12,29 +12,29 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated" cuprate-consensus = { workspace = true } cuprate-fast-sync = { workspace = true } cuprate-consensus-context = { workspace = true } -cuprate-consensus-rules = { workspace = true } -cuprate-constants = { workspace = true } -cuprate-cryptonight = { workspace = true } -cuprate-helper = { workspace = true } -cuprate-epee-encoding = { workspace = true } -cuprate-fixed-bytes = { workspace = true } -cuprate-levin = { workspace = true } -cuprate-wire = { workspace = true } -cuprate-p2p = { workspace = true } -cuprate-p2p-core = { workspace = true } -cuprate-dandelion-tower = { workspace = true, features = ["txpool"] } -cuprate-async-buffer = { workspace = true } -cuprate-address-book = { workspace = true } -cuprate-blockchain = { workspace = true } -cuprate-database-service = { workspace = true } -cuprate-txpool = { workspace = true } -cuprate-database = { workspace = true } -cuprate-pruning = { workspace = true } -cuprate-test-utils = { workspace = true } -cuprate-types = { workspace = true } -cuprate-json-rpc = { workspace = true } -cuprate-rpc-interface = { workspace = true } -cuprate-rpc-types = { workspace = true } +cuprate-consensus-rules = { workspace = true } +cuprate-cryptonight = { workspace = true } +cuprate-helper = { workspace = true, features = ["serde"] } +cuprate-epee-encoding = { workspace = true } +cuprate-fixed-bytes = { workspace = true } +cuprate-levin = { workspace = true } +cuprate-wire = { workspace = true } +cuprate-p2p = { workspace = true } +cuprate-p2p-core = { workspace = true } +cuprate-dandelion-tower = { workspace = true, features = ["txpool"] } +cuprate-async-buffer = { workspace = true } +cuprate-address-book = { workspace = true } +cuprate-blockchain = { workspace = true } 
+cuprate-database-service = { workspace = true, features = ["serde"] } +cuprate-txpool = { workspace = true } +cuprate-database = { workspace = true, features = ["serde"] } +cuprate-pruning = { workspace = true } +cuprate-test-utils = { workspace = true } +cuprate-types = { workspace = true } +cuprate-json-rpc = { workspace = true } +cuprate-rpc-interface = { workspace = true } +cuprate-rpc-types = { workspace = true } + # TODO: after v1.0.0, remove unneeded dependencies. anyhow = { workspace = true } @@ -44,7 +44,7 @@ borsh = { workspace = true } bytemuck = { workspace = true } bytes = { workspace = true } cfg-if = { workspace = true } -clap = { workspace = true, features = ["cargo"] } +clap = { workspace = true, features = ["cargo", "help", "wrap_help"] } chrono = { workspace = true } crypto-bigint = { workspace = true } crossbeam = { workspace = true } @@ -71,15 +71,10 @@ thread_local = { workspace = true } tokio-util = { workspace = true } tokio-stream = { workspace = true } tokio = { workspace = true } +toml = { workspace = true, features = ["parse", "display"]} tower = { workspace = true } tracing-subscriber = { workspace = true, features = ["std", "fmt", "default"] } -tracing = { workspace = true } +tracing = { workspace = true, features = ["default"] } [lints] workspace = true - -[profile.dev] -panic = "abort" - -[profile.release] -panic = "abort" diff --git a/binaries/cuprated/Cuprated.toml b/binaries/cuprated/Cuprated.toml new file mode 100644 index 0000000..d248ce1 --- /dev/null +++ b/binaries/cuprated/Cuprated.toml @@ -0,0 +1,67 @@ +# ____ _ +# / ___| _ _ __ _ __ __ _| |_ ___ +# | | | | | | '_ \| '__/ _` | __/ _ \ +# | |__| |_| | |_) | | | (_| | || __/ +# \____\__,_| .__/|_| \__,_|\__\___| +# |_| +# + +## The network to run on, valid values: "Mainnet", "Testnet", "Stagenet". +network = "Mainnet" + +## Tracing config. +[tracing] +## The minimum level for log events to be displayed. +level = "info" + +## Clear-net config. 
+[p2p.clear_net]
+## The number of outbound connections we should make and maintain.
+outbound_connections = 64
+## The number of extra connections we should make under load from the rest of Cuprate, i.e. when syncing.
+extra_outbound_connections = 8
+## The maximum number of inbound connections we should allow.
+max_inbound_connections = 128
+## The percent of outbound connections that should be to nodes we have not connected to before.
+gray_peers_percent = 0.7
+## The port to accept connections on, if left `0` no connections will be accepted.
+p2p_port = 0
+## The IP address to listen to connections on.
+listen_on = "0.0.0.0"
+
+## The Clear-net addressbook config.
+[p2p.clear_net.address_book_config]
+## The size of the white peer list, which contains peers we have made a connection to before.
+max_white_list_length = 1_000
+## The size of the gray peer list, which contains peers we have not made a connection to before.
+max_gray_list_length = 5_000
+## The amount of time between address book saves.
+peer_save_period = { secs = 90, nanos = 0 }
+
+## The block downloader config.
+[p2p.block_downloader]
+## The size of the buffer of sequential blocks waiting to be verified and added to the chain (bytes).
+buffer_bytes = 50_000_000
+## The size of the queue of blocks which are waiting for a parent block to be downloaded (bytes).
+in_progress_queue_bytes = 50_000_000
+## The target size of a batch of blocks (bytes), must not exceed 100MB.
+target_batch_bytes = 5_000_000
+## The amount of time between checking the pool of connected peers for free peers to download blocks.
+check_client_pool_interval = { secs = 30, nanos = 0 }
+
+## Storage config.
+[storage]
+## The amount of reader threads to spawn.
+reader_threads = "OnePerThread"
+
+## Txpool storage config.
+[storage.txpool]
+## The database sync mode for the txpool.
+sync_mode = "Async"
+## The maximum size of all the txs in the pool (bytes).
+max_txpool_byte_size = 100_000_000
+
+## Blockchain storage config.
+[storage.blockchain] +## The database sync mode for the blockchain. +sync_mode = "Async" diff --git a/binaries/cuprated/src/config.rs b/binaries/cuprated/src/config.rs index d613c1f..c6267a6 100644 --- a/binaries/cuprated/src/config.rs +++ b/binaries/cuprated/src/config.rs @@ -1 +1,159 @@ //! cuprated config +use std::{ + fs::{read_to_string, File}, + io, + path::Path, + time::Duration, +}; + +use clap::Parser; +use serde::{Deserialize, Serialize}; + +use cuprate_consensus::ContextConfig; +use cuprate_helper::{ + fs::{CUPRATE_CONFIG_DIR, DEFAULT_CONFIG_FILE_NAME}, + network::Network, +}; +use cuprate_p2p::block_downloader::BlockDownloaderConfig; +use cuprate_p2p_core::{ClearNet, ClearNetServerCfg}; + +mod args; +mod fs; +mod p2p; +mod storage; +mod tracing_config; + +use crate::config::fs::FileSystemConfig; +use p2p::P2PConfig; +use storage::StorageConfig; +use tracing_config::TracingConfig; + +/// Reads the args & config file, returning a [`Config`]. +pub fn read_config_and_args() -> Config { + let args = args::Args::parse(); + args.do_quick_requests(); + + let config: Config = if let Some(config_file) = &args.config_file { + // If a config file was set in the args try to read it and exit if we can't. + match Config::read_from_path(config_file) { + Ok(config) => config, + Err(e) => { + eprintln!("Failed to read config from file: {e}"); + std::process::exit(1); + } + } + } else { + // First attempt to read the config file from the current directory. + std::env::current_dir() + .map(|path| path.join(DEFAULT_CONFIG_FILE_NAME)) + .map_err(Into::into) + .and_then(Config::read_from_path) + .inspect_err(|e| tracing::debug!("Failed to read config from current dir: {e}")) + // otherwise try the main config directory. 
+ .or_else(|_| { + let file = CUPRATE_CONFIG_DIR.join(DEFAULT_CONFIG_FILE_NAME); + Config::read_from_path(file) + }) + .inspect_err(|e| { + tracing::debug!("Failed to read config from config dir: {e}"); + eprintln!("Failed to find/read config file, using default config."); + }) + .unwrap_or_default() + }; + + args.apply_args(config) +} + +/// The config for all of Cuprate. +#[derive(Default, Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct Config { + /// The network we should run on. + network: Network, + + /// [`tracing`] config. + tracing: TracingConfig, + + /// The P2P network config. + p2p: P2PConfig, + + /// The storage config. + storage: StorageConfig, + + fs: FileSystemConfig, +} + +impl Config { + /// Attempts to read a config file in [`toml`] format from the given [`Path`]. + /// + /// # Errors + /// + /// Will return an [`Err`] if the file cannot be read or if the file is not a valid [`toml`] config. + fn read_from_path(file: impl AsRef) -> Result { + let file_text = read_to_string(file.as_ref())?; + + Ok(toml::from_str(&file_text) + .inspect(|_| eprintln!("Using config at: {}", file.as_ref().to_string_lossy())) + .inspect_err(|e| { + eprintln!("{e}"); + eprintln!( + "Failed to parse config file at: {}", + file.as_ref().to_string_lossy() + ); + })?) + } + + /// Returns the current [`Network`] we are running on. + pub const fn network(&self) -> Network { + self.network + } + + /// The [`ClearNet`], [`cuprate_p2p::P2PConfig`]. 
+ pub fn clearnet_p2p_config(&self) -> cuprate_p2p::P2PConfig { + cuprate_p2p::P2PConfig { + network: self.network, + seeds: p2p::clear_net_seed_nodes(self.network), + outbound_connections: self.p2p.clear_net.general.outbound_connections, + extra_outbound_connections: self.p2p.clear_net.general.extra_outbound_connections, + max_inbound_connections: self.p2p.clear_net.general.max_inbound_connections, + gray_peers_percent: self.p2p.clear_net.general.gray_peers_percent, + server_config: Some(ClearNetServerCfg { + ip: self.p2p.clear_net.listen_on, + }), + p2p_port: self.p2p.clear_net.general.p2p_port, + // TODO: set this if a public RPC server is set. + rpc_port: 0, + address_book_config: self + .p2p + .clear_net + .general + .address_book_config(&self.fs.cache_directory, self.network), + } + } + + /// The [`ContextConfig`]. + pub const fn context_config(&self) -> ContextConfig { + match self.network { + Network::Mainnet => ContextConfig::main_net(), + Network::Stagenet => ContextConfig::stage_net(), + Network::Testnet => ContextConfig::test_net(), + } + } + + /// The [`cuprate_blockchain`] config. + pub fn blockchain_config(&self) -> cuprate_blockchain::config::Config { + let blockchain = &self.storage.blockchain; + + // We don't set reader threads as we manually make the reader threadpool. + cuprate_blockchain::config::ConfigBuilder::default() + .network(self.network) + .data_directory(self.fs.data_directory.clone()) + .sync_mode(blockchain.shared.sync_mode) + .build() + } + + /// The [`BlockDownloaderConfig`]. 
+ pub fn block_downloader_config(&self) -> BlockDownloaderConfig { + self.p2p.block_downloader.clone().into() + } +} diff --git a/binaries/cuprated/src/config/args.rs b/binaries/cuprated/src/config/args.rs new file mode 100644 index 0000000..c4c2f9f --- /dev/null +++ b/binaries/cuprated/src/config/args.rs @@ -0,0 +1,55 @@ +use std::{io::Write, path::PathBuf, process::exit}; + +use clap::builder::TypedValueParser; + +use cuprate_helper::network::Network; + +use crate::{config::Config, constants::EXAMPLE_CONFIG}; + +/// Cuprate Args. +#[derive(clap::Parser, Debug)] +#[command(version, about)] +pub struct Args { + /// The network to run on. + #[arg( + long, + default_value_t = Network::Mainnet, + value_parser = clap::builder::PossibleValuesParser::new(["mainnet", "testnet", "stagenet"]) + .map(|s| s.parse::().unwrap()), + )] + pub network: Network, + /// The amount of outbound clear-net connections to maintain. + #[arg(long)] + pub outbound_connections: Option, + /// The PATH of the `cuprated` config file. + #[arg(long)] + pub config_file: Option, + /// Generate a config file and print it to stdout. + #[arg(long)] + pub generate_config: bool, +} + +impl Args { + /// Complete any quick requests asked for in [`Args`]. + /// + /// May cause the process to [`exit`]. + pub fn do_quick_requests(&self) { + if self.generate_config { + println!("{EXAMPLE_CONFIG}"); + exit(0); + } + } + + /// Apply the [`Args`] to the given [`Config`]. + /// + /// This may exit the program if a config value was set that requires an early exit. 
+ pub const fn apply_args(&self, mut config: Config) -> Config { + config.network = self.network; + + if let Some(outbound_connections) = self.outbound_connections { + config.p2p.clear_net.general.outbound_connections = outbound_connections; + } + + config + } +} diff --git a/binaries/cuprated/src/config/fs.rs b/binaries/cuprated/src/config/fs.rs new file mode 100644 index 0000000..f8f6130 --- /dev/null +++ b/binaries/cuprated/src/config/fs.rs @@ -0,0 +1,21 @@ +use std::path::PathBuf; + +use serde::{Deserialize, Serialize}; + +use cuprate_helper::fs::{CUPRATE_CACHE_DIR, CUPRATE_DATA_DIR}; + +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct FileSystemConfig { + pub data_directory: PathBuf, + pub cache_directory: PathBuf, +} + +impl Default for FileSystemConfig { + fn default() -> Self { + Self { + data_directory: CUPRATE_DATA_DIR.to_path_buf(), + cache_directory: CUPRATE_CACHE_DIR.to_path_buf(), + } + } +} diff --git a/binaries/cuprated/src/config/p2p.rs b/binaries/cuprated/src/config/p2p.rs new file mode 100644 index 0000000..51f8d0d --- /dev/null +++ b/binaries/cuprated/src/config/p2p.rs @@ -0,0 +1,178 @@ +use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + path::Path, + time::Duration, +}; + +use serde::{Deserialize, Serialize}; + +use cuprate_helper::{fs::address_book_path, network::Network}; + +/// P2P config. +#[derive(Default, Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct P2PConfig { + /// Clear-net config. + pub clear_net: ClearNetConfig, + /// Block downloader config. + pub block_downloader: BlockDownloaderConfig, +} + +#[derive(Clone, Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct BlockDownloaderConfig { + /// The size in bytes of the buffer between the block downloader and the place which + /// is consuming the downloaded blocks. + pub buffer_bytes: usize, + /// The size of the in progress queue (in bytes) at which we stop requesting more blocks. 
+ pub in_progress_queue_bytes: usize, + /// The [`Duration`] between checking the client pool for free peers. + pub check_client_pool_interval: Duration, + /// The target size of a single batch of blocks (in bytes). + pub target_batch_bytes: usize, +} + +impl From for cuprate_p2p::block_downloader::BlockDownloaderConfig { + fn from(value: BlockDownloaderConfig) -> Self { + Self { + buffer_bytes: value.buffer_bytes, + in_progress_queue_bytes: value.in_progress_queue_bytes, + check_client_pool_interval: value.check_client_pool_interval, + target_batch_bytes: value.target_batch_bytes, + initial_batch_len: 1, + } + } +} + +impl Default for BlockDownloaderConfig { + fn default() -> Self { + Self { + buffer_bytes: 50_000_000, + in_progress_queue_bytes: 50_000_000, + check_client_pool_interval: Duration::from_secs(30), + target_batch_bytes: 5_000_000, + } + } +} + +/// The config values for P2P clear-net. +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct ClearNetConfig { + /// The server config. + pub listen_on: IpAddr, + #[serde(flatten)] + pub general: SharedNetConfig, +} + +impl Default for ClearNetConfig { + fn default() -> Self { + Self { + listen_on: IpAddr::V4(Ipv4Addr::UNSPECIFIED), + general: Default::default(), + } + } +} + +/// Network config values shared between all network zones. +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct SharedNetConfig { + /// The number of outbound connections to make and try keep. + pub outbound_connections: usize, + /// The amount of extra connections we can make if we are under load from the rest of Cuprate. + pub extra_outbound_connections: usize, + /// The maximum amount of inbound connections + pub max_inbound_connections: usize, + /// The percent of connections that should be to peers we haven't connected to before. + pub gray_peers_percent: f64, + /// port to use to accept p2p connections. + pub p2p_port: u16, + /// The address book config. 
+ address_book_config: AddressBookConfig, +} + +impl SharedNetConfig { + /// Returns the [`AddressBookConfig`]. + pub fn address_book_config( + &self, + cache_dir: &Path, + network: Network, + ) -> cuprate_address_book::AddressBookConfig { + cuprate_address_book::AddressBookConfig { + max_white_list_length: self.address_book_config.max_white_list_length, + max_gray_list_length: self.address_book_config.max_gray_list_length, + peer_store_directory: address_book_path(cache_dir, network), + peer_save_period: self.address_book_config.peer_save_period, + } + } +} + +impl Default for SharedNetConfig { + fn default() -> Self { + Self { + outbound_connections: 64, + extra_outbound_connections: 8, + max_inbound_connections: 128, + gray_peers_percent: 0.7, + p2p_port: 0, + address_book_config: AddressBookConfig::default(), + } + } +} + +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct AddressBookConfig { + max_white_list_length: usize, + max_gray_list_length: usize, + peer_save_period: Duration, +} + +impl Default for AddressBookConfig { + fn default() -> Self { + Self { + max_white_list_length: 1_000, + max_gray_list_length: 5_000, + peer_save_period: Duration::from_secs(30), + } + } +} + +/// Seed nodes for [`ClearNet`](cuprate_p2p_core::ClearNet). 
+pub fn clear_net_seed_nodes(network: Network) -> Vec { + let seeds = match network { + Network::Mainnet => [ + "176.9.0.187:18080", + "88.198.163.90:18080", + "66.85.74.134:18080", + "51.79.173.165:18080", + "192.99.8.110:18080", + "37.187.74.171:18080", + "77.172.183.193:18080", + ] + .as_slice(), + Network::Stagenet => [ + "176.9.0.187:38080", + "51.79.173.165:38080", + "192.99.8.110:38080", + "37.187.74.171:38080", + "77.172.183.193:38080", + ] + .as_slice(), + Network::Testnet => [ + "176.9.0.187:28080", + "51.79.173.165:28080", + "192.99.8.110:28080", + "37.187.74.171:28080", + "77.172.183.193:28080", + ] + .as_slice(), + }; + + seeds + .iter() + .map(|s| s.parse()) + .collect::>() + .unwrap() +} diff --git a/binaries/cuprated/src/config/storage.rs b/binaries/cuprated/src/config/storage.rs new file mode 100644 index 0000000..b3e3c1f --- /dev/null +++ b/binaries/cuprated/src/config/storage.rs @@ -0,0 +1,67 @@ +use std::path::PathBuf; + +use serde::{Deserialize, Serialize}; + +use cuprate_database::config::SyncMode; +use cuprate_database_service::ReaderThreads; +use cuprate_helper::fs::CUPRATE_DATA_DIR; + +/// The storage config. +#[derive(Default, Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct StorageConfig { + /// The amount of reader threads to spawn between the tx-pool and blockchain. + pub reader_threads: ReaderThreads, + /// The tx-pool config. + pub txpool: TxpoolConfig, + /// The blockchain config. + pub blockchain: BlockchainConfig, +} + +/// The blockchain config. +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct BlockchainConfig { + #[serde(flatten)] + pub shared: SharedStorageConfig, +} + +impl Default for BlockchainConfig { + fn default() -> Self { + Self { + shared: SharedStorageConfig { + sync_mode: SyncMode::Async, + }, + } + } +} + +/// The tx-pool config. 
+#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct TxpoolConfig { + #[serde(flatten)] + pub shared: SharedStorageConfig, + + /// The maximum size of the tx-pool. + pub max_txpool_byte_size: usize, +} + +impl Default for TxpoolConfig { + fn default() -> Self { + Self { + shared: SharedStorageConfig { + sync_mode: SyncMode::Async, + }, + max_txpool_byte_size: 100_000_000, + } + } +} + +/// Config values shared between the tx-pool and blockchain. +#[derive(Default, Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct SharedStorageConfig { + /// The [`SyncMode`] of the database. + pub sync_mode: SyncMode, +} diff --git a/binaries/cuprated/src/config/tracing_config.rs b/binaries/cuprated/src/config/tracing_config.rs new file mode 100644 index 0000000..859d516 --- /dev/null +++ b/binaries/cuprated/src/config/tracing_config.rs @@ -0,0 +1,42 @@ +use serde::{Deserialize, Serialize}; +use tracing::level_filters::LevelFilter; + +/// [`tracing`] config. +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct TracingConfig { + /// The default minimum log level. 
+ #[serde(with = "level_filter_serde")] + level: LevelFilter, +} + +impl Default for TracingConfig { + fn default() -> Self { + Self { + level: LevelFilter::INFO, + } + } +} + +mod level_filter_serde { + use std::str::FromStr; + + use serde::{Deserialize, Deserializer, Serializer}; + use tracing::level_filters::LevelFilter; + + #[expect(clippy::trivially_copy_pass_by_ref, reason = "serde")] + pub fn serialize(level_filter: &LevelFilter, s: S) -> Result + where + S: Serializer, + { + s.serialize_str(&level_filter.to_string()) + } + + pub fn deserialize<'de, D>(d: D) -> Result + where + D: Deserializer<'de>, + { + let s = String::deserialize(d)?; + LevelFilter::from_str(&s).map_err(serde::de::Error::custom) + } +} diff --git a/binaries/cuprated/src/constants.rs b/binaries/cuprated/src/constants.rs index 2f3c7bb..057e8bd 100644 --- a/binaries/cuprated/src/constants.rs +++ b/binaries/cuprated/src/constants.rs @@ -18,9 +18,12 @@ pub const VERSION_BUILD: &str = if cfg!(debug_assertions) { pub const PANIC_CRITICAL_SERVICE_ERROR: &str = "A service critical to Cuprate's function returned an unexpected error."; +pub const EXAMPLE_CONFIG: &str = include_str!("../Cuprated.toml"); + #[cfg(test)] mod test { use super::*; + use crate::config::Config; #[test] fn version() { @@ -35,4 +38,9 @@ mod test { assert_eq!(VERSION_BUILD, "0.0.1-release"); } } + + #[test] + fn generate_config_text_is_valid() { + let config: Config = toml::from_str(EXAMPLE_CONFIG).unwrap(); + } } diff --git a/binaries/cuprated/src/main.rs b/binaries/cuprated/src/main.rs index d5c832e..617c5b6 100644 --- a/binaries/cuprated/src/main.rs +++ b/binaries/cuprated/src/main.rs @@ -29,6 +29,8 @@ fn main() { // Initialize global static `LazyLock` data. statics::init_lazylock_statics(); + let _config = config::read_config_and_args(); + // TODO: everything else. 
todo!() } diff --git a/helper/Cargo.toml b/helper/Cargo.toml index 1b3158f..716beb3 100644 --- a/helper/Cargo.toml +++ b/helper/Cargo.toml @@ -35,6 +35,8 @@ futures = { workspace = true, optional = true, features = ["std"] } monero-serai = { workspace = true, optional = true } rayon = { workspace = true, optional = true } +serde = { workspace = true, optional = true, features = ["derive"] } + # This is kinda a stupid work around. # [thread] needs to activate one of these libs (windows|libc) # although it depends on what target we're building for. diff --git a/helper/src/fs.rs b/helper/src/fs.rs index 5d62a64..f694f62 100644 --- a/helper/src/fs.rs +++ b/helper/src/fs.rs @@ -28,7 +28,12 @@ //! - //---------------------------------------------------------------------------------------------------- Use -use std::{path::PathBuf, sync::LazyLock}; +use std::{ + path::{Path, PathBuf}, + sync::LazyLock, +}; + +use crate::network::Network; //---------------------------------------------------------------------------------------------------- Const /// Cuprate's main directory. @@ -58,6 +63,9 @@ pub const CUPRATE_DIR: &str = { } }; +/// The default name of Cuprate's config file. +pub const DEFAULT_CONFIG_FILE_NAME: &str = "Cuprated.toml"; + //---------------------------------------------------------------------------------------------------- Directories /// Create a `LazyLock` for common PATHs used by Cuprate. /// @@ -150,32 +158,61 @@ impl_path_lazylock! { CUPRATE_DATA_DIR, data_dir, "", +} - /// Cuprate's blockchain directory. - /// - /// This is the PATH used for any Cuprate blockchain files. 
- /// - /// | OS | PATH | - /// |---------|----------------------------------------------------------------| - /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\blockchain\` | - /// | macOS | `/Users/Alice/Library/Application Support/Cuprate/blockchain/` | - /// | Linux | `/home/alice/.local/share/cuprate/blockchain/` | - CUPRATE_BLOCKCHAIN_DIR, - data_dir, - "blockchain", +/// Joins the [`Network`] to the [`Path`]. +/// +/// This will keep the path the same for [`Network::Mainnet`]. +fn path_with_network(path: &Path, network: Network) -> PathBuf { + match network { + Network::Mainnet => path.to_path_buf(), + network => path.join(network.to_string()), + } +} - /// Cuprate's transaction pool directory. - /// - /// This is the PATH used for any Cuprate txpool files. - /// - /// | OS | PATH | - /// |---------|------------------------------------------------------------| - /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\txpool\` | - /// | macOS | `/Users/Alice/Library/Application Support/Cuprate/txpool/` | - /// | Linux | `/home/alice/.local/share/cuprate/txpool/` | - CUPRATE_TXPOOL_DIR, - data_dir, - "txpool", +/// Cuprate's blockchain directory. +/// +/// This is the PATH used for any Cuprate blockchain files. +/// +/// ```rust +/// use cuprate_helper::{network::Network, fs::{CUPRATE_DATA_DIR, blockchain_path}}; +/// +/// assert_eq!(blockchain_path(&**CUPRATE_DATA_DIR, Network::Mainnet).as_path(), CUPRATE_DATA_DIR.join("blockchain")); +/// assert_eq!(blockchain_path(&**CUPRATE_DATA_DIR, Network::Stagenet).as_path(), CUPRATE_DATA_DIR.join(Network::Stagenet.to_string()).join("blockchain")); +/// assert_eq!(blockchain_path(&**CUPRATE_DATA_DIR, Network::Testnet).as_path(), CUPRATE_DATA_DIR.join(Network::Testnet.to_string()).join("blockchain")); +/// ``` +pub fn blockchain_path(data_dir: &Path, network: Network) -> PathBuf { + path_with_network(data_dir, network).join("blockchain") +} + +/// Cuprate's txpool directory. 
+/// +/// This is the PATH used for any Cuprate txpool files. +/// +/// ```rust +/// use cuprate_helper::{network::Network, fs::{CUPRATE_DATA_DIR, txpool_path}}; +/// +/// assert_eq!(txpool_path(&**CUPRATE_DATA_DIR, Network::Mainnet).as_path(), CUPRATE_DATA_DIR.join("txpool")); +/// assert_eq!(txpool_path(&**CUPRATE_DATA_DIR, Network::Stagenet).as_path(), CUPRATE_DATA_DIR.join(Network::Stagenet.to_string()).join("txpool")); +/// assert_eq!(txpool_path(&**CUPRATE_DATA_DIR, Network::Testnet).as_path(), CUPRATE_DATA_DIR.join(Network::Testnet.to_string()).join("txpool")); +/// ``` +pub fn txpool_path(data_dir: &Path, network: Network) -> PathBuf { + path_with_network(data_dir, network).join("txpool") +} + +/// Cuprate's address-book directory. +/// +/// This is the PATH used for any Cuprate address-book files. +/// +/// ```rust +/// use cuprate_helper::{network::Network, fs::{CUPRATE_CACHE_DIR, address_book_path}}; +/// +/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Mainnet).as_path(), CUPRATE_CACHE_DIR.join("addressbook")); +/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Stagenet).as_path(), CUPRATE_CACHE_DIR.join(Network::Stagenet.to_string()).join("addressbook")); +/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Testnet).as_path(), CUPRATE_CACHE_DIR.join(Network::Testnet.to_string()).join("addressbook")); +/// ``` +pub fn address_book_path(cache_dir: &Path, network: Network) -> PathBuf { + path_with_network(cache_dir, network).join("addressbook") } //---------------------------------------------------------------------------------------------------- Tests @@ -197,29 +234,21 @@ mod test { (&*CUPRATE_CACHE_DIR, ""), (&*CUPRATE_CONFIG_DIR, ""), (&*CUPRATE_DATA_DIR, ""), - (&*CUPRATE_BLOCKCHAIN_DIR, ""), - (&*CUPRATE_TXPOOL_DIR, ""), ]; if cfg!(target_os = "windows") { array[0].1 = r"AppData\Local\Cuprate"; array[1].1 = r"AppData\Roaming\Cuprate"; array[2].1 = r"AppData\Roaming\Cuprate"; - array[3].1 = 
r"AppData\Roaming\Cuprate\blockchain"; - array[4].1 = r"AppData\Roaming\Cuprate\txpool"; } else if cfg!(target_os = "macos") { array[0].1 = "Library/Caches/Cuprate"; array[1].1 = "Library/Application Support/Cuprate"; array[2].1 = "Library/Application Support/Cuprate"; - array[3].1 = "Library/Application Support/Cuprate/blockchain"; - array[4].1 = "Library/Application Support/Cuprate/txpool"; } else { // Assumes Linux. array[0].1 = ".cache/cuprate"; array[1].1 = ".config/cuprate"; array[2].1 = ".local/share/cuprate"; - array[3].1 = ".local/share/cuprate/blockchain"; - array[4].1 = ".local/share/cuprate/txpool"; }; for (path, expected) in array { diff --git a/helper/src/network.rs b/helper/src/network.rs index f3224b3..d296117 100644 --- a/helper/src/network.rs +++ b/helper/src/network.rs @@ -5,6 +5,12 @@ //! into it's own crate. //! //! `#[no_std]` compatible. +// TODO: move to types crate. + +use core::{ + fmt::{Display, Formatter}, + str::FromStr, +}; const MAINNET_NETWORK_ID: [u8; 16] = [ 0x12, 0x30, 0xF1, 0x71, 0x61, 0x04, 0x41, 0x61, 0x17, 0x31, 0x00, 0x82, 0x16, 0xA1, 0xA1, 0x10, @@ -17,7 +23,8 @@ const STAGENET_NETWORK_ID: [u8; 16] = [ ]; /// An enum representing every Monero network. 
-#[derive(Debug, Clone, Copy, Default)] +#[derive(Debug, Clone, Copy, Default, Ord, PartialOrd, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] pub enum Network { /// Mainnet #[default] @@ -38,3 +45,28 @@ impl Network { } } } + +#[derive(Debug, PartialEq, Eq)] +pub struct ParseNetworkError; + +impl FromStr for Network { + type Err = ParseNetworkError; + + fn from_str(s: &str) -> Result { + match s { + "mainnet" | "Mainnet" => Ok(Self::Mainnet), + "testnet" | "Testnet" => Ok(Self::Testnet), + "stagenet" | "Stagenet" => Ok(Self::Stagenet), + _ => Err(ParseNetworkError), + } + } +} +impl Display for Network { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + f.write_str(match self { + Self::Mainnet => "mainnet", + Self::Testnet => "testnet", + Self::Stagenet => "stagenet", + }) + } +} diff --git a/p2p/address-book/Cargo.toml b/p2p/address-book/Cargo.toml index a88819f..d57cfde 100644 --- a/p2p/address-book/Cargo.toml +++ b/p2p/address-book/Cargo.toml @@ -23,7 +23,7 @@ indexmap = { workspace = true, features = ["std"] } rand = { workspace = true, features = ["std", "std_rng"] } -borsh = { workspace = true, features = ["derive", "std"]} +borsh = { workspace = true, features = ["derive", "std"] } [dev-dependencies] cuprate-test-utils = { workspace = true } diff --git a/p2p/address-book/src/book/tests.rs b/p2p/address-book/src/book/tests.rs index 216fcfa..b2c4c49 100644 --- a/p2p/address-book/src/book/tests.rs +++ b/p2p/address-book/src/book/tests.rs @@ -15,7 +15,7 @@ fn test_cfg() -> AddressBookConfig { AddressBookConfig { max_white_list_length: 100, max_gray_list_length: 500, - peer_store_file: PathBuf::new(), + peer_store_directory: PathBuf::new(), peer_save_period: Duration::from_secs(60), } } diff --git a/p2p/address-book/src/lib.rs b/p2p/address-book/src/lib.rs index c090348..054be46 100644 --- a/p2p/address-book/src/lib.rs +++ b/p2p/address-book/src/lib.rs @@ -29,8 +29,8 @@ pub struct AddressBookConfig { 
/// /// Gray peers are peers we are yet to make a connection to. pub max_gray_list_length: usize, - /// The location to store the address book. - pub peer_store_file: PathBuf, + /// The location to store the peer store files. + pub peer_store_directory: PathBuf, /// The amount of time between saving the address book to disk. pub peer_save_period: Duration, } @@ -63,11 +63,6 @@ pub enum AddressBookError { pub async fn init_address_book( cfg: AddressBookConfig, ) -> Result, std::io::Error> { - tracing::info!( - "Loading peers from file: {} ", - cfg.peer_store_file.display() - ); - let (white_list, gray_list) = match store::read_peers_from_disk::(&cfg).await { Ok(res) => res, Err(e) if e.kind() == ErrorKind::NotFound => (vec![], vec![]), diff --git a/p2p/address-book/src/store.rs b/p2p/address-book/src/store.rs index 9abf0c3..47994ae 100644 --- a/p2p/address-book/src/store.rs +++ b/p2p/address-book/src/store.rs @@ -39,7 +39,9 @@ pub(crate) fn save_peers_to_disk( }) .unwrap(); - let file = cfg.peer_store_file.clone(); + let file = cfg + .peer_store_directory + .join(format!("{}_p2p_state", Z::NAME)); spawn_blocking(move || fs::write(&file, &data)) } @@ -52,7 +54,12 @@ pub(crate) async fn read_peers_from_disk( ), std::io::Error, > { - let file = cfg.peer_store_file.clone(); + let file = cfg + .peer_store_directory + .join(format!("{}_p2p_state", Z::NAME)); + + tracing::info!("Loading peers from file: {} ", file.display()); + let data = spawn_blocking(move || fs::read(file)).await.unwrap()?; let de_ser: DeserPeerDataV1 = from_slice(&data)?; diff --git a/p2p/p2p/src/block_downloader.rs b/p2p/p2p/src/block_downloader.rs index faac4d5..db10300 100644 --- a/p2p/p2p/src/block_downloader.rs +++ b/p2p/p2p/src/block_downloader.rs @@ -62,15 +62,15 @@ pub struct BlockBatch { pub struct BlockDownloaderConfig { /// The size in bytes of the buffer between the block downloader and the place which /// is consuming the downloaded blocks. 
- pub buffer_size: usize, + pub buffer_bytes: usize, /// The size of the in progress queue (in bytes) at which we stop requesting more blocks. - pub in_progress_queue_size: usize, + pub in_progress_queue_bytes: usize, /// The [`Duration`] between checking the client pool for free peers. pub check_client_pool_interval: Duration, /// The target size of a single batch of blocks (in bytes). - pub target_batch_size: usize, + pub target_batch_bytes: usize, /// The initial amount of blocks to request (in number of blocks) - pub initial_batch_size: usize, + pub initial_batch_len: usize, } /// An error that occurred in the [`BlockDownloader`]. @@ -145,7 +145,7 @@ where + 'static, C::Future: Send + 'static, { - let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_size); + let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_bytes); let block_downloader = BlockDownloader::new(peer_set, our_chain_svc, buffer_appender, config); @@ -242,7 +242,7 @@ where Self { peer_set, our_chain_svc, - amount_of_blocks_to_request: config.initial_batch_size, + amount_of_blocks_to_request: config.initial_batch_len, amount_of_blocks_to_request_updated_at: 0, amount_of_empty_chain_entries: 0, block_download_tasks: JoinSet::new(), @@ -381,7 +381,7 @@ where } // If our ready queue is too large send duplicate requests for the blocks we are waiting on. 
- if self.block_queue.size() >= self.config.in_progress_queue_size { + if self.block_queue.size() >= self.config.in_progress_queue_bytes { return self.request_inflight_batch_again(client); } @@ -565,7 +565,7 @@ where self.amount_of_blocks_to_request = calculate_next_block_batch_size( block_batch.size, block_batch.blocks.len(), - self.config.target_batch_size, + self.config.target_batch_bytes, ); tracing::debug!( diff --git a/p2p/p2p/src/block_downloader/tests.rs b/p2p/p2p/src/block_downloader/tests.rs index 6799482..707b858 100644 --- a/p2p/p2p/src/block_downloader/tests.rs +++ b/p2p/p2p/src/block_downloader/tests.rs @@ -66,11 +66,11 @@ proptest! { genesis: *blockchain.blocks.first().unwrap().0 }, BlockDownloaderConfig { - buffer_size: 1_000, - in_progress_queue_size: 10_000, + buffer_bytes: 1_000, + in_progress_queue_bytes: 10_000, check_client_pool_interval: Duration::from_secs(5), - target_batch_size: 5_000, - initial_batch_size: 1, + target_batch_bytes: 5_000, + initial_batch_len: 1, }); let blocks = stream.map(|blocks| blocks.blocks).concat().await; diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index 6fd973c..92b4374 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -15,7 +15,7 @@ default = ["heed"] heed = ["cuprate-database/heed"] redb = ["cuprate-database/redb"] redb-memory = ["cuprate-database/redb-memory"] -serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"] +serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde", "cuprate-helper/serde"] [dependencies] cuprate-database = { workspace = true } diff --git a/storage/blockchain/README.md b/storage/blockchain/README.md index 3f97a3d..8ab8b43 100644 --- a/storage/blockchain/README.md +++ b/storage/blockchain/README.md @@ -76,7 +76,7 @@ use cuprate_blockchain::{ let tmp_dir = tempfile::tempdir()?; let db_dir = tmp_dir.path().to_owned(); let config = ConfigBuilder::new() - 
.db_directory(db_dir.into()) + .data_directory(db_dir.into()) .build(); // Initialize the database environment. diff --git a/storage/blockchain/src/config.rs b/storage/blockchain/src/config.rs index e4b7606..4bef2cb 100644 --- a/storage/blockchain/src/config.rs +++ b/storage/blockchain/src/config.rs @@ -25,7 +25,7 @@ //! //! let config = ConfigBuilder::new() //! // Use a custom database directory. -//! .db_directory(db_dir.into()) +//! .data_directory(db_dir.into()) //! // Use as many reader threads as possible (when using `service`). //! .reader_threads(ReaderThreads::OnePerThread) //! // Use the fastest sync mode. @@ -41,13 +41,16 @@ //! ``` //---------------------------------------------------------------------------------------------------- Import -use std::{borrow::Cow, path::Path}; +use std::{borrow::Cow, path::PathBuf}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use cuprate_database::{config::SyncMode, resize::ResizeAlgorithm}; -use cuprate_helper::fs::CUPRATE_BLOCKCHAIN_DIR; +use cuprate_helper::{ + fs::{blockchain_path, CUPRATE_DATA_DIR}, + network::Network, +}; // re-exports pub use cuprate_database_service::ReaderThreads; @@ -59,8 +62,9 @@ pub use cuprate_database_service::ReaderThreads; #[derive(Debug, Clone, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct ConfigBuilder { - /// [`Config::db_directory`]. - db_directory: Option>, + network: Network, + + data_dir: Option, /// [`Config::cuprate_database_config`]. db_config: cuprate_database::config::ConfigBuilder, @@ -76,10 +80,12 @@ impl ConfigBuilder { /// after this function to use default values. 
pub fn new() -> Self { Self { - db_directory: None, - db_config: cuprate_database::config::ConfigBuilder::new(Cow::Borrowed( - &*CUPRATE_BLOCKCHAIN_DIR, - )), + network: Network::default(), + data_dir: None, + db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(blockchain_path( + &CUPRATE_DATA_DIR, + Network::Mainnet, + ))), reader_threads: None, } } @@ -87,21 +93,21 @@ impl ConfigBuilder { /// Build into a [`Config`]. /// /// # Default values - /// If [`ConfigBuilder::db_directory`] was not called, - /// the default [`CUPRATE_BLOCKCHAIN_DIR`] will be used. + /// If [`ConfigBuilder::data_directory`] was not called, + /// [`blockchain_path`] with [`CUPRATE_DATA_DIR`] [`Network::Mainnet`] will be used. /// /// For all other values, [`Default::default`] is used. pub fn build(self) -> Config { // INVARIANT: all PATH safety checks are done // in `helper::fs`. No need to do them here. - let db_directory = self - .db_directory - .unwrap_or_else(|| Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR)); + let data_dir = self + .data_dir + .unwrap_or_else(|| CUPRATE_DATA_DIR.to_path_buf()); let reader_threads = self.reader_threads.unwrap_or_default(); let db_config = self .db_config - .db_directory(db_directory) + .db_directory(Cow::Owned(blockchain_path(&data_dir, self.network))) .reader_threads(reader_threads.as_threads()) .build(); @@ -111,10 +117,17 @@ impl ConfigBuilder { } } - /// Set a custom database directory (and file) [`Path`]. + /// Change the network this blockchain database is for. #[must_use] - pub fn db_directory(mut self, db_directory: Cow<'static, Path>) -> Self { - self.db_directory = Some(db_directory); + pub const fn network(mut self, network: Network) -> Self { + self.network = network; + self + } + + /// Set a custom database directory (and file) [`PathBuf`]. 
+ #[must_use] + pub fn data_directory(mut self, db_directory: PathBuf) -> Self { + self.data_dir = Some(db_directory); self } @@ -145,9 +158,7 @@ impl ConfigBuilder { /// Good default for testing, and resource-available machines. #[must_use] pub fn fast(mut self) -> Self { - self.db_config = - cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR)) - .fast(); + self.db_config = self.db_config.fast(); self.reader_threads = Some(ReaderThreads::OnePerThread); self @@ -159,9 +170,7 @@ impl ConfigBuilder { /// Good default for resource-limited machines, e.g. a cheap VPS. #[must_use] pub fn low_power(mut self) -> Self { - self.db_config = - cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR)) - .low_power(); + self.db_config = self.db_config.low_power(); self.reader_threads = Some(ReaderThreads::One); self @@ -170,10 +179,13 @@ impl ConfigBuilder { impl Default for ConfigBuilder { fn default() -> Self { - let db_directory = Cow::Borrowed(&**CUPRATE_BLOCKCHAIN_DIR); Self { - db_directory: Some(db_directory.clone()), - db_config: cuprate_database::config::ConfigBuilder::new(db_directory), + network: Network::default(), + data_dir: Some(CUPRATE_DATA_DIR.to_path_buf()), + db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(blockchain_path( + &CUPRATE_DATA_DIR, + Network::default(), + ))), reader_threads: Some(ReaderThreads::default()), } } @@ -201,7 +213,7 @@ impl Config { /// Create a new [`Config`] with sane default settings. /// /// The [`cuprate_database::config::Config::db_directory`] - /// will be set to [`CUPRATE_BLOCKCHAIN_DIR`]. + /// will be set to [`blockchain_path`] with [`CUPRATE_DATA_DIR`] [`Network::Mainnet`]. /// /// All other values will be [`Default::default`]. 
/// @@ -213,14 +225,14 @@ impl Config { /// resize::ResizeAlgorithm, /// DATABASE_DATA_FILENAME, /// }; - /// use cuprate_helper::fs::*; + /// use cuprate_helper::{fs::*, network::Network}; /// /// use cuprate_blockchain::config::*; /// /// let config = Config::new(); /// - /// assert_eq!(config.db_config.db_directory(), &*CUPRATE_BLOCKCHAIN_DIR); - /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_BLOCKCHAIN_DIR)); + /// assert_eq!(config.db_config.db_directory().as_ref(), blockchain_path(&CUPRATE_DATA_DIR, Network::Mainnet).as_path()); + /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_DATA_DIR)); /// assert!(config.db_config.db_file().ends_with(DATABASE_DATA_FILENAME)); /// assert_eq!(config.db_config.sync_mode, SyncMode::default()); /// assert_eq!(config.db_config.resize_algorithm, ResizeAlgorithm::default()); diff --git a/storage/blockchain/src/ops/mod.rs b/storage/blockchain/src/ops/mod.rs index 285aa24..96b25af 100644 --- a/storage/blockchain/src/ops/mod.rs +++ b/storage/blockchain/src/ops/mod.rs @@ -71,7 +71,7 @@ //! let tmp_dir = tempfile::tempdir()?; //! let db_dir = tmp_dir.path().to_owned(); //! let config = ConfigBuilder::new() -//! .db_directory(db_dir.into()) +//! .data_directory(db_dir.into()) //! .build(); //! //! // Initialize the database environment. diff --git a/storage/blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs index c5eb80c..d6a811b 100644 --- a/storage/blockchain/src/service/mod.rs +++ b/storage/blockchain/src/service/mod.rs @@ -77,7 +77,7 @@ //! let tmp_dir = tempfile::tempdir()?; //! let db_dir = tmp_dir.path().to_owned(); //! let config = ConfigBuilder::new() -//! .db_directory(db_dir.into()) +//! .data_directory(db_dir.into()) //! .build(); //! //! // Initialize the database thread-pool. 
diff --git a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs index 719f361..38db665 100644 --- a/storage/blockchain/src/service/tests.rs +++ b/storage/blockchain/src/service/tests.rs @@ -7,7 +7,6 @@ //---------------------------------------------------------------------------------------------------- Use use std::{ - borrow::Cow, collections::{HashMap, HashSet}, sync::Arc, }; @@ -46,7 +45,7 @@ fn init_service() -> ( ) { let tempdir = tempfile::tempdir().unwrap(); let config = ConfigBuilder::new() - .db_directory(Cow::Owned(tempdir.path().into())) + .data_directory(tempdir.path().into()) .low_power() .build(); let (reader, writer, env) = init(config).unwrap(); diff --git a/storage/blockchain/src/tests.rs b/storage/blockchain/src/tests.rs index 1fe2063..4192f81 100644 --- a/storage/blockchain/src/tests.rs +++ b/storage/blockchain/src/tests.rs @@ -5,7 +5,7 @@ //! - only used internally //---------------------------------------------------------------------------------------------------- Import -use std::{borrow::Cow, fmt::Debug}; +use std::fmt::Debug; use pretty_assertions::assert_eq; @@ -74,7 +74,7 @@ impl AssertTableLen { pub(crate) fn tmp_concrete_env() -> (impl Env, tempfile::TempDir) { let tempdir = tempfile::tempdir().unwrap(); let config = ConfigBuilder::new() - .db_directory(Cow::Owned(tempdir.path().into())) + .data_directory(tempdir.path().into()) .low_power() .build(); let env = crate::open(config).unwrap(); diff --git a/storage/txpool/Cargo.toml b/storage/txpool/Cargo.toml index c908265..0fb43b2 100644 --- a/storage/txpool/Cargo.toml +++ b/storage/txpool/Cargo.toml @@ -15,7 +15,7 @@ default = ["heed"] heed = ["cuprate-database/heed"] redb = ["cuprate-database/redb"] redb-memory = ["cuprate-database/redb-memory"] -serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"] +serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde", "cuprate-helper/serde"] [dependencies] 
cuprate-database = { workspace = true, features = ["heed"] } diff --git a/storage/txpool/README.md b/storage/txpool/README.md index d14f445..ca4f737 100644 --- a/storage/txpool/README.md +++ b/storage/txpool/README.md @@ -78,7 +78,7 @@ use cuprate_txpool::{ let tmp_dir = tempfile::tempdir()?; let db_dir = tmp_dir.path().to_owned(); let config = ConfigBuilder::new() - .db_directory(db_dir.into()) + .data_directory(db_dir.into()) .build(); // Initialize the database environment. diff --git a/storage/txpool/src/config.rs b/storage/txpool/src/config.rs index 1ef0d73..724ae21 100644 --- a/storage/txpool/src/config.rs +++ b/storage/txpool/src/config.rs @@ -1,15 +1,18 @@ //! The transaction pool [`Config`]. -use std::{borrow::Cow, path::Path}; +use std::{borrow::Cow, path::PathBuf}; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; use cuprate_database::{ config::{Config as DbConfig, SyncMode}, resize::ResizeAlgorithm, }; use cuprate_database_service::ReaderThreads; -use cuprate_helper::fs::CUPRATE_TXPOOL_DIR; - -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; +use cuprate_helper::{ + fs::{txpool_path, CUPRATE_DATA_DIR}, + network::Network, +}; /// The default transaction pool weight limit. const DEFAULT_TXPOOL_WEIGHT_LIMIT: usize = 600 * 1024 * 1024; @@ -21,8 +24,9 @@ const DEFAULT_TXPOOL_WEIGHT_LIMIT: usize = 600 * 1024 * 1024; #[derive(Debug, Clone, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct ConfigBuilder { - /// [`Config::db_directory`]. - db_directory: Option>, + network: Network, + + data_dir: Option, /// [`Config::cuprate_database_config`]. db_config: cuprate_database::config::ConfigBuilder, @@ -41,10 +45,12 @@ impl ConfigBuilder { /// after this function to use default values. 
pub fn new() -> Self { Self { - db_directory: None, - db_config: cuprate_database::config::ConfigBuilder::new(Cow::Borrowed( - &*CUPRATE_TXPOOL_DIR, - )), + network: Network::default(), + data_dir: None, + db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(txpool_path( + &CUPRATE_DATA_DIR, + Network::Mainnet, + ))), reader_threads: None, max_txpool_weight: None, } @@ -53,16 +59,16 @@ impl ConfigBuilder { /// Build into a [`Config`]. /// /// # Default values - /// If [`ConfigBuilder::db_directory`] was not called, - /// the default [`CUPRATE_TXPOOL_DIR`] will be used. + /// If [`ConfigBuilder::data_directory`] was not called, + /// [`txpool_path`] with [`CUPRATE_DATA_DIR`] and [`Network::Mainnet`] will be used. /// /// For all other values, [`Default::default`] is used. pub fn build(self) -> Config { // INVARIANT: all PATH safety checks are done // in `helper::fs`. No need to do them here. - let db_directory = self - .db_directory - .unwrap_or_else(|| Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)); + let data_dir = self + .data_dir + .unwrap_or_else(|| CUPRATE_DATA_DIR.to_path_buf()); let reader_threads = self.reader_threads.unwrap_or_default(); @@ -72,7 +78,7 @@ impl ConfigBuilder { let db_config = self .db_config - .db_directory(db_directory) + .db_directory(Cow::Owned(txpool_path(&data_dir, self.network))) .reader_threads(reader_threads.as_threads()) .build(); @@ -83,6 +89,13 @@ impl ConfigBuilder { } } + /// Change the network this database is for. + #[must_use] + pub const fn network(mut self, network: Network) -> Self { + self.network = network; + self + } + /// Sets a new maximum weight for the transaction pool. #[must_use] pub const fn max_txpool_weight(mut self, max_txpool_weight: usize) -> Self { @@ -90,10 +103,10 @@ impl ConfigBuilder { self } - /// Set a custom database directory (and file) [`Path`]. + /// Set a custom data directory [`PathBuf`]. 
#[must_use] - pub fn db_directory(mut self, db_directory: Cow<'static, Path>) -> Self { - self.db_directory = Some(db_directory); + pub fn data_directory(mut self, db_directory: PathBuf) -> Self { + self.data_dir = Some(db_directory); self } @@ -124,9 +137,7 @@ impl ConfigBuilder { /// Good default for testing, and resource-available machines. #[must_use] pub fn fast(mut self) -> Self { - self.db_config = - cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)) - .fast(); + self.db_config = self.db_config.fast(); self.reader_threads = Some(ReaderThreads::OnePerThread); self @@ -138,9 +149,7 @@ impl ConfigBuilder { /// Good default for resource-limited machines, e.g. a cheap VPS. #[must_use] pub fn low_power(mut self) -> Self { - self.db_config = - cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)) - .low_power(); + self.db_config = self.db_config.low_power(); self.reader_threads = Some(ReaderThreads::One); self @@ -149,10 +158,13 @@ impl ConfigBuilder { impl Default for ConfigBuilder { fn default() -> Self { - let db_directory = Cow::Borrowed(CUPRATE_TXPOOL_DIR.as_path()); Self { - db_directory: Some(db_directory.clone()), - db_config: cuprate_database::config::ConfigBuilder::new(db_directory), + network: Network::default(), + data_dir: Some(CUPRATE_DATA_DIR.to_path_buf()), + db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(txpool_path( + &CUPRATE_DATA_DIR, + Network::Mainnet, + ))), reader_threads: Some(ReaderThreads::default()), max_txpool_weight: Some(DEFAULT_TXPOOL_WEIGHT_LIMIT), } @@ -184,7 +196,7 @@ impl Config { /// Create a new [`Config`] with sane default settings. /// /// The [`DbConfig::db_directory`] - /// will be set to [`CUPRATE_TXPOOL_DIR`]. + /// will be set to [`txpool_path`] with [`CUPRATE_DATA_DIR`] and [`Network::Mainnet`]. /// /// All other values will be [`Default::default`]. 
/// @@ -197,25 +209,21 @@ impl Config { /// DATABASE_DATA_FILENAME, /// }; /// use cuprate_database_service::ReaderThreads; - /// use cuprate_helper::fs::*; + /// use cuprate_helper::{fs::*, network::Network}; /// /// use cuprate_txpool::Config; /// /// let config = Config::new(); /// - /// assert_eq!(config.db_config.db_directory(), &*CUPRATE_TXPOOL_DIR); - /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_TXPOOL_DIR)); + /// assert_eq!(config.db_config.db_directory(), txpool_path(&CUPRATE_DATA_DIR, Network::Mainnet).as_path()); + /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_DATA_DIR)); /// assert!(config.db_config.db_file().ends_with(DATABASE_DATA_FILENAME)); /// assert_eq!(config.db_config.sync_mode, SyncMode::default()); /// assert_eq!(config.db_config.resize_algorithm, ResizeAlgorithm::default()); /// assert_eq!(config.reader_threads, ReaderThreads::default()); /// ``` pub fn new() -> Self { - Self { - db_config: DbConfig::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)), - reader_threads: ReaderThreads::default(), - max_txpool_weight: 0, - } + ConfigBuilder::new().build() } } diff --git a/storage/txpool/src/ops.rs b/storage/txpool/src/ops.rs index 289a8bb..badc4f6 100644 --- a/storage/txpool/src/ops.rs +++ b/storage/txpool/src/ops.rs @@ -51,7 +51,7 @@ //! let tmp_dir = tempfile::tempdir()?; //! let db_dir = tmp_dir.path().to_owned(); //! let config = ConfigBuilder::new() -//! .db_directory(db_dir.into()) +//! .data_directory(db_dir.into()) //! .build(); //! //! // Initialize the database environment. diff --git a/storage/txpool/src/service.rs b/storage/txpool/src/service.rs index a82de5b..03ce2f0 100644 --- a/storage/txpool/src/service.rs +++ b/storage/txpool/src/service.rs @@ -83,7 +83,7 @@ //! let tmp_dir = tempfile::tempdir()?; //! let db_dir = tmp_dir.path().to_owned(); //! let config = ConfigBuilder::new() -//! .db_directory(db_dir.into()) +//! .data_directory(db_dir.into()) //! .build(); //! //! // Initialize the database thread-pool. 
From 7b8756fa80e386fb04173d8220c15c86bf9f9888 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Tue, 3 Dec 2024 20:21:05 +0000 Subject: [PATCH 104/104] cuprated: P2P protocol request handler (#303) * add cuprated skeleton * fmt and add deny exception * add main chain batch handler * add blockchain init * very rough block manager * misc changes * move more config values * add new tables & types * add function to fully add an alt block * resolve current todo!s * add new requests * WIP: starting re-orgs * add last service request * commit Cargo.lock * add test * more docs + cleanup + alt blocks request * clippy + fmt * document types * move tx_fee to helper * more doc updates * fmt * fix imports * remove config files * fix merge errors * fix generated coins * handle more p2p requests + alt blocks * clean up handler code * add function for incoming blocks * add docs to handler functions * broadcast new blocks + add commands * add fluffy block handler * fix new block handling * small cleanup * increase outbound peer count * fix merge * clean up the blockchain manger * add more docs + cleanup imports * fix typo * fix doc * remove unrelated changes * add `get_objects` handler * add `get_chain` handler * add `fluffy_missing_txs` handler * add `new_fluffy_block` handler * improve interface globals * manger -> manager * enums instead of bools * move chain service to separate file * more review fixes * sort imports + docs * init dandelion integration * add dandelion start function * finish incoming tx handler * Add tx blob hash table * Add missing txpool requests * handle duplicate stem txs * check txpool on incoming block * add request to remove tx in new blocks from the pool * tell the txpool about incoming blocks * fix merge * typos * remove blockchain height from txpool * fix merge * fix merge * handle incoming txs in p2p request handler * Allow `IncomingTxHandler` to be given later * add p2p clearnet init * fix build * misc changes * doc updates * more doc updates * sort 
imports * review changes * Result -> DbResult * use `NonZero` * review fixes * remove `rust-2024-compatibility` lint --- Cargo.lock | 1 + Cargo.toml | 1 - binaries/cuprated/src/p2p.rs | 49 ++ .../cuprated/src/p2p/core_sync_service.rs | 49 ++ binaries/cuprated/src/p2p/request_handler.rs | 421 ++++++++++++++++++ binaries/cuprated/src/txpool.rs | 2 +- binaries/cuprated/src/txpool/incoming_tx.rs | 5 + net/wire/src/p2p/protocol.rs | 2 +- p2p/p2p-core/src/protocol.rs | 4 + p2p/p2p-core/src/protocol/try_from.rs | 3 + p2p/p2p/src/constants.rs | 6 +- storage/blockchain/Cargo.toml | 1 + storage/blockchain/src/ops/block.rs | 68 ++- storage/blockchain/src/ops/blockchain.rs | 42 +- storage/blockchain/src/service/read.rs | 174 ++++++-- types/src/blockchain.rs | 66 ++- types/src/lib.rs | 4 +- types/src/types.rs | 7 + 18 files changed, 861 insertions(+), 44 deletions(-) create mode 100644 binaries/cuprated/src/p2p/core_sync_service.rs diff --git a/Cargo.lock b/Cargo.lock index 0d55c8a..c8701f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -704,6 +704,7 @@ version = "0.0.0" dependencies = [ "bitflags 2.6.0", "bytemuck", + "bytes", "cuprate-constants", "cuprate-database", "cuprate-database-service", diff --git a/Cargo.toml b/Cargo.toml index 9be1528..1813057 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -376,7 +376,6 @@ unused_lifetimes = "deny" unused_macro_rules = "deny" ambiguous_glob_imports = "deny" unused_unsafe = "deny" -rust_2024_compatibility = "deny" # Warm let_underscore = { level = "deny", priority = -1 } diff --git a/binaries/cuprated/src/p2p.rs b/binaries/cuprated/src/p2p.rs index cdf1cef..aeb98b6 100644 --- a/binaries/cuprated/src/p2p.rs +++ b/binaries/cuprated/src/p2p.rs @@ -1,8 +1,57 @@ //! P2P //! //! Will handle initiating the P2P and contains a protocol request handler. 
+use futures::{FutureExt, TryFutureExt}; +use tokio::sync::oneshot; +use tower::ServiceExt; +use cuprate_blockchain::service::BlockchainReadHandle; +use cuprate_consensus::BlockChainContextService; +use cuprate_p2p::{NetworkInterface, P2PConfig}; +use cuprate_p2p_core::ClearNet; +use cuprate_txpool::service::TxpoolReadHandle; + +use crate::txpool::IncomingTxHandler; + +mod core_sync_service; mod network_address; pub mod request_handler; pub use network_address::CrossNetworkInternalPeerId; + +/// Starts the P2P clearnet network, returning a [`NetworkInterface`] to interact with it. +/// +/// A [`oneshot::Sender`] is also returned to provide the [`IncomingTxHandler`], until this is provided network +/// handshakes can not be completed. +pub async fn start_clearnet_p2p( + blockchain_read_handle: BlockchainReadHandle, + blockchain_context_service: BlockChainContextService, + txpool_read_handle: TxpoolReadHandle, + config: P2PConfig, +) -> Result< + ( + NetworkInterface, + oneshot::Sender, + ), + tower::BoxError, +> { + let (incoming_tx_handler_tx, incoming_tx_handler_rx) = oneshot::channel(); + + let request_handler_maker = request_handler::P2pProtocolRequestHandlerMaker { + blockchain_read_handle, + blockchain_context_service: blockchain_context_service.clone(), + txpool_read_handle, + incoming_tx_handler: None, + incoming_tx_handler_fut: incoming_tx_handler_rx.shared(), + }; + + Ok(( + cuprate_p2p::initialize_network( + request_handler_maker.map_response(|s| s.map_err(Into::into)), + core_sync_service::CoreSyncService(blockchain_context_service), + config, + ) + .await?, + incoming_tx_handler_tx, + )) +} diff --git a/binaries/cuprated/src/p2p/core_sync_service.rs b/binaries/cuprated/src/p2p/core_sync_service.rs new file mode 100644 index 0000000..d3c3ca1 --- /dev/null +++ b/binaries/cuprated/src/p2p/core_sync_service.rs @@ -0,0 +1,49 @@ +use std::task::{Context, Poll}; + +use futures::{future::BoxFuture, FutureExt, TryFutureExt}; +use tower::Service; + +use 
cuprate_consensus::{ + BlockChainContextRequest, BlockChainContextResponse, BlockChainContextService, +}; +use cuprate_helper::{cast::usize_to_u64, map::split_u128_into_low_high_bits}; +use cuprate_p2p_core::services::{CoreSyncDataRequest, CoreSyncDataResponse}; +use cuprate_wire::CoreSyncData; + +/// The core sync service. +#[derive(Clone)] +pub struct CoreSyncService(pub BlockChainContextService); + +impl Service for CoreSyncService { + type Response = CoreSyncDataResponse; + type Error = tower::BoxError; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.0.poll_ready(cx) + } + + fn call(&mut self, _: CoreSyncDataRequest) -> Self::Future { + self.0 + .call(BlockChainContextRequest::Context) + .map_ok(|res| { + let BlockChainContextResponse::Context(context) = res else { + unreachable!() + }; + + let context = context.unchecked_blockchain_context(); + let (cumulative_difficulty, cumulative_difficulty_top64) = + split_u128_into_low_high_bits(context.cumulative_difficulty); + + CoreSyncDataResponse(CoreSyncData { + cumulative_difficulty, + cumulative_difficulty_top64, + current_height: usize_to_u64(context.chain_height), + pruning_seed: 0, + top_id: context.top_hash, + top_version: context.current_hf.as_u8(), + }) + }) + .boxed() + } +} diff --git a/binaries/cuprated/src/p2p/request_handler.rs b/binaries/cuprated/src/p2p/request_handler.rs index 8b13789..7d72fa3 100644 --- a/binaries/cuprated/src/p2p/request_handler.rs +++ b/binaries/cuprated/src/p2p/request_handler.rs @@ -1 +1,422 @@ +use std::{ + collections::HashSet, + future::{ready, Ready}, + hash::Hash, + task::{Context, Poll}, +}; +use bytes::Bytes; +use futures::{ + future::{BoxFuture, Shared}, + FutureExt, +}; +use monero_serai::{block::Block, transaction::Transaction}; +use tokio::sync::{broadcast, oneshot, watch}; +use tokio_stream::wrappers::WatchStream; +use tower::{Service, ServiceExt}; + +use 
cuprate_blockchain::service::BlockchainReadHandle; +use cuprate_consensus::{ + transactions::new_tx_verification_data, BlockChainContextRequest, BlockChainContextResponse, + BlockChainContextService, +}; +use cuprate_dandelion_tower::TxState; +use cuprate_fixed_bytes::ByteArrayVec; +use cuprate_helper::cast::u64_to_usize; +use cuprate_helper::{ + asynch::rayon_spawn_async, + cast::usize_to_u64, + map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits}, +}; +use cuprate_p2p::constants::{ + MAX_BLOCKS_IDS_IN_CHAIN_ENTRY, MAX_BLOCK_BATCH_LEN, MAX_TRANSACTION_BLOB_SIZE, MEDIUM_BAN, +}; +use cuprate_p2p_core::{ + client::{InternalPeerID, PeerInformation}, + NetZoneAddress, NetworkZone, ProtocolRequest, ProtocolResponse, +}; +use cuprate_txpool::service::TxpoolReadHandle; +use cuprate_types::{ + blockchain::{BlockchainReadRequest, BlockchainResponse}, + BlockCompleteEntry, TransactionBlobs, TxsInBlock, +}; +use cuprate_wire::protocol::{ + ChainRequest, ChainResponse, FluffyMissingTransactionsRequest, GetObjectsRequest, + GetObjectsResponse, NewFluffyBlock, NewTransactions, +}; + +use crate::{ + blockchain::interface::{self as blockchain_interface, IncomingBlockError}, + constants::PANIC_CRITICAL_SERVICE_ERROR, + p2p::CrossNetworkInternalPeerId, + txpool::{IncomingTxError, IncomingTxHandler, IncomingTxs}, +}; + +/// The P2P protocol request handler [`MakeService`](tower::MakeService). +#[derive(Clone)] +pub struct P2pProtocolRequestHandlerMaker { + pub blockchain_read_handle: BlockchainReadHandle, + pub blockchain_context_service: BlockChainContextService, + pub txpool_read_handle: TxpoolReadHandle, + + /// The [`IncomingTxHandler`], wrapped in an [`Option`] as there is a cyclic reference between [`P2pProtocolRequestHandlerMaker`] + /// and the [`IncomingTxHandler`]. + pub incoming_tx_handler: Option, + + /// A [`Future`](std::future::Future) that produces the [`IncomingTxHandler`]. 
+ pub incoming_tx_handler_fut: Shared>, +} + +impl Service> for P2pProtocolRequestHandlerMaker +where + InternalPeerID: Into, +{ + type Response = P2pProtocolRequestHandler; + type Error = tower::BoxError; + type Future = Ready>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + if self.incoming_tx_handler.is_none() { + return self + .incoming_tx_handler_fut + .poll_unpin(cx) + .map(|incoming_tx_handler| { + self.incoming_tx_handler = Some(incoming_tx_handler?); + Ok(()) + }); + } + + Poll::Ready(Ok(())) + } + + fn call(&mut self, peer_information: PeerInformation) -> Self::Future { + let Some(incoming_tx_handler) = self.incoming_tx_handler.clone() else { + panic!("poll_ready was not called or did not return `Poll::Ready`") + }; + + // TODO: check sync info? + + let blockchain_read_handle = self.blockchain_read_handle.clone(); + let txpool_read_handle = self.txpool_read_handle.clone(); + + ready(Ok(P2pProtocolRequestHandler { + peer_information, + blockchain_read_handle, + blockchain_context_service: self.blockchain_context_service.clone(), + txpool_read_handle, + incoming_tx_handler, + })) + } +} + +/// The P2P protocol request handler. 
+#[derive(Clone)] +pub struct P2pProtocolRequestHandler { + peer_information: PeerInformation, + blockchain_read_handle: BlockchainReadHandle, + blockchain_context_service: BlockChainContextService, + txpool_read_handle: TxpoolReadHandle, + incoming_tx_handler: IncomingTxHandler, +} + +impl Service for P2pProtocolRequestHandler +where + InternalPeerID: Into, +{ + type Response = ProtocolResponse; + type Error = anyhow::Error; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, request: ProtocolRequest) -> Self::Future { + match request { + ProtocolRequest::GetObjects(r) => { + get_objects(r, self.blockchain_read_handle.clone()).boxed() + } + ProtocolRequest::GetChain(r) => { + get_chain(r, self.blockchain_read_handle.clone()).boxed() + } + ProtocolRequest::FluffyMissingTxs(r) => { + fluffy_missing_txs(r, self.blockchain_read_handle.clone()).boxed() + } + ProtocolRequest::NewBlock(_) => ready(Err(anyhow::anyhow!( + "Peer sent a full block when we support fluffy blocks" + ))) + .boxed(), + ProtocolRequest::NewFluffyBlock(r) => new_fluffy_block( + self.peer_information.clone(), + r, + self.blockchain_read_handle.clone(), + self.txpool_read_handle.clone(), + ) + .boxed(), + ProtocolRequest::NewTransactions(r) => new_transactions( + self.peer_information.clone(), + r, + self.blockchain_context_service.clone(), + self.incoming_tx_handler.clone(), + ) + .boxed(), + ProtocolRequest::GetTxPoolCompliment(_) => ready(Ok(ProtocolResponse::NA)).boxed(), // TODO: should we support this? 
+ } + } +} + +//---------------------------------------------------------------------------------------------------- Handler functions + +/// [`ProtocolRequest::GetObjects`] +async fn get_objects( + request: GetObjectsRequest, + mut blockchain_read_handle: BlockchainReadHandle, +) -> anyhow::Result { + if request.blocks.len() > MAX_BLOCK_BATCH_LEN { + anyhow::bail!("Peer requested more blocks than allowed.") + } + + let block_hashes: Vec<[u8; 32]> = (&request.blocks).into(); + // deallocate the backing `Bytes`. + drop(request); + + let BlockchainResponse::BlockCompleteEntries { + blocks, + missing_hashes, + blockchain_height, + } = blockchain_read_handle + .ready() + .await? + .call(BlockchainReadRequest::BlockCompleteEntries(block_hashes)) + .await? + else { + unreachable!(); + }; + + Ok(ProtocolResponse::GetObjects(GetObjectsResponse { + blocks, + missed_ids: ByteArrayVec::from(missing_hashes), + current_blockchain_height: usize_to_u64(blockchain_height), + })) +} + +/// [`ProtocolRequest::GetChain`] +async fn get_chain( + request: ChainRequest, + mut blockchain_read_handle: BlockchainReadHandle, +) -> anyhow::Result { + if request.block_ids.len() > MAX_BLOCKS_IDS_IN_CHAIN_ENTRY { + anyhow::bail!("Peer sent too many block hashes in chain request.") + } + + let block_hashes: Vec<[u8; 32]> = (&request.block_ids).into(); + let want_pruned_data = request.prune; + // deallocate the backing `Bytes`. + drop(request); + + let BlockchainResponse::NextChainEntry { + start_height, + chain_height, + block_ids, + block_weights, + cumulative_difficulty, + first_block_blob, + } = blockchain_read_handle + .ready() + .await? + .call(BlockchainReadRequest::NextChainEntry(block_hashes, 10_000)) + .await? 
+ else { + unreachable!(); + }; + + let Some(start_height) = start_height else { + anyhow::bail!("The peers chain has a different genesis block than ours."); + }; + + let (cumulative_difficulty_low64, cumulative_difficulty_top64) = + split_u128_into_low_high_bits(cumulative_difficulty); + + Ok(ProtocolResponse::GetChain(ChainResponse { + start_height: usize_to_u64(std::num::NonZero::get(start_height)), + total_height: usize_to_u64(chain_height), + cumulative_difficulty_low64, + cumulative_difficulty_top64, + m_block_ids: ByteArrayVec::from(block_ids), + first_block: first_block_blob.map_or(Bytes::new(), Bytes::from), + // only needed when pruned + m_block_weights: if want_pruned_data { + block_weights.into_iter().map(usize_to_u64).collect() + } else { + vec![] + }, + })) +} + +/// [`ProtocolRequest::FluffyMissingTxs`] +async fn fluffy_missing_txs( + mut request: FluffyMissingTransactionsRequest, + mut blockchain_read_handle: BlockchainReadHandle, +) -> anyhow::Result { + let tx_indexes = std::mem::take(&mut request.missing_tx_indices); + let block_hash: [u8; 32] = *request.block_hash; + let current_blockchain_height = request.current_blockchain_height; + + // deallocate the backing `Bytes`. + drop(request); + + let BlockchainResponse::TxsInBlock(res) = blockchain_read_handle + .ready() + .await? + .call(BlockchainReadRequest::TxsInBlock { + block_hash, + tx_indexes, + }) + .await? + else { + unreachable!(); + }; + + let Some(TxsInBlock { block, txs }) = res else { + anyhow::bail!("The peer requested txs out of range."); + }; + + Ok(ProtocolResponse::NewFluffyBlock(NewFluffyBlock { + b: BlockCompleteEntry { + block: Bytes::from(block), + txs: TransactionBlobs::Normal(txs.into_iter().map(Bytes::from).collect()), + pruned: false, + // only needed for pruned blocks. 
+ block_weight: 0, + }, + current_blockchain_height, + })) +} + +/// [`ProtocolRequest::NewFluffyBlock`] +async fn new_fluffy_block( + peer_information: PeerInformation, + request: NewFluffyBlock, + mut blockchain_read_handle: BlockchainReadHandle, + mut txpool_read_handle: TxpoolReadHandle, +) -> anyhow::Result { + // TODO: check context service here and ignore the block? + let current_blockchain_height = request.current_blockchain_height; + + peer_information + .core_sync_data + .lock() + .unwrap() + .current_height = current_blockchain_height; + + let (block, txs) = rayon_spawn_async(move || -> Result<_, anyhow::Error> { + let block = Block::read(&mut request.b.block.as_ref())?; + + let tx_blobs = request + .b + .txs + .take_normal() + .ok_or(anyhow::anyhow!("Peer sent pruned txs in fluffy block"))?; + + let txs = tx_blobs + .into_iter() + .map(|tx_blob| { + if tx_blob.len() > MAX_TRANSACTION_BLOB_SIZE { + anyhow::bail!("Peer sent a transaction over the size limit."); + } + + let tx = Transaction::read(&mut tx_blob.as_ref())?; + + Ok((tx.hash(), tx)) + }) + .collect::>()?; + + // The backing `Bytes` will be deallocated when this closure returns. + + Ok((block, txs)) + }) + .await?; + + let res = blockchain_interface::handle_incoming_block( + block, + txs, + &mut blockchain_read_handle, + &mut txpool_read_handle, + ) + .await; + + match res { + Ok(_) => Ok(ProtocolResponse::NA), + Err(IncomingBlockError::UnknownTransactions(block_hash, missing_tx_indices)) => Ok( + ProtocolResponse::FluffyMissingTransactionsRequest(FluffyMissingTransactionsRequest { + block_hash: block_hash.into(), + current_blockchain_height, + missing_tx_indices: missing_tx_indices.into_iter().map(usize_to_u64).collect(), + }), + ), + Err(IncomingBlockError::Orphan) => { + // Block's parent was unknown, could be syncing? 
+ Ok(ProtocolResponse::NA) + } + Err(e) => Err(e.into()), + } +} + +/// [`ProtocolRequest::NewTransactions`] +async fn new_transactions( + peer_information: PeerInformation, + request: NewTransactions, + mut blockchain_context_service: BlockChainContextService, + mut incoming_tx_handler: IncomingTxHandler, +) -> anyhow::Result +where + A: NetZoneAddress, + InternalPeerID: Into, +{ + let BlockChainContextResponse::Context(context) = blockchain_context_service + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(BlockChainContextRequest::Context) + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + else { + unreachable!() + }; + + let context = context.unchecked_blockchain_context(); + + // If we are more than 2 blocks behind the peer then ignore the txs - we are probably still syncing. + if usize_to_u64(context.chain_height + 2) + < peer_information + .core_sync_data + .lock() + .unwrap() + .current_height + { + return Ok(ProtocolResponse::NA); + } + + let state = if request.dandelionpp_fluff { + TxState::Fluff + } else { + TxState::Stem { + from: peer_information.id.into(), + } + }; + + // Drop all the data except the stuff we still need. + let NewTransactions { txs, .. 
} = request; + + let res = incoming_tx_handler + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(IncomingTxs { txs, state }) + .await; + + match res { + Ok(()) => Ok(ProtocolResponse::NA), + Err(e) => Err(e.into()), + } +} diff --git a/binaries/cuprated/src/txpool.rs b/binaries/cuprated/src/txpool.rs index 9592c2b..2076956 100644 --- a/binaries/cuprated/src/txpool.rs +++ b/binaries/cuprated/src/txpool.rs @@ -12,4 +12,4 @@ mod dandelion; mod incoming_tx; mod txs_being_handled; -pub use incoming_tx::IncomingTxHandler; +pub use incoming_tx::{IncomingTxError, IncomingTxHandler, IncomingTxs}; diff --git a/binaries/cuprated/src/txpool/incoming_tx.rs b/binaries/cuprated/src/txpool/incoming_tx.rs index e204159..bf7684e 100644 --- a/binaries/cuprated/src/txpool/incoming_tx.rs +++ b/binaries/cuprated/src/txpool/incoming_tx.rs @@ -43,9 +43,13 @@ use crate::{ }; /// An error that can happen handling an incoming tx. +#[derive(Debug, thiserror::Error)] pub enum IncomingTxError { + #[error("Error parsing tx: {0}")] Parse(std::io::Error), + #[error(transparent)] Consensus(ExtendedConsensusError), + #[error("Duplicate tx in message")] DuplicateTransaction, } @@ -67,6 +71,7 @@ pub(super) type TxId = [u8; 32]; /// The service than handles incoming transaction pool transactions. /// /// This service handles everything including verifying the tx, adding it to the pool and routing it to other nodes. +#[derive(Clone)] pub struct IncomingTxHandler { /// A store of txs currently being handled in incoming tx requests. pub(super) txs_being_handled: TxsBeingHandled, diff --git a/net/wire/src/p2p/protocol.rs b/net/wire/src/p2p/protocol.rs index 1d1d45a..cc4b49d 100644 --- a/net/wire/src/p2p/protocol.rs +++ b/net/wire/src/p2p/protocol.rs @@ -159,7 +159,7 @@ epee_object!( current_blockchain_height: u64, ); -/// A request for Txs we are missing from our `TxPool` +/// A request for txs we are missing from an incoming block. 
#[derive(Debug, Clone, PartialEq, Eq)] pub struct FluffyMissingTransactionsRequest { /// The Block we are missing the Txs in diff --git a/p2p/p2p-core/src/protocol.rs b/p2p/p2p-core/src/protocol.rs index 7d8d431..82aac82 100644 --- a/p2p/p2p-core/src/protocol.rs +++ b/p2p/p2p-core/src/protocol.rs @@ -116,6 +116,7 @@ pub enum ProtocolResponse { GetChain(ChainResponse), NewFluffyBlock(NewFluffyBlock), NewTransactions(NewTransactions), + FluffyMissingTransactionsRequest(FluffyMissingTransactionsRequest), NA, } @@ -139,6 +140,9 @@ impl PeerResponse { ProtocolResponse::GetChain(_) => MessageID::GetChain, ProtocolResponse::NewFluffyBlock(_) => MessageID::NewBlock, ProtocolResponse::NewTransactions(_) => MessageID::NewFluffyBlock, + ProtocolResponse::FluffyMissingTransactionsRequest(_) => { + MessageID::FluffyMissingTxs + } ProtocolResponse::NA => return None, }, diff --git a/p2p/p2p-core/src/protocol/try_from.rs b/p2p/p2p-core/src/protocol/try_from.rs index d3a7260..2dfc41d 100644 --- a/p2p/p2p-core/src/protocol/try_from.rs +++ b/p2p/p2p-core/src/protocol/try_from.rs @@ -71,6 +71,9 @@ impl TryFrom for ProtocolMessage { ProtocolResponse::NewFluffyBlock(val) => Self::NewFluffyBlock(val), ProtocolResponse::GetChain(val) => Self::ChainEntryResponse(val), ProtocolResponse::GetObjects(val) => Self::GetObjectsResponse(val), + ProtocolResponse::FluffyMissingTransactionsRequest(val) => { + Self::FluffyMissingTransactionsRequest(val) + } ProtocolResponse::NA => return Err(MessageConversionError), }) } diff --git a/p2p/p2p/src/constants.rs b/p2p/p2p/src/constants.rs index f70d64c..a81557c 100644 --- a/p2p/p2p/src/constants.rs +++ b/p2p/p2p/src/constants.rs @@ -52,7 +52,7 @@ pub(crate) const INITIAL_CHAIN_REQUESTS_TO_SEND: usize = 3; /// The enforced maximum amount of blocks to request in a batch. /// /// Requesting more than this will cause the peer to disconnect and potentially lead to bans. 
-pub(crate) const MAX_BLOCK_BATCH_LEN: usize = 100; +pub const MAX_BLOCK_BATCH_LEN: usize = 100; /// The timeout that the block downloader will use for requests. pub(crate) const BLOCK_DOWNLOADER_REQUEST_TIMEOUT: Duration = Duration::from_secs(30); @@ -61,13 +61,13 @@ pub(crate) const BLOCK_DOWNLOADER_REQUEST_TIMEOUT: Duration = Duration::from_sec /// be less than. /// /// ref: -pub(crate) const MAX_TRANSACTION_BLOB_SIZE: usize = 1_000_000; +pub const MAX_TRANSACTION_BLOB_SIZE: usize = 1_000_000; /// The maximum amount of block IDs allowed in a chain entry response. /// /// ref: // TODO: link to the protocol book when this section is added. -pub(crate) const MAX_BLOCKS_IDS_IN_CHAIN_ENTRY: usize = 25_000; +pub const MAX_BLOCKS_IDS_IN_CHAIN_ENTRY: usize = 25_000; /// The amount of failures downloading a specific batch before we stop attempting to download it. pub(crate) const MAX_DOWNLOAD_FAILURES: usize = 5; diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index 92b4374..c935924 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -34,6 +34,7 @@ serde = { workspace = true, optional = true } tower = { workspace = true } thread_local = { workspace = true } rayon = { workspace = true } +bytes = { workspace = true } [dev-dependencies] cuprate-constants = { workspace = true } diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index 5e54187..2dc88aa 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -2,21 +2,23 @@ //---------------------------------------------------------------------------------------------------- Import use bytemuck::TransparentWrapper; +use bytes::Bytes; use monero_serai::{ block::{Block, BlockHeader}, transaction::Transaction, }; use cuprate_database::{ - DbResult, RuntimeError, StorableVec, {DatabaseRo, DatabaseRw}, + DbResult, RuntimeError, StorableVec, {DatabaseIter, DatabaseRo, DatabaseRw}, }; +use 
cuprate_helper::cast::usize_to_u64; use cuprate_helper::{ map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits}, tx::tx_fee, }; use cuprate_types::{ - AltBlockInformation, ChainId, ExtendedBlockHeader, HardFork, VerifiedBlockInformation, - VerifiedTransactionInformation, + AltBlockInformation, BlockCompleteEntry, ChainId, ExtendedBlockHeader, HardFork, + TransactionBlobs, VerifiedBlockInformation, VerifiedTransactionInformation, }; use crate::{ @@ -27,7 +29,7 @@ use crate::{ output::get_rct_num_outputs, tx::{add_tx, remove_tx}, }, - tables::{BlockHeights, BlockInfos, Tables, TablesMut}, + tables::{BlockHeights, BlockInfos, Tables, TablesIter, TablesMut}, types::{BlockHash, BlockHeight, BlockInfo}, }; @@ -222,6 +224,64 @@ pub fn pop_block( Ok((block_height, block_info.block_hash, block)) } +//---------------------------------------------------------------------------------------------------- `get_block_blob_with_tx_indexes` +/// Retrieve a block's raw bytes, the index of the miner transaction and the number of non miner-txs in the block. +/// +#[doc = doc_error!()] +pub fn get_block_blob_with_tx_indexes( + block_height: &BlockHeight, + tables: &impl Tables, +) -> Result<(Vec, u64, usize), RuntimeError> { + let miner_tx_idx = tables.block_infos().get(block_height)?.mining_tx_index; + + let block_txs = tables.block_txs_hashes().get(block_height)?.0; + let numb_txs = block_txs.len(); + + // Get the block header + let mut block = tables.block_header_blobs().get(block_height)?.0; + + // Add the miner tx to the blob. + let mut miner_tx_blob = tables.tx_blobs().get(&miner_tx_idx)?.0; + block.append(&mut miner_tx_blob); + + // Add the blocks tx hashes. 
+ monero_serai::io::write_varint(&block_txs.len(), &mut block) + .expect("The number of txs per block will not exceed u64::MAX"); + + let block_txs_bytes = bytemuck::must_cast_slice(&block_txs); + block.extend_from_slice(block_txs_bytes); + + Ok((block, miner_tx_idx, numb_txs)) +} + +//---------------------------------------------------------------------------------------------------- `get_block_complete_entry` +/// Retrieve a [`BlockCompleteEntry`] from the database. +/// +#[doc = doc_error!()] +pub fn get_block_complete_entry( + block_hash: &BlockHash, + tables: &impl TablesIter, +) -> Result { + let block_height = tables.block_heights().get(block_hash)?; + let (block_blob, miner_tx_idx, numb_non_miner_txs) = + get_block_blob_with_tx_indexes(&block_height, tables)?; + + let first_tx_idx = miner_tx_idx + 1; + + let tx_blobs = tables + .tx_blobs_iter() + .get_range(first_tx_idx..(usize_to_u64(numb_non_miner_txs) + first_tx_idx))? + .map(|tx_blob| Ok(Bytes::from(tx_blob?.0))) + .collect::>()?; + + Ok(BlockCompleteEntry { + block: Bytes::from(block_blob), + txs: TransactionBlobs::Normal(tx_blobs), + pruned: false, + block_weight: 0, + }) +} + //---------------------------------------------------------------------------------------------------- `get_block_extended_header_*` /// Retrieve a [`ExtendedBlockHeader`] from the database.
/// diff --git a/storage/blockchain/src/ops/blockchain.rs b/storage/blockchain/src/ops/blockchain.rs index 7163363..54dd752 100644 --- a/storage/blockchain/src/ops/blockchain.rs +++ b/storage/blockchain/src/ops/blockchain.rs @@ -4,9 +4,9 @@ use cuprate_database::{DatabaseRo, DbResult, RuntimeError}; use crate::{ - ops::macros::doc_error, + ops::{block::block_exists, macros::doc_error}, tables::{BlockHeights, BlockInfos}, - types::BlockHeight, + types::{BlockHash, BlockHeight}, }; //---------------------------------------------------------------------------------------------------- Free Functions @@ -76,6 +76,44 @@ pub fn cumulative_generated_coins( } } +/// Find the split point between our chain and a list of [`BlockHash`]s from another chain. +/// +/// This function accepts chains in chronological and reverse chronological order, however +/// if the wrong order is specified the return value is meaningless. +/// +/// For chronologically ordered chains this will return the index of the first unknown, for reverse +/// chronologically ordered chains this will return the index of the first known. +/// +/// If all blocks are known for chronologically ordered chains or unknown for reverse chronologically +/// ordered chains then the length of the chain will be returned. +#[doc = doc_error!()] +#[inline] +pub fn find_split_point( + block_ids: &[BlockHash], + chronological_order: bool, + table_block_heights: &impl DatabaseRo, +) -> Result { + let mut err = None; + + // Do a binary search to find the first unknown/known block in the batch. + let idx = block_ids.partition_point(|block_id| { + match block_exists(block_id, table_block_heights) { + Ok(exists) => exists == chronological_order, + Err(e) => { + err.get_or_insert(e); + // if this happens the search is scrapped, just return `false` back. 
+ false + } + } + }); + + if let Some(e) = err { + return Err(e); + } + + Ok(idx) +} + //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test { diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index 7657759..84b1b92 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -10,23 +10,26 @@ //---------------------------------------------------------------------------------------------------- Import use std::{ + cmp::min, collections::{HashMap, HashSet}, sync::Arc, }; use rayon::{ - iter::{IntoParallelIterator, ParallelIterator}, + iter::{Either, IntoParallelIterator, ParallelIterator}, prelude::*, ThreadPool, }; use thread_local::ThreadLocal; -use cuprate_database::{ConcreteEnv, DatabaseRo, DbResult, Env, EnvInner, RuntimeError}; +use cuprate_database::{ + ConcreteEnv, DatabaseIter, DatabaseRo, DbResult, Env, EnvInner, RuntimeError, +}; use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads}; use cuprate_helper::map::combine_low_high_bits_to_u128; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse}, - Chain, ChainId, ExtendedBlockHeader, OutputHistogramInput, OutputOnChain, + Chain, ChainId, ExtendedBlockHeader, OutputHistogramInput, OutputOnChain, TxsInBlock, }; use crate::{ @@ -36,9 +39,10 @@ use crate::{ get_alt_chain_history_ranges, }, block::{ - block_exists, get_block_extended_header_from_height, get_block_height, get_block_info, + block_exists, get_block_blob_with_tx_indexes, get_block_complete_entry, + get_block_extended_header_from_height, get_block_height, get_block_info, }, - blockchain::{cumulative_generated_coins, top_block_height}, + blockchain::{cumulative_generated_coins, find_split_point, top_block_height}, key_image::key_image_exists, output::id_to_output_on_chain, }, @@ -46,7 +50,7 @@ use crate::{ 
free::{compact_history_genesis_not_included, compact_history_index_to_height_offset}, types::{BlockchainReadHandle, ResponseResult}, }, - tables::{AltBlockHeights, BlockHeights, BlockInfos, OpenTables, Tables}, + tables::{AltBlockHeights, BlockHeights, BlockInfos, OpenTables, Tables, TablesIter}, types::{ AltBlockHeight, Amount, AmountIndex, BlockHash, BlockHeight, KeyImage, PreRctOutputId, }, @@ -100,6 +104,7 @@ fn map_request( /* SOMEDAY: pre-request handling, run some code for each request? */ match request { + R::BlockCompleteEntries(block_hashes) => block_complete_entries(env, block_hashes), R::BlockExtendedHeader(block) => block_extended_header(env, block), R::BlockHash(block, chain) => block_hash(env, block, chain), R::FindBlock(block_hash) => find_block(env, block_hash), @@ -113,7 +118,12 @@ fn map_request( R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec), R::KeyImagesSpent(set) => key_images_spent(env, set), R::CompactChainHistory => compact_chain_history(env), + R::NextChainEntry(block_hashes, amount) => next_chain_entry(env, &block_hashes, amount), R::FindFirstUnknown(block_ids) => find_first_unknown(env, &block_ids), + R::TxsInBlock { + block_hash, + tx_indexes, + } => txs_in_block(env, block_hash, tx_indexes), R::AltBlocksInChain(chain_id) => alt_blocks_in_chain(env, chain_id), R::Block { height } => block(env, height), R::BlockByHash(hash) => block_by_hash(env, hash), @@ -198,6 +208,38 @@ macro_rules! get_tables { // TODO: The overhead of parallelism may be too much for every request, perfomace test to find optimal // amount of parallelism. +/// [`BlockchainReadRequest::BlockCompleteEntries`]. +fn block_complete_entries(env: &ConcreteEnv, block_hashes: Vec) -> ResponseResult { + // Prepare tx/tables in `ThreadLocal`. 
+ let env_inner = env.env_inner(); + let tx_ro = thread_local(env); + let tables = thread_local(env); + + let (missing_hashes, blocks) = block_hashes + .into_par_iter() + .map(|block_hash| { + let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; + let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); + + match get_block_complete_entry(&block_hash, tables) { + Err(RuntimeError::KeyNotFound) => Ok(Either::Left(block_hash)), + res => res.map(Either::Right), + } + }) + .collect::>()?; + + let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; + let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); + + let blockchain_height = crate::ops::blockchain::chain_height(tables.block_heights())?; + + Ok(BlockchainResponse::BlockCompleteEntries { + blocks, + missing_hashes, + blockchain_height, + }) +} + /// [`BlockchainReadRequest::BlockExtendedHeader`]. #[inline] fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult { @@ -335,7 +377,7 @@ fn block_extended_header_in_range( } }) }) - .collect::, _>>()? + .collect::>>()? } }; @@ -534,6 +576,76 @@ fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult { }) } +/// [`BlockchainReadRequest::NextChainEntry`] +/// +/// # Invariant +/// `block_ids` must be sorted in reverse chronological block order, or else +/// the returned result is unspecified and meaningless, as this function +/// performs a binary search. +fn next_chain_entry( + env: &ConcreteEnv, + block_ids: &[BlockHash], + next_entry_size: usize, +) -> ResponseResult { + // Single-threaded, no `ThreadLocal` required. + let env_inner = env.env_inner(); + let tx_ro = env_inner.tx_ro()?; + + let tables = env_inner.open_tables(&tx_ro)?; + let table_block_heights = tables.block_heights(); + let table_block_infos = tables.block_infos_iter(); + + let idx = find_split_point(block_ids, false, table_block_heights)?; + + // This will happen if we have a different genesis block. 
+ if idx == block_ids.len() { + return Ok(BlockchainResponse::NextChainEntry { + start_height: None, + chain_height: 0, + block_ids: vec![], + block_weights: vec![], + cumulative_difficulty: 0, + first_block_blob: None, + }); + } + + // The returned chain entry must overlap with one of the blocks we were told about. + let first_known_block_hash = block_ids[idx]; + let first_known_height = table_block_heights.get(&first_known_block_hash)?; + + let chain_height = crate::ops::blockchain::chain_height(table_block_heights)?; + let last_height_in_chain_entry = min(first_known_height + next_entry_size, chain_height); + + let (block_ids, block_weights) = table_block_infos + .get_range(first_known_height..last_height_in_chain_entry)? + .map(|block_info| { + let block_info = block_info?; + + Ok((block_info.block_hash, block_info.weight)) + }) + .collect::, Vec<_>)>>()?; + + let top_block_info = table_block_infos.get(&(chain_height - 1))?; + + let first_block_blob = if block_ids.len() >= 2 { + Some(get_block_blob_with_tx_indexes(&(first_known_height + 1), &tables)?.0) + } else { + None + }; + + Ok(BlockchainResponse::NextChainEntry { + start_height: std::num::NonZero::new(first_known_height), + chain_height, + block_ids, + block_weights, + cumulative_difficulty: combine_low_high_bits_to_u128( + top_block_info.cumulative_difficulty_low, + top_block_info.cumulative_difficulty_high, + ), + first_block_blob, + }) +} + /// [`BlockchainReadRequest::FindFirstUnknown`] /// /// # Invariant @@ -546,24 +658,7 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes let table_block_heights = env_inner.open_db_ro::(&tx_ro)?; - let mut err = None; - - // Do a binary search to find the first unknown block in the batch. - let idx = - block_ids.partition_point( - |block_id| match block_exists(block_id, &table_block_heights) { - Ok(exists) => exists, - Err(e) => { - err.get_or_insert(e); - // if this happens the search is scrapped, just return `false` back. 
- false - } - }, - ); - - if let Some(e) = err { - return Err(e); - } + let idx = find_split_point(block_ids, true, &table_block_heights)?; Ok(if idx == block_ids.len() { BlockchainResponse::FindFirstUnknown(None) @@ -576,6 +671,33 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes }) } +/// [`BlockchainReadRequest::TxsInBlock`] +fn txs_in_block(env: &ConcreteEnv, block_hash: [u8; 32], missing_txs: Vec) -> ResponseResult { + // Single-threaded, no `ThreadLocal` required. + let env_inner = env.env_inner(); + let tx_ro = env_inner.tx_ro()?; + let tables = env_inner.open_tables(&tx_ro)?; + + let block_height = tables.block_heights().get(&block_hash)?; + + let (block, miner_tx_index, numb_txs) = get_block_blob_with_tx_indexes(&block_height, &tables)?; + let first_tx_index = miner_tx_index + 1; + + if numb_txs < missing_txs.len() { + return Ok(BlockchainResponse::TxsInBlock(None)); + } + + let txs = missing_txs + .into_iter() + .map(|index_offset| Ok(tables.tx_blobs().get(&(first_tx_index + index_offset))?.0)) + .collect::>()?; + + Ok(BlockchainResponse::TxsInBlock(Some(TxsInBlock { + block, + txs, + }))) +} + /// [`BlockchainReadRequest::AltBlocksInChain`] fn alt_blocks_in_chain(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult { // Prepare tx/tables in `ThreadLocal`. 
@@ -613,7 +735,7 @@ fn alt_blocks_in_chain(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult { ) }) }) - .collect::>()?; + .collect::>()?; Ok(BlockchainResponse::AltBlocksInChain(blocks)) } diff --git a/types/src/blockchain.rs b/types/src/blockchain.rs index c39c0bd..7518935 100644 --- a/types/src/blockchain.rs +++ b/types/src/blockchain.rs @@ -11,9 +11,9 @@ use std::{ use monero_serai::block::Block; use crate::{ - types::{Chain, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation}, - AltBlockInformation, ChainId, ChainInfo, CoinbaseTxSum, OutputHistogramEntry, - OutputHistogramInput, + types::{Chain, ExtendedBlockHeader, OutputOnChain, TxsInBlock, VerifiedBlockInformation}, + AltBlockInformation, BlockCompleteEntry, ChainId, ChainInfo, CoinbaseTxSum, + OutputHistogramEntry, OutputHistogramInput, }; //---------------------------------------------------------------------------------------------------- ReadRequest @@ -27,6 +27,11 @@ use crate::{ /// See `Response` for the expected responses per `Request`. #[derive(Debug, Clone, PartialEq, Eq)] pub enum BlockchainReadRequest { + /// Request [`BlockCompleteEntry`]s. + /// + /// The input is the block hashes. + BlockCompleteEntries(Vec<[u8; 32]>), + /// Request a block's extended header. /// /// The input is the block's height. @@ -96,6 +101,16 @@ pub enum BlockchainReadRequest { /// A request for the compact chain history. CompactChainHistory, + /// A request for the next chain entry. + /// + /// Input is a list of block hashes and the amount of block hashes to return in the next chain entry. + /// + /// # Invariant + /// The [`Vec`] containing the block IDs must be sorted in reverse chronological block + /// order, or else the returned response is unspecified and meaningless, + /// as this request performs a binary search + NextChainEntry(Vec<[u8; 32]>, usize), + /// A request to find the first unknown block ID in a list of block IDs. 
/// /// # Invariant @@ -104,6 +119,16 @@ pub enum BlockchainReadRequest { /// as this request performs a binary search. FindFirstUnknown(Vec<[u8; 32]>), + /// A request for transactions from a specific block. + TxsInBlock { + /// The block to get transactions from. + block_hash: [u8; 32], + /// The indexes of the transactions from the block. + /// This is not the global index of the txs, instead it is the local index as they appear in + /// the block. + tx_indexes: Vec, + }, + /// A request for all alt blocks in the chain with the given [`ChainId`]. AltBlocksInChain(ChainId), @@ -182,6 +207,16 @@ pub enum BlockchainWriteRequest { #[expect(clippy::large_enum_variant)] pub enum BlockchainResponse { //------------------------------------------------------ Reads + /// Response to [`BlockchainReadRequest::BlockCompleteEntries`]. + BlockCompleteEntries { + /// The [`BlockCompleteEntry`]s that we had. + blocks: Vec, + /// The hashes of blocks that were requested, but we don't have. + missing_hashes: Vec<[u8; 32]>, + /// Our blockchain height. + blockchain_height: usize, + }, + /// Response to [`BlockchainReadRequest::BlockExtendedHeader`]. /// /// Inner value is the extended headed of the requested block. @@ -248,6 +283,24 @@ pub enum BlockchainResponse { cumulative_difficulty: u128, }, + /// Response to [`BlockchainReadRequest::NextChainEntry`]. + /// + /// If all blocks were unknown `start_height` will be [`None`], the other fields will be meaningless. + NextChainEntry { + /// The start height of this entry, [`None`] if we could not find the split point. + start_height: Option>, + /// The current chain height. + chain_height: usize, + /// The next block hashes in the entry. + block_ids: Vec<[u8; 32]>, + /// The block weights of the next blocks. + block_weights: Vec, + /// The current cumulative difficulty of our chain. + cumulative_difficulty: u128, + /// The block blob of the 2nd block in `block_ids`, if there is one. 
+ first_block_blob: Option>, + }, + /// Response to [`BlockchainReadRequest::FindFirstUnknown`]. /// /// Contains the index of the first unknown block and its expected height. @@ -255,7 +308,12 @@ pub enum BlockchainResponse { /// This will be [`None`] if all blocks were known. FindFirstUnknown(Option<(usize, usize)>), - /// Response to [`BlockchainReadRequest::AltBlocksInChain`]. + /// The response for [`BlockchainReadRequest::TxsInBlock`]. + /// + /// Will return [`None`] if the request contained an index out of range. + TxsInBlock(Option), + + /// The response for [`BlockchainReadRequest::AltBlocksInChain`]. /// /// Contains all the alt blocks in the alt-chain in chronological order. AltBlocksInChain(Vec), diff --git a/types/src/lib.rs b/types/src/lib.rs index a5a04f9..7aaf0b9 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -26,8 +26,8 @@ pub use transaction_verification_data::{ pub use types::{ AddAuxPow, AltBlockInformation, AuxPow, Chain, ChainId, ChainInfo, CoinbaseTxSum, ExtendedBlockHeader, FeeEstimate, HardForkInfo, MinerData, MinerDataTxBacklogEntry, - OutputHistogramEntry, OutputHistogramInput, OutputOnChain, VerifiedBlockInformation, - VerifiedTransactionInformation, + OutputHistogramEntry, OutputHistogramInput, OutputOnChain, TxsInBlock, + VerifiedBlockInformation, VerifiedTransactionInformation, }; //---------------------------------------------------------------------------------------------------- Feature-gated diff --git a/types/src/types.rs b/types/src/types.rs index 720ad0a..8a5b5aa 100644 --- a/types/src/types.rs +++ b/types/src/types.rs @@ -259,6 +259,13 @@ pub struct AddAuxPow { pub aux_pow: Vec, } +/// The inner response for a request for txs in a block. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TxsInBlock { + pub block: Vec, + pub txs: Vec>, +} + //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test {