From 38541dbfda781277d48dd218bb676aede2f8b8a2 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Thu, 28 Nov 2024 14:53:59 -0500 Subject: [PATCH 1/3] workspace: add/fix 1.83 lints (#353) * 1.83 `cargo clippy --fix` * fix type complexity, add `DbResult` * clippy fix * redb fix * Update consensus/context/src/difficulty.rs Co-authored-by: hinto-janai --------- Co-authored-by: Boog900 --- Cargo.toml | 4 ++ .../cuprate-json-rpc/benches/response.rs | 2 +- binaries/cuprated/src/statics.rs | 2 +- consensus/context/src/difficulty.rs | 4 +- consensus/fast-sync/src/create.rs | 4 +- net/epee-encoding/src/macros.rs | 6 +- p2p/async-buffer/src/lib.rs | 4 +- p2p/dandelion-tower/src/tests/mod.rs | 1 + p2p/p2p-core/src/lib.rs | 1 - .../src/block_downloader/download_batch.rs | 4 +- p2p/p2p/src/broadcast.rs | 1 + pruning/src/lib.rs | 2 +- rpc/interface/src/route/bin.rs | 2 +- rpc/interface/src/route/other.rs | 2 +- rpc/types/src/json.rs | 2 +- rpc/types/src/macros.rs | 10 +-- rpc/types/src/misc/misc.rs | 2 +- rpc/types/src/other.rs | 2 +- storage/blockchain/src/ops/alt_block/block.rs | 17 +++-- storage/blockchain/src/ops/alt_block/chain.rs | 6 +- storage/blockchain/src/ops/alt_block/tx.rs | 6 +- storage/blockchain/src/ops/block.rs | 23 +++---- storage/blockchain/src/ops/blockchain.rs | 10 ++- storage/blockchain/src/ops/key_image.rs | 8 +-- storage/blockchain/src/ops/macros.rs | 2 +- storage/blockchain/src/ops/output.rs | 29 ++++----- storage/blockchain/src/ops/property.rs | 7 +-- storage/blockchain/src/ops/tx.rs | 22 +++---- storage/blockchain/src/service/read.rs | 14 ++--- storage/blockchain/src/service/types.rs | 4 +- storage/blockchain/src/service/write.rs | 4 +- storage/database/src/backend/heed/database.rs | 63 ++++++++----------- storage/database/src/backend/heed/env.rs | 19 +++--- .../database/src/backend/heed/transaction.rs | 10 +-- storage/database/src/backend/redb/database.rs | 57 ++++++++--------- storage/database/src/backend/redb/env.rs | 19 +++--- 
storage/database/src/backend/redb/storable.rs | 10 ++- .../database/src/backend/redb/transaction.rs | 8 +-- storage/database/src/config/sync_mode.rs | 1 - storage/database/src/database.rs | 44 +++++++------ storage/database/src/env.rs | 39 ++++++------ storage/database/src/error.rs | 3 + storage/database/src/lib.rs | 2 +- storage/database/src/table.rs | 1 - storage/database/src/tables.rs | 16 ++--- storage/database/src/transaction.rs | 10 +-- storage/service/src/service/read.rs | 10 +-- storage/service/src/service/write.rs | 15 +++-- storage/txpool/src/ops/key_images.rs | 4 +- storage/txpool/src/ops/tx_read.rs | 6 +- storage/txpool/src/ops/tx_write.rs | 7 +-- storage/txpool/src/service/read.rs | 4 +- storage/txpool/src/service/types.rs | 4 +- storage/txpool/src/service/write.rs | 20 +++--- test-utils/src/data/constants.rs | 2 +- test-utils/src/rpc/data/macros.rs | 4 +- 56 files changed, 269 insertions(+), 316 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a507631e..3cc3ab18 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -279,6 +279,9 @@ rest_pat_in_fully_bound_structs = "deny" redundant_type_annotations = "deny" infinite_loop = "deny" zero_repeat_side_effects = "deny" +non_zero_suggestions = "deny" +manual_is_power_of_two = "deny" +used_underscore_items = "deny" # Warm cast_possible_truncation = "deny" @@ -371,6 +374,7 @@ unused_lifetimes = "deny" unused_macro_rules = "deny" ambiguous_glob_imports = "deny" unused_unsafe = "deny" +rust_2024_compatibility = "deny" # Warm let_underscore = { level = "deny", priority = -1 } diff --git a/benches/criterion/cuprate-json-rpc/benches/response.rs b/benches/criterion/cuprate-json-rpc/benches/response.rs index 908a9f49..890958ec 100644 --- a/benches/criterion/cuprate-json-rpc/benches/response.rs +++ b/benches/criterion/cuprate-json-rpc/benches/response.rs @@ -82,7 +82,7 @@ impl_from_str_benchmark! { macro_rules! 
impl_to_string_pretty_benchmark { ( $( - $fn_name:ident => $request_constructor:expr, + $fn_name:ident => $request_constructor:expr_2021, )* ) => { $( diff --git a/binaries/cuprated/src/statics.rs b/binaries/cuprated/src/statics.rs index 9839608f..2d7338da 100644 --- a/binaries/cuprated/src/statics.rs +++ b/binaries/cuprated/src/statics.rs @@ -13,7 +13,7 @@ use std::{ macro_rules! define_init_lazylock_statics { ($( $( #[$attr:meta] )* - $name:ident: $t:ty = $init_fn:expr; + $name:ident: $t:ty = $init_fn:expr_2021; )*) => { /// Initialize global static `LazyLock` data. pub fn init_lazylock_statics() { diff --git a/consensus/context/src/difficulty.rs b/consensus/context/src/difficulty.rs index 1b61eb9e..3bbcb059 100644 --- a/consensus/context/src/difficulty.rs +++ b/consensus/context/src/difficulty.rs @@ -328,8 +328,8 @@ fn next_difficulty( time_span = 1; } - // TODO: do checked operations here and unwrap so we don't silently overflow? - (windowed_work * u128::from(hf.block_time().as_secs()) + time_span - 1) / time_span + // TODO: do `checked_mul` here and unwrap so we don't silently overflow? + (windowed_work * u128::from(hf.block_time().as_secs())).div_ceil(time_span) } /// Get the start and end of the window to calculate difficulty. 
diff --git a/consensus/fast-sync/src/create.rs b/consensus/fast-sync/src/create.rs index 8c47b8e5..9410f60a 100644 --- a/consensus/fast-sync/src/create.rs +++ b/consensus/fast-sync/src/create.rs @@ -9,7 +9,7 @@ use clap::Parser; use tower::{Service, ServiceExt}; use cuprate_blockchain::{ - config::ConfigBuilder, cuprate_database::RuntimeError, service::BlockchainReadHandle, + config::ConfigBuilder, cuprate_database::DbResult, service::BlockchainReadHandle, }; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse}, @@ -23,7 +23,7 @@ const BATCH_SIZE: usize = 512; async fn read_batch( handle: &mut BlockchainReadHandle, height_from: usize, -) -> Result, RuntimeError> { +) -> DbResult> { let mut block_ids = Vec::::with_capacity(BATCH_SIZE); for height in height_from..(height_from + BATCH_SIZE) { diff --git a/net/epee-encoding/src/macros.rs b/net/epee-encoding/src/macros.rs index 38dcc45d..bb1afefd 100644 --- a/net/epee-encoding/src/macros.rs +++ b/net/epee-encoding/src/macros.rs @@ -76,14 +76,14 @@ macro_rules! epee_object { // All this does is return the second (right) arg if present otherwise the left is returned. ( @internal_try_right_then_left - $a:expr, $b:expr + $a:expr_2021, $b:expr_2021 ) => { $b }; ( @internal_try_right_then_left - $a:expr, + $a:expr_2021, ) => { $a }; @@ -122,7 +122,7 @@ macro_rules! epee_object { // ------------------------------------------------------------------------ Entry Point ( $obj:ident, - $($field: ident $(($alt_name: literal))?: $ty:ty $(as $ty_as:ty )? $(= $default:expr)? $(=> $read_fn:expr, $write_fn:expr, $should_write_fn:expr)?, )* + $($field: ident $(($alt_name: literal))?: $ty:ty $(as $ty_as:ty )? $(= $default:expr_2021)? 
$(=> $read_fn:expr_2021, $write_fn:expr_2021, $should_write_fn:expr_2021)?, )* $(!flatten: $flat_field: ident: $flat_ty:ty ,)* ) => { diff --git a/p2p/async-buffer/src/lib.rs b/p2p/async-buffer/src/lib.rs index 0e2ced24..8174481e 100644 --- a/p2p/async-buffer/src/lib.rs +++ b/p2p/async-buffer/src/lib.rs @@ -157,7 +157,7 @@ pub struct BufferSinkSend<'a, T> { item: Option, } -impl<'a, T> Future for BufferSinkSend<'a, T> { +impl Future for BufferSinkSend<'_, T> { type Output = Result<(), BufferError>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { @@ -183,7 +183,7 @@ pub struct BufferSinkReady<'a, T> { size_needed: usize, } -impl<'a, T> Future for BufferSinkReady<'a, T> { +impl Future for BufferSinkReady<'_, T> { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/p2p/dandelion-tower/src/tests/mod.rs b/p2p/dandelion-tower/src/tests/mod.rs index 601ee252..ae9bee1e 100644 --- a/p2p/dandelion-tower/src/tests/mod.rs +++ b/p2p/dandelion-tower/src/tests/mod.rs @@ -12,6 +12,7 @@ use crate::{ OutboundPeer, State, }; +#[expect(clippy::type_complexity)] pub(crate) fn mock_discover_svc() -> ( impl Stream< Item = Result< diff --git a/p2p/p2p-core/src/lib.rs b/p2p/p2p-core/src/lib.rs index 26e10686..e5746932 100644 --- a/p2p/p2p-core/src/lib.rs +++ b/p2p/p2p-core/src/lib.rs @@ -121,7 +121,6 @@ pub trait NetZoneAddress: /// /// - TODO: IP zone banning? /// - TODO: rename this to Host. - type BanID: Debug + Hash + Eq + Clone + Copy + Send + 'static; /// Changes the port of this address to `port`. diff --git a/p2p/p2p/src/block_downloader/download_batch.rs b/p2p/p2p/src/block_downloader/download_batch.rs index ef621ce8..7b6e4c96 100644 --- a/p2p/p2p/src/block_downloader/download_batch.rs +++ b/p2p/p2p/src/block_downloader/download_batch.rs @@ -146,9 +146,9 @@ fn deserialize_batch( // Check the height lines up as expected. // This must happen after the hash check. 
- if !block + if block .number() - .is_some_and(|height| height == expected_height) + .is_none_or(|height| height != expected_height) { tracing::warn!( "Invalid chain, expected height: {expected_height}, got height: {:?}", diff --git a/p2p/p2p/src/broadcast.rs b/p2p/p2p/src/broadcast.rs index fc73efbc..38aba323 100644 --- a/p2p/p2p/src/broadcast.rs +++ b/p2p/p2p/src/broadcast.rs @@ -57,6 +57,7 @@ impl Default for BroadcastConfig { /// - The [`BroadcastSvc`] /// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **outbound** peers. /// - A function that takes in [`InternalPeerID`]s and produces [`BroadcastMessageStream`]s to give to **inbound** peers. +#[expect(clippy::type_complexity)] pub(crate) fn init_broadcast_channels( config: BroadcastConfig, ) -> ( diff --git a/pruning/src/lib.rs b/pruning/src/lib.rs index cd31598a..e49aedb1 100644 --- a/pruning/src/lib.rs +++ b/pruning/src/lib.rs @@ -327,7 +327,7 @@ impl DecompressedPruningSeed { /// /// This function will also error if `block_height` > `blockchain_height` /// - pub fn get_next_unpruned_block( + pub const fn get_next_unpruned_block( &self, block_height: usize, blockchain_height: usize, diff --git a/rpc/interface/src/route/bin.rs b/rpc/interface/src/route/bin.rs index f7e3a01c..2fd9963c 100644 --- a/rpc/interface/src/route/bin.rs +++ b/rpc/interface/src/route/bin.rs @@ -68,7 +68,7 @@ macro_rules! generate_endpoints_with_no_input { /// - [`generate_endpoints_with_input`] /// - [`generate_endpoints_with_no_input`] macro_rules! generate_endpoints_inner { - ($variant:ident, $handler:ident, $request:expr) => { + ($variant:ident, $handler:ident, $request:expr_2021) => { paste::paste! { { // Check if restricted. diff --git a/rpc/interface/src/route/other.rs b/rpc/interface/src/route/other.rs index 3ff84487..19a58d93 100644 --- a/rpc/interface/src/route/other.rs +++ b/rpc/interface/src/route/other.rs @@ -71,7 +71,7 @@ macro_rules! 
generate_endpoints_with_no_input { /// - [`generate_endpoints_with_input`] /// - [`generate_endpoints_with_no_input`] macro_rules! generate_endpoints_inner { - ($variant:ident, $handler:ident, $request:expr) => { + ($variant:ident, $handler:ident, $request:expr_2021) => { paste::paste! { { // Check if restricted. diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index cb55e64a..a454cab4 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -37,7 +37,7 @@ macro_rules! serde_doc_test { ( // `const` string from `cuprate_test_utils::rpc::data` // v - $cuprate_test_utils_rpc_const:ident => $expected:expr + $cuprate_test_utils_rpc_const:ident => $expected:expr_2021 // ^ // Expected value as an expression ) => { diff --git a/rpc/types/src/macros.rs b/rpc/types/src/macros.rs index 85f4272e..db1b5d8d 100644 --- a/rpc/types/src/macros.rs +++ b/rpc/types/src/macros.rs @@ -77,7 +77,7 @@ macro_rules! define_request_and_response { $( #[$request_field_attr:meta] )* // Field attribute. $request_field:ident: $request_field_type:ty // field_name: field type $(as $request_field_type_as:ty)? // (optional) alternative type (de)serialization - $(= $request_field_type_default:expr, $request_field_type_default_string:literal)?, // (optional) default value + $(= $request_field_type_default:expr_2021, $request_field_type_default_string:literal)?, // (optional) default value )* }, @@ -89,7 +89,7 @@ macro_rules! define_request_and_response { $( #[$response_field_attr:meta] )* $response_field:ident: $response_field_type:ty $(as $response_field_type_as:ty)? - $(= $response_field_type_default:expr, $response_field_type_default_string:literal)?, + $(= $response_field_type_default:expr_2021, $response_field_type_default_string:literal)?, )* } ) => { paste::paste! { @@ -229,7 +229,7 @@ macro_rules! define_request { // field_name: FieldType $field:ident: $field_type:ty $(as $field_as:ty)? 
- $(= $field_default:expr, $field_default_string:literal)?, + $(= $field_default:expr_2021, $field_default_string:literal)?, // The $field_default is an optional extra token that represents // a default value to pass to [`cuprate_epee_encoding::epee_object`], // see it for usage. @@ -286,7 +286,7 @@ macro_rules! define_response { $( #[$field_attr:meta] )* $field:ident: $field_type:ty $(as $field_as:ty)? - $(= $field_default:expr, $field_default_string:literal)?, + $(= $field_default:expr_2021, $field_default_string:literal)?, )* } ) => { @@ -323,7 +323,7 @@ macro_rules! define_response { $( #[$field_attr:meta] )* $field:ident: $field_type:ty $(as $field_as:ty)? - $(= $field_default:expr, $field_default_string:literal)?, + $(= $field_default:expr_2021, $field_default_string:literal)?, )* } ) => { diff --git a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs index 67ec756d..2d88f2a4 100644 --- a/rpc/types/src/misc/misc.rs +++ b/rpc/types/src/misc/misc.rs @@ -37,7 +37,7 @@ macro_rules! define_struct_and_impl_epee { $( $( #[$field_attr:meta] )* // Field attributes // Field name => the type => optional `epee_object` default value. - $field_name:ident: $field_type:ty $(= $field_default:expr)?, + $field_name:ident: $field_type:ty $(= $field_default:expr_2021)?, )* } ) => { diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index 3694041c..d5cbe82b 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -65,7 +65,7 @@ macro_rules! 
serde_doc_test { ( // `const` string from `cuprate_test_utils::rpc::data` // v - $cuprate_test_utils_rpc_const:ident => $expected:expr + $cuprate_test_utils_rpc_const:ident => $expected:expr_2021 // ^ // Expected value as an expression ) => { diff --git a/storage/blockchain/src/ops/alt_block/block.rs b/storage/blockchain/src/ops/alt_block/block.rs index 6bd01cb3..480bd7d8 100644 --- a/storage/blockchain/src/ops/alt_block/block.rs +++ b/storage/blockchain/src/ops/alt_block/block.rs @@ -1,7 +1,7 @@ use bytemuck::TransparentWrapper; use monero_serai::block::{Block, BlockHeader}; -use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec}; +use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, StorableVec}; use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits}; use cuprate_types::{AltBlockInformation, Chain, ChainId, ExtendedBlockHeader, HardFork}; @@ -21,7 +21,7 @@ use crate::{ pub fn flush_alt_blocks<'a, E: cuprate_database::EnvInner<'a>>( env_inner: &E, tx_rw: &mut E::Rw<'_>, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { use crate::tables::{ AltBlockBlobs, AltBlockHeights, AltBlocksInfo, AltChainInfos, AltTransactionBlobs, AltTransactionInfos, @@ -47,10 +47,7 @@ pub fn flush_alt_blocks<'a, E: cuprate_database::EnvInner<'a>>( /// - `alt_block.height` is == `0` /// - `alt_block.txs.len()` != `alt_block.block.transactions.len()` /// -pub fn add_alt_block( - alt_block: &AltBlockInformation, - tables: &mut impl TablesMut, -) -> Result<(), RuntimeError> { +pub fn add_alt_block(alt_block: &AltBlockInformation, tables: &mut impl TablesMut) -> DbResult<()> { let alt_block_height = AltBlockHeight { chain_id: alt_block.chain_id.into(), height: alt_block.height, @@ -100,7 +97,7 @@ pub fn add_alt_block( pub fn get_alt_block( alt_block_height: &AltBlockHeight, tables: &impl Tables, -) -> Result { +) -> DbResult { let block_info = tables.alt_blocks_info().get(alt_block_height)?; let block_blob = 
tables.alt_block_blobs().get(alt_block_height)?.0; @@ -111,7 +108,7 @@ pub fn get_alt_block( .transactions .iter() .map(|tx_hash| get_alt_transaction(tx_hash, tables)) - .collect::>()?; + .collect::>()?; Ok(AltBlockInformation { block, @@ -141,7 +138,7 @@ pub fn get_alt_block_hash( block_height: &BlockHeight, alt_chain: ChainId, tables: &impl Tables, -) -> Result { +) -> DbResult { let alt_chains = tables.alt_chain_infos(); // First find what [`ChainId`] this block would be stored under. @@ -188,7 +185,7 @@ pub fn get_alt_block_hash( pub fn get_alt_block_extended_header_from_height( height: &AltBlockHeight, table: &impl Tables, -) -> Result { +) -> DbResult { let block_info = table.alt_blocks_info().get(height)?; let block_blob = table.alt_block_blobs().get(height)?.0; diff --git a/storage/blockchain/src/ops/alt_block/chain.rs b/storage/blockchain/src/ops/alt_block/chain.rs index 5b5f3cb1..676fd7f2 100644 --- a/storage/blockchain/src/ops/alt_block/chain.rs +++ b/storage/blockchain/src/ops/alt_block/chain.rs @@ -1,6 +1,6 @@ use std::cmp::{max, min}; -use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError}; +use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, RuntimeError}; use cuprate_types::{Chain, ChainId}; use crate::{ @@ -21,7 +21,7 @@ pub fn update_alt_chain_info( alt_block_height: &AltBlockHeight, prev_hash: &BlockHash, tables: &mut impl TablesMut, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { let parent_chain = match tables.alt_block_heights().get(prev_hash) { Ok(alt_parent_height) => Chain::Alt(alt_parent_height.chain_id.into()), Err(RuntimeError::KeyNotFound) => Chain::Main, @@ -74,7 +74,7 @@ pub fn get_alt_chain_history_ranges( range: std::ops::Range, alt_chain: ChainId, alt_chain_infos: &impl DatabaseRo, -) -> Result)>, RuntimeError> { +) -> DbResult)>> { let mut ranges = Vec::with_capacity(5); let mut i = range.end; diff --git a/storage/blockchain/src/ops/alt_block/tx.rs b/storage/blockchain/src/ops/alt_block/tx.rs index 
4185c6cb..b410fed9 100644 --- a/storage/blockchain/src/ops/alt_block/tx.rs +++ b/storage/blockchain/src/ops/alt_block/tx.rs @@ -1,7 +1,7 @@ use bytemuck::TransparentWrapper; use monero_serai::transaction::Transaction; -use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec}; +use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, RuntimeError, StorableVec}; use cuprate_types::VerifiedTransactionInformation; use crate::{ @@ -22,7 +22,7 @@ use crate::{ pub fn add_alt_transaction_blob( tx: &VerifiedTransactionInformation, tables: &mut impl TablesMut, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { tables.alt_transaction_infos_mut().put( &tx.tx_hash, &AltTransactionInfo { @@ -51,7 +51,7 @@ pub fn add_alt_transaction_blob( pub fn get_alt_transaction( tx_hash: &TxHash, tables: &impl Tables, -) -> Result { +) -> DbResult { let tx_info = tables.alt_transaction_infos().get(tx_hash)?; let tx_blob = match tables.alt_transaction_blobs().get(tx_hash) { diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index cc5cb807..5e541878 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -8,7 +8,7 @@ use monero_serai::{ }; use cuprate_database::{ - RuntimeError, StorableVec, {DatabaseRo, DatabaseRw}, + DbResult, RuntimeError, StorableVec, {DatabaseRo, DatabaseRw}, }; use cuprate_helper::{ map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits}, @@ -44,10 +44,7 @@ use crate::{ /// - `block.height > u32::MAX` (not normally possible) /// - `block.height` is != [`chain_height`] // no inline, too big. -pub fn add_block( - block: &VerifiedBlockInformation, - tables: &mut impl TablesMut, -) -> Result<(), RuntimeError> { +pub fn add_block(block: &VerifiedBlockInformation, tables: &mut impl TablesMut) -> DbResult<()> { //------------------------------------------------------ Check preconditions first // Cast height to `u32` for storage (handled at top of function). 
@@ -153,7 +150,7 @@ pub fn add_block( pub fn pop_block( move_to_alt_chain: Option, tables: &mut impl TablesMut, -) -> Result<(BlockHeight, BlockHash, Block), RuntimeError> { +) -> DbResult<(BlockHeight, BlockHash, Block)> { //------------------------------------------------------ Block Info // Remove block data from tables. let (block_height, block_info) = tables.block_infos_mut().pop_last()?; @@ -195,7 +192,7 @@ pub fn pop_block( tx, }) }) - .collect::, RuntimeError>>()?; + .collect::>>()?; alt_block::add_alt_block( &AltBlockInformation { @@ -239,7 +236,7 @@ pub fn pop_block( pub fn get_block_extended_header( block_hash: &BlockHash, tables: &impl Tables, -) -> Result { +) -> DbResult { get_block_extended_header_from_height(&tables.block_heights().get(block_hash)?, tables) } @@ -253,7 +250,7 @@ pub fn get_block_extended_header( pub fn get_block_extended_header_from_height( block_height: &BlockHeight, tables: &impl Tables, -) -> Result { +) -> DbResult { let block_info = tables.block_infos().get(block_height)?; let block_header_blob = tables.block_header_blobs().get(block_height)?.0; let block_header = BlockHeader::read(&mut block_header_blob.as_slice())?; @@ -279,7 +276,7 @@ pub fn get_block_extended_header_from_height( #[inline] pub fn get_block_extended_header_top( tables: &impl Tables, -) -> Result<(ExtendedBlockHeader, BlockHeight), RuntimeError> { +) -> DbResult<(ExtendedBlockHeader, BlockHeight)> { let height = chain_height(tables.block_heights())?.saturating_sub(1); let header = get_block_extended_header_from_height(&height, tables)?; Ok((header, height)) @@ -292,7 +289,7 @@ pub fn get_block_extended_header_top( pub fn get_block_info( block_height: &BlockHeight, table_block_infos: &impl DatabaseRo, -) -> Result { +) -> DbResult { table_block_infos.get(block_height) } @@ -302,7 +299,7 @@ pub fn get_block_info( pub fn get_block_height( block_hash: &BlockHash, table_block_heights: &impl DatabaseRo, -) -> Result { +) -> DbResult { 
table_block_heights.get(block_hash) } @@ -317,7 +314,7 @@ pub fn get_block_height( pub fn block_exists( block_hash: &BlockHash, table_block_heights: &impl DatabaseRo, -) -> Result { +) -> DbResult { table_block_heights.contains(block_hash) } diff --git a/storage/blockchain/src/ops/blockchain.rs b/storage/blockchain/src/ops/blockchain.rs index 04f8b26d..71633635 100644 --- a/storage/blockchain/src/ops/blockchain.rs +++ b/storage/blockchain/src/ops/blockchain.rs @@ -1,7 +1,7 @@ //! Blockchain functions - chain height, generated coins, etc. //---------------------------------------------------------------------------------------------------- Import -use cuprate_database::{DatabaseRo, RuntimeError}; +use cuprate_database::{DatabaseRo, DbResult, RuntimeError}; use crate::{ ops::macros::doc_error, @@ -22,9 +22,7 @@ use crate::{ /// So the height of a new block would be `chain_height()`. #[doc = doc_error!()] #[inline] -pub fn chain_height( - table_block_heights: &impl DatabaseRo, -) -> Result { +pub fn chain_height(table_block_heights: &impl DatabaseRo) -> DbResult { #[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")] table_block_heights.len().map(|height| height as usize) } @@ -45,7 +43,7 @@ pub fn chain_height( #[inline] pub fn top_block_height( table_block_heights: &impl DatabaseRo, -) -> Result { +) -> DbResult { match table_block_heights.len()? 
{ 0 => Err(RuntimeError::KeyNotFound), #[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")] @@ -70,7 +68,7 @@ pub fn top_block_height( pub fn cumulative_generated_coins( block_height: &BlockHeight, table_block_infos: &impl DatabaseRo, -) -> Result { +) -> DbResult { match table_block_infos.get(block_height) { Ok(block_info) => Ok(block_info.cumulative_generated_coins), Err(RuntimeError::KeyNotFound) if block_height == &0 => Ok(0), diff --git a/storage/blockchain/src/ops/key_image.rs b/storage/blockchain/src/ops/key_image.rs index 19444d6b..5f179129 100644 --- a/storage/blockchain/src/ops/key_image.rs +++ b/storage/blockchain/src/ops/key_image.rs @@ -1,7 +1,7 @@ //! Key image functions. //---------------------------------------------------------------------------------------------------- Import -use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError}; +use cuprate_database::{DatabaseRo, DatabaseRw, DbResult}; use crate::{ ops::macros::{doc_add_block_inner_invariant, doc_error}, @@ -17,7 +17,7 @@ use crate::{ pub fn add_key_image( key_image: &KeyImage, table_key_images: &mut impl DatabaseRw, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { table_key_images.put(key_image, &()) } @@ -28,7 +28,7 @@ pub fn add_key_image( pub fn remove_key_image( key_image: &KeyImage, table_key_images: &mut impl DatabaseRw, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { table_key_images.delete(key_image) } @@ -38,7 +38,7 @@ pub fn remove_key_image( pub fn key_image_exists( key_image: &KeyImage, table_key_images: &impl DatabaseRo, -) -> Result { +) -> DbResult { table_key_images.contains(key_image) } diff --git a/storage/blockchain/src/ops/macros.rs b/storage/blockchain/src/ops/macros.rs index 18ec5068..9c6ef7dd 100644 --- a/storage/blockchain/src/ops/macros.rs +++ b/storage/blockchain/src/ops/macros.rs @@ -8,7 +8,7 @@ macro_rules! 
doc_error { () => { r#"# Errors -This function returns [`RuntimeError::KeyNotFound`] if the input (if applicable) doesn't exist or other `RuntimeError`'s on database errors."# +This function returns [`cuprate_database::RuntimeError::KeyNotFound`] if the input (if applicable) doesn't exist or other `RuntimeError`'s on database errors."# }; } pub(super) use doc_error; diff --git a/storage/blockchain/src/ops/output.rs b/storage/blockchain/src/ops/output.rs index 14c209ab..96d94bb1 100644 --- a/storage/blockchain/src/ops/output.rs +++ b/storage/blockchain/src/ops/output.rs @@ -5,7 +5,7 @@ use curve25519_dalek::edwards::CompressedEdwardsY; use monero_serai::transaction::Timelock; use cuprate_database::{ - RuntimeError, {DatabaseRo, DatabaseRw}, + DbResult, RuntimeError, {DatabaseRo, DatabaseRw}, }; use cuprate_helper::crypto::compute_zero_commitment; use cuprate_helper::map::u64_to_timelock; @@ -30,7 +30,7 @@ pub fn add_output( amount: Amount, output: &Output, tables: &mut impl TablesMut, -) -> Result { +) -> DbResult { // FIXME: this would be much better expressed with a // `btree_map::Entry`-like API, fix `trait DatabaseRw`. let num_outputs = match tables.num_outputs().get(&amount) { @@ -61,7 +61,7 @@ pub fn add_output( pub fn remove_output( pre_rct_output_id: &PreRctOutputId, tables: &mut impl TablesMut, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { // Decrement the amount index by 1, or delete the entry out-right. // FIXME: this would be much better expressed with a // `btree_map::Entry`-like API, fix `trait DatabaseRw`. @@ -86,7 +86,7 @@ pub fn remove_output( pub fn get_output( pre_rct_output_id: &PreRctOutputId, table_outputs: &impl DatabaseRo, -) -> Result { +) -> DbResult { table_outputs.get(pre_rct_output_id) } @@ -95,7 +95,7 @@ pub fn get_output( /// This returns the amount of pre-RCT outputs currently stored. 
#[doc = doc_error!()] #[inline] -pub fn get_num_outputs(table_outputs: &impl DatabaseRo) -> Result { +pub fn get_num_outputs(table_outputs: &impl DatabaseRo) -> DbResult { table_outputs.len() } @@ -110,7 +110,7 @@ pub fn get_num_outputs(table_outputs: &impl DatabaseRo) -> Result, -) -> Result { +) -> DbResult { let amount_index = get_rct_num_outputs(table_rct_outputs)?; table_rct_outputs.put(&amount_index, rct_output)?; Ok(amount_index) @@ -123,7 +123,7 @@ pub fn add_rct_output( pub fn remove_rct_output( amount_index: &AmountIndex, table_rct_outputs: &mut impl DatabaseRw, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { table_rct_outputs.delete(amount_index) } @@ -133,7 +133,7 @@ pub fn remove_rct_output( pub fn get_rct_output( amount_index: &AmountIndex, table_rct_outputs: &impl DatabaseRo, -) -> Result { +) -> DbResult { table_rct_outputs.get(amount_index) } @@ -142,9 +142,7 @@ pub fn get_rct_output( /// This returns the amount of RCT outputs currently stored. #[doc = doc_error!()] #[inline] -pub fn get_rct_num_outputs( - table_rct_outputs: &impl DatabaseRo, -) -> Result { +pub fn get_rct_num_outputs(table_rct_outputs: &impl DatabaseRo) -> DbResult { table_rct_outputs.len() } @@ -155,7 +153,7 @@ pub fn output_to_output_on_chain( output: &Output, amount: Amount, table_tx_unlock_time: &impl DatabaseRo, -) -> Result { +) -> DbResult { let commitment = compute_zero_commitment(amount); let time_lock = if output @@ -191,7 +189,7 @@ pub fn output_to_output_on_chain( pub fn rct_output_to_output_on_chain( rct_output: &RctOutput, table_tx_unlock_time: &impl DatabaseRo, -) -> Result { +) -> DbResult { // INVARIANT: Commitments stored are valid when stored by the database. let commitment = CompressedEdwardsY::from_slice(&rct_output.commitment) .unwrap() @@ -223,10 +221,7 @@ pub fn rct_output_to_output_on_chain( /// /// Note that this still support RCT outputs, in that case, [`PreRctOutputId::amount`] should be `0`. 
#[doc = doc_error!()] -pub fn id_to_output_on_chain( - id: &PreRctOutputId, - tables: &impl Tables, -) -> Result { +pub fn id_to_output_on_chain(id: &PreRctOutputId, tables: &impl Tables) -> DbResult { // v2 transactions. if id.amount == 0 { let rct_output = get_rct_output(&id.amount_index, tables.rct_outputs())?; diff --git a/storage/blockchain/src/ops/property.rs b/storage/blockchain/src/ops/property.rs index 7810000a..3dbb9509 100644 --- a/storage/blockchain/src/ops/property.rs +++ b/storage/blockchain/src/ops/property.rs @@ -3,10 +3,9 @@ //! SOMEDAY: the database `properties` table is not yet implemented. //---------------------------------------------------------------------------------------------------- Import +use cuprate_database::DbResult; use cuprate_pruning::PruningSeed; -use cuprate_database::RuntimeError; - use crate::ops::macros::doc_error; //---------------------------------------------------------------------------------------------------- Free Functions @@ -20,7 +19,7 @@ use crate::ops::macros::doc_error; /// // SOMEDAY /// ``` #[inline] -pub const fn get_blockchain_pruning_seed() -> Result { +pub const fn get_blockchain_pruning_seed() -> DbResult { // SOMEDAY: impl pruning. // We need a DB properties table. Ok(PruningSeed::NotPruned) @@ -36,7 +35,7 @@ pub const fn get_blockchain_pruning_seed() -> Result /// // SOMEDAY /// ``` #[inline] -pub const fn db_version() -> Result { +pub const fn db_version() -> DbResult { // SOMEDAY: We need a DB properties table. 
Ok(crate::constants::DATABASE_VERSION) } diff --git a/storage/blockchain/src/ops/tx.rs b/storage/blockchain/src/ops/tx.rs index 5a60ad53..0312f215 100644 --- a/storage/blockchain/src/ops/tx.rs +++ b/storage/blockchain/src/ops/tx.rs @@ -4,7 +4,7 @@ use bytemuck::TransparentWrapper; use monero_serai::transaction::{Input, Timelock, Transaction}; -use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec}; +use cuprate_database::{DatabaseRo, DatabaseRw, DbResult, RuntimeError, StorableVec}; use cuprate_helper::crypto::compute_zero_commitment; use crate::{ @@ -52,7 +52,7 @@ pub fn add_tx( tx_hash: &TxHash, block_height: &BlockHeight, tables: &mut impl TablesMut, -) -> Result { +) -> DbResult { let tx_id = get_num_tx(tables.tx_ids_mut())?; //------------------------------------------------------ Transaction data @@ -129,7 +129,7 @@ pub fn add_tx( )? .amount_index) }) - .collect::, RuntimeError>>()?, + .collect::>>()?, Transaction::V2 { prefix, proofs } => prefix .outputs .iter() @@ -186,10 +186,7 @@ pub fn add_tx( /// #[doc = doc_error!()] #[inline] -pub fn remove_tx( - tx_hash: &TxHash, - tables: &mut impl TablesMut, -) -> Result<(TxId, Transaction), RuntimeError> { +pub fn remove_tx(tx_hash: &TxHash, tables: &mut impl TablesMut) -> DbResult<(TxId, Transaction)> { //------------------------------------------------------ Transaction data let tx_id = tables.tx_ids_mut().take(tx_hash)?; let tx_blob = tables.tx_blobs_mut().take(&tx_id)?; @@ -267,7 +264,7 @@ pub fn get_tx( tx_hash: &TxHash, table_tx_ids: &impl DatabaseRo, table_tx_blobs: &impl DatabaseRo, -) -> Result { +) -> DbResult { get_tx_from_id(&table_tx_ids.get(tx_hash)?, table_tx_blobs) } @@ -277,7 +274,7 @@ pub fn get_tx( pub fn get_tx_from_id( tx_id: &TxId, table_tx_blobs: &impl DatabaseRo, -) -> Result { +) -> DbResult { let tx_blob = table_tx_blobs.get(tx_id)?.0; Ok(Transaction::read(&mut tx_blob.as_slice())?) 
} @@ -294,7 +291,7 @@ pub fn get_tx_from_id( /// - etc #[doc = doc_error!()] #[inline] -pub fn get_num_tx(table_tx_ids: &impl DatabaseRo) -> Result { +pub fn get_num_tx(table_tx_ids: &impl DatabaseRo) -> DbResult { table_tx_ids.len() } @@ -304,10 +301,7 @@ pub fn get_num_tx(table_tx_ids: &impl DatabaseRo) -> Result, -) -> Result { +pub fn tx_exists(tx_hash: &TxHash, table_tx_ids: &impl DatabaseRo) -> DbResult { table_tx_ids.contains(tx_hash) } diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index e3c01802..76577590 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -21,7 +21,7 @@ use rayon::{ }; use thread_local::ThreadLocal; -use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError}; +use cuprate_database::{ConcreteEnv, DatabaseRo, DbResult, Env, EnvInner, RuntimeError}; use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads}; use cuprate_helper::map::combine_low_high_bits_to_u128; use cuprate_types::{ @@ -305,7 +305,7 @@ fn block_extended_header_in_range( let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); get_block_extended_header_from_height(&block_height, tables) }) - .collect::, RuntimeError>>()?, + .collect::>>()?, Chain::Alt(chain_id) => { let ranges = { let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; @@ -381,7 +381,7 @@ fn outputs(env: &ConcreteEnv, outputs: HashMap>) -> // The 2nd mapping function. // This is pulled out from the below `map()` for readability. 
- let inner_map = |amount, amount_index| -> Result<(AmountIndex, OutputOnChain), RuntimeError> { + let inner_map = |amount, amount_index| -> DbResult<(AmountIndex, OutputOnChain)> { let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); @@ -404,10 +404,10 @@ fn outputs(env: &ConcreteEnv, outputs: HashMap>) -> amount_index_set .into_par_iter() .map(|amount_index| inner_map(amount, amount_index)) - .collect::, RuntimeError>>()?, + .collect::>>()?, )) }) - .collect::>, RuntimeError>>()?; + .collect::>>>()?; Ok(BlockchainResponse::Outputs(map)) } @@ -456,7 +456,7 @@ fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec) -> Respon } } }) - .collect::, RuntimeError>>()?; + .collect::>>()?; Ok(BlockchainResponse::NumberOutputsWithAmount(map)) } @@ -522,7 +522,7 @@ fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult { .map(compact_history_index_to_height_offset::) .map_while(|i| top_block_height.checked_sub(i)) .map(|height| Ok(get_block_info(&height, &table_block_infos)?.block_hash)) - .collect::, RuntimeError>>()?; + .collect::>>()?; if compact_history_genesis_not_included::(top_block_height) { block_ids.push(get_block_info(&0, &table_block_infos)?.block_hash); diff --git a/storage/blockchain/src/service/types.rs b/storage/blockchain/src/service/types.rs index 9cd86e9c..190e9f6f 100644 --- a/storage/blockchain/src/service/types.rs +++ b/storage/blockchain/src/service/types.rs @@ -1,7 +1,7 @@ //! Database service type aliases. //---------------------------------------------------------------------------------------------------- Use -use cuprate_database::RuntimeError; +use cuprate_database::DbResult; use cuprate_database_service::{DatabaseReadService, DatabaseWriteHandle}; use cuprate_types::blockchain::{ BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest, @@ -11,7 +11,7 @@ use cuprate_types::blockchain::{ /// The actual type of the response. 
/// /// Either our [`BlockchainResponse`], or a database error occurred. -pub(super) type ResponseResult = Result; +pub(super) type ResponseResult = DbResult; /// The blockchain database write service. pub type BlockchainWriteHandle = DatabaseWriteHandle; diff --git a/storage/blockchain/src/service/write.rs b/storage/blockchain/src/service/write.rs index 07162d2a..84c2538f 100644 --- a/storage/blockchain/src/service/write.rs +++ b/storage/blockchain/src/service/write.rs @@ -2,7 +2,7 @@ //---------------------------------------------------------------------------------------------------- Import use std::sync::Arc; -use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError, TxRw}; +use cuprate_database::{ConcreteEnv, DatabaseRo, DbResult, Env, EnvInner, TxRw}; use cuprate_database_service::DatabaseWriteHandle; use cuprate_types::{ blockchain::{BlockchainResponse, BlockchainWriteRequest}, @@ -36,7 +36,7 @@ pub fn init_write_service(env: Arc) -> BlockchainWriteHandle { fn handle_blockchain_request( env: &ConcreteEnv, req: &BlockchainWriteRequest, -) -> Result { +) -> DbResult { match req { BlockchainWriteRequest::WriteBlock(block) => write_block(env, block), BlockchainWriteRequest::WriteAltBlock(alt_block) => write_alt_block(env, alt_block), diff --git a/storage/database/src/backend/heed/database.rs b/storage/database/src/backend/heed/database.rs index c985d0de..15f16b45 100644 --- a/storage/database/src/backend/heed/database.rs +++ b/storage/database/src/backend/heed/database.rs @@ -6,7 +6,7 @@ use std::{cell::RefCell, ops::RangeBounds}; use crate::{ backend::heed::types::HeedDb, database::{DatabaseIter, DatabaseRo, DatabaseRw}, - error::RuntimeError, + error::{DbResult, RuntimeError}, table::Table, }; @@ -54,16 +54,13 @@ fn get( db: &HeedDb, tx_ro: &heed::RoTxn<'_>, key: &T::Key, -) -> Result { +) -> DbResult { db.get(tx_ro, key)?.ok_or(RuntimeError::KeyNotFound) } /// Shared [`DatabaseRo::len()`]. 
#[inline] -fn len( - db: &HeedDb, - tx_ro: &heed::RoTxn<'_>, -) -> Result { +fn len(db: &HeedDb, tx_ro: &heed::RoTxn<'_>) -> DbResult { Ok(db.len(tx_ro)?) } @@ -72,7 +69,7 @@ fn len( fn first( db: &HeedDb, tx_ro: &heed::RoTxn<'_>, -) -> Result<(T::Key, T::Value), RuntimeError> { +) -> DbResult<(T::Key, T::Value)> { db.first(tx_ro)?.ok_or(RuntimeError::KeyNotFound) } @@ -81,16 +78,13 @@ fn first( fn last( db: &HeedDb, tx_ro: &heed::RoTxn<'_>, -) -> Result<(T::Key, T::Value), RuntimeError> { +) -> DbResult<(T::Key, T::Value)> { db.last(tx_ro)?.ok_or(RuntimeError::KeyNotFound) } /// Shared [`DatabaseRo::is_empty()`]. #[inline] -fn is_empty( - db: &HeedDb, - tx_ro: &heed::RoTxn<'_>, -) -> Result { +fn is_empty(db: &HeedDb, tx_ro: &heed::RoTxn<'_>) -> DbResult { Ok(db.is_empty(tx_ro)?) } @@ -100,7 +94,7 @@ impl DatabaseIter for HeedTableRo<'_, T> { fn get_range<'a, Range>( &'a self, range: Range, - ) -> Result> + 'a, RuntimeError> + ) -> DbResult> + 'a> where Range: RangeBounds + 'a, { @@ -108,24 +102,17 @@ impl DatabaseIter for HeedTableRo<'_, T> { } #[inline] - fn iter( - &self, - ) -> Result> + '_, RuntimeError> - { + fn iter(&self) -> DbResult> + '_> { Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?))) } #[inline] - fn keys( - &self, - ) -> Result> + '_, RuntimeError> { + fn keys(&self) -> DbResult> + '_> { Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?.0))) } #[inline] - fn values( - &self, - ) -> Result> + '_, RuntimeError> { + fn values(&self) -> DbResult> + '_> { Ok(self.db.iter(self.tx_ro)?.map(|res| Ok(res?.1))) } } @@ -134,27 +121,27 @@ impl DatabaseIter for HeedTableRo<'_, T> { // SAFETY: `HeedTableRo: !Send` as it holds a reference to `heed::RoTxn: Send + !Sync`. 
unsafe impl DatabaseRo for HeedTableRo<'_, T> { #[inline] - fn get(&self, key: &T::Key) -> Result { + fn get(&self, key: &T::Key) -> DbResult { get::(&self.db, self.tx_ro, key) } #[inline] - fn len(&self) -> Result { + fn len(&self) -> DbResult { len::(&self.db, self.tx_ro) } #[inline] - fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn first(&self) -> DbResult<(T::Key, T::Value)> { first::(&self.db, self.tx_ro) } #[inline] - fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn last(&self) -> DbResult<(T::Key, T::Value)> { last::(&self.db, self.tx_ro) } #[inline] - fn is_empty(&self) -> Result { + fn is_empty(&self) -> DbResult { is_empty::(&self.db, self.tx_ro) } } @@ -164,45 +151,45 @@ unsafe impl DatabaseRo for HeedTableRo<'_, T> { // `HeedTableRw`'s write transaction is `!Send`. unsafe impl DatabaseRo for HeedTableRw<'_, '_, T> { #[inline] - fn get(&self, key: &T::Key) -> Result { + fn get(&self, key: &T::Key) -> DbResult { get::(&self.db, &self.tx_rw.borrow(), key) } #[inline] - fn len(&self) -> Result { + fn len(&self) -> DbResult { len::(&self.db, &self.tx_rw.borrow()) } #[inline] - fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn first(&self) -> DbResult<(T::Key, T::Value)> { first::(&self.db, &self.tx_rw.borrow()) } #[inline] - fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn last(&self) -> DbResult<(T::Key, T::Value)> { last::(&self.db, &self.tx_rw.borrow()) } #[inline] - fn is_empty(&self) -> Result { + fn is_empty(&self) -> DbResult { is_empty::(&self.db, &self.tx_rw.borrow()) } } impl DatabaseRw for HeedTableRw<'_, '_, T> { #[inline] - fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError> { + fn put(&mut self, key: &T::Key, value: &T::Value) -> DbResult<()> { Ok(self.db.put(&mut self.tx_rw.borrow_mut(), key, value)?) 
} #[inline] - fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError> { + fn delete(&mut self, key: &T::Key) -> DbResult<()> { self.db.delete(&mut self.tx_rw.borrow_mut(), key)?; Ok(()) } #[inline] - fn take(&mut self, key: &T::Key) -> Result { + fn take(&mut self, key: &T::Key) -> DbResult { // LMDB/heed does not return the value on deletion. // So, fetch it first - then delete. let value = get::(&self.db, &self.tx_rw.borrow(), key)?; @@ -216,7 +203,7 @@ impl DatabaseRw for HeedTableRw<'_, '_, T> { } #[inline] - fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError> { + fn pop_first(&mut self) -> DbResult<(T::Key, T::Value)> { let tx_rw = &mut self.tx_rw.borrow_mut(); // Get the value first... @@ -235,7 +222,7 @@ impl DatabaseRw for HeedTableRw<'_, '_, T> { } #[inline] - fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError> { + fn pop_last(&mut self) -> DbResult<(T::Key, T::Value)> { let tx_rw = &mut self.tx_rw.borrow_mut(); // Get the value first... diff --git a/storage/database/src/backend/heed/env.rs b/storage/database/src/backend/heed/env.rs index 568379e5..b603013b 100644 --- a/storage/database/src/backend/heed/env.rs +++ b/storage/database/src/backend/heed/env.rs @@ -18,7 +18,7 @@ use crate::{ config::{Config, SyncMode}, database::{DatabaseIter, DatabaseRo, DatabaseRw}, env::{Env, EnvInner}, - error::{InitError, RuntimeError}, + error::{DbResult, InitError, RuntimeError}, key::{Key, KeyCompare}, resize::ResizeAlgorithm, table::Table, @@ -203,7 +203,7 @@ impl Env for ConcreteEnv { &self.config } - fn sync(&self) -> Result<(), RuntimeError> { + fn sync(&self) -> DbResult<()> { Ok(self.env.read().unwrap().force_sync()?) } @@ -253,12 +253,12 @@ where type Rw<'a> = RefCell>; #[inline] - fn tx_ro(&self) -> Result, RuntimeError> { + fn tx_ro(&self) -> DbResult> { Ok(self.read_txn()?) 
} #[inline] - fn tx_rw(&self) -> Result, RuntimeError> { + fn tx_rw(&self) -> DbResult> { Ok(RefCell::new(self.write_txn()?)) } @@ -266,7 +266,7 @@ where fn open_db_ro( &self, tx_ro: &Self::Ro<'_>, - ) -> Result + DatabaseIter, RuntimeError> { + ) -> DbResult + DatabaseIter> { // Open up a read-only database using our table's const metadata. // // INVARIANT: LMDB caches the ordering / comparison function from [`EnvInner::create_db`], @@ -281,10 +281,7 @@ where } #[inline] - fn open_db_rw( - &self, - tx_rw: &Self::Rw<'_>, - ) -> Result, RuntimeError> { + fn open_db_rw(&self, tx_rw: &Self::Rw<'_>) -> DbResult> { // Open up a read/write database using our table's const metadata. // // INVARIANT: LMDB caches the ordering / comparison function from [`EnvInner::create_db`], @@ -296,7 +293,7 @@ where }) } - fn create_db(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError> { + fn create_db(&self, tx_rw: &Self::Rw<'_>) -> DbResult<()> { // Create a database using our: // - [`Table`]'s const metadata. // - (potentially) our [`Key`] comparison function @@ -328,7 +325,7 @@ where } #[inline] - fn clear_db(&self, tx_rw: &mut Self::Rw<'_>) -> Result<(), RuntimeError> { + fn clear_db(&self, tx_rw: &mut Self::Rw<'_>) -> DbResult<()> { let tx_rw = tx_rw.get_mut(); // Open the table. 
We don't care about flags or key diff --git a/storage/database/src/backend/heed/transaction.rs b/storage/database/src/backend/heed/transaction.rs index d32f3707..b7c0f54d 100644 --- a/storage/database/src/backend/heed/transaction.rs +++ b/storage/database/src/backend/heed/transaction.rs @@ -4,31 +4,31 @@ use std::cell::RefCell; //---------------------------------------------------------------------------------------------------- Import use crate::{ - error::RuntimeError, + error::DbResult, transaction::{TxRo, TxRw}, }; //---------------------------------------------------------------------------------------------------- TxRo impl TxRo<'_> for heed::RoTxn<'_> { - fn commit(self) -> Result<(), RuntimeError> { + fn commit(self) -> DbResult<()> { Ok(heed::RoTxn::commit(self)?) } } //---------------------------------------------------------------------------------------------------- TxRw impl TxRo<'_> for RefCell> { - fn commit(self) -> Result<(), RuntimeError> { + fn commit(self) -> DbResult<()> { TxRw::commit(self) } } impl TxRw<'_> for RefCell> { - fn commit(self) -> Result<(), RuntimeError> { + fn commit(self) -> DbResult<()> { Ok(heed::RwTxn::commit(self.into_inner())?) } /// This function is infallible. 
- fn abort(self) -> Result<(), RuntimeError> { + fn abort(self) -> DbResult<()> { heed::RwTxn::abort(self.into_inner()); Ok(()) } diff --git a/storage/database/src/backend/redb/database.rs b/storage/database/src/backend/redb/database.rs index dafb2417..0be58ef0 100644 --- a/storage/database/src/backend/redb/database.rs +++ b/storage/database/src/backend/redb/database.rs @@ -11,7 +11,7 @@ use crate::{ types::{RedbTableRo, RedbTableRw}, }, database::{DatabaseIter, DatabaseRo, DatabaseRw}, - error::RuntimeError, + error::{DbResult, RuntimeError}, table::Table, }; @@ -25,7 +25,7 @@ use crate::{ fn get( db: &impl ReadableTable, StorableRedb>, key: &T::Key, -) -> Result { +) -> DbResult { Ok(db.get(key)?.ok_or(RuntimeError::KeyNotFound)?.value()) } @@ -33,7 +33,7 @@ fn get( #[inline] fn len( db: &impl ReadableTable, StorableRedb>, -) -> Result { +) -> DbResult { Ok(db.len()?) } @@ -41,7 +41,7 @@ fn len( #[inline] fn first( db: &impl ReadableTable, StorableRedb>, -) -> Result<(T::Key, T::Value), RuntimeError> { +) -> DbResult<(T::Key, T::Value)> { let (key, value) = db.first()?.ok_or(RuntimeError::KeyNotFound)?; Ok((key.value(), value.value())) } @@ -50,7 +50,7 @@ fn first( #[inline] fn last( db: &impl ReadableTable, StorableRedb>, -) -> Result<(T::Key, T::Value), RuntimeError> { +) -> DbResult<(T::Key, T::Value)> { let (key, value) = db.last()?.ok_or(RuntimeError::KeyNotFound)?; Ok((key.value(), value.value())) } @@ -59,7 +59,7 @@ fn last( #[inline] fn is_empty( db: &impl ReadableTable, StorableRedb>, -) -> Result { +) -> DbResult { Ok(db.is_empty()?) 
} @@ -69,7 +69,7 @@ impl DatabaseIter for RedbTableRo { fn get_range<'a, Range>( &'a self, range: Range, - ) -> Result> + 'a, RuntimeError> + ) -> DbResult> + 'a> where Range: RangeBounds + 'a, { @@ -80,10 +80,7 @@ impl DatabaseIter for RedbTableRo { } #[inline] - fn iter( - &self, - ) -> Result> + '_, RuntimeError> - { + fn iter(&self) -> DbResult> + '_> { Ok(ReadableTable::iter(self)?.map(|result| { let (key, value) = result?; Ok((key.value(), value.value())) @@ -91,9 +88,7 @@ impl DatabaseIter for RedbTableRo { } #[inline] - fn keys( - &self, - ) -> Result> + '_, RuntimeError> { + fn keys(&self) -> DbResult> + '_> { Ok(ReadableTable::iter(self)?.map(|result| { let (key, _value) = result?; Ok(key.value()) @@ -101,9 +96,7 @@ impl DatabaseIter for RedbTableRo { } #[inline] - fn values( - &self, - ) -> Result> + '_, RuntimeError> { + fn values(&self) -> DbResult> + '_> { Ok(ReadableTable::iter(self)?.map(|result| { let (_key, value) = result?; Ok(value.value()) @@ -115,27 +108,27 @@ impl DatabaseIter for RedbTableRo { // SAFETY: Both `redb`'s transaction and table types are `Send + Sync`. unsafe impl DatabaseRo for RedbTableRo { #[inline] - fn get(&self, key: &T::Key) -> Result { + fn get(&self, key: &T::Key) -> DbResult { get::(self, key) } #[inline] - fn len(&self) -> Result { + fn len(&self) -> DbResult { len::(self) } #[inline] - fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn first(&self) -> DbResult<(T::Key, T::Value)> { first::(self) } #[inline] - fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn last(&self) -> DbResult<(T::Key, T::Value)> { last::(self) } #[inline] - fn is_empty(&self) -> Result { + fn is_empty(&self) -> DbResult { is_empty::(self) } } @@ -144,27 +137,27 @@ unsafe impl DatabaseRo for RedbTableRo // SAFETY: Both `redb`'s transaction and table types are `Send + Sync`. 
unsafe impl DatabaseRo for RedbTableRw<'_, T::Key, T::Value> { #[inline] - fn get(&self, key: &T::Key) -> Result { + fn get(&self, key: &T::Key) -> DbResult { get::(self, key) } #[inline] - fn len(&self) -> Result { + fn len(&self) -> DbResult { len::(self) } #[inline] - fn first(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn first(&self) -> DbResult<(T::Key, T::Value)> { first::(self) } #[inline] - fn last(&self) -> Result<(T::Key, T::Value), RuntimeError> { + fn last(&self) -> DbResult<(T::Key, T::Value)> { last::(self) } #[inline] - fn is_empty(&self) -> Result { + fn is_empty(&self) -> DbResult { is_empty::(self) } } @@ -173,19 +166,19 @@ impl DatabaseRw for RedbTableRw<'_, T::Key, T::Value> { // `redb` returns the value after function calls so we end with Ok(()) instead. #[inline] - fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError> { + fn put(&mut self, key: &T::Key, value: &T::Value) -> DbResult<()> { redb::Table::insert(self, key, value)?; Ok(()) } #[inline] - fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError> { + fn delete(&mut self, key: &T::Key) -> DbResult<()> { redb::Table::remove(self, key)?; Ok(()) } #[inline] - fn take(&mut self, key: &T::Key) -> Result { + fn take(&mut self, key: &T::Key) -> DbResult { if let Some(value) = redb::Table::remove(self, key)? 
{ Ok(value.value()) } else { @@ -194,13 +187,13 @@ impl DatabaseRw for RedbTableRw<'_, T::Key, T::Value> { } #[inline] - fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError> { + fn pop_first(&mut self) -> DbResult<(T::Key, T::Value)> { let (key, value) = redb::Table::pop_first(self)?.ok_or(RuntimeError::KeyNotFound)?; Ok((key.value(), value.value())) } #[inline] - fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError> { + fn pop_last(&mut self) -> DbResult<(T::Key, T::Value)> { let (key, value) = redb::Table::pop_last(self)?.ok_or(RuntimeError::KeyNotFound)?; Ok((key.value(), value.value())) } diff --git a/storage/database/src/backend/redb/env.rs b/storage/database/src/backend/redb/env.rs index a405ea72..4bd49d68 100644 --- a/storage/database/src/backend/redb/env.rs +++ b/storage/database/src/backend/redb/env.rs @@ -6,7 +6,7 @@ use crate::{ config::{Config, SyncMode}, database::{DatabaseIter, DatabaseRo, DatabaseRw}, env::{Env, EnvInner}, - error::{InitError, RuntimeError}, + error::{DbResult, InitError, RuntimeError}, table::Table, TxRw, }; @@ -105,7 +105,7 @@ impl Env for ConcreteEnv { &self.config } - fn sync(&self) -> Result<(), RuntimeError> { + fn sync(&self) -> DbResult<()> { // `redb`'s syncs are tied with write transactions, // so just create one, don't do anything and commit. let mut tx_rw = self.env.begin_write()?; @@ -127,12 +127,12 @@ where type Rw<'a> = redb::WriteTransaction; #[inline] - fn tx_ro(&self) -> Result { + fn tx_ro(&self) -> DbResult { Ok(self.0.begin_read()?) } #[inline] - fn tx_rw(&self) -> Result { + fn tx_rw(&self) -> DbResult { // `redb` has sync modes on the TX level, unlike heed, // which sets it at the Environment level. // @@ -146,7 +146,7 @@ where fn open_db_ro( &self, tx_ro: &Self::Ro<'_>, - ) -> Result + DatabaseIter, RuntimeError> { + ) -> DbResult + DatabaseIter> { // Open up a read-only database using our `T: Table`'s const metadata. 
let table: redb::TableDefinition<'static, StorableRedb, StorableRedb> = redb::TableDefinition::new(T::NAME); @@ -155,10 +155,7 @@ where } #[inline] - fn open_db_rw( - &self, - tx_rw: &Self::Rw<'_>, - ) -> Result, RuntimeError> { + fn open_db_rw(&self, tx_rw: &Self::Rw<'_>) -> DbResult> { // Open up a read/write database using our `T: Table`'s const metadata. let table: redb::TableDefinition<'static, StorableRedb, StorableRedb> = redb::TableDefinition::new(T::NAME); @@ -168,14 +165,14 @@ where Ok(tx_rw.open_table(table)?) } - fn create_db(&self, tx_rw: &redb::WriteTransaction) -> Result<(), RuntimeError> { + fn create_db(&self, tx_rw: &redb::WriteTransaction) -> DbResult<()> { // INVARIANT: `redb` creates tables if they don't exist. self.open_db_rw::(tx_rw)?; Ok(()) } #[inline] - fn clear_db(&self, tx_rw: &mut redb::WriteTransaction) -> Result<(), RuntimeError> { + fn clear_db(&self, tx_rw: &mut redb::WriteTransaction) -> DbResult<()> { let table: redb::TableDefinition< 'static, StorableRedb<::Key>, diff --git a/storage/database/src/backend/redb/storable.rs b/storage/database/src/backend/redb/storable.rs index abf2e71b..f0412efb 100644 --- a/storage/database/src/backend/redb/storable.rs +++ b/storage/database/src/backend/redb/storable.rs @@ -34,8 +34,14 @@ impl redb::Value for StorableRedb where T: Storable + 'static, { - type SelfType<'a> = T where Self: 'a; - type AsBytes<'a> = &'a [u8] where Self: 'a; + type SelfType<'a> + = T + where + Self: 'a; + type AsBytes<'a> + = &'a [u8] + where + Self: 'a; #[inline] fn fixed_width() -> Option { diff --git a/storage/database/src/backend/redb/transaction.rs b/storage/database/src/backend/redb/transaction.rs index 5048851d..8d93986d 100644 --- a/storage/database/src/backend/redb/transaction.rs +++ b/storage/database/src/backend/redb/transaction.rs @@ -2,14 +2,14 @@ //---------------------------------------------------------------------------------------------------- Import use crate::{ - error::RuntimeError, + 
error::DbResult, transaction::{TxRo, TxRw}, }; //---------------------------------------------------------------------------------------------------- TxRo impl TxRo<'_> for redb::ReadTransaction { /// This function is infallible. - fn commit(self) -> Result<(), RuntimeError> { + fn commit(self) -> DbResult<()> { // `redb`'s read transactions cleanup automatically when all references are dropped. // // There is `close()`: @@ -22,11 +22,11 @@ impl TxRo<'_> for redb::ReadTransaction { //---------------------------------------------------------------------------------------------------- TxRw impl TxRw<'_> for redb::WriteTransaction { - fn commit(self) -> Result<(), RuntimeError> { + fn commit(self) -> DbResult<()> { Ok(self.commit()?) } - fn abort(self) -> Result<(), RuntimeError> { + fn abort(self) -> DbResult<()> { Ok(self.abort()?) } } diff --git a/storage/database/src/config/sync_mode.rs b/storage/database/src/config/sync_mode.rs index 5a0cba52..dbb34e7f 100644 --- a/storage/database/src/config/sync_mode.rs +++ b/storage/database/src/config/sync_mode.rs @@ -9,7 +9,6 @@ //! based on these values. //---------------------------------------------------------------------------------------------------- Import - #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/storage/database/src/database.rs b/storage/database/src/database.rs index 6fbb7aaa..c019972b 100644 --- a/storage/database/src/database.rs +++ b/storage/database/src/database.rs @@ -3,7 +3,10 @@ //---------------------------------------------------------------------------------------------------- Import use std::ops::RangeBounds; -use crate::{error::RuntimeError, table::Table}; +use crate::{ + error::{DbResult, RuntimeError}, + table::Table, +}; //---------------------------------------------------------------------------------------------------- DatabaseIter /// Generic post-fix documentation for `DatabaseIter` methods. 
@@ -48,27 +51,22 @@ pub trait DatabaseIter { fn get_range<'a, Range>( &'a self, range: Range, - ) -> Result> + 'a, RuntimeError> + ) -> DbResult> + 'a> where Range: RangeBounds + 'a; /// Get an [`Iterator`] that returns the `(key, value)` types for this database. #[doc = doc_iter!()] #[expect(clippy::iter_not_returning_iterator)] - fn iter( - &self, - ) -> Result> + '_, RuntimeError>; + fn iter(&self) -> DbResult> + '_>; /// Get an [`Iterator`] that returns _only_ the `key` type for this database. #[doc = doc_iter!()] - fn keys(&self) - -> Result> + '_, RuntimeError>; + fn keys(&self) -> DbResult> + '_>; /// Get an [`Iterator`] that returns _only_ the `value` type for this database. #[doc = doc_iter!()] - fn values( - &self, - ) -> Result> + '_, RuntimeError>; + fn values(&self) -> DbResult> + '_>; } //---------------------------------------------------------------------------------------------------- DatabaseRo @@ -76,7 +74,7 @@ pub trait DatabaseIter { macro_rules! doc_database { () => { r"# Errors -This will return [`RuntimeError::KeyNotFound`] if: +This will return [`crate::RuntimeError::KeyNotFound`] if: - Input does not exist OR - Database is empty" }; @@ -111,7 +109,7 @@ This will return [`RuntimeError::KeyNotFound`] if: pub unsafe trait DatabaseRo { /// Get the value corresponding to a key. #[doc = doc_database!()] - fn get(&self, key: &T::Key) -> Result; + fn get(&self, key: &T::Key) -> DbResult; /// Returns `true` if the database contains a value for the specified key. /// @@ -120,7 +118,7 @@ pub unsafe trait DatabaseRo { /// as in that case, `Ok(false)` will be returned. /// /// Other errors may still occur. - fn contains(&self, key: &T::Key) -> Result { + fn contains(&self, key: &T::Key) -> DbResult { match self.get(key) { Ok(_) => Ok(true), Err(RuntimeError::KeyNotFound) => Ok(false), @@ -132,21 +130,21 @@ pub unsafe trait DatabaseRo { /// /// # Errors /// This will never return [`RuntimeError::KeyNotFound`]. 
- fn len(&self) -> Result; + fn len(&self) -> DbResult; /// Returns the first `(key, value)` pair in the database. #[doc = doc_database!()] - fn first(&self) -> Result<(T::Key, T::Value), RuntimeError>; + fn first(&self) -> DbResult<(T::Key, T::Value)>; /// Returns the last `(key, value)` pair in the database. #[doc = doc_database!()] - fn last(&self) -> Result<(T::Key, T::Value), RuntimeError>; + fn last(&self) -> DbResult<(T::Key, T::Value)>; /// Returns `true` if the database contains no `(key, value)` pairs. /// /// # Errors /// This can only return [`RuntimeError::Io`] on errors. - fn is_empty(&self) -> Result; + fn is_empty(&self) -> DbResult; } //---------------------------------------------------------------------------------------------------- DatabaseRw @@ -161,7 +159,7 @@ pub trait DatabaseRw: DatabaseRo { #[doc = doc_database!()] /// /// This will never [`RuntimeError::KeyExists`]. - fn put(&mut self, key: &T::Key, value: &T::Value) -> Result<(), RuntimeError>; + fn put(&mut self, key: &T::Key, value: &T::Value) -> DbResult<()>; /// Delete a key-value pair in the database. /// @@ -170,7 +168,7 @@ pub trait DatabaseRw: DatabaseRo { #[doc = doc_database!()] /// /// This will never [`RuntimeError::KeyExists`]. - fn delete(&mut self, key: &T::Key) -> Result<(), RuntimeError>; + fn delete(&mut self, key: &T::Key) -> DbResult<()>; /// Delete and return a key-value pair in the database. /// @@ -178,7 +176,7 @@ pub trait DatabaseRw: DatabaseRo { /// it will serialize the `T::Value` and return it. /// #[doc = doc_database!()] - fn take(&mut self, key: &T::Key) -> Result; + fn take(&mut self, key: &T::Key) -> DbResult; /// Fetch the value, and apply a function to it - or delete the entry. 
/// @@ -192,7 +190,7 @@ pub trait DatabaseRw: DatabaseRo { /// - If `f` returns `None`, the entry will be [`DatabaseRw::delete`]d /// #[doc = doc_database!()] - fn update(&mut self, key: &T::Key, mut f: F) -> Result<(), RuntimeError> + fn update(&mut self, key: &T::Key, mut f: F) -> DbResult<()> where F: FnMut(T::Value) -> Option, { @@ -207,10 +205,10 @@ pub trait DatabaseRw: DatabaseRo { /// Removes and returns the first `(key, value)` pair in the database. /// #[doc = doc_database!()] - fn pop_first(&mut self) -> Result<(T::Key, T::Value), RuntimeError>; + fn pop_first(&mut self) -> DbResult<(T::Key, T::Value)>; /// Removes and returns the last `(key, value)` pair in the database. /// #[doc = doc_database!()] - fn pop_last(&mut self) -> Result<(T::Key, T::Value), RuntimeError>; + fn pop_last(&mut self) -> DbResult<(T::Key, T::Value)>; } diff --git a/storage/database/src/env.rs b/storage/database/src/env.rs index 1ae6aa1f..56b92cbd 100644 --- a/storage/database/src/env.rs +++ b/storage/database/src/env.rs @@ -6,7 +6,7 @@ use std::num::NonZeroUsize; use crate::{ config::Config, database::{DatabaseIter, DatabaseRo, DatabaseRw}, - error::{InitError, RuntimeError}, + error::{DbResult, InitError}, resize::ResizeAlgorithm, table::Table, transaction::{TxRo, TxRw}, @@ -39,7 +39,7 @@ pub trait Env: Sized { /// /// # Invariant /// If this is `false`, that means this [`Env`] - /// must _never_ return a [`RuntimeError::ResizeNeeded`]. + /// must _never_ return a [`crate::RuntimeError::ResizeNeeded`]. /// /// If this is `true`, [`Env::resize_map`] & [`Env::current_map_size`] /// _must_ be re-implemented, as it just panics by default. @@ -88,7 +88,7 @@ pub trait Env: Sized { /// This will error if the database file could not be opened. /// /// This is the only [`Env`] function that will return - /// an [`InitError`] instead of a [`RuntimeError`]. + /// an [`InitError`] instead of a [`crate::RuntimeError`]. 
fn open(config: Config) -> Result; /// Return the [`Config`] that this database was [`Env::open`]ed with. @@ -107,7 +107,7 @@ pub trait Env: Sized { /// /// # Errors /// If there is a synchronization error, this should return an error. - fn sync(&self) -> Result<(), RuntimeError>; + fn sync(&self) -> DbResult<()>; /// Resize the database's memory map to a /// new (bigger) size using a [`ResizeAlgorithm`]. @@ -218,14 +218,14 @@ pub trait EnvInner<'env> { /// Create a read-only transaction. /// /// # Errors - /// This will only return [`RuntimeError::Io`] if it errors. - fn tx_ro(&self) -> Result, RuntimeError>; + /// This will only return [`crate::RuntimeError::Io`] if it errors. + fn tx_ro(&self) -> DbResult>; /// Create a read/write transaction. /// /// # Errors - /// This will only return [`RuntimeError::Io`] if it errors. - fn tx_rw(&self) -> Result, RuntimeError>; + /// This will only return [`crate::RuntimeError::Io`] if it errors. + fn tx_rw(&self) -> DbResult>; /// Open a database in read-only mode. /// @@ -269,17 +269,17 @@ pub trait EnvInner<'env> { /// ``` /// /// # Errors - /// This will only return [`RuntimeError::Io`] on normal errors. + /// This will only return [`crate::RuntimeError::Io`] on normal errors. /// /// If the specified table is not created upon before this function is called, - /// this will return [`RuntimeError::TableNotFound`]. + /// this will return [`crate::RuntimeError::TableNotFound`]. /// /// # Invariant #[doc = doc_heed_create_db_invariant!()] fn open_db_ro( &self, tx_ro: &Self::Ro<'_>, - ) -> Result + DatabaseIter, RuntimeError>; + ) -> DbResult + DatabaseIter>; /// Open a database in read/write mode. /// @@ -293,25 +293,22 @@ pub trait EnvInner<'env> { /// passed as a generic to this function. /// /// # Errors - /// This will only return [`RuntimeError::Io`] on errors. + /// This will only return [`crate::RuntimeError::Io`] on errors. 
/// /// # Invariant #[doc = doc_heed_create_db_invariant!()] - fn open_db_rw( - &self, - tx_rw: &Self::Rw<'_>, - ) -> Result, RuntimeError>; + fn open_db_rw(&self, tx_rw: &Self::Rw<'_>) -> DbResult>; /// Create a database table. /// /// This will create the database [`Table`] passed as a generic to this function. /// /// # Errors - /// This will only return [`RuntimeError::Io`] on errors. + /// This will only return [`crate::RuntimeError::Io`] on errors. /// /// # Invariant #[doc = doc_heed_create_db_invariant!()] - fn create_db(&self, tx_rw: &Self::Rw<'_>) -> Result<(), RuntimeError>; + fn create_db(&self, tx_rw: &Self::Rw<'_>) -> DbResult<()>; /// Clear all `(key, value)`'s from a database table. /// @@ -322,9 +319,9 @@ pub trait EnvInner<'env> { /// function's effects can be aborted using [`TxRw::abort`]. /// /// # Errors - /// This will return [`RuntimeError::Io`] on normal errors. + /// This will return [`crate::RuntimeError::Io`] on normal errors. /// /// If the specified table is not created upon before this function is called, - /// this will return [`RuntimeError::TableNotFound`]. - fn clear_db(&self, tx_rw: &mut Self::Rw<'_>) -> Result<(), RuntimeError>; + /// this will return [`crate::RuntimeError::TableNotFound`]. + fn clear_db(&self, tx_rw: &mut Self::Rw<'_>) -> DbResult<()>; } diff --git a/storage/database/src/error.rs b/storage/database/src/error.rs index 3471ac74..82f80b9a 100644 --- a/storage/database/src/error.rs +++ b/storage/database/src/error.rs @@ -7,6 +7,9 @@ use std::fmt::Debug; /// Alias for a thread-safe boxed error. type BoxError = Box; +/// [`Result`] with [`RuntimeError`] as the error. +pub type DbResult = Result; + //---------------------------------------------------------------------------------------------------- InitError /// Errors that occur during ([`Env::open`](crate::env::Env::open)). 
/// diff --git a/storage/database/src/lib.rs b/storage/database/src/lib.rs index 45bfc53c..8e48fca0 100644 --- a/storage/database/src/lib.rs +++ b/storage/database/src/lib.rs @@ -50,7 +50,7 @@ pub use constants::{ }; pub use database::{DatabaseIter, DatabaseRo, DatabaseRw}; pub use env::{Env, EnvInner}; -pub use error::{InitError, RuntimeError}; +pub use error::{DbResult, InitError, RuntimeError}; pub use key::{Key, KeyCompare}; pub use storable::{Storable, StorableBytes, StorableStr, StorableVec}; pub use table::Table; diff --git a/storage/database/src/table.rs b/storage/database/src/table.rs index 3ad0e793..6d0daa20 100644 --- a/storage/database/src/table.rs +++ b/storage/database/src/table.rs @@ -1,7 +1,6 @@ //! Database table abstraction; `trait Table`. //---------------------------------------------------------------------------------------------------- Import - use crate::{key::Key, storable::Storable}; //---------------------------------------------------------------------------------------------------- Table diff --git a/storage/database/src/tables.rs b/storage/database/src/tables.rs index 83a00e16..56203ad0 100644 --- a/storage/database/src/tables.rs +++ b/storage/database/src/tables.rs @@ -211,7 +211,7 @@ macro_rules! define_tables { /// /// # Errors /// This returns errors on regular database errors. - fn all_tables_empty(&self) -> Result; + fn all_tables_empty(&self) -> $crate::DbResult; } /// Object containing all opened [`Table`](cuprate_database::Table)s in read + iter mode. @@ -293,7 +293,7 @@ macro_rules! define_tables { } )* - fn all_tables_empty(&self) -> Result { + fn all_tables_empty(&self) -> $crate::DbResult { $( if !$crate::DatabaseRo::is_empty(&self.$index)? { return Ok(false); @@ -369,7 +369,7 @@ macro_rules! define_tables { /// /// # Errors /// This will only return [`cuprate_database::RuntimeError::Io`] if it errors. 
- fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result; + fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> $crate::DbResult; /// Open all tables in read-write mode. /// @@ -378,7 +378,7 @@ macro_rules! define_tables { /// /// # Errors /// This will only return [`cuprate_database::RuntimeError::Io`] on errors. - fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result; + fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult; /// Create all database tables. /// @@ -386,7 +386,7 @@ macro_rules! define_tables { /// /// # Errors /// This will only return [`cuprate_database::RuntimeError::Io`] on errors. - fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), $crate::RuntimeError>; + fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<()>; } impl<'env, Ei> OpenTables<'env> for Ei @@ -396,19 +396,19 @@ macro_rules! define_tables { type Ro<'tx> = >::Ro<'tx>; type Rw<'tx> = >::Rw<'tx>; - fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> Result { + fn open_tables(&self, tx_ro: &Self::Ro<'_>) -> $crate::DbResult { Ok(($( Self::open_db_ro::<[<$table:camel>]>(self, tx_ro)?, )*)) } - fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> Result { + fn open_tables_mut(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult { Ok(($( Self::open_db_rw::<[<$table:camel>]>(self, tx_rw)?, )*)) } - fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> Result<(), $crate::RuntimeError> { + fn create_tables(&self, tx_rw: &Self::Rw<'_>) -> $crate::DbResult<()> { let result = Ok(($( Self::create_db::<[<$table:camel>]>(self, tx_rw), )*)); diff --git a/storage/database/src/transaction.rs b/storage/database/src/transaction.rs index 8f33983d..16d1c518 100644 --- a/storage/database/src/transaction.rs +++ b/storage/database/src/transaction.rs @@ -1,7 +1,7 @@ //! Database transaction abstraction; `trait TxRo`, `trait TxRw`. 
//---------------------------------------------------------------------------------------------------- Import -use crate::error::RuntimeError; +use crate::error::DbResult; //---------------------------------------------------------------------------------------------------- TxRo /// Read-only database transaction. @@ -16,7 +16,7 @@ pub trait TxRo<'tx> { /// /// # Errors /// This operation will always return `Ok(())` with the `redb` backend. - fn commit(self) -> Result<(), RuntimeError>; + fn commit(self) -> DbResult<()>; } //---------------------------------------------------------------------------------------------------- TxRw @@ -32,12 +32,12 @@ pub trait TxRw<'tx> { /// This operation will always return `Ok(())` with the `redb` backend. /// /// If `Env::MANUAL_RESIZE == true`, - /// [`RuntimeError::ResizeNeeded`] may be returned. - fn commit(self) -> Result<(), RuntimeError>; + /// [`crate::RuntimeError::ResizeNeeded`] may be returned. + fn commit(self) -> DbResult<()>; /// Abort the transaction, erasing any writes that have occurred. /// /// # Errors /// This operation will always return `Ok(())` with the `heed` backend. - fn abort(self) -> Result<(), RuntimeError>; + fn abort(self) -> DbResult<()>; } diff --git a/storage/service/src/service/read.rs b/storage/service/src/service/read.rs index 0ab68539..187ffa4c 100644 --- a/storage/service/src/service/read.rs +++ b/storage/service/src/service/read.rs @@ -7,7 +7,7 @@ use futures::channel::oneshot; use rayon::ThreadPool; use tower::Service; -use cuprate_database::{ConcreteEnv, RuntimeError}; +use cuprate_database::{ConcreteEnv, DbResult, RuntimeError}; use cuprate_helper::asynch::InfallibleOneshotReceiver; /// The [`rayon::ThreadPool`] service. @@ -24,7 +24,7 @@ pub struct DatabaseReadService { pool: Arc, /// The function used to handle request. 
- inner_handler: Arc Result + Send + Sync + 'static>, + inner_handler: Arc DbResult + Send + Sync + 'static>, } // Deriving [`Clone`] means `Req` & `Res` need to be `Clone`, even if they aren't. @@ -51,7 +51,7 @@ where pub fn new( env: Arc, pool: Arc, - req_handler: impl Fn(&ConcreteEnv, Req) -> Result + Send + Sync + 'static, + req_handler: impl Fn(&ConcreteEnv, Req) -> DbResult + Send + Sync + 'static, ) -> Self { let inner_handler = Arc::new(move |req| req_handler(&env, req)); @@ -69,9 +69,9 @@ where { type Response = Res; type Error = RuntimeError; - type Future = InfallibleOneshotReceiver>; + type Future = InfallibleOneshotReceiver>; - fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } diff --git a/storage/service/src/service/write.rs b/storage/service/src/service/write.rs index 607c4aa6..6bcd7255 100644 --- a/storage/service/src/service/write.rs +++ b/storage/service/src/service/write.rs @@ -6,7 +6,7 @@ use std::{ use futures::channel::oneshot; -use cuprate_database::{ConcreteEnv, Env, RuntimeError}; +use cuprate_database::{ConcreteEnv, DbResult, Env, RuntimeError}; use cuprate_helper::asynch::InfallibleOneshotReceiver; //---------------------------------------------------------------------------------------------------- Constants @@ -26,8 +26,7 @@ pub struct DatabaseWriteHandle { /// Sender channel to the database write thread-pool. /// /// We provide the response channel for the thread-pool. - pub(super) sender: - crossbeam::channel::Sender<(Req, oneshot::Sender>)>, + pub(super) sender: crossbeam::channel::Sender<(Req, oneshot::Sender>)>, } impl Clone for DatabaseWriteHandle { @@ -48,7 +47,7 @@ where #[inline(never)] // Only called once. pub fn init( env: Arc, - inner_handler: impl Fn(&ConcreteEnv, &Req) -> Result + Send + 'static, + inner_handler: impl Fn(&ConcreteEnv, &Req) -> DbResult + Send + 'static, ) -> Self { // Initialize `Request/Response` channels. 
let (sender, receiver) = crossbeam::channel::unbounded(); @@ -66,10 +65,10 @@ where impl tower::Service for DatabaseWriteHandle { type Response = Res; type Error = RuntimeError; - type Future = InfallibleOneshotReceiver>; + type Future = InfallibleOneshotReceiver>; #[inline] - fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } @@ -89,8 +88,8 @@ impl tower::Service for DatabaseWriteHandle { /// The main function of the writer thread. fn database_writer( env: &ConcreteEnv, - receiver: &crossbeam::channel::Receiver<(Req, oneshot::Sender>)>, - inner_handler: impl Fn(&ConcreteEnv, &Req) -> Result, + receiver: &crossbeam::channel::Receiver<(Req, oneshot::Sender>)>, + inner_handler: impl Fn(&ConcreteEnv, &Req) -> DbResult, ) where Req: Send + 'static, Res: Debug + Send + 'static, diff --git a/storage/txpool/src/ops/key_images.rs b/storage/txpool/src/ops/key_images.rs index 04aa1b44..76cae141 100644 --- a/storage/txpool/src/ops/key_images.rs +++ b/storage/txpool/src/ops/key_images.rs @@ -1,7 +1,7 @@ //! Tx-pool key image ops. 
use monero_serai::transaction::Input; -use cuprate_database::{DatabaseRw, RuntimeError}; +use cuprate_database::{DatabaseRw, DbResult}; use crate::{ops::TxPoolWriteError, tables::SpentKeyImages, types::TransactionHash}; @@ -34,7 +34,7 @@ pub(super) fn add_tx_key_images( pub(super) fn remove_tx_key_images( inputs: &[Input], kis_table: &mut impl DatabaseRw, -) -> Result<(), RuntimeError> { +) -> DbResult<()> { for ki in inputs.iter().map(ki_from_input) { kis_table.delete(&ki)?; } diff --git a/storage/txpool/src/ops/tx_read.rs b/storage/txpool/src/ops/tx_read.rs index 55690750..24101f77 100644 --- a/storage/txpool/src/ops/tx_read.rs +++ b/storage/txpool/src/ops/tx_read.rs @@ -5,7 +5,7 @@ use std::sync::Mutex; use monero_serai::transaction::Transaction; -use cuprate_database::{DatabaseRo, RuntimeError}; +use cuprate_database::{DatabaseRo, DbResult}; use cuprate_types::{TransactionVerificationData, TxVersion}; use crate::{ @@ -17,7 +17,7 @@ use crate::{ pub fn get_transaction_verification_data( tx_hash: &TransactionHash, tables: &impl Tables, -) -> Result { +) -> DbResult { let tx_blob = tables.transaction_blobs().get(tx_hash)?.0; let tx_info = tables.transaction_infos().get(tx_hash)?; @@ -45,7 +45,7 @@ pub fn get_transaction_verification_data( pub fn in_stem_pool( tx_hash: &TransactionHash, tx_infos: &impl DatabaseRo, -) -> Result { +) -> DbResult { Ok(tx_infos .get(tx_hash)? .flags diff --git a/storage/txpool/src/ops/tx_write.rs b/storage/txpool/src/ops/tx_write.rs index dc5ab463..8f426fb7 100644 --- a/storage/txpool/src/ops/tx_write.rs +++ b/storage/txpool/src/ops/tx_write.rs @@ -4,7 +4,7 @@ use bytemuck::TransparentWrapper; use monero_serai::transaction::{NotPruned, Transaction}; -use cuprate_database::{DatabaseRw, RuntimeError, StorableVec}; +use cuprate_database::{DatabaseRw, DbResult, StorableVec}; use cuprate_types::TransactionVerificationData; use crate::{ @@ -67,10 +67,7 @@ pub fn add_transaction( } /// Removes a transaction from the transaction pool. 
-pub fn remove_transaction( - tx_hash: &TransactionHash, - tables: &mut impl TablesMut, -) -> Result<(), RuntimeError> { +pub fn remove_transaction(tx_hash: &TransactionHash, tables: &mut impl TablesMut) -> DbResult<()> { // Remove the tx blob from table 0. let tx_blob = tables.transaction_blobs_mut().take(tx_hash)?.0; diff --git a/storage/txpool/src/service/read.rs b/storage/txpool/src/service/read.rs index 0de1e7d0..44a29b3c 100644 --- a/storage/txpool/src/service/read.rs +++ b/storage/txpool/src/service/read.rs @@ -11,7 +11,7 @@ use std::{ use rayon::ThreadPool; -use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError}; +use cuprate_database::{ConcreteEnv, DatabaseRo, DbResult, Env, EnvInner, RuntimeError}; use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads}; use crate::{ @@ -137,7 +137,7 @@ fn filter_known_tx_blob_hashes( // A closure that returns `true` if a tx with a certain blob hash is unknown. // This also fills in `stem_tx_hashes`. - let mut tx_unknown = |blob_hash| -> Result { + let mut tx_unknown = |blob_hash| -> DbResult { match tx_blob_hashes.get(&blob_hash) { Ok(tx_hash) => { if in_stem_pool(&tx_hash, &tx_infos)? { diff --git a/storage/txpool/src/service/types.rs b/storage/txpool/src/service/types.rs index 5c6b97ce..af1ca98b 100644 --- a/storage/txpool/src/service/types.rs +++ b/storage/txpool/src/service/types.rs @@ -2,7 +2,7 @@ //! //! Only used internally for our [`tower::Service`] impls. -use cuprate_database::RuntimeError; +use cuprate_database::DbResult; use cuprate_database_service::{DatabaseReadService, DatabaseWriteHandle}; use crate::service::interface::{ @@ -12,7 +12,7 @@ use crate::service::interface::{ /// The actual type of the response. /// /// Either our [`TxpoolReadResponse`], or a database error occurred. -pub(super) type ReadResponseResult = Result; +pub(super) type ReadResponseResult = DbResult; /// The transaction pool database write service. 
pub type TxpoolWriteHandle = DatabaseWriteHandle; diff --git a/storage/txpool/src/service/write.rs b/storage/txpool/src/service/write.rs index 13ab81fa..23c5a8a4 100644 --- a/storage/txpool/src/service/write.rs +++ b/storage/txpool/src/service/write.rs @@ -1,6 +1,8 @@ use std::sync::Arc; -use cuprate_database::{ConcreteEnv, DatabaseRo, DatabaseRw, Env, EnvInner, RuntimeError, TxRw}; +use cuprate_database::{ + ConcreteEnv, DatabaseRo, DatabaseRw, DbResult, Env, EnvInner, RuntimeError, TxRw, +}; use cuprate_database_service::DatabaseWriteHandle; use cuprate_types::TransactionVerificationData; @@ -25,7 +27,7 @@ pub(super) fn init_write_service(env: Arc) -> TxpoolWriteHandle { fn handle_txpool_request( env: &ConcreteEnv, req: &TxpoolWriteRequest, -) -> Result { +) -> DbResult { match req { TxpoolWriteRequest::AddTransaction { tx, state_stem } => { add_transaction(env, tx, *state_stem) @@ -50,7 +52,7 @@ fn add_transaction( env: &ConcreteEnv, tx: &TransactionVerificationData, state_stem: bool, -) -> Result { +) -> DbResult { let env_inner = env.env_inner(); let tx_rw = env_inner.tx_rw()?; @@ -83,7 +85,7 @@ fn add_transaction( fn remove_transaction( env: &ConcreteEnv, tx_hash: &TransactionHash, -) -> Result { +) -> DbResult { let env_inner = env.env_inner(); let tx_rw = env_inner.tx_rw()?; @@ -105,10 +107,7 @@ fn remove_transaction( } /// [`TxpoolWriteRequest::Promote`] -fn promote( - env: &ConcreteEnv, - tx_hash: &TransactionHash, -) -> Result { +fn promote(env: &ConcreteEnv, tx_hash: &TransactionHash) -> DbResult { let env_inner = env.env_inner(); let tx_rw = env_inner.tx_rw()?; @@ -134,10 +133,7 @@ fn promote( } /// [`TxpoolWriteRequest::NewBlock`] -fn new_block( - env: &ConcreteEnv, - spent_key_images: &[KeyImage], -) -> Result { +fn new_block(env: &ConcreteEnv, spent_key_images: &[KeyImage]) -> DbResult { let env_inner = env.env_inner(); let tx_rw = env_inner.tx_rw()?; diff --git a/test-utils/src/data/constants.rs b/test-utils/src/data/constants.rs index 
fff04416..78413edf 100644 --- a/test-utils/src/data/constants.rs +++ b/test-utils/src/data/constants.rs @@ -104,7 +104,7 @@ macro_rules! const_tx_blob { hash: $hash:literal, // Transaction hash data_path: $data_path:literal, // Path to the transaction blob version: $version:literal, // Transaction version - timelock: $timelock:expr, // Transaction's timelock (use the real type `Timelock`) + timelock: $timelock:expr_2021, // Transaction's timelock (use the real type `Timelock`) input_len: $input_len:literal, // Amount of inputs output_len: $output_len:literal, // Amount of outputs ) => { diff --git a/test-utils/src/rpc/data/macros.rs b/test-utils/src/rpc/data/macros.rs index 63a214c6..5f87c53a 100644 --- a/test-utils/src/rpc/data/macros.rs +++ b/test-utils/src/rpc/data/macros.rs @@ -25,11 +25,11 @@ macro_rules! define_request_and_response { // The request type (and any doc comments, derives, etc). $( #[$request_attr:meta] )* - Request = $request:expr; + Request = $request:expr_2021; // The response type (and any doc comments, derives, etc). $( #[$response_attr:meta] )* - Response = $response:expr; + Response = $response:expr_2021; ) => { paste::paste! { #[doc = $crate::rpc::data::macros::define_request_and_response_doc!( "response" => [<$name:upper _RESPONSE>], From ecd077b402cd907e9ffc23eb5904a3cab7e479de Mon Sep 17 00:00:00 2001 From: Boog900 Date: Tue, 3 Dec 2024 15:17:21 +0000 Subject: [PATCH 2/3] cuprated: config & args (#304) * init config * split sections * finish initial config. * fix clap * misc changes * fix doc * fix test & clippy * fix test 2 * try fix windows * testing * testing 2 * fix windows test * fix windows: the remix. 
* review comments * fix imports * rename & fix default config file * fix cargo hack * enable serde on `cuprate-helper` * changes from matrix chats * fix ci * fix doc * fix doc test * move Cuprated.toml * remove default.rs * `size` -> `bytes` * `addressbook_path` -> `address_book_path` * fix config output * fix ci * Update binaries/cuprated/src/config/args.rs Co-authored-by: hinto-janai --------- Co-authored-by: hinto-janai --- Cargo.lock | 40 +++- Cargo.toml | 2 + binaries/cuprated/Cargo.toml | 59 +++--- binaries/cuprated/Cuprated.toml | 67 +++++++ binaries/cuprated/src/config.rs | 158 ++++++++++++++++ binaries/cuprated/src/config/args.rs | 55 ++++++ binaries/cuprated/src/config/fs.rs | 21 +++ binaries/cuprated/src/config/p2p.rs | 178 ++++++++++++++++++ binaries/cuprated/src/config/storage.rs | 67 +++++++ .../cuprated/src/config/tracing_config.rs | 42 +++++ binaries/cuprated/src/constants.rs | 8 + binaries/cuprated/src/main.rs | 2 + helper/Cargo.toml | 2 + helper/src/fs.rs | 95 ++++++---- helper/src/network.rs | 34 +++- p2p/address-book/Cargo.toml | 2 +- p2p/address-book/src/book/tests.rs | 2 +- p2p/address-book/src/lib.rs | 9 +- p2p/address-book/src/store.rs | 11 +- p2p/p2p/src/block_downloader.rs | 16 +- p2p/p2p/src/block_downloader/tests.rs | 8 +- storage/blockchain/Cargo.toml | 2 +- storage/blockchain/README.md | 2 +- storage/blockchain/src/config.rs | 74 +++++--- storage/blockchain/src/ops/mod.rs | 2 +- storage/blockchain/src/service/mod.rs | 2 +- storage/blockchain/src/service/tests.rs | 3 +- storage/blockchain/src/tests.rs | 4 +- storage/txpool/Cargo.toml | 2 +- storage/txpool/README.md | 2 +- storage/txpool/src/config.rs | 84 +++++---- storage/txpool/src/ops.rs | 2 +- storage/txpool/src/service.rs | 2 +- 33 files changed, 888 insertions(+), 171 deletions(-) create mode 100644 binaries/cuprated/Cuprated.toml create mode 100644 binaries/cuprated/src/config/args.rs create mode 100644 binaries/cuprated/src/config/fs.rs create mode 100644 
binaries/cuprated/src/config/p2p.rs create mode 100644 binaries/cuprated/src/config/storage.rs create mode 100644 binaries/cuprated/src/config/tracing_config.rs diff --git a/Cargo.lock b/Cargo.lock index ac36c565..0d55c8aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -446,6 +446,7 @@ checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstyle", "clap_lex", + "terminal_size", ] [[package]] @@ -933,6 +934,7 @@ dependencies = [ "libc", "monero-serai", "rayon", + "serde", "tokio", "windows", ] @@ -1188,7 +1190,6 @@ dependencies = [ "cuprate-consensus", "cuprate-consensus-context", "cuprate-consensus-rules", - "cuprate-constants", "cuprate-cryptonight", "cuprate-dandelion-tower", "cuprate-database", @@ -1230,6 +1231,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", + "toml", "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", "tracing-subscriber", @@ -2904,6 +2906,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -3121,6 +3132,16 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "terminal_size" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" +dependencies = [ + "rustix", + "windows-sys 0.59.0", +] + [[package]] name = "thiserror" version = "1.0.66" @@ -3262,11 +3283,26 @@ dependencies = [ "tracing", ] +[[package]] +name = "toml" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + [[package]] 
name = "toml_datetime" version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +dependencies = [ + "serde", +] [[package]] name = "toml_edit" @@ -3275,6 +3311,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap", + "serde", + "serde_spanned", "toml_datetime", "winnow", ] diff --git a/Cargo.toml b/Cargo.toml index 3cc3ab18..9be1528c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,6 +55,7 @@ members = [ ] [profile.release] +panic = "abort" lto = true # Build with LTO strip = "none" # Keep panic stack traces codegen-units = 1 # Optimize for binary speed over compile times @@ -144,6 +145,7 @@ tokio-util = { version = "0.7", default-features = false } tokio-stream = { version = "0.1", default-features = false } tokio = { version = "1", default-features = false } tower = { git = "https://github.com/Cuprate/tower.git", rev = "6c7faf0", default-features = false } # +toml = { version = "0.8", default-features = false } tracing-subscriber = { version = "0.3", default-features = false } tracing = { version = "0.1", default-features = false } diff --git a/binaries/cuprated/Cargo.toml b/binaries/cuprated/Cargo.toml index 9ebdd780..acf8827b 100644 --- a/binaries/cuprated/Cargo.toml +++ b/binaries/cuprated/Cargo.toml @@ -2,7 +2,7 @@ name = "cuprated" version = "0.0.1" edition = "2021" -description = "The Cuprate Monero Rust node." +description = "The Cuprate Rust Monero node." 
license = "AGPL-3.0-only" authors = ["Boog900", "hinto-janai", "SyntheticBird45"] repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated" @@ -12,29 +12,29 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated" cuprate-consensus = { workspace = true } cuprate-fast-sync = { workspace = true } cuprate-consensus-context = { workspace = true } -cuprate-consensus-rules = { workspace = true } -cuprate-constants = { workspace = true } -cuprate-cryptonight = { workspace = true } -cuprate-helper = { workspace = true } -cuprate-epee-encoding = { workspace = true } -cuprate-fixed-bytes = { workspace = true } -cuprate-levin = { workspace = true } -cuprate-wire = { workspace = true } -cuprate-p2p = { workspace = true } -cuprate-p2p-core = { workspace = true } -cuprate-dandelion-tower = { workspace = true, features = ["txpool"] } -cuprate-async-buffer = { workspace = true } -cuprate-address-book = { workspace = true } -cuprate-blockchain = { workspace = true } -cuprate-database-service = { workspace = true } -cuprate-txpool = { workspace = true } -cuprate-database = { workspace = true } -cuprate-pruning = { workspace = true } -cuprate-test-utils = { workspace = true } -cuprate-types = { workspace = true } -cuprate-json-rpc = { workspace = true } -cuprate-rpc-interface = { workspace = true } -cuprate-rpc-types = { workspace = true } +cuprate-consensus-rules = { workspace = true } +cuprate-cryptonight = { workspace = true } +cuprate-helper = { workspace = true, features = ["serde"] } +cuprate-epee-encoding = { workspace = true } +cuprate-fixed-bytes = { workspace = true } +cuprate-levin = { workspace = true } +cuprate-wire = { workspace = true } +cuprate-p2p = { workspace = true } +cuprate-p2p-core = { workspace = true } +cuprate-dandelion-tower = { workspace = true, features = ["txpool"] } +cuprate-async-buffer = { workspace = true } +cuprate-address-book = { workspace = true } +cuprate-blockchain = { workspace = true } 
+cuprate-database-service = { workspace = true, features = ["serde"] } +cuprate-txpool = { workspace = true } +cuprate-database = { workspace = true, features = ["serde"] } +cuprate-pruning = { workspace = true } +cuprate-test-utils = { workspace = true } +cuprate-types = { workspace = true } +cuprate-json-rpc = { workspace = true } +cuprate-rpc-interface = { workspace = true } +cuprate-rpc-types = { workspace = true } + # TODO: after v1.0.0, remove unneeded dependencies. anyhow = { workspace = true } @@ -44,7 +44,7 @@ borsh = { workspace = true } bytemuck = { workspace = true } bytes = { workspace = true } cfg-if = { workspace = true } -clap = { workspace = true, features = ["cargo"] } +clap = { workspace = true, features = ["cargo", "help", "wrap_help"] } chrono = { workspace = true } crypto-bigint = { workspace = true } crossbeam = { workspace = true } @@ -71,15 +71,10 @@ thread_local = { workspace = true } tokio-util = { workspace = true } tokio-stream = { workspace = true } tokio = { workspace = true } +toml = { workspace = true, features = ["parse", "display"]} tower = { workspace = true } tracing-subscriber = { workspace = true, features = ["std", "fmt", "default"] } -tracing = { workspace = true } +tracing = { workspace = true, features = ["default"] } [lints] workspace = true - -[profile.dev] -panic = "abort" - -[profile.release] -panic = "abort" diff --git a/binaries/cuprated/Cuprated.toml b/binaries/cuprated/Cuprated.toml new file mode 100644 index 00000000..d248ce1f --- /dev/null +++ b/binaries/cuprated/Cuprated.toml @@ -0,0 +1,67 @@ +# ____ _ +# / ___| _ _ __ _ __ __ _| |_ ___ +# | | | | | | '_ \| '__/ _` | __/ _ \ +# | |__| |_| | |_) | | | (_| | || __/ +# \____\__,_| .__/|_| \__,_|\__\___| +# |_| +# + +## The network to run on, valid values: "Mainnet", "Testnet", "Stagenet". +network = "Mainnet" + +## Tracing config. +[tracing] +## The minimum level for log events to be displayed. +level = "info" + +## Clear-net config. 
+[p2p.clear_net] +## The number of outbound connections we should make and maintain. +outbound_connections = 64 +## The number of extra connections we should make under load from the rest of Cuprate, i.e. when syncing. +extra_outbound_connections = 8 +## The maximum number of incoming connections we should allow. +max_inbound_connections = 128 +## The percent of outbound connections that should be to nodes we have not connected to before. +gray_peers_percent = 0.7 +## The port to accept connections on, if left `0` no connections will be accepted. +p2p_port = 0 +## The IP address to listen to connections on. +listen_on = "0.0.0.0" + +## The Clear-net addressbook config. +[p2p.clear_net.address_book_config] +## The size of the white peer list, which contains peers we have made a connection to before. +max_white_list_length = 1_000 +## The size of the gray peer list, which contains peers we have not made a connection to before. +max_gray_list_length = 5_000 +## The amount of time between address book saves. +peer_save_period = { secs = 90, nanos = 0 } + +## The block downloader config. +[p2p.block_downloader] +## The size of the buffer of sequential blocks waiting to be verified and added to the chain (bytes). +buffer_bytes = 50_000_000 +## The size of the queue of blocks which are waiting for a parent block to be downloaded (bytes). +in_progress_queue_bytes = 50_000_000 +## The target size of a batch of blocks (bytes), must not exceed 100MB. +target_batch_bytes = 5_000_000 +## The amount of time between checking the pool of connected peers for free peers to download blocks. +check_client_pool_interval = { secs = 30, nanos = 0 } + +## Storage config +[storage] +## The amount of reader threads to spawn. +reader_threads = "OnePerThread" + +## Txpool storage config. +[storage.txpool] +## The database sync mode for the txpool. +sync_mode = "Async" +## The maximum size of all the txs in the pool (bytes). +max_txpool_byte_size = 100_000_000 + +## Blockchain storage config.
+[storage.blockchain] +## The database sync mode for the blockchain. +sync_mode = "Async" diff --git a/binaries/cuprated/src/config.rs b/binaries/cuprated/src/config.rs index d613c1fc..c6267a69 100644 --- a/binaries/cuprated/src/config.rs +++ b/binaries/cuprated/src/config.rs @@ -1 +1,159 @@ //! cuprated config +use std::{ + fs::{read_to_string, File}, + io, + path::Path, + time::Duration, +}; + +use clap::Parser; +use serde::{Deserialize, Serialize}; + +use cuprate_consensus::ContextConfig; +use cuprate_helper::{ + fs::{CUPRATE_CONFIG_DIR, DEFAULT_CONFIG_FILE_NAME}, + network::Network, +}; +use cuprate_p2p::block_downloader::BlockDownloaderConfig; +use cuprate_p2p_core::{ClearNet, ClearNetServerCfg}; + +mod args; +mod fs; +mod p2p; +mod storage; +mod tracing_config; + +use crate::config::fs::FileSystemConfig; +use p2p::P2PConfig; +use storage::StorageConfig; +use tracing_config::TracingConfig; + +/// Reads the args & config file, returning a [`Config`]. +pub fn read_config_and_args() -> Config { + let args = args::Args::parse(); + args.do_quick_requests(); + + let config: Config = if let Some(config_file) = &args.config_file { + // If a config file was set in the args try to read it and exit if we can't. + match Config::read_from_path(config_file) { + Ok(config) => config, + Err(e) => { + eprintln!("Failed to read config from file: {e}"); + std::process::exit(1); + } + } + } else { + // First attempt to read the config file from the current directory. + std::env::current_dir() + .map(|path| path.join(DEFAULT_CONFIG_FILE_NAME)) + .map_err(Into::into) + .and_then(Config::read_from_path) + .inspect_err(|e| tracing::debug!("Failed to read config from current dir: {e}")) + // otherwise try the main config directory. 
+ .or_else(|_| { + let file = CUPRATE_CONFIG_DIR.join(DEFAULT_CONFIG_FILE_NAME); + Config::read_from_path(file) + }) + .inspect_err(|e| { + tracing::debug!("Failed to read config from config dir: {e}"); + eprintln!("Failed to find/read config file, using default config."); + }) + .unwrap_or_default() + }; + + args.apply_args(config) +} + +/// The config for all of Cuprate. +#[derive(Default, Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct Config { + /// The network we should run on. + network: Network, + + /// [`tracing`] config. + tracing: TracingConfig, + + /// The P2P network config. + p2p: P2PConfig, + + /// The storage config. + storage: StorageConfig, + + fs: FileSystemConfig, +} + +impl Config { + /// Attempts to read a config file in [`toml`] format from the given [`Path`]. + /// + /// # Errors + /// + /// Will return an [`Err`] if the file cannot be read or if the file is not a valid [`toml`] config. + fn read_from_path(file: impl AsRef) -> Result { + let file_text = read_to_string(file.as_ref())?; + + Ok(toml::from_str(&file_text) + .inspect(|_| eprintln!("Using config at: {}", file.as_ref().to_string_lossy())) + .inspect_err(|e| { + eprintln!("{e}"); + eprintln!( + "Failed to parse config file at: {}", + file.as_ref().to_string_lossy() + ); + })?) + } + + /// Returns the current [`Network`] we are running on. + pub const fn network(&self) -> Network { + self.network + } + + /// The [`ClearNet`], [`cuprate_p2p::P2PConfig`]. 
+ pub fn clearnet_p2p_config(&self) -> cuprate_p2p::P2PConfig { + cuprate_p2p::P2PConfig { + network: self.network, + seeds: p2p::clear_net_seed_nodes(self.network), + outbound_connections: self.p2p.clear_net.general.outbound_connections, + extra_outbound_connections: self.p2p.clear_net.general.extra_outbound_connections, + max_inbound_connections: self.p2p.clear_net.general.max_inbound_connections, + gray_peers_percent: self.p2p.clear_net.general.gray_peers_percent, + server_config: Some(ClearNetServerCfg { + ip: self.p2p.clear_net.listen_on, + }), + p2p_port: self.p2p.clear_net.general.p2p_port, + // TODO: set this if a public RPC server is set. + rpc_port: 0, + address_book_config: self + .p2p + .clear_net + .general + .address_book_config(&self.fs.cache_directory, self.network), + } + } + + /// The [`ContextConfig`]. + pub const fn context_config(&self) -> ContextConfig { + match self.network { + Network::Mainnet => ContextConfig::main_net(), + Network::Stagenet => ContextConfig::stage_net(), + Network::Testnet => ContextConfig::test_net(), + } + } + + /// The [`cuprate_blockchain`] config. + pub fn blockchain_config(&self) -> cuprate_blockchain::config::Config { + let blockchain = &self.storage.blockchain; + + // We don't set reader threads as we manually make the reader threadpool. + cuprate_blockchain::config::ConfigBuilder::default() + .network(self.network) + .data_directory(self.fs.data_directory.clone()) + .sync_mode(blockchain.shared.sync_mode) + .build() + } + + /// The [`BlockDownloaderConfig`]. 
+ pub fn block_downloader_config(&self) -> BlockDownloaderConfig { + self.p2p.block_downloader.clone().into() + } +} diff --git a/binaries/cuprated/src/config/args.rs b/binaries/cuprated/src/config/args.rs new file mode 100644 index 00000000..c4c2f9fd --- /dev/null +++ b/binaries/cuprated/src/config/args.rs @@ -0,0 +1,55 @@ +use std::{io::Write, path::PathBuf, process::exit}; + +use clap::builder::TypedValueParser; + +use cuprate_helper::network::Network; + +use crate::{config::Config, constants::EXAMPLE_CONFIG}; + +/// Cuprate Args. +#[derive(clap::Parser, Debug)] +#[command(version, about)] +pub struct Args { + /// The network to run on. + #[arg( + long, + default_value_t = Network::Mainnet, + value_parser = clap::builder::PossibleValuesParser::new(["mainnet", "testnet", "stagenet"]) + .map(|s| s.parse::<Network>().unwrap()), + )] + pub network: Network, + /// The amount of outbound clear-net connections to maintain. + #[arg(long)] + pub outbound_connections: Option<usize>, + /// The PATH of the `cuprated` config file. + #[arg(long)] + pub config_file: Option<PathBuf>, + /// Generate a config file and print it to stdout. + #[arg(long)] + pub generate_config: bool, +} + +impl Args { + /// Complete any quick requests asked for in [`Args`]. + /// + /// May cause the process to [`exit`]. + pub fn do_quick_requests(&self) { + if self.generate_config { + println!("{EXAMPLE_CONFIG}"); + exit(0); + } + } + + /// Apply the [`Args`] to the given [`Config`]. + /// + /// This may exit the program if a config value was set that requires an early exit.
+ pub const fn apply_args(&self, mut config: Config) -> Config { + config.network = self.network; + + if let Some(outbound_connections) = self.outbound_connections { + config.p2p.clear_net.general.outbound_connections = outbound_connections; + } + + config + } +} diff --git a/binaries/cuprated/src/config/fs.rs b/binaries/cuprated/src/config/fs.rs new file mode 100644 index 00000000..f8f61307 --- /dev/null +++ b/binaries/cuprated/src/config/fs.rs @@ -0,0 +1,21 @@ +use std::path::PathBuf; + +use serde::{Deserialize, Serialize}; + +use cuprate_helper::fs::{CUPRATE_CACHE_DIR, CUPRATE_DATA_DIR}; + +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct FileSystemConfig { + pub data_directory: PathBuf, + pub cache_directory: PathBuf, +} + +impl Default for FileSystemConfig { + fn default() -> Self { + Self { + data_directory: CUPRATE_DATA_DIR.to_path_buf(), + cache_directory: CUPRATE_CACHE_DIR.to_path_buf(), + } + } +} diff --git a/binaries/cuprated/src/config/p2p.rs b/binaries/cuprated/src/config/p2p.rs new file mode 100644 index 00000000..51f8d0d6 --- /dev/null +++ b/binaries/cuprated/src/config/p2p.rs @@ -0,0 +1,178 @@ +use std::{ + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + path::Path, + time::Duration, +}; + +use serde::{Deserialize, Serialize}; + +use cuprate_helper::{fs::address_book_path, network::Network}; + +/// P2P config. +#[derive(Default, Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct P2PConfig { + /// Clear-net config. + pub clear_net: ClearNetConfig, + /// Block downloader config. + pub block_downloader: BlockDownloaderConfig, +} + +#[derive(Clone, Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct BlockDownloaderConfig { + /// The size in bytes of the buffer between the block downloader and the place which + /// is consuming the downloaded blocks. 
+ pub buffer_bytes: usize, + /// The size of the in progress queue (in bytes) at which we stop requesting more blocks. + pub in_progress_queue_bytes: usize, + /// The [`Duration`] between checking the client pool for free peers. + pub check_client_pool_interval: Duration, + /// The target size of a single batch of blocks (in bytes). + pub target_batch_bytes: usize, +} + +impl From<BlockDownloaderConfig> for cuprate_p2p::block_downloader::BlockDownloaderConfig { + fn from(value: BlockDownloaderConfig) -> Self { + Self { + buffer_bytes: value.buffer_bytes, + in_progress_queue_bytes: value.in_progress_queue_bytes, + check_client_pool_interval: value.check_client_pool_interval, + target_batch_bytes: value.target_batch_bytes, + initial_batch_len: 1, + } + } +} + +impl Default for BlockDownloaderConfig { + fn default() -> Self { + Self { + buffer_bytes: 50_000_000, + in_progress_queue_bytes: 50_000_000, + check_client_pool_interval: Duration::from_secs(30), + target_batch_bytes: 5_000_000, + } + } +} + +/// The config values for P2P clear-net. +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct ClearNetConfig { + /// The server config. + pub listen_on: IpAddr, + #[serde(flatten)] + pub general: SharedNetConfig, +} + +impl Default for ClearNetConfig { + fn default() -> Self { + Self { + listen_on: IpAddr::V4(Ipv4Addr::UNSPECIFIED), + general: Default::default(), + } + } +} + +/// Network config values shared between all network zones. +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct SharedNetConfig { + /// The number of outbound connections to make and try keep. + pub outbound_connections: usize, + /// The amount of extra connections we can make if we are under load from the rest of Cuprate. + pub extra_outbound_connections: usize, + /// The maximum amount of inbound connections + pub max_inbound_connections: usize, + /// The percent of connections that should be to peers we haven't connected to before.
+ pub gray_peers_percent: f64, + /// port to use to accept p2p connections. + pub p2p_port: u16, + /// The address book config. + address_book_config: AddressBookConfig, +} + +impl SharedNetConfig { + /// Returns the [`AddressBookConfig`]. + pub fn address_book_config( + &self, + cache_dir: &Path, + network: Network, + ) -> cuprate_address_book::AddressBookConfig { + cuprate_address_book::AddressBookConfig { + max_white_list_length: self.address_book_config.max_white_list_length, + max_gray_list_length: self.address_book_config.max_gray_list_length, + peer_store_directory: address_book_path(cache_dir, network), + peer_save_period: self.address_book_config.peer_save_period, + } + } +} + +impl Default for SharedNetConfig { + fn default() -> Self { + Self { + outbound_connections: 64, + extra_outbound_connections: 8, + max_inbound_connections: 128, + gray_peers_percent: 0.7, + p2p_port: 0, + address_book_config: AddressBookConfig::default(), + } + } +} + +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct AddressBookConfig { + max_white_list_length: usize, + max_gray_list_length: usize, + peer_save_period: Duration, +} + +impl Default for AddressBookConfig { + fn default() -> Self { + Self { + max_white_list_length: 1_000, + max_gray_list_length: 5_000, + peer_save_period: Duration::from_secs(30), + } + } +} + +/// Seed nodes for [`ClearNet`](cuprate_p2p_core::ClearNet). 
+pub fn clear_net_seed_nodes(network: Network) -> Vec<SocketAddr> { + let seeds = match network { + Network::Mainnet => [ + "176.9.0.187:18080", + "88.198.163.90:18080", + "66.85.74.134:18080", + "51.79.173.165:18080", + "192.99.8.110:18080", + "37.187.74.171:18080", + "77.172.183.193:18080", + ] + .as_slice(), + Network::Stagenet => [ + "176.9.0.187:38080", + "51.79.173.165:38080", + "192.99.8.110:38080", + "37.187.74.171:38080", + "77.172.183.193:38080", + ] + .as_slice(), + Network::Testnet => [ + "176.9.0.187:28080", + "51.79.173.165:28080", + "192.99.8.110:28080", + "37.187.74.171:28080", + "77.172.183.193:28080", + ] + .as_slice(), + }; + + seeds + .iter() + .map(|s| s.parse()) + .collect::<Result<_, _>>() + .unwrap() +} diff --git a/binaries/cuprated/src/config/storage.rs b/binaries/cuprated/src/config/storage.rs new file mode 100644 index 00000000..b3e3c1f4 --- /dev/null +++ b/binaries/cuprated/src/config/storage.rs @@ -0,0 +1,67 @@ +use std::path::PathBuf; + +use serde::{Deserialize, Serialize}; + +use cuprate_database::config::SyncMode; + +use cuprate_database_service::ReaderThreads; +use cuprate_helper::fs::CUPRATE_DATA_DIR; + +/// The storage config. +#[derive(Default, Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct StorageConfig { + /// The amount of reader threads to spawn between the tx-pool and blockchain. + pub reader_threads: ReaderThreads, + /// The tx-pool config. + pub txpool: TxpoolConfig, + /// The blockchain config. + pub blockchain: BlockchainConfig, +} + +/// The blockchain config. +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct BlockchainConfig { + #[serde(flatten)] + pub shared: SharedStorageConfig, +} + +impl Default for BlockchainConfig { + fn default() -> Self { + Self { + shared: SharedStorageConfig { + sync_mode: SyncMode::Async, + }, + } + } +} + +/// The tx-pool config.
+#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct TxpoolConfig { + #[serde(flatten)] + pub shared: SharedStorageConfig, + + /// The maximum size of the tx-pool. + pub max_txpool_byte_size: usize, +} + +impl Default for TxpoolConfig { + fn default() -> Self { + Self { + shared: SharedStorageConfig { + sync_mode: SyncMode::Async, + }, + max_txpool_byte_size: 100_000_000, + } + } +} + +/// Config values shared between the tx-pool and blockchain. +#[derive(Default, Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct SharedStorageConfig { + /// The [`SyncMode`] of the database. + pub sync_mode: SyncMode, +} diff --git a/binaries/cuprated/src/config/tracing_config.rs b/binaries/cuprated/src/config/tracing_config.rs new file mode 100644 index 00000000..859d516a --- /dev/null +++ b/binaries/cuprated/src/config/tracing_config.rs @@ -0,0 +1,42 @@ +use serde::{Deserialize, Serialize}; +use tracing::level_filters::LevelFilter; + +/// [`tracing`] config. +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields, default)] +pub struct TracingConfig { + /// The default minimum log level. 
+ #[serde(with = "level_filter_serde")] + level: LevelFilter, +} + +impl Default for TracingConfig { + fn default() -> Self { + Self { + level: LevelFilter::INFO, + } + } +} + +mod level_filter_serde { + use std::str::FromStr; + + use serde::{Deserialize, Deserializer, Serializer}; + use tracing::level_filters::LevelFilter; + + #[expect(clippy::trivially_copy_pass_by_ref, reason = "serde")] + pub fn serialize<S>(level_filter: &LevelFilter, s: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + s.serialize_str(&level_filter.to_string()) + } + + pub fn deserialize<'de, D>(d: D) -> Result<LevelFilter, D::Error> + where + D: Deserializer<'de>, + { + let s = String::deserialize(d)?; + LevelFilter::from_str(&s).map_err(serde::de::Error::custom) + } +} diff --git a/binaries/cuprated/src/constants.rs b/binaries/cuprated/src/constants.rs index 2f3c7bb6..057e8bd0 100644 --- a/binaries/cuprated/src/constants.rs +++ b/binaries/cuprated/src/constants.rs @@ -18,9 +18,12 @@ pub const VERSION_BUILD: &str = if cfg!(debug_assertions) { pub const PANIC_CRITICAL_SERVICE_ERROR: &str = "A service critical to Cuprate's function returned an unexpected error."; +pub const EXAMPLE_CONFIG: &str = include_str!("../Cuprated.toml"); + #[cfg(test)] mod test { use super::*; + use crate::config::Config; #[test] fn version() { @@ -35,4 +38,9 @@ mod test { assert_eq!(VERSION_BUILD, "0.0.1-release"); } } + + #[test] + fn generate_config_text_is_valid() { + let config: Config = toml::from_str(EXAMPLE_CONFIG).unwrap(); + } } diff --git a/binaries/cuprated/src/main.rs b/binaries/cuprated/src/main.rs index d5c832e2..617c5b61 100644 --- a/binaries/cuprated/src/main.rs +++ b/binaries/cuprated/src/main.rs @@ -29,6 +29,8 @@ fn main() { // Initialize global static `LazyLock` data. statics::init_lazylock_statics(); + let _config = config::read_config_and_args(); + // TODO: everything else.
todo!() } diff --git a/helper/Cargo.toml b/helper/Cargo.toml index 1b3158f1..716beb34 100644 --- a/helper/Cargo.toml +++ b/helper/Cargo.toml @@ -35,6 +35,8 @@ futures = { workspace = true, optional = true, features = ["std"] } monero-serai = { workspace = true, optional = true } rayon = { workspace = true, optional = true } +serde = { workspace = true, optional = true, features = ["derive"] } + # This is kinda a stupid work around. # [thread] needs to activate one of these libs (windows|libc) # although it depends on what target we're building for. diff --git a/helper/src/fs.rs b/helper/src/fs.rs index 5d62a644..f694f62d 100644 --- a/helper/src/fs.rs +++ b/helper/src/fs.rs @@ -28,7 +28,12 @@ //! - //---------------------------------------------------------------------------------------------------- Use -use std::{path::PathBuf, sync::LazyLock}; +use std::{ + path::{Path, PathBuf}, + sync::LazyLock, +}; + +use crate::network::Network; //---------------------------------------------------------------------------------------------------- Const /// Cuprate's main directory. @@ -58,6 +63,9 @@ pub const CUPRATE_DIR: &str = { } }; +/// The default name of Cuprate's config file. +pub const DEFAULT_CONFIG_FILE_NAME: &str = "Cuprated.toml"; + //---------------------------------------------------------------------------------------------------- Directories /// Create a `LazyLock` for common PATHs used by Cuprate. /// @@ -150,32 +158,61 @@ impl_path_lazylock! { CUPRATE_DATA_DIR, data_dir, "", +} - /// Cuprate's blockchain directory. - /// - /// This is the PATH used for any Cuprate blockchain files. 
- /// - /// | OS | PATH | - /// |---------|----------------------------------------------------------------| - /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\blockchain\` | - /// | macOS | `/Users/Alice/Library/Application Support/Cuprate/blockchain/` | - /// | Linux | `/home/alice/.local/share/cuprate/blockchain/` | - CUPRATE_BLOCKCHAIN_DIR, - data_dir, - "blockchain", +/// Joins the [`Network`] to the [`Path`]. +/// +/// This will keep the path the same for [`Network::Mainnet`]. +fn path_with_network(path: &Path, network: Network) -> PathBuf { + match network { + Network::Mainnet => path.to_path_buf(), + network => path.join(network.to_string()), + } +} - /// Cuprate's transaction pool directory. - /// - /// This is the PATH used for any Cuprate txpool files. - /// - /// | OS | PATH | - /// |---------|------------------------------------------------------------| - /// | Windows | `C:\Users\Alice\AppData\Roaming\Cuprate\txpool\` | - /// | macOS | `/Users/Alice/Library/Application Support/Cuprate/txpool/` | - /// | Linux | `/home/alice/.local/share/cuprate/txpool/` | - CUPRATE_TXPOOL_DIR, - data_dir, - "txpool", +/// Cuprate's blockchain directory. +/// +/// This is the PATH used for any Cuprate blockchain files. +/// +/// ```rust +/// use cuprate_helper::{network::Network, fs::{CUPRATE_DATA_DIR, blockchain_path}}; +/// +/// assert_eq!(blockchain_path(&**CUPRATE_DATA_DIR, Network::Mainnet).as_path(), CUPRATE_DATA_DIR.join("blockchain")); +/// assert_eq!(blockchain_path(&**CUPRATE_DATA_DIR, Network::Stagenet).as_path(), CUPRATE_DATA_DIR.join(Network::Stagenet.to_string()).join("blockchain")); +/// assert_eq!(blockchain_path(&**CUPRATE_DATA_DIR, Network::Testnet).as_path(), CUPRATE_DATA_DIR.join(Network::Testnet.to_string()).join("blockchain")); +/// ``` +pub fn blockchain_path(data_dir: &Path, network: Network) -> PathBuf { + path_with_network(data_dir, network).join("blockchain") +} + +/// Cuprate's txpool directory. 
+/// +/// This is the PATH used for any Cuprate txpool files. +/// +/// ```rust +/// use cuprate_helper::{network::Network, fs::{CUPRATE_DATA_DIR, txpool_path}}; +/// +/// assert_eq!(txpool_path(&**CUPRATE_DATA_DIR, Network::Mainnet).as_path(), CUPRATE_DATA_DIR.join("txpool")); +/// assert_eq!(txpool_path(&**CUPRATE_DATA_DIR, Network::Stagenet).as_path(), CUPRATE_DATA_DIR.join(Network::Stagenet.to_string()).join("txpool")); +/// assert_eq!(txpool_path(&**CUPRATE_DATA_DIR, Network::Testnet).as_path(), CUPRATE_DATA_DIR.join(Network::Testnet.to_string()).join("txpool")); +/// ``` +pub fn txpool_path(data_dir: &Path, network: Network) -> PathBuf { + path_with_network(data_dir, network).join("txpool") +} + +/// Cuprate's address-book directory. +/// +/// This is the PATH used for any Cuprate address-book files. +/// +/// ```rust +/// use cuprate_helper::{network::Network, fs::{CUPRATE_CACHE_DIR, address_book_path}}; +/// +/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Mainnet).as_path(), CUPRATE_CACHE_DIR.join("addressbook")); +/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Stagenet).as_path(), CUPRATE_CACHE_DIR.join(Network::Stagenet.to_string()).join("addressbook")); +/// assert_eq!(address_book_path(&**CUPRATE_CACHE_DIR, Network::Testnet).as_path(), CUPRATE_CACHE_DIR.join(Network::Testnet.to_string()).join("addressbook")); +/// ``` +pub fn address_book_path(cache_dir: &Path, network: Network) -> PathBuf { + path_with_network(cache_dir, network).join("addressbook") } //---------------------------------------------------------------------------------------------------- Tests @@ -197,29 +234,21 @@ mod test { (&*CUPRATE_CACHE_DIR, ""), (&*CUPRATE_CONFIG_DIR, ""), (&*CUPRATE_DATA_DIR, ""), - (&*CUPRATE_BLOCKCHAIN_DIR, ""), - (&*CUPRATE_TXPOOL_DIR, ""), ]; if cfg!(target_os = "windows") { array[0].1 = r"AppData\Local\Cuprate"; array[1].1 = r"AppData\Roaming\Cuprate"; array[2].1 = r"AppData\Roaming\Cuprate"; - array[3].1 = 
r"AppData\Roaming\Cuprate\blockchain"; - array[4].1 = r"AppData\Roaming\Cuprate\txpool"; } else if cfg!(target_os = "macos") { array[0].1 = "Library/Caches/Cuprate"; array[1].1 = "Library/Application Support/Cuprate"; array[2].1 = "Library/Application Support/Cuprate"; - array[3].1 = "Library/Application Support/Cuprate/blockchain"; - array[4].1 = "Library/Application Support/Cuprate/txpool"; } else { // Assumes Linux. array[0].1 = ".cache/cuprate"; array[1].1 = ".config/cuprate"; array[2].1 = ".local/share/cuprate"; - array[3].1 = ".local/share/cuprate/blockchain"; - array[4].1 = ".local/share/cuprate/txpool"; }; for (path, expected) in array { diff --git a/helper/src/network.rs b/helper/src/network.rs index f3224b33..d2961170 100644 --- a/helper/src/network.rs +++ b/helper/src/network.rs @@ -5,6 +5,12 @@ //! into it's own crate. //! //! `#[no_std]` compatible. +// TODO: move to types crate. + +use core::{ + fmt::{Display, Formatter}, + str::FromStr, +}; const MAINNET_NETWORK_ID: [u8; 16] = [ 0x12, 0x30, 0xF1, 0x71, 0x61, 0x04, 0x41, 0x61, 0x17, 0x31, 0x00, 0x82, 0x16, 0xA1, 0xA1, 0x10, @@ -17,7 +23,8 @@ const STAGENET_NETWORK_ID: [u8; 16] = [ ]; /// An enum representing every Monero network. 
-#[derive(Debug, Clone, Copy, Default)] +#[derive(Debug, Clone, Copy, Default, Ord, PartialOrd, Eq, PartialEq)] +#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] pub enum Network { /// Mainnet #[default] @@ -38,3 +45,28 @@ impl Network { } } } + +#[derive(Debug, PartialEq, Eq)] +pub struct ParseNetworkError; + +impl FromStr for Network { + type Err = ParseNetworkError; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s { + "mainnet" | "Mainnet" => Ok(Self::Mainnet), + "testnet" | "Testnet" => Ok(Self::Testnet), + "stagenet" | "Stagenet" => Ok(Self::Stagenet), + _ => Err(ParseNetworkError), + } + } +} +impl Display for Network { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + f.write_str(match self { + Self::Mainnet => "mainnet", + Self::Testnet => "testnet", + Self::Stagenet => "stagenet", + }) + } +} diff --git a/p2p/address-book/Cargo.toml b/p2p/address-book/Cargo.toml index a88819f7..d57cfded 100644 --- a/p2p/address-book/Cargo.toml +++ b/p2p/address-book/Cargo.toml @@ -23,7 +23,7 @@ indexmap = { workspace = true, features = ["std"] } rand = { workspace = true, features = ["std", "std_rng"] } -borsh = { workspace = true, features = ["derive", "std"]} +borsh = { workspace = true, features = ["derive", "std"] } [dev-dependencies] cuprate-test-utils = { workspace = true } diff --git a/p2p/address-book/src/book/tests.rs b/p2p/address-book/src/book/tests.rs index 216fcfac..b2c4c493 100644 --- a/p2p/address-book/src/book/tests.rs +++ b/p2p/address-book/src/book/tests.rs @@ -15,7 +15,7 @@ fn test_cfg() -> AddressBookConfig { AddressBookConfig { max_white_list_length: 100, max_gray_list_length: 500, - peer_store_file: PathBuf::new(), + peer_store_directory: PathBuf::new(), peer_save_period: Duration::from_secs(60), } } diff --git a/p2p/address-book/src/lib.rs b/p2p/address-book/src/lib.rs index c0903485..054be462 100644 --- a/p2p/address-book/src/lib.rs +++ b/p2p/address-book/src/lib.rs @@ -29,8 +29,8 @@ pub struct
AddressBookConfig { /// /// Gray peers are peers we are yet to make a connection to. pub max_gray_list_length: usize, - /// The location to store the address book. - pub peer_store_file: PathBuf, + /// The location to store the peer store files. + pub peer_store_directory: PathBuf, /// The amount of time between saving the address book to disk. pub peer_save_period: Duration, } @@ -63,11 +63,6 @@ pub enum AddressBookError { pub async fn init_address_book( cfg: AddressBookConfig, ) -> Result, std::io::Error> { - tracing::info!( - "Loading peers from file: {} ", - cfg.peer_store_file.display() - ); - let (white_list, gray_list) = match store::read_peers_from_disk::(&cfg).await { Ok(res) => res, Err(e) if e.kind() == ErrorKind::NotFound => (vec![], vec![]), diff --git a/p2p/address-book/src/store.rs b/p2p/address-book/src/store.rs index 9abf0c3e..47994ae5 100644 --- a/p2p/address-book/src/store.rs +++ b/p2p/address-book/src/store.rs @@ -39,7 +39,9 @@ pub(crate) fn save_peers_to_disk( }) .unwrap(); - let file = cfg.peer_store_file.clone(); + let file = cfg + .peer_store_directory + .join(format!("{}_p2p_state", Z::NAME)); spawn_blocking(move || fs::write(&file, &data)) } @@ -52,7 +54,12 @@ pub(crate) async fn read_peers_from_disk( ), std::io::Error, > { - let file = cfg.peer_store_file.clone(); + let file = cfg + .peer_store_directory + .join(format!("{}_p2p_state", Z::NAME)); + + tracing::info!("Loading peers from file: {} ", file.display()); + let data = spawn_blocking(move || fs::read(file)).await.unwrap()?; let de_ser: DeserPeerDataV1 = from_slice(&data)?; diff --git a/p2p/p2p/src/block_downloader.rs b/p2p/p2p/src/block_downloader.rs index faac4d5d..db103000 100644 --- a/p2p/p2p/src/block_downloader.rs +++ b/p2p/p2p/src/block_downloader.rs @@ -62,15 +62,15 @@ pub struct BlockBatch { pub struct BlockDownloaderConfig { /// The size in bytes of the buffer between the block downloader and the place which /// is consuming the downloaded blocks. 
- pub buffer_size: usize, + pub buffer_bytes: usize, /// The size of the in progress queue (in bytes) at which we stop requesting more blocks. - pub in_progress_queue_size: usize, + pub in_progress_queue_bytes: usize, /// The [`Duration`] between checking the client pool for free peers. pub check_client_pool_interval: Duration, /// The target size of a single batch of blocks (in bytes). - pub target_batch_size: usize, + pub target_batch_bytes: usize, /// The initial amount of blocks to request (in number of blocks) - pub initial_batch_size: usize, + pub initial_batch_len: usize, } /// An error that occurred in the [`BlockDownloader`]. @@ -145,7 +145,7 @@ where + 'static, C::Future: Send + 'static, { - let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_size); + let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_bytes); let block_downloader = BlockDownloader::new(peer_set, our_chain_svc, buffer_appender, config); @@ -242,7 +242,7 @@ where Self { peer_set, our_chain_svc, - amount_of_blocks_to_request: config.initial_batch_size, + amount_of_blocks_to_request: config.initial_batch_len, amount_of_blocks_to_request_updated_at: 0, amount_of_empty_chain_entries: 0, block_download_tasks: JoinSet::new(), @@ -381,7 +381,7 @@ where } // If our ready queue is too large send duplicate requests for the blocks we are waiting on. 
- if self.block_queue.size() >= self.config.in_progress_queue_size { + if self.block_queue.size() >= self.config.in_progress_queue_bytes { return self.request_inflight_batch_again(client); } @@ -565,7 +565,7 @@ where self.amount_of_blocks_to_request = calculate_next_block_batch_size( block_batch.size, block_batch.blocks.len(), - self.config.target_batch_size, + self.config.target_batch_bytes, ); tracing::debug!( diff --git a/p2p/p2p/src/block_downloader/tests.rs b/p2p/p2p/src/block_downloader/tests.rs index 6799482d..707b858d 100644 --- a/p2p/p2p/src/block_downloader/tests.rs +++ b/p2p/p2p/src/block_downloader/tests.rs @@ -66,11 +66,11 @@ proptest! { genesis: *blockchain.blocks.first().unwrap().0 }, BlockDownloaderConfig { - buffer_size: 1_000, - in_progress_queue_size: 10_000, + buffer_bytes: 1_000, + in_progress_queue_bytes: 10_000, check_client_pool_interval: Duration::from_secs(5), - target_batch_size: 5_000, - initial_batch_size: 1, + target_batch_bytes: 5_000, + initial_batch_len: 1, }); let blocks = stream.map(|blocks| blocks.blocks).concat().await; diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index 6fd973cd..92b4374b 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -15,7 +15,7 @@ default = ["heed"] heed = ["cuprate-database/heed"] redb = ["cuprate-database/redb"] redb-memory = ["cuprate-database/redb-memory"] -serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"] +serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde", "cuprate-helper/serde"] [dependencies] cuprate-database = { workspace = true } diff --git a/storage/blockchain/README.md b/storage/blockchain/README.md index 3f97a3d6..8ab8b432 100644 --- a/storage/blockchain/README.md +++ b/storage/blockchain/README.md @@ -76,7 +76,7 @@ use cuprate_blockchain::{ let tmp_dir = tempfile::tempdir()?; let db_dir = tmp_dir.path().to_owned(); let config = ConfigBuilder::new() - 
.db_directory(db_dir.into()) + .data_directory(db_dir.into()) .build(); // Initialize the database environment. diff --git a/storage/blockchain/src/config.rs b/storage/blockchain/src/config.rs index e4b76068..4bef2cb0 100644 --- a/storage/blockchain/src/config.rs +++ b/storage/blockchain/src/config.rs @@ -25,7 +25,7 @@ //! //! let config = ConfigBuilder::new() //! // Use a custom database directory. -//! .db_directory(db_dir.into()) +//! .data_directory(db_dir.into()) //! // Use as many reader threads as possible (when using `service`). //! .reader_threads(ReaderThreads::OnePerThread) //! // Use the fastest sync mode. @@ -41,13 +41,16 @@ //! ``` //---------------------------------------------------------------------------------------------------- Import -use std::{borrow::Cow, path::Path}; +use std::{borrow::Cow, path::PathBuf}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use cuprate_database::{config::SyncMode, resize::ResizeAlgorithm}; -use cuprate_helper::fs::CUPRATE_BLOCKCHAIN_DIR; +use cuprate_helper::{ + fs::{blockchain_path, CUPRATE_DATA_DIR}, + network::Network, +}; // re-exports pub use cuprate_database_service::ReaderThreads; @@ -59,8 +62,9 @@ pub use cuprate_database_service::ReaderThreads; #[derive(Debug, Clone, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct ConfigBuilder { - /// [`Config::db_directory`]. - db_directory: Option>, + network: Network, + + data_dir: Option, /// [`Config::cuprate_database_config`]. db_config: cuprate_database::config::ConfigBuilder, @@ -76,10 +80,12 @@ impl ConfigBuilder { /// after this function to use default values. 
pub fn new() -> Self { Self { - db_directory: None, - db_config: cuprate_database::config::ConfigBuilder::new(Cow::Borrowed( - &*CUPRATE_BLOCKCHAIN_DIR, - )), + network: Network::default(), + data_dir: None, + db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(blockchain_path( + &CUPRATE_DATA_DIR, + Network::Mainnet, + ))), reader_threads: None, } } @@ -87,21 +93,21 @@ impl ConfigBuilder { /// Build into a [`Config`]. /// /// # Default values - /// If [`ConfigBuilder::db_directory`] was not called, - /// the default [`CUPRATE_BLOCKCHAIN_DIR`] will be used. + /// If [`ConfigBuilder::data_directory`] was not called, + /// [`blockchain_path`] with [`CUPRATE_DATA_DIR`] [`Network::Mainnet`] will be used. /// /// For all other values, [`Default::default`] is used. pub fn build(self) -> Config { // INVARIANT: all PATH safety checks are done // in `helper::fs`. No need to do them here. - let db_directory = self - .db_directory - .unwrap_or_else(|| Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR)); + let data_dir = self + .data_dir + .unwrap_or_else(|| CUPRATE_DATA_DIR.to_path_buf()); let reader_threads = self.reader_threads.unwrap_or_default(); let db_config = self .db_config - .db_directory(db_directory) + .db_directory(Cow::Owned(blockchain_path(&data_dir, self.network))) .reader_threads(reader_threads.as_threads()) .build(); @@ -111,10 +117,17 @@ impl ConfigBuilder { } } - /// Set a custom database directory (and file) [`Path`]. + /// Change the network this blockchain database is for. #[must_use] - pub fn db_directory(mut self, db_directory: Cow<'static, Path>) -> Self { - self.db_directory = Some(db_directory); + pub const fn network(mut self, network: Network) -> Self { + self.network = network; + self + } + + /// Set a custom database directory (and file) [`PathBuf`]. 
+ #[must_use] + pub fn data_directory(mut self, db_directory: PathBuf) -> Self { + self.data_dir = Some(db_directory); self } @@ -145,9 +158,7 @@ impl ConfigBuilder { /// Good default for testing, and resource-available machines. #[must_use] pub fn fast(mut self) -> Self { - self.db_config = - cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR)) - .fast(); + self.db_config = self.db_config.fast(); self.reader_threads = Some(ReaderThreads::OnePerThread); self @@ -159,9 +170,7 @@ impl ConfigBuilder { /// Good default for resource-limited machines, e.g. a cheap VPS. #[must_use] pub fn low_power(mut self) -> Self { - self.db_config = - cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_BLOCKCHAIN_DIR)) - .low_power(); + self.db_config = self.db_config.low_power(); self.reader_threads = Some(ReaderThreads::One); self @@ -170,10 +179,13 @@ impl ConfigBuilder { impl Default for ConfigBuilder { fn default() -> Self { - let db_directory = Cow::Borrowed(&**CUPRATE_BLOCKCHAIN_DIR); Self { - db_directory: Some(db_directory.clone()), - db_config: cuprate_database::config::ConfigBuilder::new(db_directory), + network: Network::default(), + data_dir: Some(CUPRATE_DATA_DIR.to_path_buf()), + db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(blockchain_path( + &CUPRATE_DATA_DIR, + Network::default(), + ))), reader_threads: Some(ReaderThreads::default()), } } @@ -201,7 +213,7 @@ impl Config { /// Create a new [`Config`] with sane default settings. /// /// The [`cuprate_database::config::Config::db_directory`] - /// will be set to [`CUPRATE_BLOCKCHAIN_DIR`]. + /// will be set to [`blockchain_path`] with [`CUPRATE_DATA_DIR`] [`Network::Mainnet`]. /// /// All other values will be [`Default::default`]. 
/// @@ -213,14 +225,14 @@ impl Config { /// resize::ResizeAlgorithm, /// DATABASE_DATA_FILENAME, /// }; - /// use cuprate_helper::fs::*; + /// use cuprate_helper::{fs::*, network::Network}; /// /// use cuprate_blockchain::config::*; /// /// let config = Config::new(); /// - /// assert_eq!(config.db_config.db_directory(), &*CUPRATE_BLOCKCHAIN_DIR); - /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_BLOCKCHAIN_DIR)); + /// assert_eq!(config.db_config.db_directory().as_ref(), blockchain_path(&CUPRATE_DATA_DIR, Network::Mainnet).as_path()); + /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_DATA_DIR)); /// assert!(config.db_config.db_file().ends_with(DATABASE_DATA_FILENAME)); /// assert_eq!(config.db_config.sync_mode, SyncMode::default()); /// assert_eq!(config.db_config.resize_algorithm, ResizeAlgorithm::default()); diff --git a/storage/blockchain/src/ops/mod.rs b/storage/blockchain/src/ops/mod.rs index 285aa244..96b25aff 100644 --- a/storage/blockchain/src/ops/mod.rs +++ b/storage/blockchain/src/ops/mod.rs @@ -71,7 +71,7 @@ //! let tmp_dir = tempfile::tempdir()?; //! let db_dir = tmp_dir.path().to_owned(); //! let config = ConfigBuilder::new() -//! .db_directory(db_dir.into()) +//! .data_directory(db_dir.into()) //! .build(); //! //! // Initialize the database environment. diff --git a/storage/blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs index c5eb80c2..d6a811bd 100644 --- a/storage/blockchain/src/service/mod.rs +++ b/storage/blockchain/src/service/mod.rs @@ -77,7 +77,7 @@ //! let tmp_dir = tempfile::tempdir()?; //! let db_dir = tmp_dir.path().to_owned(); //! let config = ConfigBuilder::new() -//! .db_directory(db_dir.into()) +//! .data_directory(db_dir.into()) //! .build(); //! //! // Initialize the database thread-pool. 
diff --git a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs index 719f3613..38db6652 100644 --- a/storage/blockchain/src/service/tests.rs +++ b/storage/blockchain/src/service/tests.rs @@ -7,7 +7,6 @@ //---------------------------------------------------------------------------------------------------- Use use std::{ - borrow::Cow, collections::{HashMap, HashSet}, sync::Arc, }; @@ -46,7 +45,7 @@ fn init_service() -> ( ) { let tempdir = tempfile::tempdir().unwrap(); let config = ConfigBuilder::new() - .db_directory(Cow::Owned(tempdir.path().into())) + .data_directory(tempdir.path().into()) .low_power() .build(); let (reader, writer, env) = init(config).unwrap(); diff --git a/storage/blockchain/src/tests.rs b/storage/blockchain/src/tests.rs index 1fe20637..4192f812 100644 --- a/storage/blockchain/src/tests.rs +++ b/storage/blockchain/src/tests.rs @@ -5,7 +5,7 @@ //! - only used internally //---------------------------------------------------------------------------------------------------- Import -use std::{borrow::Cow, fmt::Debug}; +use std::fmt::Debug; use pretty_assertions::assert_eq; @@ -74,7 +74,7 @@ impl AssertTableLen { pub(crate) fn tmp_concrete_env() -> (impl Env, tempfile::TempDir) { let tempdir = tempfile::tempdir().unwrap(); let config = ConfigBuilder::new() - .db_directory(Cow::Owned(tempdir.path().into())) + .data_directory(tempdir.path().into()) .low_power() .build(); let env = crate::open(config).unwrap(); diff --git a/storage/txpool/Cargo.toml b/storage/txpool/Cargo.toml index c9082655..0fb43b24 100644 --- a/storage/txpool/Cargo.toml +++ b/storage/txpool/Cargo.toml @@ -15,7 +15,7 @@ default = ["heed"] heed = ["cuprate-database/heed"] redb = ["cuprate-database/redb"] redb-memory = ["cuprate-database/redb-memory"] -serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"] +serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde", "cuprate-helper/serde"] [dependencies] 
cuprate-database = { workspace = true, features = ["heed"] } diff --git a/storage/txpool/README.md b/storage/txpool/README.md index d14f445b..ca4f737c 100644 --- a/storage/txpool/README.md +++ b/storage/txpool/README.md @@ -78,7 +78,7 @@ use cuprate_txpool::{ let tmp_dir = tempfile::tempdir()?; let db_dir = tmp_dir.path().to_owned(); let config = ConfigBuilder::new() - .db_directory(db_dir.into()) + .data_directory(db_dir.into()) .build(); // Initialize the database environment. diff --git a/storage/txpool/src/config.rs b/storage/txpool/src/config.rs index 1ef0d734..724ae21a 100644 --- a/storage/txpool/src/config.rs +++ b/storage/txpool/src/config.rs @@ -1,15 +1,18 @@ //! The transaction pool [`Config`]. -use std::{borrow::Cow, path::Path}; +use std::{borrow::Cow, path::PathBuf}; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; use cuprate_database::{ config::{Config as DbConfig, SyncMode}, resize::ResizeAlgorithm, }; use cuprate_database_service::ReaderThreads; -use cuprate_helper::fs::CUPRATE_TXPOOL_DIR; - -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; +use cuprate_helper::{ + fs::{txpool_path, CUPRATE_DATA_DIR}, + network::Network, +}; /// The default transaction pool weight limit. const DEFAULT_TXPOOL_WEIGHT_LIMIT: usize = 600 * 1024 * 1024; @@ -21,8 +24,9 @@ const DEFAULT_TXPOOL_WEIGHT_LIMIT: usize = 600 * 1024 * 1024; #[derive(Debug, Clone, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct ConfigBuilder { - /// [`Config::db_directory`]. - db_directory: Option>, + network: Network, + + data_dir: Option, /// [`Config::cuprate_database_config`]. db_config: cuprate_database::config::ConfigBuilder, @@ -41,10 +45,12 @@ impl ConfigBuilder { /// after this function to use default values. 
pub fn new() -> Self { Self { - db_directory: None, - db_config: cuprate_database::config::ConfigBuilder::new(Cow::Borrowed( - &*CUPRATE_TXPOOL_DIR, - )), + network: Network::default(), + data_dir: None, + db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(txpool_path( + &CUPRATE_DATA_DIR, + Network::Mainnet, + ))), reader_threads: None, max_txpool_weight: None, } @@ -53,16 +59,16 @@ impl ConfigBuilder { /// Build into a [`Config`]. /// /// # Default values - /// If [`ConfigBuilder::db_directory`] was not called, - /// the default [`CUPRATE_TXPOOL_DIR`] will be used. + /// If [`ConfigBuilder::data_directory`] was not called, + /// [`txpool_path`] with [`CUPRATE_DATA_DIR`] and [`Network::Mainnet`] will be used. /// /// For all other values, [`Default::default`] is used. pub fn build(self) -> Config { // INVARIANT: all PATH safety checks are done // in `helper::fs`. No need to do them here. - let db_directory = self - .db_directory - .unwrap_or_else(|| Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)); + let data_dir = self + .data_dir + .unwrap_or_else(|| CUPRATE_DATA_DIR.to_path_buf()); let reader_threads = self.reader_threads.unwrap_or_default(); @@ -72,7 +78,7 @@ impl ConfigBuilder { let db_config = self .db_config - .db_directory(db_directory) + .db_directory(Cow::Owned(txpool_path(&data_dir, self.network))) .reader_threads(reader_threads.as_threads()) .build(); @@ -83,6 +89,13 @@ impl ConfigBuilder { } } + /// Change the network this database is for. + #[must_use] + pub const fn network(mut self, network: Network) -> Self { + self.network = network; + self + } + /// Sets a new maximum weight for the transaction pool. #[must_use] pub const fn max_txpool_weight(mut self, max_txpool_weight: usize) -> Self { @@ -90,10 +103,10 @@ impl ConfigBuilder { self } - /// Set a custom database directory (and file) [`Path`]. + /// Set a custom data directory [`PathBuf`]. 
#[must_use] - pub fn db_directory(mut self, db_directory: Cow<'static, Path>) -> Self { - self.db_directory = Some(db_directory); + pub fn data_directory(mut self, db_directory: PathBuf) -> Self { + self.data_dir = Some(db_directory); self } @@ -124,9 +137,7 @@ impl ConfigBuilder { /// Good default for testing, and resource-available machines. #[must_use] pub fn fast(mut self) -> Self { - self.db_config = - cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)) - .fast(); + self.db_config = self.db_config.fast(); self.reader_threads = Some(ReaderThreads::OnePerThread); self @@ -138,9 +149,7 @@ impl ConfigBuilder { /// Good default for resource-limited machines, e.g. a cheap VPS. #[must_use] pub fn low_power(mut self) -> Self { - self.db_config = - cuprate_database::config::ConfigBuilder::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)) - .low_power(); + self.db_config = self.db_config.low_power(); self.reader_threads = Some(ReaderThreads::One); self @@ -149,10 +158,13 @@ impl ConfigBuilder { impl Default for ConfigBuilder { fn default() -> Self { - let db_directory = Cow::Borrowed(CUPRATE_TXPOOL_DIR.as_path()); Self { - db_directory: Some(db_directory.clone()), - db_config: cuprate_database::config::ConfigBuilder::new(db_directory), + network: Network::default(), + data_dir: Some(CUPRATE_DATA_DIR.to_path_buf()), + db_config: cuprate_database::config::ConfigBuilder::new(Cow::Owned(txpool_path( + &CUPRATE_DATA_DIR, + Network::Mainnet, + ))), reader_threads: Some(ReaderThreads::default()), max_txpool_weight: Some(DEFAULT_TXPOOL_WEIGHT_LIMIT), } @@ -184,7 +196,7 @@ impl Config { /// Create a new [`Config`] with sane default settings. /// /// The [`DbConfig::db_directory`] - /// will be set to [`CUPRATE_TXPOOL_DIR`]. + /// will be set to [`txpool_path`] with [`CUPRATE_DATA_DIR`] and [`Network::Mainnet`]. /// /// All other values will be [`Default::default`]. 
/// @@ -197,25 +209,21 @@ impl Config { /// DATABASE_DATA_FILENAME, /// }; /// use cuprate_database_service::ReaderThreads; - /// use cuprate_helper::fs::*; + /// use cuprate_helper::{fs::*, network::Network}; /// /// use cuprate_txpool::Config; /// /// let config = Config::new(); /// - /// assert_eq!(config.db_config.db_directory(), &*CUPRATE_TXPOOL_DIR); - /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_TXPOOL_DIR)); + /// assert_eq!(config.db_config.db_directory(), txpool_path(&CUPRATE_DATA_DIR, Network::Mainnet).as_path()); + /// assert!(config.db_config.db_file().starts_with(&*CUPRATE_DATA_DIR)); /// assert!(config.db_config.db_file().ends_with(DATABASE_DATA_FILENAME)); /// assert_eq!(config.db_config.sync_mode, SyncMode::default()); /// assert_eq!(config.db_config.resize_algorithm, ResizeAlgorithm::default()); /// assert_eq!(config.reader_threads, ReaderThreads::default()); /// ``` pub fn new() -> Self { - Self { - db_config: DbConfig::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)), - reader_threads: ReaderThreads::default(), - max_txpool_weight: 0, - } + ConfigBuilder::new().build() } } diff --git a/storage/txpool/src/ops.rs b/storage/txpool/src/ops.rs index 289a8bbf..badc4f6d 100644 --- a/storage/txpool/src/ops.rs +++ b/storage/txpool/src/ops.rs @@ -51,7 +51,7 @@ //! let tmp_dir = tempfile::tempdir()?; //! let db_dir = tmp_dir.path().to_owned(); //! let config = ConfigBuilder::new() -//! .db_directory(db_dir.into()) +//! .data_directory(db_dir.into()) //! .build(); //! //! // Initialize the database environment. diff --git a/storage/txpool/src/service.rs b/storage/txpool/src/service.rs index a82de5bf..03ce2f03 100644 --- a/storage/txpool/src/service.rs +++ b/storage/txpool/src/service.rs @@ -83,7 +83,7 @@ //! let tmp_dir = tempfile::tempdir()?; //! let db_dir = tmp_dir.path().to_owned(); //! let config = ConfigBuilder::new() -//! .db_directory(db_dir.into()) +//! .data_directory(db_dir.into()) //! .build(); //! //! 
// Initialize the database thread-pool. From 7b8756fa80e386fb04173d8220c15c86bf9f9888 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Tue, 3 Dec 2024 20:21:05 +0000 Subject: [PATCH 3/3] cuprated: P2P protocol request handler (#303) * add cuprated skeleton * fmt and add deny exception * add main chain batch handler * add blockchain init * very rough block manager * misc changes * move more config values * add new tables & types * add function to fully add an alt block * resolve current todo!s * add new requests * WIP: starting re-orgs * add last service request * commit Cargo.lock * add test * more docs + cleanup + alt blocks request * clippy + fmt * document types * move tx_fee to helper * more doc updates * fmt * fix imports * remove config files * fix merge errors * fix generated coins * handle more p2p requests + alt blocks * clean up handler code * add function for incoming blocks * add docs to handler functions * broadcast new blocks + add commands * add fluffy block handler * fix new block handling * small cleanup * increase outbound peer count * fix merge * clean up the blockchain manger * add more docs + cleanup imports * fix typo * fix doc * remove unrelated changes * add `get_objects` handler * add `get_chain` handler * add `fluffy_missing_txs` handler * add `new_fluffy_block` handler * improve interface globals * manger -> manager * enums instead of bools * move chain service to separate file * more review fixes * sort imports + docs * init dandelion integration * add dandelion start function * finish incoming tx handler * Add tx blob hash table * Add missing txpool requests * handle duplicate stem txs * check txpool on incoming block * add request to remove tx in new blocks from the pool * tell the txpool about incoming blocks * fix merge * typos * remove blockchain height from txpool * fix merge * fix merge * handle incoming txs in p2p request handler * Allow `IncomingTxHandler` to be given later * add p2p clearnet init * fix build * misc changes * doc 
updates * more doc updates * sort imports * review changes * Result -> DbResult * use `NonZero` * review fixes * remove `rust-2024-compatibility` lint --- Cargo.lock | 1 + Cargo.toml | 1 - binaries/cuprated/src/p2p.rs | 49 ++ .../cuprated/src/p2p/core_sync_service.rs | 49 ++ binaries/cuprated/src/p2p/request_handler.rs | 421 ++++++++++++++++++ binaries/cuprated/src/txpool.rs | 2 +- binaries/cuprated/src/txpool/incoming_tx.rs | 5 + net/wire/src/p2p/protocol.rs | 2 +- p2p/p2p-core/src/protocol.rs | 4 + p2p/p2p-core/src/protocol/try_from.rs | 3 + p2p/p2p/src/constants.rs | 6 +- storage/blockchain/Cargo.toml | 1 + storage/blockchain/src/ops/block.rs | 68 ++- storage/blockchain/src/ops/blockchain.rs | 42 +- storage/blockchain/src/service/read.rs | 174 ++++++-- types/src/blockchain.rs | 66 ++- types/src/lib.rs | 4 +- types/src/types.rs | 7 + 18 files changed, 861 insertions(+), 44 deletions(-) create mode 100644 binaries/cuprated/src/p2p/core_sync_service.rs diff --git a/Cargo.lock b/Cargo.lock index 0d55c8aa..c8701f35 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -704,6 +704,7 @@ version = "0.0.0" dependencies = [ "bitflags 2.6.0", "bytemuck", + "bytes", "cuprate-constants", "cuprate-database", "cuprate-database-service", diff --git a/Cargo.toml b/Cargo.toml index 9be1528c..18130577 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -376,7 +376,6 @@ unused_lifetimes = "deny" unused_macro_rules = "deny" ambiguous_glob_imports = "deny" unused_unsafe = "deny" -rust_2024_compatibility = "deny" # Warm let_underscore = { level = "deny", priority = -1 } diff --git a/binaries/cuprated/src/p2p.rs b/binaries/cuprated/src/p2p.rs index cdf1cef7..aeb98b60 100644 --- a/binaries/cuprated/src/p2p.rs +++ b/binaries/cuprated/src/p2p.rs @@ -1,8 +1,57 @@ //! P2P //! //! Will handle initiating the P2P and contains a protocol request handler. 
+use futures::{FutureExt, TryFutureExt}; +use tokio::sync::oneshot; +use tower::ServiceExt; +use cuprate_blockchain::service::BlockchainReadHandle; +use cuprate_consensus::BlockChainContextService; +use cuprate_p2p::{NetworkInterface, P2PConfig}; +use cuprate_p2p_core::ClearNet; +use cuprate_txpool::service::TxpoolReadHandle; + +use crate::txpool::IncomingTxHandler; + +mod core_sync_service; mod network_address; pub mod request_handler; pub use network_address::CrossNetworkInternalPeerId; + +/// Starts the P2P clearnet network, returning a [`NetworkInterface`] to interact with it. +/// +/// A [`oneshot::Sender`] is also returned to provide the [`IncomingTxHandler`], until this is provided network +/// handshakes can not be completed. +pub async fn start_clearnet_p2p( + blockchain_read_handle: BlockchainReadHandle, + blockchain_context_service: BlockChainContextService, + txpool_read_handle: TxpoolReadHandle, + config: P2PConfig, +) -> Result< + ( + NetworkInterface, + oneshot::Sender, + ), + tower::BoxError, +> { + let (incoming_tx_handler_tx, incoming_tx_handler_rx) = oneshot::channel(); + + let request_handler_maker = request_handler::P2pProtocolRequestHandlerMaker { + blockchain_read_handle, + blockchain_context_service: blockchain_context_service.clone(), + txpool_read_handle, + incoming_tx_handler: None, + incoming_tx_handler_fut: incoming_tx_handler_rx.shared(), + }; + + Ok(( + cuprate_p2p::initialize_network( + request_handler_maker.map_response(|s| s.map_err(Into::into)), + core_sync_service::CoreSyncService(blockchain_context_service), + config, + ) + .await?, + incoming_tx_handler_tx, + )) +} diff --git a/binaries/cuprated/src/p2p/core_sync_service.rs b/binaries/cuprated/src/p2p/core_sync_service.rs new file mode 100644 index 00000000..d3c3ca1e --- /dev/null +++ b/binaries/cuprated/src/p2p/core_sync_service.rs @@ -0,0 +1,49 @@ +use std::task::{Context, Poll}; + +use futures::{future::BoxFuture, FutureExt, TryFutureExt}; +use tower::Service; + +use 
cuprate_consensus::{ + BlockChainContextRequest, BlockChainContextResponse, BlockChainContextService, +}; +use cuprate_helper::{cast::usize_to_u64, map::split_u128_into_low_high_bits}; +use cuprate_p2p_core::services::{CoreSyncDataRequest, CoreSyncDataResponse}; +use cuprate_wire::CoreSyncData; + +/// The core sync service. +#[derive(Clone)] +pub struct CoreSyncService(pub BlockChainContextService); + +impl Service for CoreSyncService { + type Response = CoreSyncDataResponse; + type Error = tower::BoxError; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.0.poll_ready(cx) + } + + fn call(&mut self, _: CoreSyncDataRequest) -> Self::Future { + self.0 + .call(BlockChainContextRequest::Context) + .map_ok(|res| { + let BlockChainContextResponse::Context(context) = res else { + unreachable!() + }; + + let context = context.unchecked_blockchain_context(); + let (cumulative_difficulty, cumulative_difficulty_top64) = + split_u128_into_low_high_bits(context.cumulative_difficulty); + + CoreSyncDataResponse(CoreSyncData { + cumulative_difficulty, + cumulative_difficulty_top64, + current_height: usize_to_u64(context.chain_height), + pruning_seed: 0, + top_id: context.top_hash, + top_version: context.current_hf.as_u8(), + }) + }) + .boxed() + } +} diff --git a/binaries/cuprated/src/p2p/request_handler.rs b/binaries/cuprated/src/p2p/request_handler.rs index 8b137891..7d72fa37 100644 --- a/binaries/cuprated/src/p2p/request_handler.rs +++ b/binaries/cuprated/src/p2p/request_handler.rs @@ -1 +1,422 @@ +use std::{ + collections::HashSet, + future::{ready, Ready}, + hash::Hash, + task::{Context, Poll}, +}; +use bytes::Bytes; +use futures::{ + future::{BoxFuture, Shared}, + FutureExt, +}; +use monero_serai::{block::Block, transaction::Transaction}; +use tokio::sync::{broadcast, oneshot, watch}; +use tokio_stream::wrappers::WatchStream; +use tower::{Service, ServiceExt}; + +use 
cuprate_blockchain::service::BlockchainReadHandle; +use cuprate_consensus::{ + transactions::new_tx_verification_data, BlockChainContextRequest, BlockChainContextResponse, + BlockChainContextService, +}; +use cuprate_dandelion_tower::TxState; +use cuprate_fixed_bytes::ByteArrayVec; +use cuprate_helper::cast::u64_to_usize; +use cuprate_helper::{ + asynch::rayon_spawn_async, + cast::usize_to_u64, + map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits}, +}; +use cuprate_p2p::constants::{ + MAX_BLOCKS_IDS_IN_CHAIN_ENTRY, MAX_BLOCK_BATCH_LEN, MAX_TRANSACTION_BLOB_SIZE, MEDIUM_BAN, +}; +use cuprate_p2p_core::{ + client::{InternalPeerID, PeerInformation}, + NetZoneAddress, NetworkZone, ProtocolRequest, ProtocolResponse, +}; +use cuprate_txpool::service::TxpoolReadHandle; +use cuprate_types::{ + blockchain::{BlockchainReadRequest, BlockchainResponse}, + BlockCompleteEntry, TransactionBlobs, TxsInBlock, +}; +use cuprate_wire::protocol::{ + ChainRequest, ChainResponse, FluffyMissingTransactionsRequest, GetObjectsRequest, + GetObjectsResponse, NewFluffyBlock, NewTransactions, +}; + +use crate::{ + blockchain::interface::{self as blockchain_interface, IncomingBlockError}, + constants::PANIC_CRITICAL_SERVICE_ERROR, + p2p::CrossNetworkInternalPeerId, + txpool::{IncomingTxError, IncomingTxHandler, IncomingTxs}, +}; + +/// The P2P protocol request handler [`MakeService`](tower::MakeService). +#[derive(Clone)] +pub struct P2pProtocolRequestHandlerMaker { + pub blockchain_read_handle: BlockchainReadHandle, + pub blockchain_context_service: BlockChainContextService, + pub txpool_read_handle: TxpoolReadHandle, + + /// The [`IncomingTxHandler`], wrapped in an [`Option`] as there is a cyclic reference between [`P2pProtocolRequestHandlerMaker`] + /// and the [`IncomingTxHandler`]. + pub incoming_tx_handler: Option, + + /// A [`Future`](std::future::Future) that produces the [`IncomingTxHandler`]. 
+ pub incoming_tx_handler_fut: Shared>, +} + +impl Service> for P2pProtocolRequestHandlerMaker +where + InternalPeerID: Into, +{ + type Response = P2pProtocolRequestHandler; + type Error = tower::BoxError; + type Future = Ready>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + if self.incoming_tx_handler.is_none() { + return self + .incoming_tx_handler_fut + .poll_unpin(cx) + .map(|incoming_tx_handler| { + self.incoming_tx_handler = Some(incoming_tx_handler?); + Ok(()) + }); + } + + Poll::Ready(Ok(())) + } + + fn call(&mut self, peer_information: PeerInformation) -> Self::Future { + let Some(incoming_tx_handler) = self.incoming_tx_handler.clone() else { + panic!("poll_ready was not called or did not return `Poll::Ready`") + }; + + // TODO: check sync info? + + let blockchain_read_handle = self.blockchain_read_handle.clone(); + let txpool_read_handle = self.txpool_read_handle.clone(); + + ready(Ok(P2pProtocolRequestHandler { + peer_information, + blockchain_read_handle, + blockchain_context_service: self.blockchain_context_service.clone(), + txpool_read_handle, + incoming_tx_handler, + })) + } +} + +/// The P2P protocol request handler. 
+#[derive(Clone)] +pub struct P2pProtocolRequestHandler { + peer_information: PeerInformation, + blockchain_read_handle: BlockchainReadHandle, + blockchain_context_service: BlockChainContextService, + txpool_read_handle: TxpoolReadHandle, + incoming_tx_handler: IncomingTxHandler, +} + +impl Service for P2pProtocolRequestHandler +where + InternalPeerID: Into, +{ + type Response = ProtocolResponse; + type Error = anyhow::Error; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, request: ProtocolRequest) -> Self::Future { + match request { + ProtocolRequest::GetObjects(r) => { + get_objects(r, self.blockchain_read_handle.clone()).boxed() + } + ProtocolRequest::GetChain(r) => { + get_chain(r, self.blockchain_read_handle.clone()).boxed() + } + ProtocolRequest::FluffyMissingTxs(r) => { + fluffy_missing_txs(r, self.blockchain_read_handle.clone()).boxed() + } + ProtocolRequest::NewBlock(_) => ready(Err(anyhow::anyhow!( + "Peer sent a full block when we support fluffy blocks" + ))) + .boxed(), + ProtocolRequest::NewFluffyBlock(r) => new_fluffy_block( + self.peer_information.clone(), + r, + self.blockchain_read_handle.clone(), + self.txpool_read_handle.clone(), + ) + .boxed(), + ProtocolRequest::NewTransactions(r) => new_transactions( + self.peer_information.clone(), + r, + self.blockchain_context_service.clone(), + self.incoming_tx_handler.clone(), + ) + .boxed(), + ProtocolRequest::GetTxPoolCompliment(_) => ready(Ok(ProtocolResponse::NA)).boxed(), // TODO: should we support this? 
+ } + } +} + +//---------------------------------------------------------------------------------------------------- Handler functions + +/// [`ProtocolRequest::GetObjects`] +async fn get_objects( + request: GetObjectsRequest, + mut blockchain_read_handle: BlockchainReadHandle, +) -> anyhow::Result { + if request.blocks.len() > MAX_BLOCK_BATCH_LEN { + anyhow::bail!("Peer requested more blocks than allowed.") + } + + let block_hashes: Vec<[u8; 32]> = (&request.blocks).into(); + // deallocate the backing `Bytes`. + drop(request); + + let BlockchainResponse::BlockCompleteEntries { + blocks, + missing_hashes, + blockchain_height, + } = blockchain_read_handle + .ready() + .await? + .call(BlockchainReadRequest::BlockCompleteEntries(block_hashes)) + .await? + else { + unreachable!(); + }; + + Ok(ProtocolResponse::GetObjects(GetObjectsResponse { + blocks, + missed_ids: ByteArrayVec::from(missing_hashes), + current_blockchain_height: usize_to_u64(blockchain_height), + })) +} + +/// [`ProtocolRequest::GetChain`] +async fn get_chain( + request: ChainRequest, + mut blockchain_read_handle: BlockchainReadHandle, +) -> anyhow::Result { + if request.block_ids.len() > MAX_BLOCKS_IDS_IN_CHAIN_ENTRY { + anyhow::bail!("Peer sent too many block hashes in chain request.") + } + + let block_hashes: Vec<[u8; 32]> = (&request.block_ids).into(); + let want_pruned_data = request.prune; + // deallocate the backing `Bytes`. + drop(request); + + let BlockchainResponse::NextChainEntry { + start_height, + chain_height, + block_ids, + block_weights, + cumulative_difficulty, + first_block_blob, + } = blockchain_read_handle + .ready() + .await? + .call(BlockchainReadRequest::NextChainEntry(block_hashes, 10_000)) + .await? 
+ else { + unreachable!(); + }; + + let Some(start_height) = start_height else { + anyhow::bail!("The peers chain has a different genesis block than ours."); + }; + + let (cumulative_difficulty_low64, cumulative_difficulty_top64) = + split_u128_into_low_high_bits(cumulative_difficulty); + + Ok(ProtocolResponse::GetChain(ChainResponse { + start_height: usize_to_u64(std::num::NonZero::get(start_height)), + total_height: usize_to_u64(chain_height), + cumulative_difficulty_low64, + cumulative_difficulty_top64, + m_block_ids: ByteArrayVec::from(block_ids), + first_block: first_block_blob.map_or(Bytes::new(), Bytes::from), + // only needed when pruned + m_block_weights: if want_pruned_data { + block_weights.into_iter().map(usize_to_u64).collect() + } else { + vec![] + }, + })) +} + +/// [`ProtocolRequest::FluffyMissingTxs`] +async fn fluffy_missing_txs( + mut request: FluffyMissingTransactionsRequest, + mut blockchain_read_handle: BlockchainReadHandle, +) -> anyhow::Result { + let tx_indexes = std::mem::take(&mut request.missing_tx_indices); + let block_hash: [u8; 32] = *request.block_hash; + let current_blockchain_height = request.current_blockchain_height; + + // deallocate the backing `Bytes`. + drop(request); + + let BlockchainResponse::TxsInBlock(res) = blockchain_read_handle + .ready() + .await? + .call(BlockchainReadRequest::TxsInBlock { + block_hash, + tx_indexes, + }) + .await? + else { + unreachable!(); + }; + + let Some(TxsInBlock { block, txs }) = res else { + anyhow::bail!("The peer requested txs out of range."); + }; + + Ok(ProtocolResponse::NewFluffyBlock(NewFluffyBlock { + b: BlockCompleteEntry { + block: Bytes::from(block), + txs: TransactionBlobs::Normal(txs.into_iter().map(Bytes::from).collect()), + pruned: false, + // only needed for pruned blocks. 
+ block_weight: 0, + }, + current_blockchain_height, + })) +} + +/// [`ProtocolRequest::NewFluffyBlock`] +async fn new_fluffy_block( + peer_information: PeerInformation, + request: NewFluffyBlock, + mut blockchain_read_handle: BlockchainReadHandle, + mut txpool_read_handle: TxpoolReadHandle, +) -> anyhow::Result { + // TODO: check context service here and ignore the block? + let current_blockchain_height = request.current_blockchain_height; + + peer_information + .core_sync_data + .lock() + .unwrap() + .current_height = current_blockchain_height; + + let (block, txs) = rayon_spawn_async(move || -> Result<_, anyhow::Error> { + let block = Block::read(&mut request.b.block.as_ref())?; + + let tx_blobs = request + .b + .txs + .take_normal() + .ok_or(anyhow::anyhow!("Peer sent pruned txs in fluffy block"))?; + + let txs = tx_blobs + .into_iter() + .map(|tx_blob| { + if tx_blob.len() > MAX_TRANSACTION_BLOB_SIZE { + anyhow::bail!("Peer sent a transaction over the size limit."); + } + + let tx = Transaction::read(&mut tx_blob.as_ref())?; + + Ok((tx.hash(), tx)) + }) + .collect::>()?; + + // The backing `Bytes` will be deallocated when this closure returns. + + Ok((block, txs)) + }) + .await?; + + let res = blockchain_interface::handle_incoming_block( + block, + txs, + &mut blockchain_read_handle, + &mut txpool_read_handle, + ) + .await; + + match res { + Ok(_) => Ok(ProtocolResponse::NA), + Err(IncomingBlockError::UnknownTransactions(block_hash, missing_tx_indices)) => Ok( + ProtocolResponse::FluffyMissingTransactionsRequest(FluffyMissingTransactionsRequest { + block_hash: block_hash.into(), + current_blockchain_height, + missing_tx_indices: missing_tx_indices.into_iter().map(usize_to_u64).collect(), + }), + ), + Err(IncomingBlockError::Orphan) => { + // Block's parent was unknown, could be syncing? 
+ Ok(ProtocolResponse::NA) + } + Err(e) => Err(e.into()), + } +} + +/// [`ProtocolRequest::NewTransactions`] +async fn new_transactions( + peer_information: PeerInformation, + request: NewTransactions, + mut blockchain_context_service: BlockChainContextService, + mut incoming_tx_handler: IncomingTxHandler, +) -> anyhow::Result +where + A: NetZoneAddress, + InternalPeerID: Into, +{ + let BlockChainContextResponse::Context(context) = blockchain_context_service + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(BlockChainContextRequest::Context) + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + else { + unreachable!() + }; + + let context = context.unchecked_blockchain_context(); + + // If we are more than 2 blocks behind the peer then ignore the txs - we are probably still syncing. + if usize_to_u64(context.chain_height + 2) + < peer_information + .core_sync_data + .lock() + .unwrap() + .current_height + { + return Ok(ProtocolResponse::NA); + } + + let state = if request.dandelionpp_fluff { + TxState::Fluff + } else { + TxState::Stem { + from: peer_information.id.into(), + } + }; + + // Drop all the data except the stuff we still need. + let NewTransactions { txs, .. 
} = request; + + let res = incoming_tx_handler + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(IncomingTxs { txs, state }) + .await; + + match res { + Ok(()) => Ok(ProtocolResponse::NA), + Err(e) => Err(e.into()), + } +} diff --git a/binaries/cuprated/src/txpool.rs b/binaries/cuprated/src/txpool.rs index 9592c2bf..20769567 100644 --- a/binaries/cuprated/src/txpool.rs +++ b/binaries/cuprated/src/txpool.rs @@ -12,4 +12,4 @@ mod dandelion; mod incoming_tx; mod txs_being_handled; -pub use incoming_tx::IncomingTxHandler; +pub use incoming_tx::{IncomingTxError, IncomingTxHandler, IncomingTxs}; diff --git a/binaries/cuprated/src/txpool/incoming_tx.rs b/binaries/cuprated/src/txpool/incoming_tx.rs index e2041598..bf7684e4 100644 --- a/binaries/cuprated/src/txpool/incoming_tx.rs +++ b/binaries/cuprated/src/txpool/incoming_tx.rs @@ -43,9 +43,13 @@ use crate::{ }; /// An error that can happen handling an incoming tx. +#[derive(Debug, thiserror::Error)] pub enum IncomingTxError { + #[error("Error parsing tx: {0}")] Parse(std::io::Error), + #[error(transparent)] Consensus(ExtendedConsensusError), + #[error("Duplicate tx in message")] DuplicateTransaction, } @@ -67,6 +71,7 @@ pub(super) type TxId = [u8; 32]; /// The service than handles incoming transaction pool transactions. /// /// This service handles everything including verifying the tx, adding it to the pool and routing it to other nodes. +#[derive(Clone)] pub struct IncomingTxHandler { /// A store of txs currently being handled in incoming tx requests. pub(super) txs_being_handled: TxsBeingHandled, diff --git a/net/wire/src/p2p/protocol.rs b/net/wire/src/p2p/protocol.rs index 1d1d45ab..cc4b49d3 100644 --- a/net/wire/src/p2p/protocol.rs +++ b/net/wire/src/p2p/protocol.rs @@ -159,7 +159,7 @@ epee_object!( current_blockchain_height: u64, ); -/// A request for Txs we are missing from our `TxPool` +/// A request for txs we are missing from an incoming block. 
#[derive(Debug, Clone, PartialEq, Eq)] pub struct FluffyMissingTransactionsRequest { /// The Block we are missing the Txs in diff --git a/p2p/p2p-core/src/protocol.rs b/p2p/p2p-core/src/protocol.rs index 7d8d431b..82aac824 100644 --- a/p2p/p2p-core/src/protocol.rs +++ b/p2p/p2p-core/src/protocol.rs @@ -116,6 +116,7 @@ pub enum ProtocolResponse { GetChain(ChainResponse), NewFluffyBlock(NewFluffyBlock), NewTransactions(NewTransactions), + FluffyMissingTransactionsRequest(FluffyMissingTransactionsRequest), NA, } @@ -139,6 +140,9 @@ impl PeerResponse { ProtocolResponse::GetChain(_) => MessageID::GetChain, ProtocolResponse::NewFluffyBlock(_) => MessageID::NewBlock, ProtocolResponse::NewTransactions(_) => MessageID::NewFluffyBlock, + ProtocolResponse::FluffyMissingTransactionsRequest(_) => { + MessageID::FluffyMissingTxs + } ProtocolResponse::NA => return None, }, diff --git a/p2p/p2p-core/src/protocol/try_from.rs b/p2p/p2p-core/src/protocol/try_from.rs index d3a7260f..2dfc41db 100644 --- a/p2p/p2p-core/src/protocol/try_from.rs +++ b/p2p/p2p-core/src/protocol/try_from.rs @@ -71,6 +71,9 @@ impl TryFrom for ProtocolMessage { ProtocolResponse::NewFluffyBlock(val) => Self::NewFluffyBlock(val), ProtocolResponse::GetChain(val) => Self::ChainEntryResponse(val), ProtocolResponse::GetObjects(val) => Self::GetObjectsResponse(val), + ProtocolResponse::FluffyMissingTransactionsRequest(val) => { + Self::FluffyMissingTransactionsRequest(val) + } ProtocolResponse::NA => return Err(MessageConversionError), }) } diff --git a/p2p/p2p/src/constants.rs b/p2p/p2p/src/constants.rs index f70d64c9..a81557c2 100644 --- a/p2p/p2p/src/constants.rs +++ b/p2p/p2p/src/constants.rs @@ -52,7 +52,7 @@ pub(crate) const INITIAL_CHAIN_REQUESTS_TO_SEND: usize = 3; /// The enforced maximum amount of blocks to request in a batch. /// /// Requesting more than this will cause the peer to disconnect and potentially lead to bans. 
-pub(crate) const MAX_BLOCK_BATCH_LEN: usize = 100; +pub const MAX_BLOCK_BATCH_LEN: usize = 100; /// The timeout that the block downloader will use for requests. pub(crate) const BLOCK_DOWNLOADER_REQUEST_TIMEOUT: Duration = Duration::from_secs(30); @@ -61,13 +61,13 @@ pub(crate) const BLOCK_DOWNLOADER_REQUEST_TIMEOUT: Duration = Duration::from_sec /// be less than. /// /// ref: -pub(crate) const MAX_TRANSACTION_BLOB_SIZE: usize = 1_000_000; +pub const MAX_TRANSACTION_BLOB_SIZE: usize = 1_000_000; /// The maximum amount of block IDs allowed in a chain entry response. /// /// ref: // TODO: link to the protocol book when this section is added. -pub(crate) const MAX_BLOCKS_IDS_IN_CHAIN_ENTRY: usize = 25_000; +pub const MAX_BLOCKS_IDS_IN_CHAIN_ENTRY: usize = 25_000; /// The amount of failures downloading a specific batch before we stop attempting to download it. pub(crate) const MAX_DOWNLOAD_FAILURES: usize = 5; diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index 92b4374b..c9359248 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -34,6 +34,7 @@ serde = { workspace = true, optional = true } tower = { workspace = true } thread_local = { workspace = true } rayon = { workspace = true } +bytes = { workspace = true } [dev-dependencies] cuprate-constants = { workspace = true } diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index 5e541878..2dc88aa4 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -2,21 +2,23 @@ //---------------------------------------------------------------------------------------------------- Import use bytemuck::TransparentWrapper; +use bytes::Bytes; use monero_serai::{ block::{Block, BlockHeader}, transaction::Transaction, }; use cuprate_database::{ - DbResult, RuntimeError, StorableVec, {DatabaseRo, DatabaseRw}, + DbResult, RuntimeError, StorableVec, {DatabaseIter, DatabaseRo, DatabaseRw}, }; +use 
cuprate_helper::cast::usize_to_u64; use cuprate_helper::{ map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits}, tx::tx_fee, }; use cuprate_types::{ - AltBlockInformation, ChainId, ExtendedBlockHeader, HardFork, VerifiedBlockInformation, - VerifiedTransactionInformation, + AltBlockInformation, BlockCompleteEntry, ChainId, ExtendedBlockHeader, HardFork, + TransactionBlobs, VerifiedBlockInformation, VerifiedTransactionInformation, }; use crate::{ @@ -27,7 +29,7 @@ use crate::{ output::get_rct_num_outputs, tx::{add_tx, remove_tx}, }, - tables::{BlockHeights, BlockInfos, Tables, TablesMut}, + tables::{BlockHeights, BlockInfos, Tables, TablesIter, TablesMut}, types::{BlockHash, BlockHeight, BlockInfo}, }; @@ -222,6 +224,64 @@ pub fn pop_block( Ok((block_height, block_info.block_hash, block)) } +//---------------------------------------------------------------------------------------------------- `get_block_blob_with_tx_indexes` +/// Retrieve a block's raw bytes, the index of the miner transaction and the number of non miner-txs in the block. +/// +#[doc = doc_error!()] +pub fn get_block_blob_with_tx_indexes( + block_height: &BlockHeight, + tables: &impl Tables, +) -> Result<(Vec, u64, usize), RuntimeError> { + let miner_tx_idx = tables.block_infos().get(block_height)?.mining_tx_index; + + let block_txs = tables.block_txs_hashes().get(block_height)?.0; + let numb_txs = block_txs.len(); + + // Get the block header + let mut block = tables.block_header_blobs().get(block_height)?.0; + + // Add the miner tx to the blob. + let mut miner_tx_blob = tables.tx_blobs().get(&miner_tx_idx)?.0; + block.append(&mut miner_tx_blob); + + // Add the blocks tx hashes. 
+ monero_serai::io::write_varint(&block_txs.len(), &mut block) + .expect("The number of txs per block will not exceed u64::MAX"); + + let block_txs_bytes = bytemuck::must_cast_slice(&block_txs); + block.extend_from_slice(block_txs_bytes); + + Ok((block, miner_tx_idx, numb_txs)) +} + +//---------------------------------------------------------------------------------------------------- `get_block_complete_entry` +/// Retrieve a [`BlockCompleteEntry`] from the database. +/// +#[doc = doc_error!()] +pub fn get_block_complete_entry( + block_hash: &BlockHash, + tables: &impl TablesIter, +) -> Result { + let block_height = tables.block_heights().get(block_hash)?; + let (block_blob, miner_tx_idx, numb_non_miner_txs) = + get_block_blob_with_tx_indexes(&block_height, tables)?; + + let first_tx_idx = miner_tx_idx + 1; + + let tx_blobs = tables + .tx_blobs_iter() + .get_range(first_tx_idx..(usize_to_u64(numb_non_miner_txs) + first_tx_idx))? + .map(|tx_blob| Ok(Bytes::from(tx_blob?.0))) + .collect::>()?; + + Ok(BlockCompleteEntry { + block: Bytes::from(block_blob), + txs: TransactionBlobs::Normal(tx_blobs), + pruned: false, + block_weight: 0, + }) +} + //---------------------------------------------------------------------------------------------------- `get_block_extended_header_*` /// Retrieve a [`ExtendedBlockHeader`] from the database.
/// diff --git a/storage/blockchain/src/ops/blockchain.rs b/storage/blockchain/src/ops/blockchain.rs index 71633635..54dd752a 100644 --- a/storage/blockchain/src/ops/blockchain.rs +++ b/storage/blockchain/src/ops/blockchain.rs @@ -4,9 +4,9 @@ use cuprate_database::{DatabaseRo, DbResult, RuntimeError}; use crate::{ - ops::macros::doc_error, + ops::{block::block_exists, macros::doc_error}, tables::{BlockHeights, BlockInfos}, - types::BlockHeight, + types::{BlockHash, BlockHeight}, }; //---------------------------------------------------------------------------------------------------- Free Functions @@ -76,6 +76,44 @@ pub fn cumulative_generated_coins( } } +/// Find the split point between our chain and a list of [`BlockHash`]s from another chain. +/// +/// This function accepts chains in chronological and reverse chronological order; however, +/// if the wrong order is specified, the return value is meaningless. +/// +/// For chronologically ordered chains this will return the index of the first unknown block; for reverse +/// chronologically ordered chains this will return the index of the first known block. +/// +/// If all blocks are known for chronologically ordered chains or unknown for reverse chronologically +/// ordered chains then the length of the chain will be returned. +#[doc = doc_error!()] +#[inline] +pub fn find_split_point( + block_ids: &[BlockHash], + chronological_order: bool, + table_block_heights: &impl DatabaseRo, +) -> Result { + let mut err = None; + + // Do a binary search to find the first unknown/known block in the batch. + let idx = block_ids.partition_point(|block_id| { + match block_exists(block_id, table_block_heights) { + Ok(exists) => exists == chronological_order, + Err(e) => { + err.get_or_insert(e); + // if this happens the search is scrapped, just return `false` back.
+ false + } + } + }); + + if let Some(e) = err { + return Err(e); + } + + Ok(idx) +} + //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test { diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index 76577590..84b1b921 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -10,23 +10,26 @@ //---------------------------------------------------------------------------------------------------- Import use std::{ + cmp::min, collections::{HashMap, HashSet}, sync::Arc, }; use rayon::{ - iter::{IntoParallelIterator, ParallelIterator}, + iter::{Either, IntoParallelIterator, ParallelIterator}, prelude::*, ThreadPool, }; use thread_local::ThreadLocal; -use cuprate_database::{ConcreteEnv, DatabaseRo, DbResult, Env, EnvInner, RuntimeError}; +use cuprate_database::{ + ConcreteEnv, DatabaseIter, DatabaseRo, DbResult, Env, EnvInner, RuntimeError, +}; use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads}; use cuprate_helper::map::combine_low_high_bits_to_u128; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse}, - Chain, ChainId, ExtendedBlockHeader, OutputHistogramInput, OutputOnChain, + Chain, ChainId, ExtendedBlockHeader, OutputHistogramInput, OutputOnChain, TxsInBlock, }; use crate::{ @@ -36,9 +39,10 @@ use crate::{ get_alt_chain_history_ranges, }, block::{ - block_exists, get_block_extended_header_from_height, get_block_height, get_block_info, + block_exists, get_block_blob_with_tx_indexes, get_block_complete_entry, + get_block_extended_header_from_height, get_block_height, get_block_info, }, - blockchain::{cumulative_generated_coins, top_block_height}, + blockchain::{cumulative_generated_coins, find_split_point, top_block_height}, key_image::key_image_exists, output::id_to_output_on_chain, }, @@ -46,7 +50,7 @@ use crate::{ 
free::{compact_history_genesis_not_included, compact_history_index_to_height_offset}, types::{BlockchainReadHandle, ResponseResult}, }, - tables::{AltBlockHeights, BlockHeights, BlockInfos, OpenTables, Tables}, + tables::{AltBlockHeights, BlockHeights, BlockInfos, OpenTables, Tables, TablesIter}, types::{ AltBlockHeight, Amount, AmountIndex, BlockHash, BlockHeight, KeyImage, PreRctOutputId, }, @@ -100,6 +104,7 @@ fn map_request( /* SOMEDAY: pre-request handling, run some code for each request? */ match request { + R::BlockCompleteEntries(block_hashes) => block_complete_entries(env, block_hashes), R::BlockExtendedHeader(block) => block_extended_header(env, block), R::BlockHash(block, chain) => block_hash(env, block, chain), R::FindBlock(block_hash) => find_block(env, block_hash), @@ -113,7 +118,12 @@ fn map_request( R::NumberOutputsWithAmount(vec) => number_outputs_with_amount(env, vec), R::KeyImagesSpent(set) => key_images_spent(env, set), R::CompactChainHistory => compact_chain_history(env), + R::NextChainEntry(block_hashes, amount) => next_chain_entry(env, &block_hashes, amount), R::FindFirstUnknown(block_ids) => find_first_unknown(env, &block_ids), + R::TxsInBlock { + block_hash, + tx_indexes, + } => txs_in_block(env, block_hash, tx_indexes), R::AltBlocksInChain(chain_id) => alt_blocks_in_chain(env, chain_id), R::Block { height } => block(env, height), R::BlockByHash(hash) => block_by_hash(env, hash), @@ -198,6 +208,38 @@ macro_rules! get_tables { // TODO: The overhead of parallelism may be too much for every request, perfomace test to find optimal // amount of parallelism. +/// [`BlockchainReadRequest::BlockCompleteEntries`]. +fn block_complete_entries(env: &ConcreteEnv, block_hashes: Vec) -> ResponseResult { + // Prepare tx/tables in `ThreadLocal`. 
+ let env_inner = env.env_inner(); + let tx_ro = thread_local(env); + let tables = thread_local(env); + + let (missing_hashes, blocks) = block_hashes + .into_par_iter() + .map(|block_hash| { + let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; + let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); + + match get_block_complete_entry(&block_hash, tables) { + Err(RuntimeError::KeyNotFound) => Ok(Either::Left(block_hash)), + res => res.map(Either::Right), + } + }) + .collect::>()?; + + let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; + let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); + + let blockchain_height = crate::ops::blockchain::chain_height(tables.block_heights())?; + + Ok(BlockchainResponse::BlockCompleteEntries { + blocks, + missing_hashes, + blockchain_height, + }) +} + /// [`BlockchainReadRequest::BlockExtendedHeader`]. #[inline] fn block_extended_header(env: &ConcreteEnv, block_height: BlockHeight) -> ResponseResult { @@ -335,7 +377,7 @@ fn block_extended_header_in_range( } }) }) - .collect::, _>>()? + .collect::>>()? } }; @@ -534,6 +576,76 @@ fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult { }) } +/// [`BlockchainReadRequest::NextChainEntry`] +/// +/// # Invariant +/// `block_ids` must be sorted in reverse chronological block order, or else +/// the returned result is unspecified and meaningless, as this function +/// performs a binary search. +fn next_chain_entry( + env: &ConcreteEnv, + block_ids: &[BlockHash], + next_entry_size: usize, +) -> ResponseResult { + // Single-threaded, no `ThreadLocal` required. + let env_inner = env.env_inner(); + let tx_ro = env_inner.tx_ro()?; + + let tables = env_inner.open_tables(&tx_ro)?; + let table_block_heights = tables.block_heights(); + let table_block_infos = tables.block_infos_iter(); + + let idx = find_split_point(block_ids, false, table_block_heights)?; + + // This will happen if we have a different genesis block. 
+ if idx == block_ids.len() { + return Ok(BlockchainResponse::NextChainEntry { + start_height: None, + chain_height: 0, + block_ids: vec![], + block_weights: vec![], + cumulative_difficulty: 0, + first_block_blob: None, + }); + } + + // The returned chain entry must overlap with one of the blocks we were told about. + let first_known_block_hash = block_ids[idx]; + let first_known_height = table_block_heights.get(&first_known_block_hash)?; + + let chain_height = crate::ops::blockchain::chain_height(table_block_heights)?; + let last_height_in_chain_entry = min(first_known_height + next_entry_size, chain_height); + + let (block_ids, block_weights) = table_block_infos + .get_range(first_known_height..last_height_in_chain_entry)? + .map(|block_info| { + let block_info = block_info?; + + Ok((block_info.block_hash, block_info.weight)) + }) + .collect::, Vec<_>)>>()?; + + let top_block_info = table_block_infos.get(&(chain_height - 1))?; + + let first_block_blob = if block_ids.len() >= 2 { + Some(get_block_blob_with_tx_indexes(&(first_known_height + 1), &tables)?.0) + } else { + None + }; + + Ok(BlockchainResponse::NextChainEntry { + start_height: std::num::NonZero::new(first_known_height), + chain_height, + block_ids, + block_weights, + cumulative_difficulty: combine_low_high_bits_to_u128( + top_block_info.cumulative_difficulty_low, + top_block_info.cumulative_difficulty_high, + ), + first_block_blob, + }) +} + /// [`BlockchainReadRequest::FindFirstUnknown`] /// /// # Invariant @@ -546,24 +658,7 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes let table_block_heights = env_inner.open_db_ro::(&tx_ro)?; - let mut err = None; - - // Do a binary search to find the first unknown block in the batch. - let idx = - block_ids.partition_point( - |block_id| match block_exists(block_id, &table_block_heights) { - Ok(exists) => exists, - Err(e) => { - err.get_or_insert(e); - // if this happens the search is scrapped, just return `false` back. 
- false - } - }, - ); - - if let Some(e) = err { - return Err(e); - } + let idx = find_split_point(block_ids, true, &table_block_heights)?; Ok(if idx == block_ids.len() { BlockchainResponse::FindFirstUnknown(None) @@ -576,6 +671,33 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes }) } +/// [`BlockchainReadRequest::TxsInBlock`] +fn txs_in_block(env: &ConcreteEnv, block_hash: [u8; 32], missing_txs: Vec) -> ResponseResult { + // Single-threaded, no `ThreadLocal` required. + let env_inner = env.env_inner(); + let tx_ro = env_inner.tx_ro()?; + let tables = env_inner.open_tables(&tx_ro)?; + + let block_height = tables.block_heights().get(&block_hash)?; + + let (block, miner_tx_index, numb_txs) = get_block_blob_with_tx_indexes(&block_height, &tables)?; + let first_tx_index = miner_tx_index + 1; + + if numb_txs < missing_txs.len() { + return Ok(BlockchainResponse::TxsInBlock(None)); + } + + let txs = missing_txs + .into_iter() + .map(|index_offset| Ok(tables.tx_blobs().get(&(first_tx_index + index_offset))?.0)) + .collect::>()?; + + Ok(BlockchainResponse::TxsInBlock(Some(TxsInBlock { + block, + txs, + }))) +} + /// [`BlockchainReadRequest::AltBlocksInChain`] fn alt_blocks_in_chain(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult { // Prepare tx/tables in `ThreadLocal`. 
@@ -613,7 +735,7 @@ fn alt_blocks_in_chain(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult { ) }) }) - .collect::>()?; + .collect::>()?; Ok(BlockchainResponse::AltBlocksInChain(blocks)) } diff --git a/types/src/blockchain.rs b/types/src/blockchain.rs index c39c0bd8..7518935d 100644 --- a/types/src/blockchain.rs +++ b/types/src/blockchain.rs @@ -11,9 +11,9 @@ use std::{ use monero_serai::block::Block; use crate::{ - types::{Chain, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation}, - AltBlockInformation, ChainId, ChainInfo, CoinbaseTxSum, OutputHistogramEntry, - OutputHistogramInput, + types::{Chain, ExtendedBlockHeader, OutputOnChain, TxsInBlock, VerifiedBlockInformation}, + AltBlockInformation, BlockCompleteEntry, ChainId, ChainInfo, CoinbaseTxSum, + OutputHistogramEntry, OutputHistogramInput, }; //---------------------------------------------------------------------------------------------------- ReadRequest @@ -27,6 +27,11 @@ use crate::{ /// See `Response` for the expected responses per `Request`. #[derive(Debug, Clone, PartialEq, Eq)] pub enum BlockchainReadRequest { + /// Request [`BlockCompleteEntry`]s. + /// + /// The input is the block hashes. + BlockCompleteEntries(Vec<[u8; 32]>), + /// Request a block's extended header. /// /// The input is the block's height. @@ -96,6 +101,16 @@ pub enum BlockchainReadRequest { /// A request for the compact chain history. CompactChainHistory, + /// A request for the next chain entry. + /// + /// Input is a list of block hashes and the amount of block hashes to return in the next chain entry. + /// + /// # Invariant + /// The [`Vec`] containing the block IDs must be sorted in reverse chronological block + /// order, or else the returned response is unspecified and meaningless, + /// as this request performs a binary search + NextChainEntry(Vec<[u8; 32]>, usize), + /// A request to find the first unknown block ID in a list of block IDs. 
/// /// # Invariant @@ -104,6 +119,16 @@ pub enum BlockchainReadRequest { /// as this request performs a binary search. FindFirstUnknown(Vec<[u8; 32]>), + /// A request for transactions from a specific block. + TxsInBlock { + /// The block to get transactions from. + block_hash: [u8; 32], + /// The indexes of the transactions from the block. + /// This is not the global index of the txs, instead it is the local index as they appear in + /// the block. + tx_indexes: Vec, + }, + /// A request for all alt blocks in the chain with the given [`ChainId`]. AltBlocksInChain(ChainId), @@ -182,6 +207,16 @@ pub enum BlockchainWriteRequest { #[expect(clippy::large_enum_variant)] pub enum BlockchainResponse { //------------------------------------------------------ Reads + /// Response to [`BlockchainReadRequest::BlockCompleteEntries`]. + BlockCompleteEntries { + /// The [`BlockCompleteEntry`]s that we had. + blocks: Vec, + /// The hashes of blocks that were requested, but we don't have. + missing_hashes: Vec<[u8; 32]>, + /// Our blockchain height. + blockchain_height: usize, + }, + /// Response to [`BlockchainReadRequest::BlockExtendedHeader`]. /// /// Inner value is the extended headed of the requested block. @@ -248,6 +283,24 @@ pub enum BlockchainResponse { cumulative_difficulty: u128, }, + /// Response to [`BlockchainReadRequest::NextChainEntry`]. + /// + /// If all blocks were unknown `start_height` will be [`None`], the other fields will be meaningless. + NextChainEntry { + /// The start height of this entry, [`None`] if we could not find the split point. + start_height: Option>, + /// The current chain height. + chain_height: usize, + /// The next block hashes in the entry. + block_ids: Vec<[u8; 32]>, + /// The block weights of the next blocks. + block_weights: Vec, + /// The current cumulative difficulty of our chain. + cumulative_difficulty: u128, + /// The block blob of the 2nd block in `block_ids`, if there is one. 
+ first_block_blob: Option>, + }, + /// Response to [`BlockchainReadRequest::FindFirstUnknown`]. /// /// Contains the index of the first unknown block and its expected height. @@ -255,7 +308,12 @@ pub enum BlockchainResponse { /// This will be [`None`] if all blocks were known. FindFirstUnknown(Option<(usize, usize)>), - /// Response to [`BlockchainReadRequest::AltBlocksInChain`]. + /// The response for [`BlockchainReadRequest::TxsInBlock`]. + /// + /// Will return [`None`] if the request contained an index out of range. + TxsInBlock(Option), + + /// The response for [`BlockchainReadRequest::AltBlocksInChain`]. /// /// Contains all the alt blocks in the alt-chain in chronological order. AltBlocksInChain(Vec), diff --git a/types/src/lib.rs b/types/src/lib.rs index a5a04f9d..7aaf0b9e 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -26,8 +26,8 @@ pub use transaction_verification_data::{ pub use types::{ AddAuxPow, AltBlockInformation, AuxPow, Chain, ChainId, ChainInfo, CoinbaseTxSum, ExtendedBlockHeader, FeeEstimate, HardForkInfo, MinerData, MinerDataTxBacklogEntry, - OutputHistogramEntry, OutputHistogramInput, OutputOnChain, VerifiedBlockInformation, - VerifiedTransactionInformation, + OutputHistogramEntry, OutputHistogramInput, OutputOnChain, TxsInBlock, + VerifiedBlockInformation, VerifiedTransactionInformation, }; //---------------------------------------------------------------------------------------------------- Feature-gated diff --git a/types/src/types.rs b/types/src/types.rs index 720ad0a7..8a5b5aad 100644 --- a/types/src/types.rs +++ b/types/src/types.rs @@ -259,6 +259,13 @@ pub struct AddAuxPow { pub aux_pow: Vec, } +/// The inner response for a request for txs in a block. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TxsInBlock { + pub block: Vec, + pub txs: Vec>, +} + //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test {