diff --git a/Cargo.lock b/Cargo.lock
index 4fa7589..3caf437 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -501,7 +501,6 @@ dependencies = [
  "cuprate-p2p-core",
  "cuprate-pruning",
  "cuprate-test-utils",
- "cuprate-wire",
  "futures",
  "indexmap",
  "rand",
@@ -540,6 +539,7 @@ dependencies = [
  "monero-serai",
  "pretty_assertions",
  "proptest",
+ "rand",
  "rayon",
  "serde",
  "tempfile",
@@ -552,6 +552,7 @@ dependencies = [
 name = "cuprate-consensus"
 version = "0.1.0"
 dependencies = [
+ "cfg-if",
  "cuprate-consensus-rules",
  "cuprate-helper",
  "cuprate-test-utils",
@@ -579,6 +580,7 @@ dependencies = [
 name = "cuprate-consensus-rules"
 version = "0.1.0"
 dependencies = [
+ "cfg-if",
  "crypto-bigint",
  "cuprate-cryptonight",
  "cuprate-helper",
@@ -670,15 +672,14 @@ dependencies = [
  "cuprate-blockchain",
  "cuprate-consensus",
  "cuprate-consensus-rules",
+ "cuprate-helper",
  "cuprate-types",
  "hex",
  "hex-literal",
  "monero-serai",
- "rayon",
  "sha3",
  "thiserror",
  "tokio",
- "tokio-test",
  "tower",
 ]
 
 [[package]]
@@ -698,6 +699,7 @@ version = "0.1.0"
 dependencies = [
  "chrono",
  "crossbeam",
+ "curve25519-dalek",
  "dirs",
  "futures",
  "libc",
@@ -723,6 +725,7 @@ version = "0.1.0"
 dependencies = [
  "bitflags 2.6.0",
  "bytes",
+ "cfg-if",
  "cuprate-helper",
  "futures",
  "proptest",
@@ -773,6 +776,7 @@ version = "0.1.0"
 dependencies = [
  "async-trait",
  "borsh",
+ "cfg-if",
  "cuprate-helper",
  "cuprate-pruning",
  "cuprate-test-utils",
@@ -787,7 +791,6 @@ dependencies = [
  "tokio",
  "tokio-stream",
  "tokio-util",
  "tower",
  "tracing",
- "tracing-subscriber",
 ]
 
 [[package]]
@@ -837,7 +840,6 @@ version = "0.1.0"
 dependencies = [
  "async-trait",
  "borsh",
- "bytes",
  "cuprate-helper",
  "cuprate-p2p-core",
  "cuprate-types",
diff --git a/Cargo.toml b/Cargo.toml
index fb0896d..254d3ce 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -210,7 +210,6 @@ unseparated_literal_suffix = "deny"
 unnecessary_safety_doc = "deny"
 unnecessary_safety_comment = "deny"
 unnecessary_self_imports = "deny"
-tests_outside_test_module = "deny"
 string_to_string = "deny"
 rest_pat_in_fully_bound_structs = "deny"
 redundant_type_annotations = "deny"
@@ -264,6 +263,7 @@ empty_enum_variants_with_brackets = "deny"
 empty_drop = "deny"
 clone_on_ref_ptr = "deny"
 upper_case_acronyms = "deny"
+allow_attributes = "deny"
 
 # Hot
 # inline_always = "deny"
diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml
index bd3994a..12d97ee 100644
--- a/consensus/Cargo.toml
+++ b/consensus/Cargo.toml
@@ -12,6 +12,7 @@ cuprate-helper = { path = "../helper", default-features = false, features = ["st
 cuprate-consensus-rules = { path = "./rules", features = ["rayon"] }
 cuprate-types = { path = "../types" }
 
+cfg-if = { workspace = true }
 thiserror = { workspace = true }
 tower = { workspace = true, features = ["util"] }
 tracing = { workspace = true, features = ["std", "attributes"] }
@@ -19,7 +20,6 @@ futures = { workspace = true, features = ["std", "async-await"] }
 
 randomx-rs = { workspace = true }
 monero-serai = { workspace = true, features = ["std"] }
-curve25519-dalek = { workspace = true }
 rayon = { workspace = true }
 thread_local = { workspace = true }
 
@@ -34,8 +34,12 @@ cuprate-test-utils = { path = "../test-utils" }
 cuprate-consensus-rules = {path = "./rules", features = ["proptest"]}
 hex-literal = { workspace = true }
+curve25519-dalek = { workspace = true }
 
 tokio = { workspace = true, features = ["rt-multi-thread", "macros"]}
 tokio-test = { workspace = true }
 
 proptest = { workspace = true }
-proptest-derive = { workspace = true }
\ No newline at end of file
+proptest-derive = { workspace = true }
+
+[lints]
+workspace = true
\ No newline at end of file
diff --git a/consensus/fast-sync/Cargo.toml b/consensus/fast-sync/Cargo.toml
index 32fce11..1d7d97b 100644
--- a/consensus/fast-sync/Cargo.toml
+++ b/consensus/fast-sync/Cargo.toml
@@ -9,19 +9,22 @@ name = "cuprate-fast-sync-create-hashes"
 path = "src/create.rs"
 
 [dependencies]
-clap = { workspace = true, features = ["derive", "std"] }
-cuprate-blockchain = { path = "../../storage/blockchain" }
-cuprate-consensus = { path = ".." }
 cuprate-consensus-rules = { path = "../rules" }
-cuprate-types = { path = "../../types" }
-hex.workspace = true
-hex-literal.workspace = true
-monero-serai.workspace = true
-rayon.workspace = true
-sha3 = "0.10.8"
-thiserror.workspace = true
-tokio = { workspace = true, features = ["full"] }
-tower.workspace = true
+cuprate-blockchain = { path = "../../storage/blockchain" }
+cuprate-consensus = { path = ".." }
+cuprate-types = { path = "../../types" }
+cuprate-helper = { path = "../../helper", features = ["cast"] }
+
+clap = { workspace = true, features = ["derive", "std"] }
+hex = { workspace = true }
+hex-literal = { workspace = true }
+monero-serai = { workspace = true }
+sha3 = { version = "0.10.8" }
+thiserror = { workspace = true }
+tokio = { workspace = true, features = ["full"] }
+tower = { workspace = true }
 
 [dev-dependencies]
-tokio-test = "0.4.4"
+
+[lints]
+workspace = true
\ No newline at end of file
diff --git a/consensus/fast-sync/src/create.rs b/consensus/fast-sync/src/create.rs
index 0d6d03f..8c47b8e 100644
--- a/consensus/fast-sync/src/create.rs
+++ b/consensus/fast-sync/src/create.rs
@@ -1,3 +1,8 @@
+#![expect(
+    unused_crate_dependencies,
+    reason = "binary shares same Cargo.toml as library"
+)]
+
 use std::{fmt::Write, fs::write};
 
 use clap::Parser;
@@ -70,15 +75,12 @@ async fn main() {
     let mut height = 0_usize;
 
     while height < height_target {
-        match read_batch(&mut read_handle, height).await {
-            Ok(block_ids) => {
-                let hash = hash_of_hashes(block_ids.as_slice());
-                hashes_of_hashes.push(hash);
-            }
-            Err(_) => {
-                println!("Failed to read next batch from database");
-                break;
-            }
+        if let Ok(block_ids) = read_batch(&mut read_handle, height).await {
+            let hash = hash_of_hashes(block_ids.as_slice());
+            hashes_of_hashes.push(hash);
+        } else {
+            println!("Failed to read next batch from database");
+            break;
         }
         height += BATCH_SIZE;
     }
@@ -88,5 +90,5 @@ async fn main() {
     let generated = generate_hex(&hashes_of_hashes);
     write("src/data/hashes_of_hashes", generated).expect("Could not write file");
 
-    println!("Generated hashes up to block height {}", height);
+    println!("Generated hashes up to block height {height}");
 }
diff --git a/consensus/fast-sync/src/data/hashes_of_hashes b/consensus/fast-sync/src/data/hashes_of_hashes
index 74fec4c..2e5e99a 100644
--- a/consensus/fast-sync/src/data/hashes_of_hashes
+++ b/consensus/fast-sync/src/data/hashes_of_hashes
@@ -1,12 +1,12 @@
 [
-    hex!("1adffbaf832784406018009e07d3dc3a39da7edb6632523c119ed8acb32eb934"),
-    hex!("ae960265e3398d04f3cd4f949ed13c2689424887c71c1441a03d900a9d3a777f"),
-    hex!("938c72d267bbd3a17cdecbe02443d00012ee62d6e9f3524f5a914192110b1798"),
-    hex!("de0c82e51549b6514b42a591fd5440dddb5cc0118ec461459a99017bf06a0a0a"),
-    hex!("9a50f4586ec7e0fb58c6383048d3b334180235fd34bb714af20f1a3ebce4c911"),
-    hex!("5a3942f9bb318d65997bf57c40e045d62e7edbe35f3dae57499c2c5554896543"),
-    hex!("9dccee3b094cdd1b98e357c2c81bfcea798ea75efd94e67c6f5e86f428c5ec2c"),
-    hex!("620397540d44f21c3c57c20e9d47c6aaf0b1bf4302a4d43e75f2e33edd1a4032"),
-    hex!("ef6c612fb17bd70ac2ac69b2f85a421b138cc3a81daf622b077cb402dbf68377"),
-    hex!("6815ecb2bd73a3ba5f20558bfe1b714c30d6892b290e0d6f6cbf18237cedf75a"),
+    hex_literal::hex!("1adffbaf832784406018009e07d3dc3a39da7edb6632523c119ed8acb32eb934"),
+    hex_literal::hex!("ae960265e3398d04f3cd4f949ed13c2689424887c71c1441a03d900a9d3a777f"),
+    hex_literal::hex!("938c72d267bbd3a17cdecbe02443d00012ee62d6e9f3524f5a914192110b1798"),
+    hex_literal::hex!("de0c82e51549b6514b42a591fd5440dddb5cc0118ec461459a99017bf06a0a0a"),
+    hex_literal::hex!("9a50f4586ec7e0fb58c6383048d3b334180235fd34bb714af20f1a3ebce4c911"),
+    hex_literal::hex!("5a3942f9bb318d65997bf57c40e045d62e7edbe35f3dae57499c2c5554896543"),
+    hex_literal::hex!("9dccee3b094cdd1b98e357c2c81bfcea798ea75efd94e67c6f5e86f428c5ec2c"),
+    hex_literal::hex!("620397540d44f21c3c57c20e9d47c6aaf0b1bf4302a4d43e75f2e33edd1a4032"),
+    hex_literal::hex!("ef6c612fb17bd70ac2ac69b2f85a421b138cc3a81daf622b077cb402dbf68377"),
+    hex_literal::hex!("6815ecb2bd73a3ba5f20558bfe1b714c30d6892b290e0d6f6cbf18237cedf75a"),
 ]
diff --git a/consensus/fast-sync/src/fast_sync.rs b/consensus/fast-sync/src/fast_sync.rs
index 35fa674..b4fc12b 100644
--- a/consensus/fast-sync/src/fast_sync.rs
+++ b/consensus/fast-sync/src/fast_sync.rs
@@ -6,8 +6,6 @@ use std::{
     task::{Context, Poll},
 };
 
-#[allow(unused_imports)]
-use hex_literal::hex;
 use monero_serai::{
     block::Block,
     transaction::{Input, Transaction},
 };
@@ -19,6 +17,7 @@ use cuprate_consensus::{
     transactions::new_tx_verification_data,
 };
 use cuprate_consensus_rules::{miner_tx::MinerTxError, ConsensusError};
+use cuprate_helper::cast::u64_to_usize;
 use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
 
 use crate::{hash_of_hashes, BlockId, HashOfHashes};
@@ -31,9 +30,9 @@ const BATCH_SIZE: usize = 512;
 
 #[cfg(test)]
 static HASHES_OF_HASHES: &[HashOfHashes] = &[
-    hex!("3fdc9032c16d440f6c96be209c36d3d0e1aed61a2531490fe0ca475eb615c40a"),
-    hex!("0102030405060708010203040506070801020304050607080102030405060708"),
-    hex!("0102030405060708010203040506070801020304050607080102030405060708"),
+    hex_literal::hex!("3fdc9032c16d440f6c96be209c36d3d0e1aed61a2531490fe0ca475eb615c40a"),
+    hex_literal::hex!("0102030405060708010203040506070801020304050607080102030405060708"),
+    hex_literal::hex!("0102030405060708010203040506070801020304050607080102030405060708"),
 ];
 
 #[cfg(test)]
@@ -44,14 +43,14 @@ fn max_height() -> u64 {
     (HASHES_OF_HASHES.len() * BATCH_SIZE) as u64
 }
 
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Eq)]
 pub struct ValidBlockId(BlockId);
 
 fn valid_block_ids(block_ids: &[BlockId]) -> Vec<ValidBlockId> {
     block_ids.iter().map(|b| ValidBlockId(*b)).collect()
 }
 
-#[allow(clippy::large_enum_variant)]
+#[expect(clippy::large_enum_variant)]
 pub enum FastSyncRequest {
     ValidateHashes {
         start_height: u64,
@@ -64,8 +63,8 @@
     },
 }
 
-#[allow(clippy::large_enum_variant)]
-#[derive(Debug, PartialEq)]
+#[expect(clippy::large_enum_variant)]
+#[derive(Debug, PartialEq, Eq)]
 pub enum FastSyncResponse {
     ValidateHashes {
         validated_hashes: Vec<ValidBlockId>,
@@ -74,7 +73,7 @@
     ValidateBlock(VerifiedBlockInformation),
 }
 
-#[derive(thiserror::Error, Debug, PartialEq)]
+#[derive(thiserror::Error, Debug, PartialEq, Eq)]
 pub enum FastSyncError {
     #[error("Block does not match its expected hash")]
     BlockHashMismatch,
@@ -127,9 +126,9 @@ where
         + Send + 'static,
 {
-    #[allow(dead_code)]
-    pub(crate) fn new(context_svc: C) -> FastSyncService<C> {
-        FastSyncService { context_svc }
+    #[expect(dead_code)]
+    pub(crate) const fn new(context_svc: C) -> Self {
+        Self { context_svc }
     }
 }
@@ -161,7 +160,7 @@ where
                 FastSyncRequest::ValidateHashes {
                     start_height,
                     block_ids,
-                } => validate_hashes(start_height, &block_ids).await,
+                } => validate_hashes(start_height, &block_ids),
                 FastSyncRequest::ValidateBlock { block, txs, token } => {
                     validate_block(context_svc, block, txs, token).await
                 }
@@ -170,11 +169,13 @@
     }
 }
 
-async fn validate_hashes(
+fn validate_hashes(
     start_height: u64,
     block_ids: &[BlockId],
 ) -> Result<FastSyncResponse, FastSyncError> {
-    if start_height as usize % BATCH_SIZE != 0 {
+    let start_height_usize = u64_to_usize(start_height);
+
+    if start_height_usize % BATCH_SIZE != 0 {
         return Err(FastSyncError::InvalidStartHeight);
     }
 
@@ -182,9 +183,9 @@
         return Err(FastSyncError::OutOfRange);
     }
 
-    let stop_height = start_height as usize + block_ids.len();
+    let stop_height = start_height_usize + block_ids.len();
 
-    let batch_from = start_height as usize / BATCH_SIZE;
+    let batch_from = start_height_usize / BATCH_SIZE;
     let batch_to = cmp::min(stop_height / BATCH_SIZE, HASHES_OF_HASHES.len());
 
     let n_batches = batch_to - batch_from;
@@ -285,7 +286,7 @@ where
         block_blob,
         txs: verified_txs,
         block_hash,
-        pow_hash: [0u8; 32],
+        pow_hash: [0_u8; 32],
         height: *height,
         generated_coins,
         weight,
@@ -299,46 +300,36 @@
 #[cfg(test)]
 mod tests {
     use super::*;
-    use tokio_test::block_on;
 
     #[test]
     fn test_validate_hashes_errors() {
-        let ids = [[1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32], [5u8; 32]];
+        let ids = [[1_u8; 32], [2_u8; 32], [3_u8; 32], [4_u8; 32], [5_u8; 32]];
         assert_eq!(
-            block_on(validate_hashes(3, &[])),
+            validate_hashes(3, &[]),
            Err(FastSyncError::InvalidStartHeight)
         );
         assert_eq!(
-            block_on(validate_hashes(3, &ids)),
+            validate_hashes(3, &ids),
             Err(FastSyncError::InvalidStartHeight)
         );
 
-        assert_eq!(
-            block_on(validate_hashes(20, &[])),
-            Err(FastSyncError::OutOfRange)
-        );
-        assert_eq!(
-            block_on(validate_hashes(20, &ids)),
-            Err(FastSyncError::OutOfRange)
-        );
+        assert_eq!(validate_hashes(20, &[]), Err(FastSyncError::OutOfRange));
+        assert_eq!(validate_hashes(20, &ids), Err(FastSyncError::OutOfRange));
 
+        assert_eq!(validate_hashes(4, &[]), Err(FastSyncError::NothingToDo));
         assert_eq!(
-            block_on(validate_hashes(4, &[])),
-            Err(FastSyncError::NothingToDo)
-        );
-        assert_eq!(
-            block_on(validate_hashes(4, &ids[..3])),
+            validate_hashes(4, &ids[..3]),
             Err(FastSyncError::NothingToDo)
         );
     }
 
     #[test]
     fn test_validate_hashes_success() {
-        let ids = [[1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32], [5u8; 32]];
+        let ids = [[1_u8; 32], [2_u8; 32], [3_u8; 32], [4_u8; 32], [5_u8; 32]];
         let validated_hashes = valid_block_ids(&ids[0..4]);
         let unknown_hashes = ids[4..].to_vec();
         assert_eq!(
-            block_on(validate_hashes(0, &ids)),
+            validate_hashes(0, &ids),
             Ok(FastSyncResponse::ValidateHashes {
                 validated_hashes,
                 unknown_hashes
@@ -349,15 +340,10 @@
     #[test]
     fn test_validate_hashes_mismatch() {
         let ids = [
-            [1u8; 32], [2u8; 32], [3u8; 32], [5u8; 32], [1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32],
+            [1_u8; 32], [2_u8; 32], [3_u8; 32], [5_u8; 32], [1_u8; 32], [2_u8; 32], [3_u8; 32],
+            [4_u8; 32],
         ];
-        assert_eq!(
-            block_on(validate_hashes(0, &ids)),
-            Err(FastSyncError::Mismatch)
-        );
-        assert_eq!(
-            block_on(validate_hashes(4, &ids)),
-            Err(FastSyncError::Mismatch)
-        );
+        assert_eq!(validate_hashes(0, &ids), Err(FastSyncError::Mismatch));
+        assert_eq!(validate_hashes(4, &ids), Err(FastSyncError::Mismatch));
     }
 }
diff --git a/consensus/fast-sync/src/lib.rs b/consensus/fast-sync/src/lib.rs
index f82b163..8dbdc64 100644
--- a/consensus/fast-sync/src/lib.rs
+++ b/consensus/fast-sync/src/lib.rs
@@ -1,3 +1,9 @@
+// Used in `create.rs`
+use clap as _;
+use cuprate_blockchain as _;
+use hex as _;
+use tokio as _;
+
 pub mod fast_sync;
 pub mod util;
 
diff --git a/consensus/rules/Cargo.toml b/consensus/rules/Cargo.toml
index 279442b..ed97d33 100644
--- a/consensus/rules/Cargo.toml
+++ b/consensus/rules/Cargo.toml
@@ -7,7 +7,7 @@ authors = ["Boog900"]
 
 [features]
 default = []
-proptest = ["dep:proptest", "dep:proptest-derive", "cuprate-types/proptest"]
+proptest = ["cuprate-types/proptest"]
 rayon = ["dep:rayon"]
 
 [dependencies]
@@ -24,15 +24,16 @@ hex = { workspace = true, features = ["std"] }
 hex-literal = { workspace = true }
 crypto-bigint = { workspace = true }
 
+cfg-if = { workspace = true }
 tracing = { workspace = true, features = ["std"] }
 thiserror = { workspace = true }
 
 rayon = { workspace = true, optional = true }
 
-proptest = {workspace = true, optional = true}
-proptest-derive = {workspace = true, optional = true}
-
 [dev-dependencies]
-proptest = {workspace = true}
-proptest-derive = {workspace = true}
-tokio = {version = "1.40.0", features = ["rt-multi-thread", "macros"]}
+proptest = { workspace = true }
+proptest-derive = { workspace = true }
+tokio = { version = "1.35.0", features = ["rt-multi-thread", "macros"] }
+
+[lints]
+workspace = true
diff --git a/consensus/rules/src/blocks.rs b/consensus/rules/src/blocks.rs
index e118e9a..5e55ce2 100644
--- a/consensus/rules/src/blocks.rs
+++ b/consensus/rules/src/blocks.rs
@@ -44,22 +44,22 @@ pub enum BlockError {
     MinerTxError(#[from] MinerTxError),
 }
 
-/// A trait to represent the RandomX VM.
+/// A trait to represent the `RandomX` VM.
 pub trait RandomX {
     type Error;
 
     fn calculate_hash(&self, buf: &[u8]) -> Result<[u8; 32], Self::Error>;
 }
 
-/// Returns if this height is a RandomX seed height.
-pub fn is_randomx_seed_height(height: usize) -> bool {
+/// Returns if this height is a `RandomX` seed height.
+pub const fn is_randomx_seed_height(height: usize) -> bool {
     height % RX_SEEDHASH_EPOCH_BLOCKS == 0
 }
 
-/// Returns the RandomX seed height for this block.
+/// Returns the `RandomX` seed height for this block.
 ///
 /// ref:
-pub fn randomx_seed_height(height: usize) -> usize {
+pub const fn randomx_seed_height(height: usize) -> usize {
     if height <= RX_SEEDHASH_EPOCH_BLOCKS + RX_SEEDHASH_EPOCH_LAG {
         0
     } else {
@@ -122,10 +122,10 @@ pub fn check_block_pow(hash: &[u8; 32], difficulty: u128) -> Result<(), BlockErr
 /// Returns the penalty free zone
 ///
 ///
-pub fn penalty_free_zone(hf: &HardFork) -> usize {
-    if hf == &HardFork::V1 {
+pub fn penalty_free_zone(hf: HardFork) -> usize {
+    if hf == HardFork::V1 {
         PENALTY_FREE_ZONE_1
-    } else if hf >= &HardFork::V2 && hf < &HardFork::V5 {
+    } else if hf >= HardFork::V2 && hf < HardFork::V5 {
         PENALTY_FREE_ZONE_2
     } else {
         PENALTY_FREE_ZONE_5
@@ -135,7 +135,7 @@
 /// Sanity check on the block blob size.
 ///
 /// ref:
-fn block_size_sanity_check(
+const fn block_size_sanity_check(
     block_blob_len: usize,
     effective_median: usize,
 ) -> Result<(), BlockError> {
@@ -149,7 +149,7 @@
 /// Sanity check on the block weight.
 ///
 /// ref:
-pub fn check_block_weight(
+pub const fn check_block_weight(
     block_weight: usize,
     median_for_block_reward: usize,
 ) -> Result<(), BlockError> {
@@ -163,7 +163,7 @@
 /// Sanity check on number of txs in the block.
 ///
 /// ref:
-fn check_amount_txs(number_none_miner_txs: usize) -> Result<(), BlockError> {
+const fn check_amount_txs(number_none_miner_txs: usize) -> Result<(), BlockError> {
     if number_none_miner_txs + 1 > 0x10000000 {
         Err(BlockError::TooManyTxs)
     } else {
@@ -175,10 +175,10 @@
 ///
 /// ref:
 fn check_prev_id(block: &Block, top_hash: &[u8; 32]) -> Result<(), BlockError> {
-    if &block.header.previous != top_hash {
-        Err(BlockError::PreviousIDIncorrect)
-    } else {
+    if &block.header.previous == top_hash {
         Ok(())
+    } else {
+        Err(BlockError::PreviousIDIncorrect)
     }
 }
 
@@ -273,7 +273,7 @@ pub fn check_block(
         block_weight,
         block_chain_ctx.median_weight_for_block_reward,
         block_chain_ctx.already_generated_coins,
-        &block_chain_ctx.current_hf,
+        block_chain_ctx.current_hf,
     )?;
 
     Ok((vote, generated_coins))
diff --git a/consensus/rules/src/decomposed_amount.rs b/consensus/rules/src/decomposed_amount.rs
index a8821f3..ebed8b0 100644
--- a/consensus/rules/src/decomposed_amount.rs
+++ b/consensus/rules/src/decomposed_amount.rs
@@ -1,6 +1,6 @@
 #[rustfmt::skip]
 /// Decomposed amount table.
-pub static DECOMPOSED_AMOUNTS: [u64; 172] = [
+pub(crate) static DECOMPOSED_AMOUNTS: [u64; 172] = [
     1, 2, 3, 4, 5, 6, 7, 8, 9,
     10, 20, 30, 40, 50, 60, 70, 80, 90,
     100, 200, 300, 400, 500, 600, 700, 800, 900,
@@ -40,8 +40,8 @@ mod tests {
 
     #[test]
     fn decomposed_amounts_return_decomposed() {
-        for amount in DECOMPOSED_AMOUNTS.iter() {
-            assert!(is_decomposed_amount(amount))
+        for amount in &DECOMPOSED_AMOUNTS {
+            assert!(is_decomposed_amount(amount));
         }
     }
 
diff --git a/consensus/rules/src/genesis.rs b/consensus/rules/src/genesis.rs
index b796119..e1cf4f8 100644
--- a/consensus/rules/src/genesis.rs
+++ b/consensus/rules/src/genesis.rs
@@ -8,7 +8,7 @@ use monero_serai::{
 
 use cuprate_helper::network::Network;
 
-const fn genesis_nonce(network: &Network) -> u32 {
+const fn genesis_nonce(network: Network) -> u32 {
     match network {
         Network::Mainnet => 10000,
         Network::Testnet => 10001,
@@ -16,7 +16,7 @@
     }
 }
 
-fn genesis_miner_tx(network: &Network) -> Transaction {
+fn genesis_miner_tx(network: Network) -> Transaction {
     Transaction::read(&mut hex::decode(match network {
         Network::Mainnet | Network::Testnet => "013c01ff0001ffffffffffff03029b2e4c0281c0b02e7c53291a94d1d0cbff8883f8024f5142ee494ffbbd08807121017767aafcde9be00dcfd098715ebcf7f410daebc582fda69d24a28e9d0bc890d1",
         Network::Stagenet => "013c01ff0001ffffffffffff0302df5d56da0c7d643ddd1ce61901c7bdc5fb1738bfe39fbe69c28a3a7032729c0f2101168d0c4ca86fb55a4cf6a36d31431be1c53a3bd7411bb24e8832410289fa6f3b"
@@ -26,7 +26,7 @@ fn genesis_miner_tx(network: &Network) -> Transaction {
 /// Generates the Monero genesis block.
 ///
 /// ref:
-pub fn generate_genesis_block(network: &Network) -> Block {
+pub fn generate_genesis_block(network: Network) -> Block {
     Block {
         header: BlockHeader {
             hardfork_version: 1,
@@ -47,19 +47,19 @@ mod tests {
     #[test]
     fn generate_genesis_blocks() {
         assert_eq!(
-            &generate_genesis_block(&Network::Mainnet).hash(),
+            &generate_genesis_block(Network::Mainnet).hash(),
             hex::decode("418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3")
                 .unwrap()
                 .as_slice()
         );
         assert_eq!(
-            &generate_genesis_block(&Network::Testnet).hash(),
+            &generate_genesis_block(Network::Testnet).hash(),
             hex::decode("48ca7cd3c8de5b6a4d53d2861fbdaedca141553559f9be9520068053cda8430b")
                 .unwrap()
                 .as_slice()
         );
         assert_eq!(
-            &generate_genesis_block(&Network::Stagenet).hash(),
+            &generate_genesis_block(Network::Stagenet).hash(),
             hex::decode("76ee3cc98646292206cd3e86f74d88b4dcc1d937088645e9b0cbca84b7ce74eb")
                 .unwrap()
                 .as_slice()
diff --git a/consensus/rules/src/hard_forks.rs b/consensus/rules/src/hard_forks.rs
index 4f786e4..7e9a881 100644
--- a/consensus/rules/src/hard_forks.rs
+++ b/consensus/rules/src/hard_forks.rs
@@ -25,10 +25,10 @@ pub fn check_block_version_vote(
 ) -> Result<(), HardForkError> {
     // self = current hf
     if hf != version {
-        Err(HardForkError::VersionIncorrect)?;
+        return Err(HardForkError::VersionIncorrect);
     }
     if hf > vote {
-        Err(HardForkError::VoteTooLow)?;
+        return Err(HardForkError::VoteTooLow);
     }
 
     Ok(())
@@ -41,8 +41,8 @@ pub struct HFInfo {
     threshold: usize,
 }
 impl HFInfo {
-    pub const fn new(height: usize, threshold: usize) -> HFInfo {
-        HFInfo { height, threshold }
+    pub const fn new(height: usize, threshold: usize) -> Self {
+        Self { height, threshold }
     }
 }
 
@@ -51,7 +51,7 @@
 pub struct HFsInfo([HFInfo; NUMB_OF_HARD_FORKS]);
 
 impl HFsInfo {
-    pub fn info_for_hf(&self, hf: &HardFork) -> HFInfo {
+    pub const fn info_for_hf(&self, hf: &HardFork) -> HFInfo {
         self.0[*hf as usize - 1]
     }
 
@@ -62,7 +62,7 @@
     /// Returns the main-net hard-fork information.
     ///
     /// ref:
-    pub const fn main_net() -> HFsInfo {
+    pub const fn main_net() -> Self {
         Self([
             HFInfo::new(0, 0),
             HFInfo::new(1009827, 0),
@@ -86,7 +86,7 @@
     /// Returns the test-net hard-fork information.
     ///
     /// ref:
-    pub const fn test_net() -> HFsInfo {
+    pub const fn test_net() -> Self {
         Self([
             HFInfo::new(0, 0),
             HFInfo::new(624634, 0),
@@ -110,7 +110,7 @@
     /// Returns the test-net hard-fork information.
     ///
     /// ref:
-    pub const fn stage_net() -> HFsInfo {
+    pub const fn stage_net() -> Self {
         Self([
             HFInfo::new(0, 0),
             HFInfo::new(32000, 0),
@@ -165,8 +165,8 @@ impl Display for HFVotes {
 }
 
 impl HFVotes {
-    pub fn new(window_size: usize) -> HFVotes {
-        HFVotes {
+    pub fn new(window_size: usize) -> Self {
+        Self {
             votes: [0; NUMB_OF_HARD_FORKS],
             vote_list: VecDeque::with_capacity(window_size),
             window_size,
@@ -251,6 +251,6 @@
 /// Returns the votes needed for a hard-fork.
 ///
 /// ref:
-pub fn votes_needed(threshold: usize, window: usize) -> usize {
+pub const fn votes_needed(threshold: usize, window: usize) -> usize {
     (threshold * window).div_ceil(100)
 }
diff --git a/consensus/rules/src/hard_forks/tests.rs b/consensus/rules/src/hard_forks/tests.rs
index 00dd036..1a24627 100644
--- a/consensus/rules/src/hard_forks/tests.rs
+++ b/consensus/rules/src/hard_forks/tests.rs
@@ -51,7 +51,7 @@ proptest! {
         prop_assert_eq!(hf_votes.total_votes(), hf_votes.vote_list.len());
 
         let mut votes = [0_usize; NUMB_OF_HARD_FORKS];
-        for vote in hf_votes.vote_list.iter() {
+        for vote in &hf_votes.vote_list {
             // manually go through the list of votes tallying
             votes[*vote as usize - 1] += 1;
         }
@@ -61,9 +61,9 @@
     }
 
     #[test]
    fn window_size_kept_constant(mut hf_votes in arb_full_hf_votes(), new_votes in any::<Vec<HardFork>>()) {
-        for new_vote in new_votes.into_iter() {
+        for new_vote in new_votes {
             hf_votes.add_vote_for_hf(&new_vote);
-            prop_assert_eq!(hf_votes.total_votes(), TEST_WINDOW_SIZE)
+            prop_assert_eq!(hf_votes.total_votes(), TEST_WINDOW_SIZE);
         }
     }
 
diff --git a/consensus/rules/src/lib.rs b/consensus/rules/src/lib.rs
index a5f8800..876e2f7 100644
--- a/consensus/rules/src/lib.rs
+++ b/consensus/rules/src/lib.rs
@@ -1,3 +1,12 @@
+cfg_if::cfg_if! {
+    // Used in external `tests/`.
+    if #[cfg(test)] {
+        use proptest as _;
+        use proptest_derive as _;
+        use tokio as _;
+    }
+}
+
 use std::time::{SystemTime, UNIX_EPOCH};
 
 pub mod batch_verifier;
diff --git a/consensus/rules/src/miner_tx.rs b/consensus/rules/src/miner_tx.rs
index 663c95e..e6b51d2 100644
--- a/consensus/rules/src/miner_tx.rs
+++ b/consensus/rules/src/miner_tx.rs
@@ -40,7 +40,7 @@ const MINER_TX_TIME_LOCKED_BLOCKS: usize = 60;
 /// the block.
 ///
 /// ref:
-fn calculate_base_reward(already_generated_coins: u64, hf: &HardFork) -> u64 {
+fn calculate_base_reward(already_generated_coins: u64, hf: HardFork) -> u64 {
     let target_mins = hf.block_time().as_secs() / 60;
     let emission_speed_factor = 20 - (target_mins - 1);
     ((MONEY_SUPPLY - already_generated_coins) >> emission_speed_factor)
@@ -54,7 +54,7 @@ pub fn calculate_block_reward(
     block_weight: usize,
     median_bw: usize,
     already_generated_coins: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> u64 {
     let base_reward = calculate_base_reward(already_generated_coins, hf);
 
@@ -75,9 +75,9 @@ pub fn calculate_block_reward(
 /// Checks the miner transactions version.
 ///
 /// ref:
-fn check_miner_tx_version(tx_version: &TxVersion, hf: &HardFork) -> Result<(), MinerTxError> {
+fn check_miner_tx_version(tx_version: TxVersion, hf: HardFork) -> Result<(), MinerTxError> {
     // The TxVersion enum checks if the version is not 1 or 2
-    if hf >= &HardFork::V12 && tx_version != &TxVersion::RingCT {
+    if hf >= HardFork::V12 && tx_version != TxVersion::RingCT {
         Err(MinerTxError::VersionInvalid)
     } else {
         Ok(())
@@ -94,31 +94,31 @@ fn check_inputs(inputs: &[Input], chain_height: usize) -> Result<(), MinerTxErro
 
     match &inputs[0] {
         Input::Gen(height) => {
-            if height != &chain_height {
-                Err(MinerTxError::InputsHeightIncorrect)
-            } else {
+            if height == &chain_height {
                 Ok(())
+            } else {
+                Err(MinerTxError::InputsHeightIncorrect)
             }
         }
-        _ => Err(MinerTxError::InputNotOfTypeGen),
+        Input::ToKey { .. } => Err(MinerTxError::InputNotOfTypeGen),
     }
 }
 
 /// Checks the miner transaction has a correct time lock.
 ///
 /// ref:
-fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<(), MinerTxError> {
+const fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<(), MinerTxError> {
     match time_lock {
         &Timelock::Block(till_height) => {
             // Lock times above this amount are timestamps not blocks.
             // This is just for safety though and shouldn't actually be hit.
             if till_height > 500_000_000 {
-                Err(MinerTxError::InvalidLockTime)?;
+                return Err(MinerTxError::InvalidLockTime);
             }
-            if till_height != chain_height + MINER_TX_TIME_LOCKED_BLOCKS {
-                Err(MinerTxError::InvalidLockTime)
-            } else {
+            if till_height == chain_height + MINER_TX_TIME_LOCKED_BLOCKS {
                 Ok(())
+            } else {
+                Err(MinerTxError::InvalidLockTime)
             }
         }
         _ => Err(MinerTxError::InvalidLockTime),
@@ -131,18 +131,18 @@ fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<(), Mine
 /// &&
 fn sum_outputs(
     outputs: &[Output],
-    hf: &HardFork,
-    tx_version: &TxVersion,
+    hf: HardFork,
+    tx_version: TxVersion,
 ) -> Result<u64, MinerTxError> {
     let mut sum: u64 = 0;
 
     for out in outputs {
         let amt = out.amount.unwrap_or(0);
 
-        if tx_version == &TxVersion::RingSignatures && amt == 0 {
+        if tx_version == TxVersion::RingSignatures && amt == 0 {
             return Err(MinerTxError::OutputAmountIncorrect);
         }
 
-        if hf == &HardFork::V3 && !is_decomposed_amount(&amt) {
+        if hf == HardFork::V3 && !is_decomposed_amount(&amt) {
             return Err(MinerTxError::OutputNotDecomposed);
         }
         sum = sum.checked_add(amt).ok_or(MinerTxError::OutputsOverflow)?;
@@ -157,9 +157,9 @@ fn check_total_output_amt(
     total_output: u64,
     reward: u64,
     fees: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<u64, MinerTxError> {
-    if hf == &HardFork::V1 || hf >= &HardFork::V12 {
+    if hf == HardFork::V1 || hf >= HardFork::V12 {
         if total_output != reward + fees {
             return Err(MinerTxError::OutputAmountIncorrect);
         }
@@ -185,16 +185,16 @@ pub fn check_miner_tx(
     block_weight: usize,
     median_bw: usize,
     already_generated_coins: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<u64, MinerTxError> {
     let tx_version = TxVersion::from_raw(tx.version()).ok_or(MinerTxError::VersionInvalid)?;
-    check_miner_tx_version(&tx_version, hf)?;
+    check_miner_tx_version(tx_version, hf)?;
 
     // ref:
     match tx {
         Transaction::V1 { .. } => (),
         Transaction::V2 { proofs, .. } => {
-            if hf >= &HardFork::V12 && proofs.is_some() {
+            if hf >= HardFork::V12 && proofs.is_some() {
                 return Err(MinerTxError::RCTTypeNotNULL);
             }
         }
@@ -207,7 +207,7 @@ pub fn check_miner_tx(
     check_output_types(&tx.prefix().outputs, hf).map_err(|_| MinerTxError::InvalidOutputType)?;
 
     let reward = calculate_block_reward(block_weight, median_bw, already_generated_coins, hf);
-    let total_outs = sum_outputs(&tx.prefix().outputs, hf, &tx_version)?;
+    let total_outs = sum_outputs(&tx.prefix().outputs, hf, tx_version)?;
 
     check_total_output_amt(total_outs, reward, total_fees, hf)
 }
@@ -221,7 +221,7 @@ mod tests {
     proptest! {
         #[test]
        fn tail_emission(generated_coins in any::<u64>(), hf in any::<HardFork>()) {
-            prop_assert!(calculate_base_reward(generated_coins, &hf) >= MINIMUM_REWARD_PER_MIN * hf.block_time().as_secs() / 60)
+            prop_assert!(calculate_base_reward(generated_coins, hf) >= MINIMUM_REWARD_PER_MIN * hf.block_time().as_secs() / 60);
         }
     }
 }
diff --git a/consensus/rules/src/transactions.rs b/consensus/rules/src/transactions.rs
index 9c6ad51..b4eac19 100644
--- a/consensus/rules/src/transactions.rs
+++ b/consensus/rules/src/transactions.rs
@@ -99,11 +99,8 @@ fn check_output_keys(outputs: &[Output]) -> Result<(), TransactionError> {
 ///
 ///
 ///
-pub(crate) fn check_output_types(
-    outputs: &[Output],
-    hf: &HardFork,
-) -> Result<(), TransactionError> {
-    if hf == &HardFork::V15 {
+pub(crate) fn check_output_types(outputs: &[Output], hf: HardFork) -> Result<(), TransactionError> {
+    if hf == HardFork::V15 {
         for outs in outputs.windows(2) {
             if outs[0].view_tag.is_some() != outs[1].view_tag.is_some() {
                 return Err(TransactionError::OutputTypeInvalid);
@@ -113,8 +110,8 @@
     }
 
     for out in outputs {
-        if hf <= &HardFork::V14 && out.view_tag.is_some()
-            || hf >= &HardFork::V16 && out.view_tag.is_none()
+        if hf <= HardFork::V14 && out.view_tag.is_some()
+            || hf >= HardFork::V16 && out.view_tag.is_none()
         {
             return Err(TransactionError::OutputTypeInvalid);
         }
@@ -125,12 +122,12 @@
 /// Checks the individual outputs amount for version 1 txs.
 ///
 /// ref:
-fn check_output_amount_v1(amount: u64, hf: &HardFork) -> Result<(), TransactionError> {
+fn check_output_amount_v1(amount: u64, hf: HardFork) -> Result<(), TransactionError> {
     if amount == 0 {
         return Err(TransactionError::ZeroOutputForV1);
     }
 
-    if hf >= &HardFork::V2 && !is_decomposed_amount(&amount) {
+    if hf >= HardFork::V2 && !is_decomposed_amount(&amount) {
         return Err(TransactionError::AmountNotDecomposed);
     }
 
@@ -140,7 +137,7 @@
 /// Checks the individual outputs amount for version 2 txs.
 ///
 /// ref:
-fn check_output_amount_v2(amount: u64) -> Result<(), TransactionError> {
+const fn check_output_amount_v2(amount: u64) -> Result<(), TransactionError> {
     if amount == 0 {
         Ok(())
     } else {
@@ -154,8 +151,8 @@
 /// &&
 fn sum_outputs(
     outputs: &[Output],
-    hf: &HardFork,
-    tx_version: &TxVersion,
+    hf: HardFork,
+    tx_version: TxVersion,
 ) -> Result<u64, TransactionError> {
     let mut sum: u64 = 0;
 
@@ -181,15 +178,15 @@
 /// &&
 fn check_number_of_outputs(
     outputs: usize,
-    hf: &HardFork,
-    tx_version: &TxVersion,
+    hf: HardFork,
+    tx_version: TxVersion,
     bp_or_bpp: bool,
 ) -> Result<(), TransactionError> {
-    if tx_version == &TxVersion::RingSignatures {
+    if tx_version == TxVersion::RingSignatures {
         return Ok(());
     }
 
-    if hf >= &HardFork::V12 && outputs < 2 {
+    if hf >= HardFork::V12 && outputs < 2 {
         return Err(TransactionError::InvalidNumberOfOutputs);
     }
 
@@ -207,8 +204,8 @@
 /// &&
 fn check_outputs_semantics(
     outputs: &[Output],
-    hf: &HardFork,
-    tx_version: &TxVersion,
+    hf: HardFork,
+    tx_version: TxVersion,
     bp_or_bpp: bool,
 ) -> Result<u64, TransactionError> {
     check_output_types(outputs, hf)?;
@@ -223,11 +220,11 @@
 /// Checks if an outputs unlock time has passed.
 ///
 ///
-pub fn output_unlocked(
+pub const fn output_unlocked(
     time_lock: &Timelock,
     current_chain_height: usize,
     current_time_lock_timestamp: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> bool {
     match *time_lock {
         Timelock::None => true,
@@ -243,7 +240,7 @@
 /// Returns if a locked output, which uses a block height, can be spent.
 ///
 /// ref:
-fn check_block_time_lock(unlock_height: usize, current_chain_height: usize) -> bool {
+const fn check_block_time_lock(unlock_height: usize, current_chain_height: usize) -> bool {
     // current_chain_height = 1 + top height
     unlock_height <= current_chain_height
 }
@@ -251,10 +248,10 @@
 /// Returns if a locked output, which uses a block height, can be spent.
 ///
 /// ref:
-fn check_timestamp_time_lock(
+const fn check_timestamp_time_lock(
     unlock_timestamp: u64,
     current_time_lock_timestamp: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> bool {
     current_time_lock_timestamp + hf.block_time().as_secs() >= unlock_timestamp
 }
@@ -269,19 +266,19 @@
 fn check_all_time_locks(
     time_locks: &[Timelock],
     current_chain_height: usize,
     current_time_lock_timestamp: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<(), TransactionError> {
     time_locks.iter().try_for_each(|time_lock| {
-        if !output_unlocked(
+        if output_unlocked(
             time_lock,
             current_chain_height,
             current_time_lock_timestamp,
             hf,
         ) {
+            Ok(())
+        } else {
             tracing::debug!("Transaction invalid: one or more inputs locked, lock: {time_lock:?}.");
             Err(TransactionError::OneOrMoreRingMembersLocked)
-        } else {
-            Ok(())
         }
     })
 }
@@ -292,11 +289,11 @@
 ///
 /// ref:
 /// &&
-pub fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), TransactionError> {
-    if hf == &HardFork::V15 {
+pub fn check_decoy_info(decoy_info: &DecoyInfo, hf: HardFork) -> Result<(), TransactionError> {
+    if hf == HardFork::V15 {
         // Hard-fork 15 allows both v14 and v16 rules
-        return check_decoy_info(decoy_info, &HardFork::V14)
-            .or_else(|_| check_decoy_info(decoy_info, &HardFork::V16));
+        return check_decoy_info(decoy_info, HardFork::V14)
+            .or_else(|_| check_decoy_info(decoy_info, HardFork::V16));
     }
 
     let current_minimum_decoys = minimum_decoys(hf);
@@ -310,13 +307,13 @@
         if decoy_info.mixable > 1 {
             return Err(TransactionError::MoreThanOneMixableInputWithUnmixable);
         }
-    } else if hf >= &HardFork::V8 && decoy_info.min_decoys != current_minimum_decoys {
+    } else if hf >= HardFork::V8 && decoy_info.min_decoys != current_minimum_decoys {
         // From V8 enforce the minimum used number of rings is the default minimum.
         return Err(TransactionError::InputDoesNotHaveExpectedNumbDecoys);
     }
 
     // From v12 all inputs must have the same number of decoys.
-    if hf >= &HardFork::V12 && decoy_info.min_decoys != decoy_info.max_decoys {
+    if hf >= HardFork::V12 && decoy_info.min_decoys != decoy_info.max_decoys {
         return Err(TransactionError::InputDoesNotHaveExpectedNumbDecoys);
     }
 
@@ -334,19 +331,19 @@ fn check_key_images(input: &Input) -> Result<(), TransactionError> {
                 return Err(TransactionError::KeyImageIsNotInPrimeSubGroup);
             }
         }
-        _ => Err(TransactionError::IncorrectInputType)?,
+        Input::Gen(_) => return Err(TransactionError::IncorrectInputType),
     }
 
     Ok(())
 }
 
-/// Checks that the input is of type [`Input::ToKey`] aka txin_to_key.
+/// Checks that the input is of type [`Input::ToKey`] aka `txin_to_key`.
 ///
 /// ref:
-fn check_input_type(input: &Input) -> Result<(), TransactionError> {
+const fn check_input_type(input: &Input) -> Result<(), TransactionError> {
     match input {
         Input::ToKey { .. } => Ok(()),
-        _ => Err(TransactionError::IncorrectInputType)?,
+        Input::Gen(_) => Err(TransactionError::IncorrectInputType),
     }
 }
 
@@ -362,15 +359,15 @@ fn check_input_has_decoys(input: &Input) -> Result<(), TransactionError> {
                 Ok(())
             }
         }
-        _ => Err(TransactionError::IncorrectInputType)?,
+        Input::Gen(_) => Err(TransactionError::IncorrectInputType),
     }
 }
 
 /// Checks that the ring members for the input are unique after hard-fork 6.
 ///
 /// ref:
-fn check_ring_members_unique(input: &Input, hf: &HardFork) -> Result<(), TransactionError> {
-    if hf >= &HardFork::V6 {
+fn check_ring_members_unique(input: &Input, hf: HardFork) -> Result<(), TransactionError> {
+    if hf >= HardFork::V6 {
         match input {
             Input::ToKey { key_offsets, .. } => key_offsets.iter().skip(1).try_for_each(|offset| {
                 if *offset == 0 {
@@ -379,7 +376,7 @@ fn check_ring_members_unique(input: &Input, hf: &HardFork) -> Result<(), Transac
                     Ok(())
                 }
             }),
-            _ => Err(TransactionError::IncorrectInputType)?,
+            Input::Gen(_) => Err(TransactionError::IncorrectInputType),
         }
     } else {
         Ok(())
@@ -389,23 +386,22 @@
 /// Checks that from hf 7 the inputs are sorted by key image.
 ///
 /// ref:
-fn check_inputs_sorted(inputs: &[Input], hf: &HardFork) -> Result<(), TransactionError> {
+fn check_inputs_sorted(inputs: &[Input], hf: HardFork) -> Result<(), TransactionError> {
     let get_ki = |inp: &Input| match inp {
         Input::ToKey { key_image, .. } => Ok(key_image.compress().to_bytes()),
-        _ => Err(TransactionError::IncorrectInputType),
+        Input::Gen(_) => Err(TransactionError::IncorrectInputType),
     };
 
-    if hf >= &HardFork::V7 {
+    if hf >= HardFork::V7 {
         for inps in inputs.windows(2) {
             match get_ki(&inps[0])?.cmp(&get_ki(&inps[1])?) {
                 Ordering::Greater => (),
                 _ => return Err(TransactionError::InputsAreNotOrdered),
             }
         }
-        Ok(())
-    } else {
-        Ok(())
     }
+
+    Ok(())
 }
 
 /// Checks the youngest output is at least 10 blocks old.
@@ -414,9 +410,9 @@
 fn check_10_block_lock(
     youngest_used_out_height: usize,
     current_chain_height: usize,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<(), TransactionError> {
-    if hf >= &HardFork::V12 {
+    if hf >= HardFork::V12 {
         if youngest_used_out_height + 10 > current_chain_height {
             tracing::debug!(
                 "Transaction invalid: One or more ring members younger than 10 blocks."
@@ -442,7 +438,7 @@ fn sum_inputs_check_overflow(inputs: &[Input]) -> Result<u64, TransactionError>
                 .checked_add(amount.unwrap_or(0))
                 .ok_or(TransactionError::InputsOverflow)?;
             }
-            _ => Err(TransactionError::IncorrectInputType)?,
+            Input::Gen(_) => return Err(TransactionError::IncorrectInputType),
         }
     }
 
@@ -454,7 +450,7 @@
 /// Semantic rules are rules that don't require blockchain context, the hard-fork does not require blockchain context as:
 /// - The tx-pool will use the current hard-fork
 /// - When syncing the hard-fork is in the block header.
-fn check_inputs_semantics(inputs: &[Input], hf: &HardFork) -> Result<u64, TransactionError> {
+fn check_inputs_semantics(inputs: &[Input], hf: HardFork) -> Result<u64, TransactionError> {
     //
     if inputs.is_empty() {
         return Err(TransactionError::NoInputs);
@@ -481,14 +477,14 @@ fn check_inputs_contextual(
     inputs: &[Input],
     tx_ring_members_info: &TxRingMembersInfo,
     current_chain_height: usize,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<(), TransactionError> {
     // This rule is not contained in monero-core explicitly, but it is enforced by how Monero picks ring members.
     // When picking ring members monerod will only look in the DB at past blocks so an output has to be younger
     // than this transaction to be used in this tx.
     if tx_ring_members_info.youngest_used_out_height >= current_chain_height {
         tracing::debug!("Transaction invalid: One or more ring members too young.");
-        Err(TransactionError::OneOrMoreRingMembersLocked)?;
+        return Err(TransactionError::OneOrMoreRingMembersLocked);
     }
 
     check_10_block_lock(
@@ -500,7 +496,7 @@ fn check_inputs_contextual(
     if let Some(decoys_info) = &tx_ring_members_info.decoy_info {
         check_decoy_info(decoys_info, hf)?;
     } else {
-        assert_eq!(hf, &HardFork::V1);
+        assert_eq!(hf, HardFork::V1);
     }
 
     for input in inputs {
@@ -517,22 +513,22 @@
 ///
 fn check_tx_version(
     decoy_info: &Option<DecoyInfo>,
-    version: &TxVersion,
-    hf: &HardFork,
+    version: TxVersion,
+    hf: HardFork,
 ) -> Result<(), TransactionError> {
     if let Some(decoy_info) = decoy_info {
         let max = max_tx_version(hf);
-        if version > &max {
+        if version > max {
             return Err(TransactionError::TransactionVersionInvalid);
         }
 
         let min = min_tx_version(hf);
-        if version < &min && decoy_info.not_mixable == 0 {
+        if version < min && decoy_info.not_mixable == 0 {
             return Err(TransactionError::TransactionVersionInvalid);
         }
     } else {
         // This will only happen for hard-fork 1 when only RingSignatures are allowed.
-        if version != &TxVersion::RingSignatures {
+        if version != TxVersion::RingSignatures {
             return Err(TransactionError::TransactionVersionInvalid);
         }
     }
@@ -541,8 +537,8 @@
 }
 
 /// Returns the default maximum tx version for the given hard-fork.
-fn max_tx_version(hf: &HardFork) -> TxVersion {
-    if hf <= &HardFork::V3 {
+fn max_tx_version(hf: HardFork) -> TxVersion {
+    if hf <= HardFork::V3 {
         TxVersion::RingSignatures
     } else {
         TxVersion::RingCT
@@ -550,15 +546,15 @@
 }
 
 /// Returns the default minimum tx version for the given hard-fork.
-fn min_tx_version(hf: &HardFork) -> TxVersion {
-    if hf >= &HardFork::V6 {
+fn min_tx_version(hf: HardFork) -> TxVersion {
+    if hf >= HardFork::V6 {
         TxVersion::RingCT
     } else {
         TxVersion::RingSignatures
     }
 }
 
-fn transaction_weight_limit(hf: &HardFork) -> usize {
+fn transaction_weight_limit(hf: HardFork) -> usize {
     penalty_free_zone(hf) / 2 - 600
 }
 
@@ -575,14 +571,14 @@ pub fn check_transaction_semantic(
     tx_blob_size: usize,
     tx_weight: usize,
     tx_hash: &[u8; 32],
-    hf: &HardFork,
+    hf: HardFork,
     verifier: impl BatchVerifier,
 ) -> Result<u64, TransactionError> {
     //
     if tx_blob_size > MAX_TX_BLOB_SIZE
-        || (hf >= &HardFork::V8 && tx_weight > transaction_weight_limit(hf))
+        || (hf >= HardFork::V8 && tx_weight > transaction_weight_limit(hf))
     {
-        Err(TransactionError::TooBig)?;
+        return Err(TransactionError::TooBig);
     }
 
     let tx_version =
@@ -602,13 +598,13 @@ pub fn check_transaction_semantic(
         Transaction::V2 { proofs: None, .. } | Transaction::V1 { .. } => false,
     };
 
-    let outputs_sum = check_outputs_semantics(&tx.prefix().outputs, hf, &tx_version, bp_or_bpp)?;
+    let outputs_sum = check_outputs_semantics(&tx.prefix().outputs, hf, tx_version, bp_or_bpp)?;
     let inputs_sum = check_inputs_semantics(&tx.prefix().inputs, hf)?;
 
     let fee = match tx {
         Transaction::V1 { .. } => {
             if outputs_sum >= inputs_sum {
-                Err(TransactionError::OutputsTooHigh)?;
+                return Err(TransactionError::OutputsTooHigh);
             }
             inputs_sum - outputs_sum
         }
@@ -633,13 +629,12 @@
 /// This function also does _not_ check for duplicate key-images: .
 ///
 /// `current_time_lock_timestamp` must be: .
-
 pub fn check_transaction_contextual(
     tx: &Transaction,
     tx_ring_members_info: &TxRingMembersInfo,
     current_chain_height: usize,
     current_time_lock_timestamp: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<(), TransactionError> {
     let tx_version =
         TxVersion::from_raw(tx.version()).ok_or(TransactionError::TransactionVersionInvalid)?;
@@ -650,7 +645,7 @@
         current_chain_height,
         hf,
     )?;
-    check_tx_version(&tx_ring_members_info.decoy_info, &tx_version, hf)?;
+    check_tx_version(&tx_ring_members_info.decoy_info, tx_version, hf)?;
 
     check_all_time_locks(
         &tx_ring_members_info.time_locked_outs,
diff --git a/consensus/rules/src/transactions/contextual_data.rs b/consensus/rules/src/transactions/contextual_data.rs
index 282093d..73bc12e 100644
--- a/consensus/rules/src/transactions/contextual_data.rs
+++ b/consensus/rules/src/transactions/contextual_data.rs
@@ -26,7 +26,7 @@ pub fn get_absolute_offsets(relative_offsets: &[u64]) -> Result<Vec<u64>, Transa
     Ok(offsets)
 }
 
-/// Inserts the output IDs that are needed to verify the transaction inputs into the provided HashMap.
+/// Inserts the output IDs that are needed to verify the transaction inputs into the provided `HashMap`.
 ///
 /// This will error if the inputs are empty
 ///
@@ -49,7 +49,7 @@ pub fn insert_ring_member_ids(
                 .entry(amount.unwrap_or(0))
                 .or_default()
                 .extend(get_absolute_offsets(key_offsets)?),
-            _ => return Err(TransactionError::IncorrectInputType),
+            Input::Gen(_) => return Err(TransactionError::IncorrectInputType),
         }
     }
     Ok(())
@@ -60,7 +60,7 @@
 pub enum Rings {
     /// Legacy, pre-ringCT, rings.
     Legacy(Vec<Vec<EdwardsPoint>>),
-    /// RingCT rings, (outkey, amount commitment).
+    /// `RingCT` rings, (outkey, amount commitment).
     RingCT(Vec<Vec<[EdwardsPoint; 2]>>),
 }
 
@@ -103,15 +103,15 @@ impl DecoyInfo {
     ///
     /// So:
     ///
-    /// amount_outs_on_chain(inputs`[X]`) == outputs_with_amount`[X]`
+    /// `amount_outs_on_chain(inputs[X]) == outputs_with_amount[X]`
     ///
     /// Do not rely on this function to do consensus checks!
     ///
     pub fn new(
         inputs: &[Input],
         outputs_with_amount: impl Fn(u64) -> usize,
-        hf: &HardFork,
-    ) -> Result<DecoyInfo, TransactionError> {
+        hf: HardFork,
+    ) -> Result<Self, TransactionError> {
         let mut min_decoys = usize::MAX;
         let mut max_decoys = usize::MIN;
         let mut mixable = 0;
@@ -119,7 +119,7 @@ impl DecoyInfo {
         let minimum_decoys = minimum_decoys(hf);
 
-        for inp in inputs.iter() {
+        for inp in inputs {
             match inp {
                 Input::ToKey {
                     key_offsets,
@@ -149,11 +149,11 @@ impl DecoyInfo {
                     min_decoys = min(min_decoys, numb_decoys);
                     max_decoys = max(max_decoys, numb_decoys);
                 }
-                _ => return Err(TransactionError::IncorrectInputType),
+                Input::Gen(_) => return Err(TransactionError::IncorrectInputType),
             }
         }
 
-        Ok(DecoyInfo {
+        Ok(Self {
             mixable,
             not_mixable,
             min_decoys,
@@ -166,7 +166,7 @@
 /// **There are exceptions to this always being the minimum decoys**
 ///
 /// ref:
-pub(crate) fn minimum_decoys(hf: &HardFork) -> usize {
+pub(crate) fn minimum_decoys(hf: HardFork) -> usize {
     use HardFork as HF;
     match hf {
         HF::V1 => panic!("hard-fork 1 does not use these rules!"),
diff --git a/consensus/rules/src/transactions/ring_ct.rs b/consensus/rules/src/transactions/ring_ct.rs
index 62f71dd..32cedd4 100644
--- a/consensus/rules/src/transactions/ring_ct.rs
+++ b/consensus/rules/src/transactions/ring_ct.rs
@@ -40,10 +40,10 @@ pub enum RingCTError {
     CLSAGError(#[from] ClsagError),
 }
 
-/// Checks the RingCT type is allowed for the current hard fork.
+/// Checks the `RingCT` type is allowed for the current hard fork.
 ///
 ///
-fn check_rct_type(ty: &RctType, hf: HardFork, tx_hash: &[u8; 32]) -> Result<(), RingCTError> {
+fn check_rct_type(ty: RctType, hf: HardFork, tx_hash: &[u8; 32]) -> Result<(), RingCTError> {
     use HardFork as F;
     use RctType as T;
 
@@ -125,11 +125,11 @@ pub(crate) fn ring_ct_semantic_checks(
     proofs: &RctProofs,
     tx_hash: &[u8; 32],
     verifier: impl BatchVerifier,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<(), RingCTError> {
     let rct_type = proofs.rct_type();
 
-    check_rct_type(&rct_type, *hf, tx_hash)?;
+    check_rct_type(rct_type, hf, tx_hash)?;
     check_output_range_proofs(proofs, verifier)?;
 
     if rct_type != RctType::AggregateMlsagBorromean {
@@ -154,7 +154,7 @@ pub(crate) fn check_input_signatures(
     };
 
     if rings.is_empty() {
-        Err(RingCTError::RingInvalid)?;
+        return Err(RingCTError::RingInvalid);
     }
 
     let pseudo_outs = match &proofs.prunable {
@@ -222,20 +222,20 @@ mod tests {
     #[test]
     fn grandfathered_bulletproofs2() {
         assert!(check_rct_type(
-            &RctType::MlsagBulletproofsCompactAmount,
+            RctType::MlsagBulletproofsCompactAmount,
             HardFork::V14,
             &[0; 32]
         )
         .is_err());
 
         assert!(check_rct_type(
-            &RctType::MlsagBulletproofsCompactAmount,
+            RctType::MlsagBulletproofsCompactAmount,
             HardFork::V14,
             &GRANDFATHERED_TRANSACTIONS[0]
         )
         .is_ok());
 
        assert!(check_rct_type(
-            &RctType::MlsagBulletproofsCompactAmount,
+            RctType::MlsagBulletproofsCompactAmount,
             HardFork::V14,
             &GRANDFATHERED_TRANSACTIONS[1]
         )
diff --git a/consensus/rules/src/transactions/ring_signatures.rs b/consensus/rules/src/transactions/ring_signatures.rs
index 7d4b8f9..a226f5e 100644
--- a/consensus/rules/src/transactions/ring_signatures.rs
+++ b/consensus/rules/src/transactions/ring_signatures.rs
@@ -17,7 +17,7 @@ use crate::try_par_iter;
 /// Verifies the ring signature.
 ///
 /// ref:
-pub fn check_input_signatures(
+pub(crate) fn check_input_signatures(
     inputs: &[Input],
     signatures: &[RingSignature],
     rings: &Rings,
@@ -45,7 +45,7 @@
                 Ok(())
             })?;
         }
-        _ => panic!("tried to verify v1 tx with a non v1 ring"),
+        Rings::RingCT(_) => panic!("tried to verify v1 tx with a non v1 ring"),
     }
     Ok(())
 }
diff --git a/consensus/rules/src/transactions/tests.rs b/consensus/rules/src/transactions/tests.rs
index 4da8fd5..936d843 100644
--- a/consensus/rules/src/transactions/tests.rs
+++ b/consensus/rules/src/transactions/tests.rs
@@ -16,13 +16,13 @@ use crate::decomposed_amount::DECOMPOSED_AMOUNTS;
 
 #[test]
 fn test_check_output_amount_v1() {
-    for amount in DECOMPOSED_AMOUNTS.iter() {
-        assert!(check_output_amount_v1(*amount, &HardFork::V2).is_ok())
+    for amount in &DECOMPOSED_AMOUNTS {
+        assert!(check_output_amount_v1(*amount, HardFork::V2).is_ok());
     }
 
    proptest!(|(amount in any::<u64>().prop_filter("value_decomposed", |val| !is_decomposed_amount(val)))| {
-        prop_assert!(check_output_amount_v1(amount, &HardFork::V2).is_err());
-        prop_assert!(check_output_amount_v1(amount, &HardFork::V1).is_ok())
+        prop_assert!(check_output_amount_v1(amount, HardFork::V2).is_err());
+        prop_assert!(check_output_amount_v1(amount, HardFork::V1).is_ok());
     });
 }
 
@@ -41,10 +41,10 @@ fn test_sum_outputs() {
     let outs = [output_10, outputs_20];
 
-    let sum = sum_outputs(&outs, &HardFork::V16, &TxVersion::RingSignatures).unwrap();
+    let sum = sum_outputs(&outs, HardFork::V16, TxVersion::RingSignatures).unwrap();
     assert_eq!(sum, 30);
 
-    assert!(sum_outputs(&outs, &HardFork::V16, &TxVersion::RingCT).is_err())
+    assert!(sum_outputs(&outs, HardFork::V16, TxVersion::RingCT).is_err());
 }
 
 #[test]
@@ -52,50 +52,50 @@ fn test_decoy_info() {
     let decoy_info = DecoyInfo {
         mixable: 0,
         not_mixable: 0,
-        min_decoys: minimum_decoys(&HardFork::V8),
-        max_decoys: minimum_decoys(&HardFork::V8) + 1,
+        min_decoys: minimum_decoys(HardFork::V8),
+        max_decoys: minimum_decoys(HardFork::V8) + 1,
     };
 
-    assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_ok());
-    assert!(check_decoy_info(&decoy_info, &HardFork::V16).is_err());
+    assert!(check_decoy_info(&decoy_info, HardFork::V8).is_ok());
+    assert!(check_decoy_info(&decoy_info, HardFork::V16).is_err());
 
     let mut decoy_info = DecoyInfo {
         mixable: 0,
         not_mixable: 0,
-        min_decoys: minimum_decoys(&HardFork::V8) - 1,
-        max_decoys: minimum_decoys(&HardFork::V8) + 1,
+        min_decoys: minimum_decoys(HardFork::V8) - 1,
+        max_decoys: minimum_decoys(HardFork::V8) + 1,
     };
 
-    assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_err());
+    assert!(check_decoy_info(&decoy_info, HardFork::V8).is_err());
 
     decoy_info.not_mixable = 1;
-    assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_ok());
+    assert!(check_decoy_info(&decoy_info, HardFork::V8).is_ok());
 
     decoy_info.mixable = 2;
-    assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_err());
+    assert!(check_decoy_info(&decoy_info, HardFork::V8).is_err());
 
     let mut decoy_info = DecoyInfo {
         mixable: 0,
         not_mixable: 0,
-        min_decoys: minimum_decoys(&HardFork::V12),
-        max_decoys: minimum_decoys(&HardFork::V12) + 1,
+        min_decoys: minimum_decoys(HardFork::V12),
+        max_decoys: minimum_decoys(HardFork::V12) + 1,
     };
 
-    assert!(check_decoy_info(&decoy_info, &HardFork::V12).is_err());
+    assert!(check_decoy_info(&decoy_info, HardFork::V12).is_err());
 
     decoy_info.max_decoys = decoy_info.min_decoys;
-    assert!(check_decoy_info(&decoy_info, &HardFork::V12).is_ok());
+    assert!(check_decoy_info(&decoy_info, HardFork::V12).is_ok());
 }
 
 #[test]
 fn test_torsion_ki() {
-    for &key_image in EIGHT_TORSION[1..].iter() {
+    for &key_image in &EIGHT_TORSION[1..] {
         assert!(check_key_images(&Input::ToKey {
             key_image,
             amount: None,
             key_offsets: vec![],
         })
-        .is_err())
+        .is_err());
     }
 }
 
@@ -109,7 +109,7 @@
 prop_compose! {
     /// Returns a valid torsioned point.
     fn random_torsioned_point()(point in random_point(), torsion in 1..8_usize ) -> EdwardsPoint {
-        point + curve25519_dalek::constants::EIGHT_TORSION[torsion]
+        point + EIGHT_TORSION[torsion]
     }
 }
 
@@ -175,7 +175,7 @@
     /// Returns a [`Timelock`] that is unlocked given a height and time.
     fn unlocked_timelock(height: u64, time_for_time_lock: u64)(
         ty in 0..3,
-        lock_height in 0..(height+1),
+        lock_height in 0..=height,
         time_for_time_lock in 0..(time_for_time_lock+121),
     ) -> Timelock {
         match ty {
@@ -203,33 +203,33 @@ proptest! {
        hf_no_view_tags in hf_in_range(1..14),
        hf_view_tags in hf_in_range(16..17),
     ) {
-        prop_assert!(check_output_types(&view_tag_outs, &hf_view_tags).is_ok());
-        prop_assert!(check_output_types(&view_tag_outs, &hf_no_view_tags).is_err());
+        prop_assert!(check_output_types(&view_tag_outs, hf_view_tags).is_ok());
+        prop_assert!(check_output_types(&view_tag_outs, hf_no_view_tags).is_err());
 
-        prop_assert!(check_output_types(&non_view_tag_outs, &hf_no_view_tags).is_ok());
-        prop_assert!(check_output_types(&non_view_tag_outs, &hf_view_tags).is_err());
+        prop_assert!(check_output_types(&non_view_tag_outs, hf_no_view_tags).is_ok());
+        prop_assert!(check_output_types(&non_view_tag_outs, hf_view_tags).is_err());
 
-        prop_assert!(check_output_types(&non_view_tag_outs, &HardFork::V15).is_ok());
-        prop_assert!(check_output_types(&view_tag_outs, &HardFork::V15).is_ok());
+        prop_assert!(check_output_types(&non_view_tag_outs, HardFork::V15).is_ok());
+        prop_assert!(check_output_types(&view_tag_outs, HardFork::V15).is_ok());
 
         view_tag_outs.append(&mut non_view_tag_outs);
-        prop_assert!(check_output_types(&view_tag_outs, &HardFork::V15).is_err());
+        prop_assert!(check_output_types(&view_tag_outs, HardFork::V15).is_err());
     }
 
     #[test]
     fn test_valid_number_of_outputs(valid_numb_outs in 2..17_usize) {
-        prop_assert!(check_number_of_outputs(valid_numb_outs, &HardFork::V16, &TxVersion::RingCT, true).is_ok());
+        prop_assert!(check_number_of_outputs(valid_numb_outs, HardFork::V16, TxVersion::RingCT, true).is_ok());
     }
 
     #[test]
     fn test_invalid_number_of_outputs(numb_outs in 17..usize::MAX) {
-        prop_assert!(check_number_of_outputs(numb_outs, &HardFork::V16, &TxVersion::RingCT, true).is_err());
+        prop_assert!(check_number_of_outputs(numb_outs, HardFork::V16, TxVersion::RingCT, true).is_err());
     }
 
     #[test]
     fn test_check_output_amount_v2(amt in 1..u64::MAX) {
         prop_assert!(check_output_amount_v2(amt).is_err());
-        prop_assert!(check_output_amount_v2(0).is_ok())
+        prop_assert!(check_output_amount_v2(0).is_ok());
     }
 
     #[test]
@@ -241,9 +241,9 @@
     fn test_timestamp_time_lock(timestamp in 500_000_001..u64::MAX) {
-        prop_assert!(check_timestamp_time_lock(timestamp, timestamp - 120, &HardFork::V16));
-        prop_assert!(!check_timestamp_time_lock(timestamp, timestamp - 121, &HardFork::V16));
-        prop_assert!(check_timestamp_time_lock(timestamp, timestamp, &HardFork::V16));
+        prop_assert!(check_timestamp_time_lock(timestamp, timestamp - 120, HardFork::V16));
+        prop_assert!(!check_timestamp_time_lock(timestamp, timestamp - 121, HardFork::V16));
+        prop_assert!(check_timestamp_time_lock(timestamp, timestamp, HardFork::V16));
     }
 
     #[test]
@@ -251,11 +251,11 @@
        mut locked_locks in vec(locked_timelock(5_000, 100_000_000), 1..50),
        mut unlocked_locks in vec(unlocked_timelock(5_000, 100_000_000), 1..50)
     ) {
-        assert!(check_all_time_locks(&locked_locks, 5_000, 100_000_000, &HardFork::V16).is_err());
-        assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, &HardFork::V16).is_ok());
+        assert!(check_all_time_locks(&locked_locks, 5_000, 100_000_000, HardFork::V16).is_err());
+        assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, HardFork::V16).is_ok());
 
         unlocked_locks.append(&mut locked_locks);
-        assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, &HardFork::V16).is_err());
+        assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, HardFork::V16).is_err());
     }
 
     #[test]
diff --git a/consensus/src/batch_verifier.rs b/consensus/src/batch_verifier.rs
index 69018ac..101f981 100644
--- a/consensus/src/batch_verifier.rs
+++ b/consensus/src/batch_verifier.rs
@@ -1,4 +1,4 @@
-use std::{cell::RefCell, ops::DerefMut};
+use std::cell::RefCell;
 
 use monero_serai::ringct::bulletproofs::BatchVerifier as InternalBatchVerifier;
 use rayon::prelude::*;
@@ -13,8 +13,8 @@ pub struct MultiThreadedBatchVerifier {
 
 impl MultiThreadedBatchVerifier {
     /// Create a new multithreaded batch verifier,
-    pub fn new(numb_threads: usize) -> MultiThreadedBatchVerifier {
-        MultiThreadedBatchVerifier {
+    pub fn new(numb_threads: usize) -> Self {
+        Self {
             internal: ThreadLocal::with_capacity(numb_threads),
         }
     }
@@ -42,6 +42,6 @@ impl BatchVerifier for &'_ MultiThreadedBatchVerifier {
             .get_or(|| RefCell::new(InternalBatchVerifier::new()))
             .borrow_mut();
 
-        stmt(verifier.deref_mut())
+        stmt(&mut verifier)
     }
 }
diff --git a/consensus/src/block.rs b/consensus/src/block.rs
index e785a6b..3d0db99 100644
--- a/consensus/src/block.rs
+++ b/consensus/src/block.rs
@@ -72,17 +72,17 @@ impl PreparedBlockExPow {
     /// This errors if either the `block`'s:
     /// - Hard-fork values are invalid
     /// - Miner transaction is missing a miner input
-    pub fn new(block: Block) -> Result<PreparedBlockExPow, ConsensusError> {
+    pub fn new(block: Block) -> Result<Self, ConsensusError> {
         let (hf_version, hf_vote) = HardFork::from_block_header(&block.header)
             .map_err(|_| BlockError::HardForkError(HardForkError::HardForkUnknown))?;
 
         let Some(Input::Gen(height)) = block.miner_transaction.prefix().inputs.first() else {
-            Err(ConsensusError::Block(BlockError::MinerTxError(
+            return Err(ConsensusError::Block(BlockError::MinerTxError(
                 MinerTxError::InputNotOfTypeGen,
-            )))?
+            )));
         };
 
-        Ok(PreparedBlockExPow {
+        Ok(Self {
             block_blob: block.serialize(),
             hf_vote,
             hf_version,
@@ -123,20 +123,17 @@ impl PreparedBlock {
     ///
     /// The randomX VM must be Some if RX is needed or this will panic.
     /// The randomX VM must also be initialised with the correct seed.
-    fn new<R: RandomX>(
-        block: Block,
-        randomx_vm: Option<&R>,
-    ) -> Result<PreparedBlock, ConsensusError> {
+    fn new<R: RandomX>(block: Block, randomx_vm: Option<&R>) -> Result<Self, ConsensusError> {
         let (hf_version, hf_vote) = HardFork::from_block_header(&block.header)
             .map_err(|_| BlockError::HardForkError(HardForkError::HardForkUnknown))?;
 
         let [Input::Gen(height)] = &block.miner_transaction.prefix().inputs[..] else {
-            Err(ConsensusError::Block(BlockError::MinerTxError(
+            return Err(ConsensusError::Block(BlockError::MinerTxError(
                 MinerTxError::InputNotOfTypeGen,
-            )))?
+            )));
         };
 
-        Ok(PreparedBlock {
+        Ok(Self {
             block_blob: block.serialize(),
             hf_vote,
             hf_version,
@@ -156,17 +153,17 @@ impl PreparedBlock {
     /// Creates a new [`PreparedBlock`] from a [`PreparedBlockExPow`].
/// - /// This function will give an invalid PoW hash if `randomx_vm` is not initialised + /// This function will give an invalid proof-of-work hash if `randomx_vm` is not initialised /// with the correct seed. /// /// # Panics /// This function will panic if `randomx_vm` is - /// [`None`] even though RandomX is needed. + /// [`None`] even though `RandomX` is needed. fn new_prepped( block: PreparedBlockExPow, randomx_vm: Option<&R>, - ) -> Result { - Ok(PreparedBlock { + ) -> Result { + Ok(Self { block_blob: block.block_blob, hf_vote: block.hf_vote, hf_version: block.hf_version, @@ -218,7 +215,6 @@ pub enum VerifyBlockRequest { } /// A response from a verify block request. -#[allow(clippy::large_enum_variant)] // The largest variant is most common ([`MainChain`]) pub enum VerifyBlockResponse { /// This block is valid. MainChain(VerifiedBlockInformation), @@ -254,12 +250,8 @@ where D::Future: Send + 'static, { /// Creates a new block verifier. - pub(crate) fn new( - context_svc: C, - tx_verifier_svc: TxV, - database: D, - ) -> BlockVerifierService { - BlockVerifierService { + pub(crate) const fn new(context_svc: C, tx_verifier_svc: TxV, database: D) -> Self { + Self { context_svc, tx_verifier_svc, _database: database, diff --git a/consensus/src/block/alt_block.rs b/consensus/src/block/alt_block.rs index b20b4f2..3a5ea7c 100644 --- a/consensus/src/block/alt_block.rs +++ b/consensus/src/block/alt_block.rs @@ -36,8 +36,8 @@ use crate::{ /// /// Returns [`AltBlockInformation`], which contains the cumulative difficulty of the alt chain. /// -/// This function only checks the block's PoW and its weight. -pub async fn sanity_check_alt_block( +/// This function only checks the block's proof-of-work and its weight. +pub(crate) async fn sanity_check_alt_block( block: Block, txs: HashMap<[u8; 32], TransactionVerificationData>, mut context_svc: C, @@ -66,15 +66,17 @@ where // Check if the block's miner input is formed correctly. let [Input::Gen(height)] = &block.miner_transaction.prefix().inputs[..] else { - Err(ConsensusError::Block(BlockError::MinerTxError( + return Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputNotOfTypeGen, - )))? + )) + .into()); }; if *height != alt_context_cache.chain_height { - Err(ConsensusError::Block(BlockError::MinerTxError( + return Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputsHeightIncorrect, - )))? + )) + .into()); } // prep the alt block. @@ -103,10 +105,10 @@ where if let Some(median_timestamp) = difficulty_cache.median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)) { - check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)? + check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)?; }; - let next_difficulty = difficulty_cache.next_difficulty(&prepped_block.hf_version); + let next_difficulty = difficulty_cache.next_difficulty(prepped_block.hf_version); // make sure the block's PoW is valid for this difficulty. check_block_pow(&prepped_block.pow_hash, next_difficulty).map_err(ConsensusError::Block)?; @@ -127,12 +129,12 @@ where // Check the block weight is below the limit. 
check_block_weight( block_weight, - alt_weight_cache.median_for_block_reward(&prepped_block.hf_version), + alt_weight_cache.median_for_block_reward(prepped_block.hf_version), ) .map_err(ConsensusError::Block)?; let long_term_weight = weight::calculate_block_long_term_weight( - &prepped_block.hf_version, + prepped_block.hf_version, block_weight, alt_weight_cache.median_long_term_weight(), ); @@ -232,9 +234,9 @@ where } }; - Ok(Some( - alt_chain_context.cached_rx_vm.insert(cached_vm).1.clone(), - )) + Ok(Some(Arc::clone( + &alt_chain_context.cached_rx_vm.insert(cached_vm).1, + ))) } /// Returns the [`DifficultyCache`] for the alt chain. diff --git a/consensus/src/block/batch_prepare.rs b/consensus/src/block/batch_prepare.rs index d32cd76..9c77848 100644 --- a/consensus/src/block/batch_prepare.rs +++ b/consensus/src/block/batch_prepare.rs @@ -68,16 +68,17 @@ where // Make sure no blocks in the batch have a higher hard fork than the last block. if block_0.hf_version > top_hf_in_batch { - Err(ConsensusError::Block(BlockError::HardForkError( + return Err(ConsensusError::Block(BlockError::HardForkError( HardForkError::VersionIncorrect, - )))?; + )) + .into()); } if block_0.block_hash != block_1.block.header.previous || block_0.height != block_1.height - 1 { tracing::debug!("Blocks do not follow each other, verification failed."); - Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?; + return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into()); } // Cache any potential RX VM seeds as we may need them for future blocks in the batch. @@ -85,7 +86,7 @@ where new_rx_vm = Some((block_0.height, block_0.block_hash)); } - timestamps_hfs.push((block_0.block.header.timestamp, block_0.hf_version)) + timestamps_hfs.push((block_0.block.header.timestamp, block_0.hf_version)); } // Get the current blockchain context. @@ -117,15 +118,16 @@ where if context.chain_height != blocks[0].height { tracing::debug!("Blocks do not follow main chain, verification failed."); - Err(ConsensusError::Block(BlockError::MinerTxError( + return Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputsHeightIncorrect, - )))?; + )) + .into()); } if context.top_hash != blocks[0].block.header.previous { tracing::debug!("Blocks do not follow main chain, verification failed."); - Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?; + return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into()); } let mut rx_vms = if top_hf_in_batch < HardFork::V12 { @@ -156,7 +158,7 @@ where context_svc .oneshot(BlockChainContextRequest::NewRXVM(( new_vm_seed, - new_vm.clone(), + Arc::clone(&new_vm), ))) .await?; diff --git a/consensus/src/context.rs b/consensus/src/context.rs index 9e71304..5bdb1ce 100644 --- a/consensus/src/context.rs +++ b/consensus/src/context.rs @@ -56,8 +56,8 @@ pub struct ContextConfig { impl ContextConfig { /// Get the config for main-net. - pub fn main_net() -> ContextConfig { - ContextConfig { + pub const fn main_net() -> Self { + Self { hard_fork_cfg: HardForkConfig::main_net(), difficulty_cfg: DifficultyCacheConfig::main_net(), weights_config: BlockWeightsCacheConfig::main_net(), @@ -65,8 +65,8 @@ impl ContextConfig { } /// Get the config for stage-net. - pub fn stage_net() -> ContextConfig { - ContextConfig { + pub const fn stage_net() -> Self { + Self { hard_fork_cfg: HardForkConfig::stage_net(), // These 2 have the same config as main-net. difficulty_cfg: DifficultyCacheConfig::main_net(), @@ -75,8 +75,8 @@ impl ContextConfig { } /// Get the config for test-net. 
- pub fn test_net() -> ContextConfig { - ContextConfig { + pub const fn test_net() -> Self { + Self { hard_fork_cfg: HardForkConfig::test_net(), // These 2 have the same config as main-net. difficulty_cfg: DifficultyCacheConfig::main_net(), @@ -155,7 +155,7 @@ impl RawBlockChainContext { /// Returns the next blocks long term weight from its block weight. pub fn next_block_long_term_weight(&self, block_weight: usize) -> usize { weight::calculate_block_long_term_weight( - &self.current_hf, + self.current_hf, block_weight, self.median_long_term_weight, ) @@ -191,7 +191,7 @@ impl BlockChainContext { } /// Returns the blockchain context without checking the validity token. - pub fn unchecked_blockchain_context(&self) -> &RawBlockChainContext { + pub const fn unchecked_blockchain_context(&self) -> &RawBlockChainContext { &self.raw } } @@ -222,7 +222,7 @@ pub struct NewBlockData { pub enum BlockChainContextRequest { /// Get the current blockchain context. GetContext, - /// Gets the current RandomX VM. + /// Gets the current `RandomX` VM. GetCurrentRxVm, /// Get the next difficulties for these blocks. /// @@ -288,7 +288,7 @@ pub enum BlockChainContextRequest { /// This variant is private and is not callable from outside this crate, the block verifier service will /// handle getting the randomX VM of an alt chain. AltChainRxVM { - /// The height the RandomX VM is needed for. + /// The height the `RandomX` VM is needed for. height: usize, /// The chain to look in for the seed. chain: Chain, @@ -312,7 +312,7 @@ pub enum BlockChainContextRequest { pub enum BlockChainContextResponse { /// Blockchain context response. Context(BlockChainContext), - /// A map of seed height to RandomX VMs. + /// A map of seed height to `RandomX` VMs. RxVms(HashMap>), /// A list of difficulties. BatchDifficulties(Vec), diff --git a/consensus/src/context/alt_chains.rs b/consensus/src/context/alt_chains.rs index 937e847..cd945c8 100644 --- a/consensus/src/context/alt_chains.rs +++ b/consensus/src/context/alt_chains.rs @@ -68,29 +68,33 @@ impl AltChainContextCache { } /// A map of top IDs to alt chains. -pub struct AltChainMap { +pub(crate) struct AltChainMap { alt_cache_map: HashMap<[u8; 32], Box>, } impl AltChainMap { - pub fn new() -> Self { + pub(crate) fn new() -> Self { Self { alt_cache_map: HashMap::new(), } } - pub fn clear(&mut self) { + pub(crate) fn clear(&mut self) { self.alt_cache_map.clear(); } /// Add an alt chain cache to the map. - pub fn add_alt_cache(&mut self, prev_id: [u8; 32], alt_cache: Box) { + pub(crate) fn add_alt_cache( + &mut self, + prev_id: [u8; 32], + alt_cache: Box, + ) { self.alt_cache_map.insert(prev_id, alt_cache); } /// Attempts to take an [`AltChainContextCache`] from the map, returning [`None`] if no cache is /// present. - pub async fn get_alt_chain_context( + pub(crate) async fn get_alt_chain_context( &mut self, prev_id: [u8; 32], database: D, @@ -109,7 +113,7 @@ impl AltChainMap { let Some((parent_chain, top_height)) = res else { // Couldn't find prev_id - Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))? + return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into()); }; Ok(Box::new(AltChainContextCache { @@ -125,7 +129,7 @@ impl AltChainMap { } /// Builds a [`DifficultyCache`] for an alt chain. 
-pub async fn get_alt_chain_difficulty_cache( +pub(crate) async fn get_alt_chain_difficulty_cache( prev_id: [u8; 32], main_chain_difficulty_cache: &DifficultyCache, mut database: D, @@ -142,7 +146,7 @@ pub async fn get_alt_chain_difficulty_cache( let Some((chain, top_height)) = res else { // Can't find prev_id - Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))? + return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into()); }; Ok(match chain { @@ -172,7 +176,7 @@ pub async fn get_alt_chain_difficulty_cache( } /// Builds a [`BlockWeightsCache`] for an alt chain. -pub async fn get_alt_chain_weight_cache( +pub(crate) async fn get_alt_chain_weight_cache( prev_id: [u8; 32], main_chain_weight_cache: &BlockWeightsCache, mut database: D, @@ -189,7 +193,7 @@ pub async fn get_alt_chain_weight_cache( let Some((chain, top_height)) = res else { // Can't find prev_id - Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))? + return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into()); }; Ok(match chain { diff --git a/consensus/src/context/difficulty.rs b/consensus/src/context/difficulty.rs index eb67cf5..9316dc5 100644 --- a/consensus/src/context/difficulty.rs +++ b/consensus/src/context/difficulty.rs @@ -43,24 +43,24 @@ impl DifficultyCacheConfig { /// /// # Notes /// You probably do not need this, use [`DifficultyCacheConfig::main_net`] instead. - pub const fn new(window: usize, cut: usize, lag: usize) -> DifficultyCacheConfig { - DifficultyCacheConfig { window, cut, lag } + pub const fn new(window: usize, cut: usize, lag: usize) -> Self { + Self { window, cut, lag } } /// Returns the total amount of blocks we need to track to calculate difficulty - pub fn total_block_count(&self) -> usize { + pub const fn total_block_count(&self) -> usize { self.window + self.lag } /// The amount of blocks we account for after removing the outliers. - pub fn accounted_window_len(&self) -> usize { + pub const fn accounted_window_len(&self) -> usize { self.window - 2 * self.cut } /// Returns the config needed for [`Mainnet`](cuprate_helper::network::Network::Mainnet). This is also the /// config for all other current networks. - pub const fn main_net() -> DifficultyCacheConfig { - DifficultyCacheConfig { + pub const fn main_net() -> Self { + Self { window: DIFFICULTY_WINDOW, cut: DIFFICULTY_CUT, lag: DIFFICULTY_LAG, @@ -112,7 +112,7 @@ impl DifficultyCache { timestamps.len() ); - let diff = DifficultyCache { + let diff = Self { timestamps, cumulative_difficulties, last_accounted_height: chain_height - 1, @@ -203,8 +203,8 @@ impl DifficultyCache { /// Returns the required difficulty for the next block. 
/// - /// See: https://cuprate.github.io/monero-book/consensus_rules/blocks/difficulty.html#calculating-difficulty - pub fn next_difficulty(&self, hf: &HardFork) -> u128 { + /// See: + pub fn next_difficulty(&self, hf: HardFork) -> u128 { next_difficulty( &self.config, &self.timestamps, @@ -223,7 +223,7 @@ impl DifficultyCache { pub fn next_difficulties( &self, blocks: Vec<(u64, HardFork)>, - current_hf: &HardFork, + current_hf: HardFork, ) -> Vec { let mut timestamps = self.timestamps.clone(); let mut cumulative_difficulties = self.cumulative_difficulties.clone(); @@ -232,8 +232,6 @@ impl DifficultyCache { difficulties.push(self.next_difficulty(current_hf)); - let mut diff_info_popped = Vec::new(); - for (new_timestamp, hf) in blocks { timestamps.push_back(new_timestamp); @@ -241,17 +239,15 @@ impl DifficultyCache { cumulative_difficulties.push_back(last_cum_diff + *difficulties.last().unwrap()); if timestamps.len() > self.config.total_block_count() { - diff_info_popped.push(( - timestamps.pop_front().unwrap(), - cumulative_difficulties.pop_front().unwrap(), - )); + timestamps.pop_front().unwrap(); + cumulative_difficulties.pop_front().unwrap(); } difficulties.push(next_difficulty( &self.config, ×tamps, &cumulative_difficulties, - &hf, + hf, )); } @@ -295,12 +291,12 @@ impl DifficultyCache { } } -/// Calculates the next difficulty with the inputted config/timestamps/cumulative_difficulties. +/// Calculates the next difficulty with the inputted `config/timestamps/cumulative_difficulties`. fn next_difficulty( config: &DifficultyCacheConfig, timestamps: &VecDeque, cumulative_difficulties: &VecDeque, - hf: &HardFork, + hf: HardFork, ) -> u128 { if timestamps.len() <= 1 { return 1; diff --git a/consensus/src/context/hardforks.rs b/consensus/src/context/hardforks.rs index 682933d..16ae763 100644 --- a/consensus/src/context/hardforks.rs +++ b/consensus/src/context/hardforks.rs @@ -28,7 +28,7 @@ pub struct HardForkConfig { impl HardForkConfig { /// Config for main-net. - pub const fn main_net() -> HardForkConfig { + pub const fn main_net() -> Self { Self { info: HFsInfo::main_net(), window: DEFAULT_WINDOW_SIZE, @@ -36,7 +36,7 @@ impl HardForkConfig { } /// Config for stage-net. - pub const fn stage_net() -> HardForkConfig { + pub const fn stage_net() -> Self { Self { info: HFsInfo::stage_net(), window: DEFAULT_WINDOW_SIZE, @@ -44,7 +44,7 @@ impl HardForkConfig { } /// Config for test-net. - pub const fn test_net() -> HardForkConfig { + pub const fn test_net() -> Self { Self { info: HFsInfo::test_net(), window: DEFAULT_WINDOW_SIZE, @@ -54,7 +54,7 @@ impl HardForkConfig { /// A struct that keeps track of the current hard-fork and current votes. #[derive(Debug, Clone, Eq, PartialEq)] -pub struct HardForkState { +pub(crate) struct HardForkState { /// The current active hard-fork. pub(crate) current_hardfork: HardFork, @@ -83,7 +83,7 @@ impl HardForkState { get_votes_in_range(database.clone(), block_start..chain_height, config.window).await?; if chain_height > config.window { - debug_assert_eq!(votes.total_votes(), config.window) + debug_assert_eq!(votes.total_votes(), config.window); } let BlockchainResponse::BlockExtendedHeader(ext_header) = database @@ -97,7 +97,7 @@ impl HardForkState { let current_hardfork = ext_header.version; - let mut hfs = HardForkState { + let mut hfs = Self { config, current_hardfork, votes, @@ -122,7 +122,7 @@ impl HardForkState { /// # Invariant /// /// This _must_ only be used on a main-chain cache. 
- pub async fn pop_blocks_main_chain( + pub(crate) async fn pop_blocks_main_chain( &mut self, numb_blocks: usize, database: D, @@ -159,7 +159,7 @@ impl HardForkState { } /// Add a new block to the cache. - pub fn new_block(&mut self, vote: HardFork, height: usize) { + pub(crate) fn new_block(&mut self, vote: HardFork, height: usize) { // We don't _need_ to take in `height` but it's for safety, so we don't silently loose track // of blocks. assert_eq!(self.last_height + 1, height); @@ -183,7 +183,7 @@ impl HardForkState { /// Checks if the next hard-fork should be activated and activates it if it should. /// - /// https://cuprate.github.io/monero-docs/consensus_rules/hardforks.html#accepting-a-fork + /// fn check_set_new_hf(&mut self) { self.current_hardfork = self.votes.current_fork( &self.current_hardfork, @@ -194,7 +194,7 @@ impl HardForkState { } /// Returns the current hard-fork. - pub fn current_hardfork(&self) -> HardFork { + pub(crate) const fn current_hardfork(&self) -> HardFork { self.current_hardfork } } @@ -218,7 +218,7 @@ async fn get_votes_in_range( panic!("Database sent incorrect response!"); }; - for hf_info in vote_list.into_iter() { + for hf_info in vote_list { votes.add_vote_for_hf(&HardFork::from_vote(hf_info.vote)); } diff --git a/consensus/src/context/rx_vms.rs b/consensus/src/context/rx_vms.rs index b1ab102..c6375fc 100644 --- a/consensus/src/context/rx_vms.rs +++ b/consensus/src/context/rx_vms.rs @@ -1,6 +1,6 @@ -//! RandomX VM Cache +//! `RandomX` VM Cache //! -//! This module keeps track of the RandomX VM to calculate the next blocks PoW, if the block needs a randomX VM and potentially +//! This module keeps track of the `RandomX` VM to calculate the next blocks proof-of-work, if the block needs a randomX VM and potentially //! more VMs around this height. //! use std::{ @@ -34,11 +34,11 @@ const RX_SEEDS_CACHED: usize = 2; /// A multithreaded randomX VM. #[derive(Debug)] pub struct RandomXVm { - /// These RandomX VMs all share the same cache. + /// These `RandomX` VMs all share the same cache. vms: ThreadLocal, - /// The RandomX cache. + /// The `RandomX` cache. cache: RandomXCache, - /// The flags used to start the RandomX VMs. + /// The flags used to start the `RandomX` VMs. flags: RandomXFlag, } @@ -50,7 +50,7 @@ impl RandomXVm { let cache = RandomXCache::new(flags, seed.as_slice())?; - Ok(RandomXVm { + Ok(Self { vms: ThreadLocal::new(), cache, flags, @@ -69,10 +69,10 @@ impl RandomX for RandomXVm { } } -/// The randomX VMs cache, keeps the VM needed to calculate the current block's PoW hash (if a VM is needed) and a +/// The randomX VMs cache, keeps the VM needed to calculate the current block's proof-of-work hash (if a VM is needed) and a /// couple more around this VM. #[derive(Clone, Debug)] -pub struct RandomXVmCache { +pub(crate) struct RandomXVmCache { /// The top [`RX_SEEDS_CACHED`] RX seeds. pub(crate) seeds: VecDeque<(usize, [u8; 32])>, /// The VMs for `seeds` (if after hf 12, otherwise this will be empty). @@ -117,7 +117,7 @@ impl RandomXVmCache { HashMap::new() }; - Ok(RandomXVmCache { + Ok(Self { seeds, vms, cached_vm: None, @@ -125,14 +125,14 @@ impl RandomXVmCache { } /// Add a randomX VM to the cache, with the seed it was created with. - pub fn add_vm(&mut self, vm: ([u8; 32], Arc)) { + pub(crate) fn add_vm(&mut self, vm: ([u8; 32], Arc)) { self.cached_vm.replace(vm); } /// Creates a RX VM for an alt chain, looking at the main chain RX VMs to see if we can use one /// of them first. 
- pub async fn get_alt_vm( - &mut self, + pub(crate) async fn get_alt_vm( + &self, height: usize, chain: Chain, database: D, @@ -152,7 +152,7 @@ impl RandomXVmCache { break; }; - return Ok(vm.clone()); + return Ok(Arc::clone(vm)); } } @@ -161,8 +161,8 @@ impl RandomXVmCache { Ok(alt_vm) } - /// Get the main-chain RandomX VMs. - pub async fn get_vms(&mut self) -> HashMap> { + /// Get the main-chain `RandomX` VMs. + pub(crate) async fn get_vms(&mut self) -> HashMap> { match self.seeds.len().checked_sub(self.vms.len()) { // No difference in the amount of seeds to VMs. Some(0) => (), @@ -206,23 +206,23 @@ impl RandomXVmCache { }) .collect() }) - .await + .await; } } self.vms.clone() } - /// Removes all the RandomX VMs above the `new_height`. - pub fn pop_blocks_main_chain(&mut self, new_height: usize) { + /// Removes all the `RandomX` VMs above the `new_height`. + pub(crate) fn pop_blocks_main_chain(&mut self, new_height: usize) { self.seeds.retain(|(height, _)| *height < new_height); self.vms.retain(|height, _| *height < new_height); } /// Add a new block to the VM cache. /// - /// hash is the block hash not the blocks PoW hash. - pub fn new_block(&mut self, height: usize, hash: &[u8; 32]) { + /// hash is the block hash not the blocks proof-of-work hash. + pub(crate) fn new_block(&mut self, height: usize, hash: &[u8; 32]) { if is_randomx_seed_height(height) { tracing::debug!("Block {height} is a randomX seed height, adding it to the cache.",); @@ -235,7 +235,7 @@ impl RandomXVmCache { self.seeds .iter() .any(|(cached_height, _)| height == cached_height) - }) + }); } } } @@ -258,7 +258,7 @@ pub(crate) fn get_last_rx_seed_heights(mut last_height: usize, mut amount: usize // We don't include the lag as we only want seeds not the specific seed for this height. let seed_height = (last_height - 1) & !(RX_SEEDHASH_EPOCH_BLOCKS - 1); seeds.push(seed_height); - last_height = seed_height + last_height = seed_height; } seeds diff --git a/consensus/src/context/task.rs b/consensus/src/context/task.rs index bc54285..82b466c 100644 --- a/consensus/src/context/task.rs +++ b/consensus/src/context/task.rs @@ -36,7 +36,7 @@ pub(super) struct ContextTaskRequest { } /// The Context task that keeps the blockchain context and handles requests. -pub struct ContextTask { +pub(crate) struct ContextTask { /// A token used to invalidate previous contexts when a new /// block is added to the chain. current_validity_token: ValidityToken, @@ -65,7 +65,7 @@ pub struct ContextTask { impl ContextTask { /// Initialize the [`ContextTask`], this will need to pull a lot of data from the database so may take a /// while to complete. - pub async fn init_context( + pub(crate) async fn init_context( cfg: ContextConfig, mut database: D, ) -> Result { @@ -131,7 +131,7 @@ impl ContextTask { rx_vms::RandomXVmCache::init_from_chain_height(chain_height, ¤t_hf, db).await }); - let context_svc = ContextTask { + let context_svc = Self { current_validity_token: ValidityToken::new(), difficulty_cache: difficulty_cache_handle.await.unwrap()?, weight_cache: weight_cache_handle.await.unwrap()?, @@ -148,7 +148,7 @@ impl ContextTask { } /// Handles a [`BlockChainContextRequest`] and returns a [`BlockChainContextResponse`]. 
- pub async fn handle_req( + pub(crate) async fn handle_req( &mut self, req: BlockChainContextRequest, ) -> Result { @@ -164,17 +164,17 @@ impl ContextTask { context_to_verify_block: ContextToVerifyBlock { median_weight_for_block_reward: self .weight_cache - .median_for_block_reward(¤t_hf), + .median_for_block_reward(current_hf), effective_median_weight: self .weight_cache - .effective_median_block_weight(¤t_hf), + .effective_median_block_weight(current_hf), top_hash: self.top_block_hash, median_block_timestamp: self .difficulty_cache .median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)), chain_height: self.chain_height, current_hf, - next_difficulty: self.difficulty_cache.next_difficulty(¤t_hf), + next_difficulty: self.difficulty_cache.next_difficulty(current_hf), already_generated_coins: self.already_generated_coins, }, cumulative_difficulty: self.difficulty_cache.cumulative_difficulty(), @@ -191,7 +191,7 @@ impl ContextTask { let next_diffs = self .difficulty_cache - .next_difficulties(blocks, &self.hardfork_state.current_hardfork()); + .next_difficulties(blocks, self.hardfork_state.current_hardfork()); BlockChainContextResponse::BatchDifficulties(next_diffs) } BlockChainContextRequest::NewRXVM(vm) => { @@ -330,10 +330,10 @@ impl ContextTask { /// Run the [`ContextTask`], the task will listen for requests on the passed in channel. When the channel closes the /// task will finish. - pub async fn run(mut self, mut rx: mpsc::Receiver) { + pub(crate) async fn run(mut self, mut rx: mpsc::Receiver) { while let Some(req) = rx.recv().await { let res = self.handle_req(req.req).instrument(req.span).await; - let _ = req.tx.send(res); + drop(req.tx.send(res)); } tracing::info!("Shutting down blockchain context task."); diff --git a/consensus/src/context/tokens.rs b/consensus/src/context/tokens.rs index 882d3b5..d222303 100644 --- a/consensus/src/context/tokens.rs +++ b/consensus/src/context/tokens.rs @@ -15,8 +15,8 @@ pub struct ValidityToken { impl ValidityToken { /// Creates a new [`ValidityToken`] - pub fn new() -> ValidityToken { - ValidityToken { + pub fn new() -> Self { + Self { token: CancellationToken::new(), } } @@ -28,6 +28,6 @@ impl ValidityToken { /// Sets the data to invalid. pub fn set_data_invalid(self) { - self.token.cancel() + self.token.cancel(); } } diff --git a/consensus/src/context/weight.rs b/consensus/src/context/weight.rs index 4c89139..e95ae60 100644 --- a/consensus/src/context/weight.rs +++ b/consensus/src/context/weight.rs @@ -38,16 +38,16 @@ pub struct BlockWeightsCacheConfig { impl BlockWeightsCacheConfig { /// Creates a new [`BlockWeightsCacheConfig`] - pub const fn new(short_term_window: usize, long_term_window: usize) -> BlockWeightsCacheConfig { - BlockWeightsCacheConfig { + pub const fn new(short_term_window: usize, long_term_window: usize) -> Self { + Self { short_term_window, long_term_window, } } /// Returns the [`BlockWeightsCacheConfig`] for all networks (They are all the same as mainnet). 
- pub fn main_net() -> BlockWeightsCacheConfig { - BlockWeightsCacheConfig { + pub const fn main_net() -> Self { + Self { short_term_window: SHORT_TERM_WINDOW, long_term_window: LONG_TERM_WINDOW, } @@ -99,7 +99,7 @@ impl BlockWeightsCache { tracing::info!("Initialized block weight cache, chain-height: {:?}, long term weights length: {:?}, short term weights length: {:?}", chain_height, long_term_weights.len(), short_term_block_weights.len()); - Ok(BlockWeightsCache { + Ok(Self { short_term_block_weights: rayon_spawn_async(move || { RollingMedian::from_vec(short_term_block_weights, config.short_term_window) }) @@ -178,7 +178,7 @@ impl BlockWeightsCache { /// Add a new block to the cache. /// - /// The block_height **MUST** be one more than the last height the cache has + /// The `block_height` **MUST** be one more than the last height the cache has /// seen. pub fn new_block(&mut self, block_height: usize, block_weight: usize, long_term_weight: usize) { assert_eq!(self.tip_height + 1, block_height); @@ -208,8 +208,8 @@ impl BlockWeightsCache { /// Returns the effective median weight, used for block reward calculations and to calculate /// the block weight limit. /// - /// See: https://cuprate.github.io/monero-book/consensus_rules/blocks/weight_limit.html#calculating-effective-median-weight - pub fn effective_median_block_weight(&self, hf: &HardFork) -> usize { + /// See: + pub fn effective_median_block_weight(&self, hf: HardFork) -> usize { calculate_effective_median_block_weight( hf, self.median_short_term_weight(), @@ -219,9 +219,9 @@ impl BlockWeightsCache { /// Returns the median weight used to calculate block reward punishment. /// - /// https://cuprate.github.io/monero-book/consensus_rules/blocks/reward.html#calculating-block-reward - pub fn median_for_block_reward(&self, hf: &HardFork) -> usize { - if hf < &HardFork::V12 { + /// + pub fn median_for_block_reward(&self, hf: HardFork) -> usize { + if hf < HardFork::V12 { self.median_short_term_weight() } else { self.effective_median_block_weight(hf) @@ -232,17 +232,17 @@ impl BlockWeightsCache { /// Calculates the effective median with the long term and short term median. fn calculate_effective_median_block_weight( - hf: &HardFork, + hf: HardFork, median_short_term_weight: usize, median_long_term_weight: usize, ) -> usize { - if hf < &HardFork::V10 { + if hf < HardFork::V10 { return median_short_term_weight.max(penalty_free_zone(hf)); } let long_term_median = median_long_term_weight.max(PENALTY_FREE_ZONE_5); let short_term_median = median_short_term_weight; - let effective_median = if hf >= &HardFork::V10 && hf < &HardFork::V15 { + let effective_median = if hf >= HardFork::V10 && hf < HardFork::V15 { min( max(PENALTY_FREE_ZONE_5, short_term_median), 50 * long_term_median, @@ -258,19 +258,19 @@ fn calculate_effective_median_block_weight( } /// Calculates a blocks long term weight. 
-pub fn calculate_block_long_term_weight( - hf: &HardFork, +pub(crate) fn calculate_block_long_term_weight( + hf: HardFork, block_weight: usize, long_term_median: usize, ) -> usize { - if hf < &HardFork::V10 { + if hf < HardFork::V10 { return block_weight; } let long_term_median = max(penalty_free_zone(hf), long_term_median); let (short_term_constraint, adjusted_block_weight) = - if hf >= &HardFork::V10 && hf < &HardFork::V15 { + if hf >= HardFork::V10 && hf < HardFork::V15 { let stc = long_term_median + long_term_median * 2 / 5; (stc, block_weight) } else { diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 004285d..e104cec 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -10,6 +10,16 @@ //! implement a database you need to have a service which accepts [`BlockchainReadRequest`] and responds //! with [`BlockchainResponse`]. //! + +cfg_if::cfg_if! { + // Used in external `tests/`. + if #[cfg(test)] { + use cuprate_test_utils as _; + use curve25519_dalek as _; + use hex_literal as _; + } +} + use cuprate_consensus_rules::ConsensusError; mod batch_verifier; @@ -34,6 +44,7 @@ pub use cuprate_types::{ /// An Error returned from one of the consensus services. #[derive(Debug, thiserror::Error)] +#[expect(variant_size_differences)] pub enum ExtendedConsensusError { /// A consensus error. #[error("{0}")] @@ -53,7 +64,8 @@ pub enum ExtendedConsensusError { } /// Initialize the 2 verifier [`tower::Service`]s (block and transaction). -pub async fn initialize_verifier( +#[expect(clippy::type_complexity)] +pub fn initialize_verifier( database: D, ctx_svc: Ctx, ) -> Result< @@ -112,7 +124,7 @@ pub mod __private { Response = BlockchainResponse, Error = tower::BoxError, >, - > crate::Database for T + > Database for T where T::Future: Future> + Send + 'static, { diff --git a/consensus/src/tests.rs b/consensus/src/tests.rs index 13598be..0efef82 100644 --- a/consensus/src/tests.rs +++ b/consensus/src/tests.rs @@ -1,2 +1,2 @@ mod context; -pub mod mock_db; +pub(crate) mod mock_db; diff --git a/consensus/src/tests/context/data.rs b/consensus/src/tests/context/data.rs index baa591c..28f61a4 100644 --- a/consensus/src/tests/context/data.rs +++ b/consensus/src/tests/context/data.rs @@ -1,11 +1,12 @@ use cuprate_consensus_rules::HardFork; -pub static HFS_2688888_2689608: [(HardFork, HardFork); 720] = +pub(crate) static HFS_2688888_2689608: [(HardFork, HardFork); 720] = include!("./data/hfs_2688888_2689608"); -pub static HFS_2678808_2688888: [(HardFork, HardFork); 10080] = +pub(crate) static HFS_2678808_2688888: [(HardFork, HardFork); 10080] = include!("./data/hfs_2678808_2688888"); -pub static BW_2850000_3050000: [(usize, usize); 200_000] = include!("./data/bw_2850000_3050000"); +pub(crate) static BW_2850000_3050000: [(usize, usize); 200_000] = + include!("./data/bw_2850000_3050000"); -pub static DIF_3000000_3002000: [(u128, u64); 2000] = include!("./data/dif_3000000_3002000"); +pub(crate) static DIF_3000000_3002000: [(u128, u64); 2000] = include!("./data/dif_3000000_3002000"); diff --git a/consensus/src/tests/context/difficulty.rs b/consensus/src/tests/context/difficulty.rs index a79ae9b..d5027f5 100644 --- a/consensus/src/tests/context/difficulty.rs +++ b/consensus/src/tests/context/difficulty.rs @@ -17,7 +17,7 @@ const TEST_LAG: usize = 2; const TEST_TOTAL_ACCOUNTED_BLOCKS: usize = TEST_WINDOW + TEST_LAG; -pub const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig = +pub(crate) const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig = DifficultyCacheConfig::new(TEST_WINDOW, TEST_CUT, 
TEST_LAG); #[tokio::test] @@ -35,7 +35,7 @@ async fn first_3_blocks_fixed_difficulty() -> Result<(), tower::BoxError> { .await?; for height in 1..3 { - assert_eq!(difficulty_cache.next_difficulty(&HardFork::V1), 1); + assert_eq!(difficulty_cache.next_difficulty(HardFork::V1), 1); difficulty_cache.new_block(height, 0, u128::MAX); } Ok(()) @@ -66,7 +66,7 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> { for (cum_dif, timestamp) in DIF_3000000_3002000.iter().take(cfg.total_block_count()) { db_builder.add_block( DummyBlockExtendedHeader::default().with_difficulty_info(*timestamp, *cum_dif), - ) + ); } let mut diff_cache = DifficultyCache::init_from_chain_height( @@ -84,7 +84,7 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> { { let diff = diff_info[1].0 - diff_info[0].0; - assert_eq!(diff_cache.next_difficulty(&HardFork::V16), diff); + assert_eq!(diff_cache.next_difficulty(HardFork::V16), diff); diff_cache.new_block(3_000_720 + i, diff_info[1].1, diff_info[1].0); } @@ -139,22 +139,22 @@ proptest! { no_lag_cache.cumulative_difficulties.pop_front(); } // get the difficulty - let next_diff_no_lag = no_lag_cache.next_difficulty(&hf); + let next_diff_no_lag = no_lag_cache.next_difficulty(hf); for _ in 0..TEST_LAG { // add new blocks to the lagged cache diff_cache.new_block(diff_cache.last_accounted_height+1, timestamp, cumulative_difficulty); } // they both should now be the same - prop_assert_eq!(diff_cache.next_difficulty(&hf), next_diff_no_lag) + prop_assert_eq!(diff_cache.next_difficulty(hf), next_diff_no_lag); } #[test] fn next_difficulty_consistent(diff_cache in arb_difficulty_cache(TEST_TOTAL_ACCOUNTED_BLOCKS), hf in any::()) { - let first_call = diff_cache.next_difficulty(&hf); - prop_assert_eq!(first_call, diff_cache.next_difficulty(&hf)); - prop_assert_eq!(first_call, diff_cache.next_difficulty(&hf)); - prop_assert_eq!(first_call, diff_cache.next_difficulty(&hf)); + let first_call = diff_cache.next_difficulty(hf); + prop_assert_eq!(first_call, diff_cache.next_difficulty(hf)); + prop_assert_eq!(first_call, diff_cache.next_difficulty(hf)); + prop_assert_eq!(first_call, diff_cache.next_difficulty(hf)); } #[test] @@ -178,7 +178,7 @@ proptest! { #[test] fn window_size_kept_constant(mut diff_cache in arb_difficulty_cache(TEST_TOTAL_ACCOUNTED_BLOCKS), new_blocks in any::>()) { - for (timestamp, cumulative_difficulty) in new_blocks.into_iter() { + for (timestamp, cumulative_difficulty) in new_blocks { diff_cache.new_block(diff_cache.last_accounted_height+1, timestamp, cumulative_difficulty); prop_assert_eq!(diff_cache.timestamps.len(), TEST_TOTAL_ACCOUNTED_BLOCKS); prop_assert_eq!(diff_cache.cumulative_difficulties.len(), TEST_TOTAL_ACCOUNTED_BLOCKS); @@ -193,7 +193,7 @@ proptest! { ) { let cache = diff_cache.clone(); - diff_cache.next_difficulties(timestamps.into_iter().zip([hf].into_iter().cycle()).collect(), &hf); + diff_cache.next_difficulties(timestamps.into_iter().zip(std::iter::once(hf).cycle()).collect(), hf); prop_assert_eq!(diff_cache, cache); } @@ -204,12 +204,12 @@ proptest! 
{ timestamps in any_with::>(size_range(0..1000).lift()), hf in any::(), ) { - let timestamps: Vec<_> = timestamps.into_iter().zip([hf].into_iter().cycle()).collect(); + let timestamps: Vec<_> = timestamps.into_iter().zip(std::iter::once(hf).cycle()).collect(); - let diffs = diff_cache.next_difficulties(timestamps.clone(), &hf); + let diffs = diff_cache.next_difficulties(timestamps.clone(), hf); for (timestamp, diff) in timestamps.into_iter().zip(diffs.into_iter()) { - prop_assert_eq!(diff_cache.next_difficulty(×tamp.1), diff); + prop_assert_eq!(diff_cache.next_difficulty(timestamp.1), diff); diff_cache.new_block(diff_cache.last_accounted_height +1, timestamp.0, diff + diff_cache.cumulative_difficulty()); } @@ -226,7 +226,7 @@ proptest! { let blocks_to_pop = new_blocks.len(); let mut new_cache = old_cache.clone(); - for (timestamp, cumulative_difficulty) in new_blocks.into_iter() { + for (timestamp, cumulative_difficulty) in new_blocks { database.add_block(DummyBlockExtendedHeader::default().with_difficulty_info(timestamp, cumulative_difficulty)); new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty); } @@ -250,7 +250,7 @@ proptest! { let blocks_to_pop = new_blocks.len(); let mut new_cache = old_cache.clone(); - for (timestamp, cumulative_difficulty) in new_blocks.into_iter() { + for (timestamp, cumulative_difficulty) in new_blocks { database.add_block(DummyBlockExtendedHeader::default().with_difficulty_info(timestamp, cumulative_difficulty)); new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty); } diff --git a/consensus/src/tests/context/hardforks.rs b/consensus/src/tests/context/hardforks.rs index ffdff59..17bd47f 100644 --- a/consensus/src/tests/context/hardforks.rs +++ b/consensus/src/tests/context/hardforks.rs @@ -31,7 +31,7 @@ const TEST_HFS: [HFInfo; NUMB_OF_HARD_FORKS] = [ HFInfo::new(150, 0), ]; -pub const TEST_HARD_FORK_CONFIG: HardForkConfig = HardForkConfig { +pub(crate) const TEST_HARD_FORK_CONFIG: HardForkConfig = HardForkConfig { window: TEST_WINDOW_SIZE, info: HFsInfo::new(TEST_HFS), }; diff --git a/consensus/src/tests/context/rx_vms.rs b/consensus/src/tests/context/rx_vms.rs index 5c198cf..b1eba8e 100644 --- a/consensus/src/tests/context/rx_vms.rs +++ b/consensus/src/tests/context/rx_vms.rs @@ -39,6 +39,7 @@ fn rx_heights_consistent() { } #[tokio::test] +#[expect(unused_qualifications, reason = "false positive in tokio macro")] async fn rx_vm_created_on_hf_12() { let db = DummyDatabaseBuilder::default().finish(Some(10)); diff --git a/consensus/src/tests/context/weight.rs b/consensus/src/tests/context/weight.rs index 6706d97..b23f8f8 100644 --- a/consensus/src/tests/context/weight.rs +++ b/consensus/src/tests/context/weight.rs @@ -8,7 +8,8 @@ use crate::{ }; use cuprate_types::Chain; -pub const TEST_WEIGHT_CONFIG: BlockWeightsCacheConfig = BlockWeightsCacheConfig::new(100, 5000); +pub(crate) const TEST_WEIGHT_CONFIG: BlockWeightsCacheConfig = + BlockWeightsCacheConfig::new(100, 5000); #[tokio::test] async fn blocks_out_of_window_not_counted() -> Result<(), tower::BoxError> { @@ -157,7 +158,7 @@ async fn calc_bw_ltw_2850000_3050000() { for (i, (weight, ltw)) in BW_2850000_3050000.iter().skip(100_000).enumerate() { let calc_ltw = calculate_block_long_term_weight( - &HardFork::V16, + HardFork::V16, *weight, weight_cache.median_long_term_weight(), ); diff --git a/consensus/src/tests/mock_db.rs b/consensus/src/tests/mock_db.rs index a260cf0..5ca53d8 100644 --- a/consensus/src/tests/mock_db.rs +++ 
b/consensus/src/tests/mock_db.rs @@ -1,3 +1,5 @@ +#![expect(non_local_definitions, reason = "proptest macro")] + use std::{ future::Future, pin::Pin, @@ -60,7 +62,7 @@ pub struct DummyBlockExtendedHeader { impl From for ExtendedBlockHeader { fn from(value: DummyBlockExtendedHeader) -> Self { - ExtendedBlockHeader { + Self { version: value.version.unwrap_or(HardFork::V1), vote: value.vote.unwrap_or(HardFork::V1).as_u8(), timestamp: value.timestamp.unwrap_or_default(), @@ -72,31 +74,23 @@ impl From for ExtendedBlockHeader { } impl DummyBlockExtendedHeader { - pub fn with_weight_into( - mut self, - weight: usize, - long_term_weight: usize, - ) -> DummyBlockExtendedHeader { + pub const fn with_weight_into(mut self, weight: usize, long_term_weight: usize) -> Self { self.block_weight = Some(weight); self.long_term_weight = Some(long_term_weight); self } - pub fn with_hard_fork_info( - mut self, - version: HardFork, - vote: HardFork, - ) -> DummyBlockExtendedHeader { + pub const fn with_hard_fork_info(mut self, version: HardFork, vote: HardFork) -> Self { self.vote = Some(vote); self.version = Some(version); self } - pub fn with_difficulty_info( + pub const fn with_difficulty_info( mut self, timestamp: u64, cumulative_difficulty: u128, - ) -> DummyBlockExtendedHeader { + ) -> Self { self.timestamp = Some(timestamp); self.cumulative_difficulty = Some(cumulative_difficulty); self @@ -104,16 +98,16 @@ impl DummyBlockExtendedHeader { } #[derive(Debug, Default)] -pub struct DummyDatabaseBuilder { +pub(crate) struct DummyDatabaseBuilder { blocks: Vec, } impl DummyDatabaseBuilder { - pub fn add_block(&mut self, block: DummyBlockExtendedHeader) { + pub(crate) fn add_block(&mut self, block: DummyBlockExtendedHeader) { self.blocks.push(block); } - pub fn finish(self, dummy_height: Option) -> DummyDatabase { + pub(crate) fn finish(self, dummy_height: Option) -> DummyDatabase { DummyDatabase { blocks: Arc::new(self.blocks.into()), dummy_height, @@ -122,14 +116,15 @@ impl DummyDatabaseBuilder { } #[derive(Clone, Debug)] -pub struct DummyDatabase { +pub(crate) struct DummyDatabase { blocks: Arc>>, dummy_height: Option, } impl DummyDatabase { - pub fn add_block(&mut self, block: DummyBlockExtendedHeader) { - self.blocks.write().unwrap().push(block) + #[expect(clippy::needless_pass_by_ref_mut)] + pub(crate) fn add_block(&mut self, block: DummyBlockExtendedHeader) { + self.blocks.write().unwrap().push(block); } } @@ -144,7 +139,7 @@ impl Service for DummyDatabase { } fn call(&mut self, req: BlockchainReadRequest) -> Self::Future { - let blocks = self.blocks.clone(); + let blocks = Arc::clone(&self.blocks); let dummy_height = self.dummy_height; async move { diff --git a/consensus/src/transactions.rs b/consensus/src/transactions.rs index 09f6884..f29c852 100644 --- a/consensus/src/transactions.rs +++ b/consensus/src/transactions.rs @@ -5,7 +5,6 @@ use std::{ collections::HashSet, future::Future, - ops::Deref, pin::Pin, sync::Arc, task::{Context, Poll}, @@ -102,8 +101,8 @@ where D::Future: Send + 'static, { /// Creates a new [`TxVerifierService`]. 
- pub fn new(database: D) -> TxVerifierService { - TxVerifierService { database } + pub const fn new(database: D) -> Self { + Self { database } } } @@ -244,7 +243,7 @@ where if kis_spent { tracing::debug!("One or more key images in batch already spent."); - Err(ConsensusError::Transaction(TransactionError::KeyImageSpent))?; + return Err(ConsensusError::Transaction(TransactionError::KeyImageSpent).into()); } let mut verified_at_block_hashes = txs @@ -281,8 +280,8 @@ where let (txs_needing_full_verification, txs_needing_partial_verification) = transactions_needing_verification( txs, - verified_at_block_hashes, - &hf, + &verified_at_block_hashes, + hf, current_chain_height, time_for_time_lock, )?; @@ -302,11 +301,14 @@ where Ok(VerifyTxResponse::Ok) } -#[allow(clippy::type_complexity)] // I don't think the return is too complex +#[expect( + clippy::type_complexity, + reason = "I don't think the return is too complex" +)] fn transactions_needing_verification( txs: &[Arc], - hashes_in_main_chain: HashSet<[u8; 32]>, - current_hf: &HardFork, + hashes_in_main_chain: &HashSet<[u8; 32]>, + current_hf: HardFork, current_chain_height: usize, time_for_time_lock: u64, ) -> Result< @@ -321,27 +323,28 @@ fn transactions_needing_verification( // txs needing partial _contextual_ validation, not semantic. let mut partial_validation_transactions = Vec::new(); - for tx in txs.iter() { + for tx in txs { let guard = tx.cached_verification_state.lock().unwrap(); - match guard.deref() { + match &*guard { CachedVerificationState::NotVerified => { drop(guard); full_validation_transactions - .push((tx.clone(), VerificationNeeded::SemanticAndContextual)); + .push((Arc::clone(tx), VerificationNeeded::SemanticAndContextual)); continue; } CachedVerificationState::ValidAtHashAndHF { block_hash, hf } => { - if current_hf != hf { + if current_hf != *hf { drop(guard); full_validation_transactions - .push((tx.clone(), VerificationNeeded::SemanticAndContextual)); + .push((Arc::clone(tx), VerificationNeeded::SemanticAndContextual)); continue; } if !hashes_in_main_chain.contains(block_hash) { drop(guard); - full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual)); + full_validation_transactions + .push((Arc::clone(tx), VerificationNeeded::Contextual)); continue; } } @@ -350,21 +353,22 @@ fn transactions_needing_verification( hf, time_lock, } => { - if current_hf != hf { + if current_hf != *hf { drop(guard); full_validation_transactions - .push((tx.clone(), VerificationNeeded::SemanticAndContextual)); + .push((Arc::clone(tx), VerificationNeeded::SemanticAndContextual)); continue; } if !hashes_in_main_chain.contains(block_hash) { drop(guard); - full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual)); + full_validation_transactions + .push((Arc::clone(tx), VerificationNeeded::Contextual)); continue; } // If the time lock is still locked then the transaction is invalid. - if !output_unlocked(time_lock, current_chain_height, time_for_time_lock, hf) { + if !output_unlocked(time_lock, current_chain_height, time_for_time_lock, *hf) { return Err(ConsensusError::Transaction( TransactionError::OneOrMoreRingMembersLocked, )); @@ -374,7 +378,7 @@ fn transactions_needing_verification( if tx.version == TxVersion::RingSignatures { drop(guard); - partial_validation_transactions.push(tx.clone()); + partial_validation_transactions.push(Arc::clone(tx)); continue; } } @@ -400,7 +404,7 @@ where batch_get_decoy_info(&txs, hf, database) .await? 
- .try_for_each(|decoy_info| decoy_info.and_then(|di| Ok(check_decoy_info(&di, &hf)?)))?; + .try_for_each(|decoy_info| decoy_info.and_then(|di| Ok(check_decoy_info(&di, hf)?)))?; Ok(()) } @@ -417,7 +421,7 @@ where D: Database + Clone + Sync + Send + 'static, { let txs_ring_member_info = - batch_get_ring_member_info(txs.iter().map(|(tx, _)| tx), &hf, database).await?; + batch_get_ring_member_info(txs.iter().map(|(tx, _)| tx), hf, database).await?; rayon_spawn_async(move || { let batch_verifier = MultiThreadedBatchVerifier::new(rayon::current_num_threads()); @@ -432,7 +436,7 @@ where tx.tx_blob.len(), tx.tx_weight, &tx.tx_hash, - &hf, + hf, &batch_verifier, )?; // make sure we calculated the right fee. @@ -445,7 +449,7 @@ where ring, current_chain_height, current_time_lock_timestamp, - &hf, + hf, )?; Ok::<_, ConsensusError>(()) diff --git a/consensus/src/transactions/contextual_data.rs b/consensus/src/transactions/contextual_data.rs index 82f9976..66c53b3 100644 --- a/consensus/src/transactions/contextual_data.rs +++ b/consensus/src/transactions/contextual_data.rs @@ -57,7 +57,7 @@ fn get_ring_members_for_inputs( }) .collect::>()?) } - _ => Err(TransactionError::IncorrectInputType), + Input::Gen(_) => Err(TransactionError::IncorrectInputType), }) .collect::>() } @@ -143,7 +143,7 @@ fn new_rings( /// them. pub async fn batch_get_ring_member_info( txs_verification_data: impl Iterator> + Clone, - hf: &HardFork, + hf: HardFork, mut database: D, ) -> Result, ExtendedConsensusError> { let mut output_ids = HashMap::new(); @@ -183,14 +183,14 @@ pub async fn batch_get_ring_member_info( ) .map_err(ConsensusError::Transaction)?; - let decoy_info = if hf != &HardFork::V1 { + let decoy_info = if hf == HardFork::V1 { + None + } else { // this data is only needed after hard-fork 1. Some( DecoyInfo::new(&tx_v_data.tx.prefix().inputs, numb_outputs, hf) .map_err(ConsensusError::Transaction)?, ) - } else { - None }; new_ring_member_info(ring_members_for_tx, decoy_info, tx_v_data.version) @@ -224,7 +224,7 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>( .flat_map(|tx_info| { tx_info.tx.prefix().inputs.iter().map(|input| match input { Input::ToKey { amount, .. } => amount.unwrap_or(0), - _ => 0, + Input::Gen(_) => 0, }) }) .collect::>(); @@ -249,7 +249,7 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>( DecoyInfo::new( &tx_v_data.tx.prefix().inputs, |amt| outputs_with_amount.get(&amt).copied().unwrap_or(0), - &hf, + hf, ) .map_err(ConsensusError::Transaction) })) diff --git a/consensus/src/transactions/free.rs b/consensus/src/transactions/free.rs index 67b675a..3613f29 100644 --- a/consensus/src/transactions/free.rs +++ b/consensus/src/transactions/free.rs @@ -39,7 +39,7 @@ pub fn new_tx_verification_data( /// Calculates the weight of a [`Transaction`]. /// /// This is more efficient that [`Transaction::weight`] if you already have the transaction blob. -pub fn tx_weight(tx: &Transaction, tx_blob: &[u8]) -> usize { +pub(crate) fn tx_weight(tx: &Transaction, tx_blob: &[u8]) -> usize { // the tx weight is only different from the blobs length for bp(+) txs. match &tx { @@ -64,7 +64,7 @@ pub fn tx_weight(tx: &Transaction, tx_blob: &[u8]) -> usize { } /// Calculates the fee of the [`Transaction`]. 
-pub fn tx_fee(tx: &Transaction) -> Result { +pub(crate) fn tx_fee(tx: &Transaction) -> Result { let mut fee = 0_u64; match &tx { diff --git a/consensus/tests/verify_correct_txs.rs b/consensus/tests/verify_correct_txs.rs index 7afb370..4d6c179 100644 --- a/consensus/tests/verify_correct_txs.rs +++ b/consensus/tests/verify_correct_txs.rs @@ -1,3 +1,6 @@ +#![expect(unused_crate_dependencies, reason = "external test module")] +#![expect(clippy::allow_attributes, reason = "usage inside macro")] + use std::{ collections::{BTreeMap, HashMap}, future::ready, @@ -29,7 +32,7 @@ fn dummy_database(outputs: BTreeMap) -> impl Database + Clon BlockchainResponse::NumberOutputsWithAmount(HashMap::new()) } BlockchainReadRequest::Outputs(outs) => { - let idxs = outs.get(&0).unwrap(); + let idxs = &outs[&0]; let mut ret = HashMap::new(); diff --git a/helper/Cargo.toml b/helper/Cargo.toml index 997aa27..614bdb2 100644 --- a/helper/Cargo.toml +++ b/helper/Cargo.toml @@ -9,8 +9,8 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/consensus" [features] -# All features on by default. -default = ["std", "atomic", "asynch", "cast", "fs", "num", "map", "time", "thread", "constants"] +# All features off by default. +default = [] std = [] atomic = ["dep:crossbeam"] asynch = ["dep:futures", "dep:rayon"] @@ -21,6 +21,7 @@ num = [] map = ["cast", "dep:monero-serai"] time = ["dep:chrono", "std"] thread = ["std", "dep:target_os_lib"] +tx = ["dep:monero-serai"] [dependencies] crossbeam = { workspace = true, optional = true } @@ -39,7 +40,8 @@ target_os_lib = { package = "windows", version = ">=0.51", features = ["Win32_Sy target_os_lib = { package = "libc", version = "0.2.158", optional = true } [dev-dependencies] -tokio = { workspace = true, features = ["full"] } +tokio = { workspace = true, features = ["full"] } +curve25519-dalek = { workspace = true } [lints] workspace = true diff --git a/helper/src/atomic.rs b/helper/src/atomic.rs index 4795896..aa66c0c 100644 --- a/helper/src/atomic.rs +++ b/helper/src/atomic.rs @@ -5,9 +5,6 @@ //---------------------------------------------------------------------------------------------------- Use use crossbeam::atomic::AtomicCell; -#[allow(unused_imports)] // docs -use std::sync::atomic::{Ordering, Ordering::Acquire, Ordering::Release}; - //---------------------------------------------------------------------------------------------------- Atomic Float /// Compile-time assertion that our floats are /// lock-free for the target we're building for. @@ -31,9 +28,13 @@ const _: () = { /// This is an alias for /// [`crossbeam::atomic::AtomicCell`](https://docs.rs/crossbeam/latest/crossbeam/atomic/struct.AtomicCell.html). /// -/// Note that there are no [`Ordering`] parameters, -/// atomic loads use [`Acquire`], -/// and atomic stores use [`Release`]. +/// Note that there are no [Ordering] parameters, +/// atomic loads use [Acquire], +/// and atomic stores use [Release]. +/// +/// [Ordering]: std::sync::atomic::Ordering +/// [Acquire]: std::sync::atomic::Ordering::Acquire +/// [Release]: std::sync::atomic::Ordering::Release pub type AtomicF32 = AtomicCell; /// An atomic [`f64`]. @@ -41,9 +42,13 @@ pub type AtomicF32 = AtomicCell; /// This is an alias for /// [`crossbeam::atomic::AtomicCell`](https://docs.rs/crossbeam/latest/crossbeam/atomic/struct.AtomicCell.html). /// -/// Note that there are no [`Ordering`] parameters, -/// atomic loads use [`Acquire`], -/// and atomic stores use [`Release`]. 
+/// Note that there are no [Ordering] parameters, +/// atomic loads use [Acquire], +/// and atomic stores use [Release]. +/// +/// [Ordering]: std::sync::atomic::Ordering +/// [Acquire]: std::sync::atomic::Ordering::Acquire +/// [Release]: std::sync::atomic::Ordering::Release pub type AtomicF64 = AtomicCell; //---------------------------------------------------------------------------------------------------- TESTS diff --git a/helper/src/lib.rs b/helper/src/lib.rs index de0d955..f29c499 100644 --- a/helper/src/lib.rs +++ b/helper/src/lib.rs @@ -31,6 +31,8 @@ pub mod thread; #[cfg(feature = "time")] pub mod time; +#[cfg(feature = "tx")] +pub mod tx; //---------------------------------------------------------------------------------------------------- Private Usage //---------------------------------------------------------------------------------------------------- diff --git a/helper/src/map.rs b/helper/src/map.rs index 7805ea6..8cf0978 100644 --- a/helper/src/map.rs +++ b/helper/src/map.rs @@ -29,7 +29,7 @@ use crate::cast::{u64_to_usize, usize_to_u64}; /// ``` #[inline] pub const fn split_u128_into_low_high_bits(value: u128) -> (u64, u64) { - #[allow(clippy::cast_possible_truncation)] + #[expect(clippy::cast_possible_truncation)] (value as u64, (value >> 64) as u64) } diff --git a/helper/src/num.rs b/helper/src/num.rs index 674ed35..399c38d 100644 --- a/helper/src/num.rs +++ b/helper/src/num.rs @@ -91,7 +91,7 @@ where /// /// # Invariant /// If not sorted the output will be invalid. -#[allow(clippy::debug_assert_with_mut_call)] +#[expect(clippy::debug_assert_with_mut_call)] pub fn median(array: impl AsRef<[T]>) -> T where T: Add diff --git a/helper/src/thread.rs b/helper/src/thread.rs index 04a2606..8ba025d 100644 --- a/helper/src/thread.rs +++ b/helper/src/thread.rs @@ -6,7 +6,6 @@ use std::{cmp::max, num::NonZeroUsize}; //---------------------------------------------------------------------------------------------------- Thread Count & Percent -#[allow(non_snake_case)] /// Get the total amount of system threads. /// /// ```rust @@ -28,10 +27,15 @@ macro_rules! 
impl_thread_percent { $( $(#[$doc])* pub fn $fn_name() -> NonZeroUsize { - // unwrap here is okay because: - // - THREADS().get() is always non-zero - // - max() guards against 0 - #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss, clippy::cast_precision_loss)] + // unwrap here is okay because: + // - THREADS().get() is always non-zero + // - max() guards against 0 + #[expect( + clippy::cast_possible_truncation, + clippy::cast_sign_loss, + clippy::cast_precision_loss, + reason = "we need to round integers" + )] NonZeroUsize::new(max(1, (threads().get() as f64 * $percent).floor() as usize)).unwrap() } )* diff --git a/helper/src/time.rs b/helper/src/time.rs index ce39c2d..c7b12c2 100644 --- a/helper/src/time.rs +++ b/helper/src/time.rs @@ -129,7 +129,7 @@ pub const fn secs_to_clock(seconds: u32) -> (u8, u8, u8) { debug_assert!(m < 60); debug_assert!(s < 60); - #[allow(clippy::cast_possible_truncation)] // checked above + #[expect(clippy::cast_possible_truncation, reason = "checked above")] (h as u8, m, s) } @@ -154,7 +154,7 @@ pub fn time() -> u32 { /// /// This is guaranteed to return a value between `0..=86399` pub fn time_utc() -> u32 { - #[allow(clippy::cast_sign_loss)] // checked in function calls + #[expect(clippy::cast_sign_loss, reason = "checked in function calls")] unix_clock(chrono::offset::Local::now().timestamp() as u64) } diff --git a/helper/src/tx.rs b/helper/src/tx.rs new file mode 100644 index 0000000..53706ec --- /dev/null +++ b/helper/src/tx.rs @@ -0,0 +1,70 @@ +//! Utils for working with [`Transaction`] + +use monero_serai::transaction::{Input, Transaction}; + +/// Calculates the fee of the [`Transaction`]. +/// +/// # Panics +/// This will panic if the inputs overflow or the transaction outputs too much, so should only +/// be used on known to be valid txs. +pub fn tx_fee(tx: &Transaction) -> u64 { + let mut fee = 0_u64; + + match &tx { + Transaction::V1 { prefix, .. } => { + for input in &prefix.inputs { + match input { + Input::Gen(_) => return 0, + Input::ToKey { amount, .. } => { + fee = fee.checked_add(amount.unwrap_or(0)).unwrap(); + } + } + } + + for output in &prefix.outputs { + fee = fee.checked_sub(output.amount.unwrap_or(0)).unwrap(); + } + } + Transaction::V2 { proofs, .. 
} => { + fee = proofs.as_ref().unwrap().base.fee; + } + }; + + fee +} + +#[cfg(test)] +mod test { + use curve25519_dalek::{edwards::CompressedEdwardsY, EdwardsPoint}; + use monero_serai::transaction::{NotPruned, Output, Timelock, TransactionPrefix}; + + use super::*; + + #[test] + #[should_panic(expected = "called `Option::unwrap()` on a `None` value")] + fn tx_fee_panic() { + let input = Input::ToKey { + amount: Some(u64::MAX), + key_offsets: vec![], + key_image: EdwardsPoint::default(), + }; + + let output = Output { + amount: Some(u64::MAX), + key: CompressedEdwardsY::default(), + view_tag: None, + }; + + let tx = Transaction::::V1 { + prefix: TransactionPrefix { + additional_timelock: Timelock::None, + inputs: vec![input; 2], + outputs: vec![output], + extra: vec![], + }, + signatures: vec![], + }; + + tx_fee(&tx); + } +} diff --git a/net/epee-encoding/Cargo.toml b/net/epee-encoding/Cargo.toml index d4a4aca..c021e42 100644 --- a/net/epee-encoding/Cargo.toml +++ b/net/epee-encoding/Cargo.toml @@ -25,3 +25,6 @@ thiserror = { workspace = true, optional = true} [dev-dependencies] hex = { workspace = true, features = ["default"] } + +[lints] +workspace = true \ No newline at end of file diff --git a/net/epee-encoding/src/container_as_blob.rs b/net/epee-encoding/src/container_as_blob.rs index 701ec66..83078c2 100644 --- a/net/epee-encoding/src/container_as_blob.rs +++ b/net/epee-encoding/src/container_as_blob.rs @@ -9,7 +9,7 @@ pub struct ContainerAsBlob(Vec); impl From> for ContainerAsBlob { fn from(value: Vec) -> Self { - ContainerAsBlob(value) + Self(value) } } @@ -36,9 +36,7 @@ impl EpeeValue for ContainerAsBlob { )); } - Ok(ContainerAsBlob( - bytes.chunks(T::SIZE).map(T::from_bytes).collect(), - )) + Ok(Self(bytes.chunks(T::SIZE).map(T::from_bytes).collect())) } fn should_write(&self) -> bool { @@ -46,10 +44,10 @@ impl EpeeValue for ContainerAsBlob { } fn epee_default_value() -> Option { - Some(ContainerAsBlob(vec![])) + Some(Self(vec![])) } - fn write(self, w: &mut B) -> crate::Result<()> { + fn write(self, w: &mut B) -> Result<()> { let mut buf = BytesMut::with_capacity(self.0.len() * T::SIZE); self.0.iter().for_each(|tt| tt.push_bytes(&mut buf)); buf.write(w) diff --git a/net/epee-encoding/src/error.rs b/net/epee-encoding/src/error.rs index 4b3c7b0..756cd13 100644 --- a/net/epee-encoding/src/error.rs +++ b/net/epee-encoding/src/error.rs @@ -7,6 +7,7 @@ use core::{ pub type Result = core::result::Result; #[cfg_attr(feature = "std", derive(thiserror::Error))] +#[expect(clippy::error_impl_error, reason = "FIXME: rename this type")] pub enum Error { #[cfg_attr(feature = "std", error("IO error: {0}"))] IO(&'static str), @@ -17,19 +18,18 @@ pub enum Error { } impl Error { - fn field_name(&self) -> &'static str { + const fn field_name(&self) -> &'static str { match self { - Error::IO(_) => "io", - Error::Format(_) => "format", - Error::Value(_) => "value", + Self::IO(_) => "io", + Self::Format(_) => "format", + Self::Value(_) => "value", } } fn field_data(&self) -> &str { match self { - Error::IO(data) => data, - Error::Format(data) => data, - Error::Value(data) => data, + Self::IO(data) | Self::Format(data) => data, + Self::Value(data) => data, } } } @@ -44,12 +44,12 @@ impl Debug for Error { impl From for Error { fn from(_: TryFromIntError) -> Self { - Error::Value("Int is too large".to_string()) + Self::Value("Int is too large".to_string()) } } impl From for Error { fn from(_: Utf8Error) -> Self { - Error::Value("Invalid utf8 str".to_string()) + Self::Value("Invalid utf8 
str".to_string()) } } diff --git a/net/epee-encoding/src/io.rs b/net/epee-encoding/src/io.rs index 110a1ec..c118145 100644 --- a/net/epee-encoding/src/io.rs +++ b/net/epee-encoding/src/io.rs @@ -3,7 +3,7 @@ use bytes::{Buf, BufMut}; use crate::error::*; #[inline] -pub fn checked_read_primitive( +pub(crate) fn checked_read_primitive( b: &mut B, read: impl Fn(&mut B) -> R, ) -> Result { @@ -11,16 +11,20 @@ pub fn checked_read_primitive( } #[inline] -pub fn checked_read(b: &mut B, read: impl Fn(&mut B) -> R, size: usize) -> Result { +pub(crate) fn checked_read( + b: &mut B, + read: impl Fn(&mut B) -> R, + size: usize, +) -> Result { if b.remaining() < size { - Err(Error::IO("Not enough bytes in buffer to build object."))?; + Err(Error::IO("Not enough bytes in buffer to build object.")) + } else { + Ok(read(b)) } - - Ok(read(b)) } #[inline] -pub fn checked_write_primitive( +pub(crate) fn checked_write_primitive( b: &mut B, write: impl Fn(&mut B, T), t: T, @@ -29,16 +33,16 @@ pub fn checked_write_primitive( } #[inline] -pub fn checked_write( +pub(crate) fn checked_write( b: &mut B, write: impl Fn(&mut B, T), t: T, size: usize, ) -> Result<()> { if b.remaining_mut() < size { - Err(Error::IO("Not enough capacity to write object."))?; + Err(Error::IO("Not enough capacity to write object.")) + } else { + write(b, t); + Ok(()) } - - write(b, t); - Ok(()) } diff --git a/net/epee-encoding/src/lib.rs b/net/epee-encoding/src/lib.rs index fa3449b..d55a546 100644 --- a/net/epee-encoding/src/lib.rs +++ b/net/epee-encoding/src/lib.rs @@ -59,9 +59,12 @@ //! //! ``` +#[cfg(test)] +use hex as _; + extern crate alloc; -use core::{ops::Deref, str::from_utf8 as str_from_utf8}; +use core::str::from_utf8 as str_from_utf8; use bytes::{Buf, BufMut, Bytes, BytesMut}; @@ -130,7 +133,7 @@ pub fn to_bytes(val: T) -> Result { fn read_header(r: &mut B) -> Result<()> { let buf = checked_read(r, |b: &mut B| b.copy_to_bytes(HEADER.len()), HEADER.len())?; - if buf.deref() != HEADER { + if &*buf != HEADER { return Err(Error::Format("Data does not contain header")); } Ok(()) @@ -185,7 +188,7 @@ fn read_object(r: &mut B, skipped_objects: &mut u8) -> Re for _ in 0..number_o_field { let field_name_bytes = read_field_name_bytes(r)?; - let field_name = str_from_utf8(field_name_bytes.deref())?; + let field_name = str_from_utf8(&field_name_bytes)?; if !object_builder.add_field(field_name, r)? { skip_epee_value(r, skipped_objects)?; @@ -289,7 +292,7 @@ where B: BufMut, { write_varint(usize_to_u64(iterator.len()), w)?; - for item in iterator.into_iter() { + for item in iterator { item.write(w)?; } Ok(()) @@ -329,10 +332,7 @@ impl EpeeObject for SkipObject { fn skip_epee_value(r: &mut B, skipped_objects: &mut u8) -> Result<()> { let marker = read_marker(r)?; - let mut len = 1; - if marker.is_seq { - len = read_varint(r)?; - } + let len = if marker.is_seq { read_varint(r)? 
} else { 1 }; if let Some(size) = marker.inner_marker.size() { let bytes_to_skip = size diff --git a/net/epee-encoding/src/marker.rs b/net/epee-encoding/src/marker.rs index d8ffc4b..16eaa6a 100644 --- a/net/epee-encoding/src/marker.rs +++ b/net/epee-encoding/src/marker.rs @@ -19,13 +19,13 @@ pub enum InnerMarker { } impl InnerMarker { - pub fn size(&self) -> Option { + pub const fn size(&self) -> Option { Some(match self { - InnerMarker::I64 | InnerMarker::U64 | InnerMarker::F64 => 8, - InnerMarker::I32 | InnerMarker::U32 => 4, - InnerMarker::I16 | InnerMarker::U16 => 2, - InnerMarker::I8 | InnerMarker::U8 | InnerMarker::Bool => 1, - InnerMarker::String | InnerMarker::Object => return None, + Self::I64 | Self::U64 | Self::F64 => 8, + Self::I32 | Self::U32 => 4, + Self::I16 | Self::U16 => 2, + Self::I8 | Self::U8 | Self::Bool => 1, + Self::String | Self::Object => return None, }) } } @@ -40,23 +40,23 @@ pub struct Marker { impl Marker { pub(crate) const fn new(inner_marker: InnerMarker) -> Self { - Marker { + Self { inner_marker, is_seq: false, } } + + #[must_use] pub const fn into_seq(self) -> Self { - if self.is_seq { - panic!("Sequence of sequence not allowed!"); - } + assert!(!self.is_seq, "Sequence of sequence not allowed!"); if matches!(self.inner_marker, InnerMarker::U8) { - return Marker { + return Self { inner_marker: InnerMarker::String, is_seq: false, }; } - Marker { + Self { inner_marker: self.inner_marker, is_seq: true, } @@ -112,7 +112,7 @@ impl TryFrom for Marker { _ => return Err(Error::Format("Unknown value Marker")), }; - Ok(Marker { + Ok(Self { inner_marker, is_seq, }) diff --git a/net/epee-encoding/src/value.rs b/net/epee-encoding/src/value.rs index 000d89c..816203e 100644 --- a/net/epee-encoding/src/value.rs +++ b/net/epee-encoding/src/value.rs @@ -71,7 +71,7 @@ impl EpeeValue for Vec { let individual_marker = Marker::new(marker.inner_marker); - let mut res = Vec::with_capacity(len); + let mut res = Self::with_capacity(len); for _ in 0..len { res.push(T::read(r, &individual_marker)?); } @@ -83,7 +83,7 @@ impl EpeeValue for Vec { } fn epee_default_value() -> Option { - Some(Vec::new()) + Some(Self::new()) } fn write(self, w: &mut B) -> Result<()> { @@ -181,7 +181,7 @@ impl EpeeValue for Vec { } fn epee_default_value() -> Option { - Some(Vec::new()) + Some(Self::new()) } fn should_write(&self) -> bool { @@ -216,7 +216,7 @@ impl EpeeValue for Bytes { } fn epee_default_value() -> Option { - Some(Bytes::new()) + Some(Self::new()) } fn should_write(&self) -> bool { @@ -247,14 +247,14 @@ impl EpeeValue for BytesMut { return Err(Error::IO("Not enough bytes to fill object")); } - let mut bytes = BytesMut::zeroed(len); + let mut bytes = Self::zeroed(len); r.copy_to_slice(&mut bytes); Ok(bytes) } fn epee_default_value() -> Option { - Some(BytesMut::new()) + Some(Self::new()) } fn should_write(&self) -> bool { @@ -285,12 +285,11 @@ impl EpeeValue for ByteArrayVec { return Err(Error::IO("Not enough bytes to fill object")); } - ByteArrayVec::try_from(r.copy_to_bytes(len)) - .map_err(|_| Error::Format("Field has invalid length")) + Self::try_from(r.copy_to_bytes(len)).map_err(|_| Error::Format("Field has invalid length")) } fn epee_default_value() -> Option { - Some(ByteArrayVec::try_from(Bytes::new()).unwrap()) + Some(Self::try_from(Bytes::new()).unwrap()) } fn should_write(&self) -> bool { @@ -320,8 +319,7 @@ impl EpeeValue for ByteArray { return Err(Error::IO("Not enough bytes to fill object")); } - ByteArray::try_from(r.copy_to_bytes(N)) - .map_err(|_| Error::Format("Field has 
invalid length")) + Self::try_from(r.copy_to_bytes(N)).map_err(|_| Error::Format("Field has invalid length")) } fn write(self, w: &mut B) -> Result<()> { @@ -335,7 +333,7 @@ impl EpeeValue for String { fn read(r: &mut B, marker: &Marker) -> Result { let bytes = Vec::::read(r, marker)?; - String::from_utf8(bytes).map_err(|_| Error::Format("Invalid string")) + Self::from_utf8(bytes).map_err(|_| Error::Format("Invalid string")) } fn should_write(&self) -> bool { @@ -343,7 +341,7 @@ impl EpeeValue for String { } fn epee_default_value() -> Option { - Some(String::new()) + Some(Self::new()) } fn write(self, w: &mut B) -> Result<()> { @@ -383,7 +381,7 @@ impl EpeeValue for Vec<[u8; N]> { let individual_marker = Marker::new(marker.inner_marker); - let mut res = Vec::with_capacity(len); + let mut res = Self::with_capacity(len); for _ in 0..len { res.push(<[u8; N]>::read(r, &individual_marker)?); } @@ -395,7 +393,7 @@ impl EpeeValue for Vec<[u8; N]> { } fn epee_default_value() -> Option { - Some(Vec::new()) + Some(Self::new()) } fn write(self, w: &mut B) -> Result<()> { diff --git a/net/epee-encoding/src/varint.rs b/net/epee-encoding/src/varint.rs index ae9c569..3f191dc 100644 --- a/net/epee-encoding/src/varint.rs +++ b/net/epee-encoding/src/varint.rs @@ -21,14 +21,14 @@ const FITS_IN_FOUR_BYTES: u64 = 2_u64.pow(32 - SIZE_OF_SIZE_MARKER) - 1; /// ``` pub fn read_varint(r: &mut B) -> Result { if !r.has_remaining() { - Err(Error::IO("Not enough bytes to build VarInt"))? + return Err(Error::IO("Not enough bytes to build VarInt")); } let vi_start = r.get_u8(); let len = 1 << (vi_start & 0b11); if r.remaining() < len - 1 { - Err(Error::IO("Not enough bytes to build VarInt"))? + return Err(Error::IO("Not enough bytes to build VarInt")); } let mut vi = u64::from(vi_start >> 2); @@ -67,12 +67,15 @@ pub fn write_varint(number: u64, w: &mut B) -> Result<()> { }; if w.remaining_mut() < 1 << size_marker { - Err(Error::IO("Not enough capacity to write VarInt"))?; + return Err(Error::IO("Not enough capacity to write VarInt")); } let number = (number << 2) | size_marker; - // Although `as` is unsafe we just checked the length. + #[expect( + clippy::cast_possible_truncation, + reason = "Although `as` is unsafe we just checked the length." 
+ )] match size_marker { 0 => w.put_u8(number as u8), 1 => w.put_u16_le(number as u16), diff --git a/net/epee-encoding/tests/alt_name.rs b/net/epee-encoding/tests/alt_name.rs index 8a9bc6f..3ddd1ef 100644 --- a/net/epee-encoding/tests/alt_name.rs +++ b/net/epee-encoding/tests/alt_name.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; struct AltName { diff --git a/net/epee-encoding/tests/duplicate_key.rs b/net/epee-encoding/tests/duplicate_key.rs index 0ed87af..fd8ccc9 100644 --- a/net/epee-encoding/tests/duplicate_key.rs +++ b/net/epee-encoding/tests/duplicate_key.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes}; struct T { diff --git a/net/epee-encoding/tests/epee_default.rs b/net/epee-encoding/tests/epee_default.rs index c221b28..778bbc0 100644 --- a/net/epee-encoding/tests/epee_default.rs +++ b/net/epee-encoding/tests/epee_default.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; pub struct Optional { @@ -58,7 +60,7 @@ fn epee_non_default_does_encode() { let val: Optional = from_bytes(&mut bytes).unwrap(); assert_eq!(val.optional_val, -3); - assert_eq!(val.val, 8) + assert_eq!(val.val, 8); } #[test] @@ -70,5 +72,5 @@ fn epee_value_not_present_with_default() { let val: Optional = from_bytes(&mut bytes).unwrap(); assert_eq!(val.optional_val, -4); - assert_eq!(val.val, 76) + assert_eq!(val.val, 76); } diff --git a/net/epee-encoding/tests/flattened.rs b/net/epee-encoding/tests/flattened.rs index a737370..dfb951f 100644 --- a/net/epee-encoding/tests/flattened.rs +++ b/net/epee-encoding/tests/flattened.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; struct Child { @@ -37,6 +39,7 @@ epee_object!( ); #[test] +#[expect(clippy::float_cmp)] fn epee_flatten() { let val2 = ParentChild { h: 38.9, diff --git a/net/epee-encoding/tests/options.rs b/net/epee-encoding/tests/options.rs index 5bae9a9..d242124 100644 --- a/net/epee-encoding/tests/options.rs +++ b/net/epee-encoding/tests/options.rs @@ -1,5 +1,6 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; -use std::ops::Deref; #[derive(Clone)] struct T { @@ -28,6 +29,6 @@ fn optional_val_in_data() { ]; let t: T = from_bytes(&mut &bytes[..]).unwrap(); let bytes2 = to_bytes(t.clone()).unwrap(); - assert_eq!(bytes.as_slice(), bytes2.deref()); + assert_eq!(bytes.as_slice(), &*bytes2); assert_eq!(t.val.unwrap(), 21); } diff --git a/net/epee-encoding/tests/p2p.rs b/net/epee-encoding/tests/p2p.rs index 2f74ef6..ba17386 100644 --- a/net/epee-encoding/tests/p2p.rs +++ b/net/epee-encoding/tests/p2p.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; #[derive(Eq, PartialEq, Debug, Clone)] @@ -5,7 +7,7 @@ pub struct SupportFlags(u32); impl From for SupportFlags { fn from(value: u32) -> Self { - SupportFlags(value) + Self(value) } } diff --git a/net/epee-encoding/tests/rpc.rs b/net/epee-encoding/tests/rpc.rs index 973498e..b366854 100644 --- a/net/epee-encoding/tests/rpc.rs +++ b/net/epee-encoding/tests/rpc.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer 
test module")] + use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes}; #[derive(Clone, Debug, PartialEq)] diff --git a/net/epee-encoding/tests/seq.rs b/net/epee-encoding/tests/seq.rs index a4685d0..b4ae788 100644 --- a/net/epee-encoding/tests/seq.rs +++ b/net/epee-encoding/tests/seq.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes}; struct ObjSeq { diff --git a/net/epee-encoding/tests/stack_overflow.rs b/net/epee-encoding/tests/stack_overflow.rs index c53420a..78a1120 100644 --- a/net/epee-encoding/tests/stack_overflow.rs +++ b/net/epee-encoding/tests/stack_overflow.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "outer test module")] + use cuprate_epee_encoding::{epee_object, from_bytes}; struct D { @@ -737,5 +739,5 @@ fn stack_overflow() { let obj: Result = from_bytes(&mut bytes.as_slice()); - assert!(obj.is_err()) + assert!(obj.is_err()); } diff --git a/net/fixed-bytes/Cargo.toml b/net/fixed-bytes/Cargo.toml index 4c5a1af..7844570 100644 --- a/net/fixed-bytes/Cargo.toml +++ b/net/fixed-bytes/Cargo.toml @@ -17,3 +17,6 @@ serde = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] serde_json = { workspace = true, features = ["std"] } + +[lints] +workspace = true \ No newline at end of file diff --git a/net/fixed-bytes/src/lib.rs b/net/fixed-bytes/src/lib.rs index 2e8f1bc..b1b064b 100644 --- a/net/fixed-bytes/src/lib.rs +++ b/net/fixed-bytes/src/lib.rs @@ -22,17 +22,15 @@ pub enum FixedByteError { } impl FixedByteError { - fn field_name(&self) -> &'static str { + const fn field_name(&self) -> &'static str { match self { - FixedByteError::InvalidLength => "input", + Self::InvalidLength => "input", } } - fn field_data(&self) -> &'static str { + const fn field_data(&self) -> &'static str { match self { - FixedByteError::InvalidLength => { - "Cannot create fix byte array, input has invalid length." - } + Self::InvalidLength => "Cannot create fix byte array, input has invalid length.", } } } @@ -82,7 +80,7 @@ impl ByteArray { impl From<[u8; N]> for ByteArray { fn from(value: [u8; N]) -> Self { - ByteArray(Bytes::copy_from_slice(&value)) + Self(Bytes::copy_from_slice(&value)) } } @@ -101,7 +99,7 @@ impl TryFrom for ByteArray { if value.len() != N { return Err(FixedByteError::InvalidLength); } - Ok(ByteArray(value)) + Ok(Self(value)) } } @@ -112,7 +110,7 @@ impl TryFrom> for ByteArray { if value.len() != N { return Err(FixedByteError::InvalidLength); } - Ok(ByteArray(Bytes::from(value))) + Ok(Self(Bytes::from(value))) } } @@ -142,11 +140,11 @@ impl<'de, const N: usize> Deserialize<'de> for ByteArrayVec { } impl ByteArrayVec { - pub fn len(&self) -> usize { + pub const fn len(&self) -> usize { self.0.len() / N } - pub fn is_empty(&self) -> bool { + pub const fn is_empty(&self) -> bool { self.len() == 0 } @@ -162,6 +160,7 @@ impl ByteArrayVec { /// /// # Panics /// Panics if at > len. 
+ #[must_use] pub fn split_off(&mut self, at: usize) -> Self { Self(self.0.split_off(at * N)) } @@ -169,9 +168,9 @@ impl ByteArrayVec { impl From<&ByteArrayVec> for Vec<[u8; N]> { fn from(value: &ByteArrayVec) -> Self { - let mut out = Vec::with_capacity(value.len()); + let mut out = Self::with_capacity(value.len()); for i in 0..value.len() { - out.push(value[i]) + out.push(value[i]); } out @@ -181,11 +180,11 @@ impl From<&ByteArrayVec> for Vec<[u8; N]> { impl From> for ByteArrayVec { fn from(value: Vec<[u8; N]>) -> Self { let mut bytes = BytesMut::with_capacity(N * value.len()); - for i in value.into_iter() { - bytes.extend_from_slice(&i) + for i in value { + bytes.extend_from_slice(&i); } - ByteArrayVec(bytes.freeze()) + Self(bytes.freeze()) } } @@ -197,13 +196,13 @@ impl TryFrom for ByteArrayVec { return Err(FixedByteError::InvalidLength); } - Ok(ByteArrayVec(value)) + Ok(Self(value)) } } impl From<[u8; N]> for ByteArrayVec { fn from(value: [u8; N]) -> Self { - ByteArrayVec(Bytes::copy_from_slice(value.as_slice())) + Self(Bytes::copy_from_slice(value.as_slice())) } } @@ -211,11 +210,11 @@ impl From<[[u8; N]; LEN]> for ByteArrayVec fn from(value: [[u8; N]; LEN]) -> Self { let mut bytes = BytesMut::with_capacity(N * LEN); - for val in value.into_iter() { + for val in value { bytes.put_slice(val.as_slice()); } - ByteArrayVec(bytes.freeze()) + Self(bytes.freeze()) } } @@ -227,7 +226,7 @@ impl TryFrom> for ByteArrayVec { return Err(FixedByteError::InvalidLength); } - Ok(ByteArrayVec(Bytes::from(value))) + Ok(Self(Bytes::from(value))) } } @@ -235,9 +234,12 @@ impl Index for ByteArrayVec { type Output = [u8; N]; fn index(&self, index: usize) -> &Self::Output { - if (index + 1) * N > self.0.len() { - panic!("Index out of range, idx: {}, length: {}", index, self.len()); - } + assert!( + (index + 1) * N <= self.0.len(), + "Index out of range, idx: {}, length: {}", + index, + self.len() + ); self.0[index * N..(index + 1) * N] .as_ref() diff --git a/net/levin/Cargo.toml b/net/levin/Cargo.toml index 1c585b9..68c32e5 100644 --- a/net/levin/Cargo.toml +++ b/net/levin/Cargo.toml @@ -14,6 +14,7 @@ tracing = ["dep:tracing", "tokio-util/tracing"] [dependencies] cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] } +cfg-if = { workspace = true } thiserror = { workspace = true } bytes = { workspace = true, features = ["std"] } bitflags = { workspace = true } @@ -26,4 +27,7 @@ proptest = { workspace = true } rand = { workspace = true, features = ["std", "std_rng"] } tokio-util = { workspace = true, features = ["io-util"]} tokio = { workspace = true, features = ["full"] } -futures = { workspace = true, features = ["std"] } \ No newline at end of file +futures = { workspace = true, features = ["std"] } + +[lints] +workspace = true \ No newline at end of file diff --git a/net/levin/src/codec.rs b/net/levin/src/codec.rs index 1177733..4c7695e 100644 --- a/net/levin/src/codec.rs +++ b/net/levin/src/codec.rs @@ -47,7 +47,7 @@ pub struct LevinBucketCodec { impl Default for LevinBucketCodec { fn default() -> Self { - LevinBucketCodec { + Self { state: LevinBucketState::WaitingForHeader, protocol: Protocol::default(), handshake_message_seen: false, @@ -56,8 +56,8 @@ impl Default for LevinBucketCodec { } impl LevinBucketCodec { - pub fn new(protocol: Protocol) -> Self { - LevinBucketCodec { + pub const fn new(protocol: Protocol) -> Self { + Self { state: LevinBucketState::WaitingForHeader, protocol, handshake_message_seen: false, @@ -112,8 +112,10 @@ impl Decoder for 
LevinBucketCodec { } } - let _ = - std::mem::replace(&mut self.state, LevinBucketState::WaitingForBody(head)); + drop(std::mem::replace( + &mut self.state, + LevinBucketState::WaitingForBody(head), + )); } LevinBucketState::WaitingForBody(head) => { let body_len = u64_to_usize(head.size); @@ -145,7 +147,7 @@ impl Encoder> for LevinBucketCodec { type Error = BucketError; fn encode(&mut self, item: Bucket, dst: &mut BytesMut) -> Result<(), Self::Error> { if let Some(additional) = (HEADER_SIZE + item.body.len()).checked_sub(dst.capacity()) { - dst.reserve(additional) + dst.reserve(additional); } item.header.write_bytes_into(dst); diff --git a/net/levin/src/header.rs b/net/levin/src/header.rs index 7acd085..057eee8 100644 --- a/net/levin/src/header.rs +++ b/net/levin/src/header.rs @@ -13,7 +13,7 @@ // copies or substantial portions of the Software. // -//! This module provides a struct BucketHead for the header of a levin protocol +//! This module provides a struct `BucketHead` for the header of a levin protocol //! message. use bitflags::bitflags; @@ -62,7 +62,7 @@ bitflags! { impl From for Flags { fn from(value: u32) -> Self { - Flags(value) + Self(value) } } @@ -99,9 +99,9 @@ impl BucketHead { /// /// # Panics /// This function will panic if there aren't enough bytes to fill the header. - /// Currently [HEADER_SIZE] - pub fn from_bytes(buf: &mut BytesMut) -> BucketHead { - BucketHead { + /// Currently [`HEADER_SIZE`] + pub fn from_bytes(buf: &mut BytesMut) -> Self { + Self { signature: buf.get_u64_le(), size: buf.get_u64_le(), have_to_return_data: buf.get_u8() != 0, diff --git a/net/levin/src/lib.rs b/net/levin/src/lib.rs index ab03bfb..a3f4b69 100644 --- a/net/levin/src/lib.rs +++ b/net/levin/src/lib.rs @@ -33,6 +33,16 @@ #![deny(unused_mut)] //#![deny(missing_docs)] +cfg_if::cfg_if! { + // Used in `tests/`. 
+ if #[cfg(test)] { + use futures as _; + use proptest as _; + use rand as _; + use tokio as _; + } +} + use std::fmt::Debug; use bytes::{Buf, Bytes}; @@ -99,7 +109,7 @@ pub struct Protocol { impl Default for Protocol { fn default() -> Self { - Protocol { + Self { version: MONERO_PROTOCOL_VERSION, signature: MONERO_LEVIN_SIGNATURE, max_packet_size_before_handshake: MONERO_MAX_PACKET_SIZE_BEFORE_HANDSHAKE, @@ -130,22 +140,22 @@ pub enum MessageType { impl MessageType { /// Returns if the message requires a response - pub fn have_to_return_data(&self) -> bool { + pub const fn have_to_return_data(&self) -> bool { match self { - MessageType::Request => true, - MessageType::Response | MessageType::Notification => false, + Self::Request => true, + Self::Response | Self::Notification => false, } } - /// Returns the `MessageType` given the flags and have_to_return_data fields - pub fn from_flags_and_have_to_return( + /// Returns the `MessageType` given the flags and `have_to_return_data` fields + pub const fn from_flags_and_have_to_return( flags: Flags, have_to_return: bool, ) -> Result { Ok(match (flags, have_to_return) { - (Flags::REQUEST, true) => MessageType::Request, - (Flags::REQUEST, false) => MessageType::Notification, - (Flags::RESPONSE, false) => MessageType::Response, + (Flags::REQUEST, true) => Self::Request, + (Flags::REQUEST, false) => Self::Notification, + (Flags::RESPONSE, false) => Self::Response, _ => { return Err(BucketError::InvalidHeaderFlags( "Unable to assign a message type to this bucket", @@ -154,10 +164,10 @@ impl MessageType { }) } - pub fn as_flags(&self) -> header::Flags { + pub const fn as_flags(&self) -> Flags { match self { - MessageType::Request | MessageType::Notification => header::Flags::REQUEST, - MessageType::Response => header::Flags::RESPONSE, + Self::Request | Self::Notification => Flags::REQUEST, + Self::Response => Flags::RESPONSE, } } } @@ -173,7 +183,7 @@ pub struct BucketBuilder { } impl BucketBuilder { - pub fn new(protocol: &Protocol) -> Self { + pub const fn new(protocol: &Protocol) -> Self { Self { signature: Some(protocol.signature), ty: None, @@ -185,27 +195,27 @@ impl BucketBuilder { } pub fn set_signature(&mut self, sig: u64) { - self.signature = Some(sig) + self.signature = Some(sig); } pub fn set_message_type(&mut self, ty: MessageType) { - self.ty = Some(ty) + self.ty = Some(ty); } pub fn set_command(&mut self, command: C) { - self.command = Some(command) + self.command = Some(command); } pub fn set_return_code(&mut self, code: i32) { - self.return_code = Some(code) + self.return_code = Some(code); } pub fn set_protocol_version(&mut self, version: u32) { - self.protocol_version = Some(version) + self.protocol_version = Some(version); } pub fn set_body(&mut self, body: Bytes) { - self.body = Some(body) + self.body = Some(body); } pub fn finish(self) -> Bucket { diff --git a/net/levin/src/message.rs b/net/levin/src/message.rs index 19aa1b5..32be653 100644 --- a/net/levin/src/message.rs +++ b/net/levin/src/message.rs @@ -33,13 +33,13 @@ pub enum LevinMessage { impl From for LevinMessage { fn from(value: T) -> Self { - LevinMessage::Body(value) + Self::Body(value) } } impl From> for LevinMessage { fn from(value: Bucket) -> Self { - LevinMessage::Bucket(value) + Self::Bucket(value) } } @@ -58,7 +58,7 @@ pub struct Dummy(pub usize); impl From for LevinMessage { fn from(value: Dummy) -> Self { - LevinMessage::Dummy(value.0) + Self::Dummy(value.0) } } @@ -76,12 +76,11 @@ pub fn make_fragmented_messages( fragment_size: usize, message: T, ) -> 
Result>, BucketError> { - if fragment_size * 2 < HEADER_SIZE { - panic!( - "Fragment size: {fragment_size}, is too small, must be at least {}", - 2 * HEADER_SIZE - ); - } + assert!( + fragment_size * 2 >= HEADER_SIZE, + "Fragment size: {fragment_size}, is too small, must be at least {}", + 2 * HEADER_SIZE + ); let mut builder = BucketBuilder::new(protocol); message.encode(&mut builder)?; diff --git a/net/levin/tests/fragmented_message.rs b/net/levin/tests/fragmented_message.rs index 512fd46..f34b145 100644 --- a/net/levin/tests/fragmented_message.rs +++ b/net/levin/tests/fragmented_message.rs @@ -1,3 +1,9 @@ +#![expect( + clippy::tests_outside_test_module, + unused_crate_dependencies, + reason = "outer test module" +)] + use bytes::{Buf, BufMut, Bytes, BytesMut}; use futures::{SinkExt, StreamExt}; use proptest::{prelude::any_with, prop_assert_eq, proptest, sample::size_range}; @@ -58,12 +64,12 @@ impl LevinBody for TestBody { ) -> Result { let size = u64_to_usize(body.get_u64_le()); // bucket - Ok(TestBody::Bytes(size, body.copy_to_bytes(size))) + Ok(Self::Bytes(size, body.copy_to_bytes(size))) } fn encode(self, builder: &mut BucketBuilder) -> Result<(), BucketError> { match self { - TestBody::Bytes(len, bytes) => { + Self::Bytes(len, bytes) => { let mut buf = BytesMut::new(); buf.put_u64_le(len as u64); buf.extend_from_slice(bytes.as_ref()); @@ -141,12 +147,12 @@ proptest! { message2.extend_from_slice(&fragments[0].body[(33 + 8)..]); for frag in fragments.iter().skip(1) { - message2.extend_from_slice(frag.body.as_ref()) + message2.extend_from_slice(frag.body.as_ref()); } prop_assert_eq!(message.as_slice(), &message2[0..message.len()], "numb_fragments: {}", fragments.len()); - for byte in message2[message.len()..].iter(){ + for byte in &message2[message.len()..]{ prop_assert_eq!(*byte, 0); } } diff --git a/net/wire/Cargo.toml b/net/wire/Cargo.toml index cbeb551..0b77cf1 100644 --- a/net/wire/Cargo.toml +++ b/net/wire/Cargo.toml @@ -15,7 +15,7 @@ cuprate-levin = { path = "../levin" } cuprate-epee-encoding = { path = "../epee-encoding" } cuprate-fixed-bytes = { path = "../fixed-bytes" } cuprate-types = { path = "../../types", default-features = false, features = ["epee"] } -cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] } +cuprate-helper = { path = "../../helper", default-features = false, features = ["map"] } bitflags = { workspace = true, features = ["std"] } bytes = { workspace = true, features = ["std"] } @@ -24,3 +24,5 @@ thiserror = { workspace = true } [dev-dependencies] hex = { workspace = true, features = ["std"]} +[lints] +workspace = true diff --git a/net/wire/src/network_address.rs b/net/wire/src/network_address.rs index 632739a..ad599b7 100644 --- a/net/wire/src/network_address.rs +++ b/net/wire/src/network_address.rs @@ -51,38 +51,38 @@ impl EpeeObject for NetworkAddress { } impl NetworkAddress { - pub fn get_zone(&self) -> NetZone { + pub const fn get_zone(&self) -> NetZone { match self { - NetworkAddress::Clear(_) => NetZone::Public, + Self::Clear(_) => NetZone::Public, } } - pub fn is_loopback(&self) -> bool { + pub const fn is_loopback(&self) -> bool { // TODO false } - pub fn is_local(&self) -> bool { + pub const fn is_local(&self) -> bool { // TODO false } - pub fn port(&self) -> u16 { + pub const fn port(&self) -> u16 { match self { - NetworkAddress::Clear(ip) => ip.port(), + Self::Clear(ip) => ip.port(), } } } impl From for NetworkAddress { fn from(value: net::SocketAddrV4) -> Self { - NetworkAddress::Clear(value.into()) + 
Self::Clear(value.into()) } } impl From for NetworkAddress { fn from(value: net::SocketAddrV6) -> Self { - NetworkAddress::Clear(value.into()) + Self::Clear(value.into()) } } diff --git a/net/wire/src/network_address/epee_builder.rs b/net/wire/src/network_address/epee_builder.rs index 36db824..c1d1742 100644 --- a/net/wire/src/network_address/epee_builder.rs +++ b/net/wire/src/network_address/epee_builder.rs @@ -74,7 +74,7 @@ impl From for TaggedNetworkAddress { fn from(value: NetworkAddress) -> Self { match value { NetworkAddress::Clear(addr) => match addr { - SocketAddr::V4(addr) => TaggedNetworkAddress { + SocketAddr::V4(addr) => Self { ty: Some(1), addr: Some(AllFieldsNetworkAddress { m_ip: Some(u32::from_be_bytes(addr.ip().octets())), @@ -82,7 +82,7 @@ impl From for TaggedNetworkAddress { addr: None, }), }, - SocketAddr::V6(addr) => TaggedNetworkAddress { + SocketAddr::V6(addr) => Self { ty: Some(2), addr: Some(AllFieldsNetworkAddress { addr: Some(addr.ip().octets()), diff --git a/net/wire/src/p2p.rs b/net/wire/src/p2p.rs index 3829d17..a7cd784 100644 --- a/net/wire/src/p2p.rs +++ b/net/wire/src/p2p.rs @@ -55,27 +55,27 @@ pub enum LevinCommand { impl std::fmt::Display for LevinCommand { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - if let LevinCommand::Unknown(id) = self { - return f.write_str(&format!("unknown id: {}", id)); + if let Self::Unknown(id) = self { + return f.write_str(&format!("unknown id: {id}")); } f.write_str(match self { - LevinCommand::Handshake => "handshake", - LevinCommand::TimedSync => "timed sync", - LevinCommand::Ping => "ping", - LevinCommand::SupportFlags => "support flags", + Self::Handshake => "handshake", + Self::TimedSync => "timed sync", + Self::Ping => "ping", + Self::SupportFlags => "support flags", - LevinCommand::NewBlock => "new block", - LevinCommand::NewTransactions => "new transactions", - LevinCommand::GetObjectsRequest => "get objects request", - LevinCommand::GetObjectsResponse => "get objects response", - LevinCommand::ChainRequest => "chain request", - LevinCommand::ChainResponse => "chain response", - LevinCommand::NewFluffyBlock => "new fluffy block", - LevinCommand::FluffyMissingTxsRequest => "fluffy missing transaction request", - LevinCommand::GetTxPoolCompliment => "get transaction pool compliment", + Self::NewBlock => "new block", + Self::NewTransactions => "new transactions", + Self::GetObjectsRequest => "get objects request", + Self::GetObjectsResponse => "get objects response", + Self::ChainRequest => "chain request", + Self::ChainResponse => "chain response", + Self::NewFluffyBlock => "new fluffy block", + Self::FluffyMissingTxsRequest => "fluffy missing transaction request", + Self::GetTxPoolCompliment => "get transaction pool compliment", - LevinCommand::Unknown(_) => unreachable!(), + Self::Unknown(_) => unreachable!(), }) } } @@ -83,50 +83,51 @@ impl std::fmt::Display for LevinCommand { impl LevinCommandTrait for LevinCommand { fn bucket_size_limit(&self) -> u64 { // https://github.com/monero-project/monero/blob/00fd416a99686f0956361d1cd0337fe56e58d4a7/src/cryptonote_basic/connection_context.cpp#L37 + #[expect(clippy::match_same_arms, reason = "formatting is more clear")] match self { - LevinCommand::Handshake => 65536, - LevinCommand::TimedSync => 65536, - LevinCommand::Ping => 4096, - LevinCommand::SupportFlags => 4096, + Self::Handshake => 65536, + Self::TimedSync => 65536, + Self::Ping => 4096, + Self::SupportFlags => 4096, - LevinCommand::NewBlock => 1024 * 1024 * 128, // 128 MB (max packet is a bit less 
than 100 MB though) - LevinCommand::NewTransactions => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though) - LevinCommand::GetObjectsRequest => 1024 * 1024 * 2, // 2 MB - LevinCommand::GetObjectsResponse => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though) - LevinCommand::ChainRequest => 512 * 1024, // 512 kB - LevinCommand::ChainResponse => 1024 * 1024 * 4, // 4 MB - LevinCommand::NewFluffyBlock => 1024 * 1024 * 4, // 4 MB - LevinCommand::FluffyMissingTxsRequest => 1024 * 1024, // 1 MB - LevinCommand::GetTxPoolCompliment => 1024 * 1024 * 4, // 4 MB + Self::NewBlock => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though) + Self::NewTransactions => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though) + Self::GetObjectsRequest => 1024 * 1024 * 2, // 2 MB + Self::GetObjectsResponse => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though) + Self::ChainRequest => 512 * 1024, // 512 kB + Self::ChainResponse => 1024 * 1024 * 4, // 4 MB + Self::NewFluffyBlock => 1024 * 1024 * 4, // 4 MB + Self::FluffyMissingTxsRequest => 1024 * 1024, // 1 MB + Self::GetTxPoolCompliment => 1024 * 1024 * 4, // 4 MB - LevinCommand::Unknown(_) => u64::MAX, + Self::Unknown(_) => u64::MAX, } } fn is_handshake(&self) -> bool { - matches!(self, LevinCommand::Handshake) + matches!(self, Self::Handshake) } } impl From for LevinCommand { fn from(value: u32) -> Self { match value { - 1001 => LevinCommand::Handshake, - 1002 => LevinCommand::TimedSync, - 1003 => LevinCommand::Ping, - 1007 => LevinCommand::SupportFlags, + 1001 => Self::Handshake, + 1002 => Self::TimedSync, + 1003 => Self::Ping, + 1007 => Self::SupportFlags, - 2001 => LevinCommand::NewBlock, - 2002 => LevinCommand::NewTransactions, - 2003 => LevinCommand::GetObjectsRequest, - 2004 => LevinCommand::GetObjectsResponse, - 2006 => LevinCommand::ChainRequest, - 2007 => LevinCommand::ChainResponse, - 2008 => LevinCommand::NewFluffyBlock, - 2009 => LevinCommand::FluffyMissingTxsRequest, - 2010 => LevinCommand::GetTxPoolCompliment, + 2001 => Self::NewBlock, + 2002 => Self::NewTransactions, + 2003 => Self::GetObjectsRequest, + 2004 => Self::GetObjectsResponse, + 2006 => Self::ChainRequest, + 2007 => Self::ChainResponse, + 2008 => Self::NewFluffyBlock, + 2009 => Self::FluffyMissingTxsRequest, + 2010 => Self::GetTxPoolCompliment, - x => LevinCommand::Unknown(x), + x => Self::Unknown(x), } } } @@ -191,19 +192,19 @@ pub enum ProtocolMessage { } impl ProtocolMessage { - pub fn command(&self) -> LevinCommand { + pub const fn command(&self) -> LevinCommand { use LevinCommand as C; match self { - ProtocolMessage::NewBlock(_) => C::NewBlock, - ProtocolMessage::NewFluffyBlock(_) => C::NewFluffyBlock, - ProtocolMessage::GetObjectsRequest(_) => C::GetObjectsRequest, - ProtocolMessage::GetObjectsResponse(_) => C::GetObjectsResponse, - ProtocolMessage::ChainRequest(_) => C::ChainRequest, - ProtocolMessage::ChainEntryResponse(_) => C::ChainResponse, - ProtocolMessage::NewTransactions(_) => C::NewTransactions, - ProtocolMessage::FluffyMissingTransactionsRequest(_) => C::FluffyMissingTxsRequest, - ProtocolMessage::GetTxPoolCompliment(_) => C::GetTxPoolCompliment, + Self::NewBlock(_) => C::NewBlock, + Self::NewFluffyBlock(_) => C::NewFluffyBlock, + Self::GetObjectsRequest(_) => C::GetObjectsRequest, + Self::GetObjectsResponse(_) => C::GetObjectsResponse, + Self::ChainRequest(_) => C::ChainRequest, + Self::ChainEntryResponse(_) => C::ChainResponse, + Self::NewTransactions(_) => 
C::NewTransactions, + Self::FluffyMissingTransactionsRequest(_) => C::FluffyMissingTxsRequest, + Self::GetTxPoolCompliment(_) => C::GetTxPoolCompliment, } } @@ -230,26 +231,26 @@ impl ProtocolMessage { use LevinCommand as C; match self { - ProtocolMessage::NewBlock(val) => build_message(C::NewBlock, val, builder)?, - ProtocolMessage::NewTransactions(val) => { - build_message(C::NewTransactions, val, builder)? + Self::NewBlock(val) => build_message(C::NewBlock, val, builder)?, + Self::NewTransactions(val) => { + build_message(C::NewTransactions, val, builder)?; } - ProtocolMessage::GetObjectsRequest(val) => { - build_message(C::GetObjectsRequest, val, builder)? + Self::GetObjectsRequest(val) => { + build_message(C::GetObjectsRequest, val, builder)?; } - ProtocolMessage::GetObjectsResponse(val) => { - build_message(C::GetObjectsResponse, val, builder)? + Self::GetObjectsResponse(val) => { + build_message(C::GetObjectsResponse, val, builder)?; } - ProtocolMessage::ChainRequest(val) => build_message(C::ChainRequest, val, builder)?, - ProtocolMessage::ChainEntryResponse(val) => { - build_message(C::ChainResponse, val, builder)? + Self::ChainRequest(val) => build_message(C::ChainRequest, val, builder)?, + Self::ChainEntryResponse(val) => { + build_message(C::ChainResponse, val, builder)?; } - ProtocolMessage::NewFluffyBlock(val) => build_message(C::NewFluffyBlock, val, builder)?, - ProtocolMessage::FluffyMissingTransactionsRequest(val) => { - build_message(C::FluffyMissingTxsRequest, val, builder)? + Self::NewFluffyBlock(val) => build_message(C::NewFluffyBlock, val, builder)?, + Self::FluffyMissingTransactionsRequest(val) => { + build_message(C::FluffyMissingTxsRequest, val, builder)?; } - ProtocolMessage::GetTxPoolCompliment(val) => { - build_message(C::GetTxPoolCompliment, val, builder)? + Self::GetTxPoolCompliment(val) => { + build_message(C::GetTxPoolCompliment, val, builder)?; } } Ok(()) @@ -265,14 +266,14 @@ pub enum AdminRequestMessage { } impl AdminRequestMessage { - pub fn command(&self) -> LevinCommand { + pub const fn command(&self) -> LevinCommand { use LevinCommand as C; match self { - AdminRequestMessage::Handshake(_) => C::Handshake, - AdminRequestMessage::Ping => C::Ping, - AdminRequestMessage::SupportFlags => C::SupportFlags, - AdminRequestMessage::TimedSync(_) => C::TimedSync, + Self::Handshake(_) => C::Handshake, + Self::Ping => C::Ping, + Self::SupportFlags => C::SupportFlags, + Self::TimedSync(_) => C::TimedSync, } } @@ -286,13 +287,13 @@ impl AdminRequestMessage { cuprate_epee_encoding::from_bytes::(buf) .map_err(|e| BucketError::BodyDecodingError(e.into()))?; - AdminRequestMessage::Ping + Self::Ping } C::SupportFlags => { cuprate_epee_encoding::from_bytes::(buf) .map_err(|e| BucketError::BodyDecodingError(e.into()))?; - AdminRequestMessage::SupportFlags + Self::SupportFlags } _ => return Err(BucketError::UnknownCommand), }) @@ -302,11 +303,11 @@ impl AdminRequestMessage { use LevinCommand as C; match self { - AdminRequestMessage::Handshake(val) => build_message(C::Handshake, val, builder)?, - AdminRequestMessage::TimedSync(val) => build_message(C::TimedSync, val, builder)?, - AdminRequestMessage::Ping => build_message(C::Ping, EmptyMessage, builder)?, - AdminRequestMessage::SupportFlags => { - build_message(C::SupportFlags, EmptyMessage, builder)? 
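
The `build` hunks above and below add trailing semicolons to braced match arms that end in a `()`-returning `build_message(..)?` call. A hypothetical reduction of that pattern, assuming the motivation is a workspace lint in the spirit of `clippy::semicolon_if_nothing_returned`:

fn build(v: u8) -> Result<(), String> {
    // Stand-in for `build_message`: fallible, returns `()` on success.
    let send = |_: u8| -> Result<(), String> { Ok(()) };
    match v {
        0 => send(0)?,
        _ => {
            // The trailing `;` turns the arm's last call into a statement,
            // making the `()` result explicit.
            send(1)?;
        }
    }
    Ok(())
}

fn main() {
    assert!(build(0).is_ok() && build(9).is_ok());
}
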
+ Self::Handshake(val) => build_message(C::Handshake, val, builder)?, + Self::TimedSync(val) => build_message(C::TimedSync, val, builder)?, + Self::Ping => build_message(C::Ping, EmptyMessage, builder)?, + Self::SupportFlags => { + build_message(C::SupportFlags, EmptyMessage, builder)?; } } Ok(()) @@ -322,14 +323,14 @@ pub enum AdminResponseMessage { } impl AdminResponseMessage { - pub fn command(&self) -> LevinCommand { + pub const fn command(&self) -> LevinCommand { use LevinCommand as C; match self { - AdminResponseMessage::Handshake(_) => C::Handshake, - AdminResponseMessage::Ping(_) => C::Ping, - AdminResponseMessage::SupportFlags(_) => C::SupportFlags, - AdminResponseMessage::TimedSync(_) => C::TimedSync, + Self::Handshake(_) => C::Handshake, + Self::Ping(_) => C::Ping, + Self::SupportFlags(_) => C::SupportFlags, + Self::TimedSync(_) => C::TimedSync, } } @@ -349,11 +350,11 @@ impl AdminResponseMessage { use LevinCommand as C; match self { - AdminResponseMessage::Handshake(val) => build_message(C::Handshake, val, builder)?, - AdminResponseMessage::TimedSync(val) => build_message(C::TimedSync, val, builder)?, - AdminResponseMessage::Ping(val) => build_message(C::Ping, val, builder)?, - AdminResponseMessage::SupportFlags(val) => { - build_message(C::SupportFlags, val, builder)? + Self::Handshake(val) => build_message(C::Handshake, val, builder)?, + Self::TimedSync(val) => build_message(C::TimedSync, val, builder)?, + Self::Ping(val) => build_message(C::Ping, val, builder)?, + Self::SupportFlags(val) => { + build_message(C::SupportFlags, val, builder)?; } } Ok(()) @@ -368,23 +369,23 @@ pub enum Message { } impl Message { - pub fn is_request(&self) -> bool { - matches!(self, Message::Request(_)) + pub const fn is_request(&self) -> bool { + matches!(self, Self::Request(_)) } - pub fn is_response(&self) -> bool { - matches!(self, Message::Response(_)) + pub const fn is_response(&self) -> bool { + matches!(self, Self::Response(_)) } - pub fn is_protocol(&self) -> bool { - matches!(self, Message::Protocol(_)) + pub const fn is_protocol(&self) -> bool { + matches!(self, Self::Protocol(_)) } - pub fn command(&self) -> LevinCommand { + pub const fn command(&self) -> LevinCommand { match self { - Message::Request(mes) => mes.command(), - Message::Response(mes) => mes.command(), - Message::Protocol(mes) => mes.command(), + Self::Request(mes) => mes.command(), + Self::Response(mes) => mes.command(), + Self::Protocol(mes) => mes.command(), } } } @@ -398,27 +399,25 @@ impl LevinBody for Message { command: LevinCommand, ) -> Result { Ok(match typ { - MessageType::Request => Message::Request(AdminRequestMessage::decode(body, command)?), - MessageType::Response => { - Message::Response(AdminResponseMessage::decode(body, command)?) 
- } - MessageType::Notification => Message::Protocol(ProtocolMessage::decode(body, command)?), + MessageType::Request => Self::Request(AdminRequestMessage::decode(body, command)?), + MessageType::Response => Self::Response(AdminResponseMessage::decode(body, command)?), + MessageType::Notification => Self::Protocol(ProtocolMessage::decode(body, command)?), }) } fn encode(self, builder: &mut BucketBuilder) -> Result<(), BucketError> { match self { - Message::Protocol(pro) => { + Self::Protocol(pro) => { builder.set_message_type(MessageType::Notification); builder.set_return_code(0); pro.build(builder) } - Message::Request(req) => { + Self::Request(req) => { builder.set_message_type(MessageType::Request); builder.set_return_code(0); req.build(builder) } - Message::Response(res) => { + Self::Response(res) => { builder.set_message_type(MessageType::Response); builder.set_return_code(1); res.build(builder) diff --git a/net/wire/src/p2p/admin.rs b/net/wire/src/p2p/admin.rs index 173c293..67a8e21 100644 --- a/net/wire/src/p2p/admin.rs +++ b/net/wire/src/p2p/admin.rs @@ -45,7 +45,7 @@ pub struct HandshakeResponse { pub node_data: BasicNodeData, /// Core Sync Data pub payload_data: CoreSyncData, - /// PeerList + /// `PeerList` pub local_peerlist_new: Vec, } @@ -56,7 +56,7 @@ epee_object!( local_peerlist_new: Vec, ); -/// A TimedSync Request +/// A `TimedSync` Request #[derive(Debug, Clone, PartialEq, Eq)] pub struct TimedSyncRequest { /// Core Sync Data @@ -68,12 +68,12 @@ epee_object!( payload_data: CoreSyncData, ); -/// A TimedSync Response +/// A `TimedSync` Response #[derive(Debug, Clone, PartialEq, Eq)] pub struct TimedSyncResponse { /// Core Sync Data pub payload_data: CoreSyncData, - /// PeerList + /// `PeerList` pub local_peerlist_new: Vec, } diff --git a/net/wire/src/p2p/common.rs b/net/wire/src/p2p/common.rs index d585d07..d95a620 100644 --- a/net/wire/src/p2p/common.rs +++ b/net/wire/src/p2p/common.rs @@ -18,6 +18,7 @@ use bitflags::bitflags; use cuprate_epee_encoding::epee_object; +use cuprate_helper::map::split_u128_into_low_high_bits; pub use cuprate_types::{BlockCompleteEntry, PrunedTxBlobEntry, TransactionBlobs}; use crate::NetworkAddress; @@ -34,7 +35,7 @@ bitflags! { impl From for PeerSupportFlags { fn from(value: u32) -> Self { - PeerSupportFlags(value) + Self(value) } } @@ -113,16 +114,17 @@ epee_object! { } impl CoreSyncData { - pub fn new( + pub const fn new( cumulative_difficulty_128: u128, current_height: u64, pruning_seed: u32, top_id: [u8; 32], top_version: u8, - ) -> CoreSyncData { - let cumulative_difficulty = cumulative_difficulty_128 as u64; - let cumulative_difficulty_top64 = (cumulative_difficulty_128 >> 64) as u64; - CoreSyncData { + ) -> Self { + let (cumulative_difficulty, cumulative_difficulty_top64) = + split_u128_into_low_high_bits(cumulative_difficulty_128); + + Self { cumulative_difficulty, cumulative_difficulty_top64, current_height, @@ -139,7 +141,7 @@ impl CoreSyncData { } } -/// PeerListEntryBase, information kept on a peer which will be entered +/// `PeerListEntryBase`, information kept on a peer which will be entered /// in a peer list/store. 
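
`CoreSyncData::new` above now delegates the 128-bit difficulty split to `cuprate_helper::map::split_u128_into_low_high_bits` instead of casting inline. Reconstructed from that call site and from the inverse in `ChainResponse::cumulative_difficulty` below (a sketch, not the helper's actual source):

const fn split_u128_into_low_high_bits(v: u128) -> (u64, u64) {
    // Low half is a truncating cast; high half is the upper 64 bits.
    (v as u64, (v >> 64) as u64)
}

const fn combine(low: u64, high: u64) -> u128 {
    // Matches `cumulative_difficulty_top64 << 64 | cumulative_difficulty_low64`.
    ((high as u128) << 64) | low as u128
}

fn main() {
    let difficulty: u128 = (42u128 << 64) | 7;
    let (low, high) = split_u128_into_low_high_bits(difficulty);
    assert_eq!((low, high), (7, 42));
    assert_eq!(combine(low, high), difficulty);
}
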
#[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct PeerListEntryBase { diff --git a/net/wire/src/p2p/protocol.rs b/net/wire/src/p2p/protocol.rs index 73694d5..1d1d45a 100644 --- a/net/wire/src/p2p/protocol.rs +++ b/net/wire/src/p2p/protocol.rs @@ -127,7 +127,7 @@ pub struct ChainResponse { impl ChainResponse { #[inline] - pub fn cumulative_difficulty(&self) -> u128 { + pub const fn cumulative_difficulty(&self) -> u128 { let cumulative_difficulty = self.cumulative_difficulty_top64 as u128; cumulative_difficulty << 64 | self.cumulative_difficulty_low64 as u128 } @@ -159,7 +159,7 @@ epee_object!( current_blockchain_height: u64, ); -/// A request for Txs we are missing from our TxPool +/// A request for Txs we are missing from our `TxPool` #[derive(Debug, Clone, PartialEq, Eq)] pub struct FluffyMissingTransactionsRequest { /// The Block we are missing the Txs in @@ -177,7 +177,7 @@ epee_object!( missing_tx_indices: Vec as ContainerAsBlob, ); -/// TxPoolCompliment +/// `TxPoolCompliment` #[derive(Debug, Clone, PartialEq, Eq)] pub struct GetTxPoolCompliment { /// Tx Hashes diff --git a/p2p/address-book/Cargo.toml b/p2p/address-book/Cargo.toml index 9cff78a..0871163 100644 --- a/p2p/address-book/Cargo.toml +++ b/p2p/address-book/Cargo.toml @@ -8,7 +8,6 @@ authors = ["Boog900"] [dependencies] cuprate-pruning = { path = "../../pruning" } -cuprate-wire = { path= "../../net/wire" } cuprate-p2p-core = { path = "../p2p-core" } tower = { workspace = true, features = ["util"] } @@ -29,3 +28,6 @@ borsh = { workspace = true, features = ["derive", "std"]} cuprate-test-utils = {path = "../../test-utils"} tokio = { workspace = true, features = ["rt-multi-thread", "macros"]} + +[lints] +workspace = true \ No newline at end of file diff --git a/p2p/address-book/src/book.rs b/p2p/address-book/src/book.rs index 2f0ce6d..9c22981 100644 --- a/p2p/address-book/src/book.rs +++ b/p2p/address-book/src/book.rs @@ -36,7 +36,7 @@ use crate::{ mod tests; /// An entry in the connected list. -pub struct ConnectionPeerEntry { +pub(crate) struct ConnectionPeerEntry { addr: Option, id: u64, handle: ConnectionHandle, @@ -109,14 +109,14 @@ impl AddressBook { match handle.poll_unpin(cx) { Poll::Pending => return, Poll::Ready(Ok(Err(e))) => { - tracing::error!("Could not save peer list to disk, got error: {}", e) + tracing::error!("Could not save peer list to disk, got error: {e}"); } Poll::Ready(Err(e)) => { if e.is_panic() { panic::resume_unwind(e.into_panic()) } } - _ => (), + Poll::Ready(_) => (), } } // the task is finished. @@ -144,6 +144,7 @@ impl AddressBook { let mut internal_addr_disconnected = Vec::new(); let mut addrs_to_ban = Vec::new(); + #[expect(clippy::iter_over_hash_type, reason = "ordering doesn't matter here")] for (internal_addr, peer) in &mut self.connected_peers { if let Some(time) = peer.handle.check_should_ban() { match internal_addr { @@ -158,7 +159,7 @@ impl AddressBook { } } - for (addr, time) in addrs_to_ban.into_iter() { + for (addr, time) in addrs_to_ban { self.ban_peer(addr, time); } @@ -172,12 +173,7 @@ impl AddressBook { .remove(&addr); // If the amount of peers with this ban id is 0 remove the whole set. - if self - .connected_peers_ban_id - .get(&addr.ban_id()) - .unwrap() - .is_empty() - { + if self.connected_peers_ban_id[&addr.ban_id()].is_empty() { self.connected_peers_ban_id.remove(&addr.ban_id()); } // remove the peer from the anchor list. 
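
The address-book hunk above replaces a `get(..).unwrap()` chain with map indexing, but the surrounding pattern is unchanged: `connected_peers_ban_id` is a multimap from ban ID to addresses whose empty buckets are dropped eagerly. A self-contained sketch of that pattern, with hypothetical `u32`/`u64` stand-ins for the zone's ban-ID and address types:

use std::collections::{HashMap, HashSet};

struct BanIndex {
    by_ban_id: HashMap<u32, HashSet<u64>>,
}

impl BanIndex {
    /// Removes `addr` from its ban-ID bucket, dropping the bucket once empty
    /// so the map never accumulates dead keys.
    fn remove(&mut self, ban_id: u32, addr: u64) {
        if let Some(bucket) = self.by_ban_id.get_mut(&ban_id) {
            bucket.remove(&addr);
            if bucket.is_empty() {
                self.by_ban_id.remove(&ban_id);
            }
        }
    }
}

fn main() {
    let mut idx = BanIndex { by_ban_id: HashMap::new() };
    idx.by_ban_id.entry(1).or_default().extend([10u64, 11]);
    idx.remove(1, 10);
    assert!(idx.by_ban_id.contains_key(&1));
    idx.remove(1, 11);
    assert!(!idx.by_ban_id.contains_key(&1));
}
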
@@ -188,7 +184,7 @@ impl AddressBook { fn ban_peer(&mut self, addr: Z::Addr, time: Duration) { if self.banned_peers.contains_key(&addr.ban_id()) { - tracing::error!("Tried to ban peer twice, this shouldn't happen.") + tracing::error!("Tried to ban peer twice, this shouldn't happen."); } if let Some(connected_peers_with_ban_id) = self.connected_peers_ban_id.get(&addr.ban_id()) { @@ -242,10 +238,10 @@ impl AddressBook { peer_list.retain_mut(|peer| { peer.adr.make_canonical(); - if !peer.adr.should_add_to_peer_list() { - false - } else { + if peer.adr.should_add_to_peer_list() { !self.is_peer_banned(&peer.adr) + } else { + false } // TODO: check rpc/ p2p ports not the same }); @@ -391,7 +387,7 @@ impl Service> for AddressBook { rpc_credits_per_hash, }, ) - .map(|_| AddressBookResponse::Ok), + .map(|()| AddressBookResponse::Ok), AddressBookRequest::IncomingPeerList(peer_list) => { self.handle_incoming_peer_list(peer_list); Ok(AddressBookResponse::Ok) diff --git a/p2p/address-book/src/book/tests.rs b/p2p/address-book/src/book/tests.rs index 1abea04..aefbd84 100644 --- a/p2p/address-book/src/book/tests.rs +++ b/p2p/address-book/src/book/tests.rs @@ -109,7 +109,7 @@ async fn add_new_peer_already_connected() { }, ), Err(AddressBookError::PeerAlreadyConnected) - ) + ); } #[tokio::test] @@ -143,5 +143,5 @@ async fn banned_peer_removed_from_peer_lists() { .unwrap() .into_inner(), TestNetZoneAddr(1) - ) + ); } diff --git a/p2p/address-book/src/peer_list.rs b/p2p/address-book/src/peer_list.rs index f0a905a..9b98a8a 100644 --- a/p2p/address-book/src/peer_list.rs +++ b/p2p/address-book/src/peer_list.rs @@ -7,31 +7,31 @@ use cuprate_p2p_core::{services::ZoneSpecificPeerListEntryBase, NetZoneAddress, use cuprate_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT}; #[cfg(test)] -pub mod tests; +pub(crate) mod tests; /// A Peer list in the address book. /// /// This could either be the white list or gray list. #[derive(Debug)] -pub struct PeerList { +pub(crate) struct PeerList { /// The peers with their peer data. pub peers: IndexMap>, /// An index of Pruning seed to address, so can quickly grab peers with the blocks /// we want. /// - /// Pruning seeds are sorted by first their log_stripes and then their stripe. + /// Pruning seeds are sorted by first their `log_stripes` and then their stripe. /// This means the first peers in this list will store more blocks than peers /// later on. So when we need a peer with a certain block we look at the peers /// storing more blocks first then work our way to the peers storing less. /// pruning_seeds: BTreeMap>, - /// A hashmap linking ban_ids to addresses. + /// A hashmap linking `ban_ids` to addresses. ban_ids: HashMap<::BanID, Vec>, } impl PeerList { /// Creates a new peer list. 
- pub fn new(list: Vec>) -> PeerList { + pub(crate) fn new(list: Vec>) -> Self { let mut peers = IndexMap::with_capacity(list.len()); let mut pruning_seeds = BTreeMap::new(); let mut ban_ids = HashMap::with_capacity(list.len()); @@ -49,7 +49,7 @@ impl PeerList { peers.insert(peer.adr, peer); } - PeerList { + Self { peers, pruning_seeds, ban_ids, @@ -57,21 +57,20 @@ impl PeerList { } /// Gets the length of the peer list - pub fn len(&self) -> usize { + pub(crate) fn len(&self) -> usize { self.peers.len() } /// Adds a new peer to the peer list - pub fn add_new_peer(&mut self, peer: ZoneSpecificPeerListEntryBase) { + pub(crate) fn add_new_peer(&mut self, peer: ZoneSpecificPeerListEntryBase) { if self.peers.insert(peer.adr, peer).is_none() { - // It's more clear with this - #[allow(clippy::unwrap_or_default)] + #[expect(clippy::unwrap_or_default, reason = "It's more clear with this")] self.pruning_seeds .entry(peer.pruning_seed) .or_insert_with(Vec::new) .push(peer.adr); - #[allow(clippy::unwrap_or_default)] + #[expect(clippy::unwrap_or_default)] self.ban_ids .entry(peer.adr.ban_id()) .or_insert_with(Vec::new) @@ -85,7 +84,7 @@ impl PeerList { /// list. /// /// The given peer will be removed from the peer list. - pub fn take_random_peer( + pub(crate) fn take_random_peer( &mut self, r: &mut R, block_needed: Option, @@ -127,7 +126,7 @@ impl PeerList { None } - pub fn get_random_peers( + pub(crate) fn get_random_peers( &self, r: &mut R, len: usize, @@ -142,7 +141,7 @@ impl PeerList { } /// Returns a mutable reference to a peer. - pub fn get_peer_mut( + pub(crate) fn get_peer_mut( &mut self, peer: &Z::Addr, ) -> Option<&mut ZoneSpecificPeerListEntryBase> { @@ -150,7 +149,7 @@ impl PeerList { } /// Returns true if the list contains this peer. - pub fn contains_peer(&self, peer: &Z::Addr) -> bool { + pub(crate) fn contains_peer(&self, peer: &Z::Addr) -> bool { self.peers.contains_key(peer) } @@ -189,11 +188,11 @@ impl PeerList { /// MUST NOT BE USED ALONE fn remove_peer_from_all_idxs(&mut self, peer: &ZoneSpecificPeerListEntryBase) { self.remove_peer_pruning_idx(peer); - self.remove_peer_ban_idx(peer) + self.remove_peer_ban_idx(peer); } /// Removes a peer from the peer list - pub fn remove_peer( + pub(crate) fn remove_peer( &mut self, peer: &Z::Addr, ) -> Option> { @@ -203,7 +202,7 @@ impl PeerList { } /// Removes all peers with a specific ban id. - pub fn remove_peers_with_ban_id(&mut self, ban_id: &::BanID) { + pub(crate) fn remove_peers_with_ban_id(&mut self, ban_id: &::BanID) { let Some(addresses) = self.ban_ids.get(ban_id) else { // No peers to ban return; @@ -217,8 +216,8 @@ impl PeerList { /// Tries to reduce the peer list to `new_len`. /// /// This function could keep the list bigger than `new_len` if `must_keep_peers`s length - /// is larger than new_len, in that case we will remove as much as we can. - pub fn reduce_list(&mut self, must_keep_peers: &HashSet, new_len: usize) { + /// is larger than `new_len`, in that case we will remove as much as we can. 
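
`add_new_peer` above keeps `or_insert_with(Vec::new)` and now uses `#[expect(clippy::unwrap_or_default, ...)]` to silence the lint that would rewrite it to `or_default()`. The two spellings are equivalent; a standalone sketch (illustrative types, not the peer list's):

use std::collections::HashMap;

fn main() {
    let mut index: HashMap<u32, Vec<u64>> = HashMap::new();

    // Spelling the default out, as the peer list does:
    index.entry(7).or_insert_with(Vec::new).push(1);

    // What clippy::unwrap_or_default would prefer; identical behaviour:
    index.entry(7).or_default().push(2);

    assert_eq!(index[&7], vec![1, 2]);
}
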
+ pub(crate) fn reduce_list(&mut self, must_keep_peers: &HashSet, new_len: usize) { if new_len >= self.len() { return; } diff --git a/p2p/address-book/src/peer_list/tests.rs b/p2p/address-book/src/peer_list/tests.rs index 8d2d220..4b13ae7 100644 --- a/p2p/address-book/src/peer_list/tests.rs +++ b/p2p/address-book/src/peer_list/tests.rs @@ -14,7 +14,7 @@ fn make_fake_peer( ) -> ZoneSpecificPeerListEntryBase { ZoneSpecificPeerListEntryBase { adr: TestNetZoneAddr(id), - id: id as u64, + id: u64::from(id), last_seen: 0, pruning_seed: PruningSeed::decompress(pruning_seed.unwrap_or(0)).unwrap(), rpc_port: 0, @@ -22,14 +22,14 @@ fn make_fake_peer( } } -pub fn make_fake_peer_list( +pub(crate) fn make_fake_peer_list( start_idx: u32, numb_o_peers: u32, ) -> PeerList> { let mut peer_list = Vec::with_capacity(numb_o_peers as usize); for idx in start_idx..(start_idx + numb_o_peers) { - peer_list.push(make_fake_peer(idx, None)) + peer_list.push(make_fake_peer(idx, None)); } PeerList::new(peer_list) @@ -50,7 +50,7 @@ fn make_fake_peer_list_with_random_pruning_seeds( } else { r.gen_range(384..=391) }), - )) + )); } PeerList::new(peer_list) } @@ -70,7 +70,7 @@ fn peer_list_reduce_length() { #[test] fn peer_list_reduce_length_with_peers_we_need() { let mut peer_list = make_fake_peer_list(0, 500); - let must_keep_peers = HashSet::from_iter(peer_list.peers.keys().copied()); + let must_keep_peers = peer_list.peers.keys().copied().collect::>(); let target_len = 49; @@ -92,7 +92,7 @@ fn peer_list_remove_specific_peer() { let peers = peer_list.peers; for (_, addrs) in pruning_idxs { - addrs.iter().for_each(|adr| assert_ne!(adr, &peer.adr)) + addrs.iter().for_each(|adr| assert_ne!(adr, &peer.adr)); } assert!(!peers.contains_key(&peer.adr)); @@ -104,13 +104,13 @@ fn peer_list_pruning_idxs_are_correct() { let mut total_len = 0; for (seed, list) in peer_list.pruning_seeds { - for peer in list.iter() { + for peer in &list { assert_eq!(peer_list.peers.get(peer).unwrap().pruning_seed, seed); total_len += 1; } } - assert_eq!(total_len, peer_list.peers.len()) + assert_eq!(total_len, peer_list.peers.len()); } #[test] @@ -122,11 +122,7 @@ fn peer_list_add_new_peer() { assert_eq!(peer_list.len(), 11); assert_eq!(peer_list.peers.get(&new_peer.adr), Some(&new_peer)); - assert!(peer_list - .pruning_seeds - .get(&new_peer.pruning_seed) - .unwrap() - .contains(&new_peer.adr)); + assert!(peer_list.pruning_seeds[&new_peer.pruning_seed].contains(&new_peer.adr)); } #[test] @@ -164,7 +160,7 @@ fn peer_list_get_peer_with_block() { assert!(peer .pruning_seed .get_next_unpruned_block(1, 1_000_000) - .is_ok()) + .is_ok()); } #[test] diff --git a/p2p/address-book/src/store.rs b/p2p/address-book/src/store.rs index abc42d6..07c117e 100644 --- a/p2p/address-book/src/store.rs +++ b/p2p/address-book/src/store.rs @@ -1,3 +1,8 @@ +#![expect( + single_use_lifetimes, + reason = "false positive on generated derive code on `SerPeerDataV1`" +)] + use std::fs; use borsh::{from_slice, to_vec, BorshDeserialize, BorshSerialize}; @@ -21,7 +26,7 @@ struct DeserPeerDataV1 { gray_list: Vec>, } -pub fn save_peers_to_disk( +pub(crate) fn save_peers_to_disk( cfg: &AddressBookConfig, white_list: &PeerList, gray_list: &PeerList, @@ -38,7 +43,7 @@ pub fn save_peers_to_disk( spawn_blocking(move || fs::write(&file, &data)) } -pub async fn read_peers_from_disk( +pub(crate) async fn read_peers_from_disk( cfg: &AddressBookConfig, ) -> Result< ( diff --git a/p2p/dandelion-tower/Cargo.toml b/p2p/dandelion-tower/Cargo.toml index 976dad6..92e4915 100644 --- 
a/p2p/dandelion-tower/Cargo.toml +++ b/p2p/dandelion-tower/Cargo.toml @@ -24,4 +24,7 @@ thiserror = { workspace = true } [dev-dependencies] tokio = { workspace = true, features = ["rt-multi-thread", "macros", "sync"] } -proptest = { workspace = true, features = ["default"] } \ No newline at end of file +proptest = { workspace = true, features = ["default"] } + +[lints] +workspace = true \ No newline at end of file diff --git a/p2p/dandelion-tower/src/config.rs b/p2p/dandelion-tower/src/config.rs index 6266d60..46c780a 100644 --- a/p2p/dandelion-tower/src/config.rs +++ b/p2p/dandelion-tower/src/config.rs @@ -8,7 +8,7 @@ use std::{ /// (1 - ep) is the probability that a transaction travels for `k` hops before a nodes embargo timeout fires, this constant is (1 - ep). const EMBARGO_FULL_TRAVEL_PROBABILITY: f64 = 0.90; -/// The graph type to use for dandelion routing, the dandelion paper recommends [Graph::FourRegular]. +/// The graph type to use for dandelion routing, the dandelion paper recommends [`Graph::FourRegular`]. /// /// The decision between line graphs and 4-regular graphs depend on the priorities of the system, if /// linkability of transactions is a first order concern then line graphs may be better, however 4-regular graphs @@ -66,7 +66,7 @@ impl DandelionConfig { /// Returns the number of outbound peers to use to stem transactions. /// /// This value depends on the [`Graph`] chosen. - pub fn number_of_stems(&self) -> usize { + pub const fn number_of_stems(&self) -> usize { match self.graph { Graph::Line => 1, Graph::FourRegular => 2, diff --git a/p2p/dandelion-tower/src/lib.rs b/p2p/dandelion-tower/src/lib.rs index 60b5ea5..2c8de71 100644 --- a/p2p/dandelion-tower/src/lib.rs +++ b/p2p/dandelion-tower/src/lib.rs @@ -26,7 +26,7 @@ //! The diffuse service should have a request of [`DiffuseRequest`](traits::DiffuseRequest) and it's error //! should be [`tower::BoxError`]. //! -//! ## Outbound Peer TryStream +//! ## Outbound Peer `TryStream` //! //! The outbound peer [`TryStream`](futures::TryStream) should provide a stream of randomly selected outbound //! peers, these peers will then be used to route stem txs to. @@ -37,7 +37,7 @@ //! ## Peer Service //! //! This service represents a connection to an individual peer, this should be returned from the Outbound Peer -//! TryStream. This should immediately send the transaction to the peer when requested, it should _not_ set +//! `TryStream`. This should immediately send the transaction to the peer when requested, it should _not_ set //! a timer. //! //! The peer service should have a request of [`StemRequest`](traits::StemRequest) and its error diff --git a/p2p/dandelion-tower/src/pool/incoming_tx.rs b/p2p/dandelion-tower/src/pool/incoming_tx.rs index c9a30de..13cdffe 100644 --- a/p2p/dandelion-tower/src/pool/incoming_tx.rs +++ b/p2p/dandelion-tower/src/pool/incoming_tx.rs @@ -30,7 +30,7 @@ pub struct IncomingTxBuilder impl IncomingTxBuilder { /// Creates a new [`IncomingTxBuilder`]. 
- pub fn new(tx: Tx, tx_id: TxId) -> Self { + pub const fn new(tx: Tx, tx_id: TxId) -> Self { Self { tx, tx_id, diff --git a/p2p/dandelion-tower/src/pool/manager.rs b/p2p/dandelion-tower/src/pool/manager.rs index 9e1572e..2ac3302 100644 --- a/p2p/dandelion-tower/src/pool/manager.rs +++ b/p2p/dandelion-tower/src/pool/manager.rs @@ -88,9 +88,7 @@ where .insert(peer.clone()); } - let state = from - .map(|from| TxState::Stem { from }) - .unwrap_or(TxState::Local); + let state = from.map_or(TxState::Local, |from| TxState::Stem { from }); let fut = self .dandelion_router @@ -280,13 +278,15 @@ where }; if let Err(e) = self.handle_incoming_tx(tx, routing_state, tx_id).await { + #[expect(clippy::let_underscore_must_use, reason = "dropped receivers can be ignored")] let _ = res_tx.send(()); tracing::error!("Error handling transaction in dandelion pool: {e}"); return; } - let _ = res_tx.send(()); + #[expect(clippy::let_underscore_must_use)] + let _ = res_tx.send(()); } } } diff --git a/p2p/dandelion-tower/src/router.rs b/p2p/dandelion-tower/src/router.rs index edeccae..88702be 100644 --- a/p2p/dandelion-tower/src/router.rs +++ b/p2p/dandelion-tower/src/router.rs @@ -140,7 +140,7 @@ where State::Stem }; - DandelionRouter { + Self { outbound_peer_discover: Box::pin(outbound_peer_discover), broadcast_svc, current_state, @@ -198,7 +198,7 @@ where fn stem_tx( &mut self, tx: Tx, - from: Id, + from: &Id, ) -> BoxFuture<'static, Result> { if self.stem_peers.is_empty() { tracing::debug!("Stem peers are empty, fluffing stem transaction."); @@ -216,7 +216,7 @@ where }); let Some(peer) = self.stem_peers.get_mut(stem_route) else { - self.stem_routes.remove(&from); + self.stem_routes.remove(from); continue; }; @@ -302,7 +302,7 @@ where tracing::debug!( parent: span, "Peer returned an error on `poll_ready`: {e}, removing from router.", - ) + ); }) .is_ok(), Poll::Pending => { @@ -341,7 +341,7 @@ where State::Stem => { tracing::trace!(parent: &self.span, "Steming transaction"); - self.stem_tx(req.tx, from) + self.stem_tx(req.tx, &from) } }, TxState::Local => { diff --git a/p2p/dandelion-tower/src/tests/mod.rs b/p2p/dandelion-tower/src/tests/mod.rs index 1c6a3e0..601ee25 100644 --- a/p2p/dandelion-tower/src/tests/mod.rs +++ b/p2p/dandelion-tower/src/tests/mod.rs @@ -12,7 +12,7 @@ use crate::{ OutboundPeer, State, }; -pub fn mock_discover_svc() -> ( +pub(crate) fn mock_discover_svc() -> ( impl Stream< Item = Result< OutboundPeer< @@ -49,7 +49,7 @@ pub fn mock_discover_svc() -> ( (discover, rx) } -pub fn mock_broadcast_svc() -> ( +pub(crate) fn mock_broadcast_svc() -> ( impl Service< Req, Future = impl Future> + Send + 'static, @@ -70,8 +70,8 @@ pub fn mock_broadcast_svc() -> ( ) } -#[allow(clippy::type_complexity)] // just test code. 
-pub fn mock_in_memory_backing_pool< +#[expect(clippy::type_complexity, reason = "just test code.")] +pub(crate) fn mock_in_memory_backing_pool< Tx: Clone + Send + 'static, TxID: Clone + Hash + Eq + Send + 'static, >() -> ( @@ -85,11 +85,11 @@ pub fn mock_in_memory_backing_pool< Arc<std::sync::Mutex<HashMap<TxID, Tx>>>, ) { let txs = Arc::new(std::sync::Mutex::new(HashMap::new())); - let txs_2 = txs.clone(); + let txs_2 = Arc::clone(&txs); ( service_fn(move |req: TxStoreRequest<TxID>| { - let txs = txs.clone(); + let txs = Arc::clone(&txs); async move { match req { TxStoreRequest::Get(tx_id) => { diff --git a/p2p/dandelion-tower/src/tests/pool.rs b/p2p/dandelion-tower/src/tests/pool.rs index b7fa55e..70f642a 100644 --- a/p2p/dandelion-tower/src/tests/pool.rs +++ b/p2p/dandelion-tower/src/tests/pool.rs @@ -39,5 +39,5 @@ async fn basic_functionality() { // TODO: the DandelionPoolManager doesn't handle adding txs to the pool, add more tests here to test // all functionality. //assert!(pool.lock().unwrap().contains_key(&1)); - assert!(broadcast_rx.try_recv().is_ok()) + assert!(broadcast_rx.try_recv().is_ok()); } diff --git a/p2p/p2p-core/Cargo.toml b/p2p/p2p-core/Cargo.toml index 9ef8e24..8341fe9 100644 --- a/p2p/p2p-core/Cargo.toml +++ b/p2p/p2p-core/Cargo.toml @@ -14,13 +14,14 @@ cuprate-helper = { path = "../../helper", features = ["asynch"], default-feature cuprate-wire = { path = "../../net/wire", features = ["tracing"] } cuprate-pruning = { path = "../../pruning" } -tokio = { workspace = true, features = ["net", "sync", "macros", "time"]} +tokio = { workspace = true, features = ["net", "sync", "macros", "time", "rt", "rt-multi-thread"]} tokio-util = { workspace = true, features = ["codec"] } tokio-stream = { workspace = true, features = ["sync"]} futures = { workspace = true, features = ["std"] } async-trait = { workspace = true } tower = { workspace = true, features = ["util", "tracing"] } +cfg-if = { workspace = true } thiserror = { workspace = true } tracing = { workspace = true, features = ["std", "attributes"] } hex-literal = { workspace = true } @@ -28,9 +29,10 @@ hex-literal = { workspace = true } borsh = { workspace = true, features = ["derive", "std"], optional = true } [dev-dependencies] -cuprate-test-utils = {path = "../../test-utils"} +cuprate-test-utils = { path = "../../test-utils" } hex = { workspace = true, features = ["std"] } -tokio = { workspace = true, features = ["net", "rt-multi-thread", "rt", "macros"]} tokio-test = { workspace = true } -tracing-subscriber = { workspace = true } + +[lints] +workspace = true \ No newline at end of file diff --git a/p2p/p2p-core/src/client.rs b/p2p/p2p-core/src/client.rs index 662a8ee..8685189 100644 --- a/p2p/p2p-core/src/client.rs +++ b/p2p/p2p-core/src/client.rs @@ -43,8 +43,8 @@ pub enum InternalPeerID<A> { impl<A: NetZoneAddress> Display for InternalPeerID<A> { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { - InternalPeerID::KnownAddr(addr) => addr.fmt(f), - InternalPeerID::Unknown(id) => f.write_str(&format!("Unknown, ID: {id}")), + Self::KnownAddr(addr) => addr.fmt(f), + Self::Unknown(id) => f.write_str(&format!("Unknown, ID: {id}")), } } } @@ -113,7 +113,7 @@ impl<Z: NetworkZone> Client<Z> { fn set_err(&self, err: PeerError) -> tower::BoxError { let err_str = err.to_string(); match self.error.try_insert_err(err) { - Ok(_) => err_str, + Ok(()) => err_str, Err(e) => e.to_string(), } .into() @@ -169,9 +169,8 @@ impl<Z: NetworkZone> Service<PeerRequest> for Client<Z> { TrySendError::Closed(req) | TrySendError::Full(req) => { self.set_err(PeerError::ClientChannelClosed); - let _ = req - .response_channel -
.send(Err(PeerError::ClientChannelClosed.into())); + let resp = Err(PeerError::ClientChannelClosed.into()); + drop(req.response_channel.send(resp)); } } } @@ -216,7 +215,7 @@ where tracing::debug!("Sending back response"); - let _ = req.response_channel.send(Ok(res)); + drop(req.response_channel.send(Ok(res))); } } .instrument(task_span), diff --git a/p2p/p2p-core/src/client/connection.rs b/p2p/p2p-core/src/client/connection.rs index f3f3f6b..f7b9be5 100644 --- a/p2p/p2p-core/src/client/connection.rs +++ b/p2p/p2p-core/src/client/connection.rs @@ -26,7 +26,7 @@ use crate::{ }; /// A request to the connection task from a [`Client`](crate::client::Client). -pub struct ConnectionTaskRequest { +pub(crate) struct ConnectionTaskRequest { /// The request. pub request: PeerRequest, /// The response channel. @@ -36,7 +36,7 @@ pub struct ConnectionTaskRequest { } /// The connection state. -pub enum State { +pub(crate) enum State { /// Waiting for a request from Cuprate or the connected peer. WaitingForRequest, /// Waiting for a response from the peer. @@ -53,7 +53,7 @@ pub enum State { /// Returns if the [`LevinCommand`] is the correct response message for our request. /// /// e.g. that we didn't get a block for a txs request. -fn levin_command_response(message_id: &MessageID, command: LevinCommand) -> bool { +const fn levin_command_response(message_id: MessageID, command: LevinCommand) -> bool { matches!( (message_id, command), (MessageID::Handshake, LevinCommand::Handshake) @@ -71,7 +71,7 @@ fn levin_command_response(message_id: &MessageID, command: LevinCommand) -> bool } /// This represents a connection to a peer. -pub struct Connection { +pub(crate) struct Connection { /// The peer sink - where we send messages to the peer. peer_sink: Z::Sink, @@ -104,15 +104,15 @@ where BrdcstStrm: Stream + Send + 'static, { /// Create a new connection struct. - pub fn new( + pub(crate) fn new( peer_sink: Z::Sink, client_rx: mpsc::Receiver, broadcast_stream: BrdcstStrm, peer_request_handler: PeerRequestHandler, connection_guard: ConnectionGuard, error: SharedError, - ) -> Connection { - Connection { + ) -> Self { + Self { peer_sink, state: State::WaitingForRequest, request_timeout: None, @@ -174,15 +174,14 @@ where if let Err(e) = res { // can't clone the error so turn it to a string first, hacky but oh well. let err_str = e.to_string(); - let _ = req.response_channel.send(Err(err_str.clone().into())); + drop(req.response_channel.send(Err(err_str.into()))); return Err(e); - } else { - // We still need to respond even if the response is this. - let _ = req - .response_channel - .send(Ok(PeerResponse::Protocol(ProtocolResponse::NA))); } + // We still need to respond even if the response is this. + let resp = Ok(PeerResponse::Protocol(ProtocolResponse::NA)); + drop(req.response_channel.send(resp)); + Ok(()) } @@ -215,7 +214,7 @@ where }; // Check if the message is a response to our request. - if levin_command_response(request_id, mes.command()) { + if levin_command_response(*request_id, mes.command()) { // TODO: Do more checks before returning response. let State::WaitingForResponse { tx, .. } = @@ -224,9 +223,11 @@ where panic!("Not in correct state, can't receive response!") }; - let _ = tx.send(Ok(mes + let resp = Ok(mes .try_into() - .map_err(|_| PeerError::PeerSentInvalidMessage)?)); + .map_err(|_| PeerError::PeerSentInvalidMessage)?); + + drop(tx.send(resp)); self.request_timeout = None; @@ -282,7 +283,7 @@ where tokio::select! 
{ biased; - _ = self.request_timeout.as_mut().expect("Request timeout was not set!") => { + () = self.request_timeout.as_mut().expect("Request timeout was not set!") => { Err(PeerError::ClientChannelClosed) } broadcast_req = self.broadcast_stream.next() => { @@ -306,8 +307,11 @@ where /// Runs the Connection handler logic, this should be put in a separate task. /// /// `eager_protocol_messages` are protocol messages that we received during a handshake. - pub async fn run(mut self, mut stream: Str, eager_protocol_messages: Vec) - where + pub(crate) async fn run( + mut self, + mut stream: Str, + eager_protocol_messages: Vec, + ) where Str: FusedStream> + Unpin, { tracing::debug!( @@ -348,6 +352,7 @@ where /// Shutdowns the connection, flushing pending requests and setting the error slot, if it hasn't been /// set already. + #[expect(clippy::significant_drop_tightening)] fn shutdown(mut self, err: PeerError) { tracing::debug!("Connection task shutting down: {}", err); @@ -362,11 +367,11 @@ where if let State::WaitingForResponse { tx, .. } = std::mem::replace(&mut self.state, State::WaitingForRequest) { - let _ = tx.send(Err(err_str.clone().into())); + drop(tx.send(Err(err_str.clone().into()))); } while let Ok(req) = client_rx.try_recv() { - let _ = req.response_channel.send(Err(err_str.clone().into())); + drop(req.response_channel.send(Err(err_str.clone().into()))); } self.connection_guard.connection_closed(); diff --git a/p2p/p2p-core/src/client/connector.rs b/p2p/p2p-core/src/client/connector.rs index d937165..553f5a4 100644 --- a/p2p/p2p-core/src/client/connector.rs +++ b/p2p/p2p-core/src/client/connector.rs @@ -40,7 +40,9 @@ impl Connector { /// Create a new connector from a handshaker. - pub fn new(handshaker: HandShaker) -> Self { + pub const fn new( + handshaker: HandShaker, + ) -> Self { Self { handshaker } } } diff --git a/p2p/p2p-core/src/client/handshaker.rs b/p2p/p2p-core/src/client/handshaker.rs index 67a58d4..d6873a8 100644 --- a/p2p/p2p-core/src/client/handshaker.rs +++ b/p2p/p2p-core/src/client/handshaker.rs @@ -113,7 +113,7 @@ impl HandShaker { /// Creates a new handshaker. - fn new( + const fn new( address_book: AdrBook, peer_sync_svc: PSync, core_sync_svc: CSync, @@ -226,11 +226,12 @@ pub async fn ping(addr: N::Addr) -> Result Err(BucketError::IO(std::io::Error::new( std::io::ErrorKind::ConnectionAborted, "The peer stream returned None", - )))? + )) + .into()) } /// This function completes a handshake with the requested peer. -#[allow(clippy::too_many_arguments)] +#[expect(clippy::too_many_arguments)] async fn handshake( req: DoHandshakeRequest, @@ -403,7 +404,10 @@ where break 'check_out_addr None; }; - // u32 does not make sense as a port so just truncate it. + #[expect( + clippy::cast_possible_truncation, + reason = "u32 does not make sense as a port so just truncate it." + )] outbound_address.set_port(peer_node_data.my_port as u16); let Ok(Ok(ping_peer_id)) = timeout( @@ -508,7 +512,7 @@ where info.id, info.handle.clone(), connection_tx.clone(), - semaphore.clone(), + Arc::clone(&semaphore), address_book, core_sync_svc, peer_sync_svc, @@ -671,7 +675,7 @@ async fn wait_for_message( _ => { return Err(HandshakeError::PeerSentInvalidMessage( "Peer sent an admin request before responding to the handshake", - )) + )); } } } @@ -686,16 +690,17 @@ async fn wait_for_message( )); } - _ => Err(HandshakeError::PeerSentInvalidMessage( + Message::Response(_) => Err(HandshakeError::PeerSentInvalidMessage( "Peer sent an incorrect message", )), - }? 
+ }?; } Err(BucketError::IO(std::io::Error::new( std::io::ErrorKind::ConnectionAborted, "The peer stream returned None", - )))? + )) + .into()) } /// Sends a [`AdminResponseMessage::SupportFlags`] down the peer sink. diff --git a/p2p/p2p-core/src/client/handshaker/builder.rs b/p2p/p2p-core/src/client/handshaker/builder.rs index a40f396..069811d 100644 --- a/p2p/p2p-core/src/client/handshaker/builder.rs +++ b/p2p/p2p-core/src/client/handshaker/builder.rs @@ -87,14 +87,13 @@ impl where NAdrBook: AddressBook + Clone, { - let HandshakerBuilder { + let Self { core_sync_svc, peer_sync_svc, protocol_request_svc, our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, .. } = self; @@ -106,7 +105,7 @@ impl our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, + _zone: PhantomData, } } @@ -130,14 +129,13 @@ impl where NCSync: CoreSyncSvc + Clone, { - let HandshakerBuilder { + let Self { address_book, peer_sync_svc, protocol_request_svc, our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, .. } = self; @@ -149,7 +147,7 @@ impl our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, + _zone: PhantomData, } } @@ -167,14 +165,13 @@ impl where NPSync: PeerSyncSvc + Clone, { - let HandshakerBuilder { + let Self { address_book, core_sync_svc, protocol_request_svc, our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, .. } = self; @@ -186,7 +183,7 @@ impl our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, + _zone: PhantomData, } } @@ -204,14 +201,13 @@ impl where NProtoHdlr: ProtocolRequestHandler + Clone, { - let HandshakerBuilder { + let Self { address_book, core_sync_svc, peer_sync_svc, our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, .. } = self; @@ -223,7 +219,7 @@ impl our_basic_node_data, broadcast_stream_maker, connection_parent_span, - _zone, + _zone: PhantomData, } } @@ -242,14 +238,13 @@ impl BrdcstStrm: Stream + Send + 'static, NBrdcstStrmMkr: Fn(InternalPeerID) -> BrdcstStrm + Clone + Send + 'static, { - let HandshakerBuilder { + let Self { address_book, core_sync_svc, peer_sync_svc, protocol_request_svc, our_basic_node_data, connection_parent_span, - _zone, .. } = self; @@ -261,7 +256,7 @@ impl our_basic_node_data, broadcast_stream_maker: new_broadcast_stream_maker, connection_parent_span, - _zone, + _zone: PhantomData, } } @@ -270,6 +265,7 @@ impl /// ## Default Connection Parent Span /// /// The default connection span will be [`Span::none`]. + #[must_use] pub fn with_connection_parent_span(self, connection_parent_span: Span) -> Self { Self { connection_parent_span: Some(connection_parent_span), diff --git a/p2p/p2p-core/src/client/handshaker/builder/dummy.rs b/p2p/p2p-core/src/client/handshaker/builder/dummy.rs index ae97cdc..e3c4335 100644 --- a/p2p/p2p-core/src/client/handshaker/builder/dummy.rs +++ b/p2p/p2p-core/src/client/handshaker/builder/dummy.rs @@ -42,8 +42,8 @@ pub struct DummyCoreSyncSvc(CoreSyncData); impl DummyCoreSyncSvc { /// Returns a [`DummyCoreSyncSvc`] that will just return the mainnet genesis [`CoreSyncData`]. - pub fn static_mainnet_genesis() -> DummyCoreSyncSvc { - DummyCoreSyncSvc(CoreSyncData { + pub const fn static_mainnet_genesis() -> Self { + Self(CoreSyncData { cumulative_difficulty: 1, cumulative_difficulty_top64: 0, current_height: 1, @@ -56,8 +56,8 @@ impl DummyCoreSyncSvc { } /// Returns a [`DummyCoreSyncSvc`] that will just return the testnet genesis [`CoreSyncData`]. 
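// Editor's note: making these dummy constructors `const fn` (as in the
// `DummyCoreSyncSvc` hunks around this point) means a test handshaker can keep
// its sync service in a `static`. Hedged sketch; the struct below is a cut-down
// stand-in, not the real `cuprate_wire::CoreSyncData`:
struct CoreSyncDataStandIn {
    current_height: u64,
    top_id: [u8; 32],
}

struct DummySvc(CoreSyncDataStandIn);

impl DummySvc {
    const fn static_custom(data: CoreSyncDataStandIn) -> Self {
        Self(data)
    }
}

// Evaluated at compile time; impossible with a non-`const` constructor.
static GENESIS_SVC: DummySvc = DummySvc::static_custom(CoreSyncDataStandIn {
    current_height: 1,
    top_id: [0; 32],
});

fn main() {
    assert_eq!(GENESIS_SVC.0.current_height, 1);
}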
- pub fn static_testnet_genesis() -> DummyCoreSyncSvc { - DummyCoreSyncSvc(CoreSyncData { + pub const fn static_testnet_genesis() -> Self { + Self(CoreSyncData { cumulative_difficulty: 1, cumulative_difficulty_top64: 0, current_height: 1, @@ -70,8 +70,8 @@ impl DummyCoreSyncSvc { } /// Returns a [`DummyCoreSyncSvc`] that will just return the stagenet genesis [`CoreSyncData`]. - pub fn static_stagenet_genesis() -> DummyCoreSyncSvc { - DummyCoreSyncSvc(CoreSyncData { + pub const fn static_stagenet_genesis() -> Self { + Self(CoreSyncData { cumulative_difficulty: 1, cumulative_difficulty_top64: 0, current_height: 1, @@ -84,8 +84,8 @@ impl DummyCoreSyncSvc { } /// Returns a [`DummyCoreSyncSvc`] that will return the provided [`CoreSyncData`]. - pub fn static_custom(data: CoreSyncData) -> DummyCoreSyncSvc { - DummyCoreSyncSvc(data) + pub const fn static_custom(data: CoreSyncData) -> Self { + Self(data) } } diff --git a/p2p/p2p-core/src/client/request_handler.rs b/p2p/p2p-core/src/client/request_handler.rs index 284f954..7059eed 100644 --- a/p2p/p2p-core/src/client/request_handler.rs +++ b/p2p/p2p-core/src/client/request_handler.rs @@ -46,7 +46,7 @@ pub(crate) struct PeerRequestHandler { pub peer_info: PeerInformation, } -impl PeerRequestHandler +impl PeerRequestHandler where Z: NetworkZone, A: AddressBook, @@ -55,7 +55,7 @@ where PR: ProtocolRequestHandler, { /// Handles an incoming [`PeerRequest`] to our node. - pub async fn handle_peer_request( + pub(crate) async fn handle_peer_request( &mut self, req: PeerRequest, ) -> Result { diff --git a/p2p/p2p-core/src/client/timeout_monitor.rs b/p2p/p2p-core/src/client/timeout_monitor.rs index 5228ede..6dbb4a2 100644 --- a/p2p/p2p-core/src/client/timeout_monitor.rs +++ b/p2p/p2p-core/src/client/timeout_monitor.rs @@ -1,6 +1,6 @@ //! Timeout Monitor //! -//! This module holds the task that sends periodic [TimedSync](PeerRequest::TimedSync) requests to a peer to make +//! This module holds the task that sends periodic [`TimedSync`](PeerRequest::TimedSync) requests to a peer to make //! sure the connection is still active. use std::sync::Arc; @@ -64,7 +64,7 @@ where return Ok(()); } - let Ok(permit) = semaphore.clone().try_acquire_owned() else { + let Ok(permit) = Arc::clone(&semaphore).try_acquire_owned() else { // If we can't get a permit the connection is currently waiting for a response, so no need to // do a timed sync. continue; diff --git a/p2p/p2p-core/src/error.rs b/p2p/p2p-core/src/error.rs index 65303ad..d0de923 100644 --- a/p2p/p2p-core/src/error.rs +++ b/p2p/p2p-core/src/error.rs @@ -4,7 +4,7 @@ pub struct SharedError(Arc>); impl Clone for SharedError { fn clone(&self) -> Self { - Self(self.0.clone()) + Self(Arc::clone(&self.0)) } } diff --git a/p2p/p2p-core/src/handles.rs b/p2p/p2p-core/src/handles.rs index da47b65..06dc212 100644 --- a/p2p/p2p-core/src/handles.rs +++ b/p2p/p2p-core/src/handles.rs @@ -18,11 +18,12 @@ pub struct HandleBuilder { impl HandleBuilder { /// Create a new builder. - pub fn new() -> Self { + pub const fn new() -> Self { Self { permit: None } } /// Sets the permit for this connection. + #[must_use] pub fn with_permit(mut self, permit: Option) -> Self { self.permit = permit; self @@ -40,7 +41,7 @@ impl HandleBuilder { _permit: self.permit, }, ConnectionHandle { - token: token.clone(), + token, ban: Arc::new(OnceLock::new()), }, ) @@ -66,13 +67,13 @@ impl ConnectionGuard { /// /// This will be called on [`Drop::drop`]. 
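// Editor's note: the guard below cancels its token both on explicit close and in
// `Drop`, so even a panicking connection task still signals disconnect.
// Self-contained sketch of that pattern with `tokio_util::sync::CancellationToken`
// (the real `ConnectionGuard` carries extra state such as a permit):
use tokio_util::sync::CancellationToken;

struct Guard {
    token: CancellationToken,
}

impl Drop for Guard {
    fn drop(&mut self) {
        // Fires on every exit path, including unwinding.
        self.token.cancel();
    }
}

fn main() {
    let token = CancellationToken::new();
    drop(Guard { token: token.clone() });
    assert!(token.is_cancelled());
}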
pub fn connection_closed(&self) { - self.token.cancel() + self.token.cancel(); } } impl Drop for ConnectionGuard { fn drop(&mut self) { - self.token.cancel() + self.token.cancel(); } } @@ -90,6 +91,10 @@ impl ConnectionHandle { } /// Bans the peer for the given `duration`. pub fn ban_peer(&self, duration: Duration) { + #[expect( + clippy::let_underscore_must_use, + reason = "error means peer is already banned; fine to ignore" + )] let _ = self.ban.set(BanPeer(duration)); self.token.cancel(); } @@ -103,6 +108,6 @@ impl ConnectionHandle { } /// Sends the signal to the connection task to disconnect. pub fn send_close_signal(&self) { - self.token.cancel() + self.token.cancel(); } } diff --git a/p2p/p2p-core/src/lib.rs b/p2p/p2p-core/src/lib.rs index 83cc4d2..04e8676 100644 --- a/p2p/p2p-core/src/lib.rs +++ b/p2p/p2p-core/src/lib.rs @@ -6,7 +6,7 @@ //! //! # Network Zones //! -//! This crate abstracts over network zones, Tor/I2p/clearnet with the [NetworkZone] trait. Currently only clearnet is implemented: [ClearNet]. +//! This crate abstracts over network zones, Tor/I2p/clearnet with the [`NetworkZone`] trait. Currently only clearnet is implemented: [`ClearNet`]. //! //! # Usage //! @@ -56,6 +56,16 @@ //! .unwrap(); //! # }); //! ``` + +cfg_if::cfg_if! { + // Used in `tests/` + if #[cfg(test)] { + use cuprate_test_utils as _; + use tokio_test as _; + use hex as _; + } +} + use std::{fmt::Debug, future::Future, hash::Hash}; use futures::{Sink, Stream}; @@ -102,7 +112,7 @@ pub trait NetZoneAddress: + Unpin + 'static { - /// Cuprate needs to be able to ban peers by IP addresses and not just by SocketAddr as + /// Cuprate needs to be able to ban peers by IP addresses and not just by `SocketAddr` as /// that include the port, to be able to facilitate this network addresses must have a ban ID /// which for hidden services could just be the address it self but for clear net addresses will /// be the IP address. diff --git a/p2p/p2p-core/src/network_zones/clear.rs b/p2p/p2p-core/src/network_zones/clear.rs index 192e363..261d5ad 100644 --- a/p2p/p2p-core/src/network_zones/clear.rs +++ b/p2p/p2p-core/src/network_zones/clear.rs @@ -19,7 +19,7 @@ impl NetZoneAddress for SocketAddr { type BanID = IpAddr; fn set_port(&mut self, port: u16) { - SocketAddr::set_port(self, port) + Self::set_port(self, port); } fn ban_id(&self) -> Self::BanID { @@ -54,8 +54,13 @@ impl NetworkZone for ClearNet { const NAME: &'static str = "ClearNet"; const SEEDS: &'static [Self::Addr] = &[ - ip_v4(37, 187, 74, 171, 18080), + ip_v4(176, 9, 0, 187, 18080), + ip_v4(88, 198, 163, 90, 18080), + ip_v4(66, 85, 74, 134, 18080), + ip_v4(51, 79, 173, 165, 18080), ip_v4(192, 99, 8, 110, 18080), + ip_v4(37, 187, 74, 171, 18080), + ip_v4(77, 172, 183, 193, 18080), ]; const ALLOW_SYNC: bool = true; diff --git a/p2p/p2p-core/src/protocol.rs b/p2p/p2p-core/src/protocol.rs index 5e4f4d7..7d8d431 100644 --- a/p2p/p2p-core/src/protocol.rs +++ b/p2p/p2p-core/src/protocol.rs @@ -8,7 +8,7 @@ //! //! Here is every P2P request/response. //! -//! *note admin messages are already request/response so "Handshake" is actually made of a HandshakeRequest & HandshakeResponse +//! *note admin messages are already request/response so "Handshake" is actually made of a `HandshakeRequest` & `HandshakeResponse` //! //! ```md //! 
Admin: @@ -78,15 +78,15 @@ pub enum PeerRequest { } impl PeerRequest { - pub fn id(&self) -> MessageID { + pub const fn id(&self) -> MessageID { match self { - PeerRequest::Admin(admin_req) => match admin_req { + Self::Admin(admin_req) => match admin_req { AdminRequestMessage::Handshake(_) => MessageID::Handshake, AdminRequestMessage::TimedSync(_) => MessageID::TimedSync, AdminRequestMessage::Ping => MessageID::Ping, AdminRequestMessage::SupportFlags => MessageID::SupportFlags, }, - PeerRequest::Protocol(protocol_request) => match protocol_request { + Self::Protocol(protocol_request) => match protocol_request { ProtocolRequest::GetObjects(_) => MessageID::GetObjects, ProtocolRequest::GetChain(_) => MessageID::GetChain, ProtocolRequest::FluffyMissingTxs(_) => MessageID::FluffyMissingTxs, @@ -98,10 +98,10 @@ impl PeerRequest { } } - pub fn needs_response(&self) -> bool { + pub const fn needs_response(&self) -> bool { !matches!( self, - PeerRequest::Protocol( + Self::Protocol( ProtocolRequest::NewBlock(_) | ProtocolRequest::NewFluffyBlock(_) | ProtocolRequest::NewTransactions(_) @@ -126,15 +126,15 @@ pub enum PeerResponse { } impl PeerResponse { - pub fn id(&self) -> Option { + pub const fn id(&self) -> Option { Some(match self { - PeerResponse::Admin(admin_res) => match admin_res { + Self::Admin(admin_res) => match admin_res { AdminResponseMessage::Handshake(_) => MessageID::Handshake, AdminResponseMessage::TimedSync(_) => MessageID::TimedSync, AdminResponseMessage::Ping(_) => MessageID::Ping, AdminResponseMessage::SupportFlags(_) => MessageID::SupportFlags, }, - PeerResponse::Protocol(protocol_res) => match protocol_res { + Self::Protocol(protocol_res) => match protocol_res { ProtocolResponse::GetObjects(_) => MessageID::GetObjects, ProtocolResponse::GetChain(_) => MessageID::GetChain, ProtocolResponse::NewFluffyBlock(_) => MessageID::NewBlock, diff --git a/p2p/p2p-core/src/protocol/try_from.rs b/p2p/p2p-core/src/protocol/try_from.rs index 8a0b67d..d3a7260 100644 --- a/p2p/p2p-core/src/protocol/try_from.rs +++ b/p2p/p2p-core/src/protocol/try_from.rs @@ -11,15 +11,13 @@ pub struct MessageConversionError; impl From for ProtocolMessage { fn from(value: ProtocolRequest) -> Self { match value { - ProtocolRequest::GetObjects(val) => ProtocolMessage::GetObjectsRequest(val), - ProtocolRequest::GetChain(val) => ProtocolMessage::ChainRequest(val), - ProtocolRequest::FluffyMissingTxs(val) => { - ProtocolMessage::FluffyMissingTransactionsRequest(val) - } - ProtocolRequest::GetTxPoolCompliment(val) => ProtocolMessage::GetTxPoolCompliment(val), - ProtocolRequest::NewBlock(val) => ProtocolMessage::NewBlock(val), - ProtocolRequest::NewFluffyBlock(val) => ProtocolMessage::NewFluffyBlock(val), - ProtocolRequest::NewTransactions(val) => ProtocolMessage::NewTransactions(val), + ProtocolRequest::GetObjects(val) => Self::GetObjectsRequest(val), + ProtocolRequest::GetChain(val) => Self::ChainRequest(val), + ProtocolRequest::FluffyMissingTxs(val) => Self::FluffyMissingTransactionsRequest(val), + ProtocolRequest::GetTxPoolCompliment(val) => Self::GetTxPoolCompliment(val), + ProtocolRequest::NewBlock(val) => Self::NewBlock(val), + ProtocolRequest::NewFluffyBlock(val) => Self::NewFluffyBlock(val), + ProtocolRequest::NewTransactions(val) => Self::NewTransactions(val), } } } @@ -29,15 +27,13 @@ impl TryFrom for ProtocolRequest { fn try_from(value: ProtocolMessage) -> Result { Ok(match value { - ProtocolMessage::GetObjectsRequest(val) => ProtocolRequest::GetObjects(val), - ProtocolMessage::ChainRequest(val) => 
ProtocolRequest::GetChain(val), - ProtocolMessage::FluffyMissingTransactionsRequest(val) => { - ProtocolRequest::FluffyMissingTxs(val) - } - ProtocolMessage::GetTxPoolCompliment(val) => ProtocolRequest::GetTxPoolCompliment(val), - ProtocolMessage::NewBlock(val) => ProtocolRequest::NewBlock(val), - ProtocolMessage::NewFluffyBlock(val) => ProtocolRequest::NewFluffyBlock(val), - ProtocolMessage::NewTransactions(val) => ProtocolRequest::NewTransactions(val), + ProtocolMessage::GetObjectsRequest(val) => Self::GetObjects(val), + ProtocolMessage::ChainRequest(val) => Self::GetChain(val), + ProtocolMessage::FluffyMissingTransactionsRequest(val) => Self::FluffyMissingTxs(val), + ProtocolMessage::GetTxPoolCompliment(val) => Self::GetTxPoolCompliment(val), + ProtocolMessage::NewBlock(val) => Self::NewBlock(val), + ProtocolMessage::NewFluffyBlock(val) => Self::NewFluffyBlock(val), + ProtocolMessage::NewTransactions(val) => Self::NewTransactions(val), ProtocolMessage::GetObjectsResponse(_) | ProtocolMessage::ChainEntryResponse(_) => { return Err(MessageConversionError) } @@ -48,8 +44,8 @@ impl TryFrom for ProtocolRequest { impl From for Message { fn from(value: PeerRequest) -> Self { match value { - PeerRequest::Admin(val) => Message::Request(val), - PeerRequest::Protocol(val) => Message::Protocol(val.into()), + PeerRequest::Admin(val) => Self::Request(val), + PeerRequest::Protocol(val) => Self::Protocol(val.into()), } } } @@ -59,8 +55,8 @@ impl TryFrom for PeerRequest { fn try_from(value: Message) -> Result { match value { - Message::Request(req) => Ok(PeerRequest::Admin(req)), - Message::Protocol(pro) => Ok(PeerRequest::Protocol(pro.try_into()?)), + Message::Request(req) => Ok(Self::Admin(req)), + Message::Protocol(pro) => Ok(Self::Protocol(pro.try_into()?)), Message::Response(_) => Err(MessageConversionError), } } @@ -71,10 +67,10 @@ impl TryFrom for ProtocolMessage { fn try_from(value: ProtocolResponse) -> Result { Ok(match value { - ProtocolResponse::NewTransactions(val) => ProtocolMessage::NewTransactions(val), - ProtocolResponse::NewFluffyBlock(val) => ProtocolMessage::NewFluffyBlock(val), - ProtocolResponse::GetChain(val) => ProtocolMessage::ChainEntryResponse(val), - ProtocolResponse::GetObjects(val) => ProtocolMessage::GetObjectsResponse(val), + ProtocolResponse::NewTransactions(val) => Self::NewTransactions(val), + ProtocolResponse::NewFluffyBlock(val) => Self::NewFluffyBlock(val), + ProtocolResponse::GetChain(val) => Self::ChainEntryResponse(val), + ProtocolResponse::GetObjects(val) => Self::GetObjectsResponse(val), ProtocolResponse::NA => return Err(MessageConversionError), }) } @@ -85,10 +81,10 @@ impl TryFrom for ProtocolResponse { fn try_from(value: ProtocolMessage) -> Result { Ok(match value { - ProtocolMessage::NewTransactions(val) => ProtocolResponse::NewTransactions(val), - ProtocolMessage::NewFluffyBlock(val) => ProtocolResponse::NewFluffyBlock(val), - ProtocolMessage::ChainEntryResponse(val) => ProtocolResponse::GetChain(val), - ProtocolMessage::GetObjectsResponse(val) => ProtocolResponse::GetObjects(val), + ProtocolMessage::NewTransactions(val) => Self::NewTransactions(val), + ProtocolMessage::NewFluffyBlock(val) => Self::NewFluffyBlock(val), + ProtocolMessage::ChainEntryResponse(val) => Self::GetChain(val), + ProtocolMessage::GetObjectsResponse(val) => Self::GetObjects(val), ProtocolMessage::ChainRequest(_) | ProtocolMessage::FluffyMissingTransactionsRequest(_) | ProtocolMessage::GetObjectsRequest(_) @@ -103,8 +99,8 @@ impl TryFrom for PeerResponse { fn try_from(value: Message) 
-> Result { match value { - Message::Response(res) => Ok(PeerResponse::Admin(res)), - Message::Protocol(pro) => Ok(PeerResponse::Protocol(pro.try_into()?)), + Message::Response(res) => Ok(Self::Admin(res)), + Message::Protocol(pro) => Ok(Self::Protocol(pro.try_into()?)), Message::Request(_) => Err(MessageConversionError), } } @@ -115,8 +111,8 @@ impl TryFrom for Message { fn try_from(value: PeerResponse) -> Result { Ok(match value { - PeerResponse::Admin(val) => Message::Response(val), - PeerResponse::Protocol(val) => Message::Protocol(val.try_into()?), + PeerResponse::Admin(val) => Self::Response(val), + PeerResponse::Protocol(val) => Self::Protocol(val.try_into()?), }) } } diff --git a/p2p/p2p-core/src/services.rs b/p2p/p2p-core/src/services.rs index 6d66cfa..ba87684 100644 --- a/p2p/p2p-core/src/services.rs +++ b/p2p/p2p-core/src/services.rs @@ -52,7 +52,7 @@ pub struct ZoneSpecificPeerListEntryBase { pub rpc_credits_per_hash: u32, } -impl From> for cuprate_wire::PeerListEntryBase { +impl From> for PeerListEntryBase { fn from(value: ZoneSpecificPeerListEntryBase) -> Self { Self { adr: value.adr.into(), @@ -74,9 +74,7 @@ pub enum PeerListConversionError { PruningSeed(#[from] PruningError), } -impl TryFrom - for ZoneSpecificPeerListEntryBase -{ +impl TryFrom for ZoneSpecificPeerListEntryBase { type Error = PeerListConversionError; fn try_from(value: PeerListEntryBase) -> Result { diff --git a/p2p/p2p-core/tests/fragmented_handshake.rs b/p2p/p2p-core/tests/fragmented_handshake.rs index c19a2a6..1235df9 100644 --- a/p2p/p2p-core/tests/fragmented_handshake.rs +++ b/p2p/p2p-core/tests/fragmented_handshake.rs @@ -1,4 +1,7 @@ //! This file contains a test for a handshake with monerod but uses fragmented messages. + +#![expect(unused_crate_dependencies, reason = "external test module")] + use std::{ net::SocketAddr, pin::Pin, @@ -21,6 +24,13 @@ use tokio_util::{ use tower::{Service, ServiceExt}; use cuprate_helper::network::Network; +use cuprate_test_utils::monerod::monerod; +use cuprate_wire::{ + common::PeerSupportFlags, + levin::{message::make_fragmented_messages, LevinMessage, Protocol}, + BasicNodeData, Message, MoneroWireCodec, +}; + use cuprate_p2p_core::{ client::{ handshaker::HandshakerBuilder, ConnectRequest, Connector, DoHandshakeRequest, @@ -28,13 +38,6 @@ use cuprate_p2p_core::{ }, ClearNetServerCfg, ConnectionDirection, NetworkZone, }; -use cuprate_wire::{ - common::PeerSupportFlags, - levin::{message::make_fragmented_messages, LevinMessage, Protocol}, - BasicNodeData, Message, MoneroWireCodec, -}; - -use cuprate_test_utils::monerod::monerod; /// A network zone equal to clear net where every message sent is turned into a fragmented message. /// Does not support sending fragmented or dummy messages manually. 
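// Editor's note: the `try_from.rs` conversions above follow a symmetric pattern:
// `From` where every variant maps, `TryFrom` where direction-specific variants
// must be rejected. Minimal sketch of that shape (both enums are simplified
// stand-ins, not the real wire types):
struct MessageConversionError;

enum Message {
    Request(u8),
    Response(u8),
}

enum PeerRequest {
    Admin(u8),
}

impl TryFrom<Message> for PeerRequest {
    type Error = MessageConversionError;

    fn try_from(value: Message) -> Result<Self, Self::Error> {
        match value {
            // Requests convert cleanly...
            Message::Request(req) => Ok(Self::Admin(req)),
            // ...but a response can never be a `PeerRequest`.
            Message::Response(_) => Err(MessageConversionError),
        }
    }
}

fn main() {
    assert!(PeerRequest::try_from(Message::Response(0)).is_err());
}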
@@ -184,7 +187,7 @@ async fn fragmented_handshake_monerod_to_cuprate() { let next_connection_fut = timeout(Duration::from_secs(30), listener.next()); if let Some(Ok((addr, stream, sink))) = next_connection_fut.await.unwrap() { - let _ = handshaker + handshaker .ready() .await .unwrap() diff --git a/p2p/p2p-core/tests/handles.rs b/p2p/p2p-core/tests/handles.rs index 47d70b0..2a2e2be 100644 --- a/p2p/p2p-core/tests/handles.rs +++ b/p2p/p2p-core/tests/handles.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "external test module")] + use std::{sync::Arc, time::Duration}; use tokio::sync::Semaphore; diff --git a/p2p/p2p-core/tests/handshake.rs b/p2p/p2p-core/tests/handshake.rs index 5ce6153..86d62ed 100644 --- a/p2p/p2p-core/tests/handshake.rs +++ b/p2p/p2p-core/tests/handshake.rs @@ -1,3 +1,5 @@ +#![expect(unused_crate_dependencies, reason = "external test module")] + use std::time::Duration; use futures::StreamExt; @@ -9,6 +11,10 @@ use tokio_util::codec::{FramedRead, FramedWrite}; use tower::{Service, ServiceExt}; use cuprate_helper::network::Network; +use cuprate_test_utils::{ + monerod::monerod, + test_netzone::{TestNetZone, TestNetZoneAddr}, +}; use cuprate_wire::{common::PeerSupportFlags, BasicNodeData, MoneroWireCodec}; use cuprate_p2p_core::{ @@ -19,12 +25,8 @@ use cuprate_p2p_core::{ ClearNet, ClearNetServerCfg, ConnectionDirection, NetworkZone, }; -use cuprate_test_utils::{ - monerod::monerod, - test_netzone::{TestNetZone, TestNetZoneAddr}, -}; - #[tokio::test] +#[expect(clippy::significant_drop_tightening)] async fn handshake_cuprate_to_cuprate() { // Tests a Cuprate <-> Cuprate handshake by making 2 handshake services and making them talk to // each other. @@ -147,7 +149,7 @@ async fn handshake_monerod_to_cuprate() { let next_connection_fut = timeout(Duration::from_secs(30), listener.next()); if let Some(Ok((addr, stream, sink))) = next_connection_fut.await.unwrap() { - let _ = handshaker + handshaker .ready() .await .unwrap() diff --git a/p2p/p2p-core/tests/sending_receiving.rs b/p2p/p2p-core/tests/sending_receiving.rs index e035daf..8c90c83 100644 --- a/p2p/p2p-core/tests/sending_receiving.rs +++ b/p2p/p2p-core/tests/sending_receiving.rs @@ -1,6 +1,9 @@ +#![expect(unused_crate_dependencies, reason = "external test module")] + use tower::{Service, ServiceExt}; use cuprate_helper::network::Network; +use cuprate_test_utils::monerod::monerod; use cuprate_wire::{common::PeerSupportFlags, protocol::GetObjectsRequest, BasicNodeData}; use cuprate_p2p_core::{ @@ -9,8 +12,6 @@ use cuprate_p2p_core::{ ClearNet, ProtocolRequest, ProtocolResponse, }; -use cuprate_test_utils::monerod::monerod; - #[tokio::test] async fn get_single_block_from_monerod() { let monerod = monerod(["--out-peers=0"]).await; diff --git a/p2p/p2p/Cargo.toml b/p2p/p2p/Cargo.toml index 7cbbdcb..ef85277 100644 --- a/p2p/p2p/Cargo.toml +++ b/p2p/p2p/Cargo.toml @@ -39,3 +39,6 @@ cuprate-test-utils = { path = "../../test-utils" } indexmap = { workspace = true } proptest = { workspace = true } tokio-test = { workspace = true } + +[lints] +workspace = true \ No newline at end of file diff --git a/p2p/p2p/src/block_downloader.rs b/p2p/p2p/src/block_downloader.rs index d295016..39980a0 100644 --- a/p2p/p2p/src/block_downloader.rs +++ b/p2p/p2p/src/block_downloader.rs @@ -78,7 +78,7 @@ pub struct BlockDownloaderConfig { /// An error that occurred in the [`BlockDownloader`]. 
#[derive(Debug, thiserror::Error)] -pub enum BlockDownloadError { +pub(crate) enum BlockDownloadError { #[error("A request to a peer timed out.")] TimedOut, #[error("The block buffer was closed.")] @@ -219,7 +219,7 @@ struct BlockDownloader { /// The running chain entry tasks. /// /// Returns a result of the chain entry or an error. - #[allow(clippy::type_complexity)] + #[expect(clippy::type_complexity)] chain_entry_task: JoinSet, ChainEntry), BlockDownloadError>>, /// The current inflight requests. @@ -273,7 +273,7 @@ where } /// Checks if we can make use of any peers that are currently pending requests. - async fn check_pending_peers( + fn check_pending_peers( &mut self, chain_tracker: &mut ChainTracker, pending_peers: &mut BTreeMap>>, @@ -287,7 +287,8 @@ where continue; } - if let Some(peer) = self.try_handle_free_client(chain_tracker, peer).await { + let client = self.try_handle_free_client(chain_tracker, peer); + if let Some(peer) = client { // This peer is ok however it does not have the data we currently need, this will only happen // because of its pruning seed so just skip over all peers with this pruning seed. peers.push(peer); @@ -303,7 +304,7 @@ where /// for them. /// /// Returns the [`ClientPoolDropGuard`] back if it doesn't have the batch according to its pruning seed. - async fn request_inflight_batch_again( + fn request_inflight_batch_again( &mut self, client: ClientPoolDropGuard, ) -> Option> { @@ -354,7 +355,7 @@ where /// /// Returns the [`ClientPoolDropGuard`] back if it doesn't have the data we currently need according /// to its pruning seed. - async fn request_block_batch( + fn request_block_batch( &mut self, chain_tracker: &mut ChainTracker, client: ClientPoolDropGuard, @@ -399,7 +400,7 @@ where // If our ready queue is too large send duplicate requests for the blocks we are waiting on. if self.block_queue.size() >= self.config.in_progress_queue_size { - return self.request_inflight_batch_again(client).await; + return self.request_inflight_batch_again(client); } // No failed requests that we can handle, request some new blocks. @@ -434,7 +435,7 @@ where /// /// Returns the [`ClientPoolDropGuard`] back if it doesn't have the data we currently need according /// to its pruning seed. - async fn try_handle_free_client( + fn try_handle_free_client( &mut self, chain_tracker: &mut ChainTracker, client: ClientPoolDropGuard, @@ -472,7 +473,7 @@ where } // Request a batch of blocks instead. - self.request_block_batch(chain_tracker, client).await + self.request_block_batch(chain_tracker, client) } /// Checks the [`ClientPool`] for free peers. @@ -516,7 +517,7 @@ where .push(client); } - self.check_pending_peers(chain_tracker, pending_peers).await; + self.check_pending_peers(chain_tracker, pending_peers); Ok(()) } @@ -574,7 +575,7 @@ where .or_default() .push(client); - self.check_pending_peers(chain_tracker, pending_peers).await; + self.check_pending_peers(chain_tracker, pending_peers); return Ok(()); }; @@ -611,7 +612,7 @@ where .or_default() .push(client); - self.check_pending_peers(chain_tracker, pending_peers).await; + self.check_pending_peers(chain_tracker, pending_peers); Ok(()) } @@ -679,7 +680,7 @@ where .or_default() .push(client); - self.check_pending_peers(&mut chain_tracker, &mut pending_peers).await; + self.check_pending_peers(&mut chain_tracker, &mut pending_peers); } Err(_) => self.amount_of_empty_chain_entries += 1 } @@ -698,7 +699,7 @@ struct BlockDownloadTaskResponse { } /// Returns if a peer has all the blocks in a range, according to its [`PruningSeed`]. 
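// Editor's note: a range check like `client_has_block_in_range` below reduces to
// `PruningSeed::has_full_block` over each height in the range. Hedged sketch
// against the `cuprate_pruning` API visible in this diff; the constant's value
// mirrors Monero's CRYPTONOTE_MAX_BLOCK_HEIGHT but is an assumption here:
use cuprate_pruning::PruningSeed;

const MAX_BLOCK_HEIGHT: usize = 500_000_000;

fn has_blocks_in_range(seed: &PruningSeed, start_height: usize, length: usize) -> bool {
    (start_height..start_height + length).all(|h| seed.has_full_block(h, MAX_BLOCK_HEIGHT))
}

fn main() {
    // Seed 0 decompresses to the "not pruned" seed, which keeps every block.
    let not_pruned = PruningSeed::decompress(0).unwrap();
    assert!(has_blocks_in_range(&not_pruned, 0, 100));
}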
-fn client_has_block_in_range( +const fn client_has_block_in_range( pruning_seed: &PruningSeed, start_height: usize, length: usize, diff --git a/p2p/p2p/src/block_downloader/block_queue.rs b/p2p/p2p/src/block_downloader/block_queue.rs index 5a92f49..5dd1b0d 100644 --- a/p2p/p2p/src/block_downloader/block_queue.rs +++ b/p2p/p2p/src/block_downloader/block_queue.rs @@ -13,7 +13,7 @@ use super::{BlockBatch, BlockDownloadError}; /// /// Also, the [`Ord`] impl is reversed so older blocks (lower height) come first in a [`BinaryHeap`]. #[derive(Debug, Clone)] -pub struct ReadyQueueBatch { +pub(crate) struct ReadyQueueBatch { /// The start height of the batch. pub start_height: usize, /// The batch of blocks. @@ -43,7 +43,7 @@ impl Ord for ReadyQueueBatch { /// The block queue that holds downloaded block batches, adding them to the [`async_buffer`] when the /// oldest batch has been downloaded. -pub struct BlockQueue { +pub(crate) struct BlockQueue { /// A queue of ready batches. ready_batches: BinaryHeap, /// The size, in bytes, of all the batches in [`Self::ready_batches`]. @@ -55,8 +55,8 @@ pub struct BlockQueue { impl BlockQueue { /// Creates a new [`BlockQueue`]. - pub fn new(buffer_appender: BufferAppender) -> BlockQueue { - BlockQueue { + pub(crate) const fn new(buffer_appender: BufferAppender) -> Self { + Self { ready_batches: BinaryHeap::new(), ready_batches_size: 0, buffer_appender, @@ -64,12 +64,12 @@ impl BlockQueue { } /// Returns the oldest batch that has not been put in the [`async_buffer`] yet. - pub fn oldest_ready_batch(&self) -> Option { + pub(crate) fn oldest_ready_batch(&self) -> Option { self.ready_batches.peek().map(|batch| batch.start_height) } /// Returns the size of all the batches that have not been put into the [`async_buffer`] yet. - pub fn size(&self) -> usize { + pub(crate) const fn size(&self) -> usize { self.ready_batches_size } @@ -77,7 +77,7 @@ impl BlockQueue { /// /// `oldest_in_flight_start_height` should be the start height of the oldest batch that is still inflight, if /// there are no batches inflight then this should be [`None`]. - pub async fn add_incoming_batch( + pub(crate) async fn add_incoming_batch( &mut self, new_batch: ReadyQueueBatch, oldest_in_flight_start_height: Option, diff --git a/p2p/p2p/src/block_downloader/chain_tracker.rs b/p2p/p2p/src/block_downloader/chain_tracker.rs index aacb163..a2f03c5 100644 --- a/p2p/p2p/src/block_downloader/chain_tracker.rs +++ b/p2p/p2p/src/block_downloader/chain_tracker.rs @@ -20,7 +20,7 @@ pub(crate) struct ChainEntry { /// A batch of blocks to retrieve. #[derive(Clone)] -pub struct BlocksToRetrieve { +pub(crate) struct BlocksToRetrieve { /// The block IDs to get. pub ids: ByteArrayVec<32>, /// The hash of the last block before this batch. @@ -39,7 +39,7 @@ pub struct BlocksToRetrieve { /// An error returned from the [`ChainTracker`]. #[derive(Debug, Clone)] -pub enum ChainTrackerError { +pub(crate) enum ChainTrackerError { /// The new chain entry is invalid. NewEntryIsInvalid, /// The new chain entry does not follow from the top of our chain tracker. @@ -50,7 +50,7 @@ pub enum ChainTrackerError { /// /// This struct allows following a single chain. It takes in [`ChainEntry`]s and /// allows getting [`BlocksToRetrieve`]. -pub struct ChainTracker { +pub(crate) struct ChainTracker { /// A list of [`ChainEntry`]s, in order. entries: VecDeque>, /// The height of the first block, in the first entry in [`Self::entries`]. @@ -65,7 +65,7 @@ pub struct ChainTracker { impl ChainTracker { /// Creates a new chain tracker. 
- pub fn new( + pub(crate) fn new( new_entry: ChainEntry, first_height: usize, our_genesis: [u8; 32], @@ -76,9 +76,9 @@ impl ChainTracker { entries.push_back(new_entry); Self { - top_seen_hash, entries, first_height, + top_seen_hash, previous_hash, our_genesis, } @@ -86,17 +86,17 @@ impl ChainTracker { /// Returns `true` if the peer is expected to have the next block after our highest seen block /// according to their pruning seed. - pub fn should_ask_for_next_chain_entry(&self, seed: &PruningSeed) -> bool { + pub(crate) fn should_ask_for_next_chain_entry(&self, seed: &PruningSeed) -> bool { seed.has_full_block(self.top_height(), CRYPTONOTE_MAX_BLOCK_HEIGHT) } /// Returns the simple history, the highest seen block and the genesis block. - pub fn get_simple_history(&self) -> [[u8; 32]; 2] { + pub(crate) const fn get_simple_history(&self) -> [[u8; 32]; 2] { [self.top_seen_hash, self.our_genesis] } /// Returns the height of the highest block we are tracking. - pub fn top_height(&self) -> usize { + pub(crate) fn top_height(&self) -> usize { let top_block_idx = self .entries .iter() @@ -110,7 +110,7 @@ impl ChainTracker { /// /// # Panics /// This function panics if `batch_size` is `0`. - pub fn block_requests_queued(&self, batch_size: usize) -> usize { + pub(crate) fn block_requests_queued(&self, batch_size: usize) -> usize { self.entries .iter() .map(|entry| entry.ids.len().div_ceil(batch_size)) @@ -118,7 +118,10 @@ impl ChainTracker { } /// Attempts to add an incoming [`ChainEntry`] to the chain tracker. - pub fn add_entry(&mut self, mut chain_entry: ChainEntry) -> Result<(), ChainTrackerError> { + pub(crate) fn add_entry( + &mut self, + mut chain_entry: ChainEntry, + ) -> Result<(), ChainTrackerError> { if chain_entry.ids.is_empty() { // The peer must send at lest one overlapping block. chain_entry.handle.ban_peer(MEDIUM_BAN); @@ -154,7 +157,7 @@ impl ChainTracker { /// Returns a batch of blocks to request. /// /// The returned batches length will be less than or equal to `max_blocks` - pub fn blocks_to_get( + pub(crate) fn blocks_to_get( &mut self, pruning_seed: &PruningSeed, max_blocks: usize, diff --git a/p2p/p2p/src/block_downloader/download_batch.rs b/p2p/p2p/src/block_downloader/download_batch.rs index ea57ead..bbb14b3 100644 --- a/p2p/p2p/src/block_downloader/download_batch.rs +++ b/p2p/p2p/src/block_downloader/download_batch.rs @@ -30,6 +30,7 @@ use crate::{ attempt = _attempt ) )] +#[expect(clippy::used_underscore_binding)] pub async fn download_batch_task( client: ClientPoolDropGuard, ids: ByteArrayVec<32>, @@ -103,6 +104,7 @@ async fn request_batch_from_peer( Ok((client, batch)) } +#[expect(clippy::needless_pass_by_value)] fn deserialize_batch( blocks_response: GetObjectsResponse, expected_start_height: usize, diff --git a/p2p/p2p/src/block_downloader/request_chain.rs b/p2p/p2p/src/block_downloader/request_chain.rs index 4b0b47e..bde40ce 100644 --- a/p2p/p2p/src/block_downloader/request_chain.rs +++ b/p2p/p2p/src/block_downloader/request_chain.rs @@ -30,7 +30,7 @@ use crate::{ /// /// Because the block downloader only follows and downloads one chain we only have to send the block hash of /// top block we have found and the genesis block, this is then called `short_history`. 
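// Editor's note: `block_requests_queued` earlier in this file's diff counts
// batches with `div_ceil`: an entry holding `n` block IDs needs
// ceil(n / batch_size) requests. Minimal check of that arithmetic:
fn main() {
    let (ids, batch_size) = (250_usize, 100_usize);
    assert_eq!(ids.div_ceil(batch_size), 3); // 100 + 100 + 50
}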
-pub async fn request_chain_entry_from_peer( +pub(crate) async fn request_chain_entry_from_peer( mut client: ClientPoolDropGuard, short_history: [[u8; 32]; 2], ) -> Result<(ClientPoolDropGuard, ChainEntry), BlockDownloadError> { @@ -179,7 +179,7 @@ where Some(res) => { // res has already been set, replace it if this peer claims higher cumulative difficulty if res.0.cumulative_difficulty() < task_res.0.cumulative_difficulty() { - let _ = mem::replace(res, task_res); + drop(mem::replace(res, task_res)); } } None => { diff --git a/p2p/p2p/src/block_downloader/tests.rs b/p2p/p2p/src/block_downloader/tests.rs index 86a9a46..a5c5e92 100644 --- a/p2p/p2p/src/block_downloader/tests.rs +++ b/p2p/p2p/src/block_downloader/tests.rs @@ -47,6 +47,7 @@ proptest! { let tokio_pool = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap(); + #[expect(clippy::significant_drop_tightening)] tokio_pool.block_on(async move { timeout(Duration::from_secs(600), async move { let client_pool = ClientPool::new(); @@ -54,7 +55,7 @@ proptest! { let mut peer_ids = Vec::with_capacity(peers); for _ in 0..peers { - let client = mock_block_downloader_client(blockchain.clone()); + let client = mock_block_downloader_client(Arc::clone(&blockchain)); peer_ids.push(client.info.id); @@ -156,7 +157,7 @@ prop_compose! { for (height, mut block) in blocks.into_iter().enumerate() { if let Some(last) = blockchain.last() { block.0.header.previous = *last.0; - block.0.miner_transaction.prefix_mut().inputs = vec![Input::Gen(height)] + block.0.miner_transaction.prefix_mut().inputs = vec![Input::Gen(height)]; } blockchain.insert(block.0.hash(), block); @@ -173,7 +174,7 @@ fn mock_block_downloader_client(blockchain: Arc) -> Client( +pub(crate) fn init_broadcast_channels( config: BroadcastConfig, ) -> ( BroadcastSvc, @@ -193,7 +193,7 @@ impl Service> for BroadcastSvc { }; // An error here means _all_ receivers were dropped which we assume will never happen. - let _ = match direction { + drop(match direction { Some(ConnectionDirection::Inbound) => { self.tx_broadcast_channel_inbound.send(nex_tx_info) } @@ -201,10 +201,10 @@ impl Service> for BroadcastSvc { self.tx_broadcast_channel_outbound.send(nex_tx_info) } None => { - let _ = self.tx_broadcast_channel_outbound.send(nex_tx_info.clone()); + drop(self.tx_broadcast_channel_outbound.send(nex_tx_info.clone())); self.tx_broadcast_channel_inbound.send(nex_tx_info) } - }; + }); } } @@ -246,7 +246,7 @@ struct BroadcastTxInfo { /// /// This is given to the connection task to await on for broadcast messages. #[pin_project::pin_project] -pub struct BroadcastMessageStream { +pub(crate) struct BroadcastMessageStream { /// The peer that is holding this stream. addr: InternalPeerID, @@ -336,8 +336,9 @@ impl Stream for BroadcastMessageStream { Poll::Ready(Some(BroadcastMessage::NewTransaction(txs))) } else { tracing::trace!("Diffusion flush timer expired but no txs to diffuse"); - // poll next_flush now to register the waker with it + // poll next_flush now to register the waker with it. // the waker will already be registered with the block broadcast channel. 
+ #[expect(clippy::let_underscore_must_use)] let _ = this.next_flush.poll(cx); Poll::Pending } @@ -458,7 +459,7 @@ mod tests { let match_tx = |mes, txs| match mes { BroadcastMessage::NewTransaction(tx) => assert_eq!(tx.txs.as_slice(), txs), - _ => panic!("Block broadcast?"), + BroadcastMessage::NewFluffyBlock(_) => panic!("Block broadcast?"), }; let next = outbound_stream.next().await.unwrap(); @@ -520,7 +521,7 @@ mod tests { let match_tx = |mes, txs| match mes { BroadcastMessage::NewTransaction(tx) => assert_eq!(tx.txs.as_slice(), txs), - _ => panic!("Block broadcast?"), + BroadcastMessage::NewFluffyBlock(_) => panic!("Block broadcast?"), }; let next = outbound_stream.next().await.unwrap(); @@ -536,6 +537,6 @@ mod tests { futures::future::select(inbound_stream_from.next(), outbound_stream_from.next()) ) .await - .is_err()) + .is_err()); } } diff --git a/p2p/p2p/src/client_pool.rs b/p2p/p2p/src/client_pool.rs index 51f57e9..3405224 100644 --- a/p2p/p2p/src/client_pool.rs +++ b/p2p/p2p/src/client_pool.rs @@ -8,7 +8,7 @@ //! returns the peer to the pool when it is dropped. //! //! Internally the pool is a [`DashMap`] which means care should be taken in `async` code -//! as internally this uses blocking RwLocks. +//! as internally this uses blocking `RwLock`s. use std::sync::Arc; use dashmap::DashMap; @@ -24,7 +24,7 @@ use cuprate_p2p_core::{ pub(crate) mod disconnect_monitor; mod drop_guard_client; -pub use drop_guard_client::ClientPoolDropGuard; +pub(crate) use drop_guard_client::ClientPoolDropGuard; /// The client pool, which holds currently connected free peers. /// @@ -38,16 +38,17 @@ pub struct ClientPool { impl ClientPool { /// Returns a new [`ClientPool`] wrapped in an [`Arc`]. - pub fn new() -> Arc> { + pub fn new() -> Arc { let (tx, rx) = mpsc::unbounded_channel(); - let pool = Arc::new(ClientPool { + let pool = Arc::new(Self { clients: DashMap::new(), new_connection_tx: tx, }); tokio::spawn( - disconnect_monitor::disconnect_monitor(rx, pool.clone()).instrument(Span::current()), + disconnect_monitor::disconnect_monitor(rx, Arc::clone(&pool)) + .instrument(Span::current()), ); pool @@ -69,8 +70,7 @@ impl ClientPool { return; } - let res = self.clients.insert(id, client); - assert!(res.is_none()); + assert!(self.clients.insert(id, client).is_none()); // We have to check this again otherwise we could have a race condition where a // peer is disconnected after the first check, the disconnect monitor tries to remove it, @@ -121,7 +121,6 @@ impl ClientPool { /// Note that the returned iterator is not guaranteed to contain every peer asked for. /// /// See [`Self::borrow_client`] for borrowing a single client. 
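// Editor's note: the pool above is a `DashMap` keyed by peer ID, and borrowing
// is implemented as removal (the drop guard re-inserts the client later). Hedged
// sketch of the shape of `borrow_clients`, with `u64`/`String` standing in for
// `InternalPeerID`/`Client`:
use dashmap::DashMap;

fn borrow_clients(clients: &DashMap<u64, String>, peers: &[u64]) -> Vec<String> {
    peers
        .iter()
        // `remove` returns `Some((key, value))` only for connected, free peers,
        // which is why the result may contain fewer clients than were asked for.
        .filter_map(|peer| clients.remove(peer).map(|(_, client)| client))
        .collect()
}

fn main() {
    let clients = DashMap::new();
    clients.insert(1_u64, "peer-1".to_string());
    assert_eq!(borrow_clients(&clients, &[1, 2]), vec!["peer-1".to_string()]);
}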
- #[allow(private_interfaces)] // TODO: Remove me when 2024 Rust pub fn borrow_clients<'a, 'b>( self: &'a Arc, peers: &'b [InternalPeerID], @@ -133,7 +132,7 @@ impl ClientPool { mod sealed { /// TODO: Remove me when 2024 Rust /// - /// https://rust-lang.github.io/rfcs/3498-lifetime-capture-rules-2024.html#the-captures-trick + /// pub trait Captures {} impl Captures for T {} diff --git a/p2p/p2p/src/client_pool/disconnect_monitor.rs b/p2p/p2p/src/client_pool/disconnect_monitor.rs index f45d5e3..f54b560 100644 --- a/p2p/p2p/src/client_pool/disconnect_monitor.rs +++ b/p2p/p2p/src/client_pool/disconnect_monitor.rs @@ -78,6 +78,6 @@ impl Future for PeerDisconnectFut { this.closed_fut .poll(cx) - .map(|_| this.peer_id.take().unwrap()) + .map(|()| this.peer_id.take().unwrap()) } } diff --git a/p2p/p2p/src/connection_maintainer.rs b/p2p/p2p/src/connection_maintainer.rs index 3dfd5e8..be89973 100644 --- a/p2p/p2p/src/connection_maintainer.rs +++ b/p2p/p2p/src/connection_maintainer.rs @@ -99,12 +99,17 @@ where /// Connects to random seeds to get peers and immediately disconnects #[instrument(level = "info", skip(self))] + #[expect( + clippy::significant_drop_in_scrutinee, + clippy::significant_drop_tightening + )] async fn connect_to_random_seeds(&mut self) -> Result<(), OutboundConnectorError> { let seeds = N::SEEDS.choose_multiple(&mut thread_rng(), MAX_SEED_CONNECTIONS); - if seeds.len() == 0 { - panic!("No seed nodes available to get peers from"); - } + assert!( + seeds.len() != 0, + "No seed nodes available to get peers from" + ); let mut allowed_errors = seeds.len(); @@ -129,7 +134,7 @@ where } while let Some(res) = handshake_futs.join_next().await { - if matches!(res, Err(_) | Ok(Err(_)) | Ok(Ok(Err(_)))) { + if matches!(res, Err(_) | Ok(Err(_) | Ok(Err(_)))) { allowed_errors -= 1; } } @@ -144,7 +149,7 @@ where /// Connects to a given outbound peer. #[instrument(level = "info", skip_all)] async fn connect_to_outbound_peer(&mut self, permit: OwnedSemaphorePermit, addr: N::Addr) { - let client_pool = self.client_pool.clone(); + let client_pool = Arc::clone(&self.client_pool); let connection_fut = self .connector_svc .ready() @@ -157,6 +162,7 @@ where tokio::spawn( async move { + #[expect(clippy::significant_drop_in_scrutinee)] if let Ok(Ok(peer)) = timeout(HANDSHAKE_TIMEOUT, connection_fut).await { client_pool.add_new_client(peer); } @@ -166,14 +172,16 @@ where } /// Handles a request from the peer set for more peers. + #[expect( + clippy::significant_drop_tightening, + reason = "we need to hold onto a permit" + )] async fn handle_peer_request( &mut self, req: &MakeConnectionRequest, ) -> Result<(), OutboundConnectorError> { // try to get a permit. - let permit = self - .outbound_semaphore - .clone() + let permit = Arc::clone(&self.outbound_semaphore) .try_acquire_owned() .or_else(|_| { // if we can't get a permit add one if we are below the max number of connections. @@ -183,7 +191,9 @@ where } else { self.outbound_semaphore.add_permits(1); self.extra_peers += 1; - Ok(self.outbound_semaphore.clone().try_acquire_owned().unwrap()) + Ok(Arc::clone(&self.outbound_semaphore) + .try_acquire_owned() + .unwrap()) } })?; @@ -272,12 +282,12 @@ where tracing::info!("Shutting down outbound connector, make connection channel closed."); return; }; - // We can't really do much about errors in this function. 
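// Editor's note: the permit logic in `handle_peer_request` above takes a free
// permit if one exists, otherwise grows the semaphore and takes the new permit
// (the real code also caps the number of extra peers). Hedged sketch of that
// fallback with `tokio::sync::Semaphore`:
use std::sync::Arc;
use tokio::sync::{OwnedSemaphorePermit, Semaphore};

fn get_permit(semaphore: &Arc<Semaphore>) -> OwnedSemaphorePermit {
    Arc::clone(semaphore).try_acquire_owned().unwrap_or_else(|_| {
        // No free permit: add one and take it immediately.
        semaphore.add_permits(1);
        Arc::clone(semaphore).try_acquire_owned().unwrap()
    })
}

fn main() {
    let semaphore = Arc::new(Semaphore::new(0));
    let _permit = get_permit(&semaphore);
    assert_eq!(semaphore.available_permits(), 0);
}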
+ #[expect(clippy::let_underscore_must_use, reason = "We can't really do much about errors in this function.")] let _ = self.handle_peer_request(&peer_req).await; }, // This future is not cancellation safe as you will lose your space in the queue but as we are the only place // that actually requires permits that should be ok. - Ok(permit) = self.outbound_semaphore.clone().acquire_owned() => { + Ok(permit) = Arc::clone(&self.outbound_semaphore).acquire_owned() => { if self.handle_free_permit(permit).await.is_err() { // if we got an error then we still have a permit free so to prevent this from just looping // uncontrollably add a timeout. diff --git a/p2p/p2p/src/inbound_server.rs b/p2p/p2p/src/inbound_server.rs index 80ff38e..0d50d54 100644 --- a/p2p/p2p/src/inbound_server.rs +++ b/p2p/p2p/src/inbound_server.rs @@ -100,7 +100,7 @@ where }; // If we're still behind our maximum limit, Initiate handshake. - if let Ok(permit) = semaphore.clone().try_acquire_owned() { + if let Ok(permit) = Arc::clone(&semaphore).try_acquire_owned() { tracing::debug!("Permit free for incoming connection, attempting handshake."); let fut = handshaker.ready().await?.call(DoHandshakeRequest { @@ -111,11 +111,12 @@ where permit: Some(permit), }); - let cloned_pool = client_pool.clone(); + let cloned_pool = Arc::clone(&client_pool); tokio::spawn( async move { - if let Ok(Ok(peer)) = timeout(HANDSHAKE_TIMEOUT, fut).await { + let client = timeout(HANDSHAKE_TIMEOUT, fut).await; + if let Ok(Ok(peer)) = client { cloned_pool.add_new_client(peer); } } @@ -133,8 +134,10 @@ where let fut = timeout(PING_REQUEST_TIMEOUT, peer_stream.next()); // Ok if timeout did not elapsed -> Some if there is a message -> Ok if it has been decoded - if let Ok(Some(Ok(Message::Request(AdminRequestMessage::Ping)))) = fut.await - { + if matches!( + fut.await, + Ok(Some(Ok(Message::Request(AdminRequestMessage::Ping)))) + ) { let response = peer_sink .send( Message::Response(AdminResponseMessage::Ping(PingResponse { @@ -148,7 +151,7 @@ where if let Err(err) = response { tracing::debug!( "Unable to respond to ping request from peer ({addr}): {err}" - ) + ); } } } diff --git a/p2p/p2p/src/lib.rs b/p2p/p2p/src/lib.rs index be18c2a..2f51c6c 100644 --- a/p2p/p2p/src/lib.rs +++ b/p2p/p2p/src/lib.rs @@ -103,7 +103,7 @@ where let outbound_connector = Connector::new(outbound_handshaker); let outbound_connection_maintainer = connection_maintainer::OutboundConnectionKeeper::new( config.clone(), - client_pool.clone(), + Arc::clone(&client_pool), make_connection_rx, address_book.clone(), outbound_connector, @@ -118,17 +118,17 @@ where ); background_tasks.spawn( inbound_server::inbound_server( - client_pool.clone(), + Arc::clone(&client_pool), inbound_handshaker, address_book.clone(), config, ) .map(|res| { if let Err(e) = res { - tracing::error!("Error in inbound connection listener: {e}") + tracing::error!("Error in inbound connection listener: {e}"); } - tracing::info!("Inbound connection listener shutdown") + tracing::info!("Inbound connection listener shutdown"); }) .instrument(Span::current()), ); @@ -155,7 +155,7 @@ pub struct NetworkInterface { /// on that claimed chain. top_block_watch: watch::Receiver, /// A channel to request extra connections. - #[allow(dead_code)] // will be used eventually + #[expect(dead_code, reason = "will be used eventually")] make_connection_tx: mpsc::Sender, /// The address book service. 
diff --git a/p2p/p2p/src/lib.rs b/p2p/p2p/src/lib.rs index be18c2a..2f51c6c 100644 --- a/p2p/p2p/src/lib.rs +++ b/p2p/p2p/src/lib.rs @@ -103,7 +103,7 @@ where let outbound_connector = Connector::new(outbound_handshaker); let outbound_connection_maintainer = connection_maintainer::OutboundConnectionKeeper::new( config.clone(), - client_pool.clone(), + Arc::clone(&client_pool), make_connection_rx, address_book.clone(), outbound_connector, @@ -118,17 +118,17 @@ ); background_tasks.spawn( inbound_server::inbound_server( - client_pool.clone(), + Arc::clone(&client_pool), inbound_handshaker, address_book.clone(), config, ) .map(|res| { if let Err(e) = res { - tracing::error!("Error in inbound connection listener: {e}") + tracing::error!("Error in inbound connection listener: {e}"); } - tracing::info!("Inbound connection listener shutdown") + tracing::info!("Inbound connection listener shutdown"); }) .instrument(Span::current()), ); @@ -155,7 +155,7 @@ pub struct NetworkInterface<N: NetworkZone> { /// on that claimed chain. top_block_watch: watch::Receiver<NewSyncInfo>, /// A channel to request extra connections. - #[allow(dead_code)] // will be used eventually + #[expect(dead_code, reason = "will be used eventually")] make_connection_tx: mpsc::Sender<MakeConnectionRequest>, /// The address book service. address_book: BoxCloneService<AddressBookRequest<N>, AddressBookResponse<N>, tower::BoxError>, @@ -184,7 +184,7 @@ impl<N: NetworkZone> NetworkInterface<N> { C::Future: Send + 'static, { block_downloader::download_blocks( - self.pool.clone(), + Arc::clone(&self.pool), self.sync_states_svc.clone(), our_chain_service, config, diff --git a/p2p/p2p/src/sync_states.rs b/p2p/p2p/src/sync_states.rs index 70ef6ca..0c03795 100644 --- a/p2p/p2p/src/sync_states.rs +++ b/p2p/p2p/src/sync_states.rs @@ -40,7 +40,7 @@ pub struct NewSyncInfo { /// This is the service that handles: /// 1. Finding out if we need to sync /// 1. Giving the peers that should be synced _from_, to the requester -pub struct PeerSyncSvc<N: NetworkZone> { +pub(crate) struct PeerSyncSvc<N: NetworkZone> { /// A map of cumulative difficulties to peers. cumulative_difficulties: BTreeMap<u128, HashSet<InternalPeerID<N::Addr>>>, /// A map of peers to cumulative difficulties. @@ -56,7 +56,7 @@ impl<N: NetworkZone> PeerSyncSvc<N> { /// Creates a new [`PeerSyncSvc`] with a [`Receiver`](watch::Receiver) that will be updated with /// the highest seen sync data, this makes no guarantees about which peer will be chosen in case of a tie. - pub fn new() -> (Self, watch::Receiver<NewSyncInfo>) { + pub(crate) fn new() -> (Self, watch::Receiver<NewSyncInfo>) { let (watch_tx, mut watch_rx) = watch::channel(NewSyncInfo { chain_height: 0, top_hash: [0; 32], @@ -108,9 +108,7 @@ if let Some(block_needed) = block_needed { // we just use CRYPTONOTE_MAX_BLOCK_HEIGHT as the blockchain height, this only means // we don't take into account the tip blocks which are not pruned. - self.peers - .get(peer) - .unwrap() + self.peers[peer] .1 .has_full_block(block_needed, CRYPTONOTE_MAX_BLOCK_HEIGHT) } else { @@ -126,7 +124,7 @@ &mut self, peer_id: InternalPeerID<N::Addr>, handle: ConnectionHandle, - core_sync_data: CoreSyncData, + core_sync_data: &CoreSyncData, ) -> Result<(), tower::BoxError> { tracing::trace!( "Received new core sync data from peer, top hash: {}", @@ -176,7 +174,7 @@ self.closed_connections.push(PeerDisconnectFut { closed_fut: handle.closed(), peer_id: Some(peer_id), - }) + }); } self.cumulative_difficulties @@ -190,11 +188,15 @@ || self .last_peer_in_watcher_handle .as_ref() - .is_some_and(|handle| handle.is_closed()) + .is_some_and(ConnectionHandle::is_closed) { tracing::debug!( "Updating sync watcher channel with new highest seen cumulative difficulty: {new_cumulative_difficulty}" ); + #[expect( + clippy::let_underscore_must_use, + reason = "dropped receivers can be ignored" + )] let _ = self.new_height_watcher.send(NewSyncInfo { top_hash: core_sync_data.top_id, chain_height: core_sync_data.current_height, @@ -228,8 +230,8 @@ impl<N: NetworkZone> Service<PeerSyncRequest<N>> for PeerSyncSvc<N> { block_needed, ))), PeerSyncRequest::IncomingCoreSyncData(peer_id, handle, sync_data) => self - .update_peer_sync_info(peer_id, handle, sync_data) - .map(|_| PeerSyncResponse::Ok), + .update_peer_sync_info(peer_id, handle, &sync_data) + .map(|()| PeerSyncResponse::Ok), }; ready(res) @@ -413,6 +415,6 @@ mod tests { assert!( peers.contains(&InternalPeerID::Unknown(0)) && peers.contains(&InternalPeerID::Unknown(1)) - ) + ); } } diff --git a/pruning/Cargo.toml b/pruning/Cargo.toml index 3f5bd27..497c04b 100644 --- a/pruning/Cargo.toml +++ b/pruning/Cargo.toml @@ -13,3 +13,6 @@ borsh = ["dep:borsh"] thiserror = { workspace = true } borsh = { workspace = true, features = ["derive", "std"], optional = true } + +[lints] +workspace = true \ No newline at end of file
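Before moving into the pruning crate, one note on the recurring change in the p2p hunks above: `x.clone()` becoming `Arc::clone(&x)`. A short sketch of why the fully qualified form is preferred (this matches Clippy's `clone_on_ref_ptr` rationale; the types here are stand-ins):

```rust
use std::sync::Arc;

fn main() {
    let pool: Arc<Vec<u8>> = Arc::new(vec![1, 2, 3]);

    // `Arc::clone` makes it obvious this is a cheap reference-count bump,
    // and it stops compiling (instead of silently deep-copying) if the
    // type is ever changed away from `Arc`.
    let handle = Arc::clone(&pool);

    let worker = std::thread::spawn(move || handle.len());
    assert_eq!(worker.join().unwrap(), 3);
}
```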
a/pruning/src/lib.rs +++ b/pruning/src/lib.rs @@ -71,7 +71,7 @@ impl PruningSeed { /// /// See: [`DecompressedPruningSeed::new`] pub fn new_pruned(stripe: u32, log_stripes: u32) -> Result<Self, PruningError> { - Ok(PruningSeed::Pruned(DecompressedPruningSeed::new( + Ok(Self::Pruned(DecompressedPruningSeed::new( stripe, log_stripes, )?)) @@ -81,9 +81,7 @@ /// /// An error means the pruning seed was invalid. pub fn decompress(seed: u32) -> Result<Self, PruningError> { - Ok(DecompressedPruningSeed::decompress(seed)? - .map(PruningSeed::Pruned) - .unwrap_or(PruningSeed::NotPruned)) + Ok(DecompressedPruningSeed::decompress(seed)?.map_or(Self::NotPruned, Self::Pruned)) } /// Decompresses the seed, performing the same checks as [`PruningSeed::decompress`] and some more according to @@ -103,34 +101,34 @@ } /// Compresses this pruning seed to a u32. - pub fn compress(&self) -> u32 { + pub const fn compress(&self) -> u32 { match self { - PruningSeed::NotPruned => 0, - PruningSeed::Pruned(seed) => seed.compress(), + Self::NotPruned => 0, + Self::Pruned(seed) => seed.compress(), } } /// Returns the `log_stripes` for this seed, if this seed is pruned otherwise [`None`] is returned. - pub fn get_log_stripes(&self) -> Option<u32> { + pub const fn get_log_stripes(&self) -> Option<u32> { match self { - PruningSeed::NotPruned => None, - PruningSeed::Pruned(seed) => Some(seed.log_stripes), + Self::NotPruned => None, + Self::Pruned(seed) => Some(seed.log_stripes), } } /// Returns the `stripe` for this seed, if this seed is pruned otherwise [`None`] is returned. - pub fn get_stripe(&self) -> Option<u32> { + pub const fn get_stripe(&self) -> Option<u32> { match self { - PruningSeed::NotPruned => None, - PruningSeed::Pruned(seed) => Some(seed.stripe), + Self::NotPruned => None, + Self::Pruned(seed) => Some(seed.stripe), } } /// Returns `true` if a peer with this pruning seed should have a non-pruned version of a block. - pub fn has_full_block(&self, height: usize, blockchain_height: usize) -> bool { + pub const fn has_full_block(&self, height: usize, blockchain_height: usize) -> bool { match self { - PruningSeed::NotPruned => true, - PruningSeed::Pruned(seed) => seed.has_full_block(height, blockchain_height), + Self::NotPruned => true, + Self::Pruned(seed) => seed.has_full_block(height, blockchain_height), } }
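These `PruningSeed` methods round-trip the compressed `u32` seed format; the bit layout itself sits in the `DecompressedPruningSeed::compress` hunk further down. A sketch of that packing, assuming Monero's shift constants (`log_stripes` above bit 7, `stripe - 1` in the low bits, `0` meaning "not pruned"):

```rust
// Assumed Monero constants; the real values live in the pruning crate.
const PRUNING_SEED_LOG_STRIPES_SHIFT: u32 = 7;
const PRUNING_SEED_STRIPE_SHIFT: u32 = 0;

const fn compress(stripe: u32, log_stripes: u32) -> u32 {
    (log_stripes << PRUNING_SEED_LOG_STRIPES_SHIFT)
        | ((stripe - 1) << PRUNING_SEED_STRIPE_SHIFT)
}

fn main() {
    // Seed 384 = (3 << 7): 2^3 = 8 stripes, this peer keeps stripe 1.
    // 384 is exactly the seed the tests further down decompress.
    assert_eq!(compress(1, 3), 384);
    assert_eq!(compress(8, 3), 391);
}
```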
- (PruningSeed::NotPruned, PruningSeed::NotPruned) => Ordering::Equal, - (PruningSeed::NotPruned, PruningSeed::Pruned(_)) => Ordering::Greater, - (PruningSeed::Pruned(_), PruningSeed::NotPruned) => Ordering::Less, + (Self::NotPruned, Self::NotPruned) => Ordering::Equal, + (Self::NotPruned, Self::Pruned(_)) => Ordering::Greater, + (Self::Pruned(_), Self::NotPruned) => Ordering::Less, - (PruningSeed::Pruned(seed1), PruningSeed::Pruned(seed2)) => seed1.cmp(seed2), + (Self::Pruned(seed1), Self::Pruned(seed2)) => seed1.cmp(seed2), } } } @@ -222,7 +216,7 @@ pub struct DecompressedPruningSeed { log_stripes: u32, /// The specific portion this peer keeps. /// - /// *MUST* be between 1..=2^log_stripes + /// *MUST* be between `1..=2^log_stripes` stripe: u32, } @@ -268,13 +262,13 @@ impl DecompressedPruningSeed { /// a valid seed you currently MUST pass in a number 1 to 8 for `stripe` /// and 3 for `log_stripes`.* /// - pub fn new(stripe: u32, log_stripes: u32) -> Result<DecompressedPruningSeed, PruningError> { + pub const fn new(stripe: u32, log_stripes: u32) -> Result<Self, PruningError> { if log_stripes > PRUNING_SEED_LOG_STRIPES_MASK { Err(PruningError::LogStripesOutOfRange) } else if !(stripe > 0 && stripe <= (1 << log_stripes)) { Err(PruningError::StripeOutOfRange) } else { - Ok(DecompressedPruningSeed { + Ok(Self { log_stripes, stripe, }) @@ -286,7 +280,7 @@ /// Will return Ok(None) if the pruning seed means no pruning. /// /// An error means the pruning seed was invalid. - pub fn decompress(seed: u32) -> Result<Option<DecompressedPruningSeed>, PruningError> { + pub const fn decompress(seed: u32) -> Result<Option<Self>, PruningError> { if seed == 0 { // No pruning. return Ok(None); @@ -299,20 +293,20 @@ return Err(PruningError::StripeOutOfRange); } - Ok(Some(DecompressedPruningSeed { + Ok(Some(Self { log_stripes, stripe, })) } /// Compresses the pruning seed into a u32. - pub fn compress(&self) -> u32 { + pub const fn compress(&self) -> u32 { (self.log_stripes << PRUNING_SEED_LOG_STRIPES_SHIFT) | ((self.stripe - 1) << PRUNING_SEED_STRIPE_SHIFT) } /// Returns `true` if a peer with this pruning seed should have a non-pruned version of a block. - pub fn has_full_block(&self, height: usize, blockchain_height: usize) -> bool { + pub const fn has_full_block(&self, height: usize, blockchain_height: usize) -> bool { match get_block_pruning_stripe(height, blockchain_height, self.log_stripes) { Some(block_stripe) => self.stripe == block_stripe, None => true,
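`has_full_block` above delegates to `get_block_pruning_stripe`, which the next hunk makes `const`. A sketch of the stripe rotation it computes, assuming Monero's `CRYPTONOTE_PRUNING_STRIPE_SIZE = 4096` and `CRYPTONOTE_PRUNING_TIP_BLOCKS = 5500`:

```rust
// Assumed Monero constants.
const CRYPTONOTE_PRUNING_STRIPE_SIZE: usize = 4096;
const CRYPTONOTE_PRUNING_TIP_BLOCKS: usize = 5500;

// Blocks rotate through the 2^log_stripes stripes in 4096-block slices;
// the newest 5500 blocks are kept by everyone.
const fn stripe_of(height: usize, chain_height: usize, log_stripes: u32) -> Option<u32> {
    if height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= chain_height {
        None
    } else {
        Some((((height / CRYPTONOTE_PRUNING_STRIPE_SIZE)
            & ((1 << log_stripes) as usize - 1))
            + 1) as u32)
    }
}

fn main() {
    assert_eq!(stripe_of(0, 1_000_000, 3), Some(1));
    assert_eq!(stripe_of(4096, 1_000_000, 3), Some(2));
    assert_eq!(stripe_of(8 * 4096, 1_000_000, 3), Some(1)); // wraps around
    assert_eq!(stripe_of(999_000, 1_000_000, 3), None); // tip block
}
```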
@@ -419,7 +413,7 @@ impl DecompressedPruningSeed { // We can get the end of our "non-pruning" cycle by getting the next stripe's first un-pruned block height. // So we calculate the next un-pruned block for the next stripe and return it as our next pruned block let next_stripe = 1 + (self.stripe & ((1 << self.log_stripes) - 1)); - let seed = DecompressedPruningSeed::new(next_stripe, self.log_stripes) + let seed = Self::new(next_stripe, self.log_stripes) .expect("We just made sure this stripe is in range for this log_stripe"); let calculated_height = seed.get_next_unpruned_block(block_height, blockchain_height)?; @@ -433,7 +427,7 @@ } } -fn get_block_pruning_stripe( +const fn get_block_pruning_stripe( block_height: usize, blockchain_height: usize, log_stripe: u32, ) -> Option<u32> { @@ -441,9 +435,14 @@ if block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height { None } else { + #[expect( + clippy::cast_possible_truncation, + clippy::cast_sign_loss, + reason = "it's trivial to prove it's ok to use `as` here" + )] Some( (((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & ((1 << log_stripe) as usize - 1)) - + 1) as u32, // it's trivial to prove it's ok to us `as` here + + 1) as u32, ) } } @@ -483,16 +482,17 @@ mod tests { #[test] fn get_pruning_log_stripe() { let all_valid_seeds = make_all_pruning_seeds(); - for seed in all_valid_seeds.iter() { - assert_eq!(seed.get_log_stripes().unwrap(), 3) + for seed in &all_valid_seeds { + assert_eq!(seed.get_log_stripes().unwrap(), 3); } } #[test] fn get_pruning_stripe() { let all_valid_seeds = make_all_pruning_seeds(); + #[expect(clippy::cast_possible_truncation)] for (i, seed) in all_valid_seeds.iter().enumerate() { - assert_eq!(seed.get_stripe().unwrap(), i as u32 + 1) + assert_eq!(seed.get_stripe().unwrap(), i as u32 + 1); } } @@ -554,7 +554,7 @@ mod tests { assert_eq!( seed.get_next_unpruned_block(0, blockchain_height).unwrap(), i * 4096 - ) + ); } for (i, seed) in all_valid_seeds.iter().enumerate() { @@ -562,7 +562,7 @@ seed.get_next_unpruned_block((i + 1) * 4096, blockchain_height) .unwrap(), i * 4096 + 32768 - ) + ); } for (i, seed) in all_valid_seeds.iter().enumerate() { @@ -570,15 +570,15 @@ seed.get_next_unpruned_block((i + 8) * 4096, blockchain_height) .unwrap(), i * 4096 + 32768 - ) + ); } - for seed in all_valid_seeds.iter() { + for seed in &all_valid_seeds { assert_eq!( seed.get_next_unpruned_block(76437863 - 1, blockchain_height) .unwrap(), 76437863 - 1 - ) + ); } let zero_seed = PruningSeed::NotPruned; @@ -591,7 +591,7 @@ let seed = PruningSeed::decompress(384).unwrap(); // the next unpruned block is the first tip block - assert_eq!(seed.get_next_unpruned_block(5000, 11000).unwrap(), 5500) + assert_eq!(seed.get_next_unpruned_block(5000, 11000).unwrap(), 5500); } #[test] @@ -605,7 +605,7 @@ .unwrap() .unwrap(), 0 - ) + ); } for (i, seed) in all_valid_seeds.iter().enumerate() { @@ -614,7 +614,7 @@ .unwrap() .unwrap(), (i + 1) * 4096 - ) + ); } for (i, seed) in all_valid_seeds.iter().enumerate() { @@ -623,15 +623,15 @@ .unwrap() .unwrap(), (i + 9) * 4096 - ) + ); } - for seed in all_valid_seeds.iter() { + for seed in &all_valid_seeds { assert_eq!( seed.get_next_pruned_block(76437863 - 1, blockchain_height) .unwrap(), None - ) + ); } let zero_seed = PruningSeed::NotPruned; @@ -644,6 +644,6 @@ let seed = PruningSeed::decompress(384).unwrap(); // there is no next pruned block - assert_eq!(seed.get_next_pruned_block(5000, 10000).unwrap(), None) + assert_eq!(seed.get_next_pruned_block(5000, 10000).unwrap(), None); } } diff --git a/rpc/interface/src/route/bin.rs
b/rpc/interface/src/route/bin.rs index 90d06c8..f7e3a01 100644 --- a/rpc/interface/src/route/bin.rs +++ b/rpc/interface/src/route/bin.rs @@ -28,7 +28,6 @@ macro_rules! generate_endpoints_with_input { ),*) => { paste::paste! { $( /// TODO - #[allow(unused_mut)] pub(crate) async fn $endpoint( State(handler): State, mut request: Bytes, @@ -55,7 +54,6 @@ macro_rules! generate_endpoints_with_no_input { ),*) => { paste::paste! { $( /// TODO - #[allow(unused_mut)] pub(crate) async fn $endpoint( State(handler): State, ) -> Result { diff --git a/rpc/interface/src/router_builder.rs b/rpc/interface/src/router_builder.rs index 2e80c43..d18a694 100644 --- a/rpc/interface/src/router_builder.rs +++ b/rpc/interface/src/router_builder.rs @@ -69,7 +69,6 @@ macro_rules! generate_router_builder { /// .all() /// .build(); /// ``` - #[allow(clippy::struct_excessive_bools)] #[derive(Clone)] pub struct RouterBuilder { router: Router, diff --git a/rpc/interface/src/rpc_handler_dummy.rs b/rpc/interface/src/rpc_handler_dummy.rs index 0b01835..9d5009e 100644 --- a/rpc/interface/src/rpc_handler_dummy.rs +++ b/rpc/interface/src/rpc_handler_dummy.rs @@ -57,7 +57,7 @@ impl Service for RpcHandlerDummy { use cuprate_rpc_types::json::JsonRpcRequest as Req; use cuprate_rpc_types::json::JsonRpcResponse as Resp; - #[allow(clippy::default_trait_access)] + #[expect(clippy::default_trait_access)] let resp = match req { Req::GetBlockCount(_) => Resp::GetBlockCount(Default::default()), Req::OnGetBlockHash(_) => Resp::OnGetBlockHash(Default::default()), @@ -112,7 +112,7 @@ impl Service for RpcHandlerDummy { use cuprate_rpc_types::bin::BinRequest as Req; use cuprate_rpc_types::bin::BinResponse as Resp; - #[allow(clippy::default_trait_access)] + #[expect(clippy::default_trait_access)] let resp = match req { Req::GetBlocks(_) => Resp::GetBlocks(Default::default()), Req::GetBlocksByHeight(_) => Resp::GetBlocksByHeight(Default::default()), @@ -142,7 +142,7 @@ impl Service for RpcHandlerDummy { use cuprate_rpc_types::other::OtherRequest as Req; use cuprate_rpc_types::other::OtherResponse as Resp; - #[allow(clippy::default_trait_access)] + #[expect(clippy::default_trait_access)] let resp = match req { Req::GetHeight(_) => Resp::GetHeight(Default::default()), Req::GetTransactions(_) => Resp::GetTransactions(Default::default()), diff --git a/rpc/json-rpc/src/tests.rs b/rpc/json-rpc/src/tests.rs index 3ee6088..99ce126 100644 --- a/rpc/json-rpc/src/tests.rs +++ b/rpc/json-rpc/src/tests.rs @@ -52,7 +52,7 @@ where } /// Tests an input JSON string matches an expected type `T`. -#[allow(clippy::needless_pass_by_value)] // serde signature +#[expect(clippy::needless_pass_by_value, reason = "serde signature")] fn assert_de(json: &'static str, expected: T) where T: DeserializeOwned + std::fmt::Debug + Clone + PartialEq, diff --git a/rpc/types/src/bin.rs b/rpc/types/src/bin.rs index 0dbddea..a68d3e1 100644 --- a/rpc/types/src/bin.rs +++ b/rpc/types/src/bin.rs @@ -138,7 +138,6 @@ define_request! { )] /// /// This response's variant depends upon [`PoolInfoExtent`]. -#[allow(dead_code, missing_docs)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum GetBlocksResponse { @@ -157,7 +156,6 @@ impl Default for GetBlocksResponse { } /// Data within [`GetBlocksResponse::PoolInfoNone`]. 
-#[allow(dead_code, missing_docs)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct GetBlocksResponsePoolInfoNone { @@ -183,7 +181,6 @@ epee_object! { } /// Data within [`GetBlocksResponse::PoolInfoIncremental`]. -#[allow(dead_code, missing_docs)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct GetBlocksResponsePoolInfoIncremental { @@ -215,7 +212,6 @@ epee_object! { } /// Data within [`GetBlocksResponse::PoolInfoFull`]. -#[allow(dead_code, missing_docs)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct GetBlocksResponsePoolInfoFull { @@ -248,7 +244,6 @@ epee_object! { /// [`EpeeObjectBuilder`] for [`GetBlocksResponse`]. /// /// Not for public usage. -#[allow(dead_code, missing_docs)] #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct __GetBlocksResponseEpeeBuilder { @@ -354,7 +349,6 @@ impl EpeeObjectBuilder for __GetBlocksResponseEpeeBuilder { } #[cfg(feature = "epee")] -#[allow(clippy::cognitive_complexity)] impl EpeeObject for GetBlocksResponse { type Builder = __GetBlocksResponseEpeeBuilder; @@ -397,7 +391,6 @@ impl EpeeObject for GetBlocksResponse { /// See also: [`BinResponse`]. #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] #[cfg_attr(feature = "serde", serde(untagged))] -#[allow(missing_docs)] #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum BinRequest { GetBlocks(GetBlocksRequest), @@ -444,7 +437,6 @@ impl RpcCallValue for BinRequest { #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] #[cfg_attr(feature = "serde", serde(untagged))] -#[allow(missing_docs)] pub enum BinResponse { GetBlocks(GetBlocksResponse), GetBlocksByHeight(GetBlocksByHeightResponse), diff --git a/rpc/types/src/free.rs b/rpc/types/src/free.rs index 45fb2f7..a41c853 100644 --- a/rpc/types/src/free.rs +++ b/rpc/types/src/free.rs @@ -5,16 +5,16 @@ /// Returns `true` if the input `u` is equal to `0`. #[inline] -#[allow(clippy::trivially_copy_pass_by_ref)] // serde needs `&` -#[allow(dead_code)] // TODO: see if needed after handlers. +#[expect(clippy::trivially_copy_pass_by_ref, reason = "serde signature")] +#[expect(dead_code, reason = "TODO: see if needed after handlers.")] pub(crate) const fn is_zero(u: &u64) -> bool { *u == 0 } /// Returns `true` the input `u` is equal to `1`. #[inline] -#[allow(clippy::trivially_copy_pass_by_ref)] // serde needs `&` -#[allow(dead_code)] // TODO: see if needed after handlers. +#[expect(clippy::trivially_copy_pass_by_ref, reason = "serde signature")] +#[expect(dead_code, reason = "TODO: see if needed after handlers.")] pub(crate) const fn is_one(u: &u64) -> bool { *u == 1 } diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index cfefcf9..fb6e44b 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -1581,7 +1581,6 @@ define_request_and_response! 
{ feature = "serde", serde(rename_all = "snake_case", tag = "method", content = "params") )] -#[allow(missing_docs)] pub enum JsonRpcRequest { GetBlockCount(GetBlockCountRequest), OnGetBlockHash(OnGetBlockHashRequest), @@ -1714,7 +1713,6 @@ impl RpcCallValue for JsonRpcRequest { #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] #[cfg_attr(feature = "serde", serde(untagged, rename_all = "snake_case"))] -#[allow(missing_docs)] pub enum JsonRpcResponse { GetBlockCount(GetBlockCountResponse), OnGetBlockHash(OnGetBlockHashResponse), diff --git a/rpc/types/src/lib.rs b/rpc/types/src/lib.rs index 51ea3cc..be1069e 100644 --- a/rpc/types/src/lib.rs +++ b/rpc/types/src/lib.rs @@ -1,5 +1,9 @@ #![doc = include_str!("../README.md")] #![cfg_attr(docsrs, feature(doc_cfg))] +#![allow( + clippy::allow_attributes, + reason = "macros (internal + serde) make this lint hard to satisfy" +)] mod constants; mod defaults; diff --git a/rpc/types/src/macros.rs b/rpc/types/src/macros.rs index 60ffa90..85f4272 100644 --- a/rpc/types/src/macros.rs +++ b/rpc/types/src/macros.rs @@ -94,6 +94,7 @@ macro_rules! define_request_and_response { } ) => { paste::paste! { $crate::macros::define_request! { + #[allow(dead_code, missing_docs, reason = "inside a macro")] #[doc = $crate::macros::define_request_and_response_doc!( "response" => [<$type_name Response>], $monero_daemon_rpc_doc_link, @@ -118,8 +119,7 @@ macro_rules! define_request_and_response { } $crate::macros::define_response! { - #[allow(dead_code)] - #[allow(missing_docs)] + #[allow(dead_code, missing_docs, reason = "inside a macro")] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] #[doc = $crate::macros::define_request_and_response_doc!( @@ -236,7 +236,7 @@ macro_rules! define_request { )* } ) => { - #[allow(dead_code, missing_docs)] + #[allow(dead_code, missing_docs, reason = "inside a macro")] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] $( #[$attr] )* diff --git a/rpc/types/src/misc/distribution.rs b/rpc/types/src/misc/distribution.rs index 55d509e..faac7ad 100644 --- a/rpc/types/src/misc/distribution.rs +++ b/rpc/types/src/misc/distribution.rs @@ -76,7 +76,6 @@ impl Default for Distribution { } /// Data within [`Distribution::Uncompressed`]. -#[allow(dead_code, missing_docs)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct DistributionUncompressed { @@ -99,7 +98,6 @@ epee_object! { } /// Data within [`Distribution::CompressedBinary`]. -#[allow(dead_code, missing_docs)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct DistributionCompressedBinary { @@ -132,7 +130,7 @@ epee_object! { /// 1. Compresses the distribution array /// 2. Serializes the compressed data #[cfg(feature = "serde")] -#[allow(clippy::ptr_arg)] +#[expect(clippy::ptr_arg)] fn serialize_distribution_as_compressed_data(v: &Vec, s: S) -> Result where S: serde::Serializer, @@ -162,7 +160,6 @@ where /// [`EpeeObjectBuilder`] for [`Distribution`]. /// /// Not for public usage. 
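On the `#[expect(clippy::ptr_arg)]` added to the distribution serializer above: serde's `serialize_with` hands the function a reference to the field's exact type, so the parameter is `&Vec<u64>` rather than the `&[u64]` Clippy suggests. A hypothetical sketch (field and function names invented; assumes `serde` with derive plus `serde_json`):

```rust
use serde::{Serialize, Serializer};

// serde generates a call passing `&self.distribution`, i.e. a `&Vec<u64>`,
// so the parameter type mirrors the field instead of a slice.
#[expect(clippy::ptr_arg, reason = "serde passes the field type verbatim")]
fn serialize_as_sum<S: Serializer>(v: &Vec<u64>, s: S) -> Result<S::Ok, S::Error> {
    s.serialize_u64(v.iter().sum())
}

#[derive(Serialize)]
struct Distribution {
    #[serde(serialize_with = "serialize_as_sum")]
    distribution: Vec<u64>,
}

fn main() {
    let d = Distribution { distribution: vec![1, 2, 3] };
    println!("{}", serde_json::to_string(&d).unwrap()); // {"distribution":6}
}
```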
-#[allow(dead_code, missing_docs)] #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct __DistributionEpeeBuilder { diff --git a/rpc/types/src/misc/mod.rs b/rpc/types/src/misc/mod.rs index c5c1840..e09f847 100644 --- a/rpc/types/src/misc/mod.rs +++ b/rpc/types/src/misc/mod.rs @@ -15,7 +15,7 @@ mod binary_string; mod distribution; mod key_image_spent_status; -#[allow(clippy::module_inception)] +#[expect(clippy::module_inception)] mod misc; mod pool_info_extent; mod status; diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index 28c95d2..5b04089 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -973,7 +973,6 @@ define_request_and_response! { #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] #[cfg_attr(feature = "serde", serde(untagged))] -#[allow(missing_docs)] pub enum OtherRequest { GetHeight(GetHeightRequest), GetTransactions(GetTransactionsRequest), @@ -1092,7 +1091,6 @@ impl RpcCallValue for OtherRequest { #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] #[cfg_attr(feature = "serde", serde(untagged))] -#[allow(missing_docs)] pub enum OtherResponse { GetHeight(GetHeightResponse), GetTransactions(GetTransactionsResponse), diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index e039903..6eecb89 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -15,21 +15,19 @@ default = ["heed", "service"] heed = ["cuprate-database/heed"] redb = ["cuprate-database/redb"] redb-memory = ["cuprate-database/redb-memory"] -service = ["dep:thread_local", "dep:rayon"] +service = ["dep:thread_local", "dep:rayon", "cuprate-helper/thread"] [dependencies] -# FIXME: -# We only need the `thread` feature if `service` is enabled. -# Figure out how to enable features of an already pulled in dependency conditionally. 
cuprate-database = { path = "../database" } cuprate-database-service = { path = "../service" } -cuprate-helper = { path = "../../helper", features = ["fs", "thread", "map"] } +cuprate-helper = { path = "../../helper", features = ["fs", "map"] } cuprate-types = { path = "../../types", features = ["blockchain"] } +cuprate-pruning = { path = "../../pruning" } bitflags = { workspace = true, features = ["std", "serde", "bytemuck"] } bytemuck = { workspace = true, features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } curve25519-dalek = { workspace = true } -cuprate-pruning = { path = "../../pruning" } +rand = { workspace = true } monero-serai = { workspace = true, features = ["std"] } serde = { workspace = true, optional = true } diff --git a/storage/blockchain/src/ops/alt_block/block.rs b/storage/blockchain/src/ops/alt_block/block.rs new file mode 100644 index 0000000..6bd01cb --- /dev/null +++ b/storage/blockchain/src/ops/alt_block/block.rs @@ -0,0 +1,337 @@ +use bytemuck::TransparentWrapper; +use monero_serai::block::{Block, BlockHeader}; + +use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec}; +use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits}; +use cuprate_types::{AltBlockInformation, Chain, ChainId, ExtendedBlockHeader, HardFork}; + +use crate::{ + ops::{ + alt_block::{add_alt_transaction_blob, get_alt_transaction, update_alt_chain_info}, + block::get_block_info, + macros::doc_error, + }, + tables::{Tables, TablesMut}, + types::{AltBlockHeight, BlockHash, BlockHeight, CompactAltBlockInfo}, +}; + +/// Flush all alt-block data from all the alt-block tables. +/// +/// This function completely empties the alt block tables. +pub fn flush_alt_blocks<'a, E: cuprate_database::EnvInner<'a>>( + env_inner: &E, + tx_rw: &mut E::Rw<'_>, +) -> Result<(), RuntimeError> { + use crate::tables::{ + AltBlockBlobs, AltBlockHeights, AltBlocksInfo, AltChainInfos, AltTransactionBlobs, + AltTransactionInfos, + }; + + env_inner.clear_db::<AltChainInfos>(tx_rw)?; + env_inner.clear_db::<AltBlockHeights>(tx_rw)?; + env_inner.clear_db::<AltBlocksInfo>(tx_rw)?; + env_inner.clear_db::<AltBlockBlobs>(tx_rw)?; + env_inner.clear_db::<AltTransactionBlobs>(tx_rw)?; + env_inner.clear_db::<AltTransactionInfos>(tx_rw) +}
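The functions in this new file attach their shared error documentation with `#[doc = doc_error!()]`. A minimal sketch of the technique, with an invented macro body:

```rust
// Invented macro body; the real `doc_error!()` lives in `ops/macros.rs`.
macro_rules! doc_error {
    () => {
        "# Errors\nThis function returns `RuntimeError` if the database errors."
    };
}

// The macro expands to a string literal, which is valid in a key-value
// doc attribute, so many functions can share one documented-errors section.
#[doc = doc_error!()]
pub fn some_db_operation() {}

fn main() {
    some_db_operation();
}
```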
+ +/// Add a [`AltBlockInformation`] to the database. +/// +/// This extracts all the data from the input block and +/// maps/adds them to the appropriate database tables. +/// +#[doc = doc_error!()] +/// +/// # Panics +/// This function will panic if: +/// - `alt_block.height` is == `0` +/// - `alt_block.txs.len()` != `alt_block.block.transactions.len()` +/// +pub fn add_alt_block( + alt_block: &AltBlockInformation, + tables: &mut impl TablesMut, +) -> Result<(), RuntimeError> { + let alt_block_height = AltBlockHeight { + chain_id: alt_block.chain_id.into(), + height: alt_block.height, + }; + + tables + .alt_block_heights_mut() + .put(&alt_block.block_hash, &alt_block_height)?; + + update_alt_chain_info(&alt_block_height, &alt_block.block.header.previous, tables)?; + + let (cumulative_difficulty_low, cumulative_difficulty_high) = + split_u128_into_low_high_bits(alt_block.cumulative_difficulty); + + let alt_block_info = CompactAltBlockInfo { + block_hash: alt_block.block_hash, + pow_hash: alt_block.pow_hash, + height: alt_block.height, + weight: alt_block.weight, + long_term_weight: alt_block.long_term_weight, + cumulative_difficulty_low, + cumulative_difficulty_high, + }; + + tables + .alt_blocks_info_mut() + .put(&alt_block_height, &alt_block_info)?; + + tables.alt_block_blobs_mut().put( + &alt_block_height, + StorableVec::wrap_ref(&alt_block.block_blob), + )?; + + assert_eq!(alt_block.txs.len(), alt_block.block.transactions.len()); + for tx in &alt_block.txs { + add_alt_transaction_blob(tx, tables)?; + } + + Ok(()) +} + +/// Retrieves an [`AltBlockInformation`] from the database. +/// +/// This function will look at only the blocks with the given [`AltBlockHeight::chain_id`], no others +/// even if they are technically part of this chain. +#[doc = doc_error!()] +pub fn get_alt_block( + alt_block_height: &AltBlockHeight, + tables: &impl Tables, +) -> Result<AltBlockInformation, RuntimeError> { + let block_info = tables.alt_blocks_info().get(alt_block_height)?; + + let block_blob = tables.alt_block_blobs().get(alt_block_height)?.0; + + let block = Block::read(&mut block_blob.as_slice())?; + + let txs = block + .transactions + .iter() + .map(|tx_hash| get_alt_transaction(tx_hash, tables)) + .collect::<Result<_, RuntimeError>>()?; + + Ok(AltBlockInformation { + block, + block_blob, + txs, + block_hash: block_info.block_hash, + pow_hash: block_info.pow_hash, + height: block_info.height, + weight: block_info.weight, + long_term_weight: block_info.long_term_weight, + cumulative_difficulty: combine_low_high_bits_to_u128( + block_info.cumulative_difficulty_low, + block_info.cumulative_difficulty_high, + ), + chain_id: alt_block_height.chain_id.into(), + }) +} + +/// Retrieves the hash of the block at the given `block_height` on the alt chain with +/// the given [`ChainId`]. +/// +/// This function will get blocks from the whole chain, for example if you were to ask for height +/// `0` with any [`ChainId`] (as long that chain actually exists) you will get the main chain genesis. +/// +#[doc = doc_error!()] +pub fn get_alt_block_hash( + block_height: &BlockHeight, + alt_chain: ChainId, + tables: &impl Tables, +) -> Result<BlockHash, RuntimeError> { + let alt_chains = tables.alt_chain_infos(); + + // First find what [`ChainId`] this block would be stored under. + let original_chain = { + let mut chain = alt_chain.into(); + loop { + let chain_info = alt_chains.get(&chain)?; + + if chain_info.common_ancestor_height < *block_height { + break Chain::Alt(chain.into()); + } + + match chain_info.parent_chain.into() { + Chain::Main => break Chain::Main, + Chain::Alt(alt_chain_id) => { + chain = alt_chain_id.into(); + continue; + } + } + } + }; + + // Get the block hash.
+ match original_chain { + Chain::Main => { + get_block_info(block_height, tables.block_infos()).map(|info| info.block_hash) + } + Chain::Alt(chain_id) => tables + .alt_blocks_info() + .get(&AltBlockHeight { + chain_id: chain_id.into(), + height: *block_height, + }) + .map(|info| info.block_hash), + } +} + +/// Retrieves the [`ExtendedBlockHeader`] of the alt-block with an exact [`AltBlockHeight`]. +/// +/// This function will look at only the blocks with the given [`AltBlockHeight::chain_id`], no others +/// even if they are technically part of this chain. +/// +#[doc = doc_error!()] +pub fn get_alt_block_extended_header_from_height( + height: &AltBlockHeight, + table: &impl Tables, +) -> Result { + let block_info = table.alt_blocks_info().get(height)?; + + let block_blob = table.alt_block_blobs().get(height)?.0; + + let block_header = BlockHeader::read(&mut block_blob.as_slice())?; + + Ok(ExtendedBlockHeader { + version: HardFork::from_version(block_header.hardfork_version) + .expect("Block in DB must have correct version"), + vote: block_header.hardfork_version, + timestamp: block_header.timestamp, + cumulative_difficulty: combine_low_high_bits_to_u128( + block_info.cumulative_difficulty_low, + block_info.cumulative_difficulty_high, + ), + block_weight: block_info.weight, + long_term_weight: block_info.long_term_weight, + }) +} + +#[cfg(test)] +mod tests { + use std::num::NonZero; + + use cuprate_database::{Env, EnvInner, TxRw}; + use cuprate_test_utils::data::{BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3}; + use cuprate_types::{Chain, ChainId}; + + use crate::{ + ops::{ + alt_block::{ + add_alt_block, flush_alt_blocks, get_alt_block, + get_alt_block_extended_header_from_height, get_alt_block_hash, + get_alt_chain_history_ranges, + }, + block::{add_block, pop_block}, + }, + tables::{OpenTables, Tables}, + tests::{assert_all_tables_are_empty, map_verified_block_to_alt, tmp_concrete_env}, + types::AltBlockHeight, + }; + + #[expect(clippy::range_plus_one)] + #[test] + fn all_alt_blocks() { + let (env, _tmp) = tmp_concrete_env(); + let env_inner = env.env_inner(); + assert_all_tables_are_empty(&env); + + let chain_id = ChainId(NonZero::new(1).unwrap()); + + // Add initial block. 
+ { + let tx_rw = env_inner.tx_rw().unwrap(); + let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap(); + + let mut initial_block = BLOCK_V1_TX2.clone(); + initial_block.height = 0; + + add_block(&initial_block, &mut tables).unwrap(); + + drop(tables); + TxRw::commit(tx_rw).unwrap(); + } + + let alt_blocks = [ + map_verified_block_to_alt(BLOCK_V9_TX3.clone(), chain_id), + map_verified_block_to_alt(BLOCK_V16_TX0.clone(), chain_id), + ]; + + // Add alt-blocks + { + let tx_rw = env_inner.tx_rw().unwrap(); + let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap(); + + let mut prev_hash = BLOCK_V1_TX2.block_hash; + for (i, mut alt_block) in alt_blocks.into_iter().enumerate() { + let height = i + 1; + + alt_block.height = height; + alt_block.block.header.previous = prev_hash; + alt_block.block_blob = alt_block.block.serialize(); + + add_alt_block(&alt_block, &mut tables).unwrap(); + + let alt_height = AltBlockHeight { + chain_id: chain_id.into(), + height, + }; + + let alt_block_2 = get_alt_block(&alt_height, &tables).unwrap(); + assert_eq!(alt_block.block, alt_block_2.block); + + let headers = get_alt_chain_history_ranges( + 0..(height + 1), + chain_id, + tables.alt_chain_infos(), + ) + .unwrap(); + + assert_eq!(headers.len(), 2); + assert_eq!(headers[1], (Chain::Main, 0..1)); + assert_eq!(headers[0], (Chain::Alt(chain_id), 1..(height + 1))); + + prev_hash = alt_block.block_hash; + + let header = + get_alt_block_extended_header_from_height(&alt_height, &tables).unwrap(); + + assert_eq!(header.timestamp, alt_block.block.header.timestamp); + assert_eq!(header.block_weight, alt_block.weight); + assert_eq!(header.long_term_weight, alt_block.long_term_weight); + assert_eq!( + header.cumulative_difficulty, + alt_block.cumulative_difficulty + ); + assert_eq!( + header.version.as_u8(), + alt_block.block.header.hardfork_version + ); + assert_eq!(header.vote, alt_block.block.header.hardfork_signal); + + let block_hash = get_alt_block_hash(&height, chain_id, &tables).unwrap(); + + assert_eq!(block_hash, alt_block.block_hash); + } + + drop(tables); + TxRw::commit(tx_rw).unwrap(); + } + + { + let mut tx_rw = env_inner.tx_rw().unwrap(); + + flush_alt_blocks(&env_inner, &mut tx_rw).unwrap(); + + let mut tables = env_inner.open_tables_mut(&tx_rw).unwrap(); + pop_block(None, &mut tables).unwrap(); + + drop(tables); + TxRw::commit(tx_rw).unwrap(); + } + + assert_all_tables_are_empty(&env); + } +} diff --git a/storage/blockchain/src/ops/alt_block/chain.rs b/storage/blockchain/src/ops/alt_block/chain.rs new file mode 100644 index 0000000..5b5f3cb --- /dev/null +++ b/storage/blockchain/src/ops/alt_block/chain.rs @@ -0,0 +1,117 @@ +use std::cmp::{max, min}; + +use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError}; +use cuprate_types::{Chain, ChainId}; + +use crate::{ + ops::macros::{doc_add_alt_block_inner_invariant, doc_error}, + tables::{AltChainInfos, TablesMut}, + types::{AltBlockHeight, AltChainInfo, BlockHash, BlockHeight}, +}; + +/// Updates the [`AltChainInfo`] with information on a new alt-block. +/// +#[doc = doc_add_alt_block_inner_invariant!()] +#[doc = doc_error!()] +/// +/// # Panics +/// +/// This will panic if [`AltBlockHeight::height`] == `0`. 
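A toy model of the two cases the function below distinguishes (simplified types, not the real schema): extending the alt-chain only bumps `chain_height`, while overwriting an existing height means blocks were popped, so the split point must be re-recorded:

```rust
// Simplified stand-in for `AltChainInfo`; the real table also records
// the parent chain, which is re-resolved in the re-org case.
struct Info {
    common_ancestor_height: usize,
    chain_height: usize,
}

fn on_new_alt_block(info: &mut Info, block_height: usize) {
    if info.chain_height < block_height + 1 {
        // The block extends the chain: only the height moves.
        info.chain_height = block_height + 1;
    } else {
        // The block overwrites an existing height: blocks were popped,
        // so the split point moves to just below the new block.
        info.common_ancestor_height = block_height - 1;
        info.chain_height = block_height + 1;
    }
}

fn main() {
    let mut info = Info { common_ancestor_height: 9, chain_height: 12 };
    on_new_alt_block(&mut info, 12); // extend
    assert_eq!((info.common_ancestor_height, info.chain_height), (9, 13));
    on_new_alt_block(&mut info, 11); // re-org within the alt-chain
    assert_eq!((info.common_ancestor_height, info.chain_height), (10, 12));
}
```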
+pub fn update_alt_chain_info( + alt_block_height: &AltBlockHeight, + prev_hash: &BlockHash, + tables: &mut impl TablesMut, +) -> Result<(), RuntimeError> { + let parent_chain = match tables.alt_block_heights().get(prev_hash) { + Ok(alt_parent_height) => Chain::Alt(alt_parent_height.chain_id.into()), + Err(RuntimeError::KeyNotFound) => Chain::Main, + Err(e) => return Err(e), + }; + + // try update the info if one exists for this chain. + let update = tables + .alt_chain_infos_mut() + .update(&alt_block_height.chain_id, |mut info| { + if info.chain_height < alt_block_height.height + 1 { + // If the chain height is increasing we only need to update the chain height. + info.chain_height = alt_block_height.height + 1; + } else { + // If the chain height is not increasing we are popping blocks and need to update the + // split point. + info.common_ancestor_height = alt_block_height.height.checked_sub(1).unwrap(); + info.parent_chain = parent_chain.into(); + } + + info.chain_height = alt_block_height.height + 1; + Some(info) + }); + + match update { + Ok(()) => return Ok(()), + Err(RuntimeError::KeyNotFound) => (), + Err(e) => return Err(e), + } + + // If one doesn't already exist add it. + + tables.alt_chain_infos_mut().put( + &alt_block_height.chain_id, + &AltChainInfo { + parent_chain: parent_chain.into(), + common_ancestor_height: alt_block_height.height.checked_sub(1).unwrap(), + chain_height: alt_block_height.height + 1, + }, + ) +} + +/// Get the height history of an alt-chain in reverse chronological order. +/// +/// Height history is a list of height ranges with the corresponding [`Chain`] they are stored under. +/// For example if your range goes from height `0` the last entry in the list will be [`Chain::Main`] +/// upto the height where the first split occurs. +#[doc = doc_error!()] +pub fn get_alt_chain_history_ranges( + range: std::ops::Range, + alt_chain: ChainId, + alt_chain_infos: &impl DatabaseRo, +) -> Result)>, RuntimeError> { + let mut ranges = Vec::with_capacity(5); + + let mut i = range.end; + let mut current_chain_id = alt_chain.into(); + while i > range.start { + let chain_info = alt_chain_infos.get(¤t_chain_id)?; + + let start_height = max(range.start, chain_info.common_ancestor_height + 1); + let end_height = min(i, chain_info.chain_height); + + ranges.push(( + Chain::Alt(current_chain_id.into()), + start_height..end_height, + )); + i = chain_info.common_ancestor_height + 1; + + match chain_info.parent_chain.into() { + Chain::Main => { + ranges.push((Chain::Main, range.start..i)); + break; + } + Chain::Alt(alt_chain_id) => { + let alt_chain_id = alt_chain_id.into(); + + // This shouldn't be possible to hit, however in a test with custom (invalid) block data + // this caused an infinite loop. + if alt_chain_id == current_chain_id { + return Err(RuntimeError::Io(std::io::Error::other( + "Loop detected in ChainIDs, invalid alt chain.", + ))); + } + + current_chain_id = alt_chain_id; + continue; + } + } + } + + Ok(ranges) +} diff --git a/storage/blockchain/src/ops/alt_block/mod.rs b/storage/blockchain/src/ops/alt_block/mod.rs new file mode 100644 index 0000000..1654d27 --- /dev/null +++ b/storage/blockchain/src/ops/alt_block/mod.rs @@ -0,0 +1,58 @@ +//! Alternative Block/Chain Ops +//! +//! Alternative chains are chains that potentially have more proof-of-work than the main-chain +//! which we are tracking to potentially re-org to. +//! +//! Cuprate uses an ID system for alt-chains. When a split is made from the main-chain we generate +//! 
a random [`ChainID`](cuprate_types::ChainId) and assign it to the chain: +//! +//! ```text +//! | +//! | +//! | split +//! |------------- +//! | | +//! | | +//! \|/ \|/ +//! main-chain ChainID(X) +//! ``` +//! +//! In that example if we were to receive an alt-block which immediately follows the top block of `ChainID(X)` +//! then that block will also be stored under `ChainID(X)`. However, if it follows from another block from `ChainID(X)` +//! we will split into a chain with a different ID: +//! +//! ```text +//! | +//! | +//! | split +//! |------------- +//! | | split +//! | |-------------| +//! | | | +//! | | | +//! | | | +//! \|/ \|/ \|/ +//! main-chain ChainID(X) ChainID(Z) +//! ``` +//! +//! As you can see if we wanted to get all the alt-blocks in `ChainID(Z)` that now includes some blocks from `ChainID(X)` as well. +//! [`get_alt_chain_history_ranges`] covers this and is the method to get the ranges of heights needed from each [`ChainID`](cuprate_types::ChainId) +//! to get all the alt-blocks in a given [`ChainID`](cuprate_types::ChainId). +//! +//! Although this should be kept in mind as a possibility, because Cuprate's block downloader will only track a single chain it is +//! unlikely that we will be tracking [`ChainID`](cuprate_types::ChainId)s that don't immediately connect to the main-chain. +//! +//! ## Why not use the block's `previous` field? +//! +//! Although that would be easier, it makes getting a range of block extremely slow, as we have to build the weight cache to verify +//! blocks, roughly 100,000 block headers needed, this cost is too high. +mod block; +mod chain; +mod tx; + +pub use block::{ + add_alt_block, flush_alt_blocks, get_alt_block, get_alt_block_extended_header_from_height, + get_alt_block_hash, +}; +pub use chain::{get_alt_chain_history_ranges, update_alt_chain_info}; +pub use tx::{add_alt_transaction_blob, get_alt_transaction}; diff --git a/storage/blockchain/src/ops/alt_block/tx.rs b/storage/blockchain/src/ops/alt_block/tx.rs new file mode 100644 index 0000000..4185c6c --- /dev/null +++ b/storage/blockchain/src/ops/alt_block/tx.rs @@ -0,0 +1,76 @@ +use bytemuck::TransparentWrapper; +use monero_serai::transaction::Transaction; + +use cuprate_database::{DatabaseRo, DatabaseRw, RuntimeError, StorableVec}; +use cuprate_types::VerifiedTransactionInformation; + +use crate::{ + ops::macros::{doc_add_alt_block_inner_invariant, doc_error}, + tables::{Tables, TablesMut}, + types::{AltTransactionInfo, TxHash}, +}; + +/// Adds a [`VerifiedTransactionInformation`] from an alt-block +/// if it is not already in the DB. +/// +/// If the transaction is in the main-chain this function will still fill in the +/// [`AltTransactionInfos`](crate::tables::AltTransactionInfos) table, as that +/// table holds data which we don't keep around for main-chain txs. +/// +#[doc = doc_add_alt_block_inner_invariant!()] +#[doc = doc_error!()] +pub fn add_alt_transaction_blob( + tx: &VerifiedTransactionInformation, + tables: &mut impl TablesMut, +) -> Result<(), RuntimeError> { + tables.alt_transaction_infos_mut().put( + &tx.tx_hash, + &AltTransactionInfo { + tx_weight: tx.tx_weight, + fee: tx.fee, + tx_hash: tx.tx_hash, + }, + )?; + + if tables.tx_ids().get(&tx.tx_hash).is_ok() + || tables.alt_transaction_blobs().get(&tx.tx_hash).is_ok() + { + return Ok(()); + } + + tables + .alt_transaction_blobs_mut() + .put(&tx.tx_hash, StorableVec::wrap_ref(&tx.tx_blob))?; + + Ok(()) +} + +/// Retrieve a [`VerifiedTransactionInformation`] from the database. 
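`get_alt_transaction` below reads the blob from the alt-transaction table and falls back to the main-chain tables when the key is absent, treating `KeyNotFound` as control flow rather than as a failure. A simplified sketch of that pattern:

```rust
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum RuntimeError {
    KeyNotFound,
    Corrupt,
}

// `KeyNotFound` from the alt table means "stored with the main chain",
// so only a miss in *both* tables is a real error.
fn get_tx_blob(
    alt: &HashMap<u8, Vec<u8>>,
    main: &HashMap<u8, Vec<u8>>,
    key: u8,
) -> Result<Vec<u8>, RuntimeError> {
    match alt.get(&key).cloned().ok_or(RuntimeError::KeyNotFound) {
        Ok(blob) => Ok(blob),
        Err(RuntimeError::KeyNotFound) => {
            main.get(&key).cloned().ok_or(RuntimeError::Corrupt)
        }
        Err(e) => Err(e),
    }
}

fn main() {
    let alt = HashMap::new();
    let main_chain = HashMap::from([(1u8, vec![0xAB])]);
    assert_eq!(get_tx_blob(&alt, &main_chain, 1), Ok(vec![0xAB]));
    assert_eq!(get_tx_blob(&alt, &main_chain, 2), Err(RuntimeError::Corrupt));
}
```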
+/// +#[doc = doc_error!()] +pub fn get_alt_transaction( + tx_hash: &TxHash, + tables: &impl Tables, +) -> Result { + let tx_info = tables.alt_transaction_infos().get(tx_hash)?; + + let tx_blob = match tables.alt_transaction_blobs().get(tx_hash) { + Ok(blob) => blob.0, + Err(RuntimeError::KeyNotFound) => { + let tx_id = tables.tx_ids().get(tx_hash)?; + + let blob = tables.tx_blobs().get(&tx_id)?; + + blob.0 + } + Err(e) => return Err(e), + }; + + Ok(VerifiedTransactionInformation { + tx: Transaction::read(&mut tx_blob.as_slice()).unwrap(), + tx_blob, + tx_weight: tx_info.tx_weight, + fee: tx_info.fee, + tx_hash: tx_info.tx_hash, + }) +} diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index 91d6e57..6d32fd8 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -2,16 +2,26 @@ //---------------------------------------------------------------------------------------------------- Import use bytemuck::TransparentWrapper; -use monero_serai::block::Block; +use monero_serai::{ + block::{Block, BlockHeader}, + transaction::Transaction, +}; use cuprate_database::{ RuntimeError, StorableVec, {DatabaseRo, DatabaseRw}, }; -use cuprate_helper::map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits}; -use cuprate_types::{ExtendedBlockHeader, HardFork, VerifiedBlockInformation}; +use cuprate_helper::{ + map::{combine_low_high_bits_to_u128, split_u128_into_low_high_bits}, + tx::tx_fee, +}; +use cuprate_types::{ + AltBlockInformation, ChainId, ExtendedBlockHeader, HardFork, VerifiedBlockInformation, + VerifiedTransactionInformation, +}; use crate::{ ops::{ + alt_block, blockchain::{chain_height, cumulative_generated_coins}, macros::doc_error, output::get_rct_num_outputs, @@ -33,11 +43,6 @@ use crate::{ /// This function will panic if: /// - `block.height > u32::MAX` (not normally possible) /// - `block.height` is not != [`chain_height`] -/// -/// # Already exists -/// This function will operate normally even if `block` already -/// exists, i.e., this function will not return `Err` even if you -/// call this function infinitely with the same block. // no inline, too big. pub fn add_block( block: &VerifiedBlockInformation, @@ -74,10 +79,10 @@ pub fn add_block( //------------------------------------------------------ Transaction / Outputs / Key Images // Add the miner transaction first. - { + let mining_tx_index = { let tx = &block.block.miner_transaction; - add_tx(tx, &tx.serialize(), &tx.hash(), &chain_height, tables)?; - } + add_tx(tx, &tx.serialize(), &tx.hash(), &chain_height, tables)? + }; for tx in &block.txs { add_tx(&tx.tx, &tx.tx_blob, &tx.tx_hash, &chain_height, tables)?; @@ -107,16 +112,23 @@ pub fn add_block( cumulative_rct_outs, timestamp: block.block.header.timestamp, block_hash: block.block_hash, - // INVARIANT: #[cfg] @ lib.rs asserts `usize == u64` - weight: block.weight as u64, - long_term_weight: block.long_term_weight as u64, + weight: block.weight, + long_term_weight: block.long_term_weight, + mining_tx_index, }, )?; - // Block blobs. - tables - .block_blobs_mut() - .put(&block.height, StorableVec::wrap_ref(&block.block_blob))?; + // Block header blob. + tables.block_header_blobs_mut().put( + &block.height, + StorableVec::wrap_ref(&block.block.header.serialize()), + )?; + + // Block transaction hashes + tables.block_txs_hashes_mut().put( + &block.height, + StorableVec::wrap_ref(&block.block.transactions), + )?; // Block heights. 
tables @@ -130,37 +142,87 @@ pub fn add_block( /// Remove the top/latest block from the database. /// /// The removed block's data is returned. +/// +/// If a [`ChainId`] is specified the popped block will be added to the alt block tables under +/// that [`ChainId`]. Otherwise, the block will be completely removed from the DB. #[doc = doc_error!()] /// /// In `pop_block()`'s case, [`RuntimeError::KeyNotFound`] /// will be returned if there are no blocks left. // no inline, too big pub fn pop_block( + move_to_alt_chain: Option, tables: &mut impl TablesMut, ) -> Result<(BlockHeight, BlockHash, Block), RuntimeError> { //------------------------------------------------------ Block Info // Remove block data from tables. - let (block_height, block_hash) = { - let (block_height, block_info) = tables.block_infos_mut().pop_last()?; - (block_height, block_info.block_hash) - }; + let (block_height, block_info) = tables.block_infos_mut().pop_last()?; // Block heights. - tables.block_heights_mut().delete(&block_hash)?; + tables.block_heights_mut().delete(&block_info.block_hash)?; // Block blobs. - // We deserialize the block blob into a `Block`, such - // that we can remove the associated transactions later. - let block_blob = tables.block_blobs_mut().take(&block_height)?.0; - let block = Block::read(&mut block_blob.as_slice())?; + // + // We deserialize the block header blob and mining transaction blob + // to form a `Block`, such that we can remove the associated transactions + // later. + let block_header = tables.block_header_blobs_mut().take(&block_height)?.0; + let block_txs_hashes = tables.block_txs_hashes_mut().take(&block_height)?.0; + let miner_transaction = tables.tx_blobs().get(&block_info.mining_tx_index)?.0; + let block = Block { + header: BlockHeader::read(&mut block_header.as_slice())?, + miner_transaction: Transaction::read(&mut miner_transaction.as_slice())?, + transactions: block_txs_hashes, + }; //------------------------------------------------------ Transaction / Outputs / Key Images remove_tx(&block.miner_transaction.hash(), tables)?; - for tx_hash in &block.transactions { - remove_tx(tx_hash, tables)?; + + let remove_tx_iter = block.transactions.iter().map(|tx_hash| { + let (_, tx) = remove_tx(tx_hash, tables)?; + Ok::<_, RuntimeError>(tx) + }); + + if let Some(chain_id) = move_to_alt_chain { + let txs = remove_tx_iter + .map(|result| { + let tx = result?; + Ok(VerifiedTransactionInformation { + tx_weight: tx.weight(), + tx_blob: tx.serialize(), + tx_hash: tx.hash(), + fee: tx_fee(&tx), + tx, + }) + }) + .collect::, RuntimeError>>()?; + + alt_block::add_alt_block( + &AltBlockInformation { + block: block.clone(), + block_blob: block.serialize(), + txs, + block_hash: block_info.block_hash, + // We know the PoW is valid for this block so just set it so it will always verify as valid. 
+ pow_hash: [0; 32], + height: block_height, + weight: block_info.weight, + long_term_weight: block_info.long_term_weight, + cumulative_difficulty: combine_low_high_bits_to_u128( + block_info.cumulative_difficulty_low, + block_info.cumulative_difficulty_high, + ), + chain_id, + }, + tables, + )?; + } else { + for result in remove_tx_iter { + drop(result?); + } } - Ok((block_height, block_hash, block)) + Ok((block_height, block_info.block_hash, block)) } //---------------------------------------------------------------------------------------------------- `get_block_extended_header_*` @@ -183,31 +245,32 @@ pub fn get_block_extended_header( /// Same as [`get_block_extended_header`] but with a [`BlockHeight`]. #[doc = doc_error!()] -#[allow(clippy::missing_panics_doc)] // The panic is only possible with a corrupt DB +#[expect( + clippy::missing_panics_doc, + reason = "The panic is only possible with a corrupt DB" +)] #[inline] pub fn get_block_extended_header_from_height( block_height: &BlockHeight, tables: &impl Tables, ) -> Result { let block_info = tables.block_infos().get(block_height)?; - let block_blob = tables.block_blobs().get(block_height)?.0; - let block = Block::read(&mut block_blob.as_slice())?; + let block_header_blob = tables.block_header_blobs().get(block_height)?.0; + let block_header = BlockHeader::read(&mut block_header_blob.as_slice())?; let cumulative_difficulty = combine_low_high_bits_to_u128( block_info.cumulative_difficulty_low, block_info.cumulative_difficulty_high, ); - // INVARIANT: #[cfg] @ lib.rs asserts `usize == u64` - #[allow(clippy::cast_possible_truncation)] Ok(ExtendedBlockHeader { cumulative_difficulty, - version: HardFork::from_version(block.header.hardfork_version) + version: HardFork::from_version(block_header.hardfork_version) .expect("Stored block must have a valid hard-fork"), - vote: block.header.hardfork_signal, - timestamp: block.header.timestamp, - block_weight: block_info.weight as usize, - long_term_weight: block_info.long_term_weight as usize, + vote: block_header.hardfork_signal, + timestamp: block_header.timestamp, + block_weight: block_info.weight, + long_term_weight: block_info.long_term_weight, }) } @@ -260,25 +323,21 @@ pub fn block_exists( //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] -#[allow( - clippy::significant_drop_tightening, - clippy::cognitive_complexity, - clippy::too_many_lines -)] +#[expect(clippy::too_many_lines)] mod test { use pretty_assertions::assert_eq; use cuprate_database::{Env, EnvInner, TxRw}; use cuprate_test_utils::data::{BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3}; - use super::*; - use crate::{ ops::tx::{get_tx, tx_exists}, tables::OpenTables, tests::{assert_all_tables_are_empty, tmp_concrete_env, AssertTableLen}, }; + use super::*; + /// Tests all above block functions. /// /// Note that this doesn't test the correctness of values added, as the @@ -330,7 +389,8 @@ mod test { // Assert only the proper tables were added to. 
AssertTableLen { block_infos: 3, - block_blobs: 3, + block_header_blobs: 3, + block_txs_hashes: 3, block_heights: 3, key_images: 69, num_outputs: 41, @@ -413,7 +473,8 @@ mod test { for block_hash in block_hashes.into_iter().rev() { println!("pop_block(): block_hash: {}", hex::encode(block_hash)); - let (_popped_height, popped_hash, _popped_block) = pop_block(&mut tables).unwrap(); + let (_popped_height, popped_hash, _popped_block) = + pop_block(None, &mut tables).unwrap(); assert_eq!(block_hash, popped_hash); diff --git a/storage/blockchain/src/ops/blockchain.rs b/storage/blockchain/src/ops/blockchain.rs index ed368ad..04f8b26 100644 --- a/storage/blockchain/src/ops/blockchain.rs +++ b/storage/blockchain/src/ops/blockchain.rs @@ -25,7 +25,7 @@ use crate::{ pub fn chain_height( table_block_heights: &impl DatabaseRo, ) -> Result { - #[allow(clippy::cast_possible_truncation)] // we enforce 64-bit + #[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")] table_block_heights.len().map(|height| height as usize) } @@ -48,7 +48,7 @@ pub fn top_block_height( ) -> Result { match table_block_heights.len()? { 0 => Err(RuntimeError::KeyNotFound), - #[allow(clippy::cast_possible_truncation)] // we enforce 64-bit + #[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")] height => Ok(height as usize - 1), } } @@ -138,7 +138,8 @@ mod test { // Assert reads are correct. AssertTableLen { block_infos: 3, - block_blobs: 3, + block_header_blobs: 3, + block_txs_hashes: 3, block_heights: 3, key_images: 69, num_outputs: 41, diff --git a/storage/blockchain/src/ops/macros.rs b/storage/blockchain/src/ops/macros.rs index b7cdba4..18ec506 100644 --- a/storage/blockchain/src/ops/macros.rs +++ b/storage/blockchain/src/ops/macros.rs @@ -31,3 +31,25 @@ When calling this function, ensure that either: }; } pub(super) use doc_add_block_inner_invariant; + +/// Generate `# Invariant` documentation for internal alt block `fn`'s +/// that should be called directly with caution. +/// +/// This is pretty much the same as [`doc_add_block_inner_invariant`], +/// it's not worth the effort to reduce the duplication. +macro_rules! doc_add_alt_block_inner_invariant { + () => { + r#"# ⚠️ Invariant ⚠️ +This function mainly exists to be used internally by the parent function [`crate::ops::alt_block::add_alt_block`]. + +`add_alt_block()` makes sure all data related to the input is mutated, while +this function _does not_, it specifically mutates _particular_ tables. + +This is usually undesired - although this function is still available to call directly. + +When calling this function, ensure that either: +1. This effect (incomplete database mutation) is what is desired, or that... +2. ...the other tables will also be mutated to a correct state"# + }; +} +pub(super) use doc_add_alt_block_inner_invariant; diff --git a/storage/blockchain/src/ops/mod.rs b/storage/blockchain/src/ops/mod.rs index 4ff7dff..285aa24 100644 --- a/storage/blockchain/src/ops/mod.rs +++ b/storage/blockchain/src/ops/mod.rs @@ -94,7 +94,7 @@ //! // Read the data, assert it is correct. //! let tx_rw = env_inner.tx_rw()?; //! let mut tables = env_inner.open_tables_mut(&tx_rw)?; -//! let (height, hash, serai_block) = pop_block(&mut tables)?; +//! let (height, hash, serai_block) = pop_block(None, &mut tables)?; //! //! assert_eq!(height, 0); //! assert_eq!(serai_block, block.block); @@ -102,6 +102,7 @@ //! # Ok(()) } //! 
``` +pub mod alt_block; pub mod block; pub mod blockchain; pub mod key_image; diff --git a/storage/blockchain/src/ops/output.rs b/storage/blockchain/src/ops/output.rs index f3453e4..1c7c1d7 100644 --- a/storage/blockchain/src/ops/output.rs +++ b/storage/blockchain/src/ops/output.rs @@ -316,7 +316,8 @@ mod test { // Assert proper tables were added to. AssertTableLen { block_infos: 0, - block_blobs: 0, + block_header_blobs: 0, + block_txs_hashes: 0, block_heights: 0, key_images: 0, num_outputs: 1, diff --git a/storage/blockchain/src/ops/tx.rs b/storage/blockchain/src/ops/tx.rs index e7dbdcf..c9799a2 100644 --- a/storage/blockchain/src/ops/tx.rs +++ b/storage/blockchain/src/ops/tx.rs @@ -366,7 +366,8 @@ mod test { // Assert only the proper tables were added to. AssertTableLen { block_infos: 0, - block_blobs: 0, + block_header_blobs: 0, + block_txs_hashes: 0, block_heights: 0, key_images: 4, // added to key images pruned_tx_blobs: 0, diff --git a/storage/blockchain/src/service/free.rs b/storage/blockchain/src/service/free.rs index 2e7c908..d8a878c 100644 --- a/storage/blockchain/src/service/free.rs +++ b/storage/blockchain/src/service/free.rs @@ -4,11 +4,14 @@ use std::sync::Arc; use cuprate_database::{ConcreteEnv, InitError}; +use cuprate_types::{AltBlockInformation, VerifiedBlockInformation}; -use crate::service::{init_read_service, init_write_service}; use crate::{ config::Config, - service::types::{BlockchainReadHandle, BlockchainWriteHandle}, + service::{ + init_read_service, init_write_service, + types::{BlockchainReadHandle, BlockchainWriteHandle}, + }, }; //---------------------------------------------------------------------------------------------------- Init @@ -81,6 +84,44 @@ pub(super) const fn compact_history_genesis_not_included INITIAL_BLOCKS && !(top_block_height - INITIAL_BLOCKS + 2).is_power_of_two() } +//---------------------------------------------------------------------------------------------------- Map Block +/// Maps [`AltBlockInformation`] to [`VerifiedBlockInformation`] +/// +/// # Panics +/// This will panic if the block is invalid, so should only be used on blocks that have been popped from +/// the main-chain. +pub(super) fn map_valid_alt_block_to_verified_block( + alt_block: AltBlockInformation, +) -> VerifiedBlockInformation { + let total_fees = alt_block.txs.iter().map(|tx| tx.fee).sum::(); + let total_miner_output = alt_block + .block + .miner_transaction + .prefix() + .outputs + .iter() + .map(|out| out.amount.unwrap_or(0)) + .sum::(); + + VerifiedBlockInformation { + block: alt_block.block, + block_blob: alt_block.block_blob, + txs: alt_block + .txs + .into_iter() + .map(TryInto::try_into) + .collect::>() + .unwrap(), + block_hash: alt_block.block_hash, + pow_hash: alt_block.pow_hash, + height: alt_block.height, + generated_coins: total_miner_output - total_fees, + weight: alt_block.weight, + long_term_weight: alt_block.long_term_weight, + cumulative_difficulty: alt_block.cumulative_difficulty, + } +} + //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] diff --git a/storage/blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs index c774ee4..aa322d0 100644 --- a/storage/blockchain/src/service/mod.rs +++ b/storage/blockchain/src/service/mod.rs @@ -98,7 +98,7 @@ //! //! // Block write was OK. //! let response = response_channel.await?; -//! assert_eq!(response, BlockchainResponse::WriteBlockOk); +//! assert_eq!(response, BlockchainResponse::Ok); //! //! 
// Now, let's try getting the block hash //! // of the block we just wrote. diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index 207da41..b0e7e04 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -8,6 +8,7 @@ use std::{ use rayon::{ iter::{IntoParallelIterator, ParallelIterator}, + prelude::*, ThreadPool, }; use thread_local::ThreadLocal; @@ -17,11 +18,15 @@ use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThre use cuprate_helper::map::combine_low_high_bits_to_u128; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse}, - Chain, ExtendedBlockHeader, OutputOnChain, + Chain, ChainId, ExtendedBlockHeader, OutputOnChain, }; use crate::{ ops::{ + alt_block::{ + get_alt_block, get_alt_block_extended_header_from_height, get_alt_block_hash, + get_alt_chain_history_ranges, + }, block::{ block_exists, get_block_extended_header_from_height, get_block_height, get_block_info, }, @@ -33,8 +38,10 @@ use crate::{ free::{compact_history_genesis_not_included, compact_history_index_to_height_offset}, types::{BlockchainReadHandle, ResponseResult}, }, - tables::{BlockHeights, BlockInfos, OpenTables, Tables}, - types::{Amount, AmountIndex, BlockHash, BlockHeight, KeyImage, PreRctOutputId}, + tables::{AltBlockHeights, BlockHeights, BlockInfos, OpenTables, Tables}, + types::{ + AltBlockHeight, Amount, AmountIndex, BlockHash, BlockHeight, KeyImage, PreRctOutputId, + }, }; //---------------------------------------------------------------------------------------------------- init_read_service @@ -87,7 +94,7 @@ fn map_request( match request { R::BlockExtendedHeader(block) => block_extended_header(env, block), R::BlockHash(block, chain) => block_hash(env, block, chain), - R::FindBlock(_) => todo!("Add alt blocks to DB"), + R::FindBlock(block_hash) => find_block(env, block_hash), R::FilterUnknownHashes(hashes) => filter_unknown_hashes(env, hashes), R::BlockExtendedHeaderInRange(range, chain) => { block_extended_header_in_range(env, range, chain) @@ -99,6 +106,7 @@ fn map_request( R::KeyImagesSpent(set) => key_images_spent(env, set), R::CompactChainHistory => compact_chain_history(env), R::FindFirstUnknown(block_ids) => find_first_unknown(env, &block_ids), + R::AltBlocksInChain(chain_id) => alt_blocks_in_chain(env, chain_id), } /* SOMEDAY: post-request handling, run some code for each request? */ @@ -142,7 +150,6 @@ fn thread_local(env: &impl Env) -> ThreadLocal { macro_rules! get_tables { ($env_inner:ident, $tx_ro:ident, $tables:ident) => {{ $tables.get_or_try(|| { - #[allow(clippy::significant_drop_in_scrutinee)] match $env_inner.open_tables($tx_ro) { // SAFETY: see above macro doc comment. Ok(tables) => Ok(unsafe { crate::unsafe_sendable::UnsafeSendable::new(tables) }), @@ -198,12 +205,41 @@ fn block_hash(env: &ConcreteEnv, block_height: BlockHeight, chain: Chain) -> Res let block_hash = match chain { Chain::Main => get_block_info(&block_height, &table_block_infos)?.block_hash, - Chain::Alt(_) => todo!("Add alt blocks to DB"), + Chain::Alt(chain) => { + get_alt_block_hash(&block_height, chain, &env_inner.open_tables(&tx_ro)?)? + } }; Ok(BlockchainResponse::BlockHash(block_hash)) } +/// [`BlockchainReadRequest::FindBlock`] +fn find_block(env: &ConcreteEnv, block_hash: BlockHash) -> ResponseResult { + // Single-threaded, no `ThreadLocal` required. 
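Review note on the new `find_block` handler opening above (its body continues below): it resolves a hash by checking the main-chain `BlockHeights` table first and only falls through to `AltBlockHeights` on `KeyNotFound`. A minimal sketch of that fallback shape, with `HashMap`s standing in for the real `DatabaseRo` tables; every name here is illustrative, not the crate's API:

```rust
use std::collections::HashMap;

// Stand-ins for the real `Chain`/`ChainId` types.
#[derive(Debug, PartialEq)]
enum Chain {
    Main,
    Alt(u64),
}

/// Main-chain table first, alt table second, mirroring `find_block`.
fn find_block(
    main_heights: &HashMap<[u8; 32], usize>,
    alt_heights: &HashMap<[u8; 32], (u64, usize)>, // (chain id, height)
    block_hash: &[u8; 32],
) -> Option<(Chain, usize)> {
    if let Some(&height) = main_heights.get(block_hash) {
        // Found on the main chain; no alt lookup needed.
        return Some((Chain::Main, height));
    }
    // Equivalent of the `Err(RuntimeError::KeyNotFound) => ()` arm:
    // a miss on the main chain is not an error, just fall through.
    alt_heights
        .get(block_hash)
        .map(|&(chain_id, height)| (Chain::Alt(chain_id), height))
}

fn main() {
    let main_heights = HashMap::from([([1; 32], 5)]);
    let alt_heights = HashMap::from([([2; 32], (7, 6))]);
    assert_eq!(find_block(&main_heights, &alt_heights, &[1; 32]), Some((Chain::Main, 5)));
    assert_eq!(find_block(&main_heights, &alt_heights, &[2; 32]), Some((Chain::Alt(7), 6)));
    assert_eq!(find_block(&main_heights, &alt_heights, &[3; 32]), None);
}
```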
+ let env_inner = env.env_inner(); + let tx_ro = env_inner.tx_ro()?; + + let table_block_heights = env_inner.open_db_ro::(&tx_ro)?; + + // Check the main chain first. + match table_block_heights.get(&block_hash) { + Ok(height) => return Ok(BlockchainResponse::FindBlock(Some((Chain::Main, height)))), + Err(RuntimeError::KeyNotFound) => (), + Err(e) => return Err(e), + } + + let table_alt_block_heights = env_inner.open_db_ro::(&tx_ro)?; + + match table_alt_block_heights.get(&block_hash) { + Ok(height) => Ok(BlockchainResponse::FindBlock(Some(( + Chain::Alt(height.chain_id.into()), + height.height, + )))), + Err(RuntimeError::KeyNotFound) => Ok(BlockchainResponse::FindBlock(None)), + Err(e) => Err(e), + } +} + /// [`BlockchainReadRequest::FilterUnknownHashes`]. #[inline] fn filter_unknown_hashes(env: &ConcreteEnv, mut hashes: HashSet) -> ResponseResult { @@ -254,7 +290,37 @@ fn block_extended_header_in_range( get_block_extended_header_from_height(&block_height, tables) }) .collect::, RuntimeError>>()?, - Chain::Alt(_) => todo!("Add alt blocks to DB"), + Chain::Alt(chain_id) => { + let ranges = { + let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; + let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); + let alt_chains = tables.alt_chain_infos(); + + get_alt_chain_history_ranges(range, chain_id, alt_chains)? + }; + + ranges + .par_iter() + .rev() + .flat_map(|(chain, range)| { + range.clone().into_par_iter().map(|height| { + let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; + let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); + + match *chain { + Chain::Main => get_block_extended_header_from_height(&height, tables), + Chain::Alt(chain_id) => get_alt_block_extended_header_from_height( + &AltBlockHeight { + chain_id: chain_id.into(), + height, + }, + tables, + ), + } + }) + }) + .collect::, _>>()? + } }; Ok(BlockchainResponse::BlockExtendedHeaderInRange(vec)) @@ -339,8 +405,10 @@ fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec) -> Respon let tables = thread_local(env); // Cache the amount of RCT outputs once. - // INVARIANT: #[cfg] @ lib.rs asserts `usize == u64` - #[allow(clippy::cast_possible_truncation)] + #[expect( + clippy::cast_possible_truncation, + reason = "INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`" + )] let num_rct_outputs = { let tx_ro = env_inner.tx_ro()?; let tables = env_inner.open_tables(&tx_ro)?; @@ -360,8 +428,10 @@ fn number_outputs_with_amount(env: &ConcreteEnv, amounts: Vec) -> Respon } else { // v1 transactions. match tables.num_outputs().get(&amount) { - // INVARIANT: #[cfg] @ lib.rs asserts `usize == u64` - #[allow(clippy::cast_possible_truncation)] + #[expect( + clippy::cast_possible_truncation, + reason = "INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`" + )] Ok(count) => Ok((amount, count as usize)), // If we get a request for an `amount` that doesn't exist, // we return `0` instead of an error. @@ -489,3 +559,45 @@ fn find_first_unknown(env: &ConcreteEnv, block_ids: &[BlockHash]) -> ResponseRes BlockchainResponse::FindFirstUnknown(Some((idx, last_known_height + 1))) }) } + +/// [`BlockchainReadRequest::AltBlocksInChain`] +fn alt_blocks_in_chain(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult { + // Prepare tx/tables in `ThreadLocal`. + let env_inner = env.env_inner(); + let tx_ro = thread_local(env); + let tables = thread_local(env); + + // Get the history of this alt-chain. 
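The `thread_local(env)` + `get_tables!` pairing used by the handlers above gives each rayon worker its own lazily-opened read transaction instead of opening one per item. A toy sketch of that pattern using the `rayon` and `thread_local` crates; the `String` stands in for a real `tx_ro`:

```rust
use rayon::prelude::*;
use thread_local::ThreadLocal;

fn main() {
    // One lazily-created "read transaction" per rayon worker thread.
    let tls: ThreadLocal<String> = ThreadLocal::new();

    let processed: usize = (0..1_000)
        .into_par_iter()
        .map(|_| {
            // `get_or` initializes at most once per thread; subsequent
            // items on the same worker reuse the cached value.
            let _tx = tls.get_or(|| {
                println!("opening a tx on {:?}", std::thread::current().id());
                String::from("tx_ro")
            });
            1
        })
        .sum();

    assert_eq!(processed, 1_000);
    // Far fewer "transactions" were opened than items processed.
    println!("threads used: {}", tls.iter().count());
}
```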
+ let history = { + let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; + let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); + get_alt_chain_history_ranges(0..usize::MAX, chain_id, tables.alt_chain_infos())? + }; + + // Get all the blocks until we join the main-chain. + let blocks = history + .par_iter() + .rev() + .skip(1) + .flat_map(|(chain_id, range)| { + let Chain::Alt(chain_id) = chain_id else { + panic!("Should not have main chain blocks here, we skipped the last range"); + }; + + range.clone().into_par_iter().map(|height| { + let tx_ro = tx_ro.get_or_try(|| env_inner.tx_ro())?; + let tables = get_tables!(env_inner, tx_ro, tables)?.as_ref(); + + get_alt_block( + &AltBlockHeight { + chain_id: (*chain_id).into(), + height, + }, + tables, + ) + }) + }) + .collect::<Result<Vec<_>, _>>()?; + + Ok(BlockchainResponse::AltBlocksInChain(blocks)) +} diff --git a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs index b68b544..719f361 100644 --- a/storage/blockchain/src/service/tests.rs +++ b/storage/blockchain/src/service/tests.rs @@ -13,13 +13,14 @@ use std::{ }; use pretty_assertions::assert_eq; +use rand::Rng; use tower::{Service, ServiceExt}; use cuprate_database::{ConcreteEnv, DatabaseIter, DatabaseRo, Env, EnvInner, RuntimeError}; use cuprate_test_utils::data::{BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3}; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest}, - Chain, OutputOnChain, VerifiedBlockInformation, + Chain, ChainId, OutputOnChain, VerifiedBlockInformation, }; use crate::{ @@ -31,7 +32,7 @@ use crate::{ }, service::{init, BlockchainReadHandle, BlockchainWriteHandle}, tables::{OpenTables, Tables, TablesIter}, - tests::AssertTableLen, + tests::{map_verified_block_to_alt, AssertTableLen}, types::{Amount, AmountIndex, PreRctOutputId}, }; @@ -58,7 +59,10 @@ fn init_service() -> ( /// - Receive response(s) /// - Assert proper tables were mutated /// - Assert read requests lead to expected responses -#[allow(clippy::future_not_send)] // INVARIANT: tests are using a single threaded runtime +#[expect( + clippy::future_not_send, + reason = "INVARIANT: tests are using a single threaded runtime" +)] async fn test_template( // Which block(s) to add? blocks: &[&VerifiedBlockInformation], @@ -84,7 +88,7 @@ async fn test_template( let request = BlockchainWriteRequest::WriteBlock(block); let response_channel = writer.call(request); let response = response_channel.await.unwrap(); - assert_eq!(response, BlockchainResponse::WriteBlockOk); + assert_eq!(response, BlockchainResponse::Ok); } //----------------------------------------------------------------------- Reset the transaction @@ -164,8 +168,10 @@ async fn test_template( num_req .iter() .map(|amount| match tables.num_outputs().get(amount) { - // INVARIANT: #[cfg] @ lib.rs asserts `usize == u64` - #[allow(clippy::cast_possible_truncation)] + #[expect( + clippy::cast_possible_truncation, + reason = "INVARIANT: #[cfg] @ lib.rs asserts `usize == u64`" + )] Ok(count) => (*amount, count as usize), Err(RuntimeError::KeyNotFound) => (*amount, 0), Err(e) => panic!("{e:?}"), @@ -235,42 +241,38 @@ async fn test_template( //----------------------------------------------------------------------- Output checks // Create the map of amounts and amount indices.
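On the `.rev().skip(1)` in `alt_blocks_in_chain` above: assuming `get_alt_chain_history_ranges` returns ranges newest-chain-first with the shared main-chain range last, reversing yields oldest-first order and `skip(1)` drops exactly the main-chain prefix, which is why the `let Chain::Alt(..) else panic!(..)` arm should be unreachable. A schematic with invented ranges and chain IDs:

```rust
// Schematic only; the ranges, IDs, and ordering are assumptions for
// illustration, not values produced by the real crate.
#[derive(Debug, PartialEq)]
enum Chain {
    Main,
    Alt(u64),
}

fn main() {
    // Assumed shape of `history`: requested chain first, ancestors after,
    // ending with the shared main-chain range.
    let history = vec![
        (Chain::Alt(2), 7..10), // blocks unique to chain 2
        (Chain::Alt(1), 4..7),  // blocks inherited from parent chain 1
        (Chain::Main, 0..4),    // shared main-chain prefix
    ];

    // Oldest-first, with the main-chain prefix dropped by `skip(1)`.
    for (chain, range) in history.iter().rev().skip(1) {
        assert!(matches!(chain, Chain::Alt(_)));
        println!("{chain:?}: {range:?}");
    }
}
```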
- // - // FIXME: There's definitely a better way to map - // `Vec` -> `HashMap>` let (map, output_count) = { - let mut ids = tables - .outputs_iter() - .keys() - .unwrap() - .map(Result::unwrap) - .collect::>(); - - ids.extend( - tables - .rct_outputs_iter() - .keys() - .unwrap() - .map(Result::unwrap) - .map(|amount_index| PreRctOutputId { - amount: 0, - amount_index, - }), - ); + let mut map = HashMap::>::new(); // Used later to compare the amount of Outputs // returned in the Response is equal to the amount // we asked for. - let output_count = ids.len(); + let mut output_count: usize = 0; - let mut map = HashMap::>::new(); - for id in ids { - map.entry(id.amount) - .and_modify(|set| { - set.insert(id.amount_index); - }) - .or_insert_with(|| HashSet::from([id.amount_index])); - } + tables + .outputs_iter() + .keys() + .unwrap() + .map(Result::unwrap) + .chain( + tables + .rct_outputs_iter() + .keys() + .unwrap() + .map(Result::unwrap) + .map(|amount_index| PreRctOutputId { + amount: 0, + amount_index, + }), + ) + .for_each(|id| { + output_count += 1; + map.entry(id.amount) + .and_modify(|set| { + set.insert(id.amount_index); + }) + .or_insert_with(|| HashSet::from([id.amount_index])); + }); (map, output_count) }; @@ -304,7 +306,10 @@ async fn test_template( // Assert we get back the same map of // `Amount`'s and `AmountIndex`'s. let mut response_output_count = 0; - #[allow(clippy::iter_over_hash_type)] // order doesn't matter in this test + #[expect( + clippy::iter_over_hash_type, + reason = "order doesn't matter in this test" + )] for (amount, output_map) in response { let amount_index_set = &map[&amount]; @@ -338,7 +343,8 @@ async fn v1_tx2() { 14_535_350_982_449, AssertTableLen { block_infos: 1, - block_blobs: 1, + block_header_blobs: 1, + block_txs_hashes: 1, block_heights: 1, key_images: 65, num_outputs: 41, @@ -364,7 +370,8 @@ async fn v9_tx3() { 3_403_774_022_163, AssertTableLen { block_infos: 1, - block_blobs: 1, + block_header_blobs: 1, + block_txs_hashes: 1, block_heights: 1, key_images: 4, num_outputs: 0, @@ -390,7 +397,8 @@ async fn v16_tx0() { 600_000_000_000, AssertTableLen { block_infos: 1, - block_blobs: 1, + block_header_blobs: 1, + block_txs_hashes: 1, block_heights: 1, key_images: 0, num_outputs: 0, @@ -407,3 +415,92 @@ async fn v16_tx0() { ) .await; } + +/// Tests the alt-chain requests and responses. +#[tokio::test] +async fn alt_chain_requests() { + let (reader, mut writer, _, _tempdir) = init_service(); + + // Set up the test by adding blocks to the main-chain. + for (i, mut block) in [BLOCK_V9_TX3.clone(), BLOCK_V16_TX0.clone()] + .into_iter() + .enumerate() + { + block.height = i; + + let request = BlockchainWriteRequest::WriteBlock(block); + writer.call(request).await.unwrap(); + } + + // Generate the alt-blocks. + let mut prev_hash = BLOCK_V9_TX3.block_hash; + let mut chain_id = 1; + let alt_blocks = [&BLOCK_V16_TX0, &BLOCK_V9_TX3, &BLOCK_V1_TX2] + .into_iter() + .enumerate() + .map(|(i, block)| { + let mut block = (**block).clone(); + block.height = i + 1; + block.block.header.previous = prev_hash; + block.block_blob = block.block.serialize(); + + prev_hash = block.block_hash; + // Randomly either keep the [`ChainId`] the same or change it to a new value. + chain_id += rand::thread_rng().gen_range(0..=1); + + map_verified_block_to_alt(block, ChainId(chain_id.try_into().unwrap())) + }) + .collect::>(); + + for block in &alt_blocks { + // Request a block to be written, assert it was written. 
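The `ChainId(chain_id.try_into().unwrap())` in the test above is needed because `ChainId` now wraps `NonZero<u64>` (see the `types/src/types.rs` hunk at the end of this diff); starting the counter at `1` keeps the conversion infallible. Illustrative mirror only:

```rust
use std::num::NonZero;

// Re-declared mirror of the new `ChainId`, purely for illustration.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
struct ChainId(pub NonZero<u64>);

fn main() {
    let chain_id: u64 = 1; // the test's counter starts at 1, never 0
    let id = ChainId(chain_id.try_into().unwrap()); // `TryFrom<u64>` rejects 0
    assert_eq!(id.0.get(), 1);
}
```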
+ let request = BlockchainWriteRequest::WriteAltBlock(block.clone()); + let response_channel = writer.call(request); + let response = response_channel.await.unwrap(); + assert_eq!(response, BlockchainResponse::Ok); + } + + // Get the full alt-chain + let request = BlockchainReadRequest::AltBlocksInChain(ChainId(chain_id.try_into().unwrap())); + let response = reader.clone().oneshot(request).await.unwrap(); + + let BlockchainResponse::AltBlocksInChain(blocks) = response else { + panic!("Wrong response type was returned"); + }; + + assert_eq!(blocks.len(), alt_blocks.len()); + for (got_block, alt_block) in blocks.into_iter().zip(alt_blocks) { + assert_eq!(got_block.block_blob, alt_block.block_blob); + assert_eq!(got_block.block_hash, alt_block.block_hash); + assert_eq!(got_block.chain_id, alt_block.chain_id); + assert_eq!(got_block.txs, alt_block.txs); + } + + // Flush all alt blocks. + let request = BlockchainWriteRequest::FlushAltBlocks; + let response = writer.ready().await.unwrap().call(request).await.unwrap(); + assert_eq!(response, BlockchainResponse::Ok); + + // Pop blocks from the main chain + let request = BlockchainWriteRequest::PopBlocks(1); + let response = writer.ready().await.unwrap().call(request).await.unwrap(); + + let BlockchainResponse::PopBlocks(old_main_chain_id) = response else { + panic!("Wrong response type was returned"); + }; + + // Check we have popped the top block. + let request = BlockchainReadRequest::ChainHeight; + let response = reader.clone().oneshot(request).await.unwrap(); + assert!(matches!(response, BlockchainResponse::ChainHeight(1, _))); + + // Attempt to add the popped block back. + let request = BlockchainWriteRequest::ReverseReorg(old_main_chain_id); + let response = writer.ready().await.unwrap().call(request).await.unwrap(); + assert_eq!(response, BlockchainResponse::Ok); + + // Check we have the popped block back. + let request = BlockchainReadRequest::ChainHeight; + let response = reader.clone().oneshot(request).await.unwrap(); + assert!(matches!(response, BlockchainResponse::ChainHeight(2, _))); +} diff --git a/storage/blockchain/src/service/write.rs b/storage/blockchain/src/service/write.rs index 816afc4..07162d2 100644 --- a/storage/blockchain/src/service/write.rs +++ b/storage/blockchain/src/service/write.rs @@ -1,20 +1,30 @@ //! Database writer thread definitions and logic. - //---------------------------------------------------------------------------------------------------- Import use std::sync::Arc; -use cuprate_database::{ConcreteEnv, Env, EnvInner, RuntimeError, TxRw}; +use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError, TxRw}; use cuprate_database_service::DatabaseWriteHandle; use cuprate_types::{ blockchain::{BlockchainResponse, BlockchainWriteRequest}, - VerifiedBlockInformation, + AltBlockInformation, Chain, ChainId, VerifiedBlockInformation, }; use crate::{ - service::types::{BlockchainWriteHandle, ResponseResult}, - tables::OpenTables, + service::{ + free::map_valid_alt_block_to_verified_block, + types::{BlockchainWriteHandle, ResponseResult}, + }, + tables::{OpenTables, Tables}, + types::AltBlockHeight, }; +/// Write functions within this module abort if the write transaction +/// could not be aborted successfully to maintain atomicity. +/// +/// This is the panic message if the `abort()` fails. 
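The doc comment above introduces `TX_RW_ABORT_FAIL` (defined just below), the shared panic message for every write handler in this file: each handler commits on success, aborts on error, and panics only if the abort itself fails, since at that point atomicity can no longer be guaranteed. The shared shape, sketched with placeholder types rather than the real `cuprate_database` ones:

```rust
const TX_RW_ABORT_FAIL: &str =
    "Could not maintain blockchain database atomicity by aborting write transaction";

// Placeholder transaction type; the real code uses `cuprate_database::TxRw`.
struct TxRw;
impl TxRw {
    fn commit(self) -> Result<(), String> { Ok(()) }
    fn abort(self) -> Result<(), String> { Ok(()) }
}

fn handle(tx_rw: TxRw, result: Result<(), String>) -> Result<(), String> {
    match result {
        Ok(()) => {
            tx_rw.commit()?; // persist all table mutations at once
            Ok(())
        }
        Err(e) => {
            // Roll back partial writes; a failed rollback is unrecoverable.
            tx_rw.abort().expect(TX_RW_ABORT_FAIL);
            Err(e)
        }
    }
}

fn main() {
    assert!(handle(TxRw, Ok(())).is_ok());
    assert!(handle(TxRw, Err("add_block failed".into())).is_err());
}
```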
+const TX_RW_ABORT_FAIL: &str = + "Could not maintain blockchain database atomicity by aborting write transaction"; + //---------------------------------------------------------------------------------------------------- init_write_service /// Initialize the blockchain write service from a [`ConcreteEnv`]. pub fn init_write_service(env: Arc) -> BlockchainWriteHandle { @@ -29,6 +39,12 @@ fn handle_blockchain_request( ) -> Result { match req { BlockchainWriteRequest::WriteBlock(block) => write_block(env, block), + BlockchainWriteRequest::WriteAltBlock(alt_block) => write_alt_block(env, alt_block), + BlockchainWriteRequest::PopBlocks(numb_blocks) => pop_blocks(env, *numb_blocks), + BlockchainWriteRequest::ReverseReorg(old_main_chain_id) => { + reverse_reorg(env, *old_main_chain_id) + } + BlockchainWriteRequest::FlushAltBlocks => flush_alt_blocks(env), } } @@ -55,13 +71,140 @@ fn write_block(env: &ConcreteEnv, block: &VerifiedBlockInformation) -> ResponseR match result { Ok(()) => { TxRw::commit(tx_rw)?; - Ok(BlockchainResponse::WriteBlockOk) + Ok(BlockchainResponse::Ok) } Err(e) => { - // INVARIANT: ensure database atomicity by aborting - // the transaction on `add_block()` failures. - TxRw::abort(tx_rw) - .expect("could not maintain database atomicity by aborting write transaction"); + TxRw::abort(tx_rw).expect(TX_RW_ABORT_FAIL); + Err(e) + } + } +} + +/// [`BlockchainWriteRequest::WriteAltBlock`]. +#[inline] +fn write_alt_block(env: &ConcreteEnv, block: &AltBlockInformation) -> ResponseResult { + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw()?; + + let result = { + let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?; + crate::ops::alt_block::add_alt_block(block, &mut tables_mut) + }; + + match result { + Ok(()) => { + TxRw::commit(tx_rw)?; + Ok(BlockchainResponse::Ok) + } + Err(e) => { + TxRw::abort(tx_rw).expect(TX_RW_ABORT_FAIL); + Err(e) + } + } +} + +/// [`BlockchainWriteRequest::PopBlocks`]. +fn pop_blocks(env: &ConcreteEnv, numb_blocks: usize) -> ResponseResult { + let env_inner = env.env_inner(); + let mut tx_rw = env_inner.tx_rw()?; + + // FIXME: turn this function into a try block once stable. + let mut result = || { + // flush all the current alt blocks as they may reference blocks to be popped. + crate::ops::alt_block::flush_alt_blocks(&env_inner, &mut tx_rw)?; + + let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?; + // generate a `ChainId` for the popped blocks. + let old_main_chain_id = ChainId(rand::random()); + + // pop the blocks + for _ in 0..numb_blocks { + crate::ops::block::pop_block(Some(old_main_chain_id), &mut tables_mut)?; + } + + Ok(old_main_chain_id) + }; + + match result() { + Ok(old_main_chain_id) => { + TxRw::commit(tx_rw)?; + Ok(BlockchainResponse::PopBlocks(old_main_chain_id)) + } + Err(e) => { + TxRw::abort(tx_rw).expect(TX_RW_ABORT_FAIL); + Err(e) + } + } +} + +/// [`BlockchainWriteRequest::ReverseReorg`]. +fn reverse_reorg(env: &ConcreteEnv, chain_id: ChainId) -> ResponseResult { + let env_inner = env.env_inner(); + let mut tx_rw = env_inner.tx_rw()?; + + // FIXME: turn this function into a try block once stable. + let mut result = || { + let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?; + + let chain_info = tables_mut.alt_chain_infos().get(&chain_id.into())?; + // Although this doesn't guarantee the chain was popped from the main-chain, it's an easy + // thing for us to check. 
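On `ChainId(rand::random())` in `pop_blocks` above: since `ChainId` wraps `NonZero<u64>`, this relies on `rand`'s `Standard` distribution being implemented for the `NonZero` integer types, which samples a uniform nonzero value, so the invalid `0` ID can never be produced. A one-liner demonstrating that (assumes `rand` 0.8 as a dependency):

```rust
use std::num::NonZeroU64;

fn main() {
    // Sampling the NonZero type directly can never yield 0,
    // so the resulting ChainId is valid by construction.
    let id: NonZeroU64 = rand::random();
    println!("popped blocks will be tagged with alt chain id {id}");
}
```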
+ assert_eq!(Chain::from(chain_info.parent_chain), Chain::Main); + + let top_block_height = + crate::ops::blockchain::top_block_height(tables_mut.block_heights())?; + + // pop any blocks that were added as part of a re-org. + for _ in chain_info.common_ancestor_height..top_block_height { + crate::ops::block::pop_block(None, &mut tables_mut)?; + } + + // Add the old main chain blocks back to the main chain. + for height in (chain_info.common_ancestor_height + 1)..chain_info.chain_height { + let alt_block = crate::ops::alt_block::get_alt_block( + &AltBlockHeight { + chain_id: chain_id.into(), + height, + }, + &tables_mut, + )?; + let verified_block = map_valid_alt_block_to_verified_block(alt_block); + crate::ops::block::add_block(&verified_block, &mut tables_mut)?; + } + + drop(tables_mut); + crate::ops::alt_block::flush_alt_blocks(&env_inner, &mut tx_rw)?; + + Ok(()) + }; + + match result() { + Ok(()) => { + TxRw::commit(tx_rw)?; + Ok(BlockchainResponse::Ok) + } + Err(e) => { + TxRw::abort(tx_rw).expect(TX_RW_ABORT_FAIL); + Err(e) + } + } +} + +/// [`BlockchainWriteRequest::FlushAltBlocks`]. +#[inline] +fn flush_alt_blocks(env: &ConcreteEnv) -> ResponseResult { + let env_inner = env.env_inner(); + let mut tx_rw = env_inner.tx_rw()?; + + let result = crate::ops::alt_block::flush_alt_blocks(&env_inner, &mut tx_rw); + + match result { + Ok(()) => { + TxRw::commit(tx_rw)?; + Ok(BlockchainResponse::Ok) + } + Err(e) => { + TxRw::abort(tx_rw).expect(TX_RW_ABORT_FAIL); Err(e) } } diff --git a/storage/blockchain/src/tables.rs b/storage/blockchain/src/tables.rs index 122ac31..b9fc5ed 100644 --- a/storage/blockchain/src/tables.rs +++ b/storage/blockchain/src/tables.rs @@ -9,7 +9,7 @@ //! Table structs are `CamelCase`, and their static string //! names used by the actual database backend are `snake_case`. //! -//! For example: [`BlockBlobs`] -> `block_blobs`. +//! For example: [`BlockHeaderBlobs`] -> `block_header_blobs`. //! //! # Traits //! This module also contains a set of traits for //---------------------------------------------------------------------------------------------------- Import use crate::types::{ - Amount, AmountIndex, AmountIndices, BlockBlob, BlockHash, BlockHeight, BlockInfo, KeyImage, - Output, PreRctOutputId, PrunableBlob, PrunableHash, PrunedBlob, RctOutput, TxBlob, TxHash, - TxId, UnlockTime, + AltBlockHeight, AltChainInfo, AltTransactionInfo, Amount, AmountIndex, AmountIndices, + BlockBlob, BlockHash, BlockHeaderBlob, BlockHeight, BlockInfo, BlockTxHashes, + CompactAltBlockInfo, KeyImage, Output, PreRctOutputId, PrunableBlob, PrunableHash, PrunedBlob, + RawChainId, RctOutput, TxBlob, TxHash, TxId, UnlockTime, }; //---------------------------------------------------------------------------------------------------- Tables // - If adding/changing a table also edit: // - the tests in `src/backend/tests.rs` cuprate_database::define_tables! { - /// Serialized block blobs (bytes). + /// Serialized block header blobs (bytes). /// - /// Contains the serialized version of all blocks. - 0 => BlockBlobs, - BlockHeight => BlockBlob, + /// Contains the serialized version of all block headers. + 0 => BlockHeaderBlobs, + BlockHeight => BlockHeaderBlob, + + /// Block transaction hashes. + /// + /// Contains all the transaction hashes of all blocks. + 1 => BlockTxsHashes, + BlockHeight => BlockTxHashes, /// Block heights. /// /// Contains the height of all blocks.
- 1 => BlockHeights, + 2 => BlockHeights, BlockHash => BlockHeight, /// Block information. /// /// Contains metadata of all blocks. - 2 => BlockInfos, + 3 => BlockInfos, BlockHeight => BlockInfo, /// Set of key images. @@ -53,38 +60,38 @@ cuprate_database::define_tables! { /// /// This table has `()` as the value type, as in, /// it is a set of key images. - 3 => KeyImages, + 4 => KeyImages, KeyImage => (), /// Maps an output's amount to the number of outputs with that amount. /// /// For example, if there are 5 outputs with `amount = 123` /// then calling `get(123)` on this table will return 5. - 4 => NumOutputs, + 5 => NumOutputs, Amount => u64, /// Pre-RCT output data. - 5 => Outputs, + 6 => Outputs, PreRctOutputId => Output, /// Pruned transaction blobs (bytes). /// /// Contains the pruned portion of serialized transaction data. - 6 => PrunedTxBlobs, + 7 => PrunedTxBlobs, TxId => PrunedBlob, /// Prunable transaction blobs (bytes). /// /// Contains the prunable portion of serialized transaction data. // SOMEDAY: impl when `monero-serai` supports pruning - 7 => PrunableTxBlobs, + 8 => PrunableTxBlobs, TxId => PrunableBlob, /// Prunable transaction hashes. /// /// Contains the prunable portion of transaction hashes. // SOMEDAY: impl when `monero-serai` supports pruning - 8 => PrunableHashes, + 9 => PrunableHashes, TxId => PrunableHash, // SOMEDAY: impl a properties table: @@ -94,41 +101,75 @@ cuprate_database::define_tables! { // StorableString => StorableVec, /// RCT output data. - 9 => RctOutputs, + 10 => RctOutputs, AmountIndex => RctOutput, /// Transaction blobs (bytes). /// /// Contains the serialized version of all transactions. // SOMEDAY: remove when `monero-serai` supports pruning - 10 => TxBlobs, + 11 => TxBlobs, TxId => TxBlob, /// Transaction indices. /// /// Contains the indices all transactions. - 11 => TxIds, + 12 => TxIds, TxHash => TxId, /// Transaction heights. /// /// Contains the block height associated with all transactions. - 12 => TxHeights, + 13 => TxHeights, TxId => BlockHeight, /// Transaction outputs. /// /// Contains the list of `AmountIndex`'s of the /// outputs associated with all transactions. - 13 => TxOutputs, + 14 => TxOutputs, TxId => AmountIndices, /// Transaction unlock time. /// /// Contains the unlock time of transactions IF they have one. /// Transactions without unlock times will not exist in this table. - 14 => TxUnlockTime, + 15 => TxUnlockTime, TxId => UnlockTime, + + /// Information on alt-chains. + 16 => AltChainInfos, + RawChainId => AltChainInfo, + + /// Alt-block heights. + /// + /// Contains the height of all alt-blocks. + 17 => AltBlockHeights, + BlockHash => AltBlockHeight, + + /// Alt-block information. + /// + /// Contains information on all alt-blocks. + 18 => AltBlocksInfo, + AltBlockHeight => CompactAltBlockInfo, + + /// Alt-block blobs. + /// + /// Contains the raw bytes of all alt-blocks. + 19 => AltBlockBlobs, + AltBlockHeight => BlockBlob, + + /// Alt-block transaction blobs. + /// + /// Contains the raw bytes of alt transactions, if those transactions are not in the main-chain. + 20 => AltTransactionBlobs, + TxHash => TxBlob, + + /// Alt-block transaction information. + /// + /// Contains information on all alt transactions, even if they are in the main-chain. 
+ 21 => AltTransactionInfos, + TxHash => AltTransactionInfo, } //---------------------------------------------------------------------------------------------------- Tests diff --git a/storage/blockchain/src/tests.rs b/storage/blockchain/src/tests.rs index 65527e1..1fe2063 100644 --- a/storage/blockchain/src/tests.rs +++ b/storage/blockchain/src/tests.rs @@ -9,7 +9,8 @@ use std::{borrow::Cow, fmt::Debug}; use pretty_assertions::assert_eq; -use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner}; +use cuprate_database::{DatabaseRo, Env, EnvInner}; +use cuprate_types::{AltBlockInformation, ChainId, VerifiedBlockInformation}; use crate::{ config::ConfigBuilder, @@ -25,7 +26,8 @@ use crate::{ #[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub(crate) struct AssertTableLen { pub(crate) block_infos: u64, - pub(crate) block_blobs: u64, + pub(crate) block_header_blobs: u64, + pub(crate) block_txs_hashes: u64, pub(crate) block_heights: u64, pub(crate) key_images: u64, pub(crate) num_outputs: u64, @@ -45,7 +47,8 @@ impl AssertTableLen { pub(crate) fn assert(self, tables: &impl Tables) { let other = Self { block_infos: tables.block_infos().len().unwrap(), - block_blobs: tables.block_blobs().len().unwrap(), + block_header_blobs: tables.block_header_blobs().len().unwrap(), + block_txs_hashes: tables.block_txs_hashes().len().unwrap(), block_heights: tables.block_heights().len().unwrap(), key_images: tables.key_images().len().unwrap(), num_outputs: tables.num_outputs().len().unwrap(), @@ -68,8 +71,7 @@ impl AssertTableLen { /// Create an `Env` in a temporarily directory. /// The directory is automatically removed after the `TempDir` is dropped. /// -/// FIXME: changing this to `-> impl Env` causes lifetime errors... -pub(crate) fn tmp_concrete_env() -> (ConcreteEnv, tempfile::TempDir) { +pub(crate) fn tmp_concrete_env() -> (impl Env, tempfile::TempDir) { let tempdir = tempfile::tempdir().unwrap(); let config = ConfigBuilder::new() .db_directory(Cow::Owned(tempdir.path().into())) @@ -81,10 +83,28 @@ pub(crate) fn tmp_concrete_env() -> (ConcreteEnv, tempfile::TempDir) { } /// Assert all the tables in the environment are empty. 
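With the `define_tables!` hunk above complete: every pre-existing table's index shifts up by one to make room for `BlockTxsHashes` at `1`, and the six new alt-chain tables take `16` through `21`. Their key/value relationships, summarized below with toy mirrors of the key types (not the actual `cuprate_blockchain` items):

```rust
// Toy mirrors for illustration only.
struct RawChainId(u64);
struct AltBlockHeight {
    chain_id: RawChainId,
    height: usize,
}

// AltChainInfos:       RawChainId     -> AltChainInfo        (fork metadata)
// AltBlockHeights:     [u8; 32]       -> AltBlockHeight      (hash -> position)
// AltBlocksInfo:       AltBlockHeight -> CompactAltBlockInfo
// AltBlockBlobs:       AltBlockHeight -> BlockBlob           (full serialized block)
// AltTransactionBlobs: [u8; 32]       -> TxBlob              (only for txs absent from the main chain)
// AltTransactionInfos: [u8; 32]       -> AltTransactionInfo  (kept for every alt tx)
fn main() {
    // Alt blocks are addressed by (chain, height), not by height alone.
    let key = AltBlockHeight { chain_id: RawChainId(1), height: 4 };
    println!("chain {} / height {}", key.chain_id.0, key.height);
}
```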
-pub(crate) fn assert_all_tables_are_empty(env: &ConcreteEnv) { +pub(crate) fn assert_all_tables_are_empty(env: &impl Env) { let env_inner = env.env_inner(); let tx_ro = env_inner.tx_ro().unwrap(); let tables = env_inner.open_tables(&tx_ro).unwrap(); assert!(tables.all_tables_empty().unwrap()); assert_eq!(crate::ops::tx::get_num_tx(tables.tx_ids()).unwrap(), 0); } + +pub(crate) fn map_verified_block_to_alt( + verified_block: VerifiedBlockInformation, + chain_id: ChainId, +) -> AltBlockInformation { + AltBlockInformation { + block: verified_block.block, + block_blob: verified_block.block_blob, + txs: verified_block.txs, + block_hash: verified_block.block_hash, + pow_hash: verified_block.pow_hash, + height: verified_block.height, + weight: verified_block.weight, + long_term_weight: verified_block.long_term_weight, + cumulative_difficulty: verified_block.cumulative_difficulty, + chain_id, + } +} diff --git a/storage/blockchain/src/types.rs b/storage/blockchain/src/types.rs index eb1dc64..86ef91c 100644 --- a/storage/blockchain/src/types.rs +++ b/storage/blockchain/src/types.rs @@ -41,12 +41,14 @@ #![forbid(unsafe_code)] // if you remove this line i will steal your monero //---------------------------------------------------------------------------------------------------- Import -use bytemuck::{Pod, Zeroable}; +use std::num::NonZero; +use bytemuck::{Pod, Zeroable}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use cuprate_database::{Key, StorableVec}; +use cuprate_types::{Chain, ChainId}; //---------------------------------------------------------------------------------------------------- Aliases // These type aliases exist as many Monero-related types are the exact same. @@ -64,6 +66,12 @@ pub type AmountIndices = StorableVec<AmountIndex>; /// A serialized block. pub type BlockBlob = StorableVec<u8>; +/// A serialized block header. +pub type BlockHeaderBlob = StorableVec<u8>; + +/// A block's transaction hashes. +pub type BlockTxHashes = StorableVec<[u8; 32]>; + /// A block's hash. pub type BlockHash = [u8; 32]; @@ -164,6 +172,7 @@ impl Key for PreRctOutputId {} /// block_hash: [54; 32], /// cumulative_rct_outs: 2389, /// long_term_weight: 2389, +/// mining_tx_index: 23 /// }; /// let b = Storable::as_bytes(&a); /// let c: BlockInfo = Storable::from_bytes(b); @@ -173,7 +182,7 @@ impl Key for PreRctOutputId {} /// # Size & Alignment /// ```rust /// # use cuprate_blockchain::types::*; -/// assert_eq!(size_of::<BlockInfo>(), 88); +/// assert_eq!(size_of::<BlockInfo>(), 96); /// assert_eq!(align_of::<BlockInfo>(), 8); /// ``` #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] @@ -187,7 +196,7 @@ pub struct BlockInfo { /// The adjusted block size, in bytes. /// /// See [`block_weight`](https://monero-book.cuprate.org/consensus_rules/blocks/weights.html#blocks-weight). - pub weight: u64, /// Least-significant 64 bits of the 128-bit cumulative difficulty. pub cumulative_difficulty_low: u64, /// Most-significant 64 bits of the 128-bit cumulative difficulty. @@ -199,7 +208,9 @@ pub struct BlockInfo { + pub weight: usize, /// The long term block weight, based on the median weight of the preceding `100_000` blocks. /// /// See [`long_term_weight`](https://monero-book.cuprate.org/consensus_rules/blocks/weights.html#long-term-block-weight). - pub long_term_weight: u64, + pub long_term_weight: usize, + /// [`TxId`] (u64) of the block coinbase transaction.
+ pub mining_tx_index: TxId, } //---------------------------------------------------------------------------------------------------- OutputFlags @@ -324,6 +335,259 @@ pub struct RctOutput { } // TODO: local_index? +//---------------------------------------------------------------------------------------------------- RawChain +/// [`Chain`] in a format which can be stored in the DB. +/// +/// Implements [`Into`] and [`From`] for [`Chain`]. +/// +/// ```rust +/// # use std::borrow::*; +/// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// use cuprate_types::Chain; +/// +/// // Assert Storable is correct. +/// let a: RawChain = Chain::Main.into(); +/// let b = Storable::as_bytes(&a); +/// let c: RawChain = Storable::from_bytes(b); +/// assert_eq!(a, c); +/// ``` +/// +/// # Size & Alignment +/// ```rust +/// # use cuprate_blockchain::types::*; +/// assert_eq!(size_of::<RawChain>(), 8); +/// assert_eq!(align_of::<RawChain>(), 8); +/// ``` +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] +#[repr(transparent)] +pub struct RawChain(u64); + +impl From<Chain> for RawChain { + fn from(value: Chain) -> Self { + match value { + Chain::Main => Self(0), + Chain::Alt(chain_id) => Self(chain_id.0.get()), + } + } +} + +impl From<RawChain> for Chain { + fn from(value: RawChain) -> Self { + NonZero::new(value.0).map_or(Self::Main, |id| Self::Alt(ChainId(id))) + } +} + +impl From<RawChainId> for RawChain { + fn from(value: RawChainId) -> Self { + // A [`ChainId`] with an inner value of `0` is invalid. + assert_ne!(value.0, 0); + + Self(value.0) + } +} + +//---------------------------------------------------------------------------------------------------- RawChainId +/// [`ChainId`] in a format which can be stored in the DB. +/// +/// Implements [`Into`] and [`From`] for [`ChainId`]. +/// +/// ```rust +/// # use std::borrow::*; +/// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// use cuprate_types::ChainId; +/// +/// // Assert Storable is correct. +/// let a: RawChainId = ChainId(10.try_into().unwrap()).into(); +/// let b = Storable::as_bytes(&a); +/// let c: RawChainId = Storable::from_bytes(b); +/// assert_eq!(a, c); +/// ``` +/// +/// # Size & Alignment +/// ```rust +/// # use cuprate_blockchain::types::*; +/// assert_eq!(size_of::<RawChainId>(), 8); +/// assert_eq!(align_of::<RawChainId>(), 8); +/// ``` +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] +#[repr(transparent)] +pub struct RawChainId(u64); + +impl From<ChainId> for RawChainId { + fn from(value: ChainId) -> Self { + Self(value.0.get()) + } +} + +impl From<RawChainId> for ChainId { + fn from(value: RawChainId) -> Self { + Self(NonZero::new(value.0).expect("RawChainId cannot have a value of `0`")) + } +} + +impl Key for RawChainId {} + +//---------------------------------------------------------------------------------------------------- AltChainInfo +/// Information on an alternative chain. +/// +/// ```rust +/// # use std::borrow::*; +/// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// use cuprate_types::Chain; +/// +/// // Assert Storable is correct.
+/// let a: AltChainInfo = AltChainInfo { +/// parent_chain: Chain::Main.into(), +/// common_ancestor_height: 0, +/// chain_height: 1, +/// }; +/// let b = Storable::as_bytes(&a); +/// let c: AltChainInfo = Storable::from_bytes(b); +/// assert_eq!(a, c); +/// ``` +/// +/// # Size & Alignment +/// ```rust +/// # use cuprate_blockchain::types::*; +/// assert_eq!(size_of::(), 24); +/// assert_eq!(align_of::(), 8); +/// ``` +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] +#[repr(C)] +pub struct AltChainInfo { + /// The chain this alt chain forks from. + pub parent_chain: RawChain, + /// The height of the first block we share with the parent chain. + pub common_ancestor_height: usize, + /// The chain height of the blocks in this alt chain. + pub chain_height: usize, +} + +//---------------------------------------------------------------------------------------------------- AltBlockHeight +/// Represents the height of a block on an alt-chain. +/// +/// ```rust +/// # use std::borrow::*; +/// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// use cuprate_types::ChainId; +/// +/// // Assert Storable is correct. +/// let a: AltBlockHeight = AltBlockHeight { +/// chain_id: ChainId(1.try_into().unwrap()).into(), +/// height: 1, +/// }; +/// let b = Storable::as_bytes(&a); +/// let c: AltBlockHeight = Storable::from_bytes(b); +/// assert_eq!(a, c); +/// ``` +/// +/// # Size & Alignment +/// ```rust +/// # use cuprate_blockchain::types::*; +/// assert_eq!(size_of::(), 16); +/// assert_eq!(align_of::(), 8); +/// ``` +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] +#[repr(C)] +pub struct AltBlockHeight { + /// The [`ChainId`] of the chain this alt block is on, in raw form. + pub chain_id: RawChainId, + /// The height of this alt-block. + pub height: usize, +} + +impl Key for AltBlockHeight {} + +//---------------------------------------------------------------------------------------------------- CompactAltBlockInfo +/// Represents information on an alt-chain. +/// +/// ```rust +/// # use std::borrow::*; +/// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// +/// // Assert Storable is correct. +/// let a: CompactAltBlockInfo = CompactAltBlockInfo { +/// block_hash: [1; 32], +/// pow_hash: [2; 32], +/// height: 10, +/// weight: 20, +/// long_term_weight: 30, +/// cumulative_difficulty_low: 40, +/// cumulative_difficulty_high: 50, +/// }; +/// +/// let b = Storable::as_bytes(&a); +/// let c: CompactAltBlockInfo = Storable::from_bytes(b); +/// assert_eq!(a, c); +/// ``` +/// +/// # Size & Alignment +/// ```rust +/// # use cuprate_blockchain::types::*; +/// assert_eq!(size_of::(), 104); +/// assert_eq!(align_of::(), 8); +/// ``` +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] +#[repr(C)] +pub struct CompactAltBlockInfo { + /// The block's hash. + pub block_hash: [u8; 32], + /// The block's proof-of-work hash. + pub pow_hash: [u8; 32], + /// The block's height. + pub height: usize, + /// The adjusted block size, in bytes. + pub weight: usize, + /// The long term block weight, which is the weight factored in with previous block weights. + pub long_term_weight: usize, + /// The low 64 bits of the cumulative difficulty. + pub cumulative_difficulty_low: u64, + /// The high 64 bits of the cumulative difficulty. 
+ pub cumulative_difficulty_high: u64, +} + +//---------------------------------------------------------------------------------------------------- AltTransactionInfo +/// Represents information on an alt transaction. +/// +/// ```rust +/// # use std::borrow::*; +/// # use cuprate_blockchain::{*, types::*}; +/// use cuprate_database::Storable; +/// +/// // Assert Storable is correct. +/// let a: AltTransactionInfo = AltTransactionInfo { +/// tx_weight: 1, +/// fee: 6, +/// tx_hash: [6; 32], +/// }; +/// +/// let b = Storable::as_bytes(&a); +/// let c: AltTransactionInfo = Storable::from_bytes(b); +/// assert_eq!(a, c); +/// ``` +/// +/// # Size & Alignment +/// ```rust +/// # use cuprate_blockchain::types::*; +/// assert_eq!(size_of::(), 48); +/// assert_eq!(align_of::(), 8); +/// ``` +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] +#[repr(C)] +pub struct AltTransactionInfo { + /// The transaction's weight. + pub tx_weight: usize, + /// The transaction's total fees. + pub fee: u64, + /// The transaction's hash. + pub tx_hash: [u8; 32], +} + //---------------------------------------------------------------------------------------------------- Tests #[cfg(test)] mod test { diff --git a/storage/blockchain/src/unsafe_sendable.rs b/storage/blockchain/src/unsafe_sendable.rs index 9447293..76c7899 100644 --- a/storage/blockchain/src/unsafe_sendable.rs +++ b/storage/blockchain/src/unsafe_sendable.rs @@ -26,7 +26,7 @@ use bytemuck::TransparentWrapper; /// Notably, `heed`'s table type uses this inside `service`. pub(crate) struct UnsafeSendable(T); -#[allow(clippy::non_send_fields_in_send_ty)] +#[expect(clippy::non_send_fields_in_send_ty)] // SAFETY: Users ensure that their usage of this type is safe. unsafe impl Send for UnsafeSendable {} @@ -41,7 +41,7 @@ impl UnsafeSendable { } /// Extract the inner `T`. - #[allow(dead_code)] + #[expect(dead_code)] pub(crate) fn into_inner(self) -> T { self.0 } diff --git a/storage/database/src/backend/heed/env.rs b/storage/database/src/backend/heed/env.rs index 8c71e61..568379e 100644 --- a/storage/database/src/backend/heed/env.rs +++ b/storage/database/src/backend/heed/env.rs @@ -144,7 +144,7 @@ impl Env for ConcreteEnv { // (current disk size) + (a bit of leeway) // to account for empty databases where we // need to write same tables. - #[allow(clippy::cast_possible_truncation)] // only 64-bit targets + #[expect(clippy::cast_possible_truncation, reason = "only 64-bit targets")] let disk_size_bytes = match std::fs::File::open(&config.db_file) { Ok(file) => file.metadata()?.len() as usize, // The database file doesn't exist, 0 bytes. diff --git a/storage/database/src/backend/heed/error.rs b/storage/database/src/backend/heed/error.rs index bbaeaf0..fdeab70 100644 --- a/storage/database/src/backend/heed/error.rs +++ b/storage/database/src/backend/heed/error.rs @@ -57,7 +57,10 @@ impl From for crate::InitError { } //---------------------------------------------------------------------------------------------------- RuntimeError -#[allow(clippy::fallible_impl_from)] // We need to panic sometimes. +#[expect( + clippy::fallible_impl_from, + reason = "We need to panic sometimes for safety" +)] impl From for crate::RuntimeError { /// # Panics /// This will panic on unrecoverable errors for safety. 
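Stepping back: the mechanical change running through the hunks above and below is `#[allow(...)]` becoming `#[expect(..., reason = ...)]`, matching the `[lints] workspace = true` additions elsewhere in this diff (e.g. in `test-utils/Cargo.toml` below). Unlike `allow`, `expect` (stable since Rust 1.81) warns via `unfulfilled_lint_expectations` when the suppressed lint no longer fires, so stale suppressions surface on their own. A minimal self-contained example:

```rust
// If the cast below stopped triggering the lint, the `expect` attribute
// itself would warn (`unfulfilled_lint_expectations`), so the suppression
// cannot silently outlive its reason. Requires Rust 1.81+; the clippy
// lint fires under `cargo clippy` with pedantic lints enabled.
#[expect(clippy::cast_possible_truncation, reason = "we enforce 64-bit")]
fn height(len: u64) -> usize {
    len as usize
}

fn main() {
    assert_eq!(height(3), 3);
}
```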
diff --git a/storage/database/src/backend/tests.rs b/storage/database/src/backend/tests.rs index e219c42..0c0fe05 100644 --- a/storage/database/src/backend/tests.rs +++ b/storage/database/src/backend/tests.rs @@ -194,7 +194,7 @@ fn db_read_write() { // Insert keys. let mut key = KEY; - #[allow(clippy::explicit_counter_loop)] // we need the +1 side effect + #[expect(clippy::explicit_counter_loop, reason = "we need the +1 side effect")] for _ in 0..N { table.put(&key, &VALUE).unwrap(); key += 1; @@ -269,7 +269,7 @@ fn db_read_write() { assert_ne!(table.get(&KEY).unwrap(), NEW_VALUE); - #[allow(unused_assignments)] + #[expect(unused_assignments)] table .update(&KEY, |mut value| { value = NEW_VALUE; diff --git a/storage/database/src/config/mod.rs b/storage/database/src/config/mod.rs index c6ed0c0..7d65233 100644 --- a/storage/database/src/config/mod.rs +++ b/storage/database/src/config/mod.rs @@ -33,7 +33,7 @@ //! # Ok(()) } //! ``` -#[allow(clippy::module_inception)] +#[expect(clippy::module_inception)] mod config; pub use config::{Config, ConfigBuilder, READER_THREADS_DEFAULT}; diff --git a/storage/database/src/database.rs b/storage/database/src/database.rs index 4a45f7c..6fbb7aa 100644 --- a/storage/database/src/database.rs +++ b/storage/database/src/database.rs @@ -54,7 +54,7 @@ pub trait DatabaseIter { /// Get an [`Iterator`] that returns the `(key, value)` types for this database. #[doc = doc_iter!()] - #[allow(clippy::iter_not_returning_iterator)] + #[expect(clippy::iter_not_returning_iterator)] fn iter( &self, ) -> Result> + '_, RuntimeError>; diff --git a/storage/database/src/env.rs b/storage/database/src/env.rs index 8294443..1ae6aa1 100644 --- a/storage/database/src/env.rs +++ b/storage/database/src/env.rs @@ -122,7 +122,7 @@ pub trait Env: Sized { /// This function _must_ be re-implemented if [`Env::MANUAL_RESIZE`] is `true`. /// /// Otherwise, this function will panic with `unreachable!()`. - #[allow(unused_variables)] + #[expect(unused_variables)] fn resize_map(&self, resize_algorithm: Option) -> NonZeroUsize { unreachable!() } diff --git a/storage/database/src/resize.rs b/storage/database/src/resize.rs index 6ef9974..b217478 100644 --- a/storage/database/src/resize.rs +++ b/storage/database/src/resize.rs @@ -261,7 +261,7 @@ pub fn percent(current_size_bytes: usize, percent: f32) -> NonZeroUsize { let page_size = *PAGE_SIZE; // INVARIANT: Allow `f32` <-> `usize` casting, we handle all cases. - #[allow( + #[expect( clippy::cast_possible_truncation, clippy::cast_sign_loss, clippy::cast_precision_loss diff --git a/storage/service/src/reader_threads.rs b/storage/service/src/reader_threads.rs index 72f619a..a182e48 100644 --- a/storage/service/src/reader_threads.rs +++ b/storage/service/src/reader_threads.rs @@ -153,7 +153,7 @@ impl ReaderThreads { }, // We handle the casting loss. - #[allow( + #[expect( clippy::cast_precision_loss, clippy::cast_possible_truncation, clippy::cast_sign_loss diff --git a/storage/txpool/src/service/interface.rs b/storage/txpool/src/service/interface.rs index 93235c0..450b28d 100644 --- a/storage/txpool/src/service/interface.rs +++ b/storage/txpool/src/service/interface.rs @@ -18,7 +18,7 @@ pub enum TxpoolReadRequest { //---------------------------------------------------------------------------------------------------- TxpoolReadResponse /// The transaction pool [`tower::Service`] read response type. 
-#[allow(clippy::large_enum_variant)] +#[expect(clippy::large_enum_variant)] pub enum TxpoolReadResponse { /// A response containing the raw bytes of a transaction. // TODO: use bytes::Bytes. diff --git a/storage/txpool/src/service/read.rs b/storage/txpool/src/service/read.rs index 5654164..f006813 100644 --- a/storage/txpool/src/service/read.rs +++ b/storage/txpool/src/service/read.rs @@ -50,7 +50,7 @@ fn init_read_service_with_pool(env: Arc, pool: Arc) -> /// 1. `Request` is mapped to a handler function /// 2. Handler function is called /// 3. [`TxpoolReadResponse`] is returned -#[allow(clippy::needless_pass_by_value)] +#[expect(clippy::needless_pass_by_value)] fn map_request( env: &ConcreteEnv, // Access to the database request: TxpoolReadRequest, // The request we must fulfill diff --git a/storage/txpool/src/types.rs b/storage/txpool/src/types.rs index 09b0ce0..4da2d0f 100644 --- a/storage/txpool/src/types.rs +++ b/storage/txpool/src/types.rs @@ -39,7 +39,7 @@ pub struct TransactionInfo { pub weight: usize, /// [`TxStateFlags`] of this transaction. pub flags: TxStateFlags, - #[allow(clippy::pub_underscore_fields)] + #[expect(clippy::pub_underscore_fields)] /// Explicit padding so that we have no implicit padding bytes in `repr(C)`. /// /// Allows potential future expansion of this type. @@ -92,7 +92,7 @@ impl From for CachedVerificationState { } } -#[allow(clippy::fallible_impl_from)] // only panics in invalid states +#[expect(clippy::fallible_impl_from, reason = "only panics in invalid states")] impl From for RawCachedVerificationState { fn from(value: CachedVerificationState) -> Self { match value { diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index a96a9cf..abf7ee4 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -7,7 +7,7 @@ authors = ["Boog900", "hinto-janai"] [dependencies] cuprate-types = { path = "../types" } -cuprate-helper = { path = "../helper", features = ["map"] } +cuprate-helper = { path = "../helper", features = ["map", "tx"] } cuprate-wire = { path = "../net/wire" } cuprate-p2p-core = { path = "../p2p/p2p-core", features = ["borsh"] } @@ -22,11 +22,13 @@ tokio = { workspace = true, features = ["full"] } tokio-util = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -bytes = { workspace = true, features = ["std"] } tempfile = { workspace = true } paste = { workspace = true } borsh = { workspace = true, features = ["derive"]} [dev-dependencies] hex = { workspace = true } -pretty_assertions = { workspace = true } \ No newline at end of file +pretty_assertions = { workspace = true } + +[lints] +workspace = true \ No newline at end of file diff --git a/test-utils/src/data/mod.rs b/test-utils/src/data/mod.rs index b9d42fb..3be409f 100644 --- a/test-utils/src/data/mod.rs +++ b/test-utils/src/data/mod.rs @@ -25,13 +25,11 @@ //! let tx: VerifiedTransactionInformation = TX_V1_SIG0.clone(); //! 
``` -mod constants; pub use constants::{ BLOCK_43BD1F, BLOCK_5ECB7E, BLOCK_BBD604, BLOCK_F91043, TX_2180A8, TX_3BC7FF, TX_84D48D, TX_9E3F73, TX_B6B439, TX_D7FEBD, TX_E2D393, TX_E57440, }; +pub use statics::{BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3, TX_V1_SIG0, TX_V1_SIG2, TX_V2_RCT3}; +mod constants; mod statics; -pub use statics::{ - tx_fee, BLOCK_V16_TX0, BLOCK_V1_TX2, BLOCK_V9_TX3, TX_V1_SIG0, TX_V1_SIG2, TX_V2_RCT3, -}; diff --git a/test-utils/src/data/statics.rs b/test-utils/src/data/statics.rs index 8b98171..c67c7eb 100644 --- a/test-utils/src/data/statics.rs +++ b/test-utils/src/data/statics.rs @@ -8,12 +8,12 @@ //---------------------------------------------------------------------------------------------------- Import use std::sync::LazyLock; -use cuprate_helper::map::combine_low_high_bits_to_u128; -use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; use hex_literal::hex; -use monero_serai::transaction::Input; use monero_serai::{block::Block, transaction::Transaction}; +use cuprate_helper::{map::combine_low_high_bits_to_u128, tx::tx_fee}; +use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; + use crate::data::constants::{ BLOCK_43BD1F, BLOCK_5ECB7E, BLOCK_F91043, TX_2180A8, TX_3BC7FF, TX_84D48D, TX_9E3F73, TX_B6B439, TX_D7FEBD, TX_E2D393, TX_E57440, @@ -110,36 +110,6 @@ fn to_tx_verification_data(tx_blob: impl AsRef<[u8]>) -> VerifiedTransactionInfo } } -/// Calculates the fee of the [`Transaction`]. -/// -/// # Panics -/// This will panic if the inputs overflow or the transaction outputs too much. -pub fn tx_fee(tx: &Transaction) -> u64 { - let mut fee = 0_u64; - - match &tx { - Transaction::V1 { prefix, .. } => { - for input in &prefix.inputs { - match input { - Input::Gen(_) => return 0, - Input::ToKey { amount, .. } => { - fee = fee.checked_add(amount.unwrap_or(0)).unwrap(); - } - } - } - - for output in &prefix.outputs { - fee.checked_sub(output.amount.unwrap_or(0)).unwrap(); - } - } - Transaction::V2 { proofs, .. } => { - fee = proofs.as_ref().unwrap().base.fee; - } - }; - - fee -} - //---------------------------------------------------------------------------------------------------- Blocks /// Generate a `static LazyLock`. /// @@ -148,8 +118,8 @@ pub fn tx_fee(tx: &Transaction) -> u64 { /// /// This requires some static block/tx input (from data) and some fields. /// This data can be accessed more easily via: -/// - A block explorer (https://xmrchain.net) -/// - Monero RPC (see cuprate_test_utils::rpc for this) +/// - A block explorer () +/// - Monero RPC (see `cuprate_test_utils::rpc` for this) /// /// See below for actual usage. macro_rules! verified_block_information { @@ -311,12 +281,12 @@ transaction_verification_data! { //---------------------------------------------------------------------------------------------------- TESTS #[cfg(test)] mod tests { - use super::*; - use pretty_assertions::assert_eq; use crate::rpc::client::HttpRpcClient; + use super::*; + /// Assert the defined blocks are the same compared to ones received from a local RPC call. 
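The `tx_fee` helper removed above is not gone: it moves to `cuprate_helper::tx` (note the `features = ["map", "tx"]` change in `test-utils/Cargo.toml` above and the new `use cuprate_helper::tx::tx_fee;` import in `rpc/client.rs` below). One thing worth flagging from the removed body: the V1 branch called `fee.checked_sub(...).unwrap();` without assigning the result back to `fee`, so outputs were never actually subtracted; presumably the relocated helper assigns it, which is worth verifying. The intended rule, as a standalone sketch:

```rust
// Sketch of the fee rule for v1 transactions: fee = sum(inputs) - sum(outputs).
// (For v2, the fee is stored explicitly in the proofs.) This is an
// illustration of the arithmetic, not the relocated helper itself.
fn v1_fee(inputs: &[u64], outputs: &[u64]) -> u64 {
    let mut fee: u64 = 0;
    for amount in inputs {
        fee = fee.checked_add(*amount).unwrap();
    }
    for amount in outputs {
        fee = fee.checked_sub(*amount).unwrap(); // the assignment matters
    }
    fee
}

fn main() {
    assert_eq!(v1_fee(&[10, 5], &[12]), 3);
}
```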
#[ignore] // FIXME: doesn't work in CI, we need a real unrestricted node #[tokio::test] diff --git a/test-utils/src/monerod.rs b/test-utils/src/monerod.rs index 9ffa08d..abad4c9 100644 --- a/test-utils/src/monerod.rs +++ b/test-utils/src/monerod.rs @@ -178,6 +178,7 @@ impl Drop for SpawnedMoneroD { println!("------END-MONEROD-LOGS------"); } + #[expect(clippy::manual_assert, reason = "`if` is more clear")] if error && !panicking() { // `println` only outputs in a test when panicking so if there is an error while // dropping monerod but not an error in the test then we need to panic to make sure diff --git a/test-utils/src/rpc/client.rs b/test-utils/src/rpc/client.rs index fbe6fb9..ce7fb09 100644 --- a/test-utils/src/rpc/client.rs +++ b/test-utils/src/rpc/client.rs @@ -1,18 +1,16 @@ //! HTTP RPC client. //---------------------------------------------------------------------------------------------------- Use +use monero_rpc::Rpc; +use monero_serai::block::Block; +use monero_simple_request_rpc::SimpleRequestRpc; use serde::Deserialize; use serde_json::json; use tokio::task::spawn_blocking; -use monero_rpc::Rpc; -use monero_serai::block::Block; -use monero_simple_request_rpc::SimpleRequestRpc; - +use cuprate_helper::tx::tx_fee; use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; -use crate::data::tx_fee; - //---------------------------------------------------------------------------------------------------- Constants /// The default URL used for Monero RPC connections. pub const LOCALHOST_RPC_URL: &str = "http://127.0.0.1:18081"; @@ -47,13 +45,13 @@ impl HttpRpcClient { } /// The address used for this [`HttpRpcClient`]. - #[allow(dead_code)] + #[allow(clippy::allow_attributes, dead_code, reason = "expect doesn't work")] const fn address(&self) -> &String { &self.address } /// Access to the inner RPC client for other usage. - #[allow(dead_code)] + #[expect(dead_code)] const fn rpc(&self) -> &SimpleRequestRpc { &self.rpc } @@ -184,9 +182,10 @@ impl HttpRpcClient { //---------------------------------------------------------------------------------------------------- TESTS #[cfg(test)] mod tests { - use super::*; use hex_literal::hex; + use super::*; + /// Assert the default address is localhost. #[tokio::test] async fn localhost() { @@ -197,7 +196,7 @@ mod tests { #[ignore] // FIXME: doesn't work in CI, we need a real unrestricted node #[tokio::test] async fn get() { - #[allow(clippy::too_many_arguments)] + #[expect(clippy::too_many_arguments)] async fn assert_eq( rpc: &HttpRpcClient, height: usize, diff --git a/test-utils/src/rpc/data/macros.rs b/test-utils/src/rpc/data/macros.rs index 632917a..63a214c 100644 --- a/test-utils/src/rpc/data/macros.rs +++ b/test-utils/src/rpc/data/macros.rs @@ -156,13 +156,5 @@ macro_rules! define_request_and_response_doc_test { "```\n", ) }; - - // No doc test. - ( - $name:ident, - $test:ident, - ) => { - "" - }; } pub(super) use define_request_and_response_doc_test; diff --git a/test-utils/src/rpc/data/other.rs b/test-utils/src/rpc/data/other.rs index 80a48ab..9af6d8b 100644 --- a/test-utils/src/rpc/data/other.rs +++ b/test-utils/src/rpc/data/other.rs @@ -8,8 +8,7 @@ define_request_and_response! { // `(other)` adds a JSON sanity-check test. get_height (other), GET_HEIGHT: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "hash": "68bb1a1cff8e2a44c3221e8e1aff80bc6ca45d06fa8eff4d2a3a7ac31d4efe3f", @@ -53,8 +52,7 @@ r#"{ define_request_and_response! 
{ get_alt_blocks_hashes (other), GET_ALT_BLOCKS_HASHES: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "blks_hashes": ["8ee10db35b1baf943f201b303890a29e7d45437bd76c2bd4df0d2f2ee34be109"], @@ -134,8 +132,7 @@ r#"{ define_request_and_response! { stop_mining (other), STOP_MINING: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "status": "OK", @@ -146,8 +143,7 @@ r#"{ define_request_and_response! { mining_status (other), MINING_STATUS: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "active": false, @@ -173,8 +169,7 @@ r#"{ define_request_and_response! { save_bc (other), SAVE_BC: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "status": "OK", @@ -185,8 +180,7 @@ r#"{ define_request_and_response! { get_peer_list (other), GET_PEER_LIST: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "gray_list": [{ @@ -291,8 +285,7 @@ r#"{ define_request_and_response! { get_transaction_pool (other), GET_TRANSACTION_POOL: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "credits": 0, @@ -598,8 +591,7 @@ r#"{ define_request_and_response! { get_transaction_pool_stats (other), GET_TRANSACTION_POOL_STATS: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "credits": 0, @@ -657,8 +649,7 @@ r#"{ define_request_and_response! { stop_daemon (other), STOP_DAEMON: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "status": "OK" @@ -668,8 +659,7 @@ r#"{ define_request_and_response! { get_limit (other), GET_LIMIT: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "limit_down": 1280000, @@ -713,8 +703,7 @@ r#"{ define_request_and_response! { get_net_stats (other), GET_NET_STATS: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "start_time": 1721251858, @@ -801,8 +790,7 @@ r#"{ define_request_and_response! { UNDOCUMENTED_ENDPOINT (other), GET_TRANSACTION_POOL_HASHES: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "credits": 0, @@ -835,8 +823,7 @@ r#"{ define_request_and_response! { UNDOCUMENTED_ENDPOINT (other), GET_PUBLIC_NODES: &str, - Request = -r#"{}"#; + Request = "{}"; Response = r#"{ "status": "OK", diff --git a/test-utils/src/test_netzone.rs b/test-utils/src/test_netzone.rs index f1f7582..791533c 100644 --- a/test-utils/src/test_netzone.rs +++ b/test-utils/src/test_netzone.rs @@ -86,9 +86,8 @@ impl, MoneroWireCodec>; type Listener = Pin< Box< - dyn Stream< - Item = Result<(Option, Self::Stream, Self::Sink), std::io::Error>, - > + Send + dyn Stream, Self::Stream, Self::Sink), Error>> + + Send + 'static, >, >; diff --git a/types/src/block_complete_entry.rs b/types/src/block_complete_entry.rs index ba5fc2b..77ed82d 100644 --- a/types/src/block_complete_entry.rs +++ b/types/src/block_complete_entry.rs @@ -1,7 +1,6 @@ //! Contains [`BlockCompleteEntry`] and the related types. //---------------------------------------------------------------------------------------------------- Import -#[cfg(feature = "epee")] use bytes::Bytes; #[cfg(feature = "serde")] diff --git a/types/src/blockchain.rs b/types/src/blockchain.rs index b502c3f..f2b96db 100644 --- a/types/src/blockchain.rs +++ b/types/src/blockchain.rs @@ -2,14 +2,16 @@ //! //! Tests that assert particular requests lead to particular //! responses are also tested in Cuprate's blockchain database crate. 
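A small cleanup repeated across `test-utils/src/rpc/data/other.rs` above: empty JSON request bodies drop the multi-line raw-string form for a plain literal, and the now-unused "no doc test" arm of `define_request_and_response_doc_test!` is deleted. The two literal forms are byte-for-byte identical:

```rust
// r#"..."# only earns its keep when the string contains `"` or `\`;
// for an empty JSON object the plain literal is equivalent.
fn main() {
    assert_eq!(r#"{}"#, "{}");
}
```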
diff --git a/types/src/blockchain.rs b/types/src/blockchain.rs
index b502c3f..f2b96db 100644
--- a/types/src/blockchain.rs
+++ b/types/src/blockchain.rs
@@ -2,14 +2,16 @@
 //!
 //! Tests that assert particular requests lead to particular
 //! responses are also tested in Cuprate's blockchain database crate.
-
 //---------------------------------------------------------------------------------------------------- Import
 use std::{
     collections::{HashMap, HashSet},
     ops::Range,
 };
 
-use crate::types::{Chain, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation};
+use crate::{
+    types::{Chain, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation},
+    AltBlockInformation, ChainId,
+};
 
 //---------------------------------------------------------------------------------------------------- ReadRequest
 /// A read request to the blockchain database.
@@ -92,26 +94,49 @@ pub enum BlockchainReadRequest {
     CompactChainHistory,
 
     /// A request to find the first unknown block ID in a list of block IDs.
-    ////
+    ///
     /// # Invariant
     /// The [`Vec`] containing the block IDs must be sorted in chronological block
     /// order, or else the returned response is unspecified and meaningless,
     /// as this request performs a binary search.
     FindFirstUnknown(Vec<[u8; 32]>),
+
+    /// A request for all alt blocks in the chain with the given [`ChainId`].
+    AltBlocksInChain(ChainId),
 }
 
 //---------------------------------------------------------------------------------------------------- WriteRequest
 /// A write request to the blockchain database.
-///
-/// There is currently only 1 write request to the database,
-/// as such, the only valid [`BlockchainResponse`] to this request is
-/// the proper response for a [`BlockchainResponse::WriteBlockOk`].
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum BlockchainWriteRequest {
     /// Request that a block be written to the database.
     ///
     /// Input is an already verified block.
     WriteBlock(VerifiedBlockInformation),
+
+    /// Write an alternative block to the database.
+    ///
+    /// Input is the alternative block.
+    WriteAltBlock(AltBlockInformation),
+
+    /// A request to pop some blocks from the top of the main chain.
+    ///
+    /// Input is the number of blocks to pop.
+    ///
+    /// This request flushes all alt-chains from the cache before adding the popped blocks to the
+    /// alt cache.
+    PopBlocks(usize),
+
+    /// A request to reverse the re-org process.
+    ///
+    /// The inner value is the [`ChainId`] of the old main chain.
+    ///
+    /// # Invariant
+    /// It is invalid to call this with a [`ChainId`] that was not returned from [`BlockchainWriteRequest::PopBlocks`].
+    ReverseReorg(ChainId),
+
+    /// A request to flush all alternative blocks.
+    FlushAltBlocks,
 }
 
 //---------------------------------------------------------------------------------------------------- Response
@@ -197,12 +222,24 @@ pub enum BlockchainResponse {
     /// This will be [`None`] if all blocks were known.
     FindFirstUnknown(Option<(usize, usize)>),
 
-    //------------------------------------------------------ Writes
-    /// Response to [`BlockchainWriteRequest::WriteBlock`].
+    /// The response for [`BlockchainReadRequest::AltBlocksInChain`].
     ///
-    /// This response indicates that the requested block has
-    /// successfully been written to the database without error.
-    WriteBlockOk,
+    /// Contains all the alt blocks in the alt-chain in chronological order.
+    AltBlocksInChain(Vec<AltBlockInformation>),
+
+    //------------------------------------------------------ Writes
+    /// A generic Ok response to indicate a request was successfully handled.
+    ///
+    /// Currently the response for:
+    /// - [`BlockchainWriteRequest::WriteBlock`]
+    /// - [`BlockchainWriteRequest::WriteAltBlock`]
+    /// - [`BlockchainWriteRequest::ReverseReorg`]
+    /// - [`BlockchainWriteRequest::FlushAltBlocks`]
+    Ok,
+    /// The response for [`BlockchainWriteRequest::PopBlocks`].
+    ///
+    /// The inner value is the alt-chain ID for the old main chain blocks.
+    PopBlocks(ChainId),
 }
 
 //---------------------------------------------------------------------------------------------------- Tests
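Note: a hypothetical sketch of how a write handler might pair the new `BlockchainWriteRequest` variants with their responses; the handler name and the stubbed-out database work are invented here, only the types and variants come from this diff. `PopBlocks` is the one write that returns data:

```rust
use std::num::NonZero;

use cuprate_types::{
    blockchain::{BlockchainResponse, BlockchainWriteRequest},
    ChainId,
};

/// Hypothetical dispatcher for write requests.
fn handle_write(req: BlockchainWriteRequest) -> BlockchainResponse {
    match req {
        // Plain writes acknowledge success with the generic `Ok`.
        BlockchainWriteRequest::WriteBlock(_block) => BlockchainResponse::Ok,
        BlockchainWriteRequest::WriteAltBlock(_alt_block) => BlockchainResponse::Ok,
        // `PopBlocks` returns the `ChainId` under which the popped main
        // chain blocks are now stored (the ID here is a made-up value).
        BlockchainWriteRequest::PopBlocks(_amount) => {
            BlockchainResponse::PopBlocks(ChainId(NonZero::new(1).unwrap()))
        }
        // Reorg bookkeeping is also a bare acknowledgement.
        BlockchainWriteRequest::ReverseReorg(_chain_id) => BlockchainResponse::Ok,
        BlockchainWriteRequest::FlushAltBlocks => BlockchainResponse::Ok,
    }
}
```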
diff --git a/types/src/hard_fork.rs b/types/src/hard_fork.rs
index 412448e..8b2cd78 100644
--- a/types/src/hard_fork.rs
+++ b/types/src/hard_fork.rs
@@ -27,7 +27,6 @@
 }
 
 /// An identifier for every hard-fork Monero has had.
-#[allow(missing_docs)]
 #[derive(Default, Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone, Hash)]
 #[cfg_attr(any(feature = "proptest"), derive(proptest_derive::Arbitrary))]
 #[repr(u8)]
diff --git a/types/src/types.rs b/types/src/types.rs
index 4b6e2e1..a60ce6c 100644
--- a/types/src/types.rs
+++ b/types/src/types.rs
@@ -1,6 +1,8 @@
 //! Various shared data types in Cuprate.
 
 //---------------------------------------------------------------------------------------------------- Import
+use std::num::NonZero;
+
 use curve25519_dalek::edwards::EdwardsPoint;
 use monero_serai::{
     block::Block,
@@ -38,8 +40,7 @@ pub struct ExtendedBlockHeader {
 //---------------------------------------------------------------------------------------------------- VerifiedTransactionInformation
 /// Verified information of a transaction.
 ///
-/// - If this is in a [`VerifiedBlockInformation`] this represents a valid transaction
-/// - If this is in an [`AltBlockInformation`] this represents a potentially valid transaction
+/// This represents a valid transaction.
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct VerifiedTransactionInformation {
     /// The transaction itself.
@@ -79,6 +80,7 @@ pub struct VerifiedBlockInformation {
     /// [`Block::hash`].
     pub block_hash: [u8; 32],
     /// The block's proof-of-work hash.
+    // TODO: make this an option.
     pub pow_hash: [u8; 32],
     /// The block's height.
     pub height: usize,
@@ -97,7 +99,7 @@ pub struct VerifiedBlockInformation {
 ///
 /// The inner value is meaningless.
 #[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
-pub struct ChainId(pub u64);
+pub struct ChainId(pub NonZero<u64>);
 
 //---------------------------------------------------------------------------------------------------- Chain
 /// An identifier for a chain.
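Note: the `ChainId(pub u64)` to `ChainId(pub NonZero<u64>)` change makes a zero ID unrepresentable and hands the compiler a niche, so wrapping the ID in `Option` costs nothing. A small self-contained illustration (assumes Rust 1.79+ for the generic `NonZero`; the struct is redeclared locally so the snippet runs without the crate):

```rust
use std::num::NonZero;

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct ChainId(pub NonZero<u64>);

fn main() {
    // Construction rejects zero outright.
    assert!(NonZero::new(0_u64).is_none());
    let id = ChainId(NonZero::new(1).expect("1 is non-zero"));

    // Niche optimization: `None` reuses the forbidden all-zero bit
    // pattern, so `Option<ChainId>` is no bigger than `ChainId`.
    assert_eq!(
        std::mem::size_of::<Option<ChainId>>(),
        std::mem::size_of::<ChainId>()
    );

    println!("{id:?}");
}
```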