diff --git a/Cargo.lock b/Cargo.lock
index 054ca85..72325bb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -552,6 +552,7 @@ dependencies = [
 name = "cuprate-consensus"
 version = "0.1.0"
 dependencies = [
+ "cfg-if",
  "cuprate-consensus-rules",
  "cuprate-helper",
  "cuprate-test-utils",
@@ -579,6 +580,7 @@ dependencies = [
 name = "cuprate-consensus-rules"
 version = "0.1.0"
 dependencies = [
+ "cfg-if",
  "crypto-bigint",
  "cuprate-cryptonight",
  "cuprate-helper",
@@ -670,15 +672,14 @@ dependencies = [
  "cuprate-blockchain",
  "cuprate-consensus",
  "cuprate-consensus-rules",
+ "cuprate-helper",
  "cuprate-types",
  "hex",
  "hex-literal",
  "monero-serai",
- "rayon",
  "sha3",
  "thiserror",
  "tokio",
- "tokio-test",
  "tower",
 ]
diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml
index bd3994a..12d97ee 100644
--- a/consensus/Cargo.toml
+++ b/consensus/Cargo.toml
@@ -12,6 +12,7 @@ cuprate-helper = { path = "../helper", default-features = false, features = ["st
 cuprate-consensus-rules = { path = "./rules", features = ["rayon"] }
 cuprate-types = { path = "../types" }
 
+cfg-if = { workspace = true }
 thiserror = { workspace = true }
 tower = { workspace = true, features = ["util"] }
 tracing = { workspace = true, features = ["std", "attributes"] }
@@ -19,7 +20,6 @@ futures = { workspace = true, features = ["std", "async-await"] }
 
 randomx-rs = { workspace = true }
 monero-serai = { workspace = true, features = ["std"] }
-curve25519-dalek = { workspace = true }
 
 rayon = { workspace = true }
 thread_local = { workspace = true }
@@ -34,8 +34,12 @@ cuprate-test-utils = { path = "../test-utils" }
 cuprate-consensus-rules = {path = "./rules", features = ["proptest"]}
 
 hex-literal = { workspace = true }
+curve25519-dalek = { workspace = true }
 tokio = { workspace = true, features = ["rt-multi-thread", "macros"]}
 tokio-test = { workspace = true }
 
 proptest = { workspace = true }
-proptest-derive = { workspace = true }
\ No newline at end of file
+proptest-derive = { workspace = true }
+
+[lints]
+workspace = true
\ No newline at end of file
diff --git a/consensus/fast-sync/Cargo.toml b/consensus/fast-sync/Cargo.toml
index 32fce11..1d7d97b 100644
--- a/consensus/fast-sync/Cargo.toml
+++ b/consensus/fast-sync/Cargo.toml
@@ -9,19 +9,22 @@ name = "cuprate-fast-sync-create-hashes"
 path = "src/create.rs"
 
 [dependencies]
-clap = { workspace = true, features = ["derive", "std"] }
-cuprate-blockchain = { path = "../../storage/blockchain" }
-cuprate-consensus = { path = ".." }
+cuprate-blockchain = { path = "../../storage/blockchain" }
+cuprate-consensus = { path = ".." }
 cuprate-consensus-rules = { path = "../rules" }
-cuprate-types = { path = "../../types" }
-hex.workspace = true
-hex-literal.workspace = true
-monero-serai.workspace = true
-rayon.workspace = true
-sha3 = "0.10.8"
-thiserror.workspace = true
-tokio = { workspace = true, features = ["full"] }
-tower.workspace = true
+cuprate-types = { path = "../../types" }
+cuprate-helper = { path = "../../helper", features = ["cast"] }
+
+clap = { workspace = true, features = ["derive", "std"] }
+hex = { workspace = true }
+hex-literal = { workspace = true }
+monero-serai = { workspace = true }
+sha3 = { version = "0.10.8" }
+thiserror = { workspace = true }
+tokio = { workspace = true, features = ["full"] }
+tower = { workspace = true }
 
 [dev-dependencies]
-tokio-test = "0.4.4"
+
+[lints]
+workspace = true
\ No newline at end of file
diff --git a/consensus/fast-sync/src/create.rs b/consensus/fast-sync/src/create.rs
index 0d6d03f..8c47b8e 100644
--- a/consensus/fast-sync/src/create.rs
+++ b/consensus/fast-sync/src/create.rs
@@ -1,3 +1,8 @@
+#![expect(
+    unused_crate_dependencies,
+    reason = "binary shares same Cargo.toml as library"
+)]
+
 use std::{fmt::Write, fs::write};
 
 use clap::Parser;
@@ -70,15 +75,12 @@ async fn main() {
     let mut height = 0_usize;
 
     while height < height_target {
-        match read_batch(&mut read_handle, height).await {
-            Ok(block_ids) => {
-                let hash = hash_of_hashes(block_ids.as_slice());
-                hashes_of_hashes.push(hash);
-            }
-            Err(_) => {
-                println!("Failed to read next batch from database");
-                break;
-            }
+        if let Ok(block_ids) = read_batch(&mut read_handle, height).await {
+            let hash = hash_of_hashes(block_ids.as_slice());
+            hashes_of_hashes.push(hash);
+        } else {
+            println!("Failed to read next batch from database");
+            break;
         }
         height += BATCH_SIZE;
     }
@@ -88,5 +90,5 @@ async fn main() {
     let generated = generate_hex(&hashes_of_hashes);
     write("src/data/hashes_of_hashes", generated).expect("Could not write file");
 
-    println!("Generated hashes up to block height {}", height);
+    println!("Generated hashes up to block height {height}");
 }
diff --git a/consensus/fast-sync/src/data/hashes_of_hashes b/consensus/fast-sync/src/data/hashes_of_hashes
index 74fec4c..2e5e99a 100644
--- a/consensus/fast-sync/src/data/hashes_of_hashes
+++ b/consensus/fast-sync/src/data/hashes_of_hashes
@@ -1,12 +1,12 @@
 [
-    hex!("1adffbaf832784406018009e07d3dc3a39da7edb6632523c119ed8acb32eb934"),
-    hex!("ae960265e3398d04f3cd4f949ed13c2689424887c71c1441a03d900a9d3a777f"),
-    hex!("938c72d267bbd3a17cdecbe02443d00012ee62d6e9f3524f5a914192110b1798"),
-    hex!("de0c82e51549b6514b42a591fd5440dddb5cc0118ec461459a99017bf06a0a0a"),
-    hex!("9a50f4586ec7e0fb58c6383048d3b334180235fd34bb714af20f1a3ebce4c911"),
-    hex!("5a3942f9bb318d65997bf57c40e045d62e7edbe35f3dae57499c2c5554896543"),
-    hex!("9dccee3b094cdd1b98e357c2c81bfcea798ea75efd94e67c6f5e86f428c5ec2c"),
-    hex!("620397540d44f21c3c57c20e9d47c6aaf0b1bf4302a4d43e75f2e33edd1a4032"),
-    hex!("ef6c612fb17bd70ac2ac69b2f85a421b138cc3a81daf622b077cb402dbf68377"),
-    hex!("6815ecb2bd73a3ba5f20558bfe1b714c30d6892b290e0d6f6cbf18237cedf75a"),
+    hex_literal::hex!("1adffbaf832784406018009e07d3dc3a39da7edb6632523c119ed8acb32eb934"),
+    hex_literal::hex!("ae960265e3398d04f3cd4f949ed13c2689424887c71c1441a03d900a9d3a777f"),
+    hex_literal::hex!("938c72d267bbd3a17cdecbe02443d00012ee62d6e9f3524f5a914192110b1798"),
+    hex_literal::hex!("de0c82e51549b6514b42a591fd5440dddb5cc0118ec461459a99017bf06a0a0a"),
+    hex_literal::hex!("9a50f4586ec7e0fb58c6383048d3b334180235fd34bb714af20f1a3ebce4c911"),
+    hex_literal::hex!("5a3942f9bb318d65997bf57c40e045d62e7edbe35f3dae57499c2c5554896543"),
+    hex_literal::hex!("9dccee3b094cdd1b98e357c2c81bfcea798ea75efd94e67c6f5e86f428c5ec2c"),
+    hex_literal::hex!("620397540d44f21c3c57c20e9d47c6aaf0b1bf4302a4d43e75f2e33edd1a4032"),
+    hex_literal::hex!("ef6c612fb17bd70ac2ac69b2f85a421b138cc3a81daf622b077cb402dbf68377"),
+    hex_literal::hex!("6815ecb2bd73a3ba5f20558bfe1b714c30d6892b290e0d6f6cbf18237cedf75a"),
 ]
diff --git a/consensus/fast-sync/src/fast_sync.rs b/consensus/fast-sync/src/fast_sync.rs
index 35fa674..b4fc12b 100644
--- a/consensus/fast-sync/src/fast_sync.rs
+++ b/consensus/fast-sync/src/fast_sync.rs
@@ -6,8 +6,6 @@ use std::{
     task::{Context, Poll},
 };
 
-#[allow(unused_imports)]
-use hex_literal::hex;
 use monero_serai::{
     block::Block,
     transaction::{Input, Transaction},
@@ -19,6 +17,7 @@ use cuprate_consensus::{
     transactions::new_tx_verification_data,
 };
 use cuprate_consensus_rules::{miner_tx::MinerTxError, ConsensusError};
+use cuprate_helper::cast::u64_to_usize;
 use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
 
 use crate::{hash_of_hashes, BlockId, HashOfHashes};
@@ -31,9 +30,9 @@ const BATCH_SIZE: usize = 512;
 
 #[cfg(test)]
 static HASHES_OF_HASHES: &[HashOfHashes] = &[
-    hex!("3fdc9032c16d440f6c96be209c36d3d0e1aed61a2531490fe0ca475eb615c40a"),
-    hex!("0102030405060708010203040506070801020304050607080102030405060708"),
-    hex!("0102030405060708010203040506070801020304050607080102030405060708"),
+    hex_literal::hex!("3fdc9032c16d440f6c96be209c36d3d0e1aed61a2531490fe0ca475eb615c40a"),
+    hex_literal::hex!("0102030405060708010203040506070801020304050607080102030405060708"),
+    hex_literal::hex!("0102030405060708010203040506070801020304050607080102030405060708"),
 ];
 
 #[cfg(test)]
@@ -44,14 +43,14 @@ fn max_height() -> u64 {
     (HASHES_OF_HASHES.len() * BATCH_SIZE) as u64
 }
 
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Eq)]
 pub struct ValidBlockId(BlockId);
 
 fn valid_block_ids(block_ids: &[BlockId]) -> Vec<ValidBlockId> {
     block_ids.iter().map(|b| ValidBlockId(*b)).collect()
 }
 
-#[allow(clippy::large_enum_variant)]
+#[expect(clippy::large_enum_variant)]
 pub enum FastSyncRequest {
     ValidateHashes {
         start_height: u64,
@@ -64,8 +63,8 @@ pub enum FastSyncRequest {
     },
 }
 
-#[allow(clippy::large_enum_variant)]
-#[derive(Debug, PartialEq)]
+#[expect(clippy::large_enum_variant)]
+#[derive(Debug, PartialEq, Eq)]
 pub enum FastSyncResponse {
     ValidateHashes {
         validated_hashes: Vec<ValidBlockId>,
@@ -74,7 +73,7 @@ pub enum FastSyncResponse {
     ValidateBlock(VerifiedBlockInformation),
 }
 
-#[derive(thiserror::Error, Debug, PartialEq)]
+#[derive(thiserror::Error, Debug, PartialEq, Eq)]
 pub enum FastSyncError {
     #[error("Block does not match its expected hash")]
     BlockHashMismatch,
@@ -127,9 +126,9 @@ where
         + Send
         + 'static,
 {
-    #[allow(dead_code)]
-    pub(crate) fn new(context_svc: C) -> FastSyncService<C> {
-        FastSyncService { context_svc }
+    #[expect(dead_code)]
+    pub(crate) const fn new(context_svc: C) -> Self {
+        Self { context_svc }
     }
 }
@@ -161,7 +160,7 @@ where
                 FastSyncRequest::ValidateHashes {
                     start_height,
                     block_ids,
-                } => validate_hashes(start_height, &block_ids).await,
+                } => validate_hashes(start_height, &block_ids),
                 FastSyncRequest::ValidateBlock { block, txs, token } => {
                     validate_block(context_svc, block, txs, token).await
                 }
@@ -170,11 +169,13 @@
     }
 }
 
-async fn validate_hashes(
+fn validate_hashes(
     start_height: u64,
     block_ids: &[BlockId],
 ) -> Result<FastSyncResponse, FastSyncError> {
-    if start_height as usize % BATCH_SIZE != 0 {
+    let start_height_usize = u64_to_usize(start_height);
+
+    if start_height_usize % BATCH_SIZE != 0 {
         return Err(FastSyncError::InvalidStartHeight);
     }
@@ -182,9 +183,9 @@ async fn validate_hashes(
         return Err(FastSyncError::OutOfRange);
     }
 
-    let stop_height = start_height as usize + block_ids.len();
+    let stop_height = start_height_usize + block_ids.len();
 
-    let batch_from = start_height as usize / BATCH_SIZE;
+    let batch_from = start_height_usize / BATCH_SIZE;
     let batch_to = cmp::min(stop_height / BATCH_SIZE, HASHES_OF_HASHES.len());
     let n_batches = batch_to - batch_from;
@@ -285,7 +286,7 @@ where
             block_blob,
             txs: verified_txs,
             block_hash,
-            pow_hash: [0u8; 32],
+            pow_hash: [0_u8; 32],
             height: *height,
             generated_coins,
             weight,
@@ -299,46 +300,36 @@ where
 #[cfg(test)]
 mod tests {
     use super::*;
-    use tokio_test::block_on;
 
     #[test]
     fn test_validate_hashes_errors() {
-        let ids = [[1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32], [5u8; 32]];
+        let ids = [[1_u8; 32], [2_u8; 32], [3_u8; 32], [4_u8; 32], [5_u8; 32]];
         assert_eq!(
-            block_on(validate_hashes(3, &[])),
+            validate_hashes(3, &[]),
             Err(FastSyncError::InvalidStartHeight)
         );
         assert_eq!(
-            block_on(validate_hashes(3, &ids)),
+            validate_hashes(3, &ids),
            Err(FastSyncError::InvalidStartHeight)
         );
 
-        assert_eq!(
-            block_on(validate_hashes(20, &[])),
-            Err(FastSyncError::OutOfRange)
-        );
-        assert_eq!(
-            block_on(validate_hashes(20, &ids)),
-            Err(FastSyncError::OutOfRange)
-        );
+        assert_eq!(validate_hashes(20, &[]), Err(FastSyncError::OutOfRange));
+        assert_eq!(validate_hashes(20, &ids), Err(FastSyncError::OutOfRange));
 
+        assert_eq!(validate_hashes(4, &[]), Err(FastSyncError::NothingToDo));
         assert_eq!(
-            block_on(validate_hashes(4, &[])),
-            Err(FastSyncError::NothingToDo)
-        );
-        assert_eq!(
-            block_on(validate_hashes(4, &ids[..3])),
+            validate_hashes(4, &ids[..3]),
             Err(FastSyncError::NothingToDo)
         );
     }
 
     #[test]
     fn test_validate_hashes_success() {
-        let ids = [[1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32], [5u8; 32]];
+        let ids = [[1_u8; 32], [2_u8; 32], [3_u8; 32], [4_u8; 32], [5_u8; 32]];
         let validated_hashes = valid_block_ids(&ids[0..4]);
         let unknown_hashes = ids[4..].to_vec();
         assert_eq!(
-            block_on(validate_hashes(0, &ids)),
+            validate_hashes(0, &ids),
             Ok(FastSyncResponse::ValidateHashes {
                 validated_hashes,
                 unknown_hashes
@@ -349,15 +340,10 @@ mod tests {
     #[test]
     fn test_validate_hashes_mismatch() {
         let ids = [
-            [1u8; 32], [2u8; 32], [3u8; 32], [5u8; 32], [1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32],
+            [1_u8; 32], [2_u8; 32], [3_u8; 32], [5_u8; 32], [1_u8; 32], [2_u8; 32], [3_u8; 32],
+            [4_u8; 32],
         ];
-        assert_eq!(
-            block_on(validate_hashes(0, &ids)),
-            Err(FastSyncError::Mismatch)
-        );
-        assert_eq!(
-            block_on(validate_hashes(4, &ids)),
-            Err(FastSyncError::Mismatch)
-        );
+        assert_eq!(validate_hashes(0, &ids), Err(FastSyncError::Mismatch));
+        assert_eq!(validate_hashes(4, &ids), Err(FastSyncError::Mismatch));
     }
 }
diff --git a/consensus/fast-sync/src/lib.rs b/consensus/fast-sync/src/lib.rs
index f82b163..8dbdc64 100644
--- a/consensus/fast-sync/src/lib.rs
+++ b/consensus/fast-sync/src/lib.rs
@@ -1,3 +1,9 @@
+// Used in `create.rs`
+use clap as _;
+use cuprate_blockchain as _;
+use hex as _;
+use tokio as _;
+
 pub mod fast_sync;
 pub mod util;
diff --git a/consensus/rules/Cargo.toml b/consensus/rules/Cargo.toml
index 8ba321d..575bed7 100644
--- a/consensus/rules/Cargo.toml
+++ b/consensus/rules/Cargo.toml
@@ -7,7 +7,7 @@ authors = ["Boog900"]
 
 [features]
 default = []
-proptest = ["dep:proptest", "dep:proptest-derive", "cuprate-types/proptest"]
+proptest = ["cuprate-types/proptest"]
 rayon = ["dep:rayon"]
 
 [dependencies]
@@ -24,15 +24,16 @@ hex = { workspace = true, features = ["std"] }
 hex-literal = { workspace = true }
 crypto-bigint = { workspace = true }
 
+cfg-if = { workspace = true }
 tracing = { workspace = true, features = ["std"] }
 thiserror = { workspace = true }
 
 rayon = { workspace = true, optional = true }
 
-proptest = {workspace = true, optional = true}
-proptest-derive = {workspace = true, optional = true}
-
 [dev-dependencies]
-proptest = {workspace = true}
-proptest-derive = {workspace = true}
-tokio = {version = "1.35.0", features = ["rt-multi-thread", "macros"]}
\ No newline at end of file
+proptest = { workspace = true }
+proptest-derive = { workspace = true }
+tokio = { version = "1.35.0", features = ["rt-multi-thread", "macros"] }
+
+[lints]
+workspace = true
\ No newline at end of file
diff --git a/consensus/rules/src/blocks.rs b/consensus/rules/src/blocks.rs
index e118e9a..5e55ce2 100644
--- a/consensus/rules/src/blocks.rs
+++ b/consensus/rules/src/blocks.rs
@@ -44,22 +44,22 @@ pub enum BlockError {
     MinerTxError(#[from] MinerTxError),
 }
 
-/// A trait to represent the RandomX VM.
+/// A trait to represent the `RandomX` VM.
 pub trait RandomX {
     type Error;
 
     fn calculate_hash(&self, buf: &[u8]) -> Result<[u8; 32], Self::Error>;
 }
 
-/// Returns if this height is a RandomX seed height.
-pub fn is_randomx_seed_height(height: usize) -> bool {
+/// Returns if this height is a `RandomX` seed height.
+pub const fn is_randomx_seed_height(height: usize) -> bool {
     height % RX_SEEDHASH_EPOCH_BLOCKS == 0
 }
 
-/// Returns the RandomX seed height for this block.
+/// Returns the `RandomX` seed height for this block.
 ///
 /// ref:
-pub fn randomx_seed_height(height: usize) -> usize {
+pub const fn randomx_seed_height(height: usize) -> usize {
     if height <= RX_SEEDHASH_EPOCH_BLOCKS + RX_SEEDHASH_EPOCH_LAG {
         0
     } else {
@@ -122,10 +122,10 @@ pub fn check_block_pow(hash: &[u8; 32], difficulty: u128) -> Result<(), BlockErr
 /// Returns the penalty free zone
 ///
 ///
-pub fn penalty_free_zone(hf: &HardFork) -> usize {
-    if hf == &HardFork::V1 {
+pub fn penalty_free_zone(hf: HardFork) -> usize {
+    if hf == HardFork::V1 {
         PENALTY_FREE_ZONE_1
-    } else if hf >= &HardFork::V2 && hf < &HardFork::V5 {
+    } else if hf >= HardFork::V2 && hf < HardFork::V5 {
         PENALTY_FREE_ZONE_2
     } else {
         PENALTY_FREE_ZONE_5
@@ -135,7 +135,7 @@ pub fn penalty_free_zone(hf: &HardFork) -> usize {
 /// Sanity check on the block blob size.
 ///
 /// ref:
-fn block_size_sanity_check(
+const fn block_size_sanity_check(
     block_blob_len: usize,
     effective_median: usize,
 ) -> Result<(), BlockError> {
@@ -149,7 +149,7 @@ fn block_size_sanity_check(
 /// Sanity check on the block weight.
 ///
 /// ref:
-pub fn check_block_weight(
+pub const fn check_block_weight(
     block_weight: usize,
     median_for_block_reward: usize,
 ) -> Result<(), BlockError> {
@@ -163,7 +163,7 @@ pub fn check_block_weight(
 /// Sanity check on number of txs in the block.
 ///
 /// ref:
-fn check_amount_txs(number_none_miner_txs: usize) -> Result<(), BlockError> {
+const fn check_amount_txs(number_none_miner_txs: usize) -> Result<(), BlockError> {
     if number_none_miner_txs + 1 > 0x10000000 {
         Err(BlockError::TooManyTxs)
     } else {
@@ -175,10 +175,10 @@ fn check_amount_txs(number_none_miner_txs: usize) -> Result<(), BlockError> {
 ///
 /// ref:
 fn check_prev_id(block: &Block, top_hash: &[u8; 32]) -> Result<(), BlockError> {
-    if &block.header.previous != top_hash {
-        Err(BlockError::PreviousIDIncorrect)
-    } else {
+    if &block.header.previous == top_hash {
         Ok(())
+    } else {
+        Err(BlockError::PreviousIDIncorrect)
     }
 }
 
@@ -273,7 +273,7 @@ pub fn check_block(
         block_weight,
         block_chain_ctx.median_weight_for_block_reward,
         block_chain_ctx.already_generated_coins,
-        &block_chain_ctx.current_hf,
+        block_chain_ctx.current_hf,
     )?;
 
     Ok((vote, generated_coins))
diff --git a/consensus/rules/src/decomposed_amount.rs b/consensus/rules/src/decomposed_amount.rs
index a8821f3..ebed8b0 100644
--- a/consensus/rules/src/decomposed_amount.rs
+++ b/consensus/rules/src/decomposed_amount.rs
@@ -1,6 +1,6 @@
 #[rustfmt::skip]
 /// Decomposed amount table.
-pub static DECOMPOSED_AMOUNTS: [u64; 172] = [
+pub(crate) static DECOMPOSED_AMOUNTS: [u64; 172] = [
     1, 2, 3, 4, 5, 6, 7, 8, 9,
     10, 20, 30, 40, 50, 60, 70, 80, 90,
     100, 200, 300, 400, 500, 600, 700, 800, 900,
@@ -40,8 +40,8 @@ mod tests {
 
     #[test]
     fn decomposed_amounts_return_decomposed() {
-        for amount in DECOMPOSED_AMOUNTS.iter() {
-            assert!(is_decomposed_amount(amount))
+        for amount in &DECOMPOSED_AMOUNTS {
+            assert!(is_decomposed_amount(amount));
         }
     }
diff --git a/consensus/rules/src/genesis.rs b/consensus/rules/src/genesis.rs
index b796119..e1cf4f8 100644
--- a/consensus/rules/src/genesis.rs
+++ b/consensus/rules/src/genesis.rs
@@ -8,7 +8,7 @@ use monero_serai::{
 
 use cuprate_helper::network::Network;
 
-const fn genesis_nonce(network: &Network) -> u32 {
+const fn genesis_nonce(network: Network) -> u32 {
     match network {
         Network::Mainnet => 10000,
         Network::Testnet => 10001,
@@ -16,7 +16,7 @@ const fn genesis_nonce(network: &Network) -> u32 {
     }
 }
 
-fn genesis_miner_tx(network: &Network) -> Transaction {
+fn genesis_miner_tx(network: Network) -> Transaction {
     Transaction::read(&mut hex::decode(match network {
         Network::Mainnet | Network::Testnet => "013c01ff0001ffffffffffff03029b2e4c0281c0b02e7c53291a94d1d0cbff8883f8024f5142ee494ffbbd08807121017767aafcde9be00dcfd098715ebcf7f410daebc582fda69d24a28e9d0bc890d1",
         Network::Stagenet => "013c01ff0001ffffffffffff0302df5d56da0c7d643ddd1ce61901c7bdc5fb1738bfe39fbe69c28a3a7032729c0f2101168d0c4ca86fb55a4cf6a36d31431be1c53a3bd7411bb24e8832410289fa6f3b"
@@ -26,7 +26,7 @@ fn genesis_miner_tx(network: &Network) -> Transaction {
 /// Generates the Monero genesis block.
 ///
 /// ref:
-pub fn generate_genesis_block(network: &Network) -> Block {
+pub fn generate_genesis_block(network: Network) -> Block {
     Block {
         header: BlockHeader {
             hardfork_version: 1,
@@ -47,19 +47,19 @@ mod tests {
     #[test]
     fn generate_genesis_blocks() {
         assert_eq!(
-            &generate_genesis_block(&Network::Mainnet).hash(),
+            &generate_genesis_block(Network::Mainnet).hash(),
             hex::decode("418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3")
                 .unwrap()
                 .as_slice()
         );
         assert_eq!(
-            &generate_genesis_block(&Network::Testnet).hash(),
+            &generate_genesis_block(Network::Testnet).hash(),
             hex::decode("48ca7cd3c8de5b6a4d53d2861fbdaedca141553559f9be9520068053cda8430b")
                 .unwrap()
                 .as_slice()
         );
         assert_eq!(
-            &generate_genesis_block(&Network::Stagenet).hash(),
+            &generate_genesis_block(Network::Stagenet).hash(),
             hex::decode("76ee3cc98646292206cd3e86f74d88b4dcc1d937088645e9b0cbca84b7ce74eb")
                 .unwrap()
                 .as_slice()
diff --git a/consensus/rules/src/hard_forks.rs b/consensus/rules/src/hard_forks.rs
index 4f786e4..7e9a881 100644
--- a/consensus/rules/src/hard_forks.rs
+++ b/consensus/rules/src/hard_forks.rs
@@ -25,10 +25,10 @@ pub fn check_block_version_vote(
 ) -> Result<(), HardForkError> {
     // self = current hf
     if hf != version {
-        Err(HardForkError::VersionIncorrect)?;
+        return Err(HardForkError::VersionIncorrect);
     }
 
     if hf > vote {
-        Err(HardForkError::VoteTooLow)?;
+        return Err(HardForkError::VoteTooLow);
     }
 
     Ok(())
@@ -41,8 +41,8 @@ pub struct HFInfo {
     threshold: usize,
 }
 impl HFInfo {
-    pub const fn new(height: usize, threshold: usize) -> HFInfo {
-        HFInfo { height, threshold }
+    pub const fn new(height: usize, threshold: usize) -> Self {
+        Self { height, threshold }
     }
 }
 
@@ -51,7 +51,7 @@ impl HFInfo {
 pub struct HFsInfo([HFInfo; NUMB_OF_HARD_FORKS]);
 
 impl HFsInfo {
-    pub fn info_for_hf(&self, hf: &HardFork) -> HFInfo {
+    pub const fn info_for_hf(&self, hf: &HardFork) -> HFInfo {
         self.0[*hf as usize - 1]
     }
 
@@ -62,7 +62,7 @@ impl HFsInfo {
     /// Returns the main-net hard-fork information.
     ///
     /// ref:
-    pub const fn main_net() -> HFsInfo {
+    pub const fn main_net() -> Self {
         Self([
             HFInfo::new(0, 0),
             HFInfo::new(1009827, 0),
@@ -86,7 +86,7 @@ impl HFsInfo {
     /// Returns the test-net hard-fork information.
     ///
     /// ref:
-    pub const fn test_net() -> HFsInfo {
+    pub const fn test_net() -> Self {
         Self([
             HFInfo::new(0, 0),
             HFInfo::new(624634, 0),
@@ -110,7 +110,7 @@ impl HFsInfo {
     /// Returns the test-net hard-fork information.
     ///
     /// ref:
-    pub const fn stage_net() -> HFsInfo {
+    pub const fn stage_net() -> Self {
         Self([
             HFInfo::new(0, 0),
             HFInfo::new(32000, 0),
@@ -165,8 +165,8 @@ impl Display for HFVotes {
 }
 
 impl HFVotes {
-    pub fn new(window_size: usize) -> HFVotes {
-        HFVotes {
+    pub fn new(window_size: usize) -> Self {
+        Self {
             votes: [0; NUMB_OF_HARD_FORKS],
             vote_list: VecDeque::with_capacity(window_size),
             window_size,
@@ -251,6 +251,6 @@ impl HFVotes {
 /// Returns the votes needed for a hard-fork.
 ///
 /// ref:
-pub fn votes_needed(threshold: usize, window: usize) -> usize {
+pub const fn votes_needed(threshold: usize, window: usize) -> usize {
     (threshold * window).div_ceil(100)
 }
diff --git a/consensus/rules/src/hard_forks/tests.rs b/consensus/rules/src/hard_forks/tests.rs
index 00dd036..1a24627 100644
--- a/consensus/rules/src/hard_forks/tests.rs
+++ b/consensus/rules/src/hard_forks/tests.rs
@@ -51,7 +51,7 @@ proptest! {
         prop_assert_eq!(hf_votes.total_votes(), hf_votes.vote_list.len());
 
         let mut votes = [0_usize; NUMB_OF_HARD_FORKS];
-        for vote in hf_votes.vote_list.iter() {
+        for vote in &hf_votes.vote_list {
            // manually go through the list of votes tallying
            votes[*vote as usize - 1] += 1;
         }
@@ -61,9 +61,9 @@ proptest! {
 
     #[test]
     fn window_size_kept_constant(mut hf_votes in arb_full_hf_votes(), new_votes in any::<Vec<HardFork>>()) {
-        for new_vote in new_votes.into_iter() {
+        for new_vote in new_votes {
             hf_votes.add_vote_for_hf(&new_vote);
-            prop_assert_eq!(hf_votes.total_votes(), TEST_WINDOW_SIZE)
+            prop_assert_eq!(hf_votes.total_votes(), TEST_WINDOW_SIZE);
         }
     }
 
diff --git a/consensus/rules/src/lib.rs b/consensus/rules/src/lib.rs
index a5f8800..876e2f7 100644
--- a/consensus/rules/src/lib.rs
+++ b/consensus/rules/src/lib.rs
@@ -1,3 +1,12 @@
+cfg_if::cfg_if! {
+    // Used in external `tests/`.
+    if #[cfg(test)] {
+        use proptest as _;
+        use proptest_derive as _;
+        use tokio as _;
+    }
+}
+
 use std::time::{SystemTime, UNIX_EPOCH};
 
 pub mod batch_verifier;
diff --git a/consensus/rules/src/miner_tx.rs b/consensus/rules/src/miner_tx.rs
index 663c95e..e6b51d2 100644
--- a/consensus/rules/src/miner_tx.rs
+++ b/consensus/rules/src/miner_tx.rs
@@ -40,7 +40,7 @@ const MINER_TX_TIME_LOCKED_BLOCKS: usize = 60;
 /// the block.
 ///
 /// ref:
-fn calculate_base_reward(already_generated_coins: u64, hf: &HardFork) -> u64 {
+fn calculate_base_reward(already_generated_coins: u64, hf: HardFork) -> u64 {
     let target_mins = hf.block_time().as_secs() / 60;
     let emission_speed_factor = 20 - (target_mins - 1);
     ((MONEY_SUPPLY - already_generated_coins) >> emission_speed_factor)
@@ -54,7 +54,7 @@ pub fn calculate_block_reward(
     block_weight: usize,
     median_bw: usize,
     already_generated_coins: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> u64 {
     let base_reward = calculate_base_reward(already_generated_coins, hf);
 
@@ -75,9 +75,9 @@ pub fn calculate_block_reward(
 /// Checks the miner transactions version.
 ///
 /// ref:
-fn check_miner_tx_version(tx_version: &TxVersion, hf: &HardFork) -> Result<(), MinerTxError> {
+fn check_miner_tx_version(tx_version: TxVersion, hf: HardFork) -> Result<(), MinerTxError> {
     // The TxVersion enum checks if the version is not 1 or 2
-    if hf >= &HardFork::V12 && tx_version != &TxVersion::RingCT {
+    if hf >= HardFork::V12 && tx_version != TxVersion::RingCT {
         Err(MinerTxError::VersionInvalid)
     } else {
         Ok(())
@@ -94,31 +94,31 @@ fn check_inputs(inputs: &[Input], chain_height: usize) -> Result<(), MinerTxErro
 
     match &inputs[0] {
         Input::Gen(height) => {
-            if height != &chain_height {
-                Err(MinerTxError::InputsHeightIncorrect)
-            } else {
+            if height == &chain_height {
                 Ok(())
+            } else {
+                Err(MinerTxError::InputsHeightIncorrect)
             }
         }
-        _ => Err(MinerTxError::InputNotOfTypeGen),
+        Input::ToKey { .. } => Err(MinerTxError::InputNotOfTypeGen),
     }
 }
 
 /// Checks the miner transaction has a correct time lock.
 ///
 /// ref:
-fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<(), MinerTxError> {
+const fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<(), MinerTxError> {
     match time_lock {
         &Timelock::Block(till_height) => {
             // Lock times above this amount are timestamps not blocks.
             // This is just for safety though and shouldn't actually be hit.
             if till_height > 500_000_000 {
-                Err(MinerTxError::InvalidLockTime)?;
+                return Err(MinerTxError::InvalidLockTime);
             }
-            if till_height != chain_height + MINER_TX_TIME_LOCKED_BLOCKS {
-                Err(MinerTxError::InvalidLockTime)
-            } else {
+            if till_height == chain_height + MINER_TX_TIME_LOCKED_BLOCKS {
                 Ok(())
+            } else {
+                Err(MinerTxError::InvalidLockTime)
             }
         }
         _ => Err(MinerTxError::InvalidLockTime),
@@ -131,18 +131,18 @@ fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<(), Mine
 /// &&
 fn sum_outputs(
     outputs: &[Output],
-    hf: &HardFork,
-    tx_version: &TxVersion,
+    hf: HardFork,
+    tx_version: TxVersion,
 ) -> Result<u64, MinerTxError> {
     let mut sum: u64 = 0;
 
     for out in outputs {
         let amt = out.amount.unwrap_or(0);
 
-        if tx_version == &TxVersion::RingSignatures && amt == 0 {
+        if tx_version == TxVersion::RingSignatures && amt == 0 {
             return Err(MinerTxError::OutputAmountIncorrect);
         }
 
-        if hf == &HardFork::V3 && !is_decomposed_amount(&amt) {
+        if hf == HardFork::V3 && !is_decomposed_amount(&amt) {
             return Err(MinerTxError::OutputNotDecomposed);
         }
         sum = sum.checked_add(amt).ok_or(MinerTxError::OutputsOverflow)?;
@@ -157,9 +157,9 @@ fn check_total_output_amt(
     total_output: u64,
     reward: u64,
     fees: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<u64, MinerTxError> {
-    if hf == &HardFork::V1 || hf >= &HardFork::V12 {
+    if hf == HardFork::V1 || hf >= HardFork::V12 {
         if total_output != reward + fees {
             return Err(MinerTxError::OutputAmountIncorrect);
         }
@@ -185,16 +185,16 @@ pub fn check_miner_tx(
     block_weight: usize,
     median_bw: usize,
     already_generated_coins: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<u64, MinerTxError> {
     let tx_version = TxVersion::from_raw(tx.version()).ok_or(MinerTxError::VersionInvalid)?;
-    check_miner_tx_version(&tx_version, hf)?;
+    check_miner_tx_version(tx_version, hf)?;
 
     // ref:
     match tx {
         Transaction::V1 { .. } => (),
         Transaction::V2 { proofs, .. } => {
-            if hf >= &HardFork::V12 && proofs.is_some() {
+            if hf >= HardFork::V12 && proofs.is_some() {
                 return Err(MinerTxError::RCTTypeNotNULL);
             }
         }
@@ -207,7 +207,7 @@ pub fn check_miner_tx(
     check_output_types(&tx.prefix().outputs, hf).map_err(|_| MinerTxError::InvalidOutputType)?;
 
     let reward = calculate_block_reward(block_weight, median_bw, already_generated_coins, hf);
-    let total_outs = sum_outputs(&tx.prefix().outputs, hf, &tx_version)?;
+    let total_outs = sum_outputs(&tx.prefix().outputs, hf, tx_version)?;
 
     check_total_output_amt(total_outs, reward, total_fees, hf)
 }
@@ -221,7 +221,7 @@ mod tests {
     proptest! {
         #[test]
         fn tail_emission(generated_coins in any::<u64>(), hf in any::<HardFork>()) {
-            prop_assert!(calculate_base_reward(generated_coins, &hf) >= MINIMUM_REWARD_PER_MIN * hf.block_time().as_secs() / 60)
+            prop_assert!(calculate_base_reward(generated_coins, hf) >= MINIMUM_REWARD_PER_MIN * hf.block_time().as_secs() / 60);
         }
     }
 }
diff --git a/consensus/rules/src/transactions.rs b/consensus/rules/src/transactions.rs
index 9c6ad51..b4eac19 100644
--- a/consensus/rules/src/transactions.rs
+++ b/consensus/rules/src/transactions.rs
@@ -99,11 +99,8 @@ fn check_output_keys(outputs: &[Output]) -> Result<(), TransactionError> {
 ///
 ///
 ///
-pub(crate) fn check_output_types(
-    outputs: &[Output],
-    hf: &HardFork,
-) -> Result<(), TransactionError> {
-    if hf == &HardFork::V15 {
+pub(crate) fn check_output_types(outputs: &[Output], hf: HardFork) -> Result<(), TransactionError> {
+    if hf == HardFork::V15 {
         for outs in outputs.windows(2) {
             if outs[0].view_tag.is_some() != outs[1].view_tag.is_some() {
                 return Err(TransactionError::OutputTypeInvalid);
@@ -113,8 +110,8 @@ pub(crate) fn check_output_types(
     }
 
     for out in outputs {
-        if hf <= &HardFork::V14 && out.view_tag.is_some()
-            || hf >= &HardFork::V16 && out.view_tag.is_none()
+        if hf <= HardFork::V14 && out.view_tag.is_some()
+            || hf >= HardFork::V16 && out.view_tag.is_none()
         {
             return Err(TransactionError::OutputTypeInvalid);
         }
@@ -125,12 +122,12 @@ pub(crate) fn check_output_types(
 /// Checks the individual outputs amount for version 1 txs.
 ///
 /// ref:
-fn check_output_amount_v1(amount: u64, hf: &HardFork) -> Result<(), TransactionError> {
+fn check_output_amount_v1(amount: u64, hf: HardFork) -> Result<(), TransactionError> {
     if amount == 0 {
         return Err(TransactionError::ZeroOutputForV1);
     }
 
-    if hf >= &HardFork::V2 && !is_decomposed_amount(&amount) {
+    if hf >= HardFork::V2 && !is_decomposed_amount(&amount) {
         return Err(TransactionError::AmountNotDecomposed);
     }
 
@@ -140,7 +137,7 @@ fn check_output_amount_v1(amount: u64, hf: &HardFork) -> Result<(), TransactionE
 /// Checks the individual outputs amount for version 2 txs.
 ///
 /// ref:
-fn check_output_amount_v2(amount: u64) -> Result<(), TransactionError> {
+const fn check_output_amount_v2(amount: u64) -> Result<(), TransactionError> {
     if amount == 0 {
         Ok(())
     } else {
@@ -154,8 +151,8 @@ fn check_output_amount_v2(amount: u64) -> Result<(), TransactionError> {
 /// &&
 fn sum_outputs(
     outputs: &[Output],
-    hf: &HardFork,
-    tx_version: &TxVersion,
+    hf: HardFork,
+    tx_version: TxVersion,
 ) -> Result<u64, TransactionError> {
     let mut sum: u64 = 0;
 
@@ -181,15 +178,15 @@ fn sum_outputs(
 /// &&
 fn check_number_of_outputs(
     outputs: usize,
-    hf: &HardFork,
-    tx_version: &TxVersion,
+    hf: HardFork,
+    tx_version: TxVersion,
     bp_or_bpp: bool,
 ) -> Result<(), TransactionError> {
-    if tx_version == &TxVersion::RingSignatures {
+    if tx_version == TxVersion::RingSignatures {
         return Ok(());
     }
 
-    if hf >= &HardFork::V12 && outputs < 2 {
+    if hf >= HardFork::V12 && outputs < 2 {
         return Err(TransactionError::InvalidNumberOfOutputs);
     }
 
@@ -207,8 +204,8 @@ fn check_number_of_outputs(
 /// &&
 fn check_outputs_semantics(
     outputs: &[Output],
-    hf: &HardFork,
-    tx_version: &TxVersion,
+    hf: HardFork,
+    tx_version: TxVersion,
     bp_or_bpp: bool,
 ) -> Result<u64, TransactionError> {
     check_output_types(outputs, hf)?;
@@ -223,11 +220,11 @@ fn check_outputs_semantics(
 /// Checks if an outputs unlock time has passed.
 ///
 ///
-pub fn output_unlocked(
+pub const fn output_unlocked(
     time_lock: &Timelock,
     current_chain_height: usize,
     current_time_lock_timestamp: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> bool {
     match *time_lock {
         Timelock::None => true,
@@ -243,7 +240,7 @@ pub fn output_unlocked(
 /// Returns if a locked output, which uses a block height, can be spent.
 ///
 /// ref:
-fn check_block_time_lock(unlock_height: usize, current_chain_height: usize) -> bool {
+const fn check_block_time_lock(unlock_height: usize, current_chain_height: usize) -> bool {
     // current_chain_height = 1 + top height
     unlock_height <= current_chain_height
 }
@@ -251,10 +248,10 @@ fn check_block_time_lock(unlock_height: usize, current_chain_height: usize) -> b
 /// Returns if a locked output, which uses a block height, can be spent.
 ///
 /// ref:
-fn check_timestamp_time_lock(
+const fn check_timestamp_time_lock(
     unlock_timestamp: u64,
     current_time_lock_timestamp: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> bool {
     current_time_lock_timestamp + hf.block_time().as_secs() >= unlock_timestamp
 }
@@ -269,19 +266,19 @@ fn check_all_time_locks(
     time_locks: &[Timelock],
     current_chain_height: usize,
     current_time_lock_timestamp: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<(), TransactionError> {
     time_locks.iter().try_for_each(|time_lock| {
-        if !output_unlocked(
+        if output_unlocked(
             time_lock,
             current_chain_height,
             current_time_lock_timestamp,
             hf,
         ) {
+            Ok(())
+        } else {
             tracing::debug!("Transaction invalid: one or more inputs locked, lock: {time_lock:?}.");
             Err(TransactionError::OneOrMoreRingMembersLocked)
-        } else {
-            Ok(())
         }
     })
 }
@@ -292,11 +289,11 @@ fn check_all_time_locks(
 ///
 /// ref:
 /// &&
-pub fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), TransactionError> {
-    if hf == &HardFork::V15 {
+pub fn check_decoy_info(decoy_info: &DecoyInfo, hf: HardFork) -> Result<(), TransactionError> {
+    if hf == HardFork::V15 {
         // Hard-fork 15 allows both v14 and v16 rules
-        return check_decoy_info(decoy_info, &HardFork::V14)
-            .or_else(|_| check_decoy_info(decoy_info, &HardFork::V16));
+        return check_decoy_info(decoy_info, HardFork::V14)
+            .or_else(|_| check_decoy_info(decoy_info, HardFork::V16));
     }
 
     let current_minimum_decoys = minimum_decoys(hf);
@@ -310,13 +307,13 @@ pub fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), Tra
         if decoy_info.mixable > 1 {
             return Err(TransactionError::MoreThanOneMixableInputWithUnmixable);
         }
-    } else if hf >= &HardFork::V8 && decoy_info.min_decoys != current_minimum_decoys {
+    } else if hf >= HardFork::V8 && decoy_info.min_decoys != current_minimum_decoys {
         // From V8 enforce the minimum used number of rings is the default minimum.
         return Err(TransactionError::InputDoesNotHaveExpectedNumbDecoys);
     }
 
     // From v12 all inputs must have the same number of decoys.
-    if hf >= &HardFork::V12 && decoy_info.min_decoys != decoy_info.max_decoys {
+    if hf >= HardFork::V12 && decoy_info.min_decoys != decoy_info.max_decoys {
         return Err(TransactionError::InputDoesNotHaveExpectedNumbDecoys);
     }
 
@@ -334,19 +331,19 @@ fn check_key_images(input: &Input) -> Result<(), TransactionError> {
                 return Err(TransactionError::KeyImageIsNotInPrimeSubGroup);
             }
         }
-        _ => Err(TransactionError::IncorrectInputType)?,
+        Input::Gen(_) => return Err(TransactionError::IncorrectInputType),
     }
 
     Ok(())
 }
 
-/// Checks that the input is of type [`Input::ToKey`] aka txin_to_key.
+/// Checks that the input is of type [`Input::ToKey`] aka `txin_to_key`.
 ///
 /// ref:
-fn check_input_type(input: &Input) -> Result<(), TransactionError> {
+const fn check_input_type(input: &Input) -> Result<(), TransactionError> {
     match input {
         Input::ToKey { .. } => Ok(()),
-        _ => Err(TransactionError::IncorrectInputType)?,
+        Input::Gen(_) => Err(TransactionError::IncorrectInputType),
     }
 }
 
@@ -362,15 +359,15 @@ fn check_input_has_decoys(input: &Input) -> Result<(), TransactionError> {
                 Ok(())
             }
         }
-        _ => Err(TransactionError::IncorrectInputType)?,
+        Input::Gen(_) => Err(TransactionError::IncorrectInputType),
     }
 }
 
 /// Checks that the ring members for the input are unique after hard-fork 6.
 ///
 /// ref:
-fn check_ring_members_unique(input: &Input, hf: &HardFork) -> Result<(), TransactionError> {
-    if hf >= &HardFork::V6 {
+fn check_ring_members_unique(input: &Input, hf: HardFork) -> Result<(), TransactionError> {
+    if hf >= HardFork::V6 {
         match input {
             Input::ToKey { key_offsets, .. } => key_offsets.iter().skip(1).try_for_each(|offset| {
                 if *offset == 0 {
@@ -379,7 +376,7 @@ fn check_ring_members_unique(input: &Input, hf: &HardFork) -> Result<(), Transac
                     Ok(())
                 }
             }),
-            _ => Err(TransactionError::IncorrectInputType)?,
+            Input::Gen(_) => Err(TransactionError::IncorrectInputType),
         }
     } else {
         Ok(())
@@ -389,23 +386,22 @@ fn check_ring_members_unique(input: &Input, hf: &HardFork) -> Result<(), Transac
 /// Checks that from hf 7 the inputs are sorted by key image.
 ///
 /// ref:
-fn check_inputs_sorted(inputs: &[Input], hf: &HardFork) -> Result<(), TransactionError> {
+fn check_inputs_sorted(inputs: &[Input], hf: HardFork) -> Result<(), TransactionError> {
     let get_ki = |inp: &Input| match inp {
         Input::ToKey { key_image, .. } => Ok(key_image.compress().to_bytes()),
-        _ => Err(TransactionError::IncorrectInputType),
+        Input::Gen(_) => Err(TransactionError::IncorrectInputType),
     };
 
-    if hf >= &HardFork::V7 {
+    if hf >= HardFork::V7 {
         for inps in inputs.windows(2) {
             match get_ki(&inps[0])?.cmp(&get_ki(&inps[1])?) {
                 Ordering::Greater => (),
                 _ => return Err(TransactionError::InputsAreNotOrdered),
             }
         }
-        Ok(())
-    } else {
-        Ok(())
     }
+
+    Ok(())
 }
 
 /// Checks the youngest output is at least 10 blocks old.
@@ -414,9 +410,9 @@ fn check_inputs_sorted(inputs: &[Input], hf: &HardFork) -> Result<(), Transactio
 fn check_10_block_lock(
     youngest_used_out_height: usize,
     current_chain_height: usize,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<(), TransactionError> {
-    if hf >= &HardFork::V12 {
+    if hf >= HardFork::V12 {
         if youngest_used_out_height + 10 > current_chain_height {
             tracing::debug!(
                 "Transaction invalid: One or more ring members younger than 10 blocks."
@@ -442,7 +438,7 @@ fn sum_inputs_check_overflow(inputs: &[Input]) -> Result<u64, TransactionError>
                 .checked_add(amount.unwrap_or(0))
                 .ok_or(TransactionError::InputsOverflow)?;
         }
-        _ => Err(TransactionError::IncorrectInputType)?,
+        Input::Gen(_) => return Err(TransactionError::IncorrectInputType),
     }
 }
 
@@ -454,7 +450,7 @@
 /// Semantic rules are rules that don't require blockchain context, the hard-fork does not require blockchain context as:
 /// - The tx-pool will use the current hard-fork
 /// - When syncing the hard-fork is in the block header.
-fn check_inputs_semantics(inputs: &[Input], hf: &HardFork) -> Result<u64, TransactionError> {
+fn check_inputs_semantics(inputs: &[Input], hf: HardFork) -> Result<u64, TransactionError> {
     //
     if inputs.is_empty() {
         return Err(TransactionError::NoInputs);
     }
@@ -481,14 +477,14 @@ fn check_inputs_contextual(
     inputs: &[Input],
     tx_ring_members_info: &TxRingMembersInfo,
     current_chain_height: usize,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<(), TransactionError> {
     // This rule is not contained in monero-core explicitly, but it is enforced by how Monero picks ring members.
     // When picking ring members monerod will only look in the DB at past blocks so an output has to be younger
     // than this transaction to be used in this tx.
     if tx_ring_members_info.youngest_used_out_height >= current_chain_height {
         tracing::debug!("Transaction invalid: One or more ring members too young.");
-        Err(TransactionError::OneOrMoreRingMembersLocked)?;
+        return Err(TransactionError::OneOrMoreRingMembersLocked);
     }
 
     check_10_block_lock(
@@ -500,7 +496,7 @@ fn check_inputs_contextual(
     if let Some(decoys_info) = &tx_ring_members_info.decoy_info {
         check_decoy_info(decoys_info, hf)?;
     } else {
-        assert_eq!(hf, &HardFork::V1);
+        assert_eq!(hf, HardFork::V1);
     }
 
     for input in inputs {
@@ -517,22 +513,22 @@ fn check_inputs_contextual(
 ///
 fn check_tx_version(
     decoy_info: &Option<DecoyInfo>,
-    version: &TxVersion,
-    hf: &HardFork,
+    version: TxVersion,
+    hf: HardFork,
 ) -> Result<(), TransactionError> {
     if let Some(decoy_info) = decoy_info {
         let max = max_tx_version(hf);
-        if version > &max {
+        if version > max {
             return Err(TransactionError::TransactionVersionInvalid);
         }
 
         let min = min_tx_version(hf);
-        if version < &min && decoy_info.not_mixable == 0 {
+        if version < min && decoy_info.not_mixable == 0 {
             return Err(TransactionError::TransactionVersionInvalid);
         }
     } else {
         // This will only happen for hard-fork 1 when only RingSignatures are allowed.
-        if version != &TxVersion::RingSignatures {
+        if version != TxVersion::RingSignatures {
             return Err(TransactionError::TransactionVersionInvalid);
         }
     }
@@ -541,8 +537,8 @@ fn check_tx_version(
 }
 
 /// Returns the default maximum tx version for the given hard-fork.
-fn max_tx_version(hf: &HardFork) -> TxVersion {
-    if hf <= &HardFork::V3 {
+fn max_tx_version(hf: HardFork) -> TxVersion {
+    if hf <= HardFork::V3 {
         TxVersion::RingSignatures
     } else {
         TxVersion::RingCT
@@ -550,15 +546,15 @@ fn max_tx_version(hf: &HardFork) -> TxVersion {
 }
 
 /// Returns the default minimum tx version for the given hard-fork.
-fn min_tx_version(hf: &HardFork) -> TxVersion {
-    if hf >= &HardFork::V6 {
+fn min_tx_version(hf: HardFork) -> TxVersion {
+    if hf >= HardFork::V6 {
         TxVersion::RingCT
     } else {
         TxVersion::RingSignatures
     }
 }
 
-fn transaction_weight_limit(hf: &HardFork) -> usize {
+fn transaction_weight_limit(hf: HardFork) -> usize {
     penalty_free_zone(hf) / 2 - 600
 }
 
@@ -575,14 +571,14 @@ pub fn check_transaction_semantic(
     tx_blob_size: usize,
     tx_weight: usize,
     tx_hash: &[u8; 32],
-    hf: &HardFork,
+    hf: HardFork,
     verifier: impl BatchVerifier,
 ) -> Result<u64, TransactionError> {
     //
     if tx_blob_size > MAX_TX_BLOB_SIZE
-        || (hf >= &HardFork::V8 && tx_weight > transaction_weight_limit(hf))
+        || (hf >= HardFork::V8 && tx_weight > transaction_weight_limit(hf))
     {
-        Err(TransactionError::TooBig)?;
+        return Err(TransactionError::TooBig);
     }
 
     let tx_version =
@@ -602,13 +598,13 @@ pub fn check_transaction_semantic(
         Transaction::V2 { proofs: None, .. } | Transaction::V1 { .. } => false,
     };
 
-    let outputs_sum = check_outputs_semantics(&tx.prefix().outputs, hf, &tx_version, bp_or_bpp)?;
+    let outputs_sum = check_outputs_semantics(&tx.prefix().outputs, hf, tx_version, bp_or_bpp)?;
     let inputs_sum = check_inputs_semantics(&tx.prefix().inputs, hf)?;
 
     let fee = match tx {
         Transaction::V1 { .. } => {
             if outputs_sum >= inputs_sum {
-                Err(TransactionError::OutputsTooHigh)?;
+                return Err(TransactionError::OutputsTooHigh);
             }
             inputs_sum - outputs_sum
         }
@@ -633,13 +629,12 @@ pub fn check_transaction_semantic(
 /// This function also does _not_ check for duplicate key-images: .
 ///
 /// `current_time_lock_timestamp` must be: .
-
 pub fn check_transaction_contextual(
     tx: &Transaction,
     tx_ring_members_info: &TxRingMembersInfo,
     current_chain_height: usize,
     current_time_lock_timestamp: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<(), TransactionError> {
     let tx_version =
         TxVersion::from_raw(tx.version()).ok_or(TransactionError::TransactionVersionInvalid)?;
@@ -650,7 +645,7 @@ pub fn check_transaction_contextual(
         current_chain_height,
         hf,
     )?;
-    check_tx_version(&tx_ring_members_info.decoy_info, &tx_version, hf)?;
+    check_tx_version(&tx_ring_members_info.decoy_info, tx_version, hf)?;
 
     check_all_time_locks(
         &tx_ring_members_info.time_locked_outs,
diff --git a/consensus/rules/src/transactions/contextual_data.rs b/consensus/rules/src/transactions/contextual_data.rs
index 282093d..73bc12e 100644
--- a/consensus/rules/src/transactions/contextual_data.rs
+++ b/consensus/rules/src/transactions/contextual_data.rs
@@ -26,7 +26,7 @@ pub fn get_absolute_offsets(relative_offsets: &[u64]) -> Result<Vec<u64>, Transa
     Ok(offsets)
 }
 
-/// Inserts the output IDs that are needed to verify the transaction inputs into the provided HashMap.
+/// Inserts the output IDs that are needed to verify the transaction inputs into the provided `HashMap`.
 ///
 /// This will error if the inputs are empty
 ///
@@ -49,7 +49,7 @@ pub fn insert_ring_member_ids(
                 .entry(amount.unwrap_or(0))
                 .or_default()
                 .extend(get_absolute_offsets(key_offsets)?),
-            _ => return Err(TransactionError::IncorrectInputType),
+            Input::Gen(_) => return Err(TransactionError::IncorrectInputType),
         }
     }
     Ok(())
 }
@@ -60,7 +60,7 @@ pub fn insert_ring_member_ids(
 pub enum Rings {
     /// Legacy, pre-ringCT, rings.
     Legacy(Vec<Vec<EdwardsPoint>>),
-    /// RingCT rings, (outkey, amount commitment).
+    /// `RingCT` rings, (outkey, amount commitment).
     RingCT(Vec<Vec<[EdwardsPoint; 2]>>),
 }
@@ -103,15 +103,15 @@ impl DecoyInfo {
     ///
     /// So:
     ///
-    /// amount_outs_on_chain(inputs`[X]`) == outputs_with_amount`[X]`
+    /// `amount_outs_on_chain(inputs[X]) == outputs_with_amount[X]`
     ///
     /// Do not rely on this function to do consensus checks!
     ///
     pub fn new(
         inputs: &[Input],
         outputs_with_amount: impl Fn(u64) -> usize,
-        hf: &HardFork,
-    ) -> Result<DecoyInfo, TransactionError> {
+        hf: HardFork,
+    ) -> Result<Self, TransactionError> {
         let mut min_decoys = usize::MAX;
         let mut max_decoys = usize::MIN;
         let mut mixable = 0;
@@ -119,7 +119,7 @@ impl DecoyInfo {
 
         let minimum_decoys = minimum_decoys(hf);
 
-        for inp in inputs.iter() {
+        for inp in inputs {
             match inp {
                 Input::ToKey {
                     key_offsets,
@@ -149,11 +149,11 @@ impl DecoyInfo {
                     min_decoys = min(min_decoys, numb_decoys);
                     max_decoys = max(max_decoys, numb_decoys);
                 }
-                _ => return Err(TransactionError::IncorrectInputType),
+                Input::Gen(_) => return Err(TransactionError::IncorrectInputType),
             }
         }
 
-        Ok(DecoyInfo {
+        Ok(Self {
             mixable,
             not_mixable,
            min_decoys,
@@ -166,7 +166,7 @@ impl DecoyInfo {
 /// **There are exceptions to this always being the minimum decoys**
 ///
 /// ref:
-pub(crate) fn minimum_decoys(hf: &HardFork) -> usize {
+pub(crate) fn minimum_decoys(hf: HardFork) -> usize {
     use HardFork as HF;
     match hf {
         HF::V1 => panic!("hard-fork 1 does not use these rules!"),
diff --git a/consensus/rules/src/transactions/ring_ct.rs b/consensus/rules/src/transactions/ring_ct.rs
index 62f71dd..32cedd4 100644
--- a/consensus/rules/src/transactions/ring_ct.rs
+++ b/consensus/rules/src/transactions/ring_ct.rs
@@ -40,10 +40,10 @@ pub enum RingCTError {
     CLSAGError(#[from] ClsagError),
 }
 
-/// Checks the RingCT type is allowed for the current hard fork.
+/// Checks the `RingCT` type is allowed for the current hard fork.
 ///
 ///
-fn check_rct_type(ty: &RctType, hf: HardFork, tx_hash: &[u8; 32]) -> Result<(), RingCTError> {
+fn check_rct_type(ty: RctType, hf: HardFork, tx_hash: &[u8; 32]) -> Result<(), RingCTError> {
     use HardFork as F;
     use RctType as T;
 
@@ -125,11 +125,11 @@ pub(crate) fn ring_ct_semantic_checks(
     proofs: &RctProofs,
     tx_hash: &[u8; 32],
     verifier: impl BatchVerifier,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<(), RingCTError> {
     let rct_type = proofs.rct_type();
 
-    check_rct_type(&rct_type, *hf, tx_hash)?;
+    check_rct_type(rct_type, hf, tx_hash)?;
     check_output_range_proofs(proofs, verifier)?;
 
     if rct_type != RctType::AggregateMlsagBorromean {
@@ -154,7 +154,7 @@ pub(crate) fn check_input_signatures(
     };
 
     if rings.is_empty() {
-        Err(RingCTError::RingInvalid)?;
+        return Err(RingCTError::RingInvalid);
     }
 
     let pseudo_outs = match &proofs.prunable {
@@ -222,20 +222,20 @@ mod tests {
     #[test]
     fn grandfathered_bulletproofs2() {
         assert!(check_rct_type(
-            &RctType::MlsagBulletproofsCompactAmount,
+            RctType::MlsagBulletproofsCompactAmount,
             HardFork::V14,
             &[0; 32]
         )
         .is_err());
 
         assert!(check_rct_type(
-            &RctType::MlsagBulletproofsCompactAmount,
+            RctType::MlsagBulletproofsCompactAmount,
             HardFork::V14,
             &GRANDFATHERED_TRANSACTIONS[0]
         )
         .is_ok());
         assert!(check_rct_type(
-            &RctType::MlsagBulletproofsCompactAmount,
+            RctType::MlsagBulletproofsCompactAmount,
             HardFork::V14,
             &GRANDFATHERED_TRANSACTIONS[1]
         )
diff --git a/consensus/rules/src/transactions/ring_signatures.rs b/consensus/rules/src/transactions/ring_signatures.rs
index 7d4b8f9..a226f5e 100644
--- a/consensus/rules/src/transactions/ring_signatures.rs
+++ b/consensus/rules/src/transactions/ring_signatures.rs
@@ -17,7 +17,7 @@ use crate::try_par_iter;
 /// Verifies the ring signature.
 ///
 /// ref:
-pub fn check_input_signatures(
+pub(crate) fn check_input_signatures(
     inputs: &[Input],
     signatures: &[RingSignature],
     rings: &Rings,
@@ -45,7 +45,7 @@ pub fn check_input_signatures(
                 Ok(())
             })?;
         }
-        _ => panic!("tried to verify v1 tx with a non v1 ring"),
+        Rings::RingCT(_) => panic!("tried to verify v1 tx with a non v1 ring"),
     }
     Ok(())
 }
diff --git a/consensus/rules/src/transactions/tests.rs b/consensus/rules/src/transactions/tests.rs
index 4da8fd5..936d843 100644
--- a/consensus/rules/src/transactions/tests.rs
+++ b/consensus/rules/src/transactions/tests.rs
@@ -16,13 +16,13 @@ use crate::decomposed_amount::DECOMPOSED_AMOUNTS;
 
 #[test]
 fn test_check_output_amount_v1() {
-    for amount in DECOMPOSED_AMOUNTS.iter() {
-        assert!(check_output_amount_v1(*amount, &HardFork::V2).is_ok())
+    for amount in &DECOMPOSED_AMOUNTS {
+        assert!(check_output_amount_v1(*amount, HardFork::V2).is_ok());
     }
 
     proptest!(|(amount in any::<u64>().prop_filter("value_decomposed", |val| !is_decomposed_amount(val)))| {
-        prop_assert!(check_output_amount_v1(amount, &HardFork::V2).is_err());
-        prop_assert!(check_output_amount_v1(amount, &HardFork::V1).is_ok())
+        prop_assert!(check_output_amount_v1(amount, HardFork::V2).is_err());
+        prop_assert!(check_output_amount_v1(amount, HardFork::V1).is_ok());
     });
 }
@@ -41,10 +41,10 @@ fn test_sum_outputs() {
 
     let outs = [output_10, outputs_20];
 
-    let sum = sum_outputs(&outs, &HardFork::V16, &TxVersion::RingSignatures).unwrap();
+    let sum = sum_outputs(&outs, HardFork::V16, TxVersion::RingSignatures).unwrap();
     assert_eq!(sum, 30);
 
-    assert!(sum_outputs(&outs, &HardFork::V16, &TxVersion::RingCT).is_err())
+    assert!(sum_outputs(&outs, HardFork::V16, TxVersion::RingCT).is_err());
 }
 
 #[test]
@@ -52,50 +52,50 @@ fn test_decoy_info() {
     let decoy_info = DecoyInfo {
         mixable: 0,
         not_mixable: 0,
-        min_decoys: minimum_decoys(&HardFork::V8),
-        max_decoys: minimum_decoys(&HardFork::V8) + 1,
+        min_decoys: minimum_decoys(HardFork::V8),
+        max_decoys: minimum_decoys(HardFork::V8) + 1,
     };
 
-    assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_ok());
-    assert!(check_decoy_info(&decoy_info, &HardFork::V16).is_err());
+    assert!(check_decoy_info(&decoy_info, HardFork::V8).is_ok());
+    assert!(check_decoy_info(&decoy_info, HardFork::V16).is_err());
 
     let mut decoy_info = DecoyInfo {
         mixable: 0,
         not_mixable: 0,
-        min_decoys: minimum_decoys(&HardFork::V8) - 1,
-        max_decoys: minimum_decoys(&HardFork::V8) + 1,
+        min_decoys: minimum_decoys(HardFork::V8) - 1,
+        max_decoys: minimum_decoys(HardFork::V8) + 1,
     };
 
-    assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_err());
+    assert!(check_decoy_info(&decoy_info, HardFork::V8).is_err());
     decoy_info.not_mixable = 1;
-    assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_ok());
+    assert!(check_decoy_info(&decoy_info, HardFork::V8).is_ok());
     decoy_info.mixable = 2;
-    assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_err());
+    assert!(check_decoy_info(&decoy_info, HardFork::V8).is_err());
 
     let mut decoy_info = DecoyInfo {
         mixable: 0,
         not_mixable: 0,
-        min_decoys: minimum_decoys(&HardFork::V12),
-        max_decoys: minimum_decoys(&HardFork::V12) + 1,
+        min_decoys: minimum_decoys(HardFork::V12),
+        max_decoys: minimum_decoys(HardFork::V12) + 1,
     };
 
-    assert!(check_decoy_info(&decoy_info, &HardFork::V12).is_err());
+    assert!(check_decoy_info(&decoy_info, HardFork::V12).is_err());
     decoy_info.max_decoys = decoy_info.min_decoys;
-    assert!(check_decoy_info(&decoy_info, &HardFork::V12).is_ok());
+    assert!(check_decoy_info(&decoy_info, HardFork::V12).is_ok());
 }
 
 #[test]
 fn test_torsion_ki() {
-    for &key_image in EIGHT_TORSION[1..].iter() {
+    for &key_image in &EIGHT_TORSION[1..] {
         assert!(check_key_images(&Input::ToKey {
             key_image,
             amount: None,
             key_offsets: vec![],
         })
-        .is_err())
+        .is_err());
     }
 }
@@ -109,7 +109,7 @@ prop_compose! {
 prop_compose! {
     /// Returns a valid torsioned point.
     fn random_torsioned_point()(point in random_point(), torsion in 1..8_usize ) -> EdwardsPoint {
-        point + curve25519_dalek::constants::EIGHT_TORSION[torsion]
+        point + EIGHT_TORSION[torsion]
     }
 }
@@ -175,7 +175,7 @@ prop_compose! {
     /// Returns a [`Timelock`] that is unlocked given a height and time.
     fn unlocked_timelock(height: u64, time_for_time_lock: u64)(
         ty in 0..3,
-        lock_height in 0..(height+1),
+        lock_height in 0..=height,
         time_for_time_lock in 0..(time_for_time_lock+121),
     ) -> Timelock {
         match ty {
@@ -203,33 +203,33 @@ proptest! {
         hf_no_view_tags in hf_in_range(1..14),
         hf_view_tags in hf_in_range(16..17),
     ) {
-        prop_assert!(check_output_types(&view_tag_outs, &hf_view_tags).is_ok());
-        prop_assert!(check_output_types(&view_tag_outs, &hf_no_view_tags).is_err());
+        prop_assert!(check_output_types(&view_tag_outs, hf_view_tags).is_ok());
+        prop_assert!(check_output_types(&view_tag_outs, hf_no_view_tags).is_err());
 
-        prop_assert!(check_output_types(&non_view_tag_outs, &hf_no_view_tags).is_ok());
-        prop_assert!(check_output_types(&non_view_tag_outs, &hf_view_tags).is_err());
+        prop_assert!(check_output_types(&non_view_tag_outs, hf_no_view_tags).is_ok());
+        prop_assert!(check_output_types(&non_view_tag_outs, hf_view_tags).is_err());
 
-        prop_assert!(check_output_types(&non_view_tag_outs, &HardFork::V15).is_ok());
-        prop_assert!(check_output_types(&view_tag_outs, &HardFork::V15).is_ok());
+        prop_assert!(check_output_types(&non_view_tag_outs, HardFork::V15).is_ok());
+        prop_assert!(check_output_types(&view_tag_outs, HardFork::V15).is_ok());
         view_tag_outs.append(&mut non_view_tag_outs);
-        prop_assert!(check_output_types(&view_tag_outs, &HardFork::V15).is_err());
+        prop_assert!(check_output_types(&view_tag_outs, HardFork::V15).is_err());
     }
 
     #[test]
     fn test_valid_number_of_outputs(valid_numb_outs in 2..17_usize) {
-        prop_assert!(check_number_of_outputs(valid_numb_outs, &HardFork::V16, &TxVersion::RingCT, true).is_ok());
+        prop_assert!(check_number_of_outputs(valid_numb_outs, HardFork::V16, TxVersion::RingCT, true).is_ok());
     }
 
     #[test]
     fn test_invalid_number_of_outputs(numb_outs in 17..usize::MAX) {
-        prop_assert!(check_number_of_outputs(numb_outs, &HardFork::V16, &TxVersion::RingCT, true).is_err());
+        prop_assert!(check_number_of_outputs(numb_outs, HardFork::V16, TxVersion::RingCT, true).is_err());
     }
 
     #[test]
     fn test_check_output_amount_v2(amt in 1..u64::MAX) {
         prop_assert!(check_output_amount_v2(amt).is_err());
-        prop_assert!(check_output_amount_v2(0).is_ok())
+        prop_assert!(check_output_amount_v2(0).is_ok());
     }
 
     #[test]
@@ -241,9 +241,9 @@ proptest! {
 
     #[test]
     fn test_timestamp_time_lock(timestamp in 500_000_001..u64::MAX) {
-        prop_assert!(check_timestamp_time_lock(timestamp, timestamp - 120, &HardFork::V16));
-        prop_assert!(!check_timestamp_time_lock(timestamp, timestamp - 121, &HardFork::V16));
-        prop_assert!(check_timestamp_time_lock(timestamp, timestamp, &HardFork::V16));
+        prop_assert!(check_timestamp_time_lock(timestamp, timestamp - 120, HardFork::V16));
+        prop_assert!(!check_timestamp_time_lock(timestamp, timestamp - 121, HardFork::V16));
+        prop_assert!(check_timestamp_time_lock(timestamp, timestamp, HardFork::V16));
     }
 
     #[test]
@@ -251,11 +251,11 @@ proptest! {
         mut locked_locks in vec(locked_timelock(5_000, 100_000_000), 1..50),
         mut unlocked_locks in vec(unlocked_timelock(5_000, 100_000_000), 1..50)
     ) {
-        assert!(check_all_time_locks(&locked_locks, 5_000, 100_000_000, &HardFork::V16).is_err());
-        assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, &HardFork::V16).is_ok());
+        assert!(check_all_time_locks(&locked_locks, 5_000, 100_000_000, HardFork::V16).is_err());
+        assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, HardFork::V16).is_ok());
 
         unlocked_locks.append(&mut locked_locks);
-        assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, &HardFork::V16).is_err());
+        assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, HardFork::V16).is_err());
     }
 
     #[test]
diff --git a/consensus/src/batch_verifier.rs b/consensus/src/batch_verifier.rs
index 69018ac..101f981 100644
--- a/consensus/src/batch_verifier.rs
+++ b/consensus/src/batch_verifier.rs
@@ -1,4 +1,4 @@
-use std::{cell::RefCell, ops::DerefMut};
+use std::cell::RefCell;
 
 use monero_serai::ringct::bulletproofs::BatchVerifier as InternalBatchVerifier;
 use rayon::prelude::*;
@@ -13,8 +13,8 @@ pub struct MultiThreadedBatchVerifier {
 
 impl MultiThreadedBatchVerifier {
     /// Create a new multithreaded batch verifier,
-    pub fn new(numb_threads: usize) -> MultiThreadedBatchVerifier {
-        MultiThreadedBatchVerifier {
+    pub fn new(numb_threads: usize) -> Self {
+        Self {
             internal: ThreadLocal::with_capacity(numb_threads),
         }
     }
@@ -42,6 +42,6 @@ impl BatchVerifier for &'_ MultiThreadedBatchVerifier {
             .get_or(|| RefCell::new(InternalBatchVerifier::new()))
             .borrow_mut();
 
-        stmt(verifier.deref_mut())
+        stmt(&mut verifier)
     }
 }
diff --git a/consensus/src/block.rs b/consensus/src/block.rs
index e785a6b..3d0db99 100644
--- a/consensus/src/block.rs
+++ b/consensus/src/block.rs
@@ -72,17 +72,17 @@ impl PreparedBlockExPow {
     /// This errors if either the `block`'s:
     /// - Hard-fork values are invalid
     /// - Miner transaction is missing a miner input
-    pub fn new(block: Block) -> Result<PreparedBlockExPow, ConsensusError> {
+    pub fn new(block: Block) -> Result<Self, ConsensusError> {
         let (hf_version, hf_vote) = HardFork::from_block_header(&block.header)
             .map_err(|_| BlockError::HardForkError(HardForkError::HardForkUnknown))?;
 
         let Some(Input::Gen(height)) = block.miner_transaction.prefix().inputs.first() else {
-            Err(ConsensusError::Block(BlockError::MinerTxError(
+            return Err(ConsensusError::Block(BlockError::MinerTxError(
                 MinerTxError::InputNotOfTypeGen,
-            )))?
+            )));
         };
 
-        Ok(PreparedBlockExPow {
+        Ok(Self {
             block_blob: block.serialize(),
             hf_vote,
             hf_version,
@@ -123,20 +123,17 @@ impl PreparedBlock {
     ///
     /// The randomX VM must be Some if RX is needed or this will panic.
     /// The randomX VM must also be initialised with the correct seed.
-    fn new<R: RandomX>(
-        block: Block,
-        randomx_vm: Option<&R>,
-    ) -> Result<PreparedBlock, ConsensusError> {
+    fn new<R: RandomX>(block: Block, randomx_vm: Option<&R>) -> Result<Self, ConsensusError> {
         let (hf_version, hf_vote) = HardFork::from_block_header(&block.header)
             .map_err(|_| BlockError::HardForkError(HardForkError::HardForkUnknown))?;
 
         let [Input::Gen(height)] = &block.miner_transaction.prefix().inputs[..] else {
-            Err(ConsensusError::Block(BlockError::MinerTxError(
+            return Err(ConsensusError::Block(BlockError::MinerTxError(
                 MinerTxError::InputNotOfTypeGen,
-            )))?
+            )));
         };
 
-        Ok(PreparedBlock {
+        Ok(Self {
             block_blob: block.serialize(),
             hf_vote,
             hf_version,
@@ -156,17 +153,17 @@ impl PreparedBlock {
     /// Creates a new [`PreparedBlock`] from a [`PreparedBlockExPow`].
     ///
-    /// This function will give an invalid PoW hash if `randomx_vm` is not initialised
+    /// This function will give an invalid proof-of-work hash if `randomx_vm` is not initialised
     /// with the correct seed.
     ///
     /// # Panics
     /// This function will panic if `randomx_vm` is
-    /// [`None`] even though RandomX is needed.
+    /// [`None`] even though `RandomX` is needed.
     fn new_prepped<R: RandomX>(
         block: PreparedBlockExPow,
         randomx_vm: Option<&R>,
-    ) -> Result<PreparedBlock, ConsensusError> {
-        Ok(PreparedBlock {
+    ) -> Result<Self, ConsensusError> {
+        Ok(Self {
             block_blob: block.block_blob,
             hf_vote: block.hf_vote,
             hf_version: block.hf_version,
@@ -218,7 +215,6 @@ pub enum VerifyBlockRequest {
 }
 
 /// A response from a verify block request.
-#[allow(clippy::large_enum_variant)] // The largest variant is most common ([`MainChain`])
 pub enum VerifyBlockResponse {
     /// This block is valid.
     MainChain(VerifiedBlockInformation),
@@ -254,12 +250,8 @@ where
     D::Future: Send + 'static,
 {
     /// Creates a new block verifier.
-    pub(crate) fn new(
-        context_svc: C,
-        tx_verifier_svc: TxV,
-        database: D,
-    ) -> BlockVerifierService<C, TxV, D> {
-        BlockVerifierService {
+    pub(crate) const fn new(context_svc: C, tx_verifier_svc: TxV, database: D) -> Self {
+        Self {
             context_svc,
             tx_verifier_svc,
             _database: database,
diff --git a/consensus/src/block/alt_block.rs b/consensus/src/block/alt_block.rs
index b20b4f2..3a5ea7c 100644
--- a/consensus/src/block/alt_block.rs
+++ b/consensus/src/block/alt_block.rs
@@ -36,8 +36,8 @@ use crate::{
 ///
 /// Returns [`AltBlockInformation`], which contains the cumulative difficulty of the alt chain.
 ///
-/// This function only checks the block's PoW and its weight.
-pub async fn sanity_check_alt_block<C>(
+/// This function only checks the block's proof-of-work and its weight.
+pub(crate) async fn sanity_check_alt_block<C>(
     block: Block,
     txs: HashMap<[u8; 32], TransactionVerificationData>,
     mut context_svc: C,
@@ -66,15 +66,17 @@ where
     // Check if the block's miner input is formed correctly.
     let [Input::Gen(height)] = &block.miner_transaction.prefix().inputs[..] else {
-        Err(ConsensusError::Block(BlockError::MinerTxError(
+        return Err(ConsensusError::Block(BlockError::MinerTxError(
             MinerTxError::InputNotOfTypeGen,
-        )))?
+        ))
+        .into());
     };
 
     if *height != alt_context_cache.chain_height {
-        Err(ConsensusError::Block(BlockError::MinerTxError(
+        return Err(ConsensusError::Block(BlockError::MinerTxError(
             MinerTxError::InputsHeightIncorrect,
-        )))?
+        ))
+        .into());
     }
 
     // prep the alt block.
@@ -103,10 +105,10 @@ where
     if let Some(median_timestamp) =
         difficulty_cache.median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW))
     {
-        check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)?
+        check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)?;
     };
 
-    let next_difficulty = difficulty_cache.next_difficulty(&prepped_block.hf_version);
+    let next_difficulty = difficulty_cache.next_difficulty(prepped_block.hf_version);
     // make sure the block's PoW is valid for this difficulty.
     check_block_pow(&prepped_block.pow_hash, next_difficulty).map_err(ConsensusError::Block)?;
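The hunks above and below share one control-flow rewrite: `Err(...)?` becomes an explicit `return Err(...)`. A minimal, self-contained sketch of the pattern, with `MinerTxError`/`ConsensusError` as simplified stand-ins rather than the real cuprate definitions: `Err(e)?` does return early (the `?` propagates the error and converts it), but it hides the `return`; the new shape spells out the return and uses `.into()` for the conversion `?` used to do implicitly.

#[derive(Debug)]
struct MinerTxError;

#[derive(Debug)]
struct ConsensusError(MinerTxError);

impl From<MinerTxError> for ConsensusError {
    fn from(e: MinerTxError) -> Self {
        Self(e)
    }
}

/// Returns the height from the first input, or an explicit error.
fn first_input_height(inputs: &[u64]) -> Result<u64, ConsensusError> {
    // `let ... else` binds the happy path or diverges; the explicit
    // `return Err(...)` replaces the old `Err(...)?` early-exit.
    let Some(&height) = inputs.first() else {
        return Err(MinerTxError.into());
    };
    Ok(height)
}

fn main() {
    assert!(first_input_height(&[]).is_err());
    assert_eq!(first_input_height(&[5]).unwrap(), 5);
}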
@@ -127,12 +129,12 @@ where
     // Check the block weight is below the limit.
     check_block_weight(
         block_weight,
-        alt_weight_cache.median_for_block_reward(&prepped_block.hf_version),
+        alt_weight_cache.median_for_block_reward(prepped_block.hf_version),
     )
     .map_err(ConsensusError::Block)?;
 
     let long_term_weight = weight::calculate_block_long_term_weight(
-        &prepped_block.hf_version,
+        prepped_block.hf_version,
         block_weight,
         alt_weight_cache.median_long_term_weight(),
     );
@@ -232,9 +234,9 @@ where
         }
     };
 
-    Ok(Some(
-        alt_chain_context.cached_rx_vm.insert(cached_vm).1.clone(),
-    ))
+    Ok(Some(Arc::clone(
+        &alt_chain_context.cached_rx_vm.insert(cached_vm).1,
+    )))
 }
 
 /// Returns the [`DifficultyCache`] for the alt chain.
diff --git a/consensus/src/block/batch_prepare.rs b/consensus/src/block/batch_prepare.rs
index d32cd76..9c77848 100644
--- a/consensus/src/block/batch_prepare.rs
+++ b/consensus/src/block/batch_prepare.rs
@@ -68,16 +68,17 @@ where
 
         // Make sure no blocks in the batch have a higher hard fork than the last block.
         if block_0.hf_version > top_hf_in_batch {
-            Err(ConsensusError::Block(BlockError::HardForkError(
+            return Err(ConsensusError::Block(BlockError::HardForkError(
                 HardForkError::VersionIncorrect,
-            )))?;
+            ))
+            .into());
         }
 
         if block_0.block_hash != block_1.block.header.previous
             || block_0.height != block_1.height - 1
         {
             tracing::debug!("Blocks do not follow each other, verification failed.");
-            Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
+            return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into());
         }
 
         // Cache any potential RX VM seeds as we may need them for future blocks in the batch.
@@ -85,7 +86,7 @@ where
             new_rx_vm = Some((block_0.height, block_0.block_hash));
         }
 
-        timestamps_hfs.push((block_0.block.header.timestamp, block_0.hf_version))
+        timestamps_hfs.push((block_0.block.header.timestamp, block_0.hf_version));
     }
 
     // Get the current blockchain context.
@@ -117,15 +118,16 @@ where
     if context.chain_height != blocks[0].height {
         tracing::debug!("Blocks do not follow main chain, verification failed.");
 
-        Err(ConsensusError::Block(BlockError::MinerTxError(
+        return Err(ConsensusError::Block(BlockError::MinerTxError(
             MinerTxError::InputsHeightIncorrect,
-        )))?;
+        ))
+        .into());
     }
 
     if context.top_hash != blocks[0].block.header.previous {
         tracing::debug!("Blocks do not follow main chain, verification failed.");
 
-        Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
+        return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into());
     }
 
     let mut rx_vms = if top_hf_in_batch < HardFork::V12 {
@@ -156,7 +158,7 @@ where
         context_svc
             .oneshot(BlockChainContextRequest::NewRXVM((
                 new_vm_seed,
-                new_vm.clone(),
+                Arc::clone(&new_vm),
             )))
             .await?;
 
diff --git a/consensus/src/context.rs b/consensus/src/context.rs
index 9e71304..5bdb1ce 100644
--- a/consensus/src/context.rs
+++ b/consensus/src/context.rs
@@ -56,8 +56,8 @@ pub struct ContextConfig {
 
 impl ContextConfig {
     /// Get the config for main-net.
-    pub fn main_net() -> ContextConfig {
-        ContextConfig {
+    pub const fn main_net() -> Self {
+        Self {
             hard_fork_cfg: HardForkConfig::main_net(),
             difficulty_cfg: DifficultyCacheConfig::main_net(),
             weights_config: BlockWeightsCacheConfig::main_net(),
@@ -65,8 +65,8 @@ impl ContextConfig {
     }
 
     /// Get the config for stage-net.
-    pub fn stage_net() -> ContextConfig {
-        ContextConfig {
+    pub const fn stage_net() -> Self {
+        Self {
             hard_fork_cfg: HardForkConfig::stage_net(),
             // These 2 have the same config as main-net.
             difficulty_cfg: DifficultyCacheConfig::main_net(),
@@ -75,8 +75,8 @@ impl ContextConfig {
     }
 
     /// Get the config for test-net.
-    pub fn test_net() -> ContextConfig {
-        ContextConfig {
+    pub const fn test_net() -> Self {
+        Self {
             hard_fork_cfg: HardForkConfig::test_net(),
             // These 2 have the same config as main-net.
             difficulty_cfg: DifficultyCacheConfig::main_net(),
@@ -155,7 +155,7 @@ impl RawBlockChainContext {
     /// Returns the next blocks long term weight from its block weight.
     pub fn next_block_long_term_weight(&self, block_weight: usize) -> usize {
         weight::calculate_block_long_term_weight(
-            &self.current_hf,
+            self.current_hf,
             block_weight,
             self.median_long_term_weight,
         )
@@ -191,7 +191,7 @@ impl BlockChainContext {
     }
 
     /// Returns the blockchain context without checking the validity token.
-    pub fn unchecked_blockchain_context(&self) -> &RawBlockChainContext {
+    pub const fn unchecked_blockchain_context(&self) -> &RawBlockChainContext {
         &self.raw
     }
 }
@@ -222,7 +222,7 @@ pub struct NewBlockData {
 pub enum BlockChainContextRequest {
     /// Get the current blockchain context.
     GetContext,
-    /// Gets the current RandomX VM.
+    /// Gets the current `RandomX` VM.
     GetCurrentRxVm,
     /// Get the next difficulties for these blocks.
     ///
@@ -288,7 +288,7 @@ pub enum BlockChainContextRequest {
     /// This variant is private and is not callable from outside this crate, the block verifier service will
     /// handle getting the randomX VM of an alt chain.
     AltChainRxVM {
-        /// The height the RandomX VM is needed for.
+        /// The height the `RandomX` VM is needed for.
         height: usize,
         /// The chain to look in for the seed.
         chain: Chain,
@@ -312,7 +312,7 @@ pub enum BlockChainContextResponse {
     /// Blockchain context response.
     Context(BlockChainContext),
-    /// A map of seed height to RandomX VMs.
+    /// A map of seed height to `RandomX` VMs.
     RxVms(HashMap<usize, Arc<RandomXVm>>),
     /// A list of difficulties.
     BatchDifficulties(Vec<u128>),
diff --git a/consensus/src/context/alt_chains.rs b/consensus/src/context/alt_chains.rs
index 937e847..cd945c8 100644
--- a/consensus/src/context/alt_chains.rs
+++ b/consensus/src/context/alt_chains.rs
@@ -68,29 +68,33 @@ impl AltChainContextCache {
 }
 
 /// A map of top IDs to alt chains.
-pub struct AltChainMap {
+pub(crate) struct AltChainMap {
     alt_cache_map: HashMap<[u8; 32], Box<AltChainContextCache>>,
 }
 
 impl AltChainMap {
-    pub fn new() -> Self {
+    pub(crate) fn new() -> Self {
         Self {
             alt_cache_map: HashMap::new(),
         }
     }
 
-    pub fn clear(&mut self) {
+    pub(crate) fn clear(&mut self) {
         self.alt_cache_map.clear();
     }
 
     /// Add an alt chain cache to the map.
-    pub fn add_alt_cache(&mut self, prev_id: [u8; 32], alt_cache: Box<AltChainContextCache>) {
+    pub(crate) fn add_alt_cache(
+        &mut self,
+        prev_id: [u8; 32],
+        alt_cache: Box<AltChainContextCache>,
+    ) {
         self.alt_cache_map.insert(prev_id, alt_cache);
     }
 
     /// Attempts to take an [`AltChainContextCache`] from the map, returning [`None`] if no cache is
     /// present.
-    pub async fn get_alt_chain_context<D: Database + Clone + Send + 'static>(
+    pub(crate) async fn get_alt_chain_context<D: Database + Clone + Send + 'static>(
         &mut self,
         prev_id: [u8; 32],
         database: D,
@@ -109,7 +113,7 @@ impl AltChainMap {
 
         let Some((parent_chain, top_height)) = res else {
             // Couldn't find prev_id
-            Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?
+            return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into());
         };
 
         Ok(Box::new(AltChainContextCache {
@@ -125,7 +129,7 @@ impl AltChainMap {
 }
 
 /// Builds a [`DifficultyCache`] for an alt chain.
-pub async fn get_alt_chain_difficulty_cache<D: Database + Clone + Send + 'static>(
+pub(crate) async fn get_alt_chain_difficulty_cache<D: Database + Clone + Send + 'static>(
     prev_id: [u8; 32],
     main_chain_difficulty_cache: &DifficultyCache,
     mut database: D,
@@ -142,7 +146,7 @@ pub async fn get_alt_chain_difficulty_cache(
 
     let Some((chain, top_height)) = res else {
         // Can't find prev_id
-        Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?
+        return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into());
     };
 
     Ok(match chain {
@@ -172,7 +176,7 @@ pub async fn get_alt_chain_difficulty_cache(
 }
 
 /// Builds a [`BlockWeightsCache`] for an alt chain.
-pub async fn get_alt_chain_weight_cache<D: Database + Clone + Send + 'static>(
+pub(crate) async fn get_alt_chain_weight_cache<D: Database + Clone + Send + 'static>(
     prev_id: [u8; 32],
     main_chain_weight_cache: &BlockWeightsCache,
     mut database: D,
@@ -189,7 +193,7 @@ pub async fn get_alt_chain_weight_cache(
 
     let Some((chain, top_height)) = res else {
         // Can't find prev_id
-        Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?
+        return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into());
     };
 
     Ok(match chain {
diff --git a/consensus/src/context/difficulty.rs b/consensus/src/context/difficulty.rs
index eb67cf5..9316dc5 100644
--- a/consensus/src/context/difficulty.rs
+++ b/consensus/src/context/difficulty.rs
@@ -43,24 +43,24 @@ impl DifficultyCacheConfig {
     ///
     /// # Notes
     /// You probably do not need this, use [`DifficultyCacheConfig::main_net`] instead.
-    pub const fn new(window: usize, cut: usize, lag: usize) -> DifficultyCacheConfig {
-        DifficultyCacheConfig { window, cut, lag }
+    pub const fn new(window: usize, cut: usize, lag: usize) -> Self {
+        Self { window, cut, lag }
     }
 
     /// Returns the total amount of blocks we need to track to calculate difficulty
-    pub fn total_block_count(&self) -> usize {
+    pub const fn total_block_count(&self) -> usize {
         self.window + self.lag
     }
 
     /// The amount of blocks we account for after removing the outliers.
-    pub fn accounted_window_len(&self) -> usize {
+    pub const fn accounted_window_len(&self) -> usize {
         self.window - 2 * self.cut
     }
 
     /// Returns the config needed for [`Mainnet`](cuprate_helper::network::Network::Mainnet). This is also the
     /// config for all other current networks.
-    pub const fn main_net() -> DifficultyCacheConfig {
-        DifficultyCacheConfig {
+    pub const fn main_net() -> Self {
+        Self {
             window: DIFFICULTY_WINDOW,
             cut: DIFFICULTY_CUT,
             lag: DIFFICULTY_LAG,
@@ -112,7 +112,7 @@ impl DifficultyCache {
             timestamps.len()
         );
 
-        let diff = DifficultyCache {
+        let diff = Self {
             timestamps,
             cumulative_difficulties,
             last_accounted_height: chain_height - 1,
@@ -203,8 +203,8 @@ impl DifficultyCache {
     /// Returns the required difficulty for the next block.
     ///
-    /// See: https://cuprate.github.io/monero-book/consensus_rules/blocks/difficulty.html#calculating-difficulty
-    pub fn next_difficulty(&self, hf: &HardFork) -> u128 {
+    /// See: <https://cuprate.github.io/monero-book/consensus_rules/blocks/difficulty.html#calculating-difficulty>
+    pub fn next_difficulty(&self, hf: HardFork) -> u128 {
         next_difficulty(
             &self.config,
             &self.timestamps,
@@ -223,7 +223,7 @@ impl DifficultyCache {
     pub fn next_difficulties(
         &self,
         blocks: Vec<(u64, HardFork)>,
-        current_hf: &HardFork,
+        current_hf: HardFork,
     ) -> Vec<u128> {
         let mut timestamps = self.timestamps.clone();
         let mut cumulative_difficulties = self.cumulative_difficulties.clone();
@@ -232,8 +232,6 @@ impl DifficultyCache {
 
         difficulties.push(self.next_difficulty(current_hf));
 
-        let mut diff_info_popped = Vec::new();
-
         for (new_timestamp, hf) in blocks {
             timestamps.push_back(new_timestamp);
 
@@ -241,17 +239,15 @@ impl DifficultyCache {
             cumulative_difficulties.push_back(last_cum_diff + *difficulties.last().unwrap());
 
             if timestamps.len() > self.config.total_block_count() {
-                diff_info_popped.push((
-                    timestamps.pop_front().unwrap(),
-                    cumulative_difficulties.pop_front().unwrap(),
-                ));
+                timestamps.pop_front().unwrap();
+                cumulative_difficulties.pop_front().unwrap();
             }
 
             difficulties.push(next_difficulty(
                 &self.config,
                 &timestamps,
                 &cumulative_difficulties,
-                &hf,
+                hf,
             ));
         }
 
@@ -295,12 +291,12 @@ impl DifficultyCache {
     }
 }
 
-/// Calculates the next difficulty with the inputted config/timestamps/cumulative_difficulties.
+/// Calculates the next difficulty with the inputted `config/timestamps/cumulative_difficulties`.
 fn next_difficulty(
     config: &DifficultyCacheConfig,
     timestamps: &VecDeque<u64>,
     cumulative_difficulties: &VecDeque<u128>,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> u128 {
     if timestamps.len() <= 1 {
         return 1;
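The signature change above (`hf: &HardFork` to `hf: HardFork`) recurs through the rest of this diff, so it is worth one small sketch. The assumption, true in cuprate, is that `HardFork` is a small `Copy` enum: passing it by value costs no more than passing a reference, and it removes the borrow noise at every call site (clippy's `trivially_copy_pass_by_ref` flags the reference form). The enum below is a trimmed stand-in, not the real definition:

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum HardFork {
    V1,
    V15,
    V16,
}

// By-value: a one-byte copy, and comparisons need no dereferencing.
fn is_post_v15(hf: HardFork) -> bool {
    hf >= HardFork::V15
}

fn main() {
    assert!(is_post_v15(HardFork::V16));
    assert!(!is_post_v15(HardFork::V1));
}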
diff --git a/consensus/src/context/hardforks.rs b/consensus/src/context/hardforks.rs
index 682933d..16ae763 100644
--- a/consensus/src/context/hardforks.rs
+++ b/consensus/src/context/hardforks.rs
@@ -28,7 +28,7 @@ pub struct HardForkConfig {
 
 impl HardForkConfig {
     /// Config for main-net.
-    pub const fn main_net() -> HardForkConfig {
+    pub const fn main_net() -> Self {
         Self {
             info: HFsInfo::main_net(),
             window: DEFAULT_WINDOW_SIZE,
@@ -36,7 +36,7 @@ impl HardForkConfig {
     }
 
     /// Config for stage-net.
-    pub const fn stage_net() -> HardForkConfig {
+    pub const fn stage_net() -> Self {
         Self {
             info: HFsInfo::stage_net(),
             window: DEFAULT_WINDOW_SIZE,
@@ -44,7 +44,7 @@ impl HardForkConfig {
     }
 
     /// Config for test-net.
-    pub const fn test_net() -> HardForkConfig {
+    pub const fn test_net() -> Self {
         Self {
             info: HFsInfo::test_net(),
             window: DEFAULT_WINDOW_SIZE,
@@ -54,7 +54,7 @@ impl HardForkConfig {
 
 /// A struct that keeps track of the current hard-fork and current votes.
 #[derive(Debug, Clone, Eq, PartialEq)]
-pub struct HardForkState {
+pub(crate) struct HardForkState {
     /// The current active hard-fork.
     pub(crate) current_hardfork: HardFork,
 
@@ -83,7 +83,7 @@ impl HardForkState {
         get_votes_in_range(database.clone(), block_start..chain_height, config.window).await?;
 
         if chain_height > config.window {
-            debug_assert_eq!(votes.total_votes(), config.window)
+            debug_assert_eq!(votes.total_votes(), config.window);
         }
 
         let BlockchainResponse::BlockExtendedHeader(ext_header) = database
@@ -97,7 +97,7 @@ impl HardForkState {
 
         let current_hardfork = ext_header.version;
 
-        let mut hfs = HardForkState {
+        let mut hfs = Self {
             config,
             current_hardfork,
             votes,
@@ -122,7 +122,7 @@ impl HardForkState {
     /// # Invariant
     ///
     /// This _must_ only be used on a main-chain cache.
-    pub async fn pop_blocks_main_chain<D: Database + Clone + Send + 'static>(
+    pub(crate) async fn pop_blocks_main_chain<D: Database + Clone + Send + 'static>(
         &mut self,
         numb_blocks: usize,
         database: D,
@@ -159,7 +159,7 @@ impl HardForkState {
     }
 
     /// Add a new block to the cache.
-    pub fn new_block(&mut self, vote: HardFork, height: usize) {
+    pub(crate) fn new_block(&mut self, vote: HardFork, height: usize) {
         // We don't _need_ to take in `height` but it's for safety, so we don't silently loose track
         // of blocks.
         assert_eq!(self.last_height + 1, height);
@@ -183,7 +183,7 @@ impl HardForkState {
 
     /// Checks if the next hard-fork should be activated and activates it if it should.
     ///
-    /// https://cuprate.github.io/monero-docs/consensus_rules/hardforks.html#accepting-a-fork
+    /// <https://cuprate.github.io/monero-docs/consensus_rules/hardforks.html#accepting-a-fork>
     fn check_set_new_hf(&mut self) {
         self.current_hardfork = self.votes.current_fork(
             &self.current_hardfork,
@@ -194,7 +194,7 @@ impl HardForkState {
     }
 
     /// Returns the current hard-fork.
-    pub fn current_hardfork(&self) -> HardFork {
+    pub(crate) const fn current_hardfork(&self) -> HardFork {
         self.current_hardfork
     }
 }
@@ -218,7 +218,7 @@ async fn get_votes_in_range(
         panic!("Database sent incorrect response!");
     };
 
-    for hf_info in vote_list.into_iter() {
+    for hf_info in vote_list {
         votes.add_vote_for_hf(&HardFork::from_vote(hf_info.vote));
     }
 
diff --git a/consensus/src/context/rx_vms.rs b/consensus/src/context/rx_vms.rs
index b1ab102..c6375fc 100644
--- a/consensus/src/context/rx_vms.rs
+++ b/consensus/src/context/rx_vms.rs
@@ -1,6 +1,6 @@
-//! RandomX VM Cache
+//! `RandomX` VM Cache
 //!
-//! This module keeps track of the RandomX VM to calculate the next blocks PoW, if the block needs a randomX VM and potentially
+//! This module keeps track of the `RandomX` VM to calculate the next blocks proof-of-work, if the block needs a randomX VM and potentially
 //! more VMs around this height.
 //!
 use std::{
@@ -34,11 +34,11 @@ const RX_SEEDS_CACHED: usize = 2;
 /// A multithreaded randomX VM.
 #[derive(Debug)]
 pub struct RandomXVm {
-    /// These RandomX VMs all share the same cache.
+    /// These `RandomX` VMs all share the same cache.
     vms: ThreadLocal<VmInner>,
-    /// The RandomX cache.
+    /// The `RandomX` cache.
     cache: RandomXCache,
-    /// The flags used to start the RandomX VMs.
+    /// The flags used to start the `RandomX` VMs.
     flags: RandomXFlag,
 }
 
@@ -50,7 +50,7 @@ impl RandomXVm {
         let cache = RandomXCache::new(flags, seed.as_slice())?;
 
-        Ok(RandomXVm {
+        Ok(Self {
             vms: ThreadLocal::new(),
             cache,
             flags,
@@ -69,10 +69,10 @@ impl RandomX for RandomXVm {
     }
 }
 
-/// The randomX VMs cache, keeps the VM needed to calculate the current block's PoW hash (if a VM is needed) and a
+/// The randomX VMs cache, keeps the VM needed to calculate the current block's proof-of-work hash (if a VM is needed) and a
 /// couple more around this VM.
 #[derive(Clone, Debug)]
-pub struct RandomXVmCache {
+pub(crate) struct RandomXVmCache {
     /// The top [`RX_SEEDS_CACHED`] RX seeds.
     pub(crate) seeds: VecDeque<(usize, [u8; 32])>,
     /// The VMs for `seeds` (if after hf 12, otherwise this will be empty).
@@ -117,7 +117,7 @@ impl RandomXVmCache {
             HashMap::new()
         };
 
-        Ok(RandomXVmCache {
+        Ok(Self {
             seeds,
             vms,
             cached_vm: None,
@@ -125,14 +125,14 @@ impl RandomXVmCache {
     }
 
     /// Add a randomX VM to the cache, with the seed it was created with.
-    pub fn add_vm(&mut self, vm: ([u8; 32], Arc<RandomXVm>)) {
+    pub(crate) fn add_vm(&mut self, vm: ([u8; 32], Arc<RandomXVm>)) {
         self.cached_vm.replace(vm);
     }
 
     /// Creates a RX VM for an alt chain, looking at the main chain RX VMs to see if we can use one
     /// of them first.
-    pub async fn get_alt_vm<D: Database>(
-        &mut self,
+    pub(crate) async fn get_alt_vm<D: Database>(
+        &self,
         height: usize,
         chain: Chain,
         database: D,
@@ -152,7 +152,7 @@ impl RandomXVmCache {
                 break;
             };
 
-            return Ok(vm.clone());
+            return Ok(Arc::clone(vm));
         }
     }
 
@@ -161,8 +161,8 @@ impl RandomXVmCache {
         Ok(alt_vm)
     }
 
-    /// Get the main-chain RandomX VMs.
-    pub async fn get_vms(&mut self) -> HashMap<usize, Arc<RandomXVm>> {
+    /// Get the main-chain `RandomX` VMs.
+    pub(crate) async fn get_vms(&mut self) -> HashMap<usize, Arc<RandomXVm>> {
         match self.seeds.len().checked_sub(self.vms.len()) {
             // No difference in the amount of seeds to VMs.
             Some(0) => (),
@@ -206,23 +206,23 @@ impl RandomXVmCache {
                     })
                     .collect()
             })
-            .await
+            .await;
         }
 
         self.vms.clone()
     }
 
-    /// Removes all the RandomX VMs above the `new_height`.
-    pub fn pop_blocks_main_chain(&mut self, new_height: usize) {
+    /// Removes all the `RandomX` VMs above the `new_height`.
+    pub(crate) fn pop_blocks_main_chain(&mut self, new_height: usize) {
         self.seeds.retain(|(height, _)| *height < new_height);
         self.vms.retain(|height, _| *height < new_height);
     }
 
     /// Add a new block to the VM cache.
     ///
-    /// hash is the block hash not the blocks PoW hash.
-    pub fn new_block(&mut self, height: usize, hash: &[u8; 32]) {
+    /// hash is the block hash not the blocks proof-of-work hash.
+    pub(crate) fn new_block(&mut self, height: usize, hash: &[u8; 32]) {
         if is_randomx_seed_height(height) {
             tracing::debug!("Block {height} is a randomX seed height, adding it to the cache.",);
 
@@ -235,7 +235,7 @@ impl RandomXVmCache {
                 self.seeds
                     .iter()
                     .any(|(cached_height, _)| height == cached_height)
-            })
+            });
         }
     }
 }
@@ -258,7 +258,7 @@ pub(crate) fn get_last_rx_seed_heights(mut last_height: usize, mut amount: usize
         // We don't include the lag as we only want seeds not the specific seed for this height.
         let seed_height = (last_height - 1) & !(RX_SEEDHASH_EPOCH_BLOCKS - 1);
         seeds.push(seed_height);
-        last_height = seed_height
+        last_height = seed_height;
     }
 
     seeds
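Another recurring rewrite, visible in the `rx_vms.rs` hunks above and the `task.rs` hunks below: `vm.clone()` on an `Arc` becomes `Arc::clone(vm)`. A short self-contained sketch (the `Vec<u8>` payload is an arbitrary stand-in):

use std::sync::Arc;

// `Arc::clone` and `.clone()` compile to the same reference-count bump;
// the explicit form makes it obvious no deep copy of the payload occurs.
fn share(buf: &Arc<Vec<u8>>) -> Arc<Vec<u8>> {
    Arc::clone(buf)
}

fn main() {
    let buf = Arc::new(vec![1, 2, 3]);
    let shared = share(&buf);
    assert_eq!(Arc::strong_count(&buf), 2);
    assert_eq!(*shared, vec![1, 2, 3]);
}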
diff --git a/consensus/src/context/task.rs b/consensus/src/context/task.rs
index bc54285..82b466c 100644
--- a/consensus/src/context/task.rs
+++ b/consensus/src/context/task.rs
@@ -36,7 +36,7 @@ pub(super) struct ContextTaskRequest {
 }
 
 /// The Context task that keeps the blockchain context and handles requests.
-pub struct ContextTask {
+pub(crate) struct ContextTask {
     /// A token used to invalidate previous contexts when a new
     /// block is added to the chain.
     current_validity_token: ValidityToken,
@@ -65,7 +65,7 @@ pub struct ContextTask {
 impl<D: Database + Clone + Send + 'static> ContextTask<D> {
     /// Initialize the [`ContextTask`], this will need to pull a lot of data from the database so may take a
     /// while to complete.
-    pub async fn init_context(
+    pub(crate) async fn init_context(
         cfg: ContextConfig,
         mut database: D,
     ) -> Result<Self, ExtendedConsensusError> {
@@ -131,7 +131,7 @@
             rx_vms::RandomXVmCache::init_from_chain_height(chain_height, &current_hf, db).await
         });
 
-        let context_svc = ContextTask {
+        let context_svc = Self {
             current_validity_token: ValidityToken::new(),
             difficulty_cache: difficulty_cache_handle.await.unwrap()?,
             weight_cache: weight_cache_handle.await.unwrap()?,
@@ -148,7 +148,7 @@
     }
 
     /// Handles a [`BlockChainContextRequest`] and returns a [`BlockChainContextResponse`].
-    pub async fn handle_req(
+    pub(crate) async fn handle_req(
         &mut self,
         req: BlockChainContextRequest,
     ) -> Result<BlockChainContextResponse, tower::BoxError> {
@@ -164,17 +164,17 @@
                     context_to_verify_block: ContextToVerifyBlock {
                         median_weight_for_block_reward: self
                             .weight_cache
-                            .median_for_block_reward(&current_hf),
+                            .median_for_block_reward(current_hf),
                         effective_median_weight: self
                             .weight_cache
-                            .effective_median_block_weight(&current_hf),
+                            .effective_median_block_weight(current_hf),
                         top_hash: self.top_block_hash,
                         median_block_timestamp: self
                             .difficulty_cache
                             .median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)),
                         chain_height: self.chain_height,
                         current_hf,
-                        next_difficulty: self.difficulty_cache.next_difficulty(&current_hf),
+                        next_difficulty: self.difficulty_cache.next_difficulty(current_hf),
                         already_generated_coins: self.already_generated_coins,
                     },
                     cumulative_difficulty: self.difficulty_cache.cumulative_difficulty(),
@@ -191,7 +191,7 @@
 
                 let next_diffs = self
                     .difficulty_cache
-                    .next_difficulties(blocks, &self.hardfork_state.current_hardfork());
+                    .next_difficulties(blocks, self.hardfork_state.current_hardfork());
 
                 BlockChainContextResponse::BatchDifficulties(next_diffs)
             }
             BlockChainContextRequest::NewRXVM(vm) => {
@@ -330,10 +330,10 @@
 
     /// Run the [`ContextTask`], the task will listen for requests on the passed in channel. When the channel closes the
     /// task will finish.
-    pub async fn run(mut self, mut rx: mpsc::Receiver<ContextTaskRequest>) {
+    pub(crate) async fn run(mut self, mut rx: mpsc::Receiver<ContextTaskRequest>) {
         while let Some(req) = rx.recv().await {
             let res = self.handle_req(req.req).instrument(req.span).await;
-            let _ = req.tx.send(res);
+            drop(req.tx.send(res));
         }
 
         tracing::info!("Shutting down blockchain context task.");
diff --git a/consensus/src/context/tokens.rs b/consensus/src/context/tokens.rs
index 882d3b5..d222303 100644
--- a/consensus/src/context/tokens.rs
+++ b/consensus/src/context/tokens.rs
@@ -15,8 +15,8 @@ pub struct ValidityToken {
 
 impl ValidityToken {
     /// Creates a new [`ValidityToken`]
-    pub fn new() -> ValidityToken {
-        ValidityToken {
+    pub fn new() -> Self {
+        Self {
             token: CancellationToken::new(),
         }
     }
@@ -28,6 +28,6 @@ impl ValidityToken {
 
     /// Sets the data to invalid.
     pub fn set_data_invalid(self) {
-        self.token.cancel()
+        self.token.cancel();
     }
 }
diff --git a/consensus/src/context/weight.rs b/consensus/src/context/weight.rs
index 4c89139..e95ae60 100644
--- a/consensus/src/context/weight.rs
+++ b/consensus/src/context/weight.rs
@@ -38,16 +38,16 @@ pub struct BlockWeightsCacheConfig {
 
 impl BlockWeightsCacheConfig {
     /// Creates a new [`BlockWeightsCacheConfig`]
-    pub const fn new(short_term_window: usize, long_term_window: usize) -> BlockWeightsCacheConfig {
-        BlockWeightsCacheConfig {
+    pub const fn new(short_term_window: usize, long_term_window: usize) -> Self {
+        Self {
             short_term_window,
             long_term_window,
         }
     }
 
     /// Returns the [`BlockWeightsCacheConfig`] for all networks (They are all the same as mainnet).
-    pub fn main_net() -> BlockWeightsCacheConfig {
-        BlockWeightsCacheConfig {
+    pub const fn main_net() -> Self {
+        Self {
             short_term_window: SHORT_TERM_WINDOW,
             long_term_window: LONG_TERM_WINDOW,
         }
@@ -99,7 +99,7 @@ impl BlockWeightsCache {
         tracing::info!("Initialized block weight cache, chain-height: {:?}, long term weights length: {:?}, short term weights length: {:?}", chain_height, long_term_weights.len(), short_term_block_weights.len());
 
-        Ok(BlockWeightsCache {
+        Ok(Self {
             short_term_block_weights: rayon_spawn_async(move || {
                 RollingMedian::from_vec(short_term_block_weights, config.short_term_window)
             })
@@ -178,7 +178,7 @@ impl BlockWeightsCache {
 
     /// Add a new block to the cache.
     ///
-    /// The block_height **MUST** be one more than the last height the cache has
+    /// The `block_height` **MUST** be one more than the last height the cache has
     /// seen.
     pub fn new_block(&mut self, block_height: usize, block_weight: usize, long_term_weight: usize) {
         assert_eq!(self.tip_height + 1, block_height);
@@ -208,8 +208,8 @@ impl BlockWeightsCache {
     /// Returns the effective median weight, used for block reward calculations and to calculate
     /// the block weight limit.
     ///
-    /// See: https://cuprate.github.io/monero-book/consensus_rules/blocks/weight_limit.html#calculating-effective-median-weight
-    pub fn effective_median_block_weight(&self, hf: &HardFork) -> usize {
+    /// See: <https://cuprate.github.io/monero-book/consensus_rules/blocks/weight_limit.html#calculating-effective-median-weight>
+    pub fn effective_median_block_weight(&self, hf: HardFork) -> usize {
         calculate_effective_median_block_weight(
             hf,
             self.median_short_term_weight(),
@@ -219,9 +219,9 @@ impl BlockWeightsCache {
     /// Returns the median weight used to calculate block reward punishment.
     ///
-    /// https://cuprate.github.io/monero-book/consensus_rules/blocks/reward.html#calculating-block-reward
-    pub fn median_for_block_reward(&self, hf: &HardFork) -> usize {
-        if hf < &HardFork::V12 {
+    /// <https://cuprate.github.io/monero-book/consensus_rules/blocks/reward.html#calculating-block-reward>
+    pub fn median_for_block_reward(&self, hf: HardFork) -> usize {
+        if hf < HardFork::V12 {
             self.median_short_term_weight()
         } else {
             self.effective_median_block_weight(hf)
@@ -232,17 +232,17 @@ impl BlockWeightsCache {
 
 /// Calculates the effective median with the long term and short term median.
 fn calculate_effective_median_block_weight(
-    hf: &HardFork,
+    hf: HardFork,
     median_short_term_weight: usize,
     median_long_term_weight: usize,
 ) -> usize {
-    if hf < &HardFork::V10 {
+    if hf < HardFork::V10 {
         return median_short_term_weight.max(penalty_free_zone(hf));
     }
 
     let long_term_median = median_long_term_weight.max(PENALTY_FREE_ZONE_5);
     let short_term_median = median_short_term_weight;
 
-    let effective_median = if hf >= &HardFork::V10 && hf < &HardFork::V15 {
+    let effective_median = if hf >= HardFork::V10 && hf < HardFork::V15 {
         min(
             max(PENALTY_FREE_ZONE_5, short_term_median),
             50 * long_term_median,
@@ -258,19 +258,19 @@ fn calculate_effective_median_block_weight(
 }
 
 /// Calculates a blocks long term weight.
-pub fn calculate_block_long_term_weight(
-    hf: &HardFork,
+pub(crate) fn calculate_block_long_term_weight(
+    hf: HardFork,
     block_weight: usize,
     long_term_median: usize,
 ) -> usize {
-    if hf < &HardFork::V10 {
+    if hf < HardFork::V10 {
         return block_weight;
     }
 
     let long_term_median = max(penalty_free_zone(hf), long_term_median);
 
     let (short_term_constraint, adjusted_block_weight) =
-        if hf >= &HardFork::V10 && hf < &HardFork::V15 {
+        if hf >= HardFork::V10 && hf < HardFork::V15 {
             let stc = long_term_median + long_term_median * 2 / 5;
             (stc, block_weight)
         } else {
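The `calculate_block_long_term_weight` hunk above keeps the pre-v15 short-term constraint as `long_term_median + long_term_median * 2 / 5`, i.e. 1.4x the long-term median written so the computation stays in integer arithmetic. A quick worked check of just that expression (the numbers are chosen for illustration only):

/// Pre-v15 short-term constraint: 1.4x the long-term median,
/// expressed without floating point.
fn short_term_constraint(long_term_median: usize) -> usize {
    long_term_median + long_term_median * 2 / 5
}

fn main() {
    // 300_000 + 300_000 * 2 / 5 = 300_000 + 120_000
    assert_eq!(short_term_constraint(300_000), 420_000);
}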
diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs
index 004285d..e104cec 100644
--- a/consensus/src/lib.rs
+++ b/consensus/src/lib.rs
@@ -10,6 +10,16 @@
 //! implement a database you need to have a service which accepts [`BlockchainReadRequest`] and responds
 //! with [`BlockchainResponse`].
 //!
+
+cfg_if::cfg_if! {
+    // Used in external `tests/`.
+    if #[cfg(test)] {
+        use cuprate_test_utils as _;
+        use curve25519_dalek as _;
+        use hex_literal as _;
+    }
+}
+
 use cuprate_consensus_rules::ConsensusError;
 
 mod batch_verifier;
@@ -34,6 +44,7 @@ pub use cuprate_types::{
 
 /// An Error returned from one of the consensus services.
 #[derive(Debug, thiserror::Error)]
+#[expect(variant_size_differences)]
 pub enum ExtendedConsensusError {
     /// A consensus error.
     #[error("{0}")]
@@ -53,7 +64,8 @@ pub enum ExtendedConsensusError {
 }
 
 /// Initialize the 2 verifier [`tower::Service`]s (block and transaction).
-pub async fn initialize_verifier<D, Ctx>(
+#[expect(clippy::type_complexity)]
+pub fn initialize_verifier<D, Ctx>(
     database: D,
     ctx_svc: Ctx,
 ) -> Result<
@@ -112,7 +124,7 @@ pub mod __private {
             Response = BlockchainResponse,
             Error = tower::BoxError,
         >,
-    > crate::Database for T
+    > Database for T
 where
     T::Future: Future<Output = Result<BlockchainResponse, tower::BoxError>> + Send + 'static,
 {
diff --git a/consensus/src/tests.rs b/consensus/src/tests.rs
index 13598be..0efef82 100644
--- a/consensus/src/tests.rs
+++ b/consensus/src/tests.rs
@@ -1,2 +1,2 @@
 mod context;
-pub mod mock_db;
+pub(crate) mod mock_db;
diff --git a/consensus/src/tests/context/data.rs b/consensus/src/tests/context/data.rs
index baa591c..28f61a4 100644
--- a/consensus/src/tests/context/data.rs
+++ b/consensus/src/tests/context/data.rs
@@ -1,11 +1,12 @@
 use cuprate_consensus_rules::HardFork;
 
-pub static HFS_2688888_2689608: [(HardFork, HardFork); 720] =
+pub(crate) static HFS_2688888_2689608: [(HardFork, HardFork); 720] =
     include!("./data/hfs_2688888_2689608");
 
-pub static HFS_2678808_2688888: [(HardFork, HardFork); 10080] =
+pub(crate) static HFS_2678808_2688888: [(HardFork, HardFork); 10080] =
     include!("./data/hfs_2678808_2688888");
 
-pub static BW_2850000_3050000: [(usize, usize); 200_000] = include!("./data/bw_2850000_3050000");
+pub(crate) static BW_2850000_3050000: [(usize, usize); 200_000] =
+    include!("./data/bw_2850000_3050000");
 
-pub static DIF_3000000_3002000: [(u128, u64); 2000] = include!("./data/dif_3000000_3002000");
+pub(crate) static DIF_3000000_3002000: [(u128, u64); 2000] = include!("./data/dif_3000000_3002000");
diff --git a/consensus/src/tests/context/difficulty.rs b/consensus/src/tests/context/difficulty.rs
index a79ae9b..d5027f5 100644
--- a/consensus/src/tests/context/difficulty.rs
+++ b/consensus/src/tests/context/difficulty.rs
@@ -17,7 +17,7 @@ const TEST_LAG: usize = 2;
 
 const TEST_TOTAL_ACCOUNTED_BLOCKS: usize = TEST_WINDOW + TEST_LAG;
 
-pub const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig =
+pub(crate) const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig =
     DifficultyCacheConfig::new(TEST_WINDOW, TEST_CUT, TEST_LAG);
 
 #[tokio::test]
@@ -35,7 +35,7 @@ async fn first_3_blocks_fixed_difficulty() -> Result<(), tower::BoxError> {
         .await?;
 
     for height in 1..3 {
-        assert_eq!(difficulty_cache.next_difficulty(&HardFork::V1), 1);
+        assert_eq!(difficulty_cache.next_difficulty(HardFork::V1), 1);
         difficulty_cache.new_block(height, 0, u128::MAX);
     }
     Ok(())
@@ -66,7 +66,7 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> {
     for (cum_dif, timestamp) in DIF_3000000_3002000.iter().take(cfg.total_block_count()) {
         db_builder.add_block(
             DummyBlockExtendedHeader::default().with_difficulty_info(*timestamp, *cum_dif),
-        )
+        );
     }
 
     let mut diff_cache = DifficultyCache::init_from_chain_height(
@@ -84,7 +84,7 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> {
     {
         let diff = diff_info[1].0 - diff_info[0].0;
 
-        assert_eq!(diff_cache.next_difficulty(&HardFork::V16), diff);
+        assert_eq!(diff_cache.next_difficulty(HardFork::V16), diff);
 
         diff_cache.new_block(3_000_720 + i, diff_info[1].1, diff_info[1].0);
     }
@@ -139,22 +139,22 @@ proptest! {
             no_lag_cache.cumulative_difficulties.pop_front();
         }
         // get the difficulty
-        let next_diff_no_lag = no_lag_cache.next_difficulty(&hf);
+        let next_diff_no_lag = no_lag_cache.next_difficulty(hf);
 
         for _ in 0..TEST_LAG {
             // add new blocks to the lagged cache
             diff_cache.new_block(diff_cache.last_accounted_height+1, timestamp, cumulative_difficulty);
         }
         // they both should now be the same
-        prop_assert_eq!(diff_cache.next_difficulty(&hf), next_diff_no_lag)
+        prop_assert_eq!(diff_cache.next_difficulty(hf), next_diff_no_lag);
     }
 
     #[test]
     fn next_difficulty_consistent(diff_cache in arb_difficulty_cache(TEST_TOTAL_ACCOUNTED_BLOCKS), hf in any::<HardFork>()) {
-        let first_call = diff_cache.next_difficulty(&hf);
-        prop_assert_eq!(first_call, diff_cache.next_difficulty(&hf));
-        prop_assert_eq!(first_call, diff_cache.next_difficulty(&hf));
-        prop_assert_eq!(first_call, diff_cache.next_difficulty(&hf));
+        let first_call = diff_cache.next_difficulty(hf);
+        prop_assert_eq!(first_call, diff_cache.next_difficulty(hf));
+        prop_assert_eq!(first_call, diff_cache.next_difficulty(hf));
+        prop_assert_eq!(first_call, diff_cache.next_difficulty(hf));
     }
 
     #[test]
@@ -178,7 +178,7 @@ proptest! {
 
     #[test]
     fn window_size_kept_constant(mut diff_cache in arb_difficulty_cache(TEST_TOTAL_ACCOUNTED_BLOCKS), new_blocks in any::<Vec<(u64, u128)>>()) {
-        for (timestamp, cumulative_difficulty) in new_blocks.into_iter() {
+        for (timestamp, cumulative_difficulty) in new_blocks {
            diff_cache.new_block(diff_cache.last_accounted_height+1, timestamp, cumulative_difficulty);
            prop_assert_eq!(diff_cache.timestamps.len(), TEST_TOTAL_ACCOUNTED_BLOCKS);
            prop_assert_eq!(diff_cache.cumulative_difficulties.len(), TEST_TOTAL_ACCOUNTED_BLOCKS);
@@ -193,7 +193,7 @@ proptest! {
    ) {
        let cache = diff_cache.clone();
 
-        diff_cache.next_difficulties(timestamps.into_iter().zip([hf].into_iter().cycle()).collect(), &hf);
+        diff_cache.next_difficulties(timestamps.into_iter().zip(std::iter::once(hf).cycle()).collect(), hf);
 
        prop_assert_eq!(diff_cache, cache);
    }
 
@@ -204,12 +204,12 @@ proptest! {
        timestamps in any_with::<Vec<u64>>(size_range(0..1000).lift()),
        hf in any::<HardFork>(),
    ) {
-        let timestamps: Vec<_> = timestamps.into_iter().zip([hf].into_iter().cycle()).collect();
+        let timestamps: Vec<_> = timestamps.into_iter().zip(std::iter::once(hf).cycle()).collect();
 
-        let diffs = diff_cache.next_difficulties(timestamps.clone(), &hf);
+        let diffs = diff_cache.next_difficulties(timestamps.clone(), hf);
 
        for (timestamp, diff) in timestamps.into_iter().zip(diffs.into_iter()) {
-            prop_assert_eq!(diff_cache.next_difficulty(&timestamp.1), diff);
+            prop_assert_eq!(diff_cache.next_difficulty(timestamp.1), diff);
 
            diff_cache.new_block(diff_cache.last_accounted_height +1, timestamp.0, diff + diff_cache.cumulative_difficulty());
        }
 
@@ -226,7 +226,7 @@ proptest! {
        let blocks_to_pop = new_blocks.len();
 
        let mut new_cache = old_cache.clone();
-        for (timestamp, cumulative_difficulty) in new_blocks.into_iter() {
+        for (timestamp, cumulative_difficulty) in new_blocks {
            database.add_block(DummyBlockExtendedHeader::default().with_difficulty_info(timestamp, cumulative_difficulty));
            new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty);
        }
@@ -250,7 +250,7 @@ proptest! {
        let blocks_to_pop = new_blocks.len();
 
        let mut new_cache = old_cache.clone();
-        for (timestamp, cumulative_difficulty) in new_blocks.into_iter() {
+        for (timestamp, cumulative_difficulty) in new_blocks {
            database.add_block(DummyBlockExtendedHeader::default().with_difficulty_info(timestamp, cumulative_difficulty));
            new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty);
        }
diff --git a/consensus/src/tests/context/hardforks.rs b/consensus/src/tests/context/hardforks.rs
index ffdff59..17bd47f 100644
--- a/consensus/src/tests/context/hardforks.rs
+++ b/consensus/src/tests/context/hardforks.rs
@@ -31,7 +31,7 @@ const TEST_HFS: [HFInfo; NUMB_OF_HARD_FORKS] = [
     HFInfo::new(150, 0),
 ];
 
-pub const TEST_HARD_FORK_CONFIG: HardForkConfig = HardForkConfig {
+pub(crate) const TEST_HARD_FORK_CONFIG: HardForkConfig = HardForkConfig {
     window: TEST_WINDOW_SIZE,
     info: HFsInfo::new(TEST_HFS),
 };
diff --git a/consensus/src/tests/context/rx_vms.rs b/consensus/src/tests/context/rx_vms.rs
index 5c198cf..b1eba8e 100644
--- a/consensus/src/tests/context/rx_vms.rs
+++ b/consensus/src/tests/context/rx_vms.rs
@@ -39,6 +39,7 @@ fn rx_heights_consistent() {
 }
 
 #[tokio::test]
+#[expect(unused_qualifications, reason = "false positive in tokio macro")]
 async fn rx_vm_created_on_hf_12() {
     let db = DummyDatabaseBuilder::default().finish(Some(10));
 
diff --git a/consensus/src/tests/context/weight.rs b/consensus/src/tests/context/weight.rs
index 6706d97..b23f8f8 100644
--- a/consensus/src/tests/context/weight.rs
+++ b/consensus/src/tests/context/weight.rs
@@ -8,7 +8,8 @@ use crate::{
 };
 use cuprate_types::Chain;
 
-pub const TEST_WEIGHT_CONFIG: BlockWeightsCacheConfig = BlockWeightsCacheConfig::new(100, 5000);
+pub(crate) const TEST_WEIGHT_CONFIG: BlockWeightsCacheConfig =
+    BlockWeightsCacheConfig::new(100, 5000);
 
 #[tokio::test]
 async fn blocks_out_of_window_not_counted() -> Result<(), tower::BoxError> {
@@ -157,7 +158,7 @@ async fn calc_bw_ltw_2850000_3050000() {
 
     for (i, (weight, ltw)) in BW_2850000_3050000.iter().skip(100_000).enumerate() {
         let calc_ltw = calculate_block_long_term_weight(
-            &HardFork::V16,
+            HardFork::V16,
             *weight,
             weight_cache.median_long_term_weight(),
         );
diff --git a/consensus/src/tests/mock_db.rs b/consensus/src/tests/mock_db.rs
index a260cf0..5ca53d8 100644
--- a/consensus/src/tests/mock_db.rs
+++ b/consensus/src/tests/mock_db.rs
@@ -1,3 +1,5 @@
+#![expect(non_local_definitions, reason = "proptest macro")]
+
 use std::{
     future::Future,
     pin::Pin,
@@ -60,7 +62,7 @@ pub struct DummyBlockExtendedHeader {
 
 impl From<DummyBlockExtendedHeader> for ExtendedBlockHeader {
     fn from(value: DummyBlockExtendedHeader) -> Self {
-        ExtendedBlockHeader {
+        Self {
             version: value.version.unwrap_or(HardFork::V1),
             vote: value.vote.unwrap_or(HardFork::V1).as_u8(),
             timestamp: value.timestamp.unwrap_or_default(),
@@ -72,31 +74,23 @@ impl From<DummyBlockExtendedHeader> for ExtendedBlockHeader {
 }
 
 impl DummyBlockExtendedHeader {
-    pub fn with_weight_into(
-        mut self,
-        weight: usize,
-        long_term_weight: usize,
-    ) -> DummyBlockExtendedHeader {
+    pub const fn with_weight_into(mut self, weight: usize, long_term_weight: usize) -> Self {
         self.block_weight = Some(weight);
         self.long_term_weight = Some(long_term_weight);
         self
     }
 
-    pub fn with_hard_fork_info(
-        mut self,
-        version: HardFork,
-        vote: HardFork,
-    ) -> DummyBlockExtendedHeader {
+    pub const fn with_hard_fork_info(mut self, version: HardFork, vote: HardFork) -> Self {
         self.vote = Some(vote);
         self.version = Some(version);
         self
     }
 
-    pub fn with_difficulty_info(
+    pub const fn with_difficulty_info(
         mut self,
         timestamp: u64,
         cumulative_difficulty: u128,
-    ) -> DummyBlockExtendedHeader {
+    ) -> Self {
         self.timestamp = Some(timestamp);
         self.cumulative_difficulty = Some(cumulative_difficulty);
         self
@@ -104,16 +98,16 @@ impl DummyBlockExtendedHeader {
 }
 
 #[derive(Debug, Default)]
-pub struct DummyDatabaseBuilder {
+pub(crate) struct DummyDatabaseBuilder {
     blocks: Vec<DummyBlockExtendedHeader>,
 }
 
 impl DummyDatabaseBuilder {
-    pub fn add_block(&mut self, block: DummyBlockExtendedHeader) {
+    pub(crate) fn add_block(&mut self, block: DummyBlockExtendedHeader) {
         self.blocks.push(block);
     }
 
-    pub fn finish(self, dummy_height: Option<usize>) -> DummyDatabase {
+    pub(crate) fn finish(self, dummy_height: Option<usize>) -> DummyDatabase {
         DummyDatabase {
             blocks: Arc::new(self.blocks.into()),
             dummy_height,
@@ -122,14 +116,15 @@ impl DummyDatabaseBuilder {
 }
 
 #[derive(Clone, Debug)]
-pub struct DummyDatabase {
+pub(crate) struct DummyDatabase {
     blocks: Arc<RwLock<Vec<DummyBlockExtendedHeader>>>,
     dummy_height: Option<usize>,
 }
 
 impl DummyDatabase {
-    pub fn add_block(&mut self, block: DummyBlockExtendedHeader) {
-        self.blocks.write().unwrap().push(block)
+    #[expect(clippy::needless_pass_by_ref_mut)]
+    pub(crate) fn add_block(&mut self, block: DummyBlockExtendedHeader) {
+        self.blocks.write().unwrap().push(block);
     }
 }
 
@@ -144,7 +139,7 @@ impl Service<BlockchainReadRequest> for DummyDatabase {
     }
 
     fn call(&mut self, req: BlockchainReadRequest) -> Self::Future {
-        let blocks = self.blocks.clone();
+        let blocks = Arc::clone(&self.blocks);
         let dummy_height = self.dummy_height;
 
         async move {
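The attributes added in these test files (`#[expect(non_local_definitions, ...)]`, `#[expect(clippy::needless_pass_by_ref_mut)]`) use `#[expect]` rather than `#[allow]`. A minimal sketch of the difference, which requires Rust 1.81+: an `expect` suppression warns if the lint it suppresses ever stops firing, so stale suppressions surface on their own.

// Fires `unused_variables`, so the expectation below is fulfilled.
// If the body changed to use `x`, the compiler would warn that the
// `expect` is no longer needed -- unlike a silent, stale `allow`.
#[expect(unused_variables, reason = "kept to demonstrate the attribute")]
fn demo() {
    let x = 42;
}

fn main() {
    demo();
}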
diff --git a/consensus/src/transactions.rs b/consensus/src/transactions.rs
index 09f6884..f29c852 100644
--- a/consensus/src/transactions.rs
+++ b/consensus/src/transactions.rs
@@ -5,7 +5,6 @@
 use std::{
     collections::HashSet,
     future::Future,
-    ops::Deref,
     pin::Pin,
     sync::Arc,
     task::{Context, Poll},
@@ -102,8 +101,8 @@ where
     D::Future: Send + 'static,
 {
     /// Creates a new [`TxVerifierService`].
-    pub fn new(database: D) -> TxVerifierService<D> {
-        TxVerifierService { database }
+    pub const fn new(database: D) -> Self {
+        Self { database }
     }
 }
 
@@ -244,7 +243,7 @@ where
 
     if kis_spent {
         tracing::debug!("One or more key images in batch already spent.");
-        Err(ConsensusError::Transaction(TransactionError::KeyImageSpent))?;
+        return Err(ConsensusError::Transaction(TransactionError::KeyImageSpent).into());
     }
 
     let mut verified_at_block_hashes = txs
@@ -281,8 +280,8 @@ where
     let (txs_needing_full_verification, txs_needing_partial_verification) =
         transactions_needing_verification(
             txs,
-            verified_at_block_hashes,
-            &hf,
+            &verified_at_block_hashes,
+            hf,
             current_chain_height,
             time_for_time_lock,
         )?;
@@ -302,11 +301,14 @@ where
     Ok(VerifyTxResponse::Ok)
 }
 
-#[allow(clippy::type_complexity)] // I don't think the return is too complex
+#[expect(
+    clippy::type_complexity,
+    reason = "I don't think the return is too complex"
+)]
 fn transactions_needing_verification(
     txs: &[Arc<TransactionVerificationData>],
-    hashes_in_main_chain: HashSet<[u8; 32]>,
-    current_hf: &HardFork,
+    hashes_in_main_chain: &HashSet<[u8; 32]>,
+    current_hf: HardFork,
     current_chain_height: usize,
     time_for_time_lock: u64,
 ) -> Result<
@@ -321,27 +323,28 @@ fn transactions_needing_verification(
     // txs needing partial _contextual_ validation, not semantic.
     let mut partial_validation_transactions = Vec::new();
 
-    for tx in txs.iter() {
+    for tx in txs {
         let guard = tx.cached_verification_state.lock().unwrap();
 
-        match guard.deref() {
+        match &*guard {
             CachedVerificationState::NotVerified => {
                 drop(guard);
                 full_validation_transactions
-                    .push((tx.clone(), VerificationNeeded::SemanticAndContextual));
+                    .push((Arc::clone(tx), VerificationNeeded::SemanticAndContextual));
                 continue;
             }
             CachedVerificationState::ValidAtHashAndHF { block_hash, hf } => {
-                if current_hf != hf {
+                if current_hf != *hf {
                     drop(guard);
                     full_validation_transactions
-                        .push((tx.clone(), VerificationNeeded::SemanticAndContextual));
+                        .push((Arc::clone(tx), VerificationNeeded::SemanticAndContextual));
                     continue;
                 }
 
                 if !hashes_in_main_chain.contains(block_hash) {
                     drop(guard);
-                    full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual));
+                    full_validation_transactions
+                        .push((Arc::clone(tx), VerificationNeeded::Contextual));
                     continue;
                 }
             }
@@ -350,21 +353,22 @@ fn transactions_needing_verification(
                 hf,
                 time_lock,
             } => {
-                if current_hf != hf {
+                if current_hf != *hf {
                     drop(guard);
                     full_validation_transactions
-                        .push((tx.clone(), VerificationNeeded::SemanticAndContextual));
+                        .push((Arc::clone(tx), VerificationNeeded::SemanticAndContextual));
                     continue;
                 }
 
                 if !hashes_in_main_chain.contains(block_hash) {
                     drop(guard);
-                    full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual));
+                    full_validation_transactions
+                        .push((Arc::clone(tx), VerificationNeeded::Contextual));
                     continue;
                 }
 
                 // If the time lock is still locked then the transaction is invalid.
-                if !output_unlocked(time_lock, current_chain_height, time_for_time_lock, hf) {
+                if !output_unlocked(time_lock, current_chain_height, time_for_time_lock, *hf) {
                     return Err(ConsensusError::Transaction(
                         TransactionError::OneOrMoreRingMembersLocked,
                     ));
@@ -374,7 +378,7 @@ fn transactions_needing_verification(
 
         if tx.version == TxVersion::RingSignatures {
             drop(guard);
-            partial_validation_transactions.push(tx.clone());
+            partial_validation_transactions.push(Arc::clone(tx));
             continue;
         }
     }
@@ -400,7 +404,7 @@ where
     batch_get_decoy_info(&txs, hf, database)
         .await?
-        .try_for_each(|decoy_info| decoy_info.and_then(|di| Ok(check_decoy_info(&di, &hf)?)))?;
+        .try_for_each(|decoy_info| decoy_info.and_then(|di| Ok(check_decoy_info(&di, hf)?)))?;
 
     Ok(())
 }
 
@@ -417,7 +421,7 @@ where
     D: Database + Clone + Sync + Send + 'static,
 {
     let txs_ring_member_info =
-        batch_get_ring_member_info(txs.iter().map(|(tx, _)| tx), &hf, database).await?;
+        batch_get_ring_member_info(txs.iter().map(|(tx, _)| tx), hf, database).await?;
 
     rayon_spawn_async(move || {
         let batch_verifier = MultiThreadedBatchVerifier::new(rayon::current_num_threads());
@@ -432,7 +436,7 @@ where
                 tx.tx_blob.len(),
                 tx.tx_weight,
                 &tx.tx_hash,
-                &hf,
+                hf,
                 &batch_verifier,
             )?;
             // make sure we calculated the right fee.
@@ -445,7 +449,7 @@ where
                 ring,
                 current_chain_height,
                 current_time_lock_timestamp,
-                &hf,
+                hf,
             )?;
 
             Ok::<_, ConsensusError>(())
diff --git a/consensus/src/transactions/contextual_data.rs b/consensus/src/transactions/contextual_data.rs
index 82f9976..66c53b3 100644
--- a/consensus/src/transactions/contextual_data.rs
+++ b/consensus/src/transactions/contextual_data.rs
@@ -57,7 +57,7 @@ fn get_ring_members_for_inputs(
                 })
                 .collect::<Result<_, _>>()?)
         }
-        _ => Err(TransactionError::IncorrectInputType),
+        Input::Gen(_) => Err(TransactionError::IncorrectInputType),
     })
     .collect::<Result<_, _>>()
 }
@@ -143,7 +143,7 @@ fn new_rings(
 /// them.
 pub async fn batch_get_ring_member_info<D: Database>(
     txs_verification_data: impl Iterator<Item = &Arc<TransactionVerificationData>> + Clone,
-    hf: &HardFork,
+    hf: HardFork,
     mut database: D,
 ) -> Result<Vec<TxRingMembersInfo>, ExtendedConsensusError> {
     let mut output_ids = HashMap::new();
@@ -183,14 +183,14 @@ pub async fn batch_get_ring_member_info(
             )
             .map_err(ConsensusError::Transaction)?;
 
-            let decoy_info = if hf != &HardFork::V1 {
+            let decoy_info = if hf == HardFork::V1 {
+                None
+            } else {
                 // this data is only needed after hard-fork 1.
                 Some(
                     DecoyInfo::new(&tx_v_data.tx.prefix().inputs, numb_outputs, hf)
                         .map_err(ConsensusError::Transaction)?,
                 )
-            } else {
-                None
             };
 
             new_ring_member_info(ring_members_for_tx, decoy_info, tx_v_data.version)
@@ -224,7 +224,7 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>(
         .flat_map(|tx_info| {
             tx_info.tx.prefix().inputs.iter().map(|input| match input {
                 Input::ToKey { amount, .. } => amount.unwrap_or(0),
-                _ => 0,
+                Input::Gen(_) => 0,
             })
         })
         .collect::<HashSet<_>>();
@@ -249,7 +249,7 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>(
             DecoyInfo::new(
                 &tx_v_data.tx.prefix().inputs,
                 |amt| outputs_with_amount.get(&amt).copied().unwrap_or(0),
-                &hf,
+                hf,
             )
             .map_err(ConsensusError::Transaction)
         }))
diff --git a/consensus/src/transactions/free.rs b/consensus/src/transactions/free.rs
index 67b675a..3613f29 100644
--- a/consensus/src/transactions/free.rs
+++ b/consensus/src/transactions/free.rs
@@ -39,7 +39,7 @@ pub fn new_tx_verification_data(
 /// Calculates the weight of a [`Transaction`].
 ///
 /// This is more efficient that [`Transaction::weight`] if you already have the transaction blob.
-pub fn tx_weight(tx: &Transaction, tx_blob: &[u8]) -> usize {
+pub(crate) fn tx_weight(tx: &Transaction, tx_blob: &[u8]) -> usize {
     // the tx weight is only different from the blobs length for bp(+) txs.
 
     match &tx {
@@ -64,7 +64,7 @@ pub fn tx_weight(tx: &Transaction, tx_blob: &[u8]) -> usize {
 }
 
 /// Calculates the fee of the [`Transaction`].
-pub fn tx_fee(tx: &Transaction) -> Result<u64, TransactionError> {
+pub(crate) fn tx_fee(tx: &Transaction) -> Result<u64, TransactionError> {
     let mut fee = 0_u64;
 
     match &tx {
diff --git a/consensus/tests/verify_correct_txs.rs b/consensus/tests/verify_correct_txs.rs
index 7afb370..4d6c179 100644
--- a/consensus/tests/verify_correct_txs.rs
+++ b/consensus/tests/verify_correct_txs.rs
@@ -1,3 +1,6 @@
+#![expect(unused_crate_dependencies, reason = "external test module")]
+#![expect(clippy::allow_attributes, reason = "usage inside macro")]
+
 use std::{
     collections::{BTreeMap, HashMap},
     future::ready,
@@ -29,7 +32,7 @@ fn dummy_database(outputs: BTreeMap<u64, OutputOnChain>) -> impl Database + Clone
             BlockchainResponse::NumberOutputsWithAmount(HashMap::new())
         }
         BlockchainReadRequest::Outputs(outs) => {
-            let idxs = outs.get(&0).unwrap();
+            let idxs = &outs[&0];
 
             let mut ret = HashMap::new();
 
diff --git a/types/src/block_complete_entry.rs b/types/src/block_complete_entry.rs
index ba5fc2b..77ed82d 100644
--- a/types/src/block_complete_entry.rs
+++ b/types/src/block_complete_entry.rs
@@ -1,7 +1,6 @@
 //! Contains [`BlockCompleteEntry`] and the related types.
 
 //---------------------------------------------------------------------------------------------------- Import
-#[cfg(feature = "epee")]
 use bytes::Bytes;
 
 #[cfg(feature = "serde")]
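A closing note on the `unused_crate_dependencies` handling seen in `lib.rs` and `verify_correct_txs.rs` above: a crate pulled in only for integration tests, or a target that shares the library's Cargo.toml, trips that lint, and the two idiomatic escape hatches are an anonymous import or a crate-level `expect`. A sketch of the import half (`std` stands in for a real dev-dependency here):

// In a real crate the attribute form sits at the top of the target, e.g.:
//     #![expect(unused_crate_dependencies, reason = "external test module")]
// The anonymous import below is the other half of the idiom: it pulls
// nothing into scope but still counts as a use of the crate.
use std as _;

fn main() {
    println!("anonymous `use ... as _;` marks a crate as intentionally used");
}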