diff --git a/Cargo.lock b/Cargo.lock index 1f998101..c35deecb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -50,17 +50,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" -[[package]] -name = "async-lock" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" -dependencies = [ - "event-listener", - "event-listener-strategy", - "pin-project-lite", -] - [[package]] name = "async-stream" version = "0.3.5" @@ -176,28 +165,12 @@ dependencies = [ "rustc-demangle", ] -[[package]] -name = "base58-monero" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978e81a45367d2409ecd33369a45dda2e9a3ca516153ec194de1fbda4b9fb79d" -dependencies = [ - "thiserror", - "tiny-keccak", -] - [[package]] name = "base64" version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" -[[package]] -name = "base64ct" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" - [[package]] name = "bincode" version = "1.3.3" @@ -401,15 +374,6 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70" -[[package]] -name = "concurrent-queue" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" -dependencies = [ - "crossbeam-utils", -] - [[package]] name = "core-foundation" version = "0.9.4" @@ -500,12 +464,6 @@ version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - [[package]] name = "crypto-bigint" version = "0.5.5" @@ -513,6 +471,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "subtle", + "zeroize", ] [[package]] @@ -589,12 +548,10 @@ dependencies = [ "cuprate-test-utils", "cuprate-types", "curve25519-dalek", - "dalek-ff-group", "futures", "hex", "hex-literal", "monero-serai", - "multiexp", "proptest", "proptest-derive", "rand", @@ -617,11 +574,9 @@ dependencies = [ "cuprate-cryptonight", "cuprate-helper", "curve25519-dalek", - "dalek-ff-group", "hex", "hex-literal", "monero-serai", - "multiexp", "proptest", "proptest-derive", "rand", @@ -879,7 +834,9 @@ dependencies = [ "futures", "hex", "hex-literal", + "monero-rpc", "monero-serai", + "monero-simple-request-rpc", "paste", "pretty_assertions", "serde", @@ -897,6 +854,7 @@ version = "0.0.0" name = "cuprate-types" version = "0.0.0" dependencies = [ + "borsh", "bytes", "cuprate-epee-encoding", "cuprate-fixed-bytes", @@ -951,7 +909,7 @@ dependencies = [ [[package]] name = "dalek-ff-group" version = "0.4.1" -source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "crypto-bigint", "curve25519-dalek", @@ -1070,27 +1028,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "event-listener" -version = "5.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", 
-] - -[[package]] -name = "event-listener-strategy" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" -dependencies = [ - "event-listener", - "pin-project-lite", -] - [[package]] name = "fastrand" version = "2.1.0" @@ -1127,7 +1064,7 @@ dependencies = [ [[package]] name = "flexible-transcript" version = "0.3.2" -source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "blake2", "digest", @@ -1285,7 +1222,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.2.6", + "indexmap", "slab", "tokio", "tokio-util", @@ -1363,15 +1300,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" -[[package]] -name = "hmac" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" -dependencies = [ - "digest", -] - [[package]] name = "http" version = "1.1.0" @@ -1791,63 +1719,163 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "monero-address" +version = "0.1.0" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "curve25519-dalek", + "monero-io", + "monero-primitives", + "std-shims", + "thiserror", + "zeroize", +] + +[[package]] +name = "monero-borromean" +version = "0.1.0" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "curve25519-dalek", + "monero-generators", + "monero-io", + "monero-primitives", + "std-shims", + "zeroize", +] + +[[package]] +name = "monero-bulletproofs" +version = "0.1.0" +source 
= "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "curve25519-dalek", + "monero-generators", + "monero-io", + "monero-primitives", + "rand_core", + "std-shims", + "thiserror", + "zeroize", +] + +[[package]] +name = "monero-clsag" +version = "0.1.0" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "curve25519-dalek", + "dalek-ff-group", + "flexible-transcript", + "group", + "monero-generators", + "monero-io", + "monero-primitives", + "rand_chacha", + "rand_core", + "std-shims", + "subtle", + "thiserror", + "zeroize", +] + [[package]] name = "monero-generators" version = "0.4.0" -source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "curve25519-dalek", "dalek-ff-group", "group", + "monero-io", "sha3", "std-shims", "subtle", ] +[[package]] +name = "monero-io" +version = "0.1.0" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "curve25519-dalek", + "std-shims", +] + +[[package]] +name = "monero-mlsag" +version = "0.1.0" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "curve25519-dalek", + "monero-generators", + "monero-io", + "monero-primitives", + "std-shims", + "thiserror", + "zeroize", +] + +[[package]] +name = "monero-primitives" +version = "0.1.0" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "curve25519-dalek", + "monero-generators", + "monero-io", + "sha3", + "std-shims", + "zeroize", +] + +[[package]] +name = "monero-rpc" +version = "0.1.0" +source = 
"git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" +dependencies = [ + "async-trait", + "curve25519-dalek", + "hex", + "monero-address", + "monero-serai", + "serde", + "serde_json", + "std-shims", + "thiserror", + "zeroize", +] + [[package]] name = "monero-serai" version = "0.1.4-alpha" -source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ - "async-lock", - "async-trait", - "base58-monero", "curve25519-dalek", - "dalek-ff-group", - "digest_auth", - "flexible-transcript", - "group", - "hex", "hex-literal", + "monero-borromean", + "monero-bulletproofs", + "monero-clsag", "monero-generators", - "multiexp", - "pbkdf2", - "rand", - "rand_chacha", - "rand_core", - "rand_distr", - "serde", - "serde_json", - "sha3", - "simple-request", + "monero-io", + "monero-mlsag", + "monero-primitives", "std-shims", - "subtle", - "thiserror", - "tokio", "zeroize", ] [[package]] -name = "multiexp" -version = "0.4.0" -source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1" +name = "monero-simple-request-rpc" +version = "0.1.0" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ - "ff", - "group", - "rand_core", - "rustversion", - "std-shims", - "zeroize", + "async-trait", + "digest_auth", + "hex", + "monero-rpc", + "simple-request", + "tokio", ] [[package]] @@ -1907,12 +1935,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "parking" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" - [[package]] name = "parking_lot" version = "0.12.3" @@ -1936,35 +1958,12 @@ dependencies = [ "windows-targets 0.52.5", ] -[[package]] -name = 
"password-hash" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" -dependencies = [ - "base64ct", - "rand_core", - "subtle", -] - [[package]] name = "paste" version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" -[[package]] -name = "pbkdf2" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" -dependencies = [ - "digest", - "hmac", - "password-hash", - "sha2", -] - [[package]] name = "percent-encoding" version = "2.3.1" @@ -2540,7 +2539,7 @@ dependencies = [ [[package]] name = "simple-request" version = "0.1.0" -source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "http-body-util", "hyper", @@ -2596,7 +2595,7 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "std-shims" version = "0.1.1" -source = "git+https://github.com/Cuprate/serai.git?rev=d27d934#d27d93480aa8a849d84214ad4c71d83ce6fea0c1" +source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ "hashbrown", "spin", @@ -2722,15 +2721,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - [[package]] name = "tinystr" version = "0.7.6" diff --git a/Cargo.toml b/Cargo.toml index da82d9ea..9f0fa278 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,15 +58,13 @@ chrono = { version = "0.4.31", 
default-features = false } crypto-bigint = { version = "0.5.5", default-features = false } crossbeam = { version = "0.8.4", default-features = false } curve25519-dalek = { version = "4.1.3", default-features = false } -dalek-ff-group = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false } dashmap = { version = "5.5.3", default-features = false } dirs = { version = "5.0.1", default-features = false } futures = { version = "0.3.29", default-features = false } hex = { version = "0.4.3", default-features = false } hex-literal = { version = "0.4", default-features = false } indexmap = { version = "2.2.5", default-features = false } -monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false } -multiexp = { git = "https://github.com/Cuprate/serai.git", rev = "d27d934", default-features = false } +monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce", default-features = false } paste = { version = "1.0.14", default-features = false } pin-project = { version = "1.1.3", default-features = false } randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false } @@ -86,11 +84,13 @@ tracing-subscriber = { version = "0.3.17", default-features = false } tracing = { version = "0.1.40", default-features = false } ## workspace.dev-dependencies -tempfile = { version = "3" } -pretty_assertions = { version = "1.4.0" } -proptest = { version = "1" } -proptest-derive = { version = "0.4.0" } -tokio-test = { version = "0.4.4" } +monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" } +monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" } +tempfile = { version = "3" } +pretty_assertions = { version = "1.4.0" } +proptest = { version = "1" } +proptest-derive = { version = "0.4.0" } +tokio-test = { version = "0.4.4" } ## TODO: ## Potential dependencies. 
diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index 521b98ca..bd3994a7 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -19,8 +19,6 @@ futures = { workspace = true, features = ["std", "async-await"] } randomx-rs = { workspace = true } monero-serai = { workspace = true, features = ["std"] } -multiexp = { workspace = true } -dalek-ff-group = { workspace = true } curve25519-dalek = { workspace = true } rayon = { workspace = true } diff --git a/consensus/fast-sync/src/create.rs b/consensus/fast-sync/src/create.rs index 8d4f9a6b..0d6d03f1 100644 --- a/consensus/fast-sync/src/create.rs +++ b/consensus/fast-sync/src/create.rs @@ -13,13 +13,13 @@ use cuprate_types::{ use cuprate_fast_sync::{hash_of_hashes, BlockId, HashOfHashes}; -const BATCH_SIZE: u64 = 512; +const BATCH_SIZE: usize = 512; async fn read_batch( handle: &mut BlockchainReadHandle, - height_from: u64, + height_from: usize, ) -> Result, RuntimeError> { - let mut block_ids = Vec::::with_capacity(BATCH_SIZE as usize); + let mut block_ids = Vec::::with_capacity(BATCH_SIZE); for height in height_from..(height_from + BATCH_SIZE) { let request = BlockchainReadRequest::BlockHash(height, Chain::Main); @@ -53,7 +53,7 @@ fn generate_hex(hashes: &[HashOfHashes]) -> String { #[command(version, about, long_about = None)] struct Args { #[arg(short, long)] - height: u64, + height: usize, } #[tokio::main] @@ -67,7 +67,7 @@ async fn main() { let mut hashes_of_hashes = Vec::new(); - let mut height = 0u64; + let mut height = 0_usize; while height < height_target { match read_batch(&mut read_handle, height).await { diff --git a/consensus/fast-sync/src/fast_sync.rs b/consensus/fast-sync/src/fast_sync.rs index a97040a6..b42ae642 100644 --- a/consensus/fast-sync/src/fast_sync.rs +++ b/consensus/fast-sync/src/fast_sync.rs @@ -244,7 +244,7 @@ where let block_blob = block.serialize(); - let Some(Input::Gen(height)) = block.miner_tx.prefix.inputs.first() else { + let Some(Input::Gen(height)) = 
block.miner_transaction.prefix().inputs.first() else { return Err(FastSyncError::MinerTx(MinerTxError::InputNotOfTypeGen)); }; if *height != block_chain_ctx.chain_height { @@ -252,7 +252,7 @@ where } let mut verified_txs = Vec::with_capacity(txs.len()); - for tx in &block.txs { + for tx in &block.transactions { let tx = txs .remove(tx) .ok_or(FastSyncError::TxsIncludedWithBlockIncorrect)?; @@ -269,8 +269,8 @@ where let total_fees = verified_txs.iter().map(|tx| tx.fee).sum::(); let total_outputs = block - .miner_tx - .prefix + .miner_transaction + .prefix() .outputs .iter() .map(|output| output.amount.unwrap_or(0)) @@ -278,8 +278,8 @@ where let generated_coins = total_outputs - total_fees; - let weight = - block.miner_tx.weight() + verified_txs.iter().map(|tx| tx.tx_weight).sum::(); + let weight = block.miner_transaction.weight() + + verified_txs.iter().map(|tx| tx.tx_weight).sum::(); Ok(FastSyncResponse::ValidateBlock(VerifiedBlockInformation { block_blob, diff --git a/consensus/rules/Cargo.toml b/consensus/rules/Cargo.toml index fd86a61e..311bcc95 100644 --- a/consensus/rules/Cargo.toml +++ b/consensus/rules/Cargo.toml @@ -15,8 +15,6 @@ cuprate-helper = { path = "../../helper", default-features = false, features = [ cuprate-cryptonight = {path = "../../cryptonight"} monero-serai = { workspace = true, features = ["std"] } -multiexp = { workspace = true, features = ["std", "batch"] } -dalek-ff-group = { workspace = true, features = ["std"] } curve25519-dalek = { workspace = true, features = ["alloc", "zeroize", "precomputed-tables"] } rand = { workspace = true, features = ["std", "std_rng"] } diff --git a/consensus/rules/src/batch_verifier.rs b/consensus/rules/src/batch_verifier.rs index c8d3f104..bce6eb9d 100644 --- a/consensus/rules/src/batch_verifier.rs +++ b/consensus/rules/src/batch_verifier.rs @@ -1,4 +1,4 @@ -use multiexp::BatchVerifier as InternalBatchVerifier; +use monero_serai::ringct::bulletproofs::BatchVerifier as InternalBatchVerifier; /// This trait 
represents a batch verifier. /// @@ -12,18 +12,12 @@ pub trait BatchVerifier { /// # Panics /// This function may panic if `stmt` contains calls to `rayon`'s parallel iterators, e.g. `par_iter()`. // TODO: remove the panics by adding a generic API upstream. - fn queue_statement( - &mut self, - stmt: impl FnOnce(&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>) -> R, - ) -> R; + fn queue_statement(&mut self, stmt: impl FnOnce(&mut InternalBatchVerifier) -> R) -> R; } // impl this for a single threaded batch verifier. -impl BatchVerifier for &'_ mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint> { - fn queue_statement( - &mut self, - stmt: impl FnOnce(&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>) -> R, - ) -> R { +impl BatchVerifier for &'_ mut InternalBatchVerifier { + fn queue_statement(&mut self, stmt: impl FnOnce(&mut InternalBatchVerifier) -> R) -> R { stmt(self) } } diff --git a/consensus/rules/src/blocks.rs b/consensus/rules/src/blocks.rs index ecd6a113..c36f68b8 100644 --- a/consensus/rules/src/blocks.rs +++ b/consensus/rules/src/blocks.rs @@ -21,8 +21,8 @@ pub const PENALTY_FREE_ZONE_1: usize = 20000; pub const PENALTY_FREE_ZONE_2: usize = 60000; pub const PENALTY_FREE_ZONE_5: usize = 300000; -pub const RX_SEEDHASH_EPOCH_BLOCKS: u64 = 2048; -pub const RX_SEEDHASH_EPOCH_LAG: u64 = 64; +pub const RX_SEEDHASH_EPOCH_BLOCKS: usize = 2048; +pub const RX_SEEDHASH_EPOCH_LAG: usize = 64; #[derive(Debug, Clone, Copy, PartialEq, Eq, thiserror::Error)] pub enum BlockError { @@ -52,14 +52,14 @@ pub trait RandomX { } /// Returns if this height is a RandomX seed height. -pub fn is_randomx_seed_height(height: u64) -> bool { +pub fn is_randomx_seed_height(height: usize) -> bool { height % RX_SEEDHASH_EPOCH_BLOCKS == 0 } /// Returns the RandomX seed height for this block. 
/// /// ref: -pub fn randomx_seed_height(height: u64) -> u64 { +pub fn randomx_seed_height(height: usize) -> usize { if height <= RX_SEEDHASH_EPOCH_BLOCKS + RX_SEEDHASH_EPOCH_LAG { 0 } else { @@ -75,7 +75,7 @@ pub fn randomx_seed_height(height: u64) -> u64 { pub fn calculate_pow_hash( randomx_vm: Option<&R>, buf: &[u8], - height: u64, + height: usize, hf: &HardFork, ) -> Result<[u8; 32], BlockError> { if height == 202612 { @@ -89,7 +89,8 @@ pub fn calculate_pow_hash( } else if hf < &HardFork::V10 { cryptonight_hash_v2(buf) } else if hf < &HardFork::V12 { - cryptonight_hash_r(buf, height) + // FIXME: https://github.com/Cuprate/cuprate/issues/167. + cryptonight_hash_r(buf, height as u64) } else { randomx_vm .expect("RandomX VM needed from hf 12") @@ -220,7 +221,7 @@ pub struct ContextToVerifyBlock { /// Contains the median timestamp over the last 60 blocks, if there is less than 60 blocks this should be [`None`] pub median_block_timestamp: Option, /// The current chain height. - pub chain_height: u64, + pub chain_height: usize, /// The current hard-fork. 
pub current_hf: HardFork, /// ref: @@ -263,11 +264,11 @@ pub fn check_block( check_block_weight(block_weight, block_chain_ctx.median_weight_for_block_reward)?; block_size_sanity_check(block_blob_len, block_chain_ctx.effective_median_weight)?; - check_amount_txs(block.txs.len())?; - check_txs_unique(&block.txs)?; + check_amount_txs(block.transactions.len())?; + check_txs_unique(&block.transactions)?; let generated_coins = check_miner_tx( - &block.miner_tx, + &block.miner_transaction, total_fees, block_chain_ctx.chain_height, block_weight, diff --git a/consensus/rules/src/genesis.rs b/consensus/rules/src/genesis.rs index 73bc9516..b7961191 100644 --- a/consensus/rules/src/genesis.rs +++ b/consensus/rules/src/genesis.rs @@ -29,14 +29,14 @@ fn genesis_miner_tx(network: &Network) -> Transaction { pub fn generate_genesis_block(network: &Network) -> Block { Block { header: BlockHeader { - major_version: 1, - minor_version: 0, + hardfork_version: 1, + hardfork_signal: 0, timestamp: 0, previous: [0; 32], nonce: genesis_nonce(network), }, - miner_tx: genesis_miner_tx(network), - txs: vec![], + miner_transaction: genesis_miner_tx(network), + transactions: vec![], } } diff --git a/consensus/rules/src/hard_forks.rs b/consensus/rules/src/hard_forks.rs index 016a51fd..6b983149 100644 --- a/consensus/rules/src/hard_forks.rs +++ b/consensus/rules/src/hard_forks.rs @@ -40,11 +40,11 @@ pub enum HardForkError { /// Information about a given hard-fork. 
#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct HFInfo { - height: u64, - threshold: u64, + height: usize, + threshold: usize, } impl HFInfo { - pub const fn new(height: u64, threshold: u64) -> HFInfo { + pub const fn new(height: usize, threshold: usize) -> HFInfo { HFInfo { height, threshold } } } @@ -202,8 +202,8 @@ impl HardFork { #[inline] pub fn from_block_header(header: &BlockHeader) -> Result<(HardFork, HardFork), HardForkError> { Ok(( - HardFork::from_version(header.major_version)?, - HardFork::from_vote(header.minor_version), + HardFork::from_version(header.hardfork_version)?, + HardFork::from_vote(header.hardfork_signal), )) } @@ -245,7 +245,7 @@ impl HardFork { /// A struct holding the current voting state of the blockchain. #[derive(Debug, Clone, Eq, PartialEq)] pub struct HFVotes { - votes: [u64; NUMB_OF_HARD_FORKS], + votes: [usize; NUMB_OF_HARD_FORKS], vote_list: VecDeque, window_size: usize, } @@ -318,13 +318,13 @@ impl HFVotes { /// Returns the total votes for a hard-fork. /// /// ref: - pub fn votes_for_hf(&self, hf: &HardFork) -> u64 { + pub fn votes_for_hf(&self, hf: &HardFork) -> usize { self.votes[*hf as usize - 1..].iter().sum() } /// Returns the total amount of votes being tracked - pub fn total_votes(&self) -> u64 { - self.votes.iter().sum() + pub fn total_votes(&self) -> usize { + self.vote_list.len() } /// Checks if a future hard fork should be activated, returning the next hard-fork that should be @@ -334,8 +334,8 @@ impl HFVotes { pub fn current_fork( &self, current_hf: &HardFork, - current_height: u64, - window: u64, + current_height: usize, + window: usize, hfs_info: &HFsInfo, ) -> HardFork { let mut current_hf = *current_hf; @@ -361,6 +361,6 @@ impl HFVotes { /// Returns the votes needed for a hard-fork. 
/// /// ref: -pub fn votes_needed(threshold: u64, window: u64) -> u64 { +pub fn votes_needed(threshold: usize, window: usize) -> usize { (threshold * window).div_ceil(100) } diff --git a/consensus/rules/src/hard_forks/tests.rs b/consensus/rules/src/hard_forks/tests.rs index 77ed7515..00dd0360 100644 --- a/consensus/rules/src/hard_forks/tests.rs +++ b/consensus/rules/src/hard_forks/tests.rs @@ -4,7 +4,7 @@ use proptest::{arbitrary::any, prop_assert_eq, prop_compose, proptest}; use crate::hard_forks::{HFVotes, HardFork, NUMB_OF_HARD_FORKS}; -const TEST_WINDOW_SIZE: u64 = 25; +const TEST_WINDOW_SIZE: usize = 25; #[test] fn target_block_time() { @@ -35,9 +35,9 @@ prop_compose! { fn arb_full_hf_votes() ( // we can't use HardFork as for some reason it overflows the stack, so we use u8. - votes in any::<[u8; TEST_WINDOW_SIZE as usize]>() + votes in any::<[u8; TEST_WINDOW_SIZE]>() ) -> HFVotes { - let mut vote_count = HFVotes::new(TEST_WINDOW_SIZE as usize); + let mut vote_count = HFVotes::new(TEST_WINDOW_SIZE); for vote in votes { vote_count.add_vote_for_hf(&HardFork::from_vote(vote % 17)); } @@ -48,9 +48,9 @@ prop_compose! { proptest! 
{ #[test] fn hf_vote_counter_total_correct(hf_votes in arb_full_hf_votes()) { - prop_assert_eq!(hf_votes.total_votes(), u64::try_from(hf_votes.vote_list.len()).unwrap()); + prop_assert_eq!(hf_votes.total_votes(), hf_votes.vote_list.len()); - let mut votes = [0_u64; NUMB_OF_HARD_FORKS]; + let mut votes = [0_usize; NUMB_OF_HARD_FORKS]; for vote in hf_votes.vote_list.iter() { // manually go through the list of votes tallying votes[*vote as usize - 1] += 1; diff --git a/consensus/rules/src/miner_tx.rs b/consensus/rules/src/miner_tx.rs index 90f1a7ee..e4927e39 100644 --- a/consensus/rules/src/miner_tx.rs +++ b/consensus/rules/src/miner_tx.rs @@ -1,7 +1,4 @@ -use monero_serai::{ - ringct::RctType, - transaction::{Input, Output, Timelock, Transaction}, -}; +use monero_serai::transaction::{Input, Output, Timelock, Transaction}; use crate::{is_decomposed_amount, transactions::check_output_types, HardFork, TxVersion}; @@ -35,7 +32,7 @@ const MONEY_SUPPLY: u64 = u64::MAX; /// The minimum block reward per minute, "tail-emission" const MINIMUM_REWARD_PER_MIN: u64 = 3 * 10_u64.pow(11); /// The value which `lock_time` should be for a coinbase output. -const MINER_TX_TIME_LOCKED_BLOCKS: u64 = 60; +const MINER_TX_TIME_LOCKED_BLOCKS: usize = 60; /// Calculates the base block reward without taking away the penalty for expanding /// the block. @@ -88,7 +85,7 @@ fn check_miner_tx_version(tx_version: &TxVersion, hf: &HardFork) -> Result<(), M /// Checks the miner transactions inputs. /// /// ref: -fn check_inputs(inputs: &[Input], chain_height: u64) -> Result<(), MinerTxError> { +fn check_inputs(inputs: &[Input], chain_height: usize) -> Result<(), MinerTxError> { if inputs.len() != 1 { return Err(MinerTxError::IncorrectNumbOfInputs); } @@ -108,15 +105,15 @@ fn check_inputs(inputs: &[Input], chain_height: u64) -> Result<(), MinerTxError> /// Checks the miner transaction has a correct time lock. 
/// /// ref: -fn check_time_lock(time_lock: &Timelock, chain_height: u64) -> Result<(), MinerTxError> { +fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<(), MinerTxError> { match time_lock { - Timelock::Block(till_height) => { + &Timelock::Block(till_height) => { // Lock times above this amount are timestamps not blocks. // This is just for safety though and shouldn't actually be hit. - if till_height > &500_000_000 { + if till_height > 500_000_000 { Err(MinerTxError::InvalidLockTime)?; } - if u64::try_from(*till_height).unwrap() != chain_height + MINER_TX_TIME_LOCKED_BLOCKS { + if till_height != chain_height + MINER_TX_TIME_LOCKED_BLOCKS { Err(MinerTxError::InvalidLockTime) } else { Ok(()) @@ -182,28 +179,33 @@ fn check_total_output_amt( pub fn check_miner_tx( tx: &Transaction, total_fees: u64, - chain_height: u64, + chain_height: usize, block_weight: usize, median_bw: usize, already_generated_coins: u64, hf: &HardFork, ) -> Result { - let tx_version = TxVersion::from_raw(tx.prefix.version).ok_or(MinerTxError::VersionInvalid)?; + let tx_version = TxVersion::from_raw(tx.version()).ok_or(MinerTxError::VersionInvalid)?; check_miner_tx_version(&tx_version, hf)?; // ref: - if hf >= &HardFork::V12 && tx.rct_signatures.rct_type() != RctType::Null { - return Err(MinerTxError::RCTTypeNotNULL); + match tx { + Transaction::V1 { .. } => (), + Transaction::V2 { proofs, .. 
} => { + if hf >= &HardFork::V12 && proofs.is_some() { + return Err(MinerTxError::RCTTypeNotNULL); + } + } } - check_time_lock(&tx.prefix.timelock, chain_height)?; + check_time_lock(&tx.prefix().additional_timelock, chain_height)?; - check_inputs(&tx.prefix.inputs, chain_height)?; + check_inputs(&tx.prefix().inputs, chain_height)?; - check_output_types(&tx.prefix.outputs, hf).map_err(|_| MinerTxError::InvalidOutputType)?; + check_output_types(&tx.prefix().outputs, hf).map_err(|_| MinerTxError::InvalidOutputType)?; let reward = calculate_block_reward(block_weight, median_bw, already_generated_coins, hf); - let total_outs = sum_outputs(&tx.prefix.outputs, hf, &tx_version)?; + let total_outs = sum_outputs(&tx.prefix().outputs, hf, &tx_version)?; check_total_output_amt(total_outs, reward, total_fees, hf) } diff --git a/consensus/rules/src/transactions.rs b/consensus/rules/src/transactions.rs index 91697087..5a0676b0 100644 --- a/consensus/rules/src/transactions.rs +++ b/consensus/rules/src/transactions.rs @@ -91,7 +91,7 @@ impl TxVersion { /// /// ref: /// && - pub fn from_raw(version: u64) -> Option { + pub fn from_raw(version: u8) -> Option { Some(match version { 1 => TxVersion::RingSignatures, 2 => TxVersion::RingCT, @@ -205,7 +205,7 @@ fn check_number_of_outputs( outputs: usize, hf: &HardFork, tx_version: &TxVersion, - rct_type: &RctType, + bp_or_bpp: bool, ) -> Result<(), TransactionError> { if tx_version == &TxVersion::RingSignatures { return Ok(()); @@ -215,18 +215,10 @@ fn check_number_of_outputs( return Err(TransactionError::InvalidNumberOfOutputs); } - match rct_type { - RctType::Bulletproofs - | RctType::BulletproofsCompactAmount - | RctType::Clsag - | RctType::BulletproofsPlus => { - if outputs <= MAX_BULLETPROOFS_OUTPUTS { - Ok(()) - } else { - Err(TransactionError::InvalidNumberOfOutputs) - } - } - _ => Ok(()), + if bp_or_bpp && outputs > MAX_BULLETPROOFS_OUTPUTS { + Err(TransactionError::InvalidNumberOfOutputs) + } else { + Ok(()) } } @@ -239,11 +231,11 
@@ fn check_outputs_semantics( outputs: &[Output], hf: &HardFork, tx_version: &TxVersion, - rct_type: &RctType, + bp_or_bpp: bool, ) -> Result { check_output_types(outputs, hf)?; check_output_keys(outputs)?; - check_number_of_outputs(outputs.len(), hf, tx_version, rct_type)?; + check_number_of_outputs(outputs.len(), hf, tx_version, bp_or_bpp)?; sum_outputs(outputs, hf, tx_version) } @@ -255,14 +247,14 @@ fn check_outputs_semantics( /// pub fn output_unlocked( time_lock: &Timelock, - current_chain_height: u64, + current_chain_height: usize, current_time_lock_timestamp: u64, hf: &HardFork, ) -> bool { match *time_lock { Timelock::None => true, Timelock::Block(unlock_height) => { - check_block_time_lock(unlock_height.try_into().unwrap(), current_chain_height) + check_block_time_lock(unlock_height, current_chain_height) } Timelock::Time(unlock_time) => { check_timestamp_time_lock(unlock_time, current_time_lock_timestamp, hf) @@ -273,7 +265,7 @@ pub fn output_unlocked( /// Returns if a locked output, which uses a block height, can be spent. 
/// /// ref: -fn check_block_time_lock(unlock_height: u64, current_chain_height: u64) -> bool { +fn check_block_time_lock(unlock_height: usize, current_chain_height: usize) -> bool { // current_chain_height = 1 + top height unlock_height <= current_chain_height } @@ -297,7 +289,7 @@ fn check_timestamp_time_lock( /// fn check_all_time_locks( time_locks: &[Timelock], - current_chain_height: u64, + current_chain_height: usize, current_time_lock_timestamp: u64, hf: &HardFork, ) -> Result<(), TransactionError> { @@ -442,8 +434,8 @@ fn check_inputs_sorted(inputs: &[Input], hf: &HardFork) -> Result<(), Transactio /// /// ref: fn check_10_block_lock( - youngest_used_out_height: u64, - current_chain_height: u64, + youngest_used_out_height: usize, + current_chain_height: usize, hf: &HardFork, ) -> Result<(), TransactionError> { if hf >= &HardFork::V12 { @@ -510,7 +502,7 @@ fn check_inputs_semantics(inputs: &[Input], hf: &HardFork) -> Result Result<(), TransactionError> { // This rule is not contained in monero-core explicitly, but it is enforced by how Monero picks ring members. @@ -615,28 +607,41 @@ pub fn check_transaction_semantic( Err(TransactionError::TooBig)?; } - let tx_version = TxVersion::from_raw(tx.prefix.version) - .ok_or(TransactionError::TransactionVersionInvalid)?; + let tx_version = + TxVersion::from_raw(tx.version()).ok_or(TransactionError::TransactionVersionInvalid)?; - let outputs_sum = check_outputs_semantics( - &tx.prefix.outputs, - hf, - &tx_version, - &tx.rct_signatures.rct_type(), - )?; - let inputs_sum = check_inputs_semantics(&tx.prefix.inputs, hf)?; + let bp_or_bpp = match tx { + Transaction::V2 { + proofs: Some(proofs), + .. + } => match proofs.rct_type() { + RctType::AggregateMlsagBorromean | RctType::MlsagBorromean => false, + RctType::MlsagBulletproofs + | RctType::MlsagBulletproofsCompactAmount + | RctType::ClsagBulletproof + | RctType::ClsagBulletproofPlus => true, + }, + Transaction::V2 { proofs: None, .. } | Transaction::V1 { .. 
} => false, + }; - let fee = match tx_version { - TxVersion::RingSignatures => { + let outputs_sum = check_outputs_semantics(&tx.prefix().outputs, hf, &tx_version, bp_or_bpp)?; + let inputs_sum = check_inputs_semantics(&tx.prefix().inputs, hf)?; + + let fee = match tx { + Transaction::V1 { .. } => { if outputs_sum >= inputs_sum { Err(TransactionError::OutputsTooHigh)?; } inputs_sum - outputs_sum } - TxVersion::RingCT => { - ring_ct::ring_ct_semantic_checks(tx, tx_hash, verifier, hf)?; + Transaction::V2 { proofs, .. } => { + let proofs = proofs + .as_ref() + .ok_or(TransactionError::TransactionVersionInvalid)?; - tx.rct_signatures.base.fee + ring_ct::ring_ct_semantic_checks(proofs, tx_hash, verifier, hf)?; + + proofs.base.fee } }; @@ -654,15 +659,15 @@ pub fn check_transaction_semantic( pub fn check_transaction_contextual( tx: &Transaction, tx_ring_members_info: &TxRingMembersInfo, - current_chain_height: u64, + current_chain_height: usize, current_time_lock_timestamp: u64, hf: &HardFork, ) -> Result<(), TransactionError> { - let tx_version = TxVersion::from_raw(tx.prefix.version) - .ok_or(TransactionError::TransactionVersionInvalid)?; + let tx_version = + TxVersion::from_raw(tx.version()).ok_or(TransactionError::TransactionVersionInvalid)?; check_inputs_contextual( - &tx.prefix.inputs, + &tx.prefix().inputs, tx_ring_members_info, current_chain_height, hf, @@ -676,17 +681,22 @@ pub fn check_transaction_contextual( hf, )?; - match tx_version { - TxVersion::RingSignatures => ring_signatures::check_input_signatures( - &tx.prefix.inputs, - &tx.signatures, + match &tx { + Transaction::V1 { prefix, signatures } => ring_signatures::check_input_signatures( + &prefix.inputs, + signatures, &tx_ring_members_info.rings, - &tx.signature_hash(), + // This will only return None on v2 miner txs. 
+ &tx.signature_hash() + .ok_or(TransactionError::TransactionVersionInvalid)?, ), - TxVersion::RingCT => Ok(ring_ct::check_input_signatures( - &tx.signature_hash(), - &tx.prefix.inputs, - &tx.rct_signatures, + Transaction::V2 { prefix, proofs } => Ok(ring_ct::check_input_signatures( + &tx.signature_hash() + .ok_or(TransactionError::TransactionVersionInvalid)?, + &prefix.inputs, + proofs + .as_ref() + .ok_or(TransactionError::TransactionVersionInvalid)?, &tx_ring_members_info.rings, )?), } diff --git a/consensus/rules/src/transactions/contextual_data.rs b/consensus/rules/src/transactions/contextual_data.rs index 6af3ad35..282093dc 100644 --- a/consensus/rules/src/transactions/contextual_data.rs +++ b/consensus/rules/src/transactions/contextual_data.rs @@ -70,7 +70,7 @@ pub struct TxRingMembersInfo { pub rings: Rings, /// Information on the structure of the decoys, must be [`None`] for txs before [`HardFork::V1`] pub decoy_info: Option, - pub youngest_used_out_height: u64, + pub youngest_used_out_height: usize, pub time_locked_outs: Vec, } diff --git a/consensus/rules/src/transactions/ring_ct.rs b/consensus/rules/src/transactions/ring_ct.rs index 38b56ebd..62f71ddf 100644 --- a/consensus/rules/src/transactions/ring_ct.rs +++ b/consensus/rules/src/transactions/ring_ct.rs @@ -1,13 +1,13 @@ use curve25519_dalek::{EdwardsPoint, Scalar}; use hex_literal::hex; use monero_serai::{ + generators::H, ringct::{ clsag::ClsagError, mlsag::{AggregateRingMatrixBuilder, MlsagError, RingMatrix}, - RctPrunable, RctSignatures, RctType, + RctProofs, RctPrunable, RctType, }, - transaction::{Input, Transaction}, - H, + transaction::Input, }; use rand::thread_rng; #[cfg(feature = "rayon")] @@ -48,12 +48,12 @@ fn check_rct_type(ty: &RctType, hf: HardFork, tx_hash: &[u8; 32]) -> Result<(), use RctType as T; match ty { - T::MlsagAggregate | T::MlsagIndividual if hf >= F::V4 && hf < F::V9 => Ok(()), - T::Bulletproofs if hf >= F::V8 && hf < F::V11 => Ok(()), - T::BulletproofsCompactAmount if hf 
>= F::V10 && hf < F::V14 => Ok(()), - T::BulletproofsCompactAmount if GRANDFATHERED_TRANSACTIONS.contains(tx_hash) => Ok(()), - T::Clsag if hf >= F::V13 && hf < F::V16 => Ok(()), - T::BulletproofsPlus if hf >= F::V15 => Ok(()), + T::AggregateMlsagBorromean | T::MlsagBorromean if hf >= F::V4 && hf < F::V9 => Ok(()), + T::MlsagBulletproofs if hf >= F::V8 && hf < F::V11 => Ok(()), + T::MlsagBulletproofsCompactAmount if hf >= F::V10 && hf < F::V14 => Ok(()), + T::MlsagBulletproofsCompactAmount if GRANDFATHERED_TRANSACTIONS.contains(tx_hash) => Ok(()), + T::ClsagBulletproof if hf >= F::V13 && hf < F::V16 => Ok(()), + T::ClsagBulletproofPlus if hf >= F::V15 => Ok(()), _ => Err(RingCTError::TypeNotAllowed), } } @@ -61,20 +61,22 @@ fn check_rct_type(ty: &RctType, hf: HardFork, tx_hash: &[u8; 32]) -> Result<(), /// Checks that the pseudo-outs sum to the same point as the output commitments. /// /// -fn simple_type_balances(rct_sig: &RctSignatures) -> Result<(), RingCTError> { - let pseudo_outs = if rct_sig.rct_type() == RctType::MlsagIndividual { +fn simple_type_balances(rct_sig: &RctProofs) -> Result<(), RingCTError> { + let pseudo_outs = if rct_sig.rct_type() == RctType::MlsagBorromean { &rct_sig.base.pseudo_outs } else { match &rct_sig.prunable { RctPrunable::Clsag { pseudo_outs, .. } + | RctPrunable::MlsagBulletproofsCompactAmount { pseudo_outs, .. } | RctPrunable::MlsagBulletproofs { pseudo_outs, .. } => pseudo_outs, - _ => panic!("RingCT type is not simple!"), + RctPrunable::MlsagBorromean { .. } => &rct_sig.base.pseudo_outs, + RctPrunable::AggregateMlsagBorromean { .. 
} => panic!("RingCT type is not simple!"), } }; let sum_inputs = pseudo_outs.iter().sum::(); - let sum_outputs = rct_sig.base.commitments.iter().sum::() - + Scalar::from(rct_sig.base.fee) * H(); + let sum_outputs = + rct_sig.base.commitments.iter().sum::() + Scalar::from(rct_sig.base.fee) * *H; if sum_inputs == sum_outputs { Ok(()) @@ -89,13 +91,12 @@ fn simple_type_balances(rct_sig: &RctSignatures) -> Result<(), RingCTError> { /// /// fn check_output_range_proofs( - rct_sig: &RctSignatures, + proofs: &RctProofs, mut verifier: impl BatchVerifier, ) -> Result<(), RingCTError> { - let commitments = &rct_sig.base.commitments; + let commitments = &proofs.base.commitments; - match &rct_sig.prunable { - RctPrunable::Null => Err(RingCTError::TypeNotAllowed)?, + match &proofs.prunable { RctPrunable::MlsagBorromean { borromean, .. } | RctPrunable::AggregateMlsagBorromean { borromean, .. } => try_par_iter(borromean) .zip(commitments) @@ -106,10 +107,11 @@ fn check_output_range_proofs( Err(RingCTError::BorromeanRangeInvalid) } }), - RctPrunable::MlsagBulletproofs { bulletproofs, .. } - | RctPrunable::Clsag { bulletproofs, .. } => { + RctPrunable::MlsagBulletproofs { bulletproof, .. } + | RctPrunable::MlsagBulletproofsCompactAmount { bulletproof, .. } + | RctPrunable::Clsag { bulletproof, .. 
} => { if verifier.queue_statement(|verifier| { - bulletproofs.batch_verify(&mut thread_rng(), verifier, (), commitments) + bulletproof.batch_verify(&mut thread_rng(), verifier, commitments) }) { Ok(()) } else { @@ -120,18 +122,18 @@ fn check_output_range_proofs( } pub(crate) fn ring_ct_semantic_checks( - tx: &Transaction, + proofs: &RctProofs, tx_hash: &[u8; 32], verifier: impl BatchVerifier, hf: &HardFork, ) -> Result<(), RingCTError> { - let rct_type = tx.rct_signatures.rct_type(); + let rct_type = proofs.rct_type(); check_rct_type(&rct_type, *hf, tx_hash)?; - check_output_range_proofs(&tx.rct_signatures, verifier)?; + check_output_range_proofs(proofs, verifier)?; - if rct_type != RctType::MlsagAggregate { - simple_type_balances(&tx.rct_signatures)?; + if rct_type != RctType::AggregateMlsagBorromean { + simple_type_balances(proofs)?; } Ok(()) @@ -144,7 +146,7 @@ pub(crate) fn ring_ct_semantic_checks( pub(crate) fn check_input_signatures( msg: &[u8; 32], inputs: &[Input], - rct_sig: &RctSignatures, + proofs: &RctProofs, rings: &Rings, ) -> Result<(), RingCTError> { let Rings::RingCT(rings) = rings else { @@ -155,15 +157,15 @@ pub(crate) fn check_input_signatures( Err(RingCTError::RingInvalid)?; } - let pseudo_outs = match &rct_sig.prunable { + let pseudo_outs = match &proofs.prunable { RctPrunable::MlsagBulletproofs { pseudo_outs, .. } + | RctPrunable::MlsagBulletproofsCompactAmount { pseudo_outs, .. } | RctPrunable::Clsag { pseudo_outs, .. } => pseudo_outs.as_slice(), - RctPrunable::MlsagBorromean { .. } => rct_sig.base.pseudo_outs.as_slice(), - RctPrunable::AggregateMlsagBorromean { .. } | RctPrunable::Null => &[], + RctPrunable::MlsagBorromean { .. } => proofs.base.pseudo_outs.as_slice(), + RctPrunable::AggregateMlsagBorromean { .. } => &[], }; - match &rct_sig.prunable { - RctPrunable::Null => Err(RingCTError::TypeNotAllowed)?, + match &proofs.prunable { RctPrunable::AggregateMlsagBorromean { mlsag, .. 
} => { let key_images = inputs .iter() @@ -176,11 +178,14 @@ pub(crate) fn check_input_signatures( .collect::>(); let mut matrix = - AggregateRingMatrixBuilder::new(&rct_sig.base.commitments, rct_sig.base.fee); + AggregateRingMatrixBuilder::new(&proofs.base.commitments, proofs.base.fee); + rings.iter().try_for_each(|ring| matrix.push_ring(ring))?; + Ok(mlsag.verify(msg, &matrix.build()?, &key_images)?) } RctPrunable::MlsagBorromean { mlsags, .. } + | RctPrunable::MlsagBulletproofsCompactAmount { mlsags, .. } | RctPrunable::MlsagBulletproofs { mlsags, .. } => try_par_iter(mlsags) .zip(pseudo_outs) .zip(inputs) @@ -216,18 +221,21 @@ mod tests { #[test] fn grandfathered_bulletproofs2() { - assert!( - check_rct_type(&RctType::BulletproofsCompactAmount, HardFork::V14, &[0; 32]).is_err() - ); + assert!(check_rct_type( + &RctType::MlsagBulletproofsCompactAmount, + HardFork::V14, + &[0; 32] + ) + .is_err()); assert!(check_rct_type( - &RctType::BulletproofsCompactAmount, + &RctType::MlsagBulletproofsCompactAmount, HardFork::V14, &GRANDFATHERED_TRANSACTIONS[0] ) .is_ok()); assert!(check_rct_type( - &RctType::BulletproofsCompactAmount, + &RctType::MlsagBulletproofsCompactAmount, HardFork::V14, &GRANDFATHERED_TRANSACTIONS[1] ) diff --git a/consensus/rules/src/transactions/tests.rs b/consensus/rules/src/transactions/tests.rs index 1d7591b3..0bea08ce 100644 --- a/consensus/rules/src/transactions/tests.rs +++ b/consensus/rules/src/transactions/tests.rs @@ -97,31 +97,6 @@ fn test_torsion_ki() { } } -/// Returns a strategy that resolves to a [`RctType`] that uses -/// BPs(+). -#[allow(unreachable_code)] -#[allow(clippy::diverging_sub_expression)] -fn bulletproof_rct_type() -> BoxedStrategy { - return prop_oneof![ - Just(RctType::Bulletproofs), - Just(RctType::BulletproofsCompactAmount), - Just(RctType::Clsag), - Just(RctType::BulletproofsPlus), - ] - .boxed(); - - // Here to make sure this is updated when needed. 
- match unreachable!() { - RctType::Null => {} - RctType::MlsagAggregate => {} - RctType::MlsagIndividual => {} - RctType::Bulletproofs => {} - RctType::BulletproofsCompactAmount => {} - RctType::Clsag => {} - RctType::BulletproofsPlus => {} - }; -} - prop_compose! { /// Returns a valid prime-order point. fn random_point()(bytes in any::<[u8; 32]>()) -> EdwardsPoint { @@ -240,13 +215,13 @@ proptest! { } #[test] - fn test_valid_number_of_outputs(valid_numb_outs in 2..17_usize, rct_type in bulletproof_rct_type()) { - prop_assert!(check_number_of_outputs(valid_numb_outs, &HardFork::V16, &TxVersion::RingCT, &rct_type).is_ok()); + fn test_valid_number_of_outputs(valid_numb_outs in 2..17_usize) { + prop_assert!(check_number_of_outputs(valid_numb_outs, &HardFork::V16, &TxVersion::RingCT, true).is_ok()); } #[test] - fn test_invalid_number_of_outputs(numb_outs in 17..usize::MAX, rct_type in bulletproof_rct_type()) { - prop_assert!(check_number_of_outputs(numb_outs, &HardFork::V16, &TxVersion::RingCT, &rct_type).is_err()); + fn test_invalid_number_of_outputs(numb_outs in 17..usize::MAX) { + prop_assert!(check_number_of_outputs(numb_outs, &HardFork::V16, &TxVersion::RingCT, true).is_err()); } #[test] @@ -256,7 +231,7 @@ proptest! 
{ } #[test] - fn test_block_unlock_time(height in 1..u64::MAX) { + fn test_block_unlock_time(height in 1..usize::MAX) { prop_assert!(check_block_time_lock(height, height)); prop_assert!(!check_block_time_lock(height, height - 1)); prop_assert!(check_block_time_lock(height, height+1)); diff --git a/consensus/src/batch_verifier.rs b/consensus/src/batch_verifier.rs index 44493a62..69018acc 100644 --- a/consensus/src/batch_verifier.rs +++ b/consensus/src/batch_verifier.rs @@ -1,12 +1,14 @@ use std::{cell::RefCell, ops::DerefMut}; -use multiexp::BatchVerifier as InternalBatchVerifier; +use monero_serai::ringct::bulletproofs::BatchVerifier as InternalBatchVerifier; use rayon::prelude::*; use thread_local::ThreadLocal; +use cuprate_consensus_rules::batch_verifier::BatchVerifier; + /// A multithreaded batch verifier. pub struct MultiThreadedBatchVerifier { - internal: ThreadLocal>>, + internal: ThreadLocal>, } impl MultiThreadedBatchVerifier { @@ -22,19 +24,22 @@ impl MultiThreadedBatchVerifier { .into_iter() .map(RefCell::into_inner) .par_bridge() - .find_any(|batch_verifier| !batch_verifier.verify_vartime()) - .is_none() + .try_for_each(|batch_verifier| { + if batch_verifier.verify() { + Ok(()) + } else { + Err(()) + } + }) + .is_ok() } } -impl cuprate_consensus_rules::batch_verifier::BatchVerifier for &'_ MultiThreadedBatchVerifier { - fn queue_statement( - &mut self, - stmt: impl FnOnce(&mut InternalBatchVerifier<(), dalek_ff_group::EdwardsPoint>) -> R, - ) -> R { +impl BatchVerifier for &'_ MultiThreadedBatchVerifier { + fn queue_statement(&mut self, stmt: impl FnOnce(&mut InternalBatchVerifier) -> R) -> R { let mut verifier = self .internal - .get_or(|| RefCell::new(InternalBatchVerifier::new(32))) + .get_or(|| RefCell::new(InternalBatchVerifier::new())) .borrow_mut(); stmt(verifier.deref_mut()) diff --git a/consensus/src/block.rs b/consensus/src/block.rs index 1b36eb92..f5aac5ed 100644 --- a/consensus/src/block.rs +++ b/consensus/src/block.rs @@ -57,7 +57,7 @@ pub 
struct PreparedBlockExPow { /// The block's hash. pub block_hash: [u8; 32], /// The height of the block. - pub height: u64, + pub height: usize, /// The weight of the block's miner transaction. pub miner_tx_weight: usize, @@ -74,7 +74,7 @@ impl PreparedBlockExPow { let (hf_version, hf_vote) = HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?; - let Some(Input::Gen(height)) = block.miner_tx.prefix.inputs.first() else { + let Some(Input::Gen(height)) = block.miner_transaction.prefix().inputs.first() else { Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputNotOfTypeGen, )))? @@ -88,7 +88,7 @@ impl PreparedBlockExPow { block_hash: block.hash(), height: *height, - miner_tx_weight: block.miner_tx.weight(), + miner_tx_weight: block.miner_transaction.weight(), block, }) } @@ -128,7 +128,7 @@ impl PreparedBlock { let (hf_version, hf_vote) = HardFork::from_block_header(&block.header).map_err(BlockError::HardForkError)?; - let [Input::Gen(height)] = &block.miner_tx.prefix.inputs[..] else { + let [Input::Gen(height)] = &block.miner_transaction.prefix().inputs[..] else { Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputNotOfTypeGen, )))? 
@@ -142,12 +142,12 @@ impl PreparedBlock { block_hash: block.hash(), pow_hash: calculate_pow_hash( randomx_vm, - &block.serialize_hashable(), + &block.serialize_pow_hash(), *height, &hf_version, )?, - miner_tx_weight: block.miner_tx.weight(), + miner_tx_weight: block.miner_transaction.weight(), block, }) } @@ -172,12 +172,12 @@ impl PreparedBlock { block_hash: block.block_hash, pow_hash: calculate_pow_hash( randomx_vm, - &block.block.serialize_hashable(), + &block.block.serialize_pow_hash(), block.height, &block.hf_version, )?, - miner_tx_weight: block.block.miner_tx.weight(), + miner_tx_weight: block.block.miner_transaction.weight(), block: block.block, }) } @@ -359,8 +359,8 @@ where // Set up the block and just pass it to [`verify_prepped_main_chain_block`] - // We just use the raw `major_version` here, no need to turn it into a `HardFork`. - let rx_vms = if block.header.major_version < 12 { + // We just use the raw `hardfork_version` here, no need to turn it into a `HardFork`. + let rx_vms = if block.header.hardfork_version < 12 { HashMap::new() } else { let BlockChainContextResponse::RxVms(rx_vms) = context_svc @@ -443,12 +443,12 @@ where check_block_pow(&prepped_block.pow_hash, context.next_difficulty) .map_err(ConsensusError::Block)?; - if prepped_block.block.txs.len() != txs.len() { + if prepped_block.block.transactions.len() != txs.len() { return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect); } - if !prepped_block.block.txs.is_empty() { - for (expected_tx_hash, tx) in prepped_block.block.txs.iter().zip(txs.iter()) { + if !prepped_block.block.transactions.is_empty() { + for (expected_tx_hash, tx) in prepped_block.block.transactions.iter().zip(txs.iter()) { if expected_tx_hash != &tx.tx_hash { return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect); } diff --git a/consensus/src/block/alt_block.rs b/consensus/src/block/alt_block.rs index cf6f2132..89440834 100644 --- a/consensus/src/block/alt_block.rs +++ 
b/consensus/src/block/alt_block.rs @@ -63,7 +63,7 @@ where }; // Check if the block's miner input is formed correctly. - let [Input::Gen(height)] = &block.miner_tx.prefix.inputs[..] else { + let [Input::Gen(height)] = &block.miner_transaction.prefix().inputs[..] else { Err(ConsensusError::Block(BlockError::MinerTxError( MinerTxError::InputNotOfTypeGen, )))? @@ -79,7 +79,7 @@ where let prepped_block = { let rx_vm = alt_rx_vm( alt_context_cache.chain_height, - block.header.major_version, + block.header.hardfork_version, alt_context_cache.parent_chain, &mut alt_context_cache, &mut context_svc, @@ -188,7 +188,7 @@ where /// /// If the `hf` is less than 12 (the height RX activates), then [`None`] is returned. async fn alt_rx_vm( - block_height: u64, + block_height: usize, hf: u8, parent_chain: Chain, alt_chain_context: &mut AltChainContextCache, diff --git a/consensus/src/block/free.rs b/consensus/src/block/free.rs index 8a61e801..46698e51 100644 --- a/consensus/src/block/free.rs +++ b/consensus/src/block/free.rs @@ -12,14 +12,14 @@ pub(crate) fn pull_ordered_transactions( block: &Block, mut txs: HashMap<[u8; 32], TransactionVerificationData>, ) -> Result, ExtendedConsensusError> { - if block.txs.len() != txs.len() { + if block.transactions.len() != txs.len() { return Err(ExtendedConsensusError::TxsIncludedWithBlockIncorrect); } let mut ordered_txs = Vec::with_capacity(txs.len()); - if !block.txs.is_empty() { - for tx_hash in &block.txs { + if !block.transactions.is_empty() { + for tx_hash in &block.transactions { let tx = txs .remove(tx_hash) .ok_or(ExtendedConsensusError::TxsIncludedWithBlockIncorrect)?; diff --git a/consensus/src/context.rs b/consensus/src/context.rs index fffbe90b..26be75c3 100644 --- a/consensus/src/context.rs +++ b/consensus/src/context.rs @@ -202,7 +202,7 @@ pub struct NewBlockData { /// The blocks hash. pub block_hash: [u8; 32], /// The blocks height. - pub height: u64, + pub height: usize, /// The blocks timestamp. 
pub timestamp: u64, /// The blocks weight. @@ -246,7 +246,7 @@ pub enum BlockChainContextRequest { /// # Panics /// /// This will panic if the number of blocks will pop the genesis block. - numb_blocks: u64, + numb_blocks: usize, }, /// Clear the alt chain context caches. ClearAltCache, @@ -289,7 +289,7 @@ pub enum BlockChainContextRequest { /// handle getting the randomX VM of an alt chain. AltChainRxVM { /// The height the RandomX VM is needed for. - height: u64, + height: usize, /// The chain to look in for the seed. chain: Chain, /// An internal token to prevent external crates calling this request. @@ -313,7 +313,7 @@ pub enum BlockChainContextResponse { /// Blockchain context response. Context(BlockChainContext), /// A map of seed height to RandomX VMs. - RxVms(HashMap>), + RxVms(HashMap>), /// A list of difficulties. BatchDifficulties(Vec), /// An alt chain context cache. diff --git a/consensus/src/context/alt_chains.rs b/consensus/src/context/alt_chains.rs index f0c391d6..5586226b 100644 --- a/consensus/src/context/alt_chains.rs +++ b/consensus/src/context/alt_chains.rs @@ -32,10 +32,10 @@ pub struct AltChainContextCache { pub difficulty_cache: Option, /// A cached RX VM. - pub cached_rx_vm: Option<(u64, Arc)>, + pub cached_rx_vm: Option<(usize, Arc)>, /// The chain height of the alt chain. - pub chain_height: u64, + pub chain_height: usize, /// The top hash of the alt chain. pub top_hash: [u8; 32], /// The [`ChainID`] of the alt chain. @@ -48,7 +48,7 @@ impl AltChainContextCache { /// Add a new block to the cache. 
pub fn add_new_block( &mut self, - height: u64, + height: usize, block_hash: [u8; 32], block_weight: usize, long_term_block_weight: usize, diff --git a/consensus/src/context/difficulty.rs b/consensus/src/context/difficulty.rs index 9ec0f1ea..eb67cf59 100644 --- a/consensus/src/context/difficulty.rs +++ b/consensus/src/context/difficulty.rs @@ -48,8 +48,8 @@ impl DifficultyCacheConfig { } /// Returns the total amount of blocks we need to track to calculate difficulty - pub fn total_block_count(&self) -> u64 { - (self.window + self.lag).try_into().unwrap() + pub fn total_block_count(&self) -> usize { + self.window + self.lag } /// The amount of blocks we account for after removing the outliers. @@ -78,7 +78,7 @@ pub struct DifficultyCache { /// The current cumulative difficulty of the chain. pub(crate) cumulative_difficulties: VecDeque, /// The last height we accounted for. - pub(crate) last_accounted_height: u64, + pub(crate) last_accounted_height: usize, /// The config pub(crate) config: DifficultyCacheConfig, } @@ -87,7 +87,7 @@ impl DifficultyCache { /// Initialize the difficulty cache from the specified chain height. 
#[instrument(name = "init_difficulty_cache", level = "info", skip(database, config))] pub async fn init_from_chain_height( - chain_height: u64, + chain_height: usize, config: DifficultyCacheConfig, database: D, chain: Chain, @@ -104,7 +104,7 @@ impl DifficultyCache { let (timestamps, cumulative_difficulties) = get_blocks_in_pow_info(database.clone(), block_start..chain_height, chain).await?; - debug_assert_eq!(timestamps.len() as u64, chain_height - block_start); + debug_assert_eq!(timestamps.len(), chain_height - block_start); tracing::info!( "Current chain height: {}, accounting for {} blocks timestamps", @@ -132,14 +132,10 @@ impl DifficultyCache { #[instrument(name = "pop_blocks_diff_cache", skip_all, fields(numb_blocks = numb_blocks))] pub async fn pop_blocks_main_chain( &mut self, - numb_blocks: u64, + numb_blocks: usize, database: D, ) -> Result<(), ExtendedConsensusError> { - let Some(retained_blocks) = self - .timestamps - .len() - .checked_sub(usize::try_from(numb_blocks).unwrap()) - else { + let Some(retained_blocks) = self.timestamps.len().checked_sub(numb_blocks) else { // More blocks to pop than we have in the cache, so just restart a new cache. *self = Self::init_from_chain_height( self.last_accounted_height - numb_blocks + 1, @@ -167,7 +163,7 @@ impl DifficultyCache { database, new_start_height // current_chain_height - self.timestamps.len() blocks are already in the cache. - ..(current_chain_height - u64::try_from(self.timestamps.len()).unwrap()), + ..(current_chain_height - self.timestamps.len()), Chain::Main, ) .await?; @@ -187,7 +183,7 @@ impl DifficultyCache { } /// Add a new block to the difficulty cache. 
- pub fn new_block(&mut self, height: u64, timestamp: u64, cumulative_difficulty: u128) { + pub fn new_block(&mut self, height: usize, timestamp: u64, cumulative_difficulty: u128) { assert_eq!(self.last_accounted_height + 1, height); self.last_accounted_height += 1; @@ -199,7 +195,7 @@ impl DifficultyCache { self.cumulative_difficulties .push_back(cumulative_difficulty); - if u64::try_from(self.timestamps.len()).unwrap() > self.config.total_block_count() { + if self.timestamps.len() > self.config.total_block_count() { self.timestamps.pop_front(); self.cumulative_difficulties.pop_front(); } @@ -244,7 +240,7 @@ impl DifficultyCache { let last_cum_diff = cumulative_difficulties.back().copied().unwrap_or(1); cumulative_difficulties.push_back(last_cum_diff + *difficulties.last().unwrap()); - if u64::try_from(timestamps.len()).unwrap() > self.config.total_block_count() { + if timestamps.len() > self.config.total_block_count() { diff_info_popped.push(( timestamps.pop_front().unwrap(), cumulative_difficulties.pop_front().unwrap(), @@ -266,22 +262,21 @@ impl DifficultyCache { /// /// Will return [`None`] if there aren't enough blocks. pub fn median_timestamp(&self, numb_blocks: usize) -> Option { - let mut timestamps = - if self.last_accounted_height + 1 == u64::try_from(numb_blocks).unwrap() { - // if the chain height is equal to `numb_blocks` add the genesis block. - // otherwise if the chain height is less than `numb_blocks` None is returned - // and if it's more it would be excluded from calculations. - let mut timestamps = self.timestamps.clone(); - // all genesis blocks have a timestamp of 0. - // https://cuprate.github.io/monero-book/consensus_rules/genesis_block.html - timestamps.push_front(0); - timestamps.into() - } else { - self.timestamps - .range(self.timestamps.len().checked_sub(numb_blocks)?..) 
- .copied() - .collect::>() - }; + let mut timestamps = if self.last_accounted_height + 1 == numb_blocks { + // if the chain height is equal to `numb_blocks` add the genesis block. + // otherwise if the chain height is less than `numb_blocks` None is returned + // and if it's more it would be excluded from calculations. + let mut timestamps = self.timestamps.clone(); + // all genesis blocks have a timestamp of 0. + // https://cuprate.github.io/monero-book/consensus_rules/genesis_block.html + timestamps.push_front(0); + timestamps.into() + } else { + self.timestamps + .range(self.timestamps.len().checked_sub(numb_blocks)?..) + .copied() + .collect::>() + }; timestamps.sort_unstable(); debug_assert_eq!(timestamps.len(), numb_blocks); @@ -368,7 +363,7 @@ fn get_window_start_and_end( #[instrument(name = "get_blocks_timestamps", skip(database), level = "info")] async fn get_blocks_in_pow_info( database: D, - block_heights: Range, + block_heights: Range, chain: Chain, ) -> Result<(VecDeque, VecDeque), ExtendedConsensusError> { tracing::info!("Getting blocks timestamps"); diff --git a/consensus/src/context/hardforks.rs b/consensus/src/context/hardforks.rs index 7972a0ee..057e1c34 100644 --- a/consensus/src/context/hardforks.rs +++ b/consensus/src/context/hardforks.rs @@ -14,7 +14,7 @@ use crate::{Database, ExtendedConsensusError}; /// The default amount of hard-fork votes to track to decide on activation of a hard-fork. /// /// ref: -const DEFAULT_WINDOW_SIZE: u64 = 10080; // supermajority window check length - a week +const DEFAULT_WINDOW_SIZE: usize = 10080; // supermajority window check length - a week /// Configuration for hard-forks. /// @@ -23,7 +23,7 @@ pub struct HardForkConfig { /// The network we are on. pub(crate) info: HFsInfo, /// The amount of votes we are taking into account to decide on a fork activation. 
- pub(crate) window: u64, + pub(crate) window: usize, } impl HardForkConfig { @@ -64,14 +64,14 @@ pub struct HardForkState { pub(crate) votes: HFVotes, /// The last block height accounted for. - pub(crate) last_height: u64, + pub(crate) last_height: usize, } impl HardForkState { /// Initialize the [`HardForkState`] from the specified chain height. #[instrument(name = "init_hardfork_state", skip(config, database), level = "info")] pub async fn init_from_chain_height( - chain_height: u64, + chain_height: usize, config: HardForkConfig, mut database: D, ) -> Result { @@ -79,12 +79,8 @@ impl HardForkState { let block_start = chain_height.saturating_sub(config.window); - let votes = get_votes_in_range( - database.clone(), - block_start..chain_height, - usize::try_from(config.window).unwrap(), - ) - .await?; + let votes = + get_votes_in_range(database.clone(), block_start..chain_height, config.window).await?; if chain_height > config.window { debug_assert_eq!(votes.total_votes(), config.window) @@ -129,7 +125,7 @@ impl HardForkState { /// This _must_ only be used on a main-chain cache. pub async fn pop_blocks_main_chain( &mut self, - numb_blocks: u64, + numb_blocks: usize, database: D, ) -> Result<(), ExtendedConsensusError> { let Some(retained_blocks) = self.votes.total_votes().checked_sub(self.config.window) else { @@ -153,19 +149,18 @@ impl HardForkState { ..current_chain_height .saturating_sub(numb_blocks) .saturating_sub(retained_blocks), - usize::try_from(numb_blocks).unwrap(), + numb_blocks, ) .await?; - self.votes - .reverse_blocks(usize::try_from(numb_blocks).unwrap(), oldest_votes); + self.votes.reverse_blocks(numb_blocks, oldest_votes); self.last_height -= numb_blocks; Ok(()) } /// Add a new block to the cache. - pub fn new_block(&mut self, vote: HardFork, height: u64) { + pub fn new_block(&mut self, vote: HardFork, height: usize) { // We don't _need_ to take in `height` but it's for safety, so we don't silently loose track // of blocks. 
assert_eq!(self.last_height + 1, height); @@ -209,7 +204,7 @@ impl HardForkState { #[instrument(name = "get_votes", skip(database))] async fn get_votes_in_range( database: D, - block_heights: Range, + block_heights: Range, window_size: usize, ) -> Result { let mut votes = HFVotes::new(window_size); diff --git a/consensus/src/context/rx_vms.rs b/consensus/src/context/rx_vms.rs index 649146f9..01aa9738 100644 --- a/consensus/src/context/rx_vms.rs +++ b/consensus/src/context/rx_vms.rs @@ -74,9 +74,9 @@ impl RandomX for RandomXVM { #[derive(Clone, Debug)] pub struct RandomXVMCache { /// The top [`RX_SEEDS_CACHED`] RX seeds. - pub(crate) seeds: VecDeque<(u64, [u8; 32])>, + pub(crate) seeds: VecDeque<(usize, [u8; 32])>, /// The VMs for `seeds` (if after hf 12, otherwise this will be empty). - pub(crate) vms: HashMap>, + pub(crate) vms: HashMap>, /// A single cached VM that was given to us from a part of Cuprate. pub(crate) cached_vm: Option<([u8; 32], Arc)>, @@ -85,7 +85,7 @@ pub struct RandomXVMCache { impl RandomXVMCache { #[instrument(name = "init_rx_vm_cache", level = "info", skip(database))] pub async fn init_from_chain_height( - chain_height: u64, + chain_height: usize, hf: &HardFork, database: D, ) -> Result { @@ -94,7 +94,8 @@ impl RandomXVMCache { tracing::debug!("last {RX_SEEDS_CACHED} randomX seed heights: {seed_heights:?}",); - let seeds: VecDeque<(u64, [u8; 32])> = seed_heights.into_iter().zip(seed_hashes).collect(); + let seeds: VecDeque<(usize, [u8; 32])> = + seed_heights.into_iter().zip(seed_hashes).collect(); let vms = if hf >= &HardFork::V12 { tracing::debug!("Creating RandomX VMs"); @@ -132,7 +133,7 @@ impl RandomXVMCache { /// of them first. pub async fn get_alt_vm( &mut self, - height: u64, + height: usize, chain: Chain, database: D, ) -> Result, ExtendedConsensusError> { @@ -161,7 +162,7 @@ impl RandomXVMCache { } /// Get the main-chain RandomX VMs. 
- pub async fn get_vms(&mut self) -> HashMap> { + pub async fn get_vms(&mut self) -> HashMap> { match self.seeds.len().checked_sub(self.vms.len()) { // No difference in the amount of seeds to VMs. Some(0) => (), @@ -213,7 +214,7 @@ impl RandomXVMCache { } /// Removes all the RandomX VMs above the `new_height`. - pub fn pop_blocks_main_chain(&mut self, new_height: u64) { + pub fn pop_blocks_main_chain(&mut self, new_height: usize) { self.seeds.retain(|(height, _)| *height < new_height); self.vms.retain(|height, _| *height < new_height); } @@ -221,7 +222,7 @@ impl RandomXVMCache { /// Add a new block to the VM cache. /// /// hash is the block hash not the blocks PoW hash. - pub fn new_block(&mut self, height: u64, hash: &[u8; 32]) { + pub fn new_block(&mut self, height: usize, hash: &[u8; 32]) { if is_randomx_seed_height(height) { tracing::debug!("Block {height} is a randomX seed height, adding it to the cache.",); @@ -242,7 +243,7 @@ impl RandomXVMCache { /// Get the last `amount` of RX seeds, the top height returned here will not necessarily be the RX VM for the top block /// in the chain as VMs include some lag before a seed activates. -pub(crate) fn get_last_rx_seed_heights(mut last_height: u64, mut amount: usize) -> Vec { +pub(crate) fn get_last_rx_seed_heights(mut last_height: usize, mut amount: usize) -> Vec { let mut seeds = Vec::with_capacity(amount); if is_randomx_seed_height(last_height) { seeds.push(last_height); @@ -265,7 +266,7 @@ pub(crate) fn get_last_rx_seed_heights(mut last_height: u64, mut amount: usize) /// Gets the block hashes for the heights specified. 
async fn get_block_hashes( - heights: Vec, + heights: Vec, database: D, ) -> Result, ExtendedConsensusError> { let mut fut = FuturesOrdered::new(); diff --git a/consensus/src/context/task.rs b/consensus/src/context/task.rs index 79ddf4c8..8939446a 100644 --- a/consensus/src/context/task.rs +++ b/consensus/src/context/task.rs @@ -52,7 +52,7 @@ pub struct ContextTask { alt_chain_cache_map: AltChainMap, /// The current chain height. - chain_height: u64, + chain_height: usize, /// The top block hash. top_block_hash: [u8; 32], /// The total amount of coins generated. diff --git a/consensus/src/context/weight.rs b/consensus/src/context/weight.rs index 7cd5454e..4c891392 100644 --- a/consensus/src/context/weight.rs +++ b/consensus/src/context/weight.rs @@ -24,21 +24,21 @@ use cuprate_types::{ use crate::{Database, ExtendedConsensusError, HardFork}; /// The short term block weight window. -const SHORT_TERM_WINDOW: u64 = 100; +const SHORT_TERM_WINDOW: usize = 100; /// The long term block weight window. -const LONG_TERM_WINDOW: u64 = 100000; +const LONG_TERM_WINDOW: usize = 100000; /// Configuration for the block weight cache. /// #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct BlockWeightsCacheConfig { - short_term_window: u64, - long_term_window: u64, + short_term_window: usize, + long_term_window: usize, } impl BlockWeightsCacheConfig { /// Creates a new [`BlockWeightsCacheConfig`] - pub const fn new(short_term_window: u64, long_term_window: u64) -> BlockWeightsCacheConfig { + pub const fn new(short_term_window: usize, long_term_window: usize) -> BlockWeightsCacheConfig { BlockWeightsCacheConfig { short_term_window, long_term_window, @@ -67,7 +67,7 @@ pub struct BlockWeightsCache { long_term_weights: RollingMedian, /// The height of the top block. 
- pub(crate) tip_height: u64, + pub(crate) tip_height: usize, pub(crate) config: BlockWeightsCacheConfig, } @@ -76,7 +76,7 @@ impl BlockWeightsCache { /// Initialize the [`BlockWeightsCache`] at the the given chain height. #[instrument(name = "init_weight_cache", level = "info", skip(database, config))] pub async fn init_from_chain_height( - chain_height: u64, + chain_height: usize, config: BlockWeightsCacheConfig, database: D, chain: Chain, @@ -101,17 +101,11 @@ impl BlockWeightsCache { Ok(BlockWeightsCache { short_term_block_weights: rayon_spawn_async(move || { - RollingMedian::from_vec( - short_term_block_weights, - usize::try_from(config.short_term_window).unwrap(), - ) + RollingMedian::from_vec(short_term_block_weights, config.short_term_window) }) .await, long_term_weights: rayon_spawn_async(move || { - RollingMedian::from_vec( - long_term_weights, - usize::try_from(config.long_term_window).unwrap(), - ) + RollingMedian::from_vec(long_term_weights, config.long_term_window) }) .await, tip_height: chain_height - 1, @@ -125,10 +119,10 @@ impl BlockWeightsCache { #[instrument(name = "pop_blocks_weight_cache", skip_all, fields(numb_blocks = numb_blocks))] pub async fn pop_blocks_main_chain( &mut self, - numb_blocks: u64, + numb_blocks: usize, database: D, ) -> Result<(), ExtendedConsensusError> { - if self.long_term_weights.window_len() <= usize::try_from(numb_blocks).unwrap() { + if self.long_term_weights.window_len() <= numb_blocks { // More blocks to pop than we have in the cache, so just restart a new cache. *self = Self::init_from_chain_height( self.tip_height - numb_blocks + 1, @@ -150,7 +144,7 @@ impl BlockWeightsCache { let old_long_term_weights = get_long_term_weight_in_range( new_long_term_start_height // current_chain_height - self.long_term_weights.len() blocks are already in the cache. 
- ..(chain_height - u64::try_from(self.long_term_weights.window_len()).unwrap()), + ..(chain_height - self.long_term_weights.window_len()), database.clone(), Chain::Main, ) @@ -163,11 +157,11 @@ impl BlockWeightsCache { let old_short_term_weights = get_blocks_weight_in_range( new_short_term_start_height // current_chain_height - self.long_term_weights.len() blocks are already in the cache. - ..(chain_height - u64::try_from(self.short_term_block_weights.window_len()).unwrap()), + ..(chain_height - self.short_term_block_weights.window_len()), database, - Chain::Main + Chain::Main, ) - .await?; + .await?; for _ in 0..numb_blocks { self.short_term_block_weights.pop_back(); @@ -186,7 +180,7 @@ impl BlockWeightsCache { /// /// The block_height **MUST** be one more than the last height the cache has /// seen. - pub fn new_block(&mut self, block_height: u64, block_weight: usize, long_term_weight: usize) { + pub fn new_block(&mut self, block_height: usize, block_weight: usize, long_term_weight: usize) { assert_eq!(self.tip_height + 1, block_height); self.tip_height += 1; tracing::debug!( @@ -290,7 +284,7 @@ pub fn calculate_block_long_term_weight( /// Gets the block weights from the blocks with heights in the range provided. #[instrument(name = "get_block_weights", skip(database))] async fn get_blocks_weight_in_range( - range: Range, + range: Range, database: D, chain: Chain, ) -> Result, ExtendedConsensusError> { @@ -314,7 +308,7 @@ async fn get_blocks_weight_in_range( /// Gets the block long term weights from the blocks with heights in the range provided. 
#[instrument(name = "get_long_term_weights", skip(database), level = "info")] async fn get_long_term_weight_in_range( - range: Range, + range: Range, database: D, chain: Chain, ) -> Result, ExtendedConsensusError> { diff --git a/consensus/src/tests/context.rs b/consensus/src/tests/context.rs index 8c3841ee..bbf7bb0d 100644 --- a/consensus/src/tests/context.rs +++ b/consensus/src/tests/context.rs @@ -29,10 +29,10 @@ const TEST_CONTEXT_CONFIG: ContextConfig = ContextConfig { #[tokio::test] async fn context_invalidated_on_new_block() -> Result<(), tower::BoxError> { - const BLOCKCHAIN_HEIGHT: u64 = 6000; + const BLOCKCHAIN_HEIGHT: usize = 6000; let mut runner = TestRunner::default(); - let db = arb_dummy_database(BLOCKCHAIN_HEIGHT.try_into().unwrap()) + let db = arb_dummy_database(BLOCKCHAIN_HEIGHT) .new_tree(&mut runner) .unwrap() .current(); @@ -71,10 +71,10 @@ async fn context_invalidated_on_new_block() -> Result<(), tower::BoxError> { #[tokio::test] async fn context_height_correct() -> Result<(), tower::BoxError> { - const BLOCKCHAIN_HEIGHT: u64 = 6000; + const BLOCKCHAIN_HEIGHT: usize = 6000; let mut runner = TestRunner::default(); - let db = arb_dummy_database(BLOCKCHAIN_HEIGHT.try_into().unwrap()) + let db = arb_dummy_database(BLOCKCHAIN_HEIGHT) .new_tree(&mut runner) .unwrap() .current(); diff --git a/consensus/src/tests/context/difficulty.rs b/consensus/src/tests/context/difficulty.rs index b59f62ef..a79ae9b7 100644 --- a/consensus/src/tests/context/difficulty.rs +++ b/consensus/src/tests/context/difficulty.rs @@ -63,10 +63,7 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> { let cfg = DifficultyCacheConfig::main_net(); let mut db_builder = DummyDatabaseBuilder::default(); - for (cum_dif, timestamp) in DIF_3000000_3002000 - .iter() - .take(cfg.total_block_count() as usize) - { + for (cum_dif, timestamp) in DIF_3000000_3002000.iter().take(cfg.total_block_count()) { db_builder.add_block( 
DummyBlockExtendedHeader::default().with_difficulty_info(*timestamp, *cum_dif), ) @@ -82,14 +79,14 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> { for (i, diff_info) in DIF_3000000_3002000 .windows(2) - .skip(cfg.total_block_count() as usize - 1) + .skip(cfg.total_block_count() - 1) .enumerate() { let diff = diff_info[1].0 - diff_info[0].0; assert_eq!(diff_cache.next_difficulty(&HardFork::V16), diff); - diff_cache.new_block(3_000_720 + i as u64, diff_info[1].1, diff_info[1].0); + diff_cache.new_block(3_000_720 + i, diff_info[1].1, diff_info[1].0); } Ok(()) @@ -104,7 +101,7 @@ prop_compose! { let (timestamps, mut cumulative_difficulties): (Vec<_>, Vec<_>) = blocks.into_iter().unzip(); cumulative_difficulties.sort_unstable(); DifficultyCache { - last_accounted_height: timestamps.len().try_into().unwrap(), + last_accounted_height: timestamps.len(), config: TEST_DIFFICULTY_CONFIG, timestamps: timestamps.into(), // we generate cumulative_difficulties in range 0..u64::MAX as if the generated values are close to u128::MAX @@ -165,7 +162,7 @@ proptest! { let mut timestamps: VecDeque = timestamps.into(); let diff_cache = DifficultyCache { - last_accounted_height: (TEST_WINDOW -1).try_into().unwrap(), + last_accounted_height: TEST_WINDOW -1, config: TEST_DIFFICULTY_CONFIG, timestamps: timestamps.clone(), // we dont need cumulative_difficulties @@ -234,7 +231,7 @@ proptest! { new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty); } - new_cache.pop_blocks_main_chain(blocks_to_pop as u64, database).await?; + new_cache.pop_blocks_main_chain(blocks_to_pop, database).await?; prop_assert_eq!(new_cache, old_cache); @@ -258,7 +255,7 @@ proptest! 
{ new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty); } - new_cache.pop_blocks_main_chain(blocks_to_pop as u64, database).await?; + new_cache.pop_blocks_main_chain(blocks_to_pop, database).await?; prop_assert_eq!(new_cache, old_cache); diff --git a/consensus/src/tests/context/hardforks.rs b/consensus/src/tests/context/hardforks.rs index d003b3cc..ffdff598 100644 --- a/consensus/src/tests/context/hardforks.rs +++ b/consensus/src/tests/context/hardforks.rs @@ -10,7 +10,7 @@ use crate::{ }, }; -const TEST_WINDOW_SIZE: u64 = 25; +const TEST_WINDOW_SIZE: usize = 25; const TEST_HFS: [HFInfo; NUMB_OF_HARD_FORKS] = [ HFInfo::new(0, 0), @@ -79,7 +79,7 @@ async fn hf_v15_v16_correct() { for (i, (_, vote)) in HFS_2688888_2689608.into_iter().enumerate() { assert_eq!(state.current_hardfork, HardFork::V15); - state.new_block(vote, (2688888 + i) as u64); + state.new_block(vote, 2688888 + i); } assert_eq!(state.current_hardfork, HardFork::V16); @@ -91,8 +91,8 @@ proptest! { extra_hfs in vec(any::(), 0..100) ) { tokio_test::block_on(async move { - let numb_hfs = hfs.len() as u64; - let numb_pop_blocks = extra_hfs.len() as u64; + let numb_hfs = hfs.len(); + let numb_pop_blocks = extra_hfs.len(); let mut db_builder = DummyDatabaseBuilder::default(); @@ -102,7 +102,7 @@ proptest! { ); } - let db = db_builder.finish(Some(numb_hfs as usize)); + let db = db_builder.finish(Some(numb_hfs )); let mut state = HardForkState::init_from_chain_height( numb_hfs, @@ -114,7 +114,7 @@ proptest! 
{ let state_clone = state.clone(); for (i, hf) in extra_hfs.into_iter().enumerate() { - state.new_block(hf, state.last_height + u64::try_from(i).unwrap() + 1); + state.new_block(hf, state.last_height + i + 1); } state.pop_blocks_main_chain(numb_pop_blocks, db).await?; diff --git a/consensus/src/tests/context/weight.rs b/consensus/src/tests/context/weight.rs index 83c8bb95..6706d97c 100644 --- a/consensus/src/tests/context/weight.rs +++ b/consensus/src/tests/context/weight.rs @@ -123,14 +123,14 @@ async fn weight_cache_calculates_correct_median() -> Result<(), tower::BoxError> .await?; for height in 1..=100 { - weight_cache.new_block(height as u64, height, height); + weight_cache.new_block(height, height, height); assert_eq!(weight_cache.median_short_term_weight(), height / 2); assert_eq!(weight_cache.median_long_term_weight(), height / 2); } for height in 101..=5000 { - weight_cache.new_block(height as u64, height, height); + weight_cache.new_block(height, height, height); assert_eq!(weight_cache.median_long_term_weight(), height / 2); } @@ -162,7 +162,7 @@ async fn calc_bw_ltw_2850000_3050000() { weight_cache.median_long_term_weight(), ); assert_eq!(calc_ltw, *ltw); - weight_cache.new_block((2950000 + i) as u64, *weight, *ltw); + weight_cache.new_block(2950000 + i, *weight, *ltw); } } diff --git a/consensus/src/tests/mock_db.rs b/consensus/src/tests/mock_db.rs index a6200039..b1383781 100644 --- a/consensus/src/tests/mock_db.rs +++ b/consensus/src/tests/mock_db.rs @@ -150,7 +150,7 @@ impl Service for DummyDatabase { async move { Ok(match req { BlockchainReadRequest::BlockExtendedHeader(id) => { - let mut id = usize::try_from(id).unwrap(); + let mut id = id; if let Some(dummy_height) = dummy_height { let block_len = blocks.read().unwrap().len(); @@ -173,8 +173,8 @@ impl Service for DummyDatabase { BlockchainResponse::BlockHash(hash) } BlockchainReadRequest::BlockExtendedHeaderInRange(range, _) => { - let mut end = usize::try_from(range.end).unwrap(); - let mut 
start = usize::try_from(range.start).unwrap(); + let mut end = range.end; + let mut start = range.start; if let Some(dummy_height) = dummy_height { let block_len = blocks.read().unwrap().len(); @@ -196,10 +196,7 @@ impl Service for DummyDatabase { ) } BlockchainReadRequest::ChainHeight => { - let height: u64 = dummy_height - .unwrap_or(blocks.read().unwrap().len()) - .try_into() - .unwrap(); + let height = dummy_height.unwrap_or(blocks.read().unwrap().len()); let mut top_hash = [0; 32]; top_hash[0..8].copy_from_slice(&height.to_le_bytes()); diff --git a/consensus/src/transactions.rs b/consensus/src/transactions.rs index 78104e95..978407ec 100644 --- a/consensus/src/transactions.rs +++ b/consensus/src/transactions.rs @@ -12,10 +12,7 @@ use std::{ }; use futures::FutureExt; -use monero_serai::{ - ringct::RctType, - transaction::{Input, Timelock, Transaction}, -}; +use monero_serai::transaction::{Input, Timelock, Transaction}; use rayon::prelude::*; use tower::{Service, ServiceExt}; use tracing::instrument; @@ -37,6 +34,7 @@ use crate::{ }; pub mod contextual_data; +mod free; /// A struct representing the type of validation that needs to be completed for this transaction. #[derive(Debug, Copy, Clone, Eq, PartialEq)] @@ -103,22 +101,17 @@ impl TransactionVerificationData { let tx_hash = tx.hash(); let tx_blob = tx.serialize(); - // the tx weight is only different from the blobs length for bp(+) txs. 
- let tx_weight = match tx.rct_signatures.rct_type() { - RctType::Bulletproofs - | RctType::BulletproofsCompactAmount - | RctType::Clsag - | RctType::BulletproofsPlus => tx.weight(), - _ => tx_blob.len(), - }; + let tx_weight = free::tx_weight(&tx, &tx_blob); + + let fee = free::tx_fee(&tx)?; Ok(TransactionVerificationData { tx_hash, tx_blob, tx_weight, - fee: tx.rct_signatures.base.fee, + fee, cached_verification_state: StdMutex::new(CachedVerificationState::NotVerified), - version: TxVersion::from_raw(tx.prefix.version) + version: TxVersion::from_raw(tx.version()) .ok_or(TransactionError::TransactionVersionInvalid)?, tx, }) @@ -133,7 +126,7 @@ pub enum VerifyTxRequest { // TODO: Can we use references to remove the Vec? wont play nicely with Service though txs: Vec>, /// The current chain height. - current_chain_height: u64, + current_chain_height: usize, /// The top block hash. top_hash: [u8; 32], /// The value for time to use to check time locked outputs. @@ -147,7 +140,7 @@ pub enum VerifyTxRequest { /// The transactions to verify. txs: Vec, /// The current chain height. - current_chain_height: u64, + current_chain_height: usize, /// The top block hash. top_hash: [u8; 32], /// The value for time to use to check time locked outputs. @@ -246,7 +239,7 @@ where async fn prep_and_verify_transactions( database: D, txs: Vec, - current_chain_height: u64, + current_chain_height: usize, top_hash: [u8; 32], time_for_time_lock: u64, hf: HardFork, @@ -281,7 +274,7 @@ where async fn verify_prepped_transactions( mut database: D, txs: &[Arc], - current_chain_height: u64, + current_chain_height: usize, top_hash: [u8; 32], time_for_time_lock: u64, hf: HardFork, @@ -296,7 +289,7 @@ where let mut spent_kis = HashSet::with_capacity(txs.len()); txs.iter().try_for_each(|tx| { - tx.tx.prefix.inputs.iter().try_for_each(|input| { + tx.tx.prefix().inputs.iter().try_for_each(|input| { if let Input::ToKey { key_image, .. 
} = input { if !spent_kis.insert(key_image.compress().0) { tracing::debug!("Duplicate key image found in batch."); @@ -382,7 +375,7 @@ fn transactions_needing_verification( txs: &[Arc], hashes_in_main_chain: HashSet<[u8; 32]>, current_hf: &HardFork, - current_chain_height: u64, + current_chain_height: usize, time_for_time_lock: u64, ) -> Result< ( @@ -473,7 +466,7 @@ where async fn verify_transactions( txs: Vec<(Arc, VerificationNeeded)>, - current_chain_height: u64, + current_chain_height: usize, top_hash: [u8; 32], current_time_lock_timestamp: u64, hf: HardFork, @@ -501,7 +494,7 @@ where &hf, &batch_verifier, )?; - // make sure monero-serai calculated the same fee. + // make sure we calculated the right fee. assert_eq!(fee, tx.fee); } diff --git a/consensus/src/transactions/contextual_data.rs b/consensus/src/transactions/contextual_data.rs index b17fbe0c..82f99768 100644 --- a/consensus/src/transactions/contextual_data.rs +++ b/consensus/src/transactions/contextual_data.rs @@ -149,7 +149,7 @@ pub async fn batch_get_ring_member_info( let mut output_ids = HashMap::new(); for tx_v_data in txs_verification_data.clone() { - insert_ring_member_ids(&tx_v_data.tx.prefix.inputs, &mut output_ids) + insert_ring_member_ids(&tx_v_data.tx.prefix().inputs, &mut output_ids) .map_err(ConsensusError::Transaction)?; } @@ -179,14 +179,14 @@ pub async fn batch_get_ring_member_info( let ring_members_for_tx = get_ring_members_for_inputs( |amt, idx| outputs.get(&amt)?.get(&idx).copied(), - &tx_v_data.tx.prefix.inputs, + &tx_v_data.tx.prefix().inputs, ) .map_err(ConsensusError::Transaction)?; let decoy_info = if hf != &HardFork::V1 { // this data is only needed after hard-fork 1. 
Some( - DecoyInfo::new(&tx_v_data.tx.prefix.inputs, numb_outputs, hf) + DecoyInfo::new(&tx_v_data.tx.prefix().inputs, numb_outputs, hf) .map_err(ConsensusError::Transaction)?, ) } else { @@ -222,7 +222,7 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>( let unique_input_amounts = txs_verification_data .iter() .flat_map(|tx_info| { - tx_info.tx.prefix.inputs.iter().map(|input| match input { + tx_info.tx.prefix().inputs.iter().map(|input| match input { Input::ToKey { amount, .. } => amount.unwrap_or(0), _ => 0, }) @@ -247,7 +247,7 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>( Ok(txs_verification_data.iter().map(move |tx_v_data| { DecoyInfo::new( - &tx_v_data.tx.prefix.inputs, + &tx_v_data.tx.prefix().inputs, |amt| outputs_with_amount.get(&amt).copied().unwrap_or(0), &hf, ) diff --git a/consensus/src/transactions/free.rs b/consensus/src/transactions/free.rs new file mode 100644 index 00000000..5ffd16e8 --- /dev/null +++ b/consensus/src/transactions/free.rs @@ -0,0 +1,64 @@ +use monero_serai::{ + ringct::{bulletproofs::Bulletproof, RctType}, + transaction::{Input, Transaction}, +}; + +use cuprate_consensus_rules::transactions::TransactionError; + +/// Calculates the weight of a [`Transaction`]. +/// +/// This is more efficient than [`Transaction::weight`] if you already have the transaction blob. +pub fn tx_weight(tx: &Transaction, tx_blob: &[u8]) -> usize { + // the tx weight is only different from the blobs length for bp(+) txs. + + match &tx { + Transaction::V1 { .. } | Transaction::V2 { proofs: None, .. } => tx_blob.len(), + Transaction::V2 { + proofs: Some(proofs), + ..
+ } => match proofs.rct_type() { + RctType::AggregateMlsagBorromean | RctType::MlsagBorromean => tx_blob.len(), + RctType::MlsagBulletproofs + | RctType::MlsagBulletproofsCompactAmount + | RctType::ClsagBulletproof => { + tx_blob.len() + + Bulletproof::calculate_bp_clawback(false, tx.prefix().outputs.len()).0 + } + RctType::ClsagBulletproofPlus => { + tx_blob.len() + + Bulletproof::calculate_bp_clawback(true, tx.prefix().outputs.len()).0 + } + }, + } +} + +/// Calculates the fee of the [`Transaction`]. +pub fn tx_fee(tx: &Transaction) -> Result { + let mut fee = 0_u64; + + match &tx { + Transaction::V1 { prefix, .. } => { + for input in &prefix.inputs { + if let Input::ToKey { amount, .. } = input { + fee = fee + .checked_add(amount.unwrap_or(0)) + .ok_or(TransactionError::InputsOverflow)?; + } + } + + for output in &prefix.outputs { + fee = fee + .checked_sub(output.amount.unwrap_or(0)) + .ok_or(TransactionError::OutputsTooHigh)?; + } + } + Transaction::V2 { proofs, .. } => { + fee = proofs + .as_ref() + .ok_or(TransactionError::TransactionVersionInvalid)?
+ .base + .fee; + } + }; + + Ok(fee) +} diff --git a/p2p/address-book/src/book.rs b/p2p/address-book/src/book.rs index 4b5a1e76..2f0ce6db 100644 --- a/p2p/address-book/src/book.rs +++ b/p2p/address-book/src/book.rs @@ -260,7 +260,7 @@ impl AddressBook { fn take_random_white_peer( &mut self, - block_needed: Option, + block_needed: Option, ) -> Option> { tracing::debug!("Retrieving random white peer"); self.white_list @@ -269,7 +269,7 @@ impl AddressBook { fn take_random_gray_peer( &mut self, - block_needed: Option, + block_needed: Option, ) -> Option> { tracing::debug!("Retrieving random gray peer"); self.gray_list diff --git a/p2p/address-book/src/peer_list.rs b/p2p/address-book/src/peer_list.rs index e2a15d8a..f0a905ae 100644 --- a/p2p/address-book/src/peer_list.rs +++ b/p2p/address-book/src/peer_list.rs @@ -88,7 +88,7 @@ impl PeerList { pub fn take_random_peer( &mut self, r: &mut R, - block_needed: Option, + block_needed: Option, must_keep_peers: &HashSet, ) -> Option> { // Take a random peer and see if it's in the list of must_keep_peers, if it is try again. diff --git a/p2p/p2p-core/src/services.rs b/p2p/p2p-core/src/services.rs index b01bde0e..6d66cfa1 100644 --- a/p2p/p2p-core/src/services.rs +++ b/p2p/p2p-core/src/services.rs @@ -14,7 +14,7 @@ pub enum PeerSyncRequest { /// claim to have a higher cumulative difficulty. PeersToSyncFrom { current_cumulative_difficulty: u128, - block_needed: Option, + block_needed: Option, }, /// Add/update a peer's core sync data. IncomingCoreSyncData(InternalPeerID, ConnectionHandle, CoreSyncData), @@ -115,18 +115,18 @@ pub enum AddressBookRequest { /// Takes a random white peer from the peer list. If height is specified /// then the peer list should retrieve a peer that should have a full /// block at that height according to it's pruning seed - TakeRandomWhitePeer { height: Option }, + TakeRandomWhitePeer { height: Option }, /// Takes a random gray peer from the peer list. 
If height is specified /// then the peer list should retrieve a peer that should have a full /// block at that height according to it's pruning seed - TakeRandomGrayPeer { height: Option }, + TakeRandomGrayPeer { height: Option }, /// Takes a random peer from the peer list. If height is specified /// then the peer list should retrieve a peer that should have a full /// block at that height according to it's pruning seed. /// /// The address book will look in the white peer list first, then the gray /// one if no peer is found. - TakeRandomPeer { height: Option }, + TakeRandomPeer { height: Option }, /// Gets the specified number of white peers, or less if we don't have enough. GetWhitePeers(usize), /// Checks if the given peer is banned. diff --git a/p2p/p2p/src/block_downloader.rs b/p2p/p2p/src/block_downloader.rs index 5f530546..d2950161 100644 --- a/p2p/p2p/src/block_downloader.rs +++ b/p2p/p2p/src/block_downloader.rs @@ -121,7 +121,7 @@ pub enum ChainSvcResponse { /// The response for [`ChainSvcRequest::FindFirstUnknown`]. /// /// Contains the index of the first unknown block and its expected height. - FindFirstUnknown(Option<(usize, u64)>), + FindFirstUnknown(Option<(usize, usize)>), /// The response for [`ChainSvcRequest::CumulativeDifficulty`]. /// /// The current cumulative difficulty of our chain. @@ -207,7 +207,7 @@ struct BlockDownloader { /// The amount of blocks to request in the next batch. amount_of_blocks_to_request: usize, /// The height at which [`Self::amount_of_blocks_to_request`] was updated. - amount_of_blocks_to_request_updated_at: u64, + amount_of_blocks_to_request_updated_at: usize, /// The amount of consecutive empty chain entries we received. /// @@ -225,12 +225,12 @@ struct BlockDownloader { /// The current inflight requests. /// /// This is a map of batch start heights to block IDs and related information of the batch. 
- inflight_requests: BTreeMap>, + inflight_requests: BTreeMap>, /// A queue of start heights from failed batches that should be retried. /// /// Wrapped in [`Reverse`] so we prioritize early batches. - failed_batches: BinaryHeap>, + failed_batches: BinaryHeap>, block_queue: BlockQueue, @@ -524,7 +524,7 @@ where /// Handles a response to a request to get blocks from a peer. async fn handle_download_batch_res( &mut self, - start_height: u64, + start_height: usize, res: Result<(ClientPoolDropGuard, BlockBatch), BlockDownloadError>, chain_tracker: &mut ChainTracker, pending_peers: &mut BTreeMap>>, @@ -692,18 +692,19 @@ where /// The return value from the block download tasks. struct BlockDownloadTaskResponse { /// The start height of the batch. - start_height: u64, + start_height: usize, /// A result containing the batch or an error. result: Result<(ClientPoolDropGuard, BlockBatch), BlockDownloadError>, } /// Returns if a peer has all the blocks in a range, according to its [`PruningSeed`]. -fn client_has_block_in_range(pruning_seed: &PruningSeed, start_height: u64, length: usize) -> bool { +fn client_has_block_in_range( + pruning_seed: &PruningSeed, + start_height: usize, + length: usize, +) -> bool { pruning_seed.has_full_block(start_height, CRYPTONOTE_MAX_BLOCK_HEIGHT) - && pruning_seed.has_full_block( - start_height + u64::try_from(length).unwrap(), - CRYPTONOTE_MAX_BLOCK_HEIGHT, - ) + && pruning_seed.has_full_block(start_height + length, CRYPTONOTE_MAX_BLOCK_HEIGHT) } /// Calculates the next amount of blocks to request in a batch. diff --git a/p2p/p2p/src/block_downloader/block_queue.rs b/p2p/p2p/src/block_downloader/block_queue.rs index 708eb3ed..5a92f493 100644 --- a/p2p/p2p/src/block_downloader/block_queue.rs +++ b/p2p/p2p/src/block_downloader/block_queue.rs @@ -15,7 +15,7 @@ use super::{BlockBatch, BlockDownloadError}; #[derive(Debug, Clone)] pub struct ReadyQueueBatch { /// The start height of the batch. 
- pub start_height: u64, + pub start_height: usize, /// The batch of blocks. pub block_batch: BlockBatch, } @@ -64,7 +64,7 @@ impl BlockQueue { } /// Returns the oldest batch that has not been put in the [`async_buffer`] yet. - pub fn oldest_ready_batch(&self) -> Option { + pub fn oldest_ready_batch(&self) -> Option { self.ready_batches.peek().map(|batch| batch.start_height) } @@ -80,13 +80,13 @@ impl BlockQueue { pub async fn add_incoming_batch( &mut self, new_batch: ReadyQueueBatch, - oldest_in_flight_start_height: Option, + oldest_in_flight_start_height: Option, ) -> Result<(), BlockDownloadError> { self.ready_batches_size += new_batch.block_batch.size; self.ready_batches.push(new_batch); // The height to stop pushing batches into the buffer. - let height_to_stop_at = oldest_in_flight_start_height.unwrap_or(u64::MAX); + let height_to_stop_at = oldest_in_flight_start_height.unwrap_or(usize::MAX); while self .ready_batches @@ -124,14 +124,14 @@ mod tests { use super::*; prop_compose! { - fn ready_batch_strategy()(start_height in 0_u64..500_000_000) -> ReadyQueueBatch { + fn ready_batch_strategy()(start_height in 0_usize..500_000_000) -> ReadyQueueBatch { let (_, peer_handle) = HandleBuilder::new().build(); ReadyQueueBatch { start_height, block_batch: BlockBatch { blocks: vec![], - size: start_height as usize, + size: start_height, peer_handle, }, } diff --git a/p2p/p2p/src/block_downloader/chain_tracker.rs b/p2p/p2p/src/block_downloader/chain_tracker.rs index 786a0deb..aacb1633 100644 --- a/p2p/p2p/src/block_downloader/chain_tracker.rs +++ b/p2p/p2p/src/block_downloader/chain_tracker.rs @@ -26,7 +26,7 @@ pub struct BlocksToRetrieve { /// The hash of the last block before this batch. pub prev_id: [u8; 32], /// The expected height of the first block in [`BlocksToRetrieve::ids`]. - pub start_height: u64, + pub start_height: usize, /// The peer who told us about this batch. pub peer_who_told_us: InternalPeerID, /// The peer who told us about this batch's handle. 
@@ -54,7 +54,7 @@ pub struct ChainTracker { /// A list of [`ChainEntry`]s, in order. entries: VecDeque>, /// The height of the first block, in the first entry in [`Self::entries`]. - first_height: u64, + first_height: usize, /// The hash of the last block in the last entry. top_seen_hash: [u8; 32], /// The hash of the block one below [`Self::first_height`]. @@ -67,7 +67,7 @@ impl ChainTracker { /// Creates a new chain tracker. pub fn new( new_entry: ChainEntry, - first_height: u64, + first_height: usize, our_genesis: [u8; 32], previous_hash: [u8; 32], ) -> Self { @@ -96,14 +96,14 @@ impl ChainTracker { } /// Returns the height of the highest block we are tracking. - pub fn top_height(&self) -> u64 { + pub fn top_height(&self) -> usize { let top_block_idx = self .entries .iter() .map(|entry| entry.ids.len()) .sum::(); - self.first_height + u64::try_from(top_block_idx).unwrap() + self.first_height + top_block_idx } /// Returns the total number of queued batches for a certain `batch_size`. @@ -171,15 +171,12 @@ impl ChainTracker { // - index of the next pruned block for this seed let end_idx = min( min(entry.ids.len(), max_blocks), - usize::try_from( pruning_seed .get_next_pruned_block(self.first_height, CRYPTONOTE_MAX_BLOCK_HEIGHT) .expect("We use local values to calculate height which should be below the sanity limit") // Use a big value as a fallback if the seed does no pruning. .unwrap_or(CRYPTONOTE_MAX_BLOCK_HEIGHT) - self.first_height, - ) - .unwrap(), ); if end_idx == 0 { @@ -198,7 +195,7 @@ impl ChainTracker { failures: 0, }; - self.first_height += u64::try_from(end_idx).unwrap(); + self.first_height += end_idx; // TODO: improve ByteArrayVec API. 
self.previous_hash = blocks.ids[blocks.ids.len() - 1]; diff --git a/p2p/p2p/src/block_downloader/download_batch.rs b/p2p/p2p/src/block_downloader/download_batch.rs index fbf33b15..ea57eade 100644 --- a/p2p/p2p/src/block_downloader/download_batch.rs +++ b/p2p/p2p/src/block_downloader/download_batch.rs @@ -34,7 +34,7 @@ pub async fn download_batch_task( client: ClientPoolDropGuard, ids: ByteArrayVec<32>, previous_id: [u8; 32], - expected_start_height: u64, + expected_start_height: usize, _attempt: usize, ) -> BlockDownloadTaskResponse { BlockDownloadTaskResponse { @@ -51,7 +51,7 @@ async fn request_batch_from_peer( mut client: ClientPoolDropGuard, ids: ByteArrayVec<32>, previous_id: [u8; 32], - expected_start_height: u64, + expected_start_height: usize, ) -> Result<(ClientPoolDropGuard, BlockBatch), BlockDownloadError> { let request = PeerRequest::Protocol(ProtocolRequest::GetObjects(GetObjectsRequest { blocks: ids.clone(), @@ -105,7 +105,7 @@ async fn request_batch_from_peer( fn deserialize_batch( blocks_response: GetObjectsResponse, - expected_start_height: u64, + expected_start_height: usize, requested_ids: ByteArrayVec<32>, previous_id: [u8; 32], peer_handle: ConnectionHandle, @@ -115,7 +115,7 @@ fn deserialize_batch( .into_par_iter() .enumerate() .map(|(i, block_entry)| { - let expected_height = u64::try_from(i).unwrap() + expected_start_height; + let expected_height = i + expected_start_height; let mut size = block_entry.block.len(); @@ -125,7 +125,7 @@ fn deserialize_batch( let block_hash = block.hash(); // Check the block matches the one requested and the peer sent enough transactions. - if requested_ids[i] != block_hash || block.txs.len() != block_entry.txs.len() { + if requested_ids[i] != block_hash || block.transactions.len() != block_entry.txs.len() { return Err(BlockDownloadError::PeersResponseWasInvalid); } @@ -177,7 +177,7 @@ fn deserialize_batch( .collect::, _>>()?; // Make sure the transactions in the block were the ones the peer sent. 
- let mut expected_txs = block.txs.iter().collect::>(); + let mut expected_txs = block.transactions.iter().collect::>(); for tx in &txs { if !expected_txs.remove(&tx.hash()) { diff --git a/p2p/p2p/src/block_downloader/tests.rs b/p2p/p2p/src/block_downloader/tests.rs index f6ddbfc6..86a9a468 100644 --- a/p2p/p2p/src/block_downloader/tests.rs +++ b/p2p/p2p/src/block_downloader/tests.rs @@ -11,7 +11,6 @@ use futures::{FutureExt, StreamExt}; use indexmap::IndexMap; use monero_serai::{ block::{Block, BlockHeader}, - ringct::{RctBase, RctPrunable, RctSignatures}, transaction::{Input, Timelock, Transaction, TransactionPrefix}, }; use proptest::{collection::vec, prelude::*}; @@ -90,30 +89,20 @@ proptest! { prop_compose! { /// Returns a strategy to generate a [`Transaction`] that is valid for the block downloader. - fn dummy_transaction_stragtegy(height: u64) + fn dummy_transaction_stragtegy(height: usize) ( extra in vec(any::(), 0..1_000), timelock in 1_usize..50_000_000, ) -> Transaction { - Transaction { + Transaction::V1 { prefix: TransactionPrefix { - version: 1, - timelock: Timelock::Block(timelock), + additional_timelock: Timelock::Block(timelock), inputs: vec![Input::Gen(height)], outputs: vec![], extra, }, signatures: vec![], - rct_signatures: RctSignatures { - base: RctBase { - fee: 0, - pseudo_outs: vec![], - encrypted_amounts: vec![], - commitments: vec![], - }, - prunable: RctPrunable::Null - }, } } } @@ -121,25 +110,25 @@ prop_compose! { prop_compose! { /// Returns a strategy to generate a [`Block`] that is valid for the block downloader. 
fn dummy_block_stragtegy( - height: u64, + height: usize, previous: [u8; 32], ) ( - miner_tx in dummy_transaction_stragtegy(height), + miner_transaction in dummy_transaction_stragtegy(height), txs in vec(dummy_transaction_stragtegy(height), 0..25) ) -> (Block, Vec) { ( Block { header: BlockHeader { - major_version: 0, - minor_version: 0, + hardfork_version: 0, + hardfork_signal: 0, timestamp: 0, previous, nonce: 0, }, - miner_tx, - txs: txs.iter().map(Transaction::hash).collect(), + miner_transaction, + transactions: txs.iter().map(Transaction::hash).collect(), }, txs ) @@ -167,7 +156,7 @@ prop_compose! { for (height, mut block) in blocks.into_iter().enumerate() { if let Some(last) = blockchain.last() { block.0.header.previous = *last.0; - block.0.miner_tx.prefix.inputs = vec![Input::Gen(height as u64)] + block.0.miner_transaction.prefix_mut().inputs = vec![Input::Gen(height)] } blockchain.insert(block.0.hash(), block); diff --git a/p2p/p2p/src/connection_maintainer.rs b/p2p/p2p/src/connection_maintainer.rs index 2bcf2707..3dfd5e8d 100644 --- a/p2p/p2p/src/connection_maintainer.rs +++ b/p2p/p2p/src/connection_maintainer.rs @@ -38,7 +38,7 @@ enum OutboundConnectorError { /// set needs specific data that none of the currently connected peers have. pub struct MakeConnectionRequest { /// The block needed that no connected peers have due to pruning. - block_needed: Option, + block_needed: Option, } /// The outbound connection count keeper. diff --git a/p2p/p2p/src/sync_states.rs b/p2p/p2p/src/sync_states.rs index 1484941f..70ef6ca7 100644 --- a/p2p/p2p/src/sync_states.rs +++ b/p2p/p2p/src/sync_states.rs @@ -99,7 +99,7 @@ impl PeerSyncSvc { fn peers_to_sync_from( &self, current_cum_diff: u128, - block_needed: Option, + block_needed: Option, ) -> Vec> { self.cumulative_difficulties .range((current_cum_diff + 1)..) 
diff --git a/pruning/src/lib.rs b/pruning/src/lib.rs index 96c3609f..fdd159c6 100644 --- a/pruning/src/lib.rs +++ b/pruning/src/lib.rs @@ -22,13 +22,13 @@ use std::cmp::Ordering; use thiserror::Error; -pub const CRYPTONOTE_MAX_BLOCK_HEIGHT: u64 = 500000000; +pub const CRYPTONOTE_MAX_BLOCK_HEIGHT: usize = 500000000; /// The default log stripes for Monero pruning. pub const CRYPTONOTE_PRUNING_LOG_STRIPES: u32 = 3; /// The amount of blocks that peers keep before another stripe starts storing blocks. -pub const CRYPTONOTE_PRUNING_STRIPE_SIZE: u64 = 4096; +pub const CRYPTONOTE_PRUNING_STRIPE_SIZE: usize = 4096; /// The amount of blocks from the top of the chain that should not be pruned. -pub const CRYPTONOTE_PRUNING_TIP_BLOCKS: u64 = 5500; +pub const CRYPTONOTE_PRUNING_TIP_BLOCKS: usize = 5500; const PRUNING_SEED_LOG_STRIPES_SHIFT: u32 = 7; const PRUNING_SEED_STRIPE_SHIFT: u32 = 0; @@ -127,7 +127,7 @@ impl PruningSeed { } /// Returns `true` if a peer with this pruning seed should have a non-pruned version of a block. 
- pub fn has_full_block(&self, height: u64, blockchain_height: u64) -> bool { + pub fn has_full_block(&self, height: usize, blockchain_height: usize) -> bool { match self { PruningSeed::NotPruned => true, PruningSeed::Pruned(seed) => seed.has_full_block(height, blockchain_height), @@ -151,9 +151,9 @@ impl PruningSeed { /// This function will also error if `block_height` > `blockchain_height` pub fn get_next_pruned_block( &self, - block_height: u64, - blockchain_height: u64, - ) -> Result, PruningError> { + block_height: usize, + blockchain_height: usize, + ) -> Result, PruningError> { Ok(match self { PruningSeed::NotPruned => None, PruningSeed::Pruned(seed) => { @@ -177,9 +177,9 @@ impl PruningSeed { /// pub fn get_next_unpruned_block( &self, - block_height: u64, - blockchain_height: u64, - ) -> Result { + block_height: usize, + blockchain_height: usize, + ) -> Result { Ok(match self { PruningSeed::NotPruned => block_height, PruningSeed::Pruned(seed) => { @@ -312,7 +312,7 @@ impl DecompressedPruningSeed { } /// Returns `true` if a peer with this pruning seed should have a non-pruned version of a block. 
- pub fn has_full_block(&self, height: u64, blockchain_height: u64) -> bool { + pub fn has_full_block(&self, height: usize, blockchain_height: usize) -> bool { match get_block_pruning_stripe(height, blockchain_height, self.log_stripes) { Some(block_stripe) => self.stripe == block_stripe, None => true, @@ -334,9 +334,9 @@ impl DecompressedPruningSeed { /// pub fn get_next_unpruned_block( &self, - block_height: u64, - blockchain_height: u64, - ) -> Result { + block_height: usize, + blockchain_height: usize, + ) -> Result { if block_height > CRYPTONOTE_MAX_BLOCK_HEIGHT || block_height > blockchain_height { return Err(PruningError::BlockHeightTooLarge); } @@ -373,7 +373,7 @@ impl DecompressedPruningSeed { // amt_of_cycles * blocks in a cycle + how many blocks through a cycles until the seed starts storing blocks let calculated_height = cycles_start * (CRYPTONOTE_PRUNING_STRIPE_SIZE << self.log_stripes) - + (self.stripe as u64 - 1) * CRYPTONOTE_PRUNING_STRIPE_SIZE; + + (self.stripe as usize - 1) * CRYPTONOTE_PRUNING_STRIPE_SIZE; if calculated_height + CRYPTONOTE_PRUNING_TIP_BLOCKS > blockchain_height { // if our calculated height is greater than the amount of tip blocks then the start of the tip blocks will be the next un-pruned @@ -400,9 +400,9 @@ impl DecompressedPruningSeed { /// pub fn get_next_pruned_block( &self, - block_height: u64, - blockchain_height: u64, - ) -> Result, PruningError> { + block_height: usize, + blockchain_height: usize, + ) -> Result, PruningError> { if block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height { // If we are within `CRYPTONOTE_PRUNING_TIP_BLOCKS` of the chain we should // not prune blocks. 
@@ -434,16 +434,16 @@ impl DecompressedPruningSeed { } fn get_block_pruning_stripe( - block_height: u64, - blockchain_height: u64, + block_height: usize, + blockchain_height: usize, log_stripe: u32, ) -> Option { if block_height + CRYPTONOTE_PRUNING_TIP_BLOCKS >= blockchain_height { None } else { Some( - (((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & ((1 << log_stripe) as u64 - 1)) + 1) - as u32, // it's trivial to prove it's ok to us `as` here + (((block_height / CRYPTONOTE_PRUNING_STRIPE_SIZE) & ((1 << log_stripe) as usize - 1)) + + 1) as u32, // it's trivial to prove it's ok to us `as` here ) } } @@ -503,7 +503,7 @@ mod tests { for i in 0_u32..8 { assert_eq!( get_block_pruning_stripe( - (i * 4096) as u64, + (i * 4096) as usize, blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES ) @@ -515,7 +515,7 @@ mod tests { for i in 0_u32..8 { assert_eq!( get_block_pruning_stripe( - 32768 + (i * 4096) as u64, + 32768 + (i * 4096) as usize, blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES ) @@ -527,7 +527,7 @@ mod tests { for i in 1_u32..8 { assert_eq!( get_block_pruning_stripe( - 32767 + (i * 4096) as u64, + 32767 + (i * 4096) as usize, blockchain_height, CRYPTONOTE_PRUNING_LOG_STRIPES ) @@ -553,23 +553,23 @@ mod tests { for (i, seed) in all_valid_seeds.iter().enumerate() { assert_eq!( seed.get_next_unpruned_block(0, blockchain_height).unwrap(), - i as u64 * 4096 + i * 4096 ) } for (i, seed) in all_valid_seeds.iter().enumerate() { assert_eq!( - seed.get_next_unpruned_block((i as u64 + 1) * 4096, blockchain_height) + seed.get_next_unpruned_block((i + 1) * 4096, blockchain_height) .unwrap(), - i as u64 * 4096 + 32768 + i * 4096 + 32768 ) } for (i, seed) in all_valid_seeds.iter().enumerate() { assert_eq!( - seed.get_next_unpruned_block((i as u64 + 8) * 4096, blockchain_height) + seed.get_next_unpruned_block((i + 8) * 4096, blockchain_height) .unwrap(), - i as u64 * 4096 + 32768 + i * 4096 + 32768 ) } @@ -610,19 +610,19 @@ mod tests { for (i, seed) in 
all_valid_seeds.iter().enumerate() { assert_eq!( - seed.get_next_pruned_block((i as u64 + 1) * 4096, blockchain_height) + seed.get_next_pruned_block((i + 1) * 4096, blockchain_height) .unwrap() .unwrap(), - (i as u64 + 1) * 4096 + (i + 1) * 4096 ) } for (i, seed) in all_valid_seeds.iter().enumerate() { assert_eq!( - seed.get_next_pruned_block((i as u64 + 8) * 4096, blockchain_height) + seed.get_next_pruned_block((i + 8) * 4096, blockchain_height) .unwrap() .unwrap(), - (i as u64 + 9) * 4096 + (i + 9) * 4096 ) } diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index b0eb0136..4d358f41 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -65,17 +65,17 @@ pub fn add_block( #[cfg(debug_assertions)] { assert_eq!(block.block.serialize(), block.block_blob); - assert_eq!(block.block.txs.len(), block.txs.len()); + assert_eq!(block.block.transactions.len(), block.txs.len()); for (i, tx) in block.txs.iter().enumerate() { assert_eq!(tx.tx_blob, tx.tx.serialize()); - assert_eq!(tx.tx_hash, block.block.txs[i]); + assert_eq!(tx.tx_hash, block.block.transactions[i]); } } //------------------------------------------------------ Transaction / Outputs / Key Images // Add the miner transaction first. 
{ - let tx = &block.block.miner_tx; + let tx = &block.block.miner_transaction; add_tx(tx, &tx.serialize(), &tx.hash(), &chain_height, tables)?; } @@ -154,8 +154,8 @@ pub fn pop_block( let block = Block::read(&mut block_blob.as_slice())?; //------------------------------------------------------ Transaction / Outputs / Key Images - remove_tx(&block.miner_tx.hash(), tables)?; - for tx_hash in &block.txs { + remove_tx(&block.miner_transaction.hash(), tables)?; + for tx_hash in &block.transactions { remove_tx(tx_hash, tables)?; } @@ -200,8 +200,8 @@ pub fn get_block_extended_header_from_height( #[allow(clippy::cast_possible_truncation)] Ok(ExtendedBlockHeader { cumulative_difficulty, - version: block.header.major_version, - vote: block.header.minor_version, + version: block.header.hardfork_version, + vote: block.header.hardfork_signal, timestamp: block.header.timestamp, block_weight: block_info.weight as usize, long_term_weight: block_info.long_term_weight as usize, @@ -297,7 +297,7 @@ mod test { // HACK: `add_block()` asserts blocks with non-sequential heights // cannot be added, to get around this, manually edit the block height. 
for (height, block) in blocks.iter_mut().enumerate() { - block.height = height as u64; + block.height = height; assert_eq!(block.block.serialize(), block.block_blob); } let generated_coins_sum = blocks @@ -369,8 +369,8 @@ mod test { let b1 = block_header_from_hash; let b2 = block; assert_eq!(b1, block_header_from_height); - assert_eq!(b1.version, b2.block.header.major_version); - assert_eq!(b1.vote, b2.block.header.minor_version); + assert_eq!(b1.version, b2.block.header.hardfork_version); + assert_eq!(b1.vote, b2.block.header.hardfork_signal); assert_eq!(b1.timestamp, b2.block.header.timestamp); assert_eq!(b1.cumulative_difficulty, b2.cumulative_difficulty); assert_eq!(b1.block_weight, b2.weight); @@ -388,7 +388,7 @@ mod test { assert_eq!(tx.tx_blob, tx2.serialize()); assert_eq!(tx.tx_weight, tx2.weight()); - assert_eq!(tx.tx_hash, block.block.txs[i]); + assert_eq!(tx.tx_hash, block.block.transactions[i]); assert_eq!(tx.tx_hash, tx2.hash()); } } @@ -440,7 +440,7 @@ mod test { let mut block = block_v9_tx3().clone(); - block.height = u64::from(u32::MAX) + 1; + block.height = usize::try_from(u32::MAX).unwrap() + 1; add_block(&block, &mut tables).unwrap(); } diff --git a/storage/blockchain/src/ops/blockchain.rs b/storage/blockchain/src/ops/blockchain.rs index e93af3dc..65d9ca26 100644 --- a/storage/blockchain/src/ops/blockchain.rs +++ b/storage/blockchain/src/ops/blockchain.rs @@ -25,7 +25,8 @@ use crate::{ pub fn chain_height( table_block_heights: &impl DatabaseRo, ) -> Result { - table_block_heights.len() + #[allow(clippy::cast_possible_truncation)] // we enforce 64-bit + table_block_heights.len().map(|height| height as usize) } /// Retrieve the height of the top block. @@ -47,7 +48,8 @@ pub fn top_block_height( ) -> Result { match table_block_heights.len()? 
{ 0 => Err(RuntimeError::KeyNotFound), - height => Ok(height - 1), + #[allow(clippy::cast_possible_truncation)] // we enforce 64-bit + height => Ok(height as usize - 1), } } @@ -110,7 +112,7 @@ mod test { block_v9_tx3().clone(), block_v16_tx0().clone(), ]; - let blocks_len = u64::try_from(blocks.len()).unwrap(); + let blocks_len = blocks.len(); // Add blocks. { @@ -127,7 +129,6 @@ mod test { ); for (i, block) in blocks.iter_mut().enumerate() { - let i = u64::try_from(i).unwrap(); // HACK: `add_block()` asserts blocks with non-sequential heights // cannot be added, to get around this, manually edit the block height. block.height = i; diff --git a/storage/blockchain/src/ops/output.rs b/storage/blockchain/src/ops/output.rs index dfc52f20..f3453e46 100644 --- a/storage/blockchain/src/ops/output.rs +++ b/storage/blockchain/src/ops/output.rs @@ -2,7 +2,7 @@ //---------------------------------------------------------------------------------------------------- Import use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, Scalar}; -use monero_serai::{transaction::Timelock, H}; +use monero_serai::{generators::H, transaction::Timelock}; use cuprate_database::{ RuntimeError, {DatabaseRo, DatabaseRw}, @@ -157,7 +157,7 @@ pub fn output_to_output_on_chain( ) -> Result { // FIXME: implement lookup table for common values: // - let commitment = ED25519_BASEPOINT_POINT + H() * Scalar::from(amount); + let commitment = ED25519_BASEPOINT_POINT + *H * Scalar::from(amount); let time_lock = if output .output_flags @@ -173,7 +173,7 @@ pub fn output_to_output_on_chain( .unwrap_or(None); Ok(OutputOnChain { - height: u64::from(output.height), + height: output.height as usize, time_lock, key, commitment, @@ -213,7 +213,7 @@ pub fn rct_output_to_output_on_chain( .unwrap_or(None); Ok(OutputOnChain { - height: u64::from(rct_output.height), + height: rct_output.height as usize, time_lock, key, commitment, diff --git a/storage/blockchain/src/ops/tx.rs 
b/storage/blockchain/src/ops/tx.rs index f4a2675b..7d608ca0 100644 --- a/storage/blockchain/src/ops/tx.rs +++ b/storage/blockchain/src/ops/tx.rs @@ -68,7 +68,7 @@ pub fn add_tx( // so the `u64/usize` is stored without any tag. // // - match tx.prefix.timelock { + match tx.prefix().additional_timelock { Timelock::None => (), Timelock::Block(height) => tables.tx_unlock_time_mut().put(&tx_id, &(height as u64))?, Timelock::Time(time) => tables.tx_unlock_time_mut().put(&tx_id, &time)?, @@ -92,7 +92,7 @@ pub fn add_tx( let mut miner_tx = false; // Key images. - for inputs in &tx.prefix.inputs { + for inputs in &tx.prefix().inputs { match inputs { // Key images. Input::ToKey { key_image, .. } => { @@ -106,70 +106,64 @@ pub fn add_tx( //------------------------------------------------------ Outputs // Output bit flags. // Set to a non-zero bit value if the unlock time is non-zero. - let output_flags = match tx.prefix.timelock { + let output_flags = match tx.prefix().additional_timelock { Timelock::None => OutputFlags::empty(), Timelock::Block(_) | Timelock::Time(_) => OutputFlags::NON_ZERO_UNLOCK_TIME, }; - let mut amount_indices = Vec::with_capacity(tx.prefix.outputs.len()); - - for (i, output) in tx.prefix.outputs.iter().enumerate() { - let key = *output.key.as_bytes(); - - // Outputs with clear amounts. - let amount_index = if let Some(amount) = output.amount { - // RingCT (v2 transaction) miner outputs. - if miner_tx && tx.prefix.version == 2 { - // Create commitment. - // - // FIXME: implement lookup table for common values: - // - let commitment = (ED25519_BASEPOINT_POINT - + monero_serai::H() * Scalar::from(amount)) - .compress() - .to_bytes(); - - add_rct_output( - &RctOutput { - key, - height, - output_flags, - tx_idx: tx_id, - commitment, - }, - tables.rct_outputs_mut(), - )? - // Pre-RingCT outputs. - } else { - add_output( - amount, + let amount_indices = match &tx { + Transaction::V1 { prefix, .. 
} => prefix + .outputs + .iter() + .map(|output| { + // Pre-RingCT outputs. + Ok(add_output( + output.amount.unwrap_or(0), &Output { - key, + key: output.key.0, height, output_flags, tx_idx: tx_id, }, tables, )? - .amount_index - } - // RingCT outputs. - } else { - let commitment = tx.rct_signatures.base.commitments[i].compress().to_bytes(); - add_rct_output( - &RctOutput { - key, - height, - output_flags, - tx_idx: tx_id, - commitment, - }, - tables.rct_outputs_mut(), - )? - }; + .amount_index) + }) + .collect::, RuntimeError>>()?, + Transaction::V2 { prefix, proofs } => prefix + .outputs + .iter() + .enumerate() + .map(|(i, output)| { + // Create commitment. + // + // FIXME: implement lookup table for common values: + // + let commitment = if miner_tx { + ED25519_BASEPOINT_POINT + + *monero_serai::generators::H * Scalar::from(output.amount.unwrap_or(0)) + } else { + proofs + .as_ref() + .expect("A V2 transaction with no RCT proofs is a miner tx") + .base + .commitments[i] + }; - amount_indices.push(amount_index); - } // for each output + // Add the RCT output. + add_rct_output( + &RctOutput { + key: output.key.0, + height, + output_flags, + tx_idx: tx_id, + commitment: commitment.compress().0, + }, + tables.rct_outputs_mut(), + ) + }) + .collect::, _>>()?, + }; tables .tx_outputs_mut() @@ -227,7 +221,7 @@ pub fn remove_tx( //------------------------------------------------------ Key Images // Is this a miner transaction? let mut miner_tx = false; - for inputs in &tx.prefix.inputs { + for inputs in &tx.prefix().inputs { match inputs { // Key images. Input::ToKey { key_image, .. } => { @@ -240,11 +234,11 @@ pub fn remove_tx( //------------------------------------------------------ Outputs // Remove each output in the transaction. - for output in &tx.prefix.outputs { + for output in &tx.prefix().outputs { // Outputs with clear amounts. if let Some(amount) = output.amount { // RingCT miner outputs. 
- if miner_tx && tx.prefix.version == 2 { + if miner_tx && tx.version() == 2 { let amount_index = get_rct_num_outputs(tables.rct_outputs())? - 1; remove_rct_output(&amount_index, tables.rct_outputs_mut())?; // Pre-RingCT outputs. diff --git a/storage/blockchain/src/service/free.rs b/storage/blockchain/src/service/free.rs index 21fb05ba..e748bbbe 100644 --- a/storage/blockchain/src/service/free.rs +++ b/storage/blockchain/src/service/free.rs @@ -48,9 +48,9 @@ pub fn init( /// /// The height offset is the difference between the top block's height and the block height that should be in that position. #[inline] -pub(super) const fn compact_history_index_to_height_offset( - i: u64, -) -> u64 { +pub(super) const fn compact_history_index_to_height_offset( + i: usize, +) -> usize { // If the position is below the initial blocks just return the position back if i <= INITIAL_BLOCKS { i @@ -66,8 +66,8 @@ pub(super) const fn compact_history_index_to_height_offset( - top_block_height: u64, +pub(super) const fn compact_history_genesis_not_included( + top_block_height: usize, ) -> bool { // If the top block height is less than the initial blocks then it will always be included. // Otherwise, we use the fact that to reach the genesis block this statement must be true (for a @@ -91,7 +91,7 @@ mod tests { proptest! { #[test] - fn compact_history(top_height in 0_u64..500_000_000) { + fn compact_history(top_height in 0_usize..500_000_000) { let mut heights = (0..) .map(compact_history_index_to_height_offset::<11>) .map_while(|i| top_height.checked_sub(i)) diff --git a/storage/blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs index 993c52db..3331a557 100644 --- a/storage/blockchain/src/service/mod.rs +++ b/storage/blockchain/src/service/mod.rs @@ -87,7 +87,7 @@ //! //! // Prepare a request to write block. //! let mut block = block_v16_tx0().clone(); -//! # block.height = 0_u64; // must be 0th height or panic in `add_block()` +//! 
# block.height = 0_usize; // must be 0th height or panic in `add_block()` //! let request = BlockchainWriteRequest::WriteBlock(block); //! //! // Send the request. diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index fbd9f894..207da416 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -278,7 +278,7 @@ fn chain_height(env: &ConcreteEnv) -> ResponseResult { /// [`BlockchainReadRequest::GeneratedCoins`]. #[inline] -fn generated_coins(env: &ConcreteEnv, height: u64) -> ResponseResult { +fn generated_coins(env: &ConcreteEnv, height: usize) -> ResponseResult { // Single-threaded, no `ThreadLocal` required. let env_inner = env.env_inner(); let tx_ro = env_inner.tx_ro()?; @@ -429,7 +429,7 @@ fn compact_chain_history(env: &ConcreteEnv) -> ResponseResult { ); /// The amount of top block IDs in the compact chain. - const INITIAL_BLOCKS: u64 = 11; + const INITIAL_BLOCKS: usize = 11; // rayon is not used here because the amount of block IDs is expected to be small. let mut block_ids = (0..) diff --git a/storage/blockchain/src/service/tests.rs b/storage/blockchain/src/service/tests.rs index 72b60e2e..8d817bb0 100644 --- a/storage/blockchain/src/service/tests.rs +++ b/storage/blockchain/src/service/tests.rs @@ -78,7 +78,7 @@ async fn test_template( // cannot be added, to get around this, manually edit the block height. for (i, block_fn) in block_fns.iter().enumerate() { let mut block = block_fn().clone(); - block.height = i as u64; + block.height = i; // Request a block to be written, assert it was written. let request = BlockchainWriteRequest::WriteBlock(block); diff --git a/storage/blockchain/src/types.rs b/storage/blockchain/src/types.rs index a1f28f05..08cde314 100644 --- a/storage/blockchain/src/types.rs +++ b/storage/blockchain/src/types.rs @@ -68,7 +68,7 @@ pub type BlockBlob = StorableVec; pub type BlockHash = [u8; 32]; /// A block's height. 
-pub type BlockHeight = u64; +pub type BlockHeight = usize; /// A key image. pub type KeyImage = [u8; 32]; diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index dd24fd59..a96a9cfc 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -6,24 +6,26 @@ license = "MIT" authors = ["Boog900", "hinto-janai"] [dependencies] -cuprate-types = { path = "../types" } -cuprate-helper = { path = "../helper", features = ["map"] } -cuprate-wire = { path = "../net/wire" } -cuprate-p2p-core = { path = "../p2p/p2p-core", features = ["borsh"] } +cuprate-types = { path = "../types" } +cuprate-helper = { path = "../helper", features = ["map"] } +cuprate-wire = { path = "../net/wire" } +cuprate-p2p-core = { path = "../p2p/p2p-core", features = ["borsh"] } -hex = { workspace = true } -hex-literal = { workspace = true } -monero-serai = { workspace = true, features = ["std", "http-rpc"] } -futures = { workspace = true, features = ["std"] } -async-trait = { workspace = true } -tokio = { workspace = true, features = ["full"] } -tokio-util = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -bytes = { workspace = true, features = ["std"] } -tempfile = { workspace = true } -paste = { workspace = true } -borsh = { workspace = true, features = ["derive"]} +hex = { workspace = true } +hex-literal = { workspace = true } +monero-serai = { workspace = true, features = ["std"] } +monero-simple-request-rpc = { workspace = true } +monero-rpc = { workspace = true } +futures = { workspace = true, features = ["std"] } +async-trait = { workspace = true } +tokio = { workspace = true, features = ["full"] } +tokio-util = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +bytes = { workspace = true, features = ["std"] } +tempfile = { workspace = true } +paste = { workspace = true } +borsh = { workspace = true, features = ["derive"]} [dev-dependencies] hex = { workspace = true } diff --git 
a/test-utils/src/data/constants.rs b/test-utils/src/data/constants.rs index c1da6d01..fff04416 100644 --- a/test-utils/src/data/constants.rs +++ b/test-utils/src/data/constants.rs @@ -34,12 +34,12 @@ macro_rules! const_block_blob { #[doc = ""] #[doc = concat!("let block = Block::read(&mut ", stringify!($name), ").unwrap();")] #[doc = ""] - #[doc = concat!("assert_eq!(block.header.major_version, ", $major_version, ");")] - #[doc = concat!("assert_eq!(block.header.minor_version, ", $minor_version, ");")] + #[doc = concat!("assert_eq!(block.header.hardfork_version, ", $major_version, ");")] + #[doc = concat!("assert_eq!(block.header.hardfork_signal, ", $minor_version, ");")] #[doc = concat!("assert_eq!(block.header.timestamp, ", $timestamp, ");")] #[doc = concat!("assert_eq!(block.header.nonce, ", $nonce, ");")] - #[doc = concat!("assert!(matches!(block.miner_tx.prefix.inputs[0], Input::Gen(", $height, ")));")] - #[doc = concat!("assert_eq!(block.txs.len(), ", $tx_len, ");")] + #[doc = concat!("assert!(matches!(block.miner_transaction.prefix().inputs[0], Input::Gen(", $height, ")));")] + #[doc = concat!("assert_eq!(block.transactions.len(), ", $tx_len, ");")] #[doc = concat!("assert_eq!(hex::encode(block.hash()), \"", $hash, "\")")] /// ``` pub const $name: &[u8] = include_bytes!($data_path); @@ -107,7 +107,6 @@ macro_rules! const_tx_blob { timelock: $timelock:expr, // Transaction's timelock (use the real type `Timelock`) input_len: $input_len:literal, // Amount of inputs output_len: $output_len:literal, // Amount of outputs - signatures_len: $signatures_len:literal, // Amount of signatures ) => { #[doc = concat!("Transaction with hash `", $hash, "`.")] /// @@ -117,11 +116,10 @@ macro_rules! 
const_tx_blob { #[doc = ""] #[doc = concat!("let tx = Transaction::read(&mut ", stringify!($name), ").unwrap();")] #[doc = ""] - #[doc = concat!("assert_eq!(tx.prefix.version, ", $version, ");")] - #[doc = concat!("assert_eq!(tx.prefix.timelock, ", stringify!($timelock), ");")] - #[doc = concat!("assert_eq!(tx.prefix.inputs.len(), ", $input_len, ");")] - #[doc = concat!("assert_eq!(tx.prefix.outputs.len(), ", $output_len, ");")] - #[doc = concat!("assert_eq!(tx.signatures.len(), ", $signatures_len, ");")] + #[doc = concat!("assert_eq!(tx.version(), ", $version, ");")] + #[doc = concat!("assert_eq!(tx.prefix().additional_timelock, ", stringify!($timelock), ");")] + #[doc = concat!("assert_eq!(tx.prefix().inputs.len(), ", $input_len, ");")] + #[doc = concat!("assert_eq!(tx.prefix().outputs.len(), ", $output_len, ");")] #[doc = concat!("assert_eq!(hex::encode(tx.hash()), \"", $hash, "\")")] /// ``` pub const $name: &[u8] = include_bytes!($data_path); @@ -136,7 +134,6 @@ const_tx_blob! { timelock: Timelock::Block(100_081), input_len: 1, output_len: 5, - signatures_len: 0, } const_tx_blob! { @@ -147,7 +144,6 @@ const_tx_blob! { timelock: Timelock::None, input_len: 19, output_len: 61, - signatures_len: 19, } const_tx_blob! { @@ -158,7 +154,6 @@ const_tx_blob! { timelock: Timelock::None, input_len: 46, output_len: 46, - signatures_len: 46, } const_tx_blob! { @@ -169,7 +164,6 @@ const_tx_blob! { timelock: Timelock::None, input_len: 1, output_len: 2, - signatures_len: 0, } const_tx_blob! { @@ -180,7 +174,6 @@ const_tx_blob! { timelock: Timelock::None, input_len: 1, output_len: 2, - signatures_len: 0, } const_tx_blob! { @@ -191,7 +184,6 @@ const_tx_blob! { timelock: Timelock::None, input_len: 2, output_len: 2, - signatures_len: 0, } const_tx_blob! { @@ -202,7 +194,6 @@ const_tx_blob! { timelock: Timelock::None, input_len: 2, output_len: 5, - signatures_len: 2, } const_tx_blob! { @@ -213,7 +204,6 @@ const_tx_blob! 
{ timelock: Timelock::None, input_len: 2, output_len: 2, - signatures_len: 0, } //---------------------------------------------------------------------------------------------------- Tests diff --git a/test-utils/src/data/free.rs b/test-utils/src/data/free.rs index ee6f49a6..d7f61ae5 100644 --- a/test-utils/src/data/free.rs +++ b/test-utils/src/data/free.rs @@ -8,11 +8,11 @@ //---------------------------------------------------------------------------------------------------- Import use std::sync::OnceLock; -use hex_literal::hex; -use monero_serai::{block::Block, transaction::Transaction}; - use cuprate_helper::map::combine_low_high_bits_to_u128; use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; +use hex_literal::hex; +use monero_serai::transaction::Input; +use monero_serai::{block::Block, transaction::Transaction}; use crate::data::constants::{ BLOCK_43BD1F, BLOCK_5ECB7E, BLOCK_F91043, TX_2180A8, TX_3BC7FF, TX_84D48D, TX_9E3F73, @@ -31,7 +31,7 @@ use crate::data::constants::{ struct VerifiedBlockMap { block_blob: &'static [u8], pow_hash: [u8; 32], - height: u64, + height: usize, generated_coins: u64, weight: usize, long_term_weight: usize, @@ -68,11 +68,11 @@ impl VerifiedBlockMap { assert_eq!( txs.len(), - block.txs.len(), + block.transactions.len(), "(deserialized txs).len() != (txs hashes in block).len()" ); - for (tx, tx_hash_in_block) in txs.iter().zip(&block.txs) { + for (tx, tx_hash_in_block) in txs.iter().zip(&block.transactions) { assert_eq!( &tx.tx_hash, tx_hash_in_block, "deserialized tx hash is not the same as the one in the parent block" @@ -103,13 +103,43 @@ fn to_tx_verification_data(tx_blob: impl AsRef<[u8]>) -> VerifiedTransactionInfo let tx = Transaction::read(&mut tx_blob.as_slice()).unwrap(); VerifiedTransactionInformation { tx_weight: tx.weight(), - fee: tx.rct_signatures.base.fee, + fee: tx_fee(&tx), tx_hash: tx.hash(), tx_blob, tx, } } +/// Calculates the fee of the [`Transaction`]. 
+/// +/// # Panics +/// This will panic if the inputs overflow or the transaction outputs too much. +pub fn tx_fee(tx: &Transaction) -> u64 { + let mut fee = 0_u64; + + match &tx { + Transaction::V1 { prefix, .. } => { + for input in &prefix.inputs { + match input { + Input::Gen(_) => return 0, + Input::ToKey { amount, .. } => { + fee = fee.checked_add(amount.unwrap_or(0)).unwrap(); + } + } + } + + for output in &prefix.outputs { + fee.checked_sub(output.amount.unwrap_or(0)).unwrap(); + } + } + Transaction::V2 { proofs, .. } => { + fee = proofs.as_ref().unwrap().base.fee; + } + }; + + fee +} + //---------------------------------------------------------------------------------------------------- Blocks /// Generate a block accessor function with this signature: /// `fn() -> &'static VerifiedBlockInformation` @@ -255,7 +285,6 @@ macro_rules! transaction_verification_data_fn { #[doc = concat!("assert_eq!(tx.tx_blob, ", stringify!($tx_blob), ");")] #[doc = concat!("assert_eq!(tx.tx_weight, ", $weight, ");")] #[doc = concat!("assert_eq!(tx.tx_hash, hex!(\"", $hash, "\"));")] - #[doc = "assert_eq!(tx.fee, tx.tx.rct_signatures.base.fee);"] /// ``` pub fn $fn_name() -> &'static VerifiedTransactionInformation { static TX: OnceLock = OnceLock::new(); diff --git a/test-utils/src/data/mod.rs b/test-utils/src/data/mod.rs index 49ea89aa..696c6865 100644 --- a/test-utils/src/data/mod.rs +++ b/test-utils/src/data/mod.rs @@ -32,4 +32,6 @@ pub use constants::{ }; mod free; -pub use free::{block_v16_tx0, block_v1_tx2, block_v9_tx3, tx_v1_sig0, tx_v1_sig2, tx_v2_rct3}; +pub use free::{ + block_v16_tx0, block_v1_tx2, block_v9_tx3, tx_fee, tx_v1_sig0, tx_v1_sig2, tx_v2_rct3, +}; diff --git a/test-utils/src/rpc/client.rs b/test-utils/src/rpc/client.rs index 28c49d8e..fbe6fb9e 100644 --- a/test-utils/src/rpc/client.rs +++ b/test-utils/src/rpc/client.rs @@ -5,13 +5,14 @@ use serde::Deserialize; use serde_json::json; use tokio::task::spawn_blocking; -use monero_serai::{ - block::Block, - 
rpc::{HttpRpc, Rpc}, -}; +use monero_rpc::Rpc; +use monero_serai::block::Block; +use monero_simple_request_rpc::SimpleRequestRpc; use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation}; +use crate::data::tx_fee; + //---------------------------------------------------------------------------------------------------- Constants /// The default URL used for Monero RPC connections. pub const LOCALHOST_RPC_URL: &str = "http://127.0.0.1:18081"; @@ -20,7 +21,7 @@ pub const LOCALHOST_RPC_URL: &str = "http://127.0.0.1:18081"; /// An HTTP RPC client for Monero. pub struct HttpRpcClient { address: String, - rpc: Rpc, + rpc: SimpleRequestRpc, } impl HttpRpcClient { @@ -40,7 +41,7 @@ impl HttpRpcClient { let address = address.unwrap_or_else(|| LOCALHOST_RPC_URL.to_string()); Self { - rpc: HttpRpc::new(address.clone()).await.unwrap(), + rpc: SimpleRequestRpc::new(address.clone()).await.unwrap(), address, } } @@ -53,7 +54,7 @@ impl HttpRpcClient { /// Access to the inner RPC client for other usage. #[allow(dead_code)] - const fn rpc(&self) -> &Rpc { + const fn rpc(&self) -> &SimpleRequestRpc { &self.rpc } @@ -62,7 +63,7 @@ impl HttpRpcClient { /// # Panics /// This function will panic at any error point, e.g., /// if the node cannot be connected to, if deserialization fails, etc. 
- pub async fn get_verified_block_information(&self, height: u64) -> VerifiedBlockInformation { + pub async fn get_verified_block_information(&self, height: usize) -> VerifiedBlockInformation { #[derive(Debug, Deserialize)] struct Result { blob: String, @@ -75,7 +76,7 @@ impl HttpRpcClient { long_term_weight: usize, cumulative_difficulty: u128, hash: String, - height: u64, + height: usize, pow_hash: String, reward: u64, // generated_coins + total_tx_fees } @@ -111,7 +112,7 @@ impl HttpRpcClient { .unwrap(); let txs: Vec = self - .get_transaction_verification_data(&block.txs) + .get_transaction_verification_data(&block.transactions) .await .collect(); @@ -124,8 +125,8 @@ impl HttpRpcClient { let total_tx_fees = txs.iter().map(|tx| tx.fee).sum::(); let generated_coins = block - .miner_tx - .prefix + .miner_transaction + .prefix() .outputs .iter() .map(|output| output.amount.expect("miner_tx amount was None")) @@ -173,7 +174,7 @@ impl HttpRpcClient { tx_blob: tx.serialize(), tx_weight: tx.weight(), tx_hash, - fee: tx.rct_signatures.base.fee, + fee: tx_fee(&tx), tx, } }) @@ -199,7 +200,7 @@ mod tests { #[allow(clippy::too_many_arguments)] async fn assert_eq( rpc: &HttpRpcClient, - height: u64, + height: usize, block_hash: [u8; 32], pow_hash: [u8; 32], generated_coins: u64, diff --git a/types/Cargo.toml b/types/Cargo.toml index 8f16eb48..99fa978b 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -22,5 +22,6 @@ bytes = { workspace = true } curve25519-dalek = { workspace = true } monero-serai = { workspace = true } serde = { workspace = true, features = ["derive"], optional = true } +borsh = { workspace = true, optional = true } [dev-dependencies] \ No newline at end of file diff --git a/types/src/blockchain.rs b/types/src/blockchain.rs index f1a8a75e..b502c3fa 100644 --- a/types/src/blockchain.rs +++ b/types/src/blockchain.rs @@ -25,12 +25,12 @@ pub enum BlockchainReadRequest { /// Request a block's extended header. /// /// The input is the block's height. 
- BlockExtendedHeader(u64), + BlockExtendedHeader(usize), /// Request a block's hash. /// /// The input is the block's height and the chain it is on. - BlockHash(u64, Chain), + BlockHash(usize, Chain), /// Request to check if we have a block and which [`Chain`] it is on. /// @@ -45,7 +45,7 @@ pub enum BlockchainReadRequest { /// Request a range of block extended headers. /// /// The input is a range of block heights. - BlockExtendedHeaderInRange(Range, Chain), + BlockExtendedHeaderInRange(Range, Chain), /// Request the current chain height. /// @@ -53,7 +53,7 @@ pub enum BlockchainReadRequest { ChainHeight, /// Request the total amount of generated coins (atomic units) at this height. - GeneratedCoins(u64), + GeneratedCoins(usize), /// Request data for multiple outputs. /// @@ -137,7 +137,7 @@ pub enum BlockchainResponse { /// Response to [`BlockchainReadRequest::FindBlock`]. /// /// Inner value is the chain and height of the block if found. - FindBlock(Option<(Chain, u64)>), + FindBlock(Option<(Chain, usize)>), /// Response to [`BlockchainReadRequest::FilterUnknownHashes`]. /// @@ -152,7 +152,7 @@ pub enum BlockchainResponse { /// Response to [`BlockchainReadRequest::ChainHeight`]. /// /// Inner value is the chain height, and the top block's hash. - ChainHeight(u64, [u8; 32]), + ChainHeight(usize, [u8; 32]), /// Response to [`BlockchainReadRequest::GeneratedCoins`]. /// @@ -195,7 +195,7 @@ pub enum BlockchainResponse { /// Contains the index of the first unknown block and its expected height. /// /// This will be [`None`] if all blocks were known. - FindFirstUnknown(Option<(usize, u64)>), + FindFirstUnknown(Option<(usize, usize)>), //------------------------------------------------------ Writes /// Response to [`BlockchainWriteRequest::WriteBlock`]. 
diff --git a/types/src/types.rs b/types/src/types.rs index db315075..a4a7135f 100644 --- a/types/src/types.rs +++ b/types/src/types.rs @@ -17,13 +17,13 @@ pub struct ExtendedBlockHeader { /// /// This can also be represented with `cuprate_consensus::HardFork`. /// - /// This is the same value as [`monero_serai::block::BlockHeader::major_version`]. + /// This is the same value as [`monero_serai::block::BlockHeader::hardfork_version`]. pub version: u8, /// The block's hard-fork vote. /// /// This can also be represented with `cuprate_consensus::HardFork`. /// - /// This is the same value as [`monero_serai::block::BlockHeader::minor_version`]. + /// This is the same value as [`monero_serai::block::BlockHeader::hardfork_signal`]. pub vote: u8, /// The UNIX time at which the block was mined. pub timestamp: u64, @@ -72,7 +72,7 @@ pub struct VerifiedBlockInformation { /// /// [`Block::serialize`]. pub block_blob: Vec, - /// All the transactions in the block, excluding the [`Block::miner_tx`]. + /// All the transactions in the block, excluding the [`Block::miner_transaction`]. pub txs: Vec, /// The block's hash. /// @@ -81,7 +81,7 @@ pub struct VerifiedBlockInformation { /// The block's proof-of-work hash. pub pow_hash: [u8; 32], /// The block's height. - pub height: u64, + pub height: usize, /// The amount of generated coins (atomic units) in this block. pub generated_coins: u64, /// The adjusted block size, in bytes. @@ -119,7 +119,7 @@ pub struct AltBlockInformation { /// /// [`Block::serialize`]. pub block_blob: Vec, - /// All the transactions in the block, excluding the [`Block::miner_tx`]. + /// All the transactions in the block, excluding the [`Block::miner_transaction`]. pub txs: Vec, /// The block's hash. /// @@ -128,7 +128,7 @@ pub struct AltBlockInformation { /// The block's proof-of-work hash. pub pow_hash: [u8; 32], /// The block's height. - pub height: u64, + pub height: usize, /// The adjusted block size, in bytes. 
pub weight: usize, /// The long term block weight, which is the weight factored in with previous block weights. @@ -144,7 +144,7 @@ pub struct AltBlockInformation { #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct OutputOnChain { /// The block height this output belongs to. - pub height: u64, + pub height: usize, /// The timelock of this output, if any. pub time_lock: Timelock, /// The public key of this output, if any.