Merge branch 'main' into update

Commit 2fe52758fa: 190 changed files with 3071 additions and 1489 deletions
Cargo.lock (generated)

@@ -501,7 +501,6 @@ dependencies = [
  "cuprate-p2p-core",
  "cuprate-pruning",
  "cuprate-test-utils",
- "cuprate-wire",
  "futures",
  "indexmap",
  "rand",
@@ -540,6 +539,7 @@ dependencies = [
  "monero-serai",
  "pretty_assertions",
  "proptest",
+ "rand",
  "rayon",
  "serde",
  "tempfile",
@@ -552,6 +552,7 @@ dependencies = [
 name = "cuprate-consensus"
 version = "0.1.0"
 dependencies = [
+ "cfg-if",
  "cuprate-consensus-rules",
  "cuprate-helper",
  "cuprate-test-utils",
@@ -579,6 +580,7 @@ dependencies = [
 name = "cuprate-consensus-rules"
 version = "0.1.0"
 dependencies = [
+ "cfg-if",
  "crypto-bigint",
  "cuprate-cryptonight",
  "cuprate-helper",
@@ -670,15 +672,14 @@ dependencies = [
  "cuprate-blockchain",
  "cuprate-consensus",
  "cuprate-consensus-rules",
+ "cuprate-helper",
  "cuprate-types",
  "hex",
  "hex-literal",
  "monero-serai",
- "rayon",
  "sha3",
  "thiserror",
  "tokio",
- "tokio-test",
  "tower",
 ]
 
@@ -698,6 +699,7 @@ version = "0.1.0"
 dependencies = [
  "chrono",
  "crossbeam",
+ "curve25519-dalek",
  "dirs",
  "futures",
  "libc",
@@ -723,6 +725,7 @@ version = "0.1.0"
 dependencies = [
  "bitflags 2.6.0",
  "bytes",
+ "cfg-if",
  "cuprate-helper",
  "futures",
  "proptest",
@@ -773,6 +776,7 @@ version = "0.1.0"
 dependencies = [
  "async-trait",
  "borsh",
+ "cfg-if",
  "cuprate-helper",
  "cuprate-pruning",
  "cuprate-test-utils",
@@ -787,7 +791,6 @@ dependencies = [
  "tokio-util",
  "tower",
  "tracing",
- "tracing-subscriber",
 ]
 
 [[package]]
@@ -837,7 +840,6 @@ version = "0.1.0"
 dependencies = [
  "async-trait",
  "borsh",
- "bytes",
  "cuprate-helper",
  "cuprate-p2p-core",
  "cuprate-types",
Cargo.toml (workspace lints)

@@ -210,7 +210,6 @@ unseparated_literal_suffix = "deny"
 unnecessary_safety_doc = "deny"
 unnecessary_safety_comment = "deny"
 unnecessary_self_imports = "deny"
-tests_outside_test_module = "deny"
 string_to_string = "deny"
 rest_pat_in_fully_bound_structs = "deny"
 redundant_type_annotations = "deny"
@@ -264,6 +263,7 @@ empty_enum_variants_with_brackets = "deny"
 empty_drop = "deny"
 clone_on_ref_ptr = "deny"
 upper_case_acronyms = "deny"
+allow_attributes = "deny"
 
 # Hot
 # inline_always = "deny"
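The new `allow_attributes = "deny"` line is what drives the `#[allow]` to `#[expect]` swaps throughout the Rust files below. A minimal sketch of the difference, using a hypothetical enum (`#[expect]` is stable since Rust 1.81):

    // `#[expect]` suppresses the lint exactly like `#[allow]`, but emits a
    // warning if the lint stops firing, so suppressions cannot go stale.
    #[expect(clippy::large_enum_variant)]
    pub enum Response {
        Small(u8),
        Large([u8; 1024]), // much bigger than the other variant
    }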
consensus/Cargo.toml

@@ -12,6 +12,7 @@ cuprate-helper = { path = "../helper", default-features = false, features = ["st
 cuprate-consensus-rules = { path = "./rules", features = ["rayon"] }
 cuprate-types = { path = "../types" }
 
+cfg-if = { workspace = true }
 thiserror = { workspace = true }
 tower = { workspace = true, features = ["util"] }
 tracing = { workspace = true, features = ["std", "attributes"] }
@@ -19,7 +20,6 @@ futures = { workspace = true, features = ["std", "async-await"] }
 
 randomx-rs = { workspace = true }
 monero-serai = { workspace = true, features = ["std"] }
-curve25519-dalek = { workspace = true }
 
 rayon = { workspace = true }
 thread_local = { workspace = true }
@@ -34,8 +34,12 @@ cuprate-test-utils = { path = "../test-utils" }
 cuprate-consensus-rules = {path = "./rules", features = ["proptest"]}
 
 hex-literal = { workspace = true }
+curve25519-dalek = { workspace = true }
 
 tokio = { workspace = true, features = ["rt-multi-thread", "macros"]}
 tokio-test = { workspace = true }
 proptest = { workspace = true }
 proptest-derive = { workspace = true }
+
+[lints]
+workspace = true
consensus/fast-sync/Cargo.toml

@@ -9,19 +9,22 @@ name = "cuprate-fast-sync-create-hashes"
 path = "src/create.rs"
 
 [dependencies]
-clap = { workspace = true, features = ["derive", "std"] }
 cuprate-blockchain = { path = "../../storage/blockchain" }
 cuprate-consensus = { path = ".." }
 cuprate-consensus-rules = { path = "../rules" }
 cuprate-types = { path = "../../types" }
-hex.workspace = true
-hex-literal.workspace = true
-monero-serai.workspace = true
-rayon.workspace = true
-sha3 = "0.10.8"
-thiserror.workspace = true
+cuprate-helper = { path = "../../helper", features = ["cast"] }
+clap = { workspace = true, features = ["derive", "std"] }
+hex = { workspace = true }
+hex-literal = { workspace = true }
+monero-serai = { workspace = true }
+sha3 = { version = "0.10.8" }
+thiserror = { workspace = true }
 tokio = { workspace = true, features = ["full"] }
-tower.workspace = true
+tower = { workspace = true }
 
 [dev-dependencies]
-tokio-test = "0.4.4"
+tokio-test = { workspace = true }
+
+[lints]
+workspace = true
consensus/fast-sync/src/create.rs

@@ -1,3 +1,8 @@
+#![expect(
+    unused_crate_dependencies,
+    reason = "binary shares same Cargo.toml as library"
+)]
+
 use std::{fmt::Write, fs::write};
 
 use clap::Parser;
@@ -70,16 +75,13 @@ async fn main() {
     let mut height = 0_usize;
 
     while height < height_target {
-        match read_batch(&mut read_handle, height).await {
-            Ok(block_ids) => {
-                let hash = hash_of_hashes(block_ids.as_slice());
-                hashes_of_hashes.push(hash);
-            }
-            Err(_) => {
-                println!("Failed to read next batch from database");
-                break;
-            }
-        }
+        if let Ok(block_ids) = read_batch(&mut read_handle, height).await {
+            let hash = hash_of_hashes(block_ids.as_slice());
+            hashes_of_hashes.push(hash);
+        } else {
+            println!("Failed to read next batch from database");
+            break;
+        }
 
         height += BATCH_SIZE;
     }
@@ -88,5 +90,5 @@ async fn main() {
     let generated = generate_hex(&hashes_of_hashes);
     write("src/data/hashes_of_hashes", generated).expect("Could not write file");
 
-    println!("Generated hashes up to block height {}", height);
+    println!("Generated hashes up to block height {height}");
 }
consensus/fast-sync/src/data/hashes_of_hashes

@@ -1,12 +1,12 @@
 [
-    hex!("1adffbaf832784406018009e07d3dc3a39da7edb6632523c119ed8acb32eb934"),
-    hex!("ae960265e3398d04f3cd4f949ed13c2689424887c71c1441a03d900a9d3a777f"),
-    hex!("938c72d267bbd3a17cdecbe02443d00012ee62d6e9f3524f5a914192110b1798"),
-    hex!("de0c82e51549b6514b42a591fd5440dddb5cc0118ec461459a99017bf06a0a0a"),
-    hex!("9a50f4586ec7e0fb58c6383048d3b334180235fd34bb714af20f1a3ebce4c911"),
-    hex!("5a3942f9bb318d65997bf57c40e045d62e7edbe35f3dae57499c2c5554896543"),
-    hex!("9dccee3b094cdd1b98e357c2c81bfcea798ea75efd94e67c6f5e86f428c5ec2c"),
-    hex!("620397540d44f21c3c57c20e9d47c6aaf0b1bf4302a4d43e75f2e33edd1a4032"),
-    hex!("ef6c612fb17bd70ac2ac69b2f85a421b138cc3a81daf622b077cb402dbf68377"),
-    hex!("6815ecb2bd73a3ba5f20558bfe1b714c30d6892b290e0d6f6cbf18237cedf75a"),
+    hex_literal::hex!("1adffbaf832784406018009e07d3dc3a39da7edb6632523c119ed8acb32eb934"),
+    hex_literal::hex!("ae960265e3398d04f3cd4f949ed13c2689424887c71c1441a03d900a9d3a777f"),
+    hex_literal::hex!("938c72d267bbd3a17cdecbe02443d00012ee62d6e9f3524f5a914192110b1798"),
+    hex_literal::hex!("de0c82e51549b6514b42a591fd5440dddb5cc0118ec461459a99017bf06a0a0a"),
+    hex_literal::hex!("9a50f4586ec7e0fb58c6383048d3b334180235fd34bb714af20f1a3ebce4c911"),
+    hex_literal::hex!("5a3942f9bb318d65997bf57c40e045d62e7edbe35f3dae57499c2c5554896543"),
+    hex_literal::hex!("9dccee3b094cdd1b98e357c2c81bfcea798ea75efd94e67c6f5e86f428c5ec2c"),
+    hex_literal::hex!("620397540d44f21c3c57c20e9d47c6aaf0b1bf4302a4d43e75f2e33edd1a4032"),
+    hex_literal::hex!("ef6c612fb17bd70ac2ac69b2f85a421b138cc3a81daf622b077cb402dbf68377"),
+    hex_literal::hex!("6815ecb2bd73a3ba5f20558bfe1b714c30d6892b290e0d6f6cbf18237cedf75a"),
 ]
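A side note on the `hex!` to `hex_literal::hex!` change here: this data file is pulled into Rust source, and fully qualifying the macro path removes the need for a local import, which is why the matching `#[allow(unused_imports)] use hex_literal::hex;` pair is deleted in fast_sync.rs below. A minimal sketch of the pattern, with a hypothetical constant name:

    // No `use hex_literal::hex;` import required at the inclusion site.
    const CHECKPOINT_0: [u8; 32] =
        hex_literal::hex!("1adffbaf832784406018009e07d3dc3a39da7edb6632523c119ed8acb32eb934");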
consensus/fast-sync/src/fast_sync.rs

@@ -6,8 +6,6 @@ use std::{
     task::{Context, Poll},
 };
 
-#[allow(unused_imports)]
-use hex_literal::hex;
 use monero_serai::{
     block::Block,
     transaction::{Input, Transaction},
@@ -19,6 +17,7 @@ use cuprate_consensus::{
     transactions::new_tx_verification_data,
 };
 use cuprate_consensus_rules::{miner_tx::MinerTxError, ConsensusError};
+use cuprate_helper::cast::u64_to_usize;
 use cuprate_types::{VerifiedBlockInformation, VerifiedTransactionInformation};
 
 use crate::{hash_of_hashes, BlockId, HashOfHashes};
@@ -31,9 +30,9 @@ const BATCH_SIZE: usize = 512;
 
 #[cfg(test)]
 static HASHES_OF_HASHES: &[HashOfHashes] = &[
-    hex!("3fdc9032c16d440f6c96be209c36d3d0e1aed61a2531490fe0ca475eb615c40a"),
-    hex!("0102030405060708010203040506070801020304050607080102030405060708"),
-    hex!("0102030405060708010203040506070801020304050607080102030405060708"),
+    hex_literal::hex!("3fdc9032c16d440f6c96be209c36d3d0e1aed61a2531490fe0ca475eb615c40a"),
+    hex_literal::hex!("0102030405060708010203040506070801020304050607080102030405060708"),
+    hex_literal::hex!("0102030405060708010203040506070801020304050607080102030405060708"),
 ];
 
 #[cfg(test)]
@@ -44,14 +43,14 @@ fn max_height() -> u64 {
     (HASHES_OF_HASHES.len() * BATCH_SIZE) as u64
 }
 
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Eq)]
 pub struct ValidBlockId(BlockId);
 
 fn valid_block_ids(block_ids: &[BlockId]) -> Vec<ValidBlockId> {
     block_ids.iter().map(|b| ValidBlockId(*b)).collect()
 }
 
-#[allow(clippy::large_enum_variant)]
+#[expect(clippy::large_enum_variant)]
 pub enum FastSyncRequest {
     ValidateHashes {
         start_height: u64,
@@ -64,8 +63,8 @@ pub enum FastSyncRequest {
     },
 }
 
-#[allow(clippy::large_enum_variant)]
-#[derive(Debug, PartialEq)]
+#[expect(clippy::large_enum_variant)]
+#[derive(Debug, PartialEq, Eq)]
 pub enum FastSyncResponse {
     ValidateHashes {
         validated_hashes: Vec<ValidBlockId>,
@@ -74,7 +73,7 @@ pub enum FastSyncResponse {
     ValidateBlock(VerifiedBlockInformation),
 }
 
-#[derive(thiserror::Error, Debug, PartialEq)]
+#[derive(thiserror::Error, Debug, PartialEq, Eq)]
 pub enum FastSyncError {
     #[error("Block does not match its expected hash")]
     BlockHashMismatch,
@@ -127,9 +126,9 @@ where
         + Send
         + 'static,
 {
-    #[allow(dead_code)]
-    pub(crate) fn new(context_svc: C) -> FastSyncService<C> {
-        FastSyncService { context_svc }
+    #[expect(dead_code)]
+    pub(crate) const fn new(context_svc: C) -> Self {
+        Self { context_svc }
     }
 }
@@ -161,7 +160,7 @@ where
                 FastSyncRequest::ValidateHashes {
                     start_height,
                     block_ids,
-                } => validate_hashes(start_height, &block_ids).await,
+                } => validate_hashes(start_height, &block_ids),
                 FastSyncRequest::ValidateBlock { block, txs, token } => {
                     validate_block(context_svc, block, txs, token).await
                 }
@@ -170,11 +169,13 @@ where
     }
 }
 
-async fn validate_hashes(
+fn validate_hashes(
     start_height: u64,
     block_ids: &[BlockId],
 ) -> Result<FastSyncResponse, FastSyncError> {
-    if start_height as usize % BATCH_SIZE != 0 {
+    let start_height_usize = u64_to_usize(start_height);
+
+    if start_height_usize % BATCH_SIZE != 0 {
         return Err(FastSyncError::InvalidStartHeight);
     }
 
@@ -182,9 +183,9 @@ async fn validate_hashes(
         return Err(FastSyncError::OutOfRange);
     }
 
-    let stop_height = start_height as usize + block_ids.len();
+    let stop_height = start_height_usize + block_ids.len();
 
-    let batch_from = start_height as usize / BATCH_SIZE;
+    let batch_from = start_height_usize / BATCH_SIZE;
     let batch_to = cmp::min(stop_height / BATCH_SIZE, HASHES_OF_HASHES.len());
     let n_batches = batch_to - batch_from;
 
@@ -285,7 +286,7 @@ where
         block_blob,
         txs: verified_txs,
         block_hash,
-        pow_hash: [0u8; 32],
+        pow_hash: [0_u8; 32],
         height: *height,
         generated_coins,
         weight,
@@ -299,46 +300,36 @@ where
 #[cfg(test)]
 mod tests {
     use super::*;
-    use tokio_test::block_on;
 
     #[test]
     fn test_validate_hashes_errors() {
-        let ids = [[1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32], [5u8; 32]];
+        let ids = [[1_u8; 32], [2_u8; 32], [3_u8; 32], [4_u8; 32], [5_u8; 32]];
         assert_eq!(
-            block_on(validate_hashes(3, &[])),
+            validate_hashes(3, &[]),
             Err(FastSyncError::InvalidStartHeight)
         );
         assert_eq!(
-            block_on(validate_hashes(3, &ids)),
+            validate_hashes(3, &ids),
            Err(FastSyncError::InvalidStartHeight)
         );
 
-        assert_eq!(
-            block_on(validate_hashes(20, &[])),
-            Err(FastSyncError::OutOfRange)
-        );
-        assert_eq!(
-            block_on(validate_hashes(20, &ids)),
-            Err(FastSyncError::OutOfRange)
-        );
+        assert_eq!(validate_hashes(20, &[]), Err(FastSyncError::OutOfRange));
+        assert_eq!(validate_hashes(20, &ids), Err(FastSyncError::OutOfRange));
 
-        assert_eq!(
-            block_on(validate_hashes(4, &[])),
-            Err(FastSyncError::NothingToDo)
-        );
-        assert_eq!(
-            block_on(validate_hashes(4, &ids[..3])),
-            Err(FastSyncError::NothingToDo)
-        );
+        assert_eq!(validate_hashes(4, &[]), Err(FastSyncError::NothingToDo));
+        assert_eq!(
+            validate_hashes(4, &ids[..3]),
+            Err(FastSyncError::NothingToDo)
+        );
     }
 
     #[test]
     fn test_validate_hashes_success() {
-        let ids = [[1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32], [5u8; 32]];
+        let ids = [[1_u8; 32], [2_u8; 32], [3_u8; 32], [4_u8; 32], [5_u8; 32]];
         let validated_hashes = valid_block_ids(&ids[0..4]);
         let unknown_hashes = ids[4..].to_vec();
         assert_eq!(
-            block_on(validate_hashes(0, &ids)),
+            validate_hashes(0, &ids),
             Ok(FastSyncResponse::ValidateHashes {
                 validated_hashes,
                 unknown_hashes
@@ -349,15 +340,10 @@ mod tests {
     #[test]
     fn test_validate_hashes_mismatch() {
         let ids = [
-            [1u8; 32], [2u8; 32], [3u8; 32], [5u8; 32], [1u8; 32], [2u8; 32], [3u8; 32], [4u8; 32],
+            [1_u8; 32], [2_u8; 32], [3_u8; 32], [5_u8; 32], [1_u8; 32], [2_u8; 32], [3_u8; 32],
+            [4_u8; 32],
         ];
-        assert_eq!(
-            block_on(validate_hashes(0, &ids)),
-            Err(FastSyncError::Mismatch)
-        );
-        assert_eq!(
-            block_on(validate_hashes(4, &ids)),
-            Err(FastSyncError::Mismatch)
-        );
+        assert_eq!(validate_hashes(0, &ids), Err(FastSyncError::Mismatch));
+        assert_eq!(validate_hashes(4, &ids), Err(FastSyncError::Mismatch));
     }
 }
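For orientation: fast-sync validates block IDs in fixed batches. `BATCH_SIZE` (512) hashes are folded into one digest and compared against an embedded checkpoint, which is why `validate_hashes` is pure computation and could drop `async` above. A sketch of what such a helper plausibly looks like, assuming Keccak-256 from the `sha3` crate (the real `hash_of_hashes` is imported from this crate's root and may differ in detail):

    use sha3::{Digest, Keccak256};

    // Fold a batch of 32-byte block IDs into a single checkpoint digest.
    fn hash_of_hashes(ids: &[[u8; 32]]) -> [u8; 32] {
        let mut hasher = Keccak256::new();
        for id in ids {
            hasher.update(id);
        }
        hasher.finalize().into()
    }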
consensus/fast-sync/src/lib.rs

@@ -1,3 +1,9 @@
+// Used in `create.rs`
+use clap as _;
+use cuprate_blockchain as _;
+use hex as _;
+use tokio as _;
+
 pub mod fast_sync;
 pub mod util;
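The anonymous `use <crate> as _;` imports above satisfy the `unused_crate_dependencies` lint for crates that only the `create.rs` binary target uses: `as _` marks the dependency as intentional without binding a name. The idiom in isolation, with a hypothetical crate:

    #![warn(unused_crate_dependencies)]

    // `serde` is in Cargo.toml for another target of this package;
    // anchor it so the lint does not flag it in this one.
    use serde as _;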
consensus/rules/Cargo.toml

@@ -7,7 +7,7 @@ authors = ["Boog900"]
 
 [features]
 default = []
-proptest = ["dep:proptest", "dep:proptest-derive", "cuprate-types/proptest"]
+proptest = ["cuprate-types/proptest"]
 rayon = ["dep:rayon"]
 
 [dependencies]
@@ -24,15 +24,16 @@ hex = { workspace = true, features = ["std"] }
 hex-literal = { workspace = true }
 crypto-bigint = { workspace = true }
 
+cfg-if = { workspace = true }
 tracing = { workspace = true, features = ["std"] }
 thiserror = { workspace = true }
 
 rayon = { workspace = true, optional = true }
 
-proptest = {workspace = true, optional = true}
-proptest-derive = {workspace = true, optional = true}
-
 [dev-dependencies]
 proptest = { workspace = true }
 proptest-derive = { workspace = true }
-tokio = {version = "1.40.0", features = ["rt-multi-thread", "macros"]}
+tokio = { version = "1.35.0", features = ["rt-multi-thread", "macros"] }
+
+[lints]
+workspace = true
consensus/rules/src/blocks.rs

@@ -44,22 +44,22 @@ pub enum BlockError {
     MinerTxError(#[from] MinerTxError),
 }
 
-/// A trait to represent the RandomX VM.
+/// A trait to represent the `RandomX` VM.
 pub trait RandomX {
     type Error;
 
     fn calculate_hash(&self, buf: &[u8]) -> Result<[u8; 32], Self::Error>;
 }
 
-/// Returns if this height is a RandomX seed height.
-pub fn is_randomx_seed_height(height: usize) -> bool {
+/// Returns if this height is a `RandomX` seed height.
+pub const fn is_randomx_seed_height(height: usize) -> bool {
     height % RX_SEEDHASH_EPOCH_BLOCKS == 0
 }
 
-/// Returns the RandomX seed height for this block.
+/// Returns the `RandomX` seed height for this block.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/blocks.html#randomx-seed>
-pub fn randomx_seed_height(height: usize) -> usize {
+pub const fn randomx_seed_height(height: usize) -> usize {
     if height <= RX_SEEDHASH_EPOCH_BLOCKS + RX_SEEDHASH_EPOCH_LAG {
         0
     } else {
@@ -122,10 +122,10 @@ pub fn check_block_pow(hash: &[u8; 32], difficulty: u128) -> Result<(), BlockErr
 /// Returns the penalty free zone
 ///
 /// <https://cuprate.github.io/monero-book/consensus_rules/blocks/weight_limit.html#penalty-free-zone>
-pub fn penalty_free_zone(hf: &HardFork) -> usize {
-    if hf == &HardFork::V1 {
+pub fn penalty_free_zone(hf: HardFork) -> usize {
+    if hf == HardFork::V1 {
         PENALTY_FREE_ZONE_1
-    } else if hf >= &HardFork::V2 && hf < &HardFork::V5 {
+    } else if hf >= HardFork::V2 && hf < HardFork::V5 {
         PENALTY_FREE_ZONE_2
     } else {
         PENALTY_FREE_ZONE_5
@@ -135,7 +135,7 @@ pub fn penalty_free_zone(hf: &HardFork) -> usize {
 /// Sanity check on the block blob size.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/blocks.html#block-weight-and-size>
-fn block_size_sanity_check(
+const fn block_size_sanity_check(
     block_blob_len: usize,
     effective_median: usize,
 ) -> Result<(), BlockError> {
@@ -149,7 +149,7 @@ fn block_size_sanity_check(
 /// Sanity check on the block weight.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/blocks.html#block-weight-and-size>
-pub fn check_block_weight(
+pub const fn check_block_weight(
     block_weight: usize,
     median_for_block_reward: usize,
 ) -> Result<(), BlockError> {
@@ -163,7 +163,7 @@ pub fn check_block_weight(
 /// Sanity check on number of txs in the block.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/blocks.html#amount-of-transactions>
-fn check_amount_txs(number_none_miner_txs: usize) -> Result<(), BlockError> {
+const fn check_amount_txs(number_none_miner_txs: usize) -> Result<(), BlockError> {
     if number_none_miner_txs + 1 > 0x10000000 {
         Err(BlockError::TooManyTxs)
     } else {
@@ -175,10 +175,10 @@ fn check_amount_txs(number_none_miner_txs: usize) -> Result<(), BlockError> {
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/blocks.html#previous-id>
 fn check_prev_id(block: &Block, top_hash: &[u8; 32]) -> Result<(), BlockError> {
-    if &block.header.previous != top_hash {
-        Err(BlockError::PreviousIDIncorrect)
-    } else {
+    if &block.header.previous == top_hash {
         Ok(())
+    } else {
+        Err(BlockError::PreviousIDIncorrect)
     }
 }
 
@@ -273,7 +273,7 @@ pub fn check_block(
         block_weight,
         block_chain_ctx.median_weight_for_block_reward,
         block_chain_ctx.already_generated_coins,
-        &block_chain_ctx.current_hf,
+        block_chain_ctx.current_hf,
     )?;
 
     Ok((vote, generated_coins))
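Context for the two seed-height functions made `const` above: the RandomX seed rotates once per epoch, lagged behind the tip. The non-zero branch is cut off in this view; in Monero's reference implementation it rounds down with the epoch mask, as in this sketch (epoch 2048, lag 64, assumed here to match `RX_SEEDHASH_EPOCH_BLOCKS`/`RX_SEEDHASH_EPOCH_LAG`):

    const RX_SEEDHASH_EPOCH_BLOCKS: usize = 2048;
    const RX_SEEDHASH_EPOCH_LAG: usize = 64;

    const fn randomx_seed_height(height: usize) -> usize {
        if height <= RX_SEEDHASH_EPOCH_BLOCKS + RX_SEEDHASH_EPOCH_LAG {
            0
        } else {
            // Round down to the previous epoch boundary, LAG + 1 blocks back:
            // e.g. height 2113 -> 2048, while 2112 still maps to 0.
            (height - RX_SEEDHASH_EPOCH_LAG - 1) & !(RX_SEEDHASH_EPOCH_BLOCKS - 1)
        }
    }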
consensus/rules/src/decomposed_amount.rs

@@ -1,6 +1,6 @@
 #[rustfmt::skip]
 /// Decomposed amount table.
-pub static DECOMPOSED_AMOUNTS: [u64; 172] = [
+pub(crate) static DECOMPOSED_AMOUNTS: [u64; 172] = [
     1, 2, 3, 4, 5, 6, 7, 8, 9,
     10, 20, 30, 40, 50, 60, 70, 80, 90,
     100, 200, 300, 400, 500, 600, 700, 800, 900,
@@ -40,8 +40,8 @@ mod tests {
 
     #[test]
     fn decomposed_amounts_return_decomposed() {
-        for amount in DECOMPOSED_AMOUNTS.iter() {
-            assert!(is_decomposed_amount(amount))
+        for amount in &DECOMPOSED_AMOUNTS {
+            assert!(is_decomposed_amount(amount));
         }
     }
 
consensus/rules/src/genesis.rs

@@ -8,7 +8,7 @@ use monero_serai::{
 
 use cuprate_helper::network::Network;
 
-const fn genesis_nonce(network: &Network) -> u32 {
+const fn genesis_nonce(network: Network) -> u32 {
     match network {
         Network::Mainnet => 10000,
         Network::Testnet => 10001,
@@ -16,7 +16,7 @@ const fn genesis_nonce(network: &Network) -> u32 {
     }
 }
 
-fn genesis_miner_tx(network: &Network) -> Transaction {
+fn genesis_miner_tx(network: Network) -> Transaction {
     Transaction::read(&mut hex::decode(match network {
         Network::Mainnet | Network::Testnet => "013c01ff0001ffffffffffff03029b2e4c0281c0b02e7c53291a94d1d0cbff8883f8024f5142ee494ffbbd08807121017767aafcde9be00dcfd098715ebcf7f410daebc582fda69d24a28e9d0bc890d1",
         Network::Stagenet => "013c01ff0001ffffffffffff0302df5d56da0c7d643ddd1ce61901c7bdc5fb1738bfe39fbe69c28a3a7032729c0f2101168d0c4ca86fb55a4cf6a36d31431be1c53a3bd7411bb24e8832410289fa6f3b"
@@ -26,7 +26,7 @@ fn genesis_miner_tx(network: &Network) -> Transaction {
 /// Generates the Monero genesis block.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/genesis_block.html>
-pub fn generate_genesis_block(network: &Network) -> Block {
+pub fn generate_genesis_block(network: Network) -> Block {
     Block {
         header: BlockHeader {
             hardfork_version: 1,
@@ -47,19 +47,19 @@ mod tests {
     #[test]
     fn generate_genesis_blocks() {
         assert_eq!(
-            &generate_genesis_block(&Network::Mainnet).hash(),
+            &generate_genesis_block(Network::Mainnet).hash(),
             hex::decode("418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3")
                 .unwrap()
                 .as_slice()
         );
         assert_eq!(
-            &generate_genesis_block(&Network::Testnet).hash(),
+            &generate_genesis_block(Network::Testnet).hash(),
             hex::decode("48ca7cd3c8de5b6a4d53d2861fbdaedca141553559f9be9520068053cda8430b")
                 .unwrap()
                 .as_slice()
         );
         assert_eq!(
-            &generate_genesis_block(&Network::Stagenet).hash(),
+            &generate_genesis_block(Network::Stagenet).hash(),
             hex::decode("76ee3cc98646292206cd3e86f74d88b4dcc1d937088645e9b0cbca84b7ce74eb")
                 .unwrap()
                 .as_slice()
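The recurring `&Network`/`&HardFork` to by-value changes in this and the following files reflect that both are small, fieldless `Copy` enums (the pattern clippy's `trivially_copy_pass_by_ref` pushes toward), and comparisons shed a layer of `&`. A sketch of the before/after (the stagenet nonce is assumed to be 10002, following the visible pattern):

    #[derive(Clone, Copy, PartialEq, Eq)]
    enum Network {
        Mainnet,
        Testnet,
        Stagenet,
    }

    // Before: `genesis_nonce(network: &Network)` matched through a reference;
    // a Copy enum is cheaper and cleaner to pass by value.
    const fn genesis_nonce(network: Network) -> u32 {
        match network {
            Network::Mainnet => 10000,
            Network::Testnet => 10001,
            Network::Stagenet => 10002,
        }
    }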
consensus/rules/src/hard_forks.rs

@@ -25,10 +25,10 @@ pub fn check_block_version_vote(
 ) -> Result<(), HardForkError> {
     // self = current hf
     if hf != version {
-        Err(HardForkError::VersionIncorrect)?;
+        return Err(HardForkError::VersionIncorrect);
     }
     if hf > vote {
-        Err(HardForkError::VoteTooLow)?;
+        return Err(HardForkError::VoteTooLow);
     }
 
     Ok(())
@@ -41,8 +41,8 @@ pub struct HFInfo {
     threshold: usize,
 }
 impl HFInfo {
-    pub const fn new(height: usize, threshold: usize) -> HFInfo {
-        HFInfo { height, threshold }
+    pub const fn new(height: usize, threshold: usize) -> Self {
+        Self { height, threshold }
     }
 }
 
@@ -51,7 +51,7 @@ impl HFInfo {
 pub struct HFsInfo([HFInfo; NUMB_OF_HARD_FORKS]);
 
 impl HFsInfo {
-    pub fn info_for_hf(&self, hf: &HardFork) -> HFInfo {
+    pub const fn info_for_hf(&self, hf: &HardFork) -> HFInfo {
         self.0[*hf as usize - 1]
     }
 
@@ -62,7 +62,7 @@ impl HFsInfo {
     /// Returns the main-net hard-fork information.
     ///
     /// ref: <https://monero-book.cuprate.org/consensus_rules/hardforks.html#Mainnet-Hard-Forks>
-    pub const fn main_net() -> HFsInfo {
+    pub const fn main_net() -> Self {
         Self([
             HFInfo::new(0, 0),
             HFInfo::new(1009827, 0),
@@ -86,7 +86,7 @@ impl HFsInfo {
     /// Returns the test-net hard-fork information.
     ///
     /// ref: <https://monero-book.cuprate.org/consensus_rules/hardforks.html#Testnet-Hard-Forks>
-    pub const fn test_net() -> HFsInfo {
+    pub const fn test_net() -> Self {
         Self([
             HFInfo::new(0, 0),
             HFInfo::new(624634, 0),
@@ -110,7 +110,7 @@ impl HFsInfo {
     /// Returns the test-net hard-fork information.
     ///
     /// ref: <https://monero-book.cuprate.org/consensus_rules/hardforks.html#Stagenet-Hard-Forks>
-    pub const fn stage_net() -> HFsInfo {
+    pub const fn stage_net() -> Self {
         Self([
             HFInfo::new(0, 0),
             HFInfo::new(32000, 0),
@@ -165,8 +165,8 @@ impl Display for HFVotes {
 }
 
 impl HFVotes {
-    pub fn new(window_size: usize) -> HFVotes {
-        HFVotes {
+    pub fn new(window_size: usize) -> Self {
+        Self {
             votes: [0; NUMB_OF_HARD_FORKS],
             vote_list: VecDeque::with_capacity(window_size),
             window_size,
@@ -251,6 +251,6 @@ impl HFVotes {
 /// Returns the votes needed for a hard-fork.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/hardforks.html#accepting-a-fork>
-pub fn votes_needed(threshold: usize, window: usize) -> usize {
+pub const fn votes_needed(threshold: usize, window: usize) -> usize {
     (threshold * window).div_ceil(100)
 }
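`votes_needed` is a ceiling percentage of the voting window, and `usize::div_ceil` has been callable in `const` contexts since Rust 1.73, hence the new `const fn`. A worked example with an illustrative 80% threshold over Monero's 10080-block voting window:

    const fn votes_needed(threshold: usize, window: usize) -> usize {
        (threshold * window).div_ceil(100)
    }

    fn main() {
        assert_eq!(votes_needed(80, 10080), 8064); // exactly 80% of the window
        assert_eq!(votes_needed(1, 101), 2); // 1.01 votes rounds up to 2
    }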
consensus/rules/src/hard_forks/tests.rs

@@ -51,7 +51,7 @@ proptest! {
         prop_assert_eq!(hf_votes.total_votes(), hf_votes.vote_list.len());
 
         let mut votes = [0_usize; NUMB_OF_HARD_FORKS];
-        for vote in hf_votes.vote_list.iter() {
+        for vote in &hf_votes.vote_list {
             // manually go through the list of votes tallying
             votes[*vote as usize - 1] += 1;
         }
@@ -61,9 +61,9 @@ proptest! {
 
     #[test]
     fn window_size_kept_constant(mut hf_votes in arb_full_hf_votes(), new_votes in any::<Vec<HardFork>>()) {
-        for new_vote in new_votes.into_iter() {
+        for new_vote in new_votes {
             hf_votes.add_vote_for_hf(&new_vote);
-            prop_assert_eq!(hf_votes.total_votes(), TEST_WINDOW_SIZE)
+            prop_assert_eq!(hf_votes.total_votes(), TEST_WINDOW_SIZE);
         }
     }
 
consensus/rules/src/lib.rs

@@ -1,3 +1,12 @@
+cfg_if::cfg_if! {
+    // Used in external `tests/`.
+    if #[cfg(test)] {
+        use proptest as _;
+        use proptest_derive as _;
+        use tokio as _;
+    }
+}
+
 use std::time::{SystemTime, UNIX_EPOCH};
 
 pub mod batch_verifier;
consensus/rules/src/miner_tx.rs

@@ -40,7 +40,7 @@ const MINER_TX_TIME_LOCKED_BLOCKS: usize = 60;
 /// the block.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/blocks/reward.html#calculating-base-block-reward>
-fn calculate_base_reward(already_generated_coins: u64, hf: &HardFork) -> u64 {
+fn calculate_base_reward(already_generated_coins: u64, hf: HardFork) -> u64 {
     let target_mins = hf.block_time().as_secs() / 60;
     let emission_speed_factor = 20 - (target_mins - 1);
     ((MONEY_SUPPLY - already_generated_coins) >> emission_speed_factor)
@@ -54,7 +54,7 @@ pub fn calculate_block_reward(
     block_weight: usize,
     median_bw: usize,
     already_generated_coins: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> u64 {
     let base_reward = calculate_base_reward(already_generated_coins, hf);
 
@@ -75,9 +75,9 @@ pub fn calculate_block_reward(
 /// Checks the miner transactions version.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#version>
-fn check_miner_tx_version(tx_version: &TxVersion, hf: &HardFork) -> Result<(), MinerTxError> {
+fn check_miner_tx_version(tx_version: TxVersion, hf: HardFork) -> Result<(), MinerTxError> {
     // The TxVersion enum checks if the version is not 1 or 2
-    if hf >= &HardFork::V12 && tx_version != &TxVersion::RingCT {
+    if hf >= HardFork::V12 && tx_version != TxVersion::RingCT {
         Err(MinerTxError::VersionInvalid)
     } else {
         Ok(())
@@ -94,31 +94,31 @@ fn check_inputs(inputs: &[Input], chain_height: usize) -> Result<(), MinerTxErro
 
     match &inputs[0] {
         Input::Gen(height) => {
-            if height != &chain_height {
-                Err(MinerTxError::InputsHeightIncorrect)
-            } else {
+            if height == &chain_height {
                 Ok(())
+            } else {
+                Err(MinerTxError::InputsHeightIncorrect)
             }
         }
-        _ => Err(MinerTxError::InputNotOfTypeGen),
+        Input::ToKey { .. } => Err(MinerTxError::InputNotOfTypeGen),
     }
 }
 
 /// Checks the miner transaction has a correct time lock.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#unlock-time>
-fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<(), MinerTxError> {
+const fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<(), MinerTxError> {
     match time_lock {
         &Timelock::Block(till_height) => {
             // Lock times above this amount are timestamps not blocks.
             // This is just for safety though and shouldn't actually be hit.
             if till_height > 500_000_000 {
-                Err(MinerTxError::InvalidLockTime)?;
+                return Err(MinerTxError::InvalidLockTime);
             }
-            if till_height != chain_height + MINER_TX_TIME_LOCKED_BLOCKS {
-                Err(MinerTxError::InvalidLockTime)
-            } else {
+            if till_height == chain_height + MINER_TX_TIME_LOCKED_BLOCKS {
                 Ok(())
+            } else {
+                Err(MinerTxError::InvalidLockTime)
             }
         }
         _ => Err(MinerTxError::InvalidLockTime),
@@ -131,18 +131,18 @@ fn check_time_lock(time_lock: &Timelock, chain_height: usize) -> Result<(), Mine
 /// && <https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#zero-amount-v1-output>
 fn sum_outputs(
     outputs: &[Output],
-    hf: &HardFork,
-    tx_version: &TxVersion,
+    hf: HardFork,
+    tx_version: TxVersion,
 ) -> Result<u64, MinerTxError> {
     let mut sum: u64 = 0;
     for out in outputs {
         let amt = out.amount.unwrap_or(0);
 
-        if tx_version == &TxVersion::RingSignatures && amt == 0 {
+        if tx_version == TxVersion::RingSignatures && amt == 0 {
             return Err(MinerTxError::OutputAmountIncorrect);
         }
 
-        if hf == &HardFork::V3 && !is_decomposed_amount(&amt) {
+        if hf == HardFork::V3 && !is_decomposed_amount(&amt) {
            return Err(MinerTxError::OutputNotDecomposed);
         }
         sum = sum.checked_add(amt).ok_or(MinerTxError::OutputsOverflow)?;
@@ -157,9 +157,9 @@ fn check_total_output_amt(
     total_output: u64,
     reward: u64,
     fees: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<u64, MinerTxError> {
-    if hf == &HardFork::V1 || hf >= &HardFork::V12 {
+    if hf == HardFork::V1 || hf >= HardFork::V12 {
         if total_output != reward + fees {
             return Err(MinerTxError::OutputAmountIncorrect);
         }
@@ -185,16 +185,16 @@ pub fn check_miner_tx(
     block_weight: usize,
     median_bw: usize,
     already_generated_coins: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<u64, MinerTxError> {
     let tx_version = TxVersion::from_raw(tx.version()).ok_or(MinerTxError::VersionInvalid)?;
-    check_miner_tx_version(&tx_version, hf)?;
+    check_miner_tx_version(tx_version, hf)?;
 
     // ref: <https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#ringct-type>
     match tx {
         Transaction::V1 { .. } => (),
         Transaction::V2 { proofs, .. } => {
-            if hf >= &HardFork::V12 && proofs.is_some() {
+            if hf >= HardFork::V12 && proofs.is_some() {
                 return Err(MinerTxError::RCTTypeNotNULL);
             }
         }
@@ -207,7 +207,7 @@ pub fn check_miner_tx(
     check_output_types(&tx.prefix().outputs, hf).map_err(|_| MinerTxError::InvalidOutputType)?;
 
     let reward = calculate_block_reward(block_weight, median_bw, already_generated_coins, hf);
-    let total_outs = sum_outputs(&tx.prefix().outputs, hf, &tx_version)?;
+    let total_outs = sum_outputs(&tx.prefix().outputs, hf, tx_version)?;
 
     check_total_output_amt(total_outs, reward, total_fees, hf)
 }
@@ -221,7 +221,7 @@ mod tests {
     proptest! {
         #[test]
         fn tail_emission(generated_coins in any::<u64>(), hf in any::<HardFork>()) {
-            prop_assert!(calculate_base_reward(generated_coins, &hf) >= MINIMUM_REWARD_PER_MIN * hf.block_time().as_secs() / 60)
+            prop_assert!(calculate_base_reward(generated_coins, hf) >= MINIMUM_REWARD_PER_MIN * hf.block_time().as_secs() / 60);
         }
     }
 }
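A worked example of the arithmetic inside `calculate_base_reward` (now taking `HardFork` by value): with a 2-minute block target, `emission_speed_factor = 20 - (2 - 1) = 19`, so each block mints the remaining supply shifted right by 19 bits. A sketch assuming Monero's `MONEY_SUPPLY = u64::MAX` and omitting the tail-emission floor that the `tail_emission` proptest above asserts:

    const MONEY_SUPPLY: u64 = u64::MAX;

    fn base_reward(already_generated_coins: u64, target_mins: u64) -> u64 {
        let emission_speed_factor = 20 - (target_mins - 1);
        (MONEY_SUPPLY - already_generated_coins) >> emission_speed_factor
    }

    fn main() {
        // Fresh chain, 2-minute blocks: (2^64 - 1) >> 19 is roughly 3.5e13
        // piconero, i.e. about 35 XMR for the first block.
        println!("{}", base_reward(0, 2));
    }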
@ -99,11 +99,8 @@ fn check_output_keys(outputs: &[Output]) -> Result<(), TransactionError> {
|
||||||
///
|
///
|
||||||
/// <https://monero-book.cuprate.org/consensus_rules/transactions/outputs.html#output-type>
|
/// <https://monero-book.cuprate.org/consensus_rules/transactions/outputs.html#output-type>
|
||||||
/// <https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#output-type>
|
/// <https://monero-book.cuprate.org/consensus_rules/blocks/miner_tx.html#output-type>
|
||||||
pub(crate) fn check_output_types(
|
pub(crate) fn check_output_types(outputs: &[Output], hf: HardFork) -> Result<(), TransactionError> {
|
||||||
outputs: &[Output],
|
if hf == HardFork::V15 {
|
||||||
hf: &HardFork,
|
|
||||||
) -> Result<(), TransactionError> {
|
|
||||||
if hf == &HardFork::V15 {
|
|
||||||
for outs in outputs.windows(2) {
|
for outs in outputs.windows(2) {
|
||||||
if outs[0].view_tag.is_some() != outs[1].view_tag.is_some() {
|
if outs[0].view_tag.is_some() != outs[1].view_tag.is_some() {
|
||||||
return Err(TransactionError::OutputTypeInvalid);
|
return Err(TransactionError::OutputTypeInvalid);
|
||||||
|
@ -113,8 +110,8 @@ pub(crate) fn check_output_types(
|
||||||
}
|
}
|
||||||
|
|
||||||
for out in outputs {
|
for out in outputs {
|
||||||
if hf <= &HardFork::V14 && out.view_tag.is_some()
|
if hf <= HardFork::V14 && out.view_tag.is_some()
|
||||||
|| hf >= &HardFork::V16 && out.view_tag.is_none()
|
|| hf >= HardFork::V16 && out.view_tag.is_none()
|
||||||
{
|
{
|
||||||
return Err(TransactionError::OutputTypeInvalid);
|
return Err(TransactionError::OutputTypeInvalid);
|
||||||
}
|
}
|
||||||
|
@ -125,12 +122,12 @@ pub(crate) fn check_output_types(
|
||||||
/// Checks the individual outputs amount for version 1 txs.
|
/// Checks the individual outputs amount for version 1 txs.
|
||||||
///
|
///
|
||||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/outputs.html#output-amount>
|
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/outputs.html#output-amount>
|
||||||
fn check_output_amount_v1(amount: u64, hf: &HardFork) -> Result<(), TransactionError> {
|
fn check_output_amount_v1(amount: u64, hf: HardFork) -> Result<(), TransactionError> {
|
||||||
if amount == 0 {
|
if amount == 0 {
|
||||||
return Err(TransactionError::ZeroOutputForV1);
|
return Err(TransactionError::ZeroOutputForV1);
|
||||||
}
|
}
|
||||||
|
|
||||||
if hf >= &HardFork::V2 && !is_decomposed_amount(&amount) {
|
if hf >= HardFork::V2 && !is_decomposed_amount(&amount) {
|
||||||
return Err(TransactionError::AmountNotDecomposed);
|
return Err(TransactionError::AmountNotDecomposed);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -140,7 +137,7 @@ fn check_output_amount_v1(amount: u64, hf: &HardFork) -> Result<(), TransactionE
|
||||||
/// Checks the individual outputs amount for version 2 txs.
|
/// Checks the individual outputs amount for version 2 txs.
|
||||||
///
|
///
|
||||||
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/outputs.html#output-amount>
|
/// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/outputs.html#output-amount>
|
||||||
fn check_output_amount_v2(amount: u64) -> Result<(), TransactionError> {
|
const fn check_output_amount_v2(amount: u64) -> Result<(), TransactionError> {
|
||||||
if amount == 0 {
|
if amount == 0 {
|
||||||
Ok(())
|
Ok(())
|
||||||
} else {
|
} else {
|
||||||
|
@ -154,8 +151,8 @@ fn check_output_amount_v2(amount: u64) -> Result<(), TransactionError> {
|
||||||
/// && <https://monero-book.cuprate.org/consensus_rules/transactions/outputs.html#outputs-must-not-overflow>
|
/// && <https://monero-book.cuprate.org/consensus_rules/transactions/outputs.html#outputs-must-not-overflow>
|
||||||
fn sum_outputs(
|
fn sum_outputs(
|
||||||
outputs: &[Output],
|
outputs: &[Output],
|
||||||
hf: &HardFork,
|
hf: HardFork,
|
||||||
tx_version: &TxVersion,
|
tx_version: TxVersion,
|
||||||
) -> Result<u64, TransactionError> {
|
) -> Result<u64, TransactionError> {
|
||||||
let mut sum: u64 = 0;
|
let mut sum: u64 = 0;
|
||||||
|
|
||||||
|
@ -181,15 +178,15 @@ fn sum_outputs(
|
||||||
/// && <https://monero-book.cuprate.org/consensus_rules/transactions/ring_ct/bulletproofs+.html#max-outputs>
|
/// && <https://monero-book.cuprate.org/consensus_rules/transactions/ring_ct/bulletproofs+.html#max-outputs>
|
||||||
fn check_number_of_outputs(
|
fn check_number_of_outputs(
|
||||||
outputs: usize,
|
outputs: usize,
|
||||||
hf: &HardFork,
|
hf: HardFork,
|
||||||
tx_version: &TxVersion,
|
tx_version: TxVersion,
|
||||||
bp_or_bpp: bool,
|
bp_or_bpp: bool,
|
||||||
) -> Result<(), TransactionError> {
|
) -> Result<(), TransactionError> {
|
||||||
if tx_version == &TxVersion::RingSignatures {
|
if tx_version == TxVersion::RingSignatures {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
if hf >= &HardFork::V12 && outputs < 2 {
|
if hf >= HardFork::V12 && outputs < 2 {
|
||||||
return Err(TransactionError::InvalidNumberOfOutputs);
|
return Err(TransactionError::InvalidNumberOfOutputs);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -207,8 +204,8 @@ fn check_number_of_outputs(
|
||||||
 /// && <https://monero-book.cuprate.org/consensus_rules/transactions/ring_ct/bulletproofs+.html#max-outputs>
 fn check_outputs_semantics(
     outputs: &[Output],
-    hf: &HardFork,
-    tx_version: &TxVersion,
+    hf: HardFork,
+    tx_version: TxVersion,
     bp_or_bpp: bool,
 ) -> Result<u64, TransactionError> {
     check_output_types(outputs, hf)?;
@@ -223,11 +220,11 @@ fn check_outputs_semantics(
 /// Checks if an outputs unlock time has passed.
 ///
 /// <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html>
-pub fn output_unlocked(
+pub const fn output_unlocked(
     time_lock: &Timelock,
     current_chain_height: usize,
     current_time_lock_timestamp: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> bool {
     match *time_lock {
         Timelock::None => true,
@@ -243,7 +240,7 @@ pub fn output_unlocked(
 /// Returns if a locked output, which uses a block height, can be spent.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html#block-height>
-fn check_block_time_lock(unlock_height: usize, current_chain_height: usize) -> bool {
+const fn check_block_time_lock(unlock_height: usize, current_chain_height: usize) -> bool {
     // current_chain_height = 1 + top height
     unlock_height <= current_chain_height
 }
@@ -251,10 +248,10 @@ fn check_block_time_lock(unlock_height: usize, current_chain_height: usize) -> bool {
 /// Returns if a locked output, which uses a block height, can be spent.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html#timestamp>
-fn check_timestamp_time_lock(
+const fn check_timestamp_time_lock(
     unlock_timestamp: u64,
     current_time_lock_timestamp: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> bool {
     current_time_lock_timestamp + hf.block_time().as_secs() >= unlock_timestamp
 }
@@ -269,19 +266,19 @@ fn check_all_time_locks(
     time_locks: &[Timelock],
     current_chain_height: usize,
     current_time_lock_timestamp: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<(), TransactionError> {
     time_locks.iter().try_for_each(|time_lock| {
-        if !output_unlocked(
+        if output_unlocked(
             time_lock,
             current_chain_height,
             current_time_lock_timestamp,
             hf,
         ) {
+            Ok(())
+        } else {
             tracing::debug!("Transaction invalid: one or more inputs locked, lock: {time_lock:?}.");
             Err(TransactionError::OneOrMoreRingMembersLocked)
-        } else {
-            Ok(())
         }
     })
 }
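Taken together, the three helpers above decide whether a `Timelock` has expired: no lock is always spendable, a height lock compares against the current chain height, and a timestamp lock compares against an adjusted current time padded by one target block time. A minimal sketch of the dispatch inside `output_unlocked`, assuming monero-serai's `Timelock::Block`/`Timelock::Time` variants (the variant names are not shown in this hunk):

    // Sketch only; the real match arms live in output_unlocked().
    match *time_lock {
        // No lock: always spendable.
        Timelock::None => true,
        // Height lock: spendable once the chain reaches the unlock height.
        Timelock::Block(h) => check_block_time_lock(h, current_chain_height),
        // Timestamp lock: spendable once the adjusted current time passes it.
        Timelock::Time(t) => check_timestamp_time_lock(t, current_time_lock_timestamp, hf),
    }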
@@ -292,11 +289,11 @@ fn check_all_time_locks(
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#minimum-decoys>
 /// && <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#equal-number-of-decoys>
-pub fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), TransactionError> {
-    if hf == &HardFork::V15 {
+pub fn check_decoy_info(decoy_info: &DecoyInfo, hf: HardFork) -> Result<(), TransactionError> {
+    if hf == HardFork::V15 {
         // Hard-fork 15 allows both v14 and v16 rules
-        return check_decoy_info(decoy_info, &HardFork::V14)
-            .or_else(|_| check_decoy_info(decoy_info, &HardFork::V16));
+        return check_decoy_info(decoy_info, HardFork::V14)
+            .or_else(|_| check_decoy_info(decoy_info, HardFork::V16));
     }

     let current_minimum_decoys = minimum_decoys(hf);
@@ -310,13 +307,13 @@ pub fn check_decoy_info(decoy_info: &DecoyInfo, hf: &HardFork) -> Result<(), TransactionError> {
         if decoy_info.mixable > 1 {
             return Err(TransactionError::MoreThanOneMixableInputWithUnmixable);
         }
-    } else if hf >= &HardFork::V8 && decoy_info.min_decoys != current_minimum_decoys {
+    } else if hf >= HardFork::V8 && decoy_info.min_decoys != current_minimum_decoys {
         // From V8 enforce the minimum used number of rings is the default minimum.
         return Err(TransactionError::InputDoesNotHaveExpectedNumbDecoys);
     }

     // From v12 all inputs must have the same number of decoys.
-    if hf >= &HardFork::V12 && decoy_info.min_decoys != decoy_info.max_decoys {
+    if hf >= HardFork::V12 && decoy_info.min_decoys != decoy_info.max_decoys {
         return Err(TransactionError::InputDoesNotHaveExpectedNumbDecoys);
     }

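The `V15` special case above means a transaction satisfying either the v14 or the v16 ring-size rules is accepted during that fork. A hedged usage sketch (the decoy counts are illustrative, assuming 10 is the v14 default minimum):

    // Illustrative only: rings that meet the assumed v14 minimum still pass under HF 15.
    let decoy_info = DecoyInfo {
        mixable: 0,
        not_mixable: 0,
        min_decoys: 10, // assumed v14 default minimum
        max_decoys: 10,
    };
    assert!(check_decoy_info(&decoy_info, HardFork::V15).is_ok());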
@@ -334,19 +331,19 @@ fn check_key_images(input: &Input) -> Result<(), TransactionError> {
                 return Err(TransactionError::KeyImageIsNotInPrimeSubGroup);
             }
         }
-        _ => Err(TransactionError::IncorrectInputType)?,
+        Input::Gen(_) => return Err(TransactionError::IncorrectInputType),
     }

     Ok(())
 }

-/// Checks that the input is of type [`Input::ToKey`] aka txin_to_key.
+/// Checks that the input is of type [`Input::ToKey`] aka `txin_to_key`.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#input-type>
-fn check_input_type(input: &Input) -> Result<(), TransactionError> {
+const fn check_input_type(input: &Input) -> Result<(), TransactionError> {
     match input {
         Input::ToKey { .. } => Ok(()),
-        _ => Err(TransactionError::IncorrectInputType)?,
+        Input::Gen(_) => Err(TransactionError::IncorrectInputType),
     }
 }

@@ -362,15 +359,15 @@ fn check_input_has_decoys(input: &Input) -> Result<(), TransactionError> {
                 Ok(())
             }
         }
-        _ => Err(TransactionError::IncorrectInputType)?,
+        Input::Gen(_) => Err(TransactionError::IncorrectInputType),
     }
 }

 /// Checks that the ring members for the input are unique after hard-fork 6.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#unique-ring-members>
-fn check_ring_members_unique(input: &Input, hf: &HardFork) -> Result<(), TransactionError> {
-    if hf >= &HardFork::V6 {
+fn check_ring_members_unique(input: &Input, hf: HardFork) -> Result<(), TransactionError> {
+    if hf >= HardFork::V6 {
         match input {
             Input::ToKey { key_offsets, .. } => key_offsets.iter().skip(1).try_for_each(|offset| {
                 if *offset == 0 {
@@ -379,7 +376,7 @@ fn check_ring_members_unique(input: &Input, hf: &HardFork) -> Result<(), TransactionError> {
                     Ok(())
                 }
             }),
-            _ => Err(TransactionError::IncorrectInputType)?,
+            Input::Gen(_) => Err(TransactionError::IncorrectInputType),
         }
     } else {
         Ok(())
@@ -389,23 +386,22 @@ fn check_ring_members_unique(input: &Input, hf: &HardFork) -> Result<(), TransactionError> {
 /// Checks that from hf 7 the inputs are sorted by key image.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#sorted-inputs>
-fn check_inputs_sorted(inputs: &[Input], hf: &HardFork) -> Result<(), TransactionError> {
+fn check_inputs_sorted(inputs: &[Input], hf: HardFork) -> Result<(), TransactionError> {
     let get_ki = |inp: &Input| match inp {
         Input::ToKey { key_image, .. } => Ok(key_image.compress().to_bytes()),
-        _ => Err(TransactionError::IncorrectInputType),
+        Input::Gen(_) => Err(TransactionError::IncorrectInputType),
     };

-    if hf >= &HardFork::V7 {
+    if hf >= HardFork::V7 {
         for inps in inputs.windows(2) {
             match get_ki(&inps[0])?.cmp(&get_ki(&inps[1])?) {
                 Ordering::Greater => (),
                 _ => return Err(TransactionError::InputsAreNotOrdered),
             }
         }
-        Ok(())
-    } else {
-        Ok(())
     }

+    Ok(())
 }
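Note the loop above only accepts `Ordering::Greater`, so key images must be strictly decreasing; a duplicate or ascending pair is rejected. The same rule restated as a self-contained sketch over raw 32-byte key images (the function name is invented for illustration):

    use std::cmp::Ordering;

    /// Sketch: the sorted-inputs rule over compressed key image bytes.
    fn key_images_strictly_descending(kis: &[[u8; 32]]) -> bool {
        kis.windows(2)
            .all(|w| matches!(w[0].cmp(&w[1]), Ordering::Greater))
    }

    // key_images_strictly_descending(&[[2; 32], [1; 32]]) == true
    // key_images_strictly_descending(&[[1; 32], [1; 32]]) == false (duplicate)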
 /// Checks the youngest output is at least 10 blocks old.
@@ -414,9 +410,9 @@ fn check_inputs_sorted(inputs: &[Input], hf: &HardFork) -> Result<(), TransactionError> {
 fn check_10_block_lock(
     youngest_used_out_height: usize,
     current_chain_height: usize,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<(), TransactionError> {
-    if hf >= &HardFork::V12 {
+    if hf >= HardFork::V12 {
         if youngest_used_out_height + 10 > current_chain_height {
             tracing::debug!(
                 "Transaction invalid: One or more ring members younger than 10 blocks."
@@ -442,7 +438,7 @@ fn sum_inputs_check_overflow(inputs: &[Input]) -> Result<u64, TransactionError>
                 .checked_add(amount.unwrap_or(0))
                 .ok_or(TransactionError::InputsOverflow)?;
         }
-        _ => Err(TransactionError::IncorrectInputType)?,
+        Input::Gen(_) => return Err(TransactionError::IncorrectInputType),
     }
 }

@@ -454,7 +450,7 @@ fn sum_inputs_check_overflow(inputs: &[Input]) -> Result<u64, TransactionError>
 /// Semantic rules are rules that don't require blockchain context, the hard-fork does not require blockchain context as:
 /// - The tx-pool will use the current hard-fork
 /// - When syncing the hard-fork is in the block header.
-fn check_inputs_semantics(inputs: &[Input], hf: &HardFork) -> Result<u64, TransactionError> {
+fn check_inputs_semantics(inputs: &[Input], hf: HardFork) -> Result<u64, TransactionError> {
     // <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#no-empty-inputs>
     if inputs.is_empty() {
         return Err(TransactionError::NoInputs);
@@ -481,14 +477,14 @@ fn check_inputs_contextual(
     inputs: &[Input],
     tx_ring_members_info: &TxRingMembersInfo,
     current_chain_height: usize,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<(), TransactionError> {
     // This rule is not contained in monero-core explicitly, but it is enforced by how Monero picks ring members.
     // When picking ring members monerod will only look in the DB at past blocks so an output has to be younger
     // than this transaction to be used in this tx.
     if tx_ring_members_info.youngest_used_out_height >= current_chain_height {
         tracing::debug!("Transaction invalid: One or more ring members too young.");
-        Err(TransactionError::OneOrMoreRingMembersLocked)?;
+        return Err(TransactionError::OneOrMoreRingMembersLocked);
     }

     check_10_block_lock(
@@ -500,7 +496,7 @@ fn check_inputs_contextual(
     if let Some(decoys_info) = &tx_ring_members_info.decoy_info {
         check_decoy_info(decoys_info, hf)?;
     } else {
-        assert_eq!(hf, &HardFork::V1);
+        assert_eq!(hf, HardFork::V1);
     }

     for input in inputs {
@@ -517,22 +513,22 @@ fn check_inputs_contextual(
 /// <https://monero-book.cuprate.org/consensus_rules/transactions.html#version>
 fn check_tx_version(
     decoy_info: &Option<DecoyInfo>,
-    version: &TxVersion,
-    hf: &HardFork,
+    version: TxVersion,
+    hf: HardFork,
 ) -> Result<(), TransactionError> {
     if let Some(decoy_info) = decoy_info {
         let max = max_tx_version(hf);
-        if version > &max {
+        if version > max {
             return Err(TransactionError::TransactionVersionInvalid);
         }

         let min = min_tx_version(hf);
-        if version < &min && decoy_info.not_mixable == 0 {
+        if version < min && decoy_info.not_mixable == 0 {
             return Err(TransactionError::TransactionVersionInvalid);
         }
     } else {
         // This will only happen for hard-fork 1 when only RingSignatures are allowed.
-        if version != &TxVersion::RingSignatures {
+        if version != TxVersion::RingSignatures {
             return Err(TransactionError::TransactionVersionInvalid);
         }
     }
@@ -541,8 +537,8 @@ fn check_tx_version(
 }

 /// Returns the default maximum tx version for the given hard-fork.
-fn max_tx_version(hf: &HardFork) -> TxVersion {
-    if hf <= &HardFork::V3 {
+fn max_tx_version(hf: HardFork) -> TxVersion {
+    if hf <= HardFork::V3 {
         TxVersion::RingSignatures
     } else {
         TxVersion::RingCT
@@ -550,15 +546,15 @@ fn max_tx_version(hf: &HardFork) -> TxVersion {
 }

 /// Returns the default minimum tx version for the given hard-fork.
-fn min_tx_version(hf: &HardFork) -> TxVersion {
-    if hf >= &HardFork::V6 {
+fn min_tx_version(hf: HardFork) -> TxVersion {
+    if hf >= HardFork::V6 {
         TxVersion::RingCT
     } else {
         TxVersion::RingSignatures
     }
 }

-fn transaction_weight_limit(hf: &HardFork) -> usize {
+fn transaction_weight_limit(hf: HardFork) -> usize {
     penalty_free_zone(hf) / 2 - 600
 }

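For intuition: the weight limit is half the penalty-free zone minus 600 bytes. Assuming `penalty_free_zone` returns 300,000 bytes for the modern hard forks (an assumed value, not shown in this diff), the arithmetic works out as:

    // Assumed: the post-V5 penalty-free zone is 300_000 bytes.
    let penalty_free_zone = 300_000_usize;
    let weight_limit = penalty_free_zone / 2 - 600;
    assert_eq!(weight_limit, 149_400);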
@@ -575,14 +571,14 @@ pub fn check_transaction_semantic(
     tx_blob_size: usize,
     tx_weight: usize,
     tx_hash: &[u8; 32],
-    hf: &HardFork,
+    hf: HardFork,
     verifier: impl BatchVerifier,
 ) -> Result<u64, TransactionError> {
     // <https://monero-book.cuprate.org/consensus_rules/transactions.html#transaction-size>
     if tx_blob_size > MAX_TX_BLOB_SIZE
-        || (hf >= &HardFork::V8 && tx_weight > transaction_weight_limit(hf))
+        || (hf >= HardFork::V8 && tx_weight > transaction_weight_limit(hf))
     {
-        Err(TransactionError::TooBig)?;
+        return Err(TransactionError::TooBig);
     }

     let tx_version =
@@ -602,13 +598,13 @@ pub fn check_transaction_semantic(
         Transaction::V2 { proofs: None, .. } | Transaction::V1 { .. } => false,
     };

-    let outputs_sum = check_outputs_semantics(&tx.prefix().outputs, hf, &tx_version, bp_or_bpp)?;
+    let outputs_sum = check_outputs_semantics(&tx.prefix().outputs, hf, tx_version, bp_or_bpp)?;
     let inputs_sum = check_inputs_semantics(&tx.prefix().inputs, hf)?;

     let fee = match tx {
         Transaction::V1 { .. } => {
             if outputs_sum >= inputs_sum {
-                Err(TransactionError::OutputsTooHigh)?;
+                return Err(TransactionError::OutputsTooHigh);
             }
             inputs_sum - outputs_sum
         }
@@ -633,13 +629,12 @@ pub fn check_transaction_semantic(
 /// This function also does _not_ check for duplicate key-images: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#unique-key-image>.
 ///
 /// `current_time_lock_timestamp` must be: <https://monero-book.cuprate.org/consensus_rules/transactions/unlock_time.html#getting-the-current-time>.
-
 pub fn check_transaction_contextual(
     tx: &Transaction,
     tx_ring_members_info: &TxRingMembersInfo,
     current_chain_height: usize,
     current_time_lock_timestamp: u64,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<(), TransactionError> {
     let tx_version =
         TxVersion::from_raw(tx.version()).ok_or(TransactionError::TransactionVersionInvalid)?;
@@ -650,7 +645,7 @@ pub fn check_transaction_contextual(
         current_chain_height,
         hf,
     )?;
-    check_tx_version(&tx_ring_members_info.decoy_info, &tx_version, hf)?;
+    check_tx_version(&tx_ring_members_info.decoy_info, tx_version, hf)?;

     check_all_time_locks(
         &tx_ring_members_info.time_locked_outs,
@@ -26,7 +26,7 @@ pub fn get_absolute_offsets(relative_offsets: &[u64]) -> Result<Vec<u64>, TransactionError> {
     Ok(offsets)
 }

-/// Inserts the output IDs that are needed to verify the transaction inputs into the provided HashMap.
+/// Inserts the output IDs that are needed to verify the transaction inputs into the provided `HashMap`.
 ///
 /// This will error if the inputs are empty
 /// <https://cuprate.github.io/monero-book/consensus_rules/transactions.html#no-empty-inputs>
@@ -49,7 +49,7 @@ pub fn insert_ring_member_ids(
                 .entry(amount.unwrap_or(0))
                 .or_default()
                 .extend(get_absolute_offsets(key_offsets)?),
-            _ => return Err(TransactionError::IncorrectInputType),
+            Input::Gen(_) => return Err(TransactionError::IncorrectInputType),
         }
     }
     Ok(())
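`get_absolute_offsets` (only its signature is visible here) undoes the on-wire compression of ring member indices: every offset after the first is stored relative to the previous one, so decoding is a running sum. A hedged standalone sketch of that decoding (error handling elided, function name invented):

    /// Sketch: relative -> absolute key offset decoding via prefix sums.
    fn absolute_offsets(relative: &[u64]) -> Vec<u64> {
        let mut cursor = 0_u64;
        relative
            .iter()
            .map(|rel| {
                // Each entry is an increment over the previous absolute index.
                cursor += rel;
                cursor
            })
            .collect()
    }

    // [10, 5, 2] decodes to the absolute indices [10, 15, 17].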
@@ -60,7 +60,7 @@ pub fn insert_ring_member_ids(
 pub enum Rings {
     /// Legacy, pre-ringCT, rings.
     Legacy(Vec<Vec<EdwardsPoint>>),
-    /// RingCT rings, (outkey, amount commitment).
+    /// `RingCT` rings, (outkey, amount commitment).
     RingCT(Vec<Vec<[EdwardsPoint; 2]>>),
 }

@@ -103,15 +103,15 @@ impl DecoyInfo {
     ///
     /// So:
     ///
-    /// amount_outs_on_chain(inputs`[X]`) == outputs_with_amount`[X]`
+    /// `amount_outs_on_chain(inputs[X]) == outputs_with_amount[X]`
     ///
     /// Do not rely on this function to do consensus checks!
     ///
     pub fn new(
         inputs: &[Input],
         outputs_with_amount: impl Fn(u64) -> usize,
-        hf: &HardFork,
-    ) -> Result<DecoyInfo, TransactionError> {
+        hf: HardFork,
+    ) -> Result<Self, TransactionError> {
         let mut min_decoys = usize::MAX;
         let mut max_decoys = usize::MIN;
         let mut mixable = 0;
@@ -119,7 +119,7 @@ impl DecoyInfo {

         let minimum_decoys = minimum_decoys(hf);

-        for inp in inputs.iter() {
+        for inp in inputs {
             match inp {
                 Input::ToKey {
                     key_offsets,
@@ -149,11 +149,11 @@ impl DecoyInfo {
                     min_decoys = min(min_decoys, numb_decoys);
                     max_decoys = max(max_decoys, numb_decoys);
                 }
-                _ => return Err(TransactionError::IncorrectInputType),
+                Input::Gen(_) => return Err(TransactionError::IncorrectInputType),
             }
         }

-        Ok(DecoyInfo {
+        Ok(Self {
             mixable,
             not_mixable,
             min_decoys,
@@ -166,7 +166,7 @@ impl DecoyInfo {
 /// **There are exceptions to this always being the minimum decoys**
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/inputs.html#default-minimum-decoys>
-pub(crate) fn minimum_decoys(hf: &HardFork) -> usize {
+pub(crate) fn minimum_decoys(hf: HardFork) -> usize {
     use HardFork as HF;
     match hf {
         HF::V1 => panic!("hard-fork 1 does not use these rules!"),
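`DecoyInfo::new` needs to know how many outputs exist on-chain for each pre-RingCT amount, which callers supply as the `outputs_with_amount` closure. A hedged usage sketch, inside a function returning `Result<_, TransactionError>` (the closure and values are stand-ins for a real database query, and `tx` is assumed to be a decoded monero-serai `Transaction`):

    // Illustrative only: pretend every amount has 100 outputs on-chain.
    let outputs_with_amount = |_amount: u64| 100_usize;

    let decoy_info = DecoyInfo::new(
        &tx.prefix().inputs,
        outputs_with_amount, // closure standing in for a real DB query
        HardFork::V16,
    )?;
    check_decoy_info(&decoy_info, HardFork::V16)?;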
@@ -40,10 +40,10 @@ pub enum RingCTError {
     CLSAGError(#[from] ClsagError),
 }

-/// Checks the RingCT type is allowed for the current hard fork.
+/// Checks the `RingCT` type is allowed for the current hard fork.
 ///
 /// <https://monero-book.cuprate.org/consensus_rules/ring_ct.html#type>
-fn check_rct_type(ty: &RctType, hf: HardFork, tx_hash: &[u8; 32]) -> Result<(), RingCTError> {
+fn check_rct_type(ty: RctType, hf: HardFork, tx_hash: &[u8; 32]) -> Result<(), RingCTError> {
     use HardFork as F;
     use RctType as T;

@@ -125,11 +125,11 @@ pub(crate) fn ring_ct_semantic_checks(
     proofs: &RctProofs,
     tx_hash: &[u8; 32],
     verifier: impl BatchVerifier,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> Result<(), RingCTError> {
     let rct_type = proofs.rct_type();

-    check_rct_type(&rct_type, *hf, tx_hash)?;
+    check_rct_type(rct_type, hf, tx_hash)?;
     check_output_range_proofs(proofs, verifier)?;

     if rct_type != RctType::AggregateMlsagBorromean {
@@ -154,7 +154,7 @@ pub(crate) fn check_input_signatures(
     };

     if rings.is_empty() {
-        Err(RingCTError::RingInvalid)?;
+        return Err(RingCTError::RingInvalid);
     }

     let pseudo_outs = match &proofs.prunable {
@@ -222,20 +222,20 @@ mod tests {
     #[test]
     fn grandfathered_bulletproofs2() {
         assert!(check_rct_type(
-            &RctType::MlsagBulletproofsCompactAmount,
+            RctType::MlsagBulletproofsCompactAmount,
             HardFork::V14,
             &[0; 32]
         )
         .is_err());

         assert!(check_rct_type(
-            &RctType::MlsagBulletproofsCompactAmount,
+            RctType::MlsagBulletproofsCompactAmount,
             HardFork::V14,
             &GRANDFATHERED_TRANSACTIONS[0]
         )
         .is_ok());
         assert!(check_rct_type(
-            &RctType::MlsagBulletproofsCompactAmount,
+            RctType::MlsagBulletproofsCompactAmount,
             HardFork::V14,
             &GRANDFATHERED_TRANSACTIONS[1]
         )
@@ -17,7 +17,7 @@ use crate::try_par_iter;
 /// Verifies the ring signature.
 ///
 /// ref: <https://monero-book.cuprate.org/consensus_rules/transactions/ring_signatures.html>
-pub fn check_input_signatures(
+pub(crate) fn check_input_signatures(
     inputs: &[Input],
     signatures: &[RingSignature],
     rings: &Rings,
@@ -45,7 +45,7 @@ pub fn check_input_signatures(
                 Ok(())
             })?;
         }
-        _ => panic!("tried to verify v1 tx with a non v1 ring"),
+        Rings::RingCT(_) => panic!("tried to verify v1 tx with a non v1 ring"),
     }
     Ok(())
 }
@@ -16,13 +16,13 @@ use crate::decomposed_amount::DECOMPOSED_AMOUNTS;

 #[test]
 fn test_check_output_amount_v1() {
-    for amount in DECOMPOSED_AMOUNTS.iter() {
-        assert!(check_output_amount_v1(*amount, &HardFork::V2).is_ok())
+    for amount in &DECOMPOSED_AMOUNTS {
+        assert!(check_output_amount_v1(*amount, HardFork::V2).is_ok());
     }

     proptest!(|(amount in any::<u64>().prop_filter("value_decomposed", |val| !is_decomposed_amount(val)))| {
-        prop_assert!(check_output_amount_v1(amount, &HardFork::V2).is_err());
-        prop_assert!(check_output_amount_v1(amount, &HardFork::V1).is_ok())
+        prop_assert!(check_output_amount_v1(amount, HardFork::V2).is_err());
+        prop_assert!(check_output_amount_v1(amount, HardFork::V1).is_ok());
     });
 }

@@ -41,10 +41,10 @@ fn test_sum_outputs() {

     let outs = [output_10, outputs_20];

-    let sum = sum_outputs(&outs, &HardFork::V16, &TxVersion::RingSignatures).unwrap();
+    let sum = sum_outputs(&outs, HardFork::V16, TxVersion::RingSignatures).unwrap();
     assert_eq!(sum, 30);

-    assert!(sum_outputs(&outs, &HardFork::V16, &TxVersion::RingCT).is_err())
+    assert!(sum_outputs(&outs, HardFork::V16, TxVersion::RingCT).is_err());
 }

 #[test]
@@ -52,50 +52,50 @@ fn test_decoy_info() {
     let decoy_info = DecoyInfo {
         mixable: 0,
         not_mixable: 0,
-        min_decoys: minimum_decoys(&HardFork::V8),
-        max_decoys: minimum_decoys(&HardFork::V8) + 1,
+        min_decoys: minimum_decoys(HardFork::V8),
+        max_decoys: minimum_decoys(HardFork::V8) + 1,
     };

-    assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_ok());
-    assert!(check_decoy_info(&decoy_info, &HardFork::V16).is_err());
+    assert!(check_decoy_info(&decoy_info, HardFork::V8).is_ok());
+    assert!(check_decoy_info(&decoy_info, HardFork::V16).is_err());

     let mut decoy_info = DecoyInfo {
         mixable: 0,
         not_mixable: 0,
-        min_decoys: minimum_decoys(&HardFork::V8) - 1,
-        max_decoys: minimum_decoys(&HardFork::V8) + 1,
+        min_decoys: minimum_decoys(HardFork::V8) - 1,
+        max_decoys: minimum_decoys(HardFork::V8) + 1,
     };

-    assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_err());
+    assert!(check_decoy_info(&decoy_info, HardFork::V8).is_err());

     decoy_info.not_mixable = 1;
-    assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_ok());
+    assert!(check_decoy_info(&decoy_info, HardFork::V8).is_ok());

     decoy_info.mixable = 2;
-    assert!(check_decoy_info(&decoy_info, &HardFork::V8).is_err());
+    assert!(check_decoy_info(&decoy_info, HardFork::V8).is_err());

     let mut decoy_info = DecoyInfo {
         mixable: 0,
         not_mixable: 0,
-        min_decoys: minimum_decoys(&HardFork::V12),
-        max_decoys: minimum_decoys(&HardFork::V12) + 1,
+        min_decoys: minimum_decoys(HardFork::V12),
+        max_decoys: minimum_decoys(HardFork::V12) + 1,
     };

-    assert!(check_decoy_info(&decoy_info, &HardFork::V12).is_err());
+    assert!(check_decoy_info(&decoy_info, HardFork::V12).is_err());

     decoy_info.max_decoys = decoy_info.min_decoys;
-    assert!(check_decoy_info(&decoy_info, &HardFork::V12).is_ok());
+    assert!(check_decoy_info(&decoy_info, HardFork::V12).is_ok());
 }

 #[test]
 fn test_torsion_ki() {
-    for &key_image in EIGHT_TORSION[1..].iter() {
+    for &key_image in &EIGHT_TORSION[1..] {
         assert!(check_key_images(&Input::ToKey {
             key_image,
             amount: None,
             key_offsets: vec![],
         })
-        .is_err())
+        .is_err());
     }
 }

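The torsion test works because adding any nonzero 8-torsion point to a valid point moves it out of the prime-order subgroup, which `check_key_images` must reject. A hedged sketch of the underlying subgroup check, assuming curve25519-dalek's `is_torsion_free` helper (the real implementation may differ):

    use curve25519_dalek::edwards::EdwardsPoint;

    /// Sketch: a key image must lie in the prime-order subgroup.
    fn key_image_in_prime_subgroup(key_image: &EdwardsPoint) -> bool {
        // Torsioned points (like the point + EIGHT_TORSION[i] sums above)
        // fail this check.
        key_image.is_torsion_free()
    }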
@@ -109,7 +109,7 @@ prop_compose! {
 prop_compose! {
     /// Returns a valid torsioned point.
     fn random_torsioned_point()(point in random_point(), torsion in 1..8_usize ) -> EdwardsPoint {
-        point + curve25519_dalek::constants::EIGHT_TORSION[torsion]
+        point + EIGHT_TORSION[torsion]
     }
 }

@@ -175,7 +175,7 @@ prop_compose! {
     /// Returns a [`Timelock`] that is unlocked given a height and time.
     fn unlocked_timelock(height: u64, time_for_time_lock: u64)(
         ty in 0..3,
-        lock_height in 0..(height+1),
+        lock_height in 0..=height,
         time_for_time_lock in 0..(time_for_time_lock+121),
     ) -> Timelock {
         match ty {
@@ -203,33 +203,33 @@ proptest! {
         hf_no_view_tags in hf_in_range(1..14),
         hf_view_tags in hf_in_range(16..17),
     ) {
-        prop_assert!(check_output_types(&view_tag_outs, &hf_view_tags).is_ok());
-        prop_assert!(check_output_types(&view_tag_outs, &hf_no_view_tags).is_err());
+        prop_assert!(check_output_types(&view_tag_outs, hf_view_tags).is_ok());
+        prop_assert!(check_output_types(&view_tag_outs, hf_no_view_tags).is_err());


-        prop_assert!(check_output_types(&non_view_tag_outs, &hf_no_view_tags).is_ok());
-        prop_assert!(check_output_types(&non_view_tag_outs, &hf_view_tags).is_err());
+        prop_assert!(check_output_types(&non_view_tag_outs, hf_no_view_tags).is_ok());
+        prop_assert!(check_output_types(&non_view_tag_outs, hf_view_tags).is_err());

-        prop_assert!(check_output_types(&non_view_tag_outs, &HardFork::V15).is_ok());
-        prop_assert!(check_output_types(&view_tag_outs, &HardFork::V15).is_ok());
+        prop_assert!(check_output_types(&non_view_tag_outs, HardFork::V15).is_ok());
+        prop_assert!(check_output_types(&view_tag_outs, HardFork::V15).is_ok());
         view_tag_outs.append(&mut non_view_tag_outs);
-        prop_assert!(check_output_types(&view_tag_outs, &HardFork::V15).is_err());
+        prop_assert!(check_output_types(&view_tag_outs, HardFork::V15).is_err());
     }

     #[test]
     fn test_valid_number_of_outputs(valid_numb_outs in 2..17_usize) {
-        prop_assert!(check_number_of_outputs(valid_numb_outs, &HardFork::V16, &TxVersion::RingCT, true).is_ok());
+        prop_assert!(check_number_of_outputs(valid_numb_outs, HardFork::V16, TxVersion::RingCT, true).is_ok());
     }

     #[test]
     fn test_invalid_number_of_outputs(numb_outs in 17..usize::MAX) {
-        prop_assert!(check_number_of_outputs(numb_outs, &HardFork::V16, &TxVersion::RingCT, true).is_err());
+        prop_assert!(check_number_of_outputs(numb_outs, HardFork::V16, TxVersion::RingCT, true).is_err());
     }

     #[test]
     fn test_check_output_amount_v2(amt in 1..u64::MAX) {
         prop_assert!(check_output_amount_v2(amt).is_err());
-        prop_assert!(check_output_amount_v2(0).is_ok())
+        prop_assert!(check_output_amount_v2(0).is_ok());
     }

     #[test]
@@ -241,9 +241,9 @@ proptest! {

     #[test]
     fn test_timestamp_time_lock(timestamp in 500_000_001..u64::MAX) {
-        prop_assert!(check_timestamp_time_lock(timestamp, timestamp - 120, &HardFork::V16));
-        prop_assert!(!check_timestamp_time_lock(timestamp, timestamp - 121, &HardFork::V16));
-        prop_assert!(check_timestamp_time_lock(timestamp, timestamp, &HardFork::V16));
+        prop_assert!(check_timestamp_time_lock(timestamp, timestamp - 120, HardFork::V16));
+        prop_assert!(!check_timestamp_time_lock(timestamp, timestamp - 121, HardFork::V16));
+        prop_assert!(check_timestamp_time_lock(timestamp, timestamp, HardFork::V16));
     }

     #[test]
@@ -251,11 +251,11 @@ proptest! {
         mut locked_locks in vec(locked_timelock(5_000, 100_000_000), 1..50),
         mut unlocked_locks in vec(unlocked_timelock(5_000, 100_000_000), 1..50)
     ) {
-        assert!(check_all_time_locks(&locked_locks, 5_000, 100_000_000, &HardFork::V16).is_err());
-        assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, &HardFork::V16).is_ok());
+        assert!(check_all_time_locks(&locked_locks, 5_000, 100_000_000, HardFork::V16).is_err());
+        assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, HardFork::V16).is_ok());

         unlocked_locks.append(&mut locked_locks);
-        assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, &HardFork::V16).is_err());
+        assert!(check_all_time_locks(&unlocked_locks, 5_000, 100_000_000, HardFork::V16).is_err());
     }

     #[test]
@@ -1,4 +1,4 @@
-use std::{cell::RefCell, ops::DerefMut};
+use std::cell::RefCell;

 use monero_serai::ringct::bulletproofs::BatchVerifier as InternalBatchVerifier;
 use rayon::prelude::*;
@@ -13,8 +13,8 @@ pub struct MultiThreadedBatchVerifier {

 impl MultiThreadedBatchVerifier {
     /// Create a new multithreaded batch verifier,
-    pub fn new(numb_threads: usize) -> MultiThreadedBatchVerifier {
-        MultiThreadedBatchVerifier {
+    pub fn new(numb_threads: usize) -> Self {
+        Self {
             internal: ThreadLocal::with_capacity(numb_threads),
         }
     }
@@ -42,6 +42,6 @@ impl BatchVerifier for &'_ MultiThreadedBatchVerifier {
             .get_or(|| RefCell::new(InternalBatchVerifier::new()))
             .borrow_mut();

-        stmt(verifier.deref_mut())
+        stmt(&mut verifier)
     }
 }
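The pattern above gives each rayon worker its own `InternalBatchVerifier` behind a `ThreadLocal`, so statements are queued without any locking and the per-thread verifiers can be drained at the end. A hedged standalone sketch of the same idea, with a toy accumulator standing in for a real verifier:

    use std::cell::RefCell;
    use thread_local::ThreadLocal;

    /// Sketch: lock-free per-thread accumulators, merged after the parallel phase.
    struct PerThreadSums {
        internal: ThreadLocal<RefCell<u64>>,
    }

    impl PerThreadSums {
        fn add(&self, x: u64) {
            // Each thread lazily gets its own RefCell; no Mutex needed.
            *self.internal.get_or(|| RefCell::new(0)).borrow_mut() += x;
        }

        fn total(self) -> u64 {
            // Drain every thread's cell once the parallel work is done.
            self.internal.into_iter().map(RefCell::into_inner).sum()
        }
    }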
@@ -72,17 +72,17 @@ impl PreparedBlockExPow {
     /// This errors if either the `block`'s:
     /// - Hard-fork values are invalid
     /// - Miner transaction is missing a miner input
-    pub fn new(block: Block) -> Result<PreparedBlockExPow, ConsensusError> {
+    pub fn new(block: Block) -> Result<Self, ConsensusError> {
         let (hf_version, hf_vote) = HardFork::from_block_header(&block.header)
             .map_err(|_| BlockError::HardForkError(HardForkError::HardForkUnknown))?;

         let Some(Input::Gen(height)) = block.miner_transaction.prefix().inputs.first() else {
-            Err(ConsensusError::Block(BlockError::MinerTxError(
+            return Err(ConsensusError::Block(BlockError::MinerTxError(
                 MinerTxError::InputNotOfTypeGen,
-            )))?
+            )));
         };

-        Ok(PreparedBlockExPow {
+        Ok(Self {
             block_blob: block.serialize(),
             hf_vote,
             hf_version,
@@ -123,20 +123,17 @@ impl PreparedBlock {
     ///
     /// The randomX VM must be Some if RX is needed or this will panic.
     /// The randomX VM must also be initialised with the correct seed.
-    fn new<R: RandomX>(
-        block: Block,
-        randomx_vm: Option<&R>,
-    ) -> Result<PreparedBlock, ConsensusError> {
+    fn new<R: RandomX>(block: Block, randomx_vm: Option<&R>) -> Result<Self, ConsensusError> {
         let (hf_version, hf_vote) = HardFork::from_block_header(&block.header)
             .map_err(|_| BlockError::HardForkError(HardForkError::HardForkUnknown))?;

         let [Input::Gen(height)] = &block.miner_transaction.prefix().inputs[..] else {
-            Err(ConsensusError::Block(BlockError::MinerTxError(
+            return Err(ConsensusError::Block(BlockError::MinerTxError(
                 MinerTxError::InputNotOfTypeGen,
-            )))?
+            )));
         };

-        Ok(PreparedBlock {
+        Ok(Self {
             block_blob: block.serialize(),
             hf_vote,
             hf_version,
@@ -156,17 +153,17 @@ impl PreparedBlock {
     /// Creates a new [`PreparedBlock`] from a [`PreparedBlockExPow`].
     ///
-    /// This function will give an invalid PoW hash if `randomx_vm` is not initialised
+    /// This function will give an invalid proof-of-work hash if `randomx_vm` is not initialised
     /// with the correct seed.
     ///
     /// # Panics
     /// This function will panic if `randomx_vm` is
-    /// [`None`] even though RandomX is needed.
+    /// [`None`] even though `RandomX` is needed.
     fn new_prepped<R: RandomX>(
         block: PreparedBlockExPow,
         randomx_vm: Option<&R>,
-    ) -> Result<PreparedBlock, ConsensusError> {
-        Ok(PreparedBlock {
+    ) -> Result<Self, ConsensusError> {
+        Ok(Self {
             block_blob: block.block_blob,
             hf_vote: block.hf_vote,
             hf_version: block.hf_version,
@@ -218,7 +215,6 @@ pub enum VerifyBlockRequest {
 }

 /// A response from a verify block request.
-#[allow(clippy::large_enum_variant)] // The largest variant is most common ([`MainChain`])
 pub enum VerifyBlockResponse {
     /// This block is valid.
     MainChain(VerifiedBlockInformation),
@@ -254,12 +250,8 @@ where
     D::Future: Send + 'static,
 {
     /// Creates a new block verifier.
-    pub(crate) fn new(
-        context_svc: C,
-        tx_verifier_svc: TxV,
-        database: D,
-    ) -> BlockVerifierService<C, TxV, D> {
-        BlockVerifierService {
+    pub(crate) const fn new(context_svc: C, tx_verifier_svc: TxV, database: D) -> Self {
+        Self {
             context_svc,
             tx_verifier_svc,
             _database: database,
@@ -36,8 +36,8 @@ use crate::{
 ///
 /// Returns [`AltBlockInformation`], which contains the cumulative difficulty of the alt chain.
 ///
-/// This function only checks the block's PoW and its weight.
-pub async fn sanity_check_alt_block<C>(
+/// This function only checks the block's proof-of-work and its weight.
+pub(crate) async fn sanity_check_alt_block<C>(
     block: Block,
     txs: HashMap<[u8; 32], TransactionVerificationData>,
     mut context_svc: C,
@@ -66,15 +66,17 @@ where

     // Check if the block's miner input is formed correctly.
     let [Input::Gen(height)] = &block.miner_transaction.prefix().inputs[..] else {
-        Err(ConsensusError::Block(BlockError::MinerTxError(
+        return Err(ConsensusError::Block(BlockError::MinerTxError(
             MinerTxError::InputNotOfTypeGen,
-        )))?
+        ))
+        .into());
     };

     if *height != alt_context_cache.chain_height {
-        Err(ConsensusError::Block(BlockError::MinerTxError(
+        return Err(ConsensusError::Block(BlockError::MinerTxError(
             MinerTxError::InputsHeightIncorrect,
-        )))?
+        ))
+        .into());
     }

     // prep the alt block.
@@ -103,10 +105,10 @@ where
     if let Some(median_timestamp) =
         difficulty_cache.median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW))
     {
-        check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)?
+        check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)?;
     };

-    let next_difficulty = difficulty_cache.next_difficulty(&prepped_block.hf_version);
+    let next_difficulty = difficulty_cache.next_difficulty(prepped_block.hf_version);
     // make sure the block's PoW is valid for this difficulty.
     check_block_pow(&prepped_block.pow_hash, next_difficulty).map_err(ConsensusError::Block)?;

@@ -127,12 +129,12 @@ where
     // Check the block weight is below the limit.
     check_block_weight(
         block_weight,
-        alt_weight_cache.median_for_block_reward(&prepped_block.hf_version),
+        alt_weight_cache.median_for_block_reward(prepped_block.hf_version),
     )
     .map_err(ConsensusError::Block)?;

     let long_term_weight = weight::calculate_block_long_term_weight(
-        &prepped_block.hf_version,
+        prepped_block.hf_version,
         block_weight,
         alt_weight_cache.median_long_term_weight(),
     );
@@ -232,9 +234,9 @@ where
         }
     };

-    Ok(Some(
-        alt_chain_context.cached_rx_vm.insert(cached_vm).1.clone(),
-    ))
+    Ok(Some(Arc::clone(
+        &alt_chain_context.cached_rx_vm.insert(cached_vm).1,
+    )))
 }

 /// Returns the [`DifficultyCache`] for the alt chain.
@@ -68,16 +68,17 @@ where

     // Make sure no blocks in the batch have a higher hard fork than the last block.
     if block_0.hf_version > top_hf_in_batch {
-        Err(ConsensusError::Block(BlockError::HardForkError(
+        return Err(ConsensusError::Block(BlockError::HardForkError(
             HardForkError::VersionIncorrect,
-        )))?;
+        ))
+        .into());
     }

     if block_0.block_hash != block_1.block.header.previous
         || block_0.height != block_1.height - 1
     {
         tracing::debug!("Blocks do not follow each other, verification failed.");
-        Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
+        return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into());
     }

     // Cache any potential RX VM seeds as we may need them for future blocks in the batch.
@@ -85,7 +86,7 @@ where
         new_rx_vm = Some((block_0.height, block_0.block_hash));
     }

-    timestamps_hfs.push((block_0.block.header.timestamp, block_0.hf_version))
+    timestamps_hfs.push((block_0.block.header.timestamp, block_0.hf_version));
 }

 // Get the current blockchain context.
@@ -117,15 +118,16 @@ where
     if context.chain_height != blocks[0].height {
         tracing::debug!("Blocks do not follow main chain, verification failed.");

-        Err(ConsensusError::Block(BlockError::MinerTxError(
+        return Err(ConsensusError::Block(BlockError::MinerTxError(
             MinerTxError::InputsHeightIncorrect,
-        )))?;
+        ))
+        .into());
     }

     if context.top_hash != blocks[0].block.header.previous {
         tracing::debug!("Blocks do not follow main chain, verification failed.");

-        Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?;
+        return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into());
     }

     let mut rx_vms = if top_hf_in_batch < HardFork::V12 {
@@ -156,7 +158,7 @@ where
     context_svc
         .oneshot(BlockChainContextRequest::NewRXVM((
             new_vm_seed,
-            new_vm.clone(),
+            Arc::clone(&new_vm),
         )))
         .await?;

@@ -56,8 +56,8 @@ pub struct ContextConfig {

 impl ContextConfig {
     /// Get the config for main-net.
-    pub fn main_net() -> ContextConfig {
-        ContextConfig {
+    pub const fn main_net() -> Self {
+        Self {
             hard_fork_cfg: HardForkConfig::main_net(),
             difficulty_cfg: DifficultyCacheConfig::main_net(),
             weights_config: BlockWeightsCacheConfig::main_net(),
@@ -65,8 +65,8 @@ impl ContextConfig {
     }

     /// Get the config for stage-net.
-    pub fn stage_net() -> ContextConfig {
-        ContextConfig {
+    pub const fn stage_net() -> Self {
+        Self {
             hard_fork_cfg: HardForkConfig::stage_net(),
             // These 2 have the same config as main-net.
             difficulty_cfg: DifficultyCacheConfig::main_net(),
@@ -75,8 +75,8 @@ impl ContextConfig {
     }

     /// Get the config for test-net.
-    pub fn test_net() -> ContextConfig {
-        ContextConfig {
+    pub const fn test_net() -> Self {
+        Self {
             hard_fork_cfg: HardForkConfig::test_net(),
             // These 2 have the same config as main-net.
             difficulty_cfg: DifficultyCacheConfig::main_net(),
||||||
|
@ -155,7 +155,7 @@ impl RawBlockChainContext {
|
||||||
/// Returns the next blocks long term weight from its block weight.
|
/// Returns the next blocks long term weight from its block weight.
|
||||||
pub fn next_block_long_term_weight(&self, block_weight: usize) -> usize {
|
pub fn next_block_long_term_weight(&self, block_weight: usize) -> usize {
|
||||||
weight::calculate_block_long_term_weight(
|
weight::calculate_block_long_term_weight(
|
||||||
&self.current_hf,
|
self.current_hf,
|
||||||
block_weight,
|
block_weight,
|
||||||
self.median_long_term_weight,
|
self.median_long_term_weight,
|
||||||
)
|
)
|
||||||
|
@ -191,7 +191,7 @@ impl BlockChainContext {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the blockchain context without checking the validity token.
|
/// Returns the blockchain context without checking the validity token.
|
||||||
pub fn unchecked_blockchain_context(&self) -> &RawBlockChainContext {
|
pub const fn unchecked_blockchain_context(&self) -> &RawBlockChainContext {
|
||||||
&self.raw
|
&self.raw
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -222,7 +222,7 @@ pub struct NewBlockData {
|
||||||
pub enum BlockChainContextRequest {
|
pub enum BlockChainContextRequest {
|
||||||
/// Get the current blockchain context.
|
/// Get the current blockchain context.
|
||||||
GetContext,
|
GetContext,
|
||||||
/// Gets the current RandomX VM.
|
/// Gets the current `RandomX` VM.
|
||||||
GetCurrentRxVm,
|
GetCurrentRxVm,
|
||||||
/// Get the next difficulties for these blocks.
|
/// Get the next difficulties for these blocks.
|
||||||
///
|
///
|
||||||
|
@ -288,7 +288,7 @@ pub enum BlockChainContextRequest {
|
||||||
/// This variant is private and is not callable from outside this crate, the block verifier service will
|
/// This variant is private and is not callable from outside this crate, the block verifier service will
|
||||||
/// handle getting the randomX VM of an alt chain.
|
/// handle getting the randomX VM of an alt chain.
|
||||||
AltChainRxVM {
|
AltChainRxVM {
|
||||||
/// The height the RandomX VM is needed for.
|
/// The height the `RandomX` VM is needed for.
|
||||||
height: usize,
|
height: usize,
|
||||||
/// The chain to look in for the seed.
|
/// The chain to look in for the seed.
|
||||||
chain: Chain,
|
chain: Chain,
|
||||||
|
@ -312,7 +312,7 @@ pub enum BlockChainContextRequest {
|
||||||
pub enum BlockChainContextResponse {
|
pub enum BlockChainContextResponse {
|
||||||
/// Blockchain context response.
|
/// Blockchain context response.
|
||||||
Context(BlockChainContext),
|
Context(BlockChainContext),
|
||||||
/// A map of seed height to RandomX VMs.
|
/// A map of seed height to `RandomX` VMs.
|
||||||
RxVms(HashMap<usize, Arc<RandomXVm>>),
|
RxVms(HashMap<usize, Arc<RandomXVm>>),
|
||||||
/// A list of difficulties.
|
/// A list of difficulties.
|
||||||
BatchDifficulties(Vec<u128>),
|
BatchDifficulties(Vec<u128>),
|
||||||
|
|
|
@@ -68,29 +68,33 @@ impl AltChainContextCache {
 }

 /// A map of top IDs to alt chains.
-pub struct AltChainMap {
+pub(crate) struct AltChainMap {
     alt_cache_map: HashMap<[u8; 32], Box<AltChainContextCache>>,
 }

 impl AltChainMap {
-    pub fn new() -> Self {
+    pub(crate) fn new() -> Self {
         Self {
             alt_cache_map: HashMap::new(),
         }
     }

-    pub fn clear(&mut self) {
+    pub(crate) fn clear(&mut self) {
         self.alt_cache_map.clear();
     }

     /// Add an alt chain cache to the map.
-    pub fn add_alt_cache(&mut self, prev_id: [u8; 32], alt_cache: Box<AltChainContextCache>) {
+    pub(crate) fn add_alt_cache(
+        &mut self,
+        prev_id: [u8; 32],
+        alt_cache: Box<AltChainContextCache>,
+    ) {
         self.alt_cache_map.insert(prev_id, alt_cache);
     }

     /// Attempts to take an [`AltChainContextCache`] from the map, returning [`None`] if no cache is
     /// present.
-    pub async fn get_alt_chain_context<D: Database>(
+    pub(crate) async fn get_alt_chain_context<D: Database>(
         &mut self,
         prev_id: [u8; 32],
         database: D,
@@ -109,7 +113,7 @@ impl AltChainMap {

         let Some((parent_chain, top_height)) = res else {
             // Couldn't find prev_id
-            Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?
+            return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into());
         };

         Ok(Box::new(AltChainContextCache {
@@ -125,7 +129,7 @@ impl AltChainMap {
 }

 /// Builds a [`DifficultyCache`] for an alt chain.
-pub async fn get_alt_chain_difficulty_cache<D: Database + Clone>(
+pub(crate) async fn get_alt_chain_difficulty_cache<D: Database + Clone>(
     prev_id: [u8; 32],
     main_chain_difficulty_cache: &DifficultyCache,
     mut database: D,
@@ -142,7 +146,7 @@ pub async fn get_alt_chain_difficulty_cache<D: Database + Clone>(

     let Some((chain, top_height)) = res else {
         // Can't find prev_id
-        Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?
+        return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into());
     };

     Ok(match chain {
@@ -172,7 +176,7 @@ pub async fn get_alt_chain_difficulty_cache<D: Database + Clone>(
 }

 /// Builds a [`BlockWeightsCache`] for an alt chain.
-pub async fn get_alt_chain_weight_cache<D: Database + Clone>(
+pub(crate) async fn get_alt_chain_weight_cache<D: Database + Clone>(
     prev_id: [u8; 32],
     main_chain_weight_cache: &BlockWeightsCache,
     mut database: D,
@@ -189,7 +193,7 @@ pub async fn get_alt_chain_weight_cache<D: Database + Clone>(

     let Some((chain, top_height)) = res else {
         // Can't find prev_id
-        Err(ConsensusError::Block(BlockError::PreviousIDIncorrect))?
+        return Err(ConsensusError::Block(BlockError::PreviousIDIncorrect).into());
     };

     Ok(match chain {

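Each `let ... else` arm above swaps `Err(...)?` for an explicit `return Err(...).into()`. Both forms convert the error via `From` and return early, but the `?`-on-`Err` idiom hides that a return is happening (it is also what clippy's `try_err` lint flags). A self-contained sketch of the two spellings, using stand-in error types rather than the crate's own:

    // Stand-ins for the crate's `ConsensusError` / `ExtendedConsensusError`.
    #[derive(Debug)]
    struct ConsensusError;

    #[derive(Debug)]
    struct ExtendedConsensusError;

    impl From<ConsensusError> for ExtendedConsensusError {
        fn from(_: ConsensusError) -> Self {
            Self
        }
    }

    fn lookup(res: Option<u64>) -> Result<u64, ExtendedConsensusError> {
        let Some(height) = res else {
            // Clearer than `Err(ConsensusError)?`: the early return and the
            // `From` conversion are both explicit.
            return Err(ConsensusError.into());
        };
        Ok(height)
    }

    fn main() {
        assert!(lookup(Some(3)).is_ok());
        assert!(lookup(None).is_err());
    }
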
@@ -43,24 +43,24 @@ impl DifficultyCacheConfig {
     ///
     /// # Notes
     /// You probably do not need this, use [`DifficultyCacheConfig::main_net`] instead.
-    pub const fn new(window: usize, cut: usize, lag: usize) -> DifficultyCacheConfig {
-        DifficultyCacheConfig { window, cut, lag }
+    pub const fn new(window: usize, cut: usize, lag: usize) -> Self {
+        Self { window, cut, lag }
     }

     /// Returns the total amount of blocks we need to track to calculate difficulty
-    pub fn total_block_count(&self) -> usize {
+    pub const fn total_block_count(&self) -> usize {
         self.window + self.lag
     }

     /// The amount of blocks we account for after removing the outliers.
-    pub fn accounted_window_len(&self) -> usize {
+    pub const fn accounted_window_len(&self) -> usize {
         self.window - 2 * self.cut
     }

     /// Returns the config needed for [`Mainnet`](cuprate_helper::network::Network::Mainnet). This is also the
     /// config for all other current networks.
-    pub const fn main_net() -> DifficultyCacheConfig {
-        DifficultyCacheConfig {
+    pub const fn main_net() -> Self {
+        Self {
             window: DIFFICULTY_WINDOW,
             cut: DIFFICULTY_CUT,
             lag: DIFFICULTY_LAG,
@@ -112,7 +112,7 @@ impl DifficultyCache {
             timestamps.len()
         );

-        let diff = DifficultyCache {
+        let diff = Self {
             timestamps,
             cumulative_difficulties,
             last_accounted_height: chain_height - 1,
@@ -203,8 +203,8 @@ impl DifficultyCache {

     /// Returns the required difficulty for the next block.
     ///
-    /// See: https://cuprate.github.io/monero-book/consensus_rules/blocks/difficulty.html#calculating-difficulty
-    pub fn next_difficulty(&self, hf: &HardFork) -> u128 {
+    /// See: <https://cuprate.github.io/monero-book/consensus_rules/blocks/difficulty.html#calculating-difficulty>
+    pub fn next_difficulty(&self, hf: HardFork) -> u128 {
         next_difficulty(
             &self.config,
             &self.timestamps,
@@ -223,7 +223,7 @@ impl DifficultyCache {
     pub fn next_difficulties(
         &self,
         blocks: Vec<(u64, HardFork)>,
-        current_hf: &HardFork,
+        current_hf: HardFork,
     ) -> Vec<u128> {
         let mut timestamps = self.timestamps.clone();
         let mut cumulative_difficulties = self.cumulative_difficulties.clone();
@@ -232,8 +232,6 @@ impl DifficultyCache {

         difficulties.push(self.next_difficulty(current_hf));

-        let mut diff_info_popped = Vec::new();
-
         for (new_timestamp, hf) in blocks {
             timestamps.push_back(new_timestamp);

@@ -241,17 +239,15 @@ impl DifficultyCache {
             cumulative_difficulties.push_back(last_cum_diff + *difficulties.last().unwrap());

             if timestamps.len() > self.config.total_block_count() {
-                diff_info_popped.push((
-                    timestamps.pop_front().unwrap(),
-                    cumulative_difficulties.pop_front().unwrap(),
-                ));
+                timestamps.pop_front().unwrap();
+                cumulative_difficulties.pop_front().unwrap();
             }

             difficulties.push(next_difficulty(
                 &self.config,
                 &timestamps,
                 &cumulative_difficulties,
-                &hf,
+                hf,
             ));
         }

@@ -295,12 +291,12 @@ impl DifficultyCache {
     }
 }

-/// Calculates the next difficulty with the inputted config/timestamps/cumulative_difficulties.
+/// Calculates the next difficulty with the inputted `config/timestamps/cumulative_difficulties`.
 fn next_difficulty(
     config: &DifficultyCacheConfig,
     timestamps: &VecDeque<u64>,
     cumulative_difficulties: &VecDeque<u128>,
-    hf: &HardFork,
+    hf: HardFork,
 ) -> u128 {
     if timestamps.len() <= 1 {
         return 1;

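To make the `window`/`cut`/`lag` arithmetic above concrete: the cache tracks `window + lag` blocks, ignores the `lag` newest ones, sorts the remaining window by timestamp, and trims `cut` outliers from each end, leaving `window - 2 * cut` accounted blocks. A rough sketch of that windowing; the 720/60/15 constants match Monero's published mainnet difficulty parameters, but the trimming shown is a simplification of the full algorithm:

    // Monero mainnet difficulty constants.
    const DIFFICULTY_WINDOW: usize = 720;
    const DIFFICULTY_CUT: usize = 60;
    const DIFFICULTY_LAG: usize = 15;

    /// How many of the tracked timestamps actually feed the difficulty formula.
    const fn accounted_window_len(window: usize, cut: usize) -> usize {
        window - 2 * cut
    }

    fn trimmed_window(mut timestamps: Vec<u64>) -> Vec<u64> {
        // Drop the `lag` newest blocks first (`timestamps` is oldest-first).
        timestamps.truncate(timestamps.len().saturating_sub(DIFFICULTY_LAG));
        // Sort, then cut `DIFFICULTY_CUT` outliers from each end.
        timestamps.sort_unstable();
        timestamps
            .into_iter()
            .skip(DIFFICULTY_CUT)
            .take(accounted_window_len(DIFFICULTY_WINDOW, DIFFICULTY_CUT))
            .collect()
    }

    fn main() {
        // total_block_count() == window + lag == 735 blocks tracked.
        let tracked: Vec<u64> = (0..(DIFFICULTY_WINDOW + DIFFICULTY_LAG) as u64).collect();
        // 720 - 2 * 60 == 600 blocks remain after trimming.
        assert_eq!(trimmed_window(tracked).len(), 600);
    }
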
@@ -28,7 +28,7 @@ pub struct HardForkConfig {

 impl HardForkConfig {
     /// Config for main-net.
-    pub const fn main_net() -> HardForkConfig {
+    pub const fn main_net() -> Self {
         Self {
             info: HFsInfo::main_net(),
             window: DEFAULT_WINDOW_SIZE,
@@ -36,7 +36,7 @@ impl HardForkConfig {
     }

     /// Config for stage-net.
-    pub const fn stage_net() -> HardForkConfig {
+    pub const fn stage_net() -> Self {
         Self {
             info: HFsInfo::stage_net(),
             window: DEFAULT_WINDOW_SIZE,
@@ -44,7 +44,7 @@ impl HardForkConfig {
     }

     /// Config for test-net.
-    pub const fn test_net() -> HardForkConfig {
+    pub const fn test_net() -> Self {
         Self {
             info: HFsInfo::test_net(),
             window: DEFAULT_WINDOW_SIZE,
@@ -54,7 +54,7 @@ impl HardForkConfig {

 /// A struct that keeps track of the current hard-fork and current votes.
 #[derive(Debug, Clone, Eq, PartialEq)]
-pub struct HardForkState {
+pub(crate) struct HardForkState {
     /// The current active hard-fork.
     pub(crate) current_hardfork: HardFork,

@@ -83,7 +83,7 @@ impl HardForkState {
         get_votes_in_range(database.clone(), block_start..chain_height, config.window).await?;

         if chain_height > config.window {
-            debug_assert_eq!(votes.total_votes(), config.window)
+            debug_assert_eq!(votes.total_votes(), config.window);
         }

         let BlockchainResponse::BlockExtendedHeader(ext_header) = database
@@ -97,7 +97,7 @@ impl HardForkState {

         let current_hardfork = ext_header.version;

-        let mut hfs = HardForkState {
+        let mut hfs = Self {
             config,
             current_hardfork,
             votes,
@@ -122,7 +122,7 @@ impl HardForkState {
     /// # Invariant
     ///
     /// This _must_ only be used on a main-chain cache.
-    pub async fn pop_blocks_main_chain<D: Database + Clone>(
+    pub(crate) async fn pop_blocks_main_chain<D: Database + Clone>(
         &mut self,
         numb_blocks: usize,
         database: D,
@@ -159,7 +159,7 @@ impl HardForkState {
     }

     /// Add a new block to the cache.
-    pub fn new_block(&mut self, vote: HardFork, height: usize) {
+    pub(crate) fn new_block(&mut self, vote: HardFork, height: usize) {
         // We don't _need_ to take in `height` but it's for safety, so we don't silently loose track
         // of blocks.
         assert_eq!(self.last_height + 1, height);
@@ -183,7 +183,7 @@ impl HardForkState {

     /// Checks if the next hard-fork should be activated and activates it if it should.
     ///
-    /// https://cuprate.github.io/monero-docs/consensus_rules/hardforks.html#accepting-a-fork
+    /// <https://cuprate.github.io/monero-docs/consensus_rules/hardforks.html#accepting-a-fork>
     fn check_set_new_hf(&mut self) {
         self.current_hardfork = self.votes.current_fork(
             &self.current_hardfork,
@@ -194,7 +194,7 @@ impl HardForkState {
     }

     /// Returns the current hard-fork.
-    pub fn current_hardfork(&self) -> HardFork {
+    pub(crate) const fn current_hardfork(&self) -> HardFork {
         self.current_hardfork
     }
 }
@@ -218,7 +218,7 @@ async fn get_votes_in_range<D: Database>(
         panic!("Database sent incorrect response!");
     };

-    for hf_info in vote_list.into_iter() {
+    for hf_info in vote_list {
         votes.add_vote_for_hf(&HardFork::from_vote(hf_info.vote));
     }

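`check_set_new_hf` above delegates to a vote counter: each block carries a fork vote, the state keeps a sliding window of the most recent votes, and a new fork activates once enough of the window votes at or above it (see the linked monero-docs page for the exact rule). A simplified sketch of that counting, with an assumed 75% threshold purely for illustration; the real per-fork threshold lives in the hard-fork tables:

    /// Count how many votes in the window are for `proposed_fork` or higher,
    /// and report whether they clear `threshold_num / threshold_den`.
    fn fork_activates(
        votes: &[u8],
        proposed_fork: u8,
        window: usize,
        threshold_num: usize,
        threshold_den: usize,
    ) -> bool {
        let votes_for = votes
            .iter()
            .rev()
            .take(window)
            .filter(|&&v| v >= proposed_fork)
            .count();
        // Integer comparison avoids floating point: votes_for / window >= num / den.
        votes_for * threshold_den >= window * threshold_num
    }

    fn main() {
        // 8 of the last 10 blocks vote for fork 2 or higher: 80% >= 75%.
        let votes = [1, 1, 2, 2, 2, 2, 2, 2, 2, 2];
        assert!(fork_activates(&votes, 2, 10, 3, 4));
        // Only 50% of the window votes for fork 3, so it stays inactive.
        let votes = [2, 2, 2, 2, 2, 3, 3, 3, 3, 3];
        assert!(!fork_activates(&votes, 3, 10, 3, 4));
    }
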
@@ -1,6 +1,6 @@
-//! RandomX VM Cache
+//! `RandomX` VM Cache
 //!
-//! This module keeps track of the RandomX VM to calculate the next blocks PoW, if the block needs a randomX VM and potentially
+//! This module keeps track of the `RandomX` VM to calculate the next blocks proof-of-work, if the block needs a randomX VM and potentially
 //! more VMs around this height.
 //!
 use std::{
@@ -34,11 +34,11 @@ const RX_SEEDS_CACHED: usize = 2;
 /// A multithreaded randomX VM.
 #[derive(Debug)]
 pub struct RandomXVm {
-    /// These RandomX VMs all share the same cache.
+    /// These `RandomX` VMs all share the same cache.
     vms: ThreadLocal<VmInner>,
-    /// The RandomX cache.
+    /// The `RandomX` cache.
     cache: RandomXCache,
-    /// The flags used to start the RandomX VMs.
+    /// The flags used to start the `RandomX` VMs.
     flags: RandomXFlag,
 }

@@ -50,7 +50,7 @@ impl RandomXVm {

         let cache = RandomXCache::new(flags, seed.as_slice())?;

-        Ok(RandomXVm {
+        Ok(Self {
             vms: ThreadLocal::new(),
             cache,
             flags,
@@ -69,10 +69,10 @@ impl RandomX for RandomXVm {
     }
 }

-/// The randomX VMs cache, keeps the VM needed to calculate the current block's PoW hash (if a VM is needed) and a
+/// The randomX VMs cache, keeps the VM needed to calculate the current block's proof-of-work hash (if a VM is needed) and a
 /// couple more around this VM.
 #[derive(Clone, Debug)]
-pub struct RandomXVmCache {
+pub(crate) struct RandomXVmCache {
     /// The top [`RX_SEEDS_CACHED`] RX seeds.
     pub(crate) seeds: VecDeque<(usize, [u8; 32])>,
     /// The VMs for `seeds` (if after hf 12, otherwise this will be empty).
@@ -117,7 +117,7 @@ impl RandomXVmCache {
             HashMap::new()
         };

-        Ok(RandomXVmCache {
+        Ok(Self {
             seeds,
             vms,
             cached_vm: None,
@@ -125,14 +125,14 @@ impl RandomXVmCache {
     }

     /// Add a randomX VM to the cache, with the seed it was created with.
-    pub fn add_vm(&mut self, vm: ([u8; 32], Arc<RandomXVm>)) {
+    pub(crate) fn add_vm(&mut self, vm: ([u8; 32], Arc<RandomXVm>)) {
         self.cached_vm.replace(vm);
     }

     /// Creates a RX VM for an alt chain, looking at the main chain RX VMs to see if we can use one
     /// of them first.
-    pub async fn get_alt_vm<D: Database>(
-        &mut self,
+    pub(crate) async fn get_alt_vm<D: Database>(
+        &self,
         height: usize,
         chain: Chain,
         database: D,
@@ -152,7 +152,7 @@ impl RandomXVmCache {
                     break;
                 };

-                return Ok(vm.clone());
+                return Ok(Arc::clone(vm));
             }
         }

@@ -161,8 +161,8 @@ impl RandomXVmCache {
         Ok(alt_vm)
     }

-    /// Get the main-chain RandomX VMs.
-    pub async fn get_vms(&mut self) -> HashMap<usize, Arc<RandomXVm>> {
+    /// Get the main-chain `RandomX` VMs.
+    pub(crate) async fn get_vms(&mut self) -> HashMap<usize, Arc<RandomXVm>> {
         match self.seeds.len().checked_sub(self.vms.len()) {
             // No difference in the amount of seeds to VMs.
             Some(0) => (),
@@ -206,23 +206,23 @@ impl RandomXVmCache {
                     })
                     .collect()
                 })
-                .await
+                .await;
             }
         }

         self.vms.clone()
     }

-    /// Removes all the RandomX VMs above the `new_height`.
-    pub fn pop_blocks_main_chain(&mut self, new_height: usize) {
+    /// Removes all the `RandomX` VMs above the `new_height`.
+    pub(crate) fn pop_blocks_main_chain(&mut self, new_height: usize) {
         self.seeds.retain(|(height, _)| *height < new_height);
         self.vms.retain(|height, _| *height < new_height);
     }

     /// Add a new block to the VM cache.
     ///
-    /// hash is the block hash not the blocks PoW hash.
-    pub fn new_block(&mut self, height: usize, hash: &[u8; 32]) {
+    /// hash is the block hash not the blocks proof-of-work hash.
+    pub(crate) fn new_block(&mut self, height: usize, hash: &[u8; 32]) {
         if is_randomx_seed_height(height) {
             tracing::debug!("Block {height} is a randomX seed height, adding it to the cache.",);

@@ -235,7 +235,7 @@ impl RandomXVmCache {
                 self.seeds
                     .iter()
                     .any(|(cached_height, _)| height == cached_height)
-            })
+            });
         }
     }
 }
@@ -258,7 +258,7 @@ pub(crate) fn get_last_rx_seed_heights(mut last_height: usize, mut amount: usize
         // We don't include the lag as we only want seeds not the specific seed for this height.
         let seed_height = (last_height - 1) & !(RX_SEEDHASH_EPOCH_BLOCKS - 1);
         seeds.push(seed_height);
-        last_height = seed_height
+        last_height = seed_height;
     }

     seeds

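The `(last_height - 1) & !(RX_SEEDHASH_EPOCH_BLOCKS - 1)` line in `get_last_rx_seed_heights` rounds a height down to the previous seed-epoch boundary; the mask trick only works because the epoch length is a power of two (2048 on Monero). A small sketch, assuming that constant:

    // RandomX seed hash epoch length; the bit mask below relies on this
    // being a power of two (2048 on Monero).
    const RX_SEEDHASH_EPOCH_BLOCKS: usize = 2048;

    /// Round `height - 1` down to the nearest multiple of the epoch length.
    const fn previous_seed_height(height: usize) -> usize {
        (height - 1) & !(RX_SEEDHASH_EPOCH_BLOCKS - 1)
    }

    fn main() {
        // Heights just past an epoch boundary map back to that boundary...
        assert_eq!(previous_seed_height(2049), 2048);
        // ...and a height exactly on a boundary maps to the epoch before it.
        assert_eq!(previous_seed_height(2048), 0);
        assert_eq!(previous_seed_height(5000), 4096);
    }
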
@@ -36,7 +36,7 @@ pub(super) struct ContextTaskRequest {
 }

 /// The Context task that keeps the blockchain context and handles requests.
-pub struct ContextTask<D: Database> {
+pub(crate) struct ContextTask<D: Database> {
     /// A token used to invalidate previous contexts when a new
     /// block is added to the chain.
     current_validity_token: ValidityToken,
@@ -65,7 +65,7 @@ pub struct ContextTask<D: Database> {
 impl<D: Database + Clone + Send + 'static> ContextTask<D> {
     /// Initialize the [`ContextTask`], this will need to pull a lot of data from the database so may take a
     /// while to complete.
-    pub async fn init_context(
+    pub(crate) async fn init_context(
         cfg: ContextConfig,
         mut database: D,
     ) -> Result<Self, ExtendedConsensusError> {
@@ -131,7 +131,7 @@ impl<D: Database + Clone + Send + 'static> ContextTask<D> {
             rx_vms::RandomXVmCache::init_from_chain_height(chain_height, &current_hf, db).await
         });

-        let context_svc = ContextTask {
+        let context_svc = Self {
             current_validity_token: ValidityToken::new(),
             difficulty_cache: difficulty_cache_handle.await.unwrap()?,
             weight_cache: weight_cache_handle.await.unwrap()?,
@@ -148,7 +148,7 @@ impl<D: Database + Clone + Send + 'static> ContextTask<D> {
     }

     /// Handles a [`BlockChainContextRequest`] and returns a [`BlockChainContextResponse`].
-    pub async fn handle_req(
+    pub(crate) async fn handle_req(
         &mut self,
         req: BlockChainContextRequest,
     ) -> Result<BlockChainContextResponse, tower::BoxError> {
@@ -164,17 +164,17 @@ impl<D: Database + Clone + Send + 'static> ContextTask<D> {
                     context_to_verify_block: ContextToVerifyBlock {
                         median_weight_for_block_reward: self
                             .weight_cache
-                            .median_for_block_reward(&current_hf),
+                            .median_for_block_reward(current_hf),
                         effective_median_weight: self
                             .weight_cache
-                            .effective_median_block_weight(&current_hf),
+                            .effective_median_block_weight(current_hf),
                         top_hash: self.top_block_hash,
                         median_block_timestamp: self
                             .difficulty_cache
                             .median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)),
                         chain_height: self.chain_height,
                         current_hf,
-                        next_difficulty: self.difficulty_cache.next_difficulty(&current_hf),
+                        next_difficulty: self.difficulty_cache.next_difficulty(current_hf),
                         already_generated_coins: self.already_generated_coins,
                     },
                     cumulative_difficulty: self.difficulty_cache.cumulative_difficulty(),
@@ -191,7 +191,7 @@ impl<D: Database + Clone + Send + 'static> ContextTask<D> {

                 let next_diffs = self
                     .difficulty_cache
-                    .next_difficulties(blocks, &self.hardfork_state.current_hardfork());
+                    .next_difficulties(blocks, self.hardfork_state.current_hardfork());
                 BlockChainContextResponse::BatchDifficulties(next_diffs)
             }
             BlockChainContextRequest::NewRXVM(vm) => {
@@ -330,10 +330,10 @@ impl<D: Database + Clone + Send + 'static> ContextTask<D> {

     /// Run the [`ContextTask`], the task will listen for requests on the passed in channel. When the channel closes the
     /// task will finish.
-    pub async fn run(mut self, mut rx: mpsc::Receiver<ContextTaskRequest>) {
+    pub(crate) async fn run(mut self, mut rx: mpsc::Receiver<ContextTaskRequest>) {
         while let Some(req) = rx.recv().await {
             let res = self.handle_req(req.req).instrument(req.span).await;
-            let _ = req.tx.send(res);
+            drop(req.tx.send(res));
         }

         tracing::info!("Shutting down blockchain context task.");

@@ -15,8 +15,8 @@ pub struct ValidityToken {

 impl ValidityToken {
     /// Creates a new [`ValidityToken`]
-    pub fn new() -> ValidityToken {
-        ValidityToken {
+    pub fn new() -> Self {
+        Self {
             token: CancellationToken::new(),
         }
     }
@@ -28,6 +28,6 @@ impl ValidityToken {

     /// Sets the data to invalid.
     pub fn set_data_invalid(self) {
-        self.token.cancel()
+        self.token.cancel();
     }
 }

@@ -38,16 +38,16 @@ pub struct BlockWeightsCacheConfig {

 impl BlockWeightsCacheConfig {
     /// Creates a new [`BlockWeightsCacheConfig`]
-    pub const fn new(short_term_window: usize, long_term_window: usize) -> BlockWeightsCacheConfig {
-        BlockWeightsCacheConfig {
+    pub const fn new(short_term_window: usize, long_term_window: usize) -> Self {
+        Self {
             short_term_window,
             long_term_window,
         }
     }

     /// Returns the [`BlockWeightsCacheConfig`] for all networks (They are all the same as mainnet).
-    pub fn main_net() -> BlockWeightsCacheConfig {
-        BlockWeightsCacheConfig {
+    pub const fn main_net() -> Self {
+        Self {
             short_term_window: SHORT_TERM_WINDOW,
             long_term_window: LONG_TERM_WINDOW,
         }
@@ -99,7 +99,7 @@ impl BlockWeightsCache {

         tracing::info!("Initialized block weight cache, chain-height: {:?}, long term weights length: {:?}, short term weights length: {:?}", chain_height, long_term_weights.len(), short_term_block_weights.len());

-        Ok(BlockWeightsCache {
+        Ok(Self {
             short_term_block_weights: rayon_spawn_async(move || {
                 RollingMedian::from_vec(short_term_block_weights, config.short_term_window)
             })
@@ -178,7 +178,7 @@ impl BlockWeightsCache {

     /// Add a new block to the cache.
     ///
-    /// The block_height **MUST** be one more than the last height the cache has
+    /// The `block_height` **MUST** be one more than the last height the cache has
     /// seen.
     pub fn new_block(&mut self, block_height: usize, block_weight: usize, long_term_weight: usize) {
         assert_eq!(self.tip_height + 1, block_height);
@@ -208,8 +208,8 @@ impl BlockWeightsCache {
     /// Returns the effective median weight, used for block reward calculations and to calculate
     /// the block weight limit.
     ///
-    /// See: https://cuprate.github.io/monero-book/consensus_rules/blocks/weight_limit.html#calculating-effective-median-weight
-    pub fn effective_median_block_weight(&self, hf: &HardFork) -> usize {
+    /// See: <https://cuprate.github.io/monero-book/consensus_rules/blocks/weight_limit.html#calculating-effective-median-weight>
+    pub fn effective_median_block_weight(&self, hf: HardFork) -> usize {
         calculate_effective_median_block_weight(
             hf,
             self.median_short_term_weight(),
@@ -219,9 +219,9 @@ impl BlockWeightsCache {

     /// Returns the median weight used to calculate block reward punishment.
     ///
-    /// https://cuprate.github.io/monero-book/consensus_rules/blocks/reward.html#calculating-block-reward
-    pub fn median_for_block_reward(&self, hf: &HardFork) -> usize {
-        if hf < &HardFork::V12 {
+    /// <https://cuprate.github.io/monero-book/consensus_rules/blocks/reward.html#calculating-block-reward>
+    pub fn median_for_block_reward(&self, hf: HardFork) -> usize {
+        if hf < HardFork::V12 {
             self.median_short_term_weight()
         } else {
             self.effective_median_block_weight(hf)
@@ -232,17 +232,17 @@ impl BlockWeightsCache {

 /// Calculates the effective median with the long term and short term median.
 fn calculate_effective_median_block_weight(
-    hf: &HardFork,
+    hf: HardFork,
     median_short_term_weight: usize,
     median_long_term_weight: usize,
 ) -> usize {
-    if hf < &HardFork::V10 {
+    if hf < HardFork::V10 {
         return median_short_term_weight.max(penalty_free_zone(hf));
     }

     let long_term_median = median_long_term_weight.max(PENALTY_FREE_ZONE_5);
     let short_term_median = median_short_term_weight;
-    let effective_median = if hf >= &HardFork::V10 && hf < &HardFork::V15 {
+    let effective_median = if hf >= HardFork::V10 && hf < HardFork::V15 {
         min(
             max(PENALTY_FREE_ZONE_5, short_term_median),
             50 * long_term_median,
@@ -258,19 +258,19 @@ fn calculate_effective_median_block_weight(
 }

 /// Calculates a blocks long term weight.
-pub fn calculate_block_long_term_weight(
-    hf: &HardFork,
+pub(crate) fn calculate_block_long_term_weight(
+    hf: HardFork,
     block_weight: usize,
     long_term_median: usize,
 ) -> usize {
-    if hf < &HardFork::V10 {
+    if hf < HardFork::V10 {
         return block_weight;
     }

     let long_term_median = max(penalty_free_zone(hf), long_term_median);

     let (short_term_constraint, adjusted_block_weight) =
-        if hf >= &HardFork::V10 && hf < &HardFork::V15 {
+        if hf >= HardFork::V10 && hf < HardFork::V15 {
             let stc = long_term_median + long_term_median * 2 / 5;
             (stc, block_weight)
         } else {

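The long-term weight arithmetic above clamps a block's weight to a short-term constraint of 1.4x the long-term median, written in integer arithmetic as `long_term_median + long_term_median * 2 / 5`. A worked sketch of that v10-v15 branch, under the simplifying assumption that the median is already known; the 300,000-byte floor matches Monero's v5 penalty-free zone:

    // Monero's v5+ penalty-free zone, used as the long-term median floor.
    const PENALTY_FREE_ZONE_5: usize = 300_000;

    /// The hf 10..15 branch of the long-term weight calculation: the block's
    /// weight is capped at 1.4x the (floored) long-term median.
    fn long_term_weight_v10(block_weight: usize, long_term_median: usize) -> usize {
        let long_term_median = long_term_median.max(PENALTY_FREE_ZONE_5);
        // Integer form of `1.4 * long_term_median`.
        let short_term_constraint = long_term_median + long_term_median * 2 / 5;
        block_weight.min(short_term_constraint)
    }

    fn main() {
        // A small block keeps its own weight...
        assert_eq!(long_term_weight_v10(100_000, 300_000), 100_000);
        // ...while an oversized one is clamped to 1.4x the median:
        // 300_000 + 300_000 * 2 / 5 == 420_000.
        assert_eq!(long_term_weight_v10(1_000_000, 300_000), 420_000);
    }
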
@@ -10,6 +10,16 @@
 //! implement a database you need to have a service which accepts [`BlockchainReadRequest`] and responds
 //! with [`BlockchainResponse`].
 //!
+
+cfg_if::cfg_if! {
+    // Used in external `tests/`.
+    if #[cfg(test)] {
+        use cuprate_test_utils as _;
+        use curve25519_dalek as _;
+        use hex_literal as _;
+    }
+}
+
 use cuprate_consensus_rules::ConsensusError;

 mod batch_verifier;
@@ -34,6 +44,7 @@ pub use cuprate_types::{

 /// An Error returned from one of the consensus services.
 #[derive(Debug, thiserror::Error)]
+#[expect(variant_size_differences)]
 pub enum ExtendedConsensusError {
     /// A consensus error.
     #[error("{0}")]
@@ -53,7 +64,8 @@ pub enum ExtendedConsensusError {
 }

 /// Initialize the 2 verifier [`tower::Service`]s (block and transaction).
-pub async fn initialize_verifier<D, Ctx>(
+#[expect(clippy::type_complexity)]
+pub fn initialize_verifier<D, Ctx>(
     database: D,
     ctx_svc: Ctx,
 ) -> Result<
@@ -112,7 +124,7 @@ pub mod __private {
                 Response = BlockchainResponse,
                 Error = tower::BoxError,
             >,
-        > crate::Database for T
+        > Database for T
     where
         T::Future: Future<Output = Result<Self::Response, Self::Error>> + Send + 'static,
    {

@@ -1,2 +1,2 @@
 mod context;
-pub mod mock_db;
+pub(crate) mod mock_db;

@@ -1,11 +1,12 @@
 use cuprate_consensus_rules::HardFork;

-pub static HFS_2688888_2689608: [(HardFork, HardFork); 720] =
+pub(crate) static HFS_2688888_2689608: [(HardFork, HardFork); 720] =
     include!("./data/hfs_2688888_2689608");

-pub static HFS_2678808_2688888: [(HardFork, HardFork); 10080] =
+pub(crate) static HFS_2678808_2688888: [(HardFork, HardFork); 10080] =
     include!("./data/hfs_2678808_2688888");

-pub static BW_2850000_3050000: [(usize, usize); 200_000] = include!("./data/bw_2850000_3050000");
+pub(crate) static BW_2850000_3050000: [(usize, usize); 200_000] =
+    include!("./data/bw_2850000_3050000");

-pub static DIF_3000000_3002000: [(u128, u64); 2000] = include!("./data/dif_3000000_3002000");
+pub(crate) static DIF_3000000_3002000: [(u128, u64); 2000] = include!("./data/dif_3000000_3002000");

@@ -17,7 +17,7 @@ const TEST_LAG: usize = 2;

 const TEST_TOTAL_ACCOUNTED_BLOCKS: usize = TEST_WINDOW + TEST_LAG;

-pub const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig =
+pub(crate) const TEST_DIFFICULTY_CONFIG: DifficultyCacheConfig =
     DifficultyCacheConfig::new(TEST_WINDOW, TEST_CUT, TEST_LAG);

 #[tokio::test]
@@ -35,7 +35,7 @@ async fn first_3_blocks_fixed_difficulty() -> Result<(), tower::BoxError> {
     .await?;

     for height in 1..3 {
-        assert_eq!(difficulty_cache.next_difficulty(&HardFork::V1), 1);
+        assert_eq!(difficulty_cache.next_difficulty(HardFork::V1), 1);
         difficulty_cache.new_block(height, 0, u128::MAX);
     }
     Ok(())
@@ -66,7 +66,7 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> {
     for (cum_dif, timestamp) in DIF_3000000_3002000.iter().take(cfg.total_block_count()) {
         db_builder.add_block(
             DummyBlockExtendedHeader::default().with_difficulty_info(*timestamp, *cum_dif),
-        )
+        );
     }

     let mut diff_cache = DifficultyCache::init_from_chain_height(
@@ -84,7 +84,7 @@ async fn calculate_diff_3000000_3002000() -> Result<(), tower::BoxError> {
     {
         let diff = diff_info[1].0 - diff_info[0].0;

-        assert_eq!(diff_cache.next_difficulty(&HardFork::V16), diff);
+        assert_eq!(diff_cache.next_difficulty(HardFork::V16), diff);

         diff_cache.new_block(3_000_720 + i, diff_info[1].1, diff_info[1].0);
     }
@@ -139,22 +139,22 @@ proptest! {
             no_lag_cache.cumulative_difficulties.pop_front();
         }
         // get the difficulty
-        let next_diff_no_lag = no_lag_cache.next_difficulty(&hf);
+        let next_diff_no_lag = no_lag_cache.next_difficulty(hf);

        for _ in 0..TEST_LAG {
            // add new blocks to the lagged cache
            diff_cache.new_block(diff_cache.last_accounted_height+1, timestamp, cumulative_difficulty);
        }
        // they both should now be the same
-        prop_assert_eq!(diff_cache.next_difficulty(&hf), next_diff_no_lag)
+        prop_assert_eq!(diff_cache.next_difficulty(hf), next_diff_no_lag);
    }

    #[test]
    fn next_difficulty_consistent(diff_cache in arb_difficulty_cache(TEST_TOTAL_ACCOUNTED_BLOCKS), hf in any::<HardFork>()) {
-        let first_call = diff_cache.next_difficulty(&hf);
-        prop_assert_eq!(first_call, diff_cache.next_difficulty(&hf));
-        prop_assert_eq!(first_call, diff_cache.next_difficulty(&hf));
-        prop_assert_eq!(first_call, diff_cache.next_difficulty(&hf));
+        let first_call = diff_cache.next_difficulty(hf);
+        prop_assert_eq!(first_call, diff_cache.next_difficulty(hf));
+        prop_assert_eq!(first_call, diff_cache.next_difficulty(hf));
+        prop_assert_eq!(first_call, diff_cache.next_difficulty(hf));
    }

    #[test]
@@ -178,7 +178,7 @@ proptest! {

    #[test]
    fn window_size_kept_constant(mut diff_cache in arb_difficulty_cache(TEST_TOTAL_ACCOUNTED_BLOCKS), new_blocks in any::<Vec<(u64, u128)>>()) {
-        for (timestamp, cumulative_difficulty) in new_blocks.into_iter() {
+        for (timestamp, cumulative_difficulty) in new_blocks {
            diff_cache.new_block(diff_cache.last_accounted_height+1, timestamp, cumulative_difficulty);
            prop_assert_eq!(diff_cache.timestamps.len(), TEST_TOTAL_ACCOUNTED_BLOCKS);
            prop_assert_eq!(diff_cache.cumulative_difficulties.len(), TEST_TOTAL_ACCOUNTED_BLOCKS);
@@ -193,7 +193,7 @@ proptest! {
    ) {
        let cache = diff_cache.clone();

-        diff_cache.next_difficulties(timestamps.into_iter().zip([hf].into_iter().cycle()).collect(), &hf);
+        diff_cache.next_difficulties(timestamps.into_iter().zip(std::iter::once(hf).cycle()).collect(), hf);

        prop_assert_eq!(diff_cache, cache);
    }
@@ -204,12 +204,12 @@ proptest! {
        timestamps in any_with::<Vec<u64>>(size_range(0..1000).lift()),
        hf in any::<HardFork>(),
    ) {
-        let timestamps: Vec<_> = timestamps.into_iter().zip([hf].into_iter().cycle()).collect();
+        let timestamps: Vec<_> = timestamps.into_iter().zip(std::iter::once(hf).cycle()).collect();

-        let diffs = diff_cache.next_difficulties(timestamps.clone(), &hf);
+        let diffs = diff_cache.next_difficulties(timestamps.clone(), hf);

        for (timestamp, diff) in timestamps.into_iter().zip(diffs.into_iter()) {
-            prop_assert_eq!(diff_cache.next_difficulty(&timestamp.1), diff);
+            prop_assert_eq!(diff_cache.next_difficulty(timestamp.1), diff);
            diff_cache.new_block(diff_cache.last_accounted_height +1, timestamp.0, diff + diff_cache.cumulative_difficulty());
        }

@@ -226,7 +226,7 @@ proptest! {
        let blocks_to_pop = new_blocks.len();

        let mut new_cache = old_cache.clone();
-        for (timestamp, cumulative_difficulty) in new_blocks.into_iter() {
+        for (timestamp, cumulative_difficulty) in new_blocks {
            database.add_block(DummyBlockExtendedHeader::default().with_difficulty_info(timestamp, cumulative_difficulty));
            new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty);
        }
@@ -250,7 +250,7 @@ proptest! {
        let blocks_to_pop = new_blocks.len();

        let mut new_cache = old_cache.clone();
-        for (timestamp, cumulative_difficulty) in new_blocks.into_iter() {
+        for (timestamp, cumulative_difficulty) in new_blocks {
            database.add_block(DummyBlockExtendedHeader::default().with_difficulty_info(timestamp, cumulative_difficulty));
            new_cache.new_block(new_cache.last_accounted_height+1, timestamp, cumulative_difficulty);
        }

@@ -31,7 +31,7 @@ const TEST_HFS: [HFInfo; NUMB_OF_HARD_FORKS] = [
     HFInfo::new(150, 0),
 ];

-pub const TEST_HARD_FORK_CONFIG: HardForkConfig = HardForkConfig {
+pub(crate) const TEST_HARD_FORK_CONFIG: HardForkConfig = HardForkConfig {
     window: TEST_WINDOW_SIZE,
     info: HFsInfo::new(TEST_HFS),
 };

@@ -39,6 +39,7 @@ fn rx_heights_consistent() {
 }

 #[tokio::test]
+#[expect(unused_qualifications, reason = "false positive in tokio macro")]
 async fn rx_vm_created_on_hf_12() {
     let db = DummyDatabaseBuilder::default().finish(Some(10));

@@ -8,7 +8,8 @@ use crate::{
 };
 use cuprate_types::Chain;

-pub const TEST_WEIGHT_CONFIG: BlockWeightsCacheConfig = BlockWeightsCacheConfig::new(100, 5000);
+pub(crate) const TEST_WEIGHT_CONFIG: BlockWeightsCacheConfig =
+    BlockWeightsCacheConfig::new(100, 5000);

 #[tokio::test]
 async fn blocks_out_of_window_not_counted() -> Result<(), tower::BoxError> {
@@ -157,7 +158,7 @@ async fn calc_bw_ltw_2850000_3050000() {

     for (i, (weight, ltw)) in BW_2850000_3050000.iter().skip(100_000).enumerate() {
         let calc_ltw = calculate_block_long_term_weight(
-            &HardFork::V16,
+            HardFork::V16,
             *weight,
             weight_cache.median_long_term_weight(),
         );

@@ -1,3 +1,5 @@
+#![expect(non_local_definitions, reason = "proptest macro")]
+
 use std::{
     future::Future,
     pin::Pin,
@@ -60,7 +62,7 @@ pub struct DummyBlockExtendedHeader {

 impl From<DummyBlockExtendedHeader> for ExtendedBlockHeader {
     fn from(value: DummyBlockExtendedHeader) -> Self {
-        ExtendedBlockHeader {
+        Self {
             version: value.version.unwrap_or(HardFork::V1),
             vote: value.vote.unwrap_or(HardFork::V1).as_u8(),
             timestamp: value.timestamp.unwrap_or_default(),
@@ -72,31 +74,23 @@ impl From<DummyBlockExtendedHeader> for ExtendedBlockHeader {
 }

 impl DummyBlockExtendedHeader {
-    pub fn with_weight_into(
-        mut self,
-        weight: usize,
-        long_term_weight: usize,
-    ) -> DummyBlockExtendedHeader {
+    pub const fn with_weight_into(mut self, weight: usize, long_term_weight: usize) -> Self {
         self.block_weight = Some(weight);
         self.long_term_weight = Some(long_term_weight);
         self
     }

-    pub fn with_hard_fork_info(
-        mut self,
-        version: HardFork,
-        vote: HardFork,
-    ) -> DummyBlockExtendedHeader {
+    pub const fn with_hard_fork_info(mut self, version: HardFork, vote: HardFork) -> Self {
         self.vote = Some(vote);
         self.version = Some(version);
         self
     }

-    pub fn with_difficulty_info(
+    pub const fn with_difficulty_info(
         mut self,
         timestamp: u64,
         cumulative_difficulty: u128,
-    ) -> DummyBlockExtendedHeader {
+    ) -> Self {
         self.timestamp = Some(timestamp);
         self.cumulative_difficulty = Some(cumulative_difficulty);
         self
@@ -104,16 +98,16 @@ impl DummyBlockExtendedHeader {
 }

 #[derive(Debug, Default)]
-pub struct DummyDatabaseBuilder {
+pub(crate) struct DummyDatabaseBuilder {
     blocks: Vec<DummyBlockExtendedHeader>,
 }

 impl DummyDatabaseBuilder {
-    pub fn add_block(&mut self, block: DummyBlockExtendedHeader) {
+    pub(crate) fn add_block(&mut self, block: DummyBlockExtendedHeader) {
         self.blocks.push(block);
     }

-    pub fn finish(self, dummy_height: Option<usize>) -> DummyDatabase {
+    pub(crate) fn finish(self, dummy_height: Option<usize>) -> DummyDatabase {
         DummyDatabase {
             blocks: Arc::new(self.blocks.into()),
             dummy_height,
@@ -122,14 +116,15 @@ impl DummyDatabaseBuilder {
 }

 #[derive(Clone, Debug)]
-pub struct DummyDatabase {
+pub(crate) struct DummyDatabase {
     blocks: Arc<RwLock<Vec<DummyBlockExtendedHeader>>>,
     dummy_height: Option<usize>,
 }

 impl DummyDatabase {
-    pub fn add_block(&mut self, block: DummyBlockExtendedHeader) {
-        self.blocks.write().unwrap().push(block)
+    #[expect(clippy::needless_pass_by_ref_mut)]
+    pub(crate) fn add_block(&mut self, block: DummyBlockExtendedHeader) {
+        self.blocks.write().unwrap().push(block);
     }
 }

@@ -144,7 +139,7 @@ impl Service<BlockchainReadRequest> for DummyDatabase {
     }

     fn call(&mut self, req: BlockchainReadRequest) -> Self::Future {
-        let blocks = self.blocks.clone();
+        let blocks = Arc::clone(&self.blocks);
         let dummy_height = self.dummy_height;

         async move {

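The `self.blocks.clone()` → `Arc::clone(&self.blocks)` change here (and the matching `Arc::clone(tx)` changes in the transaction verifier below) is purely stylistic: both only bump a reference count, but the explicit form signals at the call site that no deep copy of the data happens. A minimal sketch:

    use std::sync::Arc;

    fn main() {
        let blocks: Arc<Vec<u64>> = Arc::new(vec![1, 2, 3]);

        // Equivalent to `blocks.clone()`, but unambiguous at the call site:
        // only the `Arc` pointer is cloned, never the `Vec` behind it.
        let handle = Arc::clone(&blocks);

        assert_eq!(Arc::strong_count(&blocks), 2);
        assert_eq!(*handle, vec![1, 2, 3]);
    }
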
@ -5,7 +5,6 @@
|
||||||
use std::{
|
use std::{
|
||||||
collections::HashSet,
|
collections::HashSet,
|
||||||
future::Future,
|
future::Future,
|
||||||
ops::Deref,
|
|
||||||
pin::Pin,
|
pin::Pin,
|
||||||
sync::Arc,
|
sync::Arc,
|
||||||
task::{Context, Poll},
|
task::{Context, Poll},
|
||||||
|
@ -102,8 +101,8 @@ where
|
||||||
D::Future: Send + 'static,
|
D::Future: Send + 'static,
|
||||||
{
|
{
|
||||||
/// Creates a new [`TxVerifierService`].
|
/// Creates a new [`TxVerifierService`].
|
||||||
pub fn new(database: D) -> TxVerifierService<D> {
|
pub const fn new(database: D) -> Self {
|
||||||
TxVerifierService { database }
|
Self { database }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -244,7 +243,7 @@ where
|
||||||
|
|
||||||
if kis_spent {
|
if kis_spent {
|
||||||
tracing::debug!("One or more key images in batch already spent.");
|
tracing::debug!("One or more key images in batch already spent.");
|
||||||
Err(ConsensusError::Transaction(TransactionError::KeyImageSpent))?;
|
return Err(ConsensusError::Transaction(TransactionError::KeyImageSpent).into());
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut verified_at_block_hashes = txs
|
let mut verified_at_block_hashes = txs
|
||||||
|
@ -281,8 +280,8 @@ where
|
||||||
let (txs_needing_full_verification, txs_needing_partial_verification) =
|
let (txs_needing_full_verification, txs_needing_partial_verification) =
|
||||||
transactions_needing_verification(
|
transactions_needing_verification(
|
||||||
txs,
|
txs,
|
||||||
verified_at_block_hashes,
|
&verified_at_block_hashes,
|
||||||
&hf,
|
hf,
|
||||||
current_chain_height,
|
current_chain_height,
|
||||||
time_for_time_lock,
|
time_for_time_lock,
|
||||||
)?;
|
)?;
|
||||||
|
@ -302,11 +301,14 @@ where
|
||||||
Ok(VerifyTxResponse::Ok)
|
Ok(VerifyTxResponse::Ok)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(clippy::type_complexity)] // I don't think the return is too complex
|
#[expect(
|
||||||
|
clippy::type_complexity,
|
||||||
|
reason = "I don't think the return is too complex"
|
||||||
|
)]
|
||||||
fn transactions_needing_verification(
|
fn transactions_needing_verification(
|
||||||
txs: &[Arc<TransactionVerificationData>],
|
txs: &[Arc<TransactionVerificationData>],
|
||||||
hashes_in_main_chain: HashSet<[u8; 32]>,
|
hashes_in_main_chain: &HashSet<[u8; 32]>,
|
||||||
current_hf: &HardFork,
|
current_hf: HardFork,
|
||||||
current_chain_height: usize,
|
current_chain_height: usize,
|
||||||
time_for_time_lock: u64,
|
time_for_time_lock: u64,
|
||||||
) -> Result<
|
) -> Result<
|
||||||
|
@@ -321,27 +323,28 @@ fn transactions_needing_verification(
     // txs needing partial _contextual_ validation, not semantic.
     let mut partial_validation_transactions = Vec::new();

-    for tx in txs.iter() {
+    for tx in txs {
         let guard = tx.cached_verification_state.lock().unwrap();

-        match guard.deref() {
+        match &*guard {
             CachedVerificationState::NotVerified => {
                 drop(guard);
                 full_validation_transactions
-                    .push((tx.clone(), VerificationNeeded::SemanticAndContextual));
+                    .push((Arc::clone(tx), VerificationNeeded::SemanticAndContextual));
                 continue;
             }
             CachedVerificationState::ValidAtHashAndHF { block_hash, hf } => {
-                if current_hf != hf {
+                if current_hf != *hf {
                     drop(guard);
                     full_validation_transactions
-                        .push((tx.clone(), VerificationNeeded::SemanticAndContextual));
+                        .push((Arc::clone(tx), VerificationNeeded::SemanticAndContextual));
                     continue;
                 }

                 if !hashes_in_main_chain.contains(block_hash) {
                     drop(guard);
-                    full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual));
+                    full_validation_transactions
+                        .push((Arc::clone(tx), VerificationNeeded::Contextual));
                     continue;
                 }
             }
@@ -350,21 +353,22 @@ fn transactions_needing_verification(
                 hf,
                 time_lock,
             } => {
-                if current_hf != hf {
+                if current_hf != *hf {
                     drop(guard);
                     full_validation_transactions
-                        .push((tx.clone(), VerificationNeeded::SemanticAndContextual));
+                        .push((Arc::clone(tx), VerificationNeeded::SemanticAndContextual));
                     continue;
                 }

                 if !hashes_in_main_chain.contains(block_hash) {
                     drop(guard);
-                    full_validation_transactions.push((tx.clone(), VerificationNeeded::Contextual));
+                    full_validation_transactions
+                        .push((Arc::clone(tx), VerificationNeeded::Contextual));
                     continue;
                 }

                 // If the time lock is still locked then the transaction is invalid.
-                if !output_unlocked(time_lock, current_chain_height, time_for_time_lock, hf) {
+                if !output_unlocked(time_lock, current_chain_height, time_for_time_lock, *hf) {
                     return Err(ConsensusError::Transaction(
                         TransactionError::OneOrMoreRingMembersLocked,
                     ));
@@ -374,7 +378,7 @@ fn transactions_needing_verification(

             if tx.version == TxVersion::RingSignatures {
                 drop(guard);
-                partial_validation_transactions.push(tx.clone());
+                partial_validation_transactions.push(Arc::clone(tx));
                 continue;
             }
         }
@@ -400,7 +404,7 @@ where

     batch_get_decoy_info(&txs, hf, database)
         .await?
-        .try_for_each(|decoy_info| decoy_info.and_then(|di| Ok(check_decoy_info(&di, &hf)?)))?;
+        .try_for_each(|decoy_info| decoy_info.and_then(|di| Ok(check_decoy_info(&di, hf)?)))?;

     Ok(())
 }
@@ -417,7 +421,7 @@ where
     D: Database + Clone + Sync + Send + 'static,
 {
     let txs_ring_member_info =
-        batch_get_ring_member_info(txs.iter().map(|(tx, _)| tx), &hf, database).await?;
+        batch_get_ring_member_info(txs.iter().map(|(tx, _)| tx), hf, database).await?;

     rayon_spawn_async(move || {
         let batch_verifier = MultiThreadedBatchVerifier::new(rayon::current_num_threads());
@@ -432,7 +436,7 @@ where
                 tx.tx_blob.len(),
                 tx.tx_weight,
                 &tx.tx_hash,
-                &hf,
+                hf,
                 &batch_verifier,
             )?;
             // make sure we calculated the right fee.
@@ -445,7 +449,7 @@ where
                 ring,
                 current_chain_height,
                 current_time_lock_timestamp,
-                &hf,
+                hf,
             )?;

             Ok::<_, ConsensusError>(())
@@ -57,7 +57,7 @@ fn get_ring_members_for_inputs(
                 })
                 .collect::<Result<_, TransactionError>>()?)
         }
-        _ => Err(TransactionError::IncorrectInputType),
+        Input::Gen(_) => Err(TransactionError::IncorrectInputType),
     })
     .collect::<Result<_, TransactionError>>()
 }
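Replacing the `_` wildcard with the concrete `Input::Gen(_)` variant makes the match exhaustive over the enum, so adding a new `Input` variant later becomes a compile error here instead of silently falling into the wildcard arm. A minimal sketch with a stand-in two-variant enum:

```rust
enum Input {
    ToKey(u64),
    Gen(u64),
}

fn amount(input: &Input) -> u64 {
    match input {
        Input::ToKey(amount) => *amount,
        // Naming the variant instead of `_` means any variant
        // added later will fail to compile until handled.
        Input::Gen(_) => 0,
    }
}

fn main() {
    assert_eq!(amount(&Input::ToKey(5)), 5);
    assert_eq!(amount(&Input::Gen(100)), 0);
}
```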
@@ -143,7 +143,7 @@ fn new_rings(
 /// them.
 pub async fn batch_get_ring_member_info<D: Database>(
     txs_verification_data: impl Iterator<Item = &Arc<TransactionVerificationData>> + Clone,
-    hf: &HardFork,
+    hf: HardFork,
     mut database: D,
 ) -> Result<Vec<TxRingMembersInfo>, ExtendedConsensusError> {
     let mut output_ids = HashMap::new();
@@ -183,14 +183,14 @@ pub async fn batch_get_ring_member_info<D: Database>(
         )
         .map_err(ConsensusError::Transaction)?;

-        let decoy_info = if hf != &HardFork::V1 {
+        let decoy_info = if hf == HardFork::V1 {
+            None
+        } else {
             // this data is only needed after hard-fork 1.
             Some(
                 DecoyInfo::new(&tx_v_data.tx.prefix().inputs, numb_outputs, hf)
                     .map_err(ConsensusError::Transaction)?,
             )
-        } else {
-            None
         };

         new_ring_member_info(ring_members_for_tx, decoy_info, tx_v_data.version)
@@ -224,7 +224,7 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>(
         .flat_map(|tx_info| {
             tx_info.tx.prefix().inputs.iter().map(|input| match input {
                 Input::ToKey { amount, .. } => amount.unwrap_or(0),
-                _ => 0,
+                Input::Gen(_) => 0,
             })
         })
         .collect::<HashSet<_>>();
@@ -249,7 +249,7 @@ pub async fn batch_get_decoy_info<'a, D: Database + Clone + Send + 'static>(
             DecoyInfo::new(
                 &tx_v_data.tx.prefix().inputs,
                 |amt| outputs_with_amount.get(&amt).copied().unwrap_or(0),
-                &hf,
+                hf,
             )
             .map_err(ConsensusError::Transaction)
         }))
@@ -39,7 +39,7 @@ pub fn new_tx_verification_data(
 /// Calculates the weight of a [`Transaction`].
 ///
 /// This is more efficient that [`Transaction::weight`] if you already have the transaction blob.
-pub fn tx_weight(tx: &Transaction, tx_blob: &[u8]) -> usize {
+pub(crate) fn tx_weight(tx: &Transaction, tx_blob: &[u8]) -> usize {
     // the tx weight is only different from the blobs length for bp(+) txs.

     match &tx {
@@ -64,7 +64,7 @@ pub fn tx_weight(tx: &Transaction, tx_blob: &[u8]) -> usize {
 }

 /// Calculates the fee of the [`Transaction`].
-pub fn tx_fee(tx: &Transaction) -> Result<u64, TransactionError> {
+pub(crate) fn tx_fee(tx: &Transaction) -> Result<u64, TransactionError> {
     let mut fee = 0_u64;

     match &tx {
@@ -1,3 +1,6 @@
+#![expect(unused_crate_dependencies, reason = "external test module")]
+#![expect(clippy::allow_attributes, reason = "usage inside macro")]
+
 use std::{
     collections::{BTreeMap, HashMap},
     future::ready,
@@ -29,7 +32,7 @@ fn dummy_database(outputs: BTreeMap<u64, OutputOnChain>) -> impl Database + Clon
                 BlockchainResponse::NumberOutputsWithAmount(HashMap::new())
             }
             BlockchainReadRequest::Outputs(outs) => {
-                let idxs = outs.get(&0).unwrap();
+                let idxs = &outs[&0];

                 let mut ret = HashMap::new();

@@ -9,8 +9,8 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/consensus"


 [features]
-# All features on by default.
-default = ["std", "atomic", "asynch", "cast", "fs", "num", "map", "time", "thread", "constants"]
+# All features off by default.
+default = []
 std = []
 atomic = ["dep:crossbeam"]
 asynch = ["dep:futures", "dep:rayon"]
@@ -21,6 +21,7 @@ num = []
 map = ["cast", "dep:monero-serai"]
 time = ["dep:chrono", "std"]
 thread = ["std", "dep:target_os_lib"]
+tx = ["dep:monero-serai"]

 [dependencies]
 crossbeam = { workspace = true, optional = true }
@@ -40,6 +41,7 @@ target_os_lib = { package = "libc", version = "0.2.158", optional = true }

 [dev-dependencies]
 tokio = { workspace = true, features = ["full"] }
+curve25519-dalek = { workspace = true }

 [lints]
 workspace = true
@@ -5,9 +5,6 @@
 //---------------------------------------------------------------------------------------------------- Use
 use crossbeam::atomic::AtomicCell;

-#[allow(unused_imports)] // docs
-use std::sync::atomic::{Ordering, Ordering::Acquire, Ordering::Release};
-
 //---------------------------------------------------------------------------------------------------- Atomic Float
 /// Compile-time assertion that our floats are
 /// lock-free for the target we're building for.
@@ -31,9 +28,13 @@ const _: () = {
 /// This is an alias for
 /// [`crossbeam::atomic::AtomicCell<f32>`](https://docs.rs/crossbeam/latest/crossbeam/atomic/struct.AtomicCell.html).
 ///
-/// Note that there are no [`Ordering`] parameters,
-/// atomic loads use [`Acquire`],
-/// and atomic stores use [`Release`].
+/// Note that there are no [Ordering] parameters,
+/// atomic loads use [Acquire],
+/// and atomic stores use [Release].
+///
+/// [Ordering]: std::sync::atomic::Ordering
+/// [Acquire]: std::sync::atomic::Ordering::Acquire
+/// [Release]: std::sync::atomic::Ordering::Release
 pub type AtomicF32 = AtomicCell<f32>;

 /// An atomic [`f64`].
@@ -41,9 +42,13 @@ pub type AtomicF32 = AtomicCell<f32>;
 /// This is an alias for
 /// [`crossbeam::atomic::AtomicCell<f64>`](https://docs.rs/crossbeam/latest/crossbeam/atomic/struct.AtomicCell.html).
 ///
-/// Note that there are no [`Ordering`] parameters,
-/// atomic loads use [`Acquire`],
-/// and atomic stores use [`Release`].
+/// Note that there are no [Ordering] parameters,
+/// atomic loads use [Acquire],
+/// and atomic stores use [Release].
+///
+/// [Ordering]: std::sync::atomic::Ordering
+/// [Acquire]: std::sync::atomic::Ordering::Acquire
+/// [Release]: std::sync::atomic::Ordering::Release
 pub type AtomicF64 = AtomicCell<f64>;

 //---------------------------------------------------------------------------------------------------- TESTS
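The two hunks above switch from inline `` [`Ordering`] `` links, which only resolve when the item is imported, to reference-style rustdoc links with explicit path definitions, which is what lets the `use std::sync::atomic::...` import be deleted earlier in the file. A minimal sketch of the same pattern:

```rust
/// Stores a value.
///
/// Loads use [Acquire] and stores use [Release].
///
/// [Acquire]: std::sync::atomic::Ordering::Acquire
/// [Release]: std::sync::atomic::Ordering::Release
pub struct Cell(pub u64);

fn main() {
    // The links resolve at doc-build time; no import is needed.
    let c = Cell(1);
    assert_eq!(c.0, 1);
}
```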
@@ -31,6 +31,8 @@ pub mod thread;
 #[cfg(feature = "time")]
 pub mod time;

+#[cfg(feature = "tx")]
+pub mod tx;
 //---------------------------------------------------------------------------------------------------- Private Usage

 //----------------------------------------------------------------------------------------------------
@@ -29,7 +29,7 @@ use crate::cast::{u64_to_usize, usize_to_u64};
 /// ```
 #[inline]
 pub const fn split_u128_into_low_high_bits(value: u128) -> (u64, u64) {
-    #[allow(clippy::cast_possible_truncation)]
+    #[expect(clippy::cast_possible_truncation)]
     (value as u64, (value >> 64) as u64)
 }

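The function splits a `u128` into its low and high 64-bit halves: `value as u64` truncates to bits 0..64 and `(value >> 64) as u64` keeps the upper word. A small worked example of the same arithmetic:

```rust
const fn split_u128_into_low_high_bits(value: u128) -> (u64, u64) {
    (value as u64, (value >> 64) as u64)
}

fn main() {
    // high = 3, low = 7: value = 3 * 2^64 + 7.
    let value = (3u128 << 64) | 7;
    let (low, high) = split_u128_into_low_high_bits(value);
    assert_eq!(low, 7);
    assert_eq!(high, 3);
}
```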
@@ -91,7 +91,7 @@ where
 ///
 /// # Invariant
 /// If not sorted the output will be invalid.
-#[allow(clippy::debug_assert_with_mut_call)]
+#[expect(clippy::debug_assert_with_mut_call)]
 pub fn median<T>(array: impl AsRef<[T]>) -> T
 where
     T: Add<Output = T>
@@ -6,7 +6,6 @@
 use std::{cmp::max, num::NonZeroUsize};

 //---------------------------------------------------------------------------------------------------- Thread Count & Percent
-#[allow(non_snake_case)]
 /// Get the total amount of system threads.
 ///
 /// ```rust
@@ -31,7 +30,12 @@ macro_rules! impl_thread_percent {
             // unwrap here is okay because:
             // - THREADS().get() is always non-zero
             // - max() guards against 0
-            #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss, clippy::cast_precision_loss)]
+            #[expect(
+                clippy::cast_possible_truncation,
+                clippy::cast_sign_loss,
+                clippy::cast_precision_loss,
+                reason = "we need to round integers"
+            )]
             NonZeroUsize::new(max(1, (threads().get() as f64 * $percent).floor() as usize)).unwrap()
         }
     )*
@@ -129,7 +129,7 @@ pub const fn secs_to_clock(seconds: u32) -> (u8, u8, u8) {
     debug_assert!(m < 60);
     debug_assert!(s < 60);

-    #[allow(clippy::cast_possible_truncation)] // checked above
+    #[expect(clippy::cast_possible_truncation, reason = "checked above")]
     (h as u8, m, s)
 }

@@ -154,7 +154,7 @@ pub fn time() -> u32 {
 ///
 /// This is guaranteed to return a value between `0..=86399`
 pub fn time_utc() -> u32 {
-    #[allow(clippy::cast_sign_loss)] // checked in function calls
+    #[expect(clippy::cast_sign_loss, reason = "checked in function calls")]
     unix_clock(chrono::offset::Local::now().timestamp() as u64)
 }

helper/src/tx.rs (new file, 70 lines)
@@ -0,0 +1,70 @@
+//! Utils for working with [`Transaction`]
+
+use monero_serai::transaction::{Input, Transaction};
+
+/// Calculates the fee of the [`Transaction`].
+///
+/// # Panics
+/// This will panic if the inputs overflow or the transaction outputs too much, so should only
+/// be used on known to be valid txs.
+pub fn tx_fee(tx: &Transaction) -> u64 {
+    let mut fee = 0_u64;
+
+    match &tx {
+        Transaction::V1 { prefix, .. } => {
+            for input in &prefix.inputs {
+                match input {
+                    Input::Gen(_) => return 0,
+                    Input::ToKey { amount, .. } => {
+                        fee = fee.checked_add(amount.unwrap_or(0)).unwrap();
+                    }
+                }
+            }
+
+            for output in &prefix.outputs {
+                fee = fee.checked_sub(output.amount.unwrap_or(0)).unwrap();
+            }
+        }
+        Transaction::V2 { proofs, .. } => {
+            fee = proofs.as_ref().unwrap().base.fee;
+        }
+    };
+
+    fee
+}
+
+#[cfg(test)]
+mod test {
+    use curve25519_dalek::{edwards::CompressedEdwardsY, EdwardsPoint};
+    use monero_serai::transaction::{NotPruned, Output, Timelock, TransactionPrefix};
+
+    use super::*;
+
+    #[test]
+    #[should_panic(expected = "called `Option::unwrap()` on a `None` value")]
+    fn tx_fee_panic() {
+        let input = Input::ToKey {
+            amount: Some(u64::MAX),
+            key_offsets: vec![],
+            key_image: EdwardsPoint::default(),
+        };
+
+        let output = Output {
+            amount: Some(u64::MAX),
+            key: CompressedEdwardsY::default(),
+            view_tag: None,
+        };
+
+        let tx = Transaction::<NotPruned>::V1 {
+            prefix: TransactionPrefix {
+                additional_timelock: Timelock::None,
+                inputs: vec![input; 2],
+                outputs: vec![output],
+                extra: vec![],
+            },
+            signatures: vec![],
+        };
+
+        tx_fee(&tx);
+    }
+}
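For a v1 transaction the fee is implicit: the sum of input amounts minus the sum of output amounts, which is why the helper uses `checked_add`/`checked_sub` and panics on malformed values; for v2, the fee is carried explicitly in the proofs. A minimal sketch of the v1 arithmetic only, standalone rather than on the real `Transaction` type:

```rust
/// Fee of a v1-style transaction: inputs minus outputs.
/// Panics on overflow/underflow, like the helper above.
fn v1_fee(inputs: &[u64], outputs: &[u64]) -> u64 {
    let mut fee = 0_u64;
    for amount in inputs {
        fee = fee.checked_add(*amount).unwrap();
    }
    for amount in outputs {
        fee = fee.checked_sub(*amount).unwrap();
    }
    fee
}

fn main() {
    // 50 in, 45 out => 5 fee.
    assert_eq!(v1_fee(&[30, 20], &[45]), 5);
}
```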
@@ -25,3 +25,6 @@ thiserror = { workspace = true, optional = true}

 [dev-dependencies]
 hex = { workspace = true, features = ["default"] }
+
+[lints]
+workspace = true
@@ -9,7 +9,7 @@ pub struct ContainerAsBlob<T: Containerable + EpeeValue>(Vec<T>);

 impl<T: Containerable + EpeeValue> From<Vec<T>> for ContainerAsBlob<T> {
     fn from(value: Vec<T>) -> Self {
-        ContainerAsBlob(value)
+        Self(value)
     }
 }

@@ -36,9 +36,7 @@ impl<T: Containerable + EpeeValue> EpeeValue for ContainerAsBlob<T> {
             ));
         }

-        Ok(ContainerAsBlob(
-            bytes.chunks(T::SIZE).map(T::from_bytes).collect(),
-        ))
+        Ok(Self(bytes.chunks(T::SIZE).map(T::from_bytes).collect()))
     }

     fn should_write(&self) -> bool {
@@ -46,10 +44,10 @@ impl<T: Containerable + EpeeValue> EpeeValue for ContainerAsBlob<T> {
     }

     fn epee_default_value() -> Option<Self> {
-        Some(ContainerAsBlob(vec![]))
+        Some(Self(vec![]))
     }

-    fn write<B: BufMut>(self, w: &mut B) -> crate::Result<()> {
+    fn write<B: BufMut>(self, w: &mut B) -> Result<()> {
         let mut buf = BytesMut::with_capacity(self.0.len() * T::SIZE);
         self.0.iter().for_each(|tt| tt.push_bytes(&mut buf));
         buf.write(w)
@@ -7,6 +7,7 @@ use core::{
 pub type Result<T> = core::result::Result<T, Error>;

 #[cfg_attr(feature = "std", derive(thiserror::Error))]
+#[expect(clippy::error_impl_error, reason = "FIXME: rename this type")]
 pub enum Error {
     #[cfg_attr(feature = "std", error("IO error: {0}"))]
     IO(&'static str),
@@ -17,19 +18,18 @@ pub enum Error {
 }

 impl Error {
-    fn field_name(&self) -> &'static str {
+    const fn field_name(&self) -> &'static str {
         match self {
-            Error::IO(_) => "io",
-            Error::Format(_) => "format",
-            Error::Value(_) => "value",
+            Self::IO(_) => "io",
+            Self::Format(_) => "format",
+            Self::Value(_) => "value",
         }
     }

     fn field_data(&self) -> &str {
         match self {
-            Error::IO(data) => data,
-            Error::Format(data) => data,
-            Error::Value(data) => data,
+            Self::IO(data) | Self::Format(data) => data,
+            Self::Value(data) => data,
         }
     }
 }
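Merging the `IO` and `Format` arms with an or-pattern works because both variants bind a field that can coerce to the same return type, so one arm covers both. A minimal sketch mirroring the shape of the enum above:

```rust
enum Error {
    IO(&'static str),
    Format(&'static str),
    Value(String),
}

fn field_data(e: &Error) -> &str {
    match e {
        // One arm, two variants: both bind `data` to a &'static str field.
        Error::IO(data) | Error::Format(data) => data,
        Error::Value(data) => data,
    }
}

fn main() {
    assert_eq!(field_data(&Error::IO("io problem")), "io problem");
    assert_eq!(field_data(&Error::Value("bad".to_string())), "bad");
}
```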
@@ -44,12 +44,12 @@ impl Debug for Error {

 impl From<TryFromIntError> for Error {
     fn from(_: TryFromIntError) -> Self {
-        Error::Value("Int is too large".to_string())
+        Self::Value("Int is too large".to_string())
     }
 }

 impl From<Utf8Error> for Error {
     fn from(_: Utf8Error) -> Self {
-        Error::Value("Invalid utf8 str".to_string())
+        Self::Value("Invalid utf8 str".to_string())
     }
 }
@@ -3,7 +3,7 @@ use bytes::{Buf, BufMut};
 use crate::error::*;

 #[inline]
-pub fn checked_read_primitive<B: Buf, R: Sized>(
+pub(crate) fn checked_read_primitive<B: Buf, R: Sized>(
     b: &mut B,
     read: impl Fn(&mut B) -> R,
 ) -> Result<R> {
@@ -11,16 +11,20 @@ pub fn checked_read_primitive<B: Buf, R: Sized>(
 }

 #[inline]
-pub fn checked_read<B: Buf, R>(b: &mut B, read: impl Fn(&mut B) -> R, size: usize) -> Result<R> {
+pub(crate) fn checked_read<B: Buf, R>(
+    b: &mut B,
+    read: impl Fn(&mut B) -> R,
+    size: usize,
+) -> Result<R> {
     if b.remaining() < size {
-        Err(Error::IO("Not enough bytes in buffer to build object."))?;
-    }
-
-    Ok(read(b))
+        Err(Error::IO("Not enough bytes in buffer to build object."))
+    } else {
+        Ok(read(b))
+    }
 }

 #[inline]
-pub fn checked_write_primitive<B: BufMut, T: Sized>(
+pub(crate) fn checked_write_primitive<B: BufMut, T: Sized>(
     b: &mut B,
     write: impl Fn(&mut B, T),
     t: T,
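The rewrite above turns `checked_read` from statement style (an `Err(...)?;` bail-out followed by `Ok(...)`) into a single `if`/`else` expression, so both branches are the function's tail value and the spurious `?` on an always-`Err` expression disappears. A minimal sketch of the shape:

```rust
fn checked(remaining: usize, size: usize) -> Result<usize, &'static str> {
    if remaining < size {
        // Both branches are expressions; no early return needed.
        Err("not enough bytes in buffer")
    } else {
        Ok(remaining - size)
    }
}

fn main() {
    assert_eq!(checked(10, 4), Ok(6));
    assert!(checked(2, 4).is_err());
}
```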
@@ -29,16 +33,16 @@ pub fn checked_write_primitive<B: BufMut, T: Sized>(
 }

 #[inline]
-pub fn checked_write<B: BufMut, T>(
+pub(crate) fn checked_write<B: BufMut, T>(
     b: &mut B,
     write: impl Fn(&mut B, T),
     t: T,
     size: usize,
 ) -> Result<()> {
     if b.remaining_mut() < size {
-        Err(Error::IO("Not enough capacity to write object."))?;
-    }
-
+        Err(Error::IO("Not enough capacity to write object."))
+    } else {
         write(b, t);
         Ok(())
     }
+}
@@ -59,9 +59,12 @@
 //!
 //! ```

+#[cfg(test)]
+use hex as _;
+
 extern crate alloc;

-use core::{ops::Deref, str::from_utf8 as str_from_utf8};
+use core::str::from_utf8 as str_from_utf8;

 use bytes::{Buf, BufMut, Bytes, BytesMut};

@@ -130,7 +133,7 @@ pub fn to_bytes<T: EpeeObject>(val: T) -> Result<BytesMut> {
 fn read_header<B: Buf>(r: &mut B) -> Result<()> {
     let buf = checked_read(r, |b: &mut B| b.copy_to_bytes(HEADER.len()), HEADER.len())?;

-    if buf.deref() != HEADER {
+    if &*buf != HEADER {
         return Err(Error::Format("Data does not contain header"));
     }
     Ok(())
@@ -185,7 +188,7 @@ fn read_object<T: EpeeObject, B: Buf>(r: &mut B, skipped_objects: &mut u8) -> Re

     for _ in 0..number_o_field {
         let field_name_bytes = read_field_name_bytes(r)?;
-        let field_name = str_from_utf8(field_name_bytes.deref())?;
+        let field_name = str_from_utf8(&field_name_bytes)?;

         if !object_builder.add_field(field_name, r)? {
             skip_epee_value(r, skipped_objects)?;
@@ -289,7 +292,7 @@ where
     B: BufMut,
 {
     write_varint(usize_to_u64(iterator.len()), w)?;
-    for item in iterator.into_iter() {
+    for item in iterator {
         item.write(w)?;
     }
     Ok(())
@@ -329,10 +332,7 @@ impl EpeeObject for SkipObject {
 fn skip_epee_value<B: Buf>(r: &mut B, skipped_objects: &mut u8) -> Result<()> {
     let marker = read_marker(r)?;

-    let mut len = 1;
-    if marker.is_seq {
-        len = read_varint(r)?;
-    }
+    let len = if marker.is_seq { read_varint(r)? } else { 1 };

     if let Some(size) = marker.inner_marker.size() {
         let bytes_to_skip = size
@@ -19,13 +19,13 @@ pub enum InnerMarker {
 }

 impl InnerMarker {
-    pub fn size(&self) -> Option<usize> {
+    pub const fn size(&self) -> Option<usize> {
         Some(match self {
-            InnerMarker::I64 | InnerMarker::U64 | InnerMarker::F64 => 8,
-            InnerMarker::I32 | InnerMarker::U32 => 4,
-            InnerMarker::I16 | InnerMarker::U16 => 2,
-            InnerMarker::I8 | InnerMarker::U8 | InnerMarker::Bool => 1,
-            InnerMarker::String | InnerMarker::Object => return None,
+            Self::I64 | Self::U64 | Self::F64 => 8,
+            Self::I32 | Self::U32 => 4,
+            Self::I16 | Self::U16 => 2,
+            Self::I8 | Self::U8 | Self::Bool => 1,
+            Self::String | Self::Object => return None,
         })
     }
 }
@@ -40,23 +40,23 @@ pub struct Marker {

 impl Marker {
     pub(crate) const fn new(inner_marker: InnerMarker) -> Self {
-        Marker {
+        Self {
             inner_marker,
             is_seq: false,
         }
     }

+    #[must_use]
     pub const fn into_seq(self) -> Self {
-        if self.is_seq {
-            panic!("Sequence of sequence not allowed!");
-        }
+        assert!(!self.is_seq, "Sequence of sequence not allowed!");

         if matches!(self.inner_marker, InnerMarker::U8) {
-            return Marker {
+            return Self {
                 inner_marker: InnerMarker::String,
                 is_seq: false,
             };
         }

-        Marker {
+        Self {
             inner_marker: self.inner_marker,
             is_seq: true,
         }
@@ -112,7 +112,7 @@ impl TryFrom<u8> for Marker {
             _ => return Err(Error::Format("Unknown value Marker")),
         };

-        Ok(Marker {
+        Ok(Self {
             inner_marker,
             is_seq,
         })
@@ -71,7 +71,7 @@ impl<T: EpeeObject> EpeeValue for Vec<T> {

         let individual_marker = Marker::new(marker.inner_marker);

-        let mut res = Vec::with_capacity(len);
+        let mut res = Self::with_capacity(len);
         for _ in 0..len {
             res.push(T::read(r, &individual_marker)?);
         }
@@ -83,7 +83,7 @@ impl<T: EpeeObject> EpeeValue for Vec<T> {
     }

     fn epee_default_value() -> Option<Self> {
-        Some(Vec::new())
+        Some(Self::new())
     }

     fn write<B: BufMut>(self, w: &mut B) -> Result<()> {
@@ -181,7 +181,7 @@ impl EpeeValue for Vec<u8> {
     }

     fn epee_default_value() -> Option<Self> {
-        Some(Vec::new())
+        Some(Self::new())
     }

     fn should_write(&self) -> bool {
@@ -216,7 +216,7 @@ impl EpeeValue for Bytes {
     }

     fn epee_default_value() -> Option<Self> {
-        Some(Bytes::new())
+        Some(Self::new())
     }

     fn should_write(&self) -> bool {
@@ -247,14 +247,14 @@ impl EpeeValue for BytesMut {
             return Err(Error::IO("Not enough bytes to fill object"));
         }

-        let mut bytes = BytesMut::zeroed(len);
+        let mut bytes = Self::zeroed(len);
         r.copy_to_slice(&mut bytes);

         Ok(bytes)
     }

     fn epee_default_value() -> Option<Self> {
-        Some(BytesMut::new())
+        Some(Self::new())
     }

     fn should_write(&self) -> bool {
@@ -285,12 +285,11 @@ impl<const N: usize> EpeeValue for ByteArrayVec<N> {
             return Err(Error::IO("Not enough bytes to fill object"));
         }

-        ByteArrayVec::try_from(r.copy_to_bytes(len))
-            .map_err(|_| Error::Format("Field has invalid length"))
+        Self::try_from(r.copy_to_bytes(len)).map_err(|_| Error::Format("Field has invalid length"))
     }

     fn epee_default_value() -> Option<Self> {
-        Some(ByteArrayVec::try_from(Bytes::new()).unwrap())
+        Some(Self::try_from(Bytes::new()).unwrap())
     }

     fn should_write(&self) -> bool {
@@ -320,8 +319,7 @@ impl<const N: usize> EpeeValue for ByteArray<N> {
             return Err(Error::IO("Not enough bytes to fill object"));
         }

-        ByteArray::try_from(r.copy_to_bytes(N))
-            .map_err(|_| Error::Format("Field has invalid length"))
+        Self::try_from(r.copy_to_bytes(N)).map_err(|_| Error::Format("Field has invalid length"))
     }

     fn write<B: BufMut>(self, w: &mut B) -> Result<()> {
@@ -335,7 +333,7 @@ impl EpeeValue for String {

     fn read<B: Buf>(r: &mut B, marker: &Marker) -> Result<Self> {
         let bytes = Vec::<u8>::read(r, marker)?;
-        String::from_utf8(bytes).map_err(|_| Error::Format("Invalid string"))
+        Self::from_utf8(bytes).map_err(|_| Error::Format("Invalid string"))
     }

     fn should_write(&self) -> bool {
@@ -343,7 +341,7 @@ impl EpeeValue for String {
     }

     fn epee_default_value() -> Option<Self> {
-        Some(String::new())
+        Some(Self::new())
     }

     fn write<B: BufMut>(self, w: &mut B) -> Result<()> {
@@ -383,7 +381,7 @@ impl<const N: usize> EpeeValue for Vec<[u8; N]> {

         let individual_marker = Marker::new(marker.inner_marker);

-        let mut res = Vec::with_capacity(len);
+        let mut res = Self::with_capacity(len);
         for _ in 0..len {
             res.push(<[u8; N]>::read(r, &individual_marker)?);
         }
@@ -395,7 +393,7 @@ impl<const N: usize> EpeeValue for Vec<[u8; N]> {
     }

     fn epee_default_value() -> Option<Self> {
-        Some(Vec::new())
+        Some(Self::new())
     }

     fn write<B: BufMut>(self, w: &mut B) -> Result<()> {
@@ -21,14 +21,14 @@ const FITS_IN_FOUR_BYTES: u64 = 2_u64.pow(32 - SIZE_OF_SIZE_MARKER) - 1;
 /// ```
 pub fn read_varint<B: Buf>(r: &mut B) -> Result<u64> {
     if !r.has_remaining() {
-        Err(Error::IO("Not enough bytes to build VarInt"))?
+        return Err(Error::IO("Not enough bytes to build VarInt"));
     }

     let vi_start = r.get_u8();
     let len = 1 << (vi_start & 0b11);

     if r.remaining() < len - 1 {
-        Err(Error::IO("Not enough bytes to build VarInt"))?
+        return Err(Error::IO("Not enough bytes to build VarInt"));
     }

     let mut vi = u64::from(vi_start >> 2);
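In this varint format the low two bits of the first byte are a size marker (0, 1, 2, 3 → 1, 2, 4, 8 bytes total) and the value itself is stored shifted left by two, which is what `1 << (vi_start & 0b11)` and `vi_start >> 2` above decode. A small worked example for the single-byte case:

```rust
fn main() {
    // Value 9 fits in 6 bits, so it is encoded in one byte:
    // (9 << 2) | 0b00 = 36.
    let encoded: u8 = (9 << 2) | 0b00;
    assert_eq!(encoded, 36);

    // Decoding: the low two bits give the total length...
    let len = 1usize << (encoded & 0b11);
    assert_eq!(len, 1);

    // ...and the remaining bits are the value.
    let value = u64::from(encoded >> 2);
    assert_eq!(value, 9);
}
```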
@@ -67,12 +67,15 @@ pub fn write_varint<B: BufMut>(number: u64, w: &mut B) -> Result<()> {
     };

     if w.remaining_mut() < 1 << size_marker {
-        Err(Error::IO("Not enough capacity to write VarInt"))?;
+        return Err(Error::IO("Not enough capacity to write VarInt"));
     }

     let number = (number << 2) | size_marker;

-    // Although `as` is unsafe we just checked the length.
+    #[expect(
+        clippy::cast_possible_truncation,
+        reason = "Although `as` is unsafe we just checked the length."
+    )]
     match size_marker {
         0 => w.put_u8(number as u8),
         1 => w.put_u16_le(number as u16),
@@ -1,3 +1,5 @@
+#![expect(unused_crate_dependencies, reason = "outer test module")]
+
 use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes};

 struct AltName {
@@ -1,3 +1,5 @@
+#![expect(unused_crate_dependencies, reason = "outer test module")]
+
 use cuprate_epee_encoding::{epee_object, from_bytes};

 struct T {
@@ -1,3 +1,5 @@
+#![expect(unused_crate_dependencies, reason = "outer test module")]
+
 use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes};

 pub struct Optional {
@@ -58,7 +60,7 @@ fn epee_non_default_does_encode() {

     let val: Optional = from_bytes(&mut bytes).unwrap();
     assert_eq!(val.optional_val, -3);
-    assert_eq!(val.val, 8)
+    assert_eq!(val.val, 8);
 }

 #[test]
@@ -70,5 +72,5 @@ fn epee_value_not_present_with_default() {

     let val: Optional = from_bytes(&mut bytes).unwrap();
     assert_eq!(val.optional_val, -4);
-    assert_eq!(val.val, 76)
+    assert_eq!(val.val, 76);
 }
@@ -1,3 +1,5 @@
+#![expect(unused_crate_dependencies, reason = "outer test module")]
+
 use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes};

 struct Child {
@@ -37,6 +39,7 @@ epee_object!(
 );

 #[test]
+#[expect(clippy::float_cmp)]
 fn epee_flatten() {
     let val2 = ParentChild {
         h: 38.9,
@@ -1,5 +1,6 @@
+#![expect(unused_crate_dependencies, reason = "outer test module")]
+
 use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes};
-use std::ops::Deref;

 #[derive(Clone)]
 struct T {
@@ -28,6 +29,6 @@ fn optional_val_in_data() {
     ];
     let t: T = from_bytes(&mut &bytes[..]).unwrap();
     let bytes2 = to_bytes(t.clone()).unwrap();
-    assert_eq!(bytes.as_slice(), bytes2.deref());
+    assert_eq!(bytes.as_slice(), &*bytes2);
     assert_eq!(t.val.unwrap(), 21);
 }
@@ -1,3 +1,5 @@
+#![expect(unused_crate_dependencies, reason = "outer test module")]
+
 use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes};

 #[derive(Eq, PartialEq, Debug, Clone)]
@@ -5,7 +7,7 @@ pub struct SupportFlags(u32);

 impl From<u32> for SupportFlags {
     fn from(value: u32) -> Self {
-        SupportFlags(value)
+        Self(value)
     }
 }

@@ -1,3 +1,5 @@
+#![expect(unused_crate_dependencies, reason = "outer test module")]
+
 use cuprate_epee_encoding::{epee_object, from_bytes, to_bytes};

 #[derive(Clone, Debug, PartialEq)]
@@ -1,3 +1,5 @@
+#![expect(unused_crate_dependencies, reason = "outer test module")]
+
 use cuprate_epee_encoding::{epee_object, from_bytes};

 struct ObjSeq {
@@ -1,3 +1,5 @@
+#![expect(unused_crate_dependencies, reason = "outer test module")]
+
 use cuprate_epee_encoding::{epee_object, from_bytes};

 struct D {
@@ -737,5 +739,5 @@ fn stack_overflow() {

     let obj: Result<Q, _> = from_bytes(&mut bytes.as_slice());

-    assert!(obj.is_err())
+    assert!(obj.is_err());
 }
@@ -17,3 +17,6 @@ serde = { workspace = true, features = ["derive"], optional = true }

 [dev-dependencies]
 serde_json = { workspace = true, features = ["std"] }
+
+[lints]
+workspace = true
@@ -22,17 +22,15 @@ pub enum FixedByteError {
 }

 impl FixedByteError {
-    fn field_name(&self) -> &'static str {
+    const fn field_name(&self) -> &'static str {
         match self {
-            FixedByteError::InvalidLength => "input",
+            Self::InvalidLength => "input",
         }
     }

-    fn field_data(&self) -> &'static str {
+    const fn field_data(&self) -> &'static str {
         match self {
-            FixedByteError::InvalidLength => {
-                "Cannot create fix byte array, input has invalid length."
-            }
+            Self::InvalidLength => "Cannot create fix byte array, input has invalid length.",
         }
     }
 }
@@ -82,7 +80,7 @@ impl<const N: usize> ByteArray<N> {

 impl<const N: usize> From<[u8; N]> for ByteArray<N> {
     fn from(value: [u8; N]) -> Self {
-        ByteArray(Bytes::copy_from_slice(&value))
+        Self(Bytes::copy_from_slice(&value))
     }
 }

@@ -101,7 +99,7 @@ impl<const N: usize> TryFrom<Bytes> for ByteArray<N> {
         if value.len() != N {
             return Err(FixedByteError::InvalidLength);
         }
-        Ok(ByteArray(value))
+        Ok(Self(value))
     }
 }

@@ -112,7 +110,7 @@ impl<const N: usize> TryFrom<Vec<u8>> for ByteArray<N> {
         if value.len() != N {
             return Err(FixedByteError::InvalidLength);
         }
-        Ok(ByteArray(Bytes::from(value)))
+        Ok(Self(Bytes::from(value)))
     }
 }

@@ -142,11 +140,11 @@ impl<'de, const N: usize> Deserialize<'de> for ByteArrayVec<N> {
 }

 impl<const N: usize> ByteArrayVec<N> {
-    pub fn len(&self) -> usize {
+    pub const fn len(&self) -> usize {
         self.0.len() / N
     }

-    pub fn is_empty(&self) -> bool {
+    pub const fn is_empty(&self) -> bool {
         self.len() == 0
     }

@@ -162,6 +160,7 @@ impl<const N: usize> ByteArrayVec<N> {
     ///
     /// # Panics
     /// Panics if at > len.
+    #[must_use]
     pub fn split_off(&mut self, at: usize) -> Self {
         Self(self.0.split_off(at * N))
     }
@@ -169,9 +168,9 @@

 impl<const N: usize> From<&ByteArrayVec<N>> for Vec<[u8; N]> {
     fn from(value: &ByteArrayVec<N>) -> Self {
-        let mut out = Vec::with_capacity(value.len());
+        let mut out = Self::with_capacity(value.len());
         for i in 0..value.len() {
-            out.push(value[i])
+            out.push(value[i]);
         }

         out
@@ -181,11 +180,11 @@ impl<const N: usize> From<&ByteArrayVec<N>> for Vec<[u8; N]> {
 impl<const N: usize> From<Vec<[u8; N]>> for ByteArrayVec<N> {
     fn from(value: Vec<[u8; N]>) -> Self {
         let mut bytes = BytesMut::with_capacity(N * value.len());
-        for i in value.into_iter() {
-            bytes.extend_from_slice(&i)
+        for i in value {
+            bytes.extend_from_slice(&i);
         }

-        ByteArrayVec(bytes.freeze())
+        Self(bytes.freeze())
     }
 }

@@ -197,13 +196,13 @@ impl<const N: usize> TryFrom<Bytes> for ByteArrayVec<N> {
             return Err(FixedByteError::InvalidLength);
         }

-        Ok(ByteArrayVec(value))
+        Ok(Self(value))
     }
 }

 impl<const N: usize> From<[u8; N]> for ByteArrayVec<N> {
     fn from(value: [u8; N]) -> Self {
-        ByteArrayVec(Bytes::copy_from_slice(value.as_slice()))
+        Self(Bytes::copy_from_slice(value.as_slice()))
     }
 }

@@ -211,11 +210,11 @@ impl<const N: usize, const LEN: usize> From<[[u8; N]; LEN]> for ByteArrayVec<N>
     fn from(value: [[u8; N]; LEN]) -> Self {
         let mut bytes = BytesMut::with_capacity(N * LEN);

-        for val in value.into_iter() {
+        for val in value {
             bytes.put_slice(val.as_slice());
         }

-        ByteArrayVec(bytes.freeze())
+        Self(bytes.freeze())
     }
 }

@@ -227,7 +226,7 @@ impl<const N: usize> TryFrom<Vec<u8>> for ByteArrayVec<N> {
             return Err(FixedByteError::InvalidLength);
         }

-        Ok(ByteArrayVec(Bytes::from(value)))
+        Ok(Self(Bytes::from(value)))
     }
 }

@@ -235,9 +234,12 @@ impl<const N: usize> Index<usize> for ByteArrayVec<N> {
     type Output = [u8; N];

     fn index(&self, index: usize) -> &Self::Output {
-        if (index + 1) * N > self.0.len() {
-            panic!("Index out of range, idx: {}, length: {}", index, self.len());
-        }
+        assert!(
+            (index + 1) * N <= self.0.len(),
+            "Index out of range, idx: {}, length: {}",
+            index,
+            self.len()
+        );

         self.0[index * N..(index + 1) * N]
             .as_ref()
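`assert!` with a format string is equivalent to the `if`/`panic!` pair it replaces, but states the invariant positively and satisfies clippy's `manual_assert` lint. A minimal sketch of the same bounds check (standalone, with the chunk size passed in rather than a const generic):

```rust
fn check_index(index: usize, n: usize, len: usize) {
    assert!(
        (index + 1) * n <= len,
        "Index out of range, idx: {}, length: {}",
        index,
        len / n
    );
}

fn main() {
    // 3 chunks of 32 bytes: indices 0..=2 are fine.
    check_index(2, 32, 96);
    // check_index(3, 32, 96) would panic with the message above.
}
```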
@@ -14,6 +14,7 @@ tracing = ["dep:tracing", "tokio-util/tracing"]
 [dependencies]
 cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] }

+cfg-if = { workspace = true }
 thiserror = { workspace = true }
 bytes = { workspace = true, features = ["std"] }
 bitflags = { workspace = true }
@@ -27,3 +28,6 @@ rand = { workspace = true, features = ["std", "std_rng"] }
 tokio-util = { workspace = true, features = ["io-util"]}
 tokio = { workspace = true, features = ["full"] }
 futures = { workspace = true, features = ["std"] }
+
+[lints]
+workspace = true
@@ -47,7 +47,7 @@ pub struct LevinBucketCodec<C> {

 impl<C> Default for LevinBucketCodec<C> {
     fn default() -> Self {
-        LevinBucketCodec {
+        Self {
             state: LevinBucketState::WaitingForHeader,
             protocol: Protocol::default(),
             handshake_message_seen: false,
@@ -56,8 +56,8 @@ impl<C> Default for LevinBucketCodec<C> {
 }

 impl<C> LevinBucketCodec<C> {
-    pub fn new(protocol: Protocol) -> Self {
-        LevinBucketCodec {
+    pub const fn new(protocol: Protocol) -> Self {
+        Self {
             state: LevinBucketState::WaitingForHeader,
             protocol,
             handshake_message_seen: false,
@@ -112,8 +112,10 @@ impl<C: LevinCommand + Debug> Decoder for LevinBucketCodec<C> {
                     }
                 }

-                let _ =
-                    std::mem::replace(&mut self.state, LevinBucketState::WaitingForBody(head));
+                drop(std::mem::replace(
+                    &mut self.state,
+                    LevinBucketState::WaitingForBody(head),
+                ));
             }
             LevinBucketState::WaitingForBody(head) => {
                 let body_len = u64_to_usize(head.size);
@@ -145,7 +147,7 @@ impl<C: LevinCommand> Encoder<Bucket<C>> for LevinBucketCodec<C> {
     type Error = BucketError;
     fn encode(&mut self, item: Bucket<C>, dst: &mut BytesMut) -> Result<(), Self::Error> {
         if let Some(additional) = (HEADER_SIZE + item.body.len()).checked_sub(dst.capacity()) {
-            dst.reserve(additional)
+            dst.reserve(additional);
         }

         item.header.write_bytes_into(dst);
@@ -13,7 +13,7 @@
 // copies or substantial portions of the Software.
 //

-//! This module provides a struct BucketHead for the header of a levin protocol
+//! This module provides a struct `BucketHead` for the header of a levin protocol
 //! message.

 use bitflags::bitflags;
@@ -62,7 +62,7 @@ bitflags! {

 impl From<u32> for Flags {
     fn from(value: u32) -> Self {
-        Flags(value)
+        Self(value)
     }
 }

@@ -99,9 +99,9 @@ impl<C: LevinCommand> BucketHead<C> {
     ///
     /// # Panics
     /// This function will panic if there aren't enough bytes to fill the header.
-    /// Currently [HEADER_SIZE]
-    pub fn from_bytes(buf: &mut BytesMut) -> BucketHead<C> {
-        BucketHead {
+    /// Currently [`HEADER_SIZE`]
+    pub fn from_bytes(buf: &mut BytesMut) -> Self {
+        Self {
             signature: buf.get_u64_le(),
             size: buf.get_u64_le(),
             have_to_return_data: buf.get_u8() != 0,
@@ -33,6 +33,16 @@
 #![deny(unused_mut)]
 //#![deny(missing_docs)]

+cfg_if::cfg_if! {
+    // Used in `tests/`.
+    if #[cfg(test)] {
+        use futures as _;
+        use proptest as _;
+        use rand as _;
+        use tokio as _;
+    }
+}
+
 use std::fmt::Debug;

 use bytes::{Buf, Bytes};
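`use tokio as _;` style imports reference a crate without bringing anything into scope; wrapped in `cfg_if!` under `#[cfg(test)]`, they silence the `unused_crate_dependencies` lint for dev-dependencies that only the `tests/` directory uses. A minimal sketch, assuming `cfg-if` and `serde` are listed in the crate's Cargo.toml (hypothetical names; the real crate uses `futures`, `proptest`, `rand`, and `tokio` as above):

```rust
// crate root (lib.rs)
#![warn(unused_crate_dependencies)]

cfg_if::cfg_if! {
    // Only the integration tests use this dependency, so "use" it
    // here to keep the lint quiet in non-test builds too.
    if #[cfg(test)] {
        use serde as _;
    }
}

pub fn answer() -> u32 {
    42
}
```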
@@ -99,7 +109,7 @@ pub struct Protocol {

 impl Default for Protocol {
     fn default() -> Self {
-        Protocol {
+        Self {
             version: MONERO_PROTOCOL_VERSION,
             signature: MONERO_LEVIN_SIGNATURE,
             max_packet_size_before_handshake: MONERO_MAX_PACKET_SIZE_BEFORE_HANDSHAKE,
@@ -130,22 +140,22 @@ pub enum MessageType {

 impl MessageType {
     /// Returns if the message requires a response
-    pub fn have_to_return_data(&self) -> bool {
+    pub const fn have_to_return_data(&self) -> bool {
         match self {
-            MessageType::Request => true,
-            MessageType::Response | MessageType::Notification => false,
+            Self::Request => true,
+            Self::Response | Self::Notification => false,
         }
     }

-    /// Returns the `MessageType` given the flags and have_to_return_data fields
-    pub fn from_flags_and_have_to_return(
+    /// Returns the `MessageType` given the flags and `have_to_return_data` fields
+    pub const fn from_flags_and_have_to_return(
         flags: Flags,
         have_to_return: bool,
     ) -> Result<Self, BucketError> {
         Ok(match (flags, have_to_return) {
-            (Flags::REQUEST, true) => MessageType::Request,
-            (Flags::REQUEST, false) => MessageType::Notification,
-            (Flags::RESPONSE, false) => MessageType::Response,
+            (Flags::REQUEST, true) => Self::Request,
+            (Flags::REQUEST, false) => Self::Notification,
+            (Flags::RESPONSE, false) => Self::Response,
             _ => {
                 return Err(BucketError::InvalidHeaderFlags(
                     "Unable to assign a message type to this bucket",
@ -154,10 +164,10 @@ impl MessageType {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn as_flags(&self) -> header::Flags {
|
pub const fn as_flags(&self) -> Flags {
|
||||||
match self {
|
match self {
|
||||||
MessageType::Request | MessageType::Notification => header::Flags::REQUEST,
|
Self::Request | Self::Notification => Flags::REQUEST,
|
||||||
MessageType::Response => header::Flags::RESPONSE,
|
Self::Response => Flags::RESPONSE,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -173,7 +183,7 @@ pub struct BucketBuilder<C> {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<C: LevinCommand> BucketBuilder<C> {
|
impl<C: LevinCommand> BucketBuilder<C> {
|
||||||
pub fn new(protocol: &Protocol) -> Self {
|
pub const fn new(protocol: &Protocol) -> Self {
|
||||||
Self {
|
Self {
|
||||||
signature: Some(protocol.signature),
|
signature: Some(protocol.signature),
|
||||||
ty: None,
|
ty: None,
|
||||||
|
@ -185,27 +195,27 @@ impl<C: LevinCommand> BucketBuilder<C> {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn set_signature(&mut self, sig: u64) {
|
pub fn set_signature(&mut self, sig: u64) {
|
||||||
self.signature = Some(sig)
|
self.signature = Some(sig);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn set_message_type(&mut self, ty: MessageType) {
|
pub fn set_message_type(&mut self, ty: MessageType) {
|
||||||
self.ty = Some(ty)
|
self.ty = Some(ty);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn set_command(&mut self, command: C) {
|
pub fn set_command(&mut self, command: C) {
|
||||||
self.command = Some(command)
|
self.command = Some(command);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn set_return_code(&mut self, code: i32) {
|
pub fn set_return_code(&mut self, code: i32) {
|
||||||
self.return_code = Some(code)
|
self.return_code = Some(code);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn set_protocol_version(&mut self, version: u32) {
|
pub fn set_protocol_version(&mut self, version: u32) {
|
||||||
self.protocol_version = Some(version)
|
self.protocol_version = Some(version);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn set_body(&mut self, body: Bytes) {
|
pub fn set_body(&mut self, body: Bytes) {
|
||||||
self.body = Some(body)
|
self.body = Some(body);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn finish(self) -> Bucket<C> {
|
pub fn finish(self) -> Bucket<C> {
|
||||||
|
|
|
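Putting the builder methods above together: a caller seeds a `BucketBuilder` from the `Protocol`, lets the message body fill in the rest via the setters, and `finish` assembles the `Bucket`. A sketch, assuming the crate's `LevinBody`, `Bucket`, and `BucketError` items are in scope; this mirrors what `make_fragmented_messages` below does:

fn to_bucket<T: LevinBody>(
    protocol: &Protocol,
    message: T,
) -> Result<Bucket<T::Command>, BucketError> {
    let mut builder = BucketBuilder::new(protocol);
    // `encode` is expected to call the setters (message type, command, body).
    message.encode(&mut builder)?;
    Ok(builder.finish())
}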
@@ -33,13 +33,13 @@ pub enum LevinMessage<T: LevinBody> {

 impl<T: LevinBody> From<T> for LevinMessage<T> {
     fn from(value: T) -> Self {
-        LevinMessage::Body(value)
+        Self::Body(value)
     }
 }

 impl<T: LevinBody> From<Bucket<T::Command>> for LevinMessage<T> {
     fn from(value: Bucket<T::Command>) -> Self {
-        LevinMessage::Bucket(value)
+        Self::Bucket(value)
     }
 }

@@ -58,7 +58,7 @@ pub struct Dummy(pub usize);

 impl<T: LevinBody> From<Dummy> for LevinMessage<T> {
     fn from(value: Dummy) -> Self {
-        LevinMessage::Dummy(value.0)
+        Self::Dummy(value.0)
     }
 }

@@ -76,12 +76,11 @@ pub fn make_fragmented_messages<T: LevinBody>(
     fragment_size: usize,
     message: T,
 ) -> Result<Vec<Bucket<T::Command>>, BucketError> {
-    if fragment_size * 2 < HEADER_SIZE {
-        panic!(
-            "Fragment size: {fragment_size}, is too small, must be at least {}",
-            2 * HEADER_SIZE
-        );
-    }
+    assert!(
+        fragment_size * 2 >= HEADER_SIZE,
+        "Fragment size: {fragment_size}, is too small, must be at least {}",
+        2 * HEADER_SIZE
+    );

     let mut builder = BucketBuilder::new(protocol);
     message.encode(&mut builder)?;
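The `if`/`panic!` to `assert!` rewrite is behavior-preserving: `assert!` takes the condition in positive form plus the same format arguments, which is what clippy's `manual_assert` lint suggests. The same shape in a standalone sketch:

fn check_fragment_size(fragment_size: usize, header_size: usize) {
    // Panics with the identical message when the condition fails.
    assert!(
        fragment_size * 2 >= header_size,
        "Fragment size: {fragment_size}, is too small, must be at least {}",
        2 * header_size
    );
}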
@@ -1,3 +1,9 @@
+#![expect(
+    clippy::tests_outside_test_module,
+    unused_crate_dependencies,
+    reason = "outer test module"
+)]
+
 use bytes::{Buf, BufMut, Bytes, BytesMut};
 use futures::{SinkExt, StreamExt};
 use proptest::{prelude::any_with, prop_assert_eq, proptest, sample::size_range};

@@ -58,12 +64,12 @@ impl LevinBody for TestBody {
     ) -> Result<Self, BucketError> {
         let size = u64_to_usize(body.get_u64_le());
         // bucket
-        Ok(TestBody::Bytes(size, body.copy_to_bytes(size)))
+        Ok(Self::Bytes(size, body.copy_to_bytes(size)))
     }

     fn encode(self, builder: &mut BucketBuilder<Self::Command>) -> Result<(), BucketError> {
         match self {
-            TestBody::Bytes(len, bytes) => {
+            Self::Bytes(len, bytes) => {
                 let mut buf = BytesMut::new();
                 buf.put_u64_le(len as u64);
                 buf.extend_from_slice(bytes.as_ref());

@@ -141,12 +147,12 @@ proptest! {
        message2.extend_from_slice(&fragments[0].body[(33 + 8)..]);

        for frag in fragments.iter().skip(1) {
-           message2.extend_from_slice(frag.body.as_ref())
+           message2.extend_from_slice(frag.body.as_ref());
        }

        prop_assert_eq!(message.as_slice(), &message2[0..message.len()], "numb_fragments: {}", fragments.len());

-       for byte in message2[message.len()..].iter(){
+       for byte in &message2[message.len()..]{
            prop_assert_eq!(*byte, 0);
        }
    }

@@ -15,7 +15,7 @@ cuprate-levin = { path = "../levin" }
 cuprate-epee-encoding = { path = "../epee-encoding" }
 cuprate-fixed-bytes = { path = "../fixed-bytes" }
 cuprate-types = { path = "../../types", default-features = false, features = ["epee"] }
-cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] }
+cuprate-helper = { path = "../../helper", default-features = false, features = ["map"] }

 bitflags = { workspace = true, features = ["std"] }
 bytes = { workspace = true, features = ["std"] }

@@ -24,3 +24,5 @@ thiserror = { workspace = true }
 [dev-dependencies]
 hex = { workspace = true, features = ["std"]}

+[lints]
+workspace = true
@@ -51,38 +51,38 @@ impl EpeeObject for NetworkAddress {
 }

 impl NetworkAddress {
-    pub fn get_zone(&self) -> NetZone {
+    pub const fn get_zone(&self) -> NetZone {
         match self {
-            NetworkAddress::Clear(_) => NetZone::Public,
+            Self::Clear(_) => NetZone::Public,
         }
     }

-    pub fn is_loopback(&self) -> bool {
+    pub const fn is_loopback(&self) -> bool {
         // TODO
         false
     }

-    pub fn is_local(&self) -> bool {
+    pub const fn is_local(&self) -> bool {
         // TODO
         false
     }

-    pub fn port(&self) -> u16 {
+    pub const fn port(&self) -> u16 {
         match self {
-            NetworkAddress::Clear(ip) => ip.port(),
+            Self::Clear(ip) => ip.port(),
         }
     }
 }

 impl From<net::SocketAddrV4> for NetworkAddress {
     fn from(value: net::SocketAddrV4) -> Self {
-        NetworkAddress::Clear(value.into())
+        Self::Clear(value.into())
     }
 }

 impl From<net::SocketAddrV6> for NetworkAddress {
     fn from(value: net::SocketAddrV6) -> Self {
-        NetworkAddress::Clear(value.into())
+        Self::Clear(value.into())
     }
 }

@@ -74,7 +74,7 @@ impl From<NetworkAddress> for TaggedNetworkAddress {
     fn from(value: NetworkAddress) -> Self {
         match value {
             NetworkAddress::Clear(addr) => match addr {
-                SocketAddr::V4(addr) => TaggedNetworkAddress {
+                SocketAddr::V4(addr) => Self {
                     ty: Some(1),
                     addr: Some(AllFieldsNetworkAddress {
                         m_ip: Some(u32::from_be_bytes(addr.ip().octets())),

@@ -82,7 +82,7 @@ impl From<NetworkAddress> for TaggedNetworkAddress {
                     addr: None,
                 }),
             },
-            SocketAddr::V6(addr) => TaggedNetworkAddress {
+            SocketAddr::V6(addr) => Self {
                 ty: Some(2),
                 addr: Some(AllFieldsNetworkAddress {
                     addr: Some(addr.ip().octets()),
@@ -55,27 +55,27 @@ pub enum LevinCommand {

 impl std::fmt::Display for LevinCommand {
     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        if let LevinCommand::Unknown(id) = self {
-            return f.write_str(&format!("unknown id: {}", id));
+        if let Self::Unknown(id) = self {
+            return f.write_str(&format!("unknown id: {id}"));
         }

         f.write_str(match self {
-            LevinCommand::Handshake => "handshake",
-            LevinCommand::TimedSync => "timed sync",
-            LevinCommand::Ping => "ping",
-            LevinCommand::SupportFlags => "support flags",
+            Self::Handshake => "handshake",
+            Self::TimedSync => "timed sync",
+            Self::Ping => "ping",
+            Self::SupportFlags => "support flags",

-            LevinCommand::NewBlock => "new block",
-            LevinCommand::NewTransactions => "new transactions",
-            LevinCommand::GetObjectsRequest => "get objects request",
-            LevinCommand::GetObjectsResponse => "get objects response",
-            LevinCommand::ChainRequest => "chain request",
-            LevinCommand::ChainResponse => "chain response",
-            LevinCommand::NewFluffyBlock => "new fluffy block",
-            LevinCommand::FluffyMissingTxsRequest => "fluffy missing transaction request",
-            LevinCommand::GetTxPoolCompliment => "get transaction pool compliment",
+            Self::NewBlock => "new block",
+            Self::NewTransactions => "new transactions",
+            Self::GetObjectsRequest => "get objects request",
+            Self::GetObjectsResponse => "get objects response",
+            Self::ChainRequest => "chain request",
+            Self::ChainResponse => "chain response",
+            Self::NewFluffyBlock => "new fluffy block",
+            Self::FluffyMissingTxsRequest => "fluffy missing transaction request",
+            Self::GetTxPoolCompliment => "get transaction pool compliment",

-            LevinCommand::Unknown(_) => unreachable!(),
+            Self::Unknown(_) => unreachable!(),
         })
     }
 }

@@ -83,50 +83,51 @@ impl std::fmt::Display for LevinCommand {
 impl LevinCommandTrait for LevinCommand {
     fn bucket_size_limit(&self) -> u64 {
         // https://github.com/monero-project/monero/blob/00fd416a99686f0956361d1cd0337fe56e58d4a7/src/cryptonote_basic/connection_context.cpp#L37
+        #[expect(clippy::match_same_arms, reason = "formatting is more clear")]
         match self {
-            LevinCommand::Handshake => 65536,
-            LevinCommand::TimedSync => 65536,
-            LevinCommand::Ping => 4096,
-            LevinCommand::SupportFlags => 4096,
+            Self::Handshake => 65536,
+            Self::TimedSync => 65536,
+            Self::Ping => 4096,
+            Self::SupportFlags => 4096,

-            LevinCommand::NewBlock => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though)
-            LevinCommand::NewTransactions => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though)
-            LevinCommand::GetObjectsRequest => 1024 * 1024 * 2, // 2 MB
-            LevinCommand::GetObjectsResponse => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though)
-            LevinCommand::ChainRequest => 512 * 1024, // 512 kB
-            LevinCommand::ChainResponse => 1024 * 1024 * 4, // 4 MB
-            LevinCommand::NewFluffyBlock => 1024 * 1024 * 4, // 4 MB
-            LevinCommand::FluffyMissingTxsRequest => 1024 * 1024, // 1 MB
-            LevinCommand::GetTxPoolCompliment => 1024 * 1024 * 4, // 4 MB
+            Self::NewBlock => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though)
+            Self::NewTransactions => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though)
+            Self::GetObjectsRequest => 1024 * 1024 * 2, // 2 MB
+            Self::GetObjectsResponse => 1024 * 1024 * 128, // 128 MB (max packet is a bit less than 100 MB though)
+            Self::ChainRequest => 512 * 1024, // 512 kB
+            Self::ChainResponse => 1024 * 1024 * 4, // 4 MB
+            Self::NewFluffyBlock => 1024 * 1024 * 4, // 4 MB
+            Self::FluffyMissingTxsRequest => 1024 * 1024, // 1 MB
+            Self::GetTxPoolCompliment => 1024 * 1024 * 4, // 4 MB

-            LevinCommand::Unknown(_) => u64::MAX,
+            Self::Unknown(_) => u64::MAX,
         }
     }

     fn is_handshake(&self) -> bool {
-        matches!(self, LevinCommand::Handshake)
+        matches!(self, Self::Handshake)
     }
 }

 impl From<u32> for LevinCommand {
     fn from(value: u32) -> Self {
         match value {
-            1001 => LevinCommand::Handshake,
-            1002 => LevinCommand::TimedSync,
-            1003 => LevinCommand::Ping,
-            1007 => LevinCommand::SupportFlags,
+            1001 => Self::Handshake,
+            1002 => Self::TimedSync,
+            1003 => Self::Ping,
+            1007 => Self::SupportFlags,

-            2001 => LevinCommand::NewBlock,
-            2002 => LevinCommand::NewTransactions,
-            2003 => LevinCommand::GetObjectsRequest,
-            2004 => LevinCommand::GetObjectsResponse,
-            2006 => LevinCommand::ChainRequest,
-            2007 => LevinCommand::ChainResponse,
-            2008 => LevinCommand::NewFluffyBlock,
-            2009 => LevinCommand::FluffyMissingTxsRequest,
-            2010 => LevinCommand::GetTxPoolCompliment,
+            2001 => Self::NewBlock,
+            2002 => Self::NewTransactions,
+            2003 => Self::GetObjectsRequest,
+            2004 => Self::GetObjectsResponse,
+            2006 => Self::ChainRequest,
+            2007 => Self::ChainResponse,
+            2008 => Self::NewFluffyBlock,
+            2009 => Self::FluffyMissingTxsRequest,
+            2010 => Self::GetTxPoolCompliment,

-            x => LevinCommand::Unknown(x),
+            x => Self::Unknown(x),
         }
     }
 }
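A quick sanity check of the numeric table above; note the gaps (1004 to 1006, and 2005), which fall through to `Unknown`. Assuming `LevinCommand` is in scope:

// `matches!` avoids relying on a `PartialEq` impl.
assert!(matches!(LevinCommand::from(1001_u32), LevinCommand::Handshake));
assert!(matches!(LevinCommand::from(2005_u32), LevinCommand::Unknown(2005)));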
|
@ -191,19 +192,19 @@ pub enum ProtocolMessage {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ProtocolMessage {
|
impl ProtocolMessage {
|
||||||
pub fn command(&self) -> LevinCommand {
|
pub const fn command(&self) -> LevinCommand {
|
||||||
use LevinCommand as C;
|
use LevinCommand as C;
|
||||||
|
|
||||||
match self {
|
match self {
|
||||||
ProtocolMessage::NewBlock(_) => C::NewBlock,
|
Self::NewBlock(_) => C::NewBlock,
|
||||||
ProtocolMessage::NewFluffyBlock(_) => C::NewFluffyBlock,
|
Self::NewFluffyBlock(_) => C::NewFluffyBlock,
|
||||||
ProtocolMessage::GetObjectsRequest(_) => C::GetObjectsRequest,
|
Self::GetObjectsRequest(_) => C::GetObjectsRequest,
|
||||||
ProtocolMessage::GetObjectsResponse(_) => C::GetObjectsResponse,
|
Self::GetObjectsResponse(_) => C::GetObjectsResponse,
|
||||||
ProtocolMessage::ChainRequest(_) => C::ChainRequest,
|
Self::ChainRequest(_) => C::ChainRequest,
|
||||||
ProtocolMessage::ChainEntryResponse(_) => C::ChainResponse,
|
Self::ChainEntryResponse(_) => C::ChainResponse,
|
||||||
ProtocolMessage::NewTransactions(_) => C::NewTransactions,
|
Self::NewTransactions(_) => C::NewTransactions,
|
||||||
ProtocolMessage::FluffyMissingTransactionsRequest(_) => C::FluffyMissingTxsRequest,
|
Self::FluffyMissingTransactionsRequest(_) => C::FluffyMissingTxsRequest,
|
||||||
ProtocolMessage::GetTxPoolCompliment(_) => C::GetTxPoolCompliment,
|
Self::GetTxPoolCompliment(_) => C::GetTxPoolCompliment,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -230,26 +231,26 @@ impl ProtocolMessage {
|
||||||
use LevinCommand as C;
|
use LevinCommand as C;
|
||||||
|
|
||||||
match self {
|
match self {
|
||||||
ProtocolMessage::NewBlock(val) => build_message(C::NewBlock, val, builder)?,
|
Self::NewBlock(val) => build_message(C::NewBlock, val, builder)?,
|
||||||
ProtocolMessage::NewTransactions(val) => {
|
Self::NewTransactions(val) => {
|
||||||
build_message(C::NewTransactions, val, builder)?
|
build_message(C::NewTransactions, val, builder)?;
|
||||||
}
|
}
|
||||||
ProtocolMessage::GetObjectsRequest(val) => {
|
Self::GetObjectsRequest(val) => {
|
||||||
build_message(C::GetObjectsRequest, val, builder)?
|
build_message(C::GetObjectsRequest, val, builder)?;
|
||||||
}
|
}
|
||||||
ProtocolMessage::GetObjectsResponse(val) => {
|
Self::GetObjectsResponse(val) => {
|
||||||
build_message(C::GetObjectsResponse, val, builder)?
|
build_message(C::GetObjectsResponse, val, builder)?;
|
||||||
}
|
}
|
||||||
ProtocolMessage::ChainRequest(val) => build_message(C::ChainRequest, val, builder)?,
|
Self::ChainRequest(val) => build_message(C::ChainRequest, val, builder)?,
|
||||||
ProtocolMessage::ChainEntryResponse(val) => {
|
Self::ChainEntryResponse(val) => {
|
||||||
build_message(C::ChainResponse, val, builder)?
|
build_message(C::ChainResponse, val, builder)?;
|
||||||
}
|
}
|
||||||
ProtocolMessage::NewFluffyBlock(val) => build_message(C::NewFluffyBlock, val, builder)?,
|
Self::NewFluffyBlock(val) => build_message(C::NewFluffyBlock, val, builder)?,
|
||||||
ProtocolMessage::FluffyMissingTransactionsRequest(val) => {
|
Self::FluffyMissingTransactionsRequest(val) => {
|
||||||
build_message(C::FluffyMissingTxsRequest, val, builder)?
|
build_message(C::FluffyMissingTxsRequest, val, builder)?;
|
||||||
}
|
}
|
||||||
ProtocolMessage::GetTxPoolCompliment(val) => {
|
Self::GetTxPoolCompliment(val) => {
|
||||||
build_message(C::GetTxPoolCompliment, val, builder)?
|
build_message(C::GetTxPoolCompliment, val, builder)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -265,14 +266,14 @@ pub enum AdminRequestMessage {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AdminRequestMessage {
|
impl AdminRequestMessage {
|
||||||
pub fn command(&self) -> LevinCommand {
|
pub const fn command(&self) -> LevinCommand {
|
||||||
use LevinCommand as C;
|
use LevinCommand as C;
|
||||||
|
|
||||||
match self {
|
match self {
|
||||||
AdminRequestMessage::Handshake(_) => C::Handshake,
|
Self::Handshake(_) => C::Handshake,
|
||||||
AdminRequestMessage::Ping => C::Ping,
|
Self::Ping => C::Ping,
|
||||||
AdminRequestMessage::SupportFlags => C::SupportFlags,
|
Self::SupportFlags => C::SupportFlags,
|
||||||
AdminRequestMessage::TimedSync(_) => C::TimedSync,
|
Self::TimedSync(_) => C::TimedSync,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -286,13 +287,13 @@ impl AdminRequestMessage {
|
||||||
cuprate_epee_encoding::from_bytes::<EmptyMessage, _>(buf)
|
cuprate_epee_encoding::from_bytes::<EmptyMessage, _>(buf)
|
||||||
.map_err(|e| BucketError::BodyDecodingError(e.into()))?;
|
.map_err(|e| BucketError::BodyDecodingError(e.into()))?;
|
||||||
|
|
||||||
AdminRequestMessage::Ping
|
Self::Ping
|
||||||
}
|
}
|
||||||
C::SupportFlags => {
|
C::SupportFlags => {
|
||||||
cuprate_epee_encoding::from_bytes::<EmptyMessage, _>(buf)
|
cuprate_epee_encoding::from_bytes::<EmptyMessage, _>(buf)
|
||||||
.map_err(|e| BucketError::BodyDecodingError(e.into()))?;
|
.map_err(|e| BucketError::BodyDecodingError(e.into()))?;
|
||||||
|
|
||||||
AdminRequestMessage::SupportFlags
|
Self::SupportFlags
|
||||||
}
|
}
|
||||||
_ => return Err(BucketError::UnknownCommand),
|
_ => return Err(BucketError::UnknownCommand),
|
||||||
})
|
})
|
||||||
|
@ -302,11 +303,11 @@ impl AdminRequestMessage {
|
||||||
use LevinCommand as C;
|
use LevinCommand as C;
|
||||||
|
|
||||||
match self {
|
match self {
|
||||||
AdminRequestMessage::Handshake(val) => build_message(C::Handshake, val, builder)?,
|
Self::Handshake(val) => build_message(C::Handshake, val, builder)?,
|
||||||
AdminRequestMessage::TimedSync(val) => build_message(C::TimedSync, val, builder)?,
|
Self::TimedSync(val) => build_message(C::TimedSync, val, builder)?,
|
||||||
AdminRequestMessage::Ping => build_message(C::Ping, EmptyMessage, builder)?,
|
Self::Ping => build_message(C::Ping, EmptyMessage, builder)?,
|
||||||
AdminRequestMessage::SupportFlags => {
|
Self::SupportFlags => {
|
||||||
build_message(C::SupportFlags, EmptyMessage, builder)?
|
build_message(C::SupportFlags, EmptyMessage, builder)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -322,14 +323,14 @@ pub enum AdminResponseMessage {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AdminResponseMessage {
|
impl AdminResponseMessage {
|
||||||
pub fn command(&self) -> LevinCommand {
|
pub const fn command(&self) -> LevinCommand {
|
||||||
use LevinCommand as C;
|
use LevinCommand as C;
|
||||||
|
|
||||||
match self {
|
match self {
|
||||||
AdminResponseMessage::Handshake(_) => C::Handshake,
|
Self::Handshake(_) => C::Handshake,
|
||||||
AdminResponseMessage::Ping(_) => C::Ping,
|
Self::Ping(_) => C::Ping,
|
||||||
AdminResponseMessage::SupportFlags(_) => C::SupportFlags,
|
Self::SupportFlags(_) => C::SupportFlags,
|
||||||
AdminResponseMessage::TimedSync(_) => C::TimedSync,
|
Self::TimedSync(_) => C::TimedSync,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -349,11 +350,11 @@ impl AdminResponseMessage {
|
||||||
use LevinCommand as C;
|
use LevinCommand as C;
|
||||||
|
|
||||||
match self {
|
match self {
|
||||||
AdminResponseMessage::Handshake(val) => build_message(C::Handshake, val, builder)?,
|
Self::Handshake(val) => build_message(C::Handshake, val, builder)?,
|
||||||
AdminResponseMessage::TimedSync(val) => build_message(C::TimedSync, val, builder)?,
|
Self::TimedSync(val) => build_message(C::TimedSync, val, builder)?,
|
||||||
AdminResponseMessage::Ping(val) => build_message(C::Ping, val, builder)?,
|
Self::Ping(val) => build_message(C::Ping, val, builder)?,
|
||||||
AdminResponseMessage::SupportFlags(val) => {
|
Self::SupportFlags(val) => {
|
||||||
build_message(C::SupportFlags, val, builder)?
|
build_message(C::SupportFlags, val, builder)?;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -368,23 +369,23 @@ pub enum Message {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Message {
|
impl Message {
|
||||||
pub fn is_request(&self) -> bool {
|
pub const fn is_request(&self) -> bool {
|
||||||
matches!(self, Message::Request(_))
|
matches!(self, Self::Request(_))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn is_response(&self) -> bool {
|
pub const fn is_response(&self) -> bool {
|
||||||
matches!(self, Message::Response(_))
|
matches!(self, Self::Response(_))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn is_protocol(&self) -> bool {
|
pub const fn is_protocol(&self) -> bool {
|
||||||
matches!(self, Message::Protocol(_))
|
matches!(self, Self::Protocol(_))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn command(&self) -> LevinCommand {
|
pub const fn command(&self) -> LevinCommand {
|
||||||
match self {
|
match self {
|
||||||
Message::Request(mes) => mes.command(),
|
Self::Request(mes) => mes.command(),
|
||||||
Message::Response(mes) => mes.command(),
|
Self::Response(mes) => mes.command(),
|
||||||
Message::Protocol(mes) => mes.command(),
|
Self::Protocol(mes) => mes.command(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -398,27 +399,25 @@ impl LevinBody for Message {
|
||||||
command: LevinCommand,
|
command: LevinCommand,
|
||||||
) -> Result<Self, BucketError> {
|
) -> Result<Self, BucketError> {
|
||||||
Ok(match typ {
|
Ok(match typ {
|
||||||
MessageType::Request => Message::Request(AdminRequestMessage::decode(body, command)?),
|
MessageType::Request => Self::Request(AdminRequestMessage::decode(body, command)?),
|
||||||
MessageType::Response => {
|
MessageType::Response => Self::Response(AdminResponseMessage::decode(body, command)?),
|
||||||
Message::Response(AdminResponseMessage::decode(body, command)?)
|
MessageType::Notification => Self::Protocol(ProtocolMessage::decode(body, command)?),
|
||||||
}
|
|
||||||
MessageType::Notification => Message::Protocol(ProtocolMessage::decode(body, command)?),
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn encode(self, builder: &mut BucketBuilder<LevinCommand>) -> Result<(), BucketError> {
|
fn encode(self, builder: &mut BucketBuilder<LevinCommand>) -> Result<(), BucketError> {
|
||||||
match self {
|
match self {
|
||||||
Message::Protocol(pro) => {
|
Self::Protocol(pro) => {
|
||||||
builder.set_message_type(MessageType::Notification);
|
builder.set_message_type(MessageType::Notification);
|
||||||
builder.set_return_code(0);
|
builder.set_return_code(0);
|
||||||
pro.build(builder)
|
pro.build(builder)
|
||||||
}
|
}
|
||||||
Message::Request(req) => {
|
Self::Request(req) => {
|
||||||
builder.set_message_type(MessageType::Request);
|
builder.set_message_type(MessageType::Request);
|
||||||
builder.set_return_code(0);
|
builder.set_return_code(0);
|
||||||
req.build(builder)
|
req.build(builder)
|
||||||
}
|
}
|
||||||
Message::Response(res) => {
|
Self::Response(res) => {
|
||||||
builder.set_message_type(MessageType::Response);
|
builder.set_message_type(MessageType::Response);
|
||||||
builder.set_return_code(1);
|
builder.set_return_code(1);
|
||||||
res.build(builder)
|
res.build(builder)
|
||||||
|
|
|
@@ -45,7 +45,7 @@ pub struct HandshakeResponse {
     pub node_data: BasicNodeData,
     /// Core Sync Data
     pub payload_data: CoreSyncData,
-    /// PeerList
+    /// `PeerList`
     pub local_peerlist_new: Vec<PeerListEntryBase>,
 }

@@ -56,7 +56,7 @@ epee_object!(
     local_peerlist_new: Vec<PeerListEntryBase>,
 );

-/// A TimedSync Request
+/// A `TimedSync` Request
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct TimedSyncRequest {
     /// Core Sync Data

@@ -68,12 +68,12 @@ epee_object!(
     payload_data: CoreSyncData,
 );

-/// A TimedSync Response
+/// A `TimedSync` Response
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct TimedSyncResponse {
     /// Core Sync Data
     pub payload_data: CoreSyncData,
-    /// PeerList
+    /// `PeerList`
     pub local_peerlist_new: Vec<PeerListEntryBase>,
 }

@@ -18,6 +18,7 @@
 use bitflags::bitflags;

 use cuprate_epee_encoding::epee_object;
+use cuprate_helper::map::split_u128_into_low_high_bits;
 pub use cuprate_types::{BlockCompleteEntry, PrunedTxBlobEntry, TransactionBlobs};

 use crate::NetworkAddress;
@@ -34,7 +35,7 @@ bitflags! {

 impl From<u32> for PeerSupportFlags {
     fn from(value: u32) -> Self {
-        PeerSupportFlags(value)
+        Self(value)
     }
 }

@@ -113,16 +114,17 @@ epee_object! {
 }

 impl CoreSyncData {
-    pub fn new(
+    pub const fn new(
         cumulative_difficulty_128: u128,
         current_height: u64,
         pruning_seed: u32,
         top_id: [u8; 32],
         top_version: u8,
-    ) -> CoreSyncData {
-        let cumulative_difficulty = cumulative_difficulty_128 as u64;
-        let cumulative_difficulty_top64 = (cumulative_difficulty_128 >> 64) as u64;
-        CoreSyncData {
+    ) -> Self {
+        let (cumulative_difficulty, cumulative_difficulty_top64) =
+            split_u128_into_low_high_bits(cumulative_difficulty_128);
+
+        Self {
             cumulative_difficulty,
             cumulative_difficulty_top64,
             current_height,
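The two casts the old code performed by hand are now one call into `cuprate-helper`'s `map` feature, which is why the wire crate's Cargo.toml switched from the `cast` feature to `map` earlier in this diff. A sketch of what the helper is assumed to do, mirroring the casts it replaces (low 64 bits first, high 64 bits second, matching the destructuring above):

const fn split_u128_into_low_high_bits(value: u128) -> (u64, u64) {
    // Truncating cast keeps the low half; the shift exposes the high half.
    (value as u64, (value >> 64) as u64)
}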
@@ -139,7 +141,7 @@ impl CoreSyncData {
     }
 }

-/// PeerListEntryBase, information kept on a peer which will be entered
+/// `PeerListEntryBase`, information kept on a peer which will be entered
 /// in a peer list/store.
 #[derive(Clone, Copy, Debug, Eq, PartialEq)]
 pub struct PeerListEntryBase {

@@ -127,7 +127,7 @@ pub struct ChainResponse {

 impl ChainResponse {
     #[inline]
-    pub fn cumulative_difficulty(&self) -> u128 {
+    pub const fn cumulative_difficulty(&self) -> u128 {
         let cumulative_difficulty = self.cumulative_difficulty_top64 as u128;
         cumulative_difficulty << 64 | self.cumulative_difficulty_low64 as u128
     }
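`cumulative_difficulty` is the inverse operation: the two `u64` halves recombine losslessly into the original `u128` (`<<` binds tighter than `|`, so no parentheses are needed). In isolation:

const fn combine(low64: u64, top64: u64) -> u128 {
    // Widen both halves, shift the top half into place, then merge.
    ((top64 as u128) << 64) | (low64 as u128)
}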
@@ -159,7 +159,7 @@ epee_object!(
     current_blockchain_height: u64,
 );

-/// A request for Txs we are missing from our TxPool
+/// A request for Txs we are missing from our `TxPool`
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct FluffyMissingTransactionsRequest {
     /// The Block we are missing the Txs in

@@ -177,7 +177,7 @@ epee_object!(
     missing_tx_indices: Vec<u64> as ContainerAsBlob<u64>,
 );

-/// TxPoolCompliment
+/// `TxPoolCompliment`
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct GetTxPoolCompliment {
     /// Tx Hashes

@@ -8,7 +8,6 @@ authors = ["Boog900"]

 [dependencies]
 cuprate-pruning = { path = "../../pruning" }
-cuprate-wire = { path= "../../net/wire" }
 cuprate-p2p-core = { path = "../p2p-core" }

 tower = { workspace = true, features = ["util"] }

@@ -29,3 +28,6 @@ borsh = { workspace = true, features = ["derive", "std"]}
 cuprate-test-utils = {path = "../../test-utils"}

 tokio = { workspace = true, features = ["rt-multi-thread", "macros"]}
+
+[lints]
+workspace = true
@@ -36,7 +36,7 @@ use crate::{
 mod tests;

 /// An entry in the connected list.
-pub struct ConnectionPeerEntry<Z: NetworkZone> {
+pub(crate) struct ConnectionPeerEntry<Z: NetworkZone> {
     addr: Option<Z::Addr>,
     id: u64,
     handle: ConnectionHandle,

@@ -109,14 +109,14 @@ impl<Z: BorshNetworkZone> AddressBook<Z> {
             match handle.poll_unpin(cx) {
                 Poll::Pending => return,
                 Poll::Ready(Ok(Err(e))) => {
-                    tracing::error!("Could not save peer list to disk, got error: {}", e)
+                    tracing::error!("Could not save peer list to disk, got error: {e}");
                 }
                 Poll::Ready(Err(e)) => {
                     if e.is_panic() {
                         panic::resume_unwind(e.into_panic())
                     }
                 }
-                _ => (),
+                Poll::Ready(_) => (),
             }
         }
         // the task is finished.

@@ -144,6 +144,7 @@ impl<Z: BorshNetworkZone> AddressBook<Z> {
         let mut internal_addr_disconnected = Vec::new();
         let mut addrs_to_ban = Vec::new();

+        #[expect(clippy::iter_over_hash_type, reason = "ordering doesn't matter here")]
         for (internal_addr, peer) in &mut self.connected_peers {
             if let Some(time) = peer.handle.check_should_ban() {
                 match internal_addr {

@@ -158,7 +159,7 @@ impl<Z: BorshNetworkZone> AddressBook<Z> {
             }
         }

-        for (addr, time) in addrs_to_ban.into_iter() {
+        for (addr, time) in addrs_to_ban {
             self.ban_peer(addr, time);
         }

@@ -172,12 +173,7 @@ impl<Z: BorshNetworkZone> AddressBook<Z> {
            .remove(&addr);

        // If the amount of peers with this ban id is 0 remove the whole set.
-       if self
-           .connected_peers_ban_id
-           .get(&addr.ban_id())
-           .unwrap()
-           .is_empty()
-       {
+       if self.connected_peers_ban_id[&addr.ban_id()].is_empty() {
            self.connected_peers_ban_id.remove(&addr.ban_id());
        }
        // remove the peer from the anchor list.
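Indexing a map with `[]` panics on a missing key exactly like `.get(..).unwrap()` does, so collapsing the five-line chain into one indexing expression keeps behavior while reading much better. In miniature, with `std`'s `HashMap`:

use std::collections::HashMap;

// `map[&k]` is the terse spelling of `map.get(&k).unwrap()` when a missing
// key would be a bug anyway.
fn set_is_empty(map: &HashMap<u32, Vec<u32>>, k: u32) -> bool {
    map[&k].is_empty()
}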
@@ -188,7 +184,7 @@ impl<Z: BorshNetworkZone> AddressBook<Z> {

     fn ban_peer(&mut self, addr: Z::Addr, time: Duration) {
         if self.banned_peers.contains_key(&addr.ban_id()) {
-            tracing::error!("Tried to ban peer twice, this shouldn't happen.")
+            tracing::error!("Tried to ban peer twice, this shouldn't happen.");
         }

         if let Some(connected_peers_with_ban_id) = self.connected_peers_ban_id.get(&addr.ban_id()) {

@@ -242,10 +238,10 @@ impl<Z: BorshNetworkZone> AddressBook<Z> {
         peer_list.retain_mut(|peer| {
             peer.adr.make_canonical();

-            if !peer.adr.should_add_to_peer_list() {
-                false
-            } else {
+            if peer.adr.should_add_to_peer_list() {
                 !self.is_peer_banned(&peer.adr)
+            } else {
+                false
             }
             // TODO: check rpc/ p2p ports not the same
         });

@@ -391,7 +387,7 @@ impl<Z: BorshNetworkZone> Service<AddressBookRequest<Z>> for AddressBook<Z> {
                     rpc_credits_per_hash,
                 },
             )
-            .map(|_| AddressBookResponse::Ok),
+            .map(|()| AddressBookResponse::Ok),
             AddressBookRequest::IncomingPeerList(peer_list) => {
                 self.handle_incoming_peer_list(peer_list);
                 Ok(AddressBookResponse::Ok)

@@ -109,7 +109,7 @@ async fn add_new_peer_already_connected() {
             },
         ),
         Err(AddressBookError::PeerAlreadyConnected)
-    )
+    );
 }

 #[tokio::test]

@@ -143,5 +143,5 @@ async fn banned_peer_removed_from_peer_lists() {
             .unwrap()
             .into_inner(),
         TestNetZoneAddr(1)
-    )
+    );
 }
@@ -7,31 +7,31 @@ use cuprate_p2p_core::{services::ZoneSpecificPeerListEntryBase, NetZoneAddress,
 use cuprate_pruning::{PruningSeed, CRYPTONOTE_MAX_BLOCK_HEIGHT};

 #[cfg(test)]
-pub mod tests;
+pub(crate) mod tests;

 /// A Peer list in the address book.
 ///
 /// This could either be the white list or gray list.
 #[derive(Debug)]
-pub struct PeerList<Z: NetworkZone> {
+pub(crate) struct PeerList<Z: NetworkZone> {
     /// The peers with their peer data.
     pub peers: IndexMap<Z::Addr, ZoneSpecificPeerListEntryBase<Z::Addr>>,
     /// An index of Pruning seed to address, so can quickly grab peers with the blocks
     /// we want.
     ///
-    /// Pruning seeds are sorted by first their log_stripes and then their stripe.
+    /// Pruning seeds are sorted by first their `log_stripes` and then their stripe.
     /// This means the first peers in this list will store more blocks than peers
     /// later on. So when we need a peer with a certain block we look at the peers
     /// storing more blocks first then work our way to the peers storing less.
     ///
     pruning_seeds: BTreeMap<PruningSeed, Vec<Z::Addr>>,
-    /// A hashmap linking ban_ids to addresses.
+    /// A hashmap linking `ban_ids` to addresses.
     ban_ids: HashMap<<Z::Addr as NetZoneAddress>::BanID, Vec<Z::Addr>>,
 }

 impl<Z: NetworkZone> PeerList<Z> {
     /// Creates a new peer list.
-    pub fn new(list: Vec<ZoneSpecificPeerListEntryBase<Z::Addr>>) -> PeerList<Z> {
+    pub(crate) fn new(list: Vec<ZoneSpecificPeerListEntryBase<Z::Addr>>) -> Self {
         let mut peers = IndexMap::with_capacity(list.len());
         let mut pruning_seeds = BTreeMap::new();
         let mut ban_ids = HashMap::with_capacity(list.len());

@@ -49,7 +49,7 @@ impl<Z: NetworkZone> PeerList<Z> {

             peers.insert(peer.adr, peer);
         }
-        PeerList {
+        Self {
             peers,
             pruning_seeds,
             ban_ids,

@@ -57,21 +57,20 @@ impl<Z: NetworkZone> PeerList<Z> {
     }

     /// Gets the length of the peer list
-    pub fn len(&self) -> usize {
+    pub(crate) fn len(&self) -> usize {
         self.peers.len()
     }

     /// Adds a new peer to the peer list
-    pub fn add_new_peer(&mut self, peer: ZoneSpecificPeerListEntryBase<Z::Addr>) {
+    pub(crate) fn add_new_peer(&mut self, peer: ZoneSpecificPeerListEntryBase<Z::Addr>) {
         if self.peers.insert(peer.adr, peer).is_none() {
-            // It's more clear with this
-            #[allow(clippy::unwrap_or_default)]
+            #[expect(clippy::unwrap_or_default, reason = "It's more clear with this")]
             self.pruning_seeds
                 .entry(peer.pruning_seed)
                 .or_insert_with(Vec::new)
                 .push(peer.adr);

-            #[allow(clippy::unwrap_or_default)]
+            #[expect(clippy::unwrap_or_default)]
             self.ban_ids
                 .entry(peer.adr.ban_id())
                 .or_insert_with(Vec::new)
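Several hunks in this commit trade `#[allow(...)]` for `#[expect(...)]`, which is stable since Rust 1.81: both silence the lint, but `expect` additionally warns via `unfulfilled_lint_expectations` if the suppressed lint ever stops firing, so stale suppressions surface on their own. A sketch of the attribute applied to the same lint as above:

// `expect` is a self-cleaning `allow`: if `or_insert_with(Vec::new)` is ever
// rewritten to `or_default()`, the attribute itself starts warning.
#[expect(clippy::unwrap_or_default, reason = "It's more clear with this")]
fn push_entry(map: &mut std::collections::BTreeMap<u32, Vec<u32>>) {
    map.entry(1).or_insert_with(Vec::new).push(2);
}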
@@ -85,7 +84,7 @@ impl<Z: NetworkZone> PeerList<Z> {
     /// list.
     ///
     /// The given peer will be removed from the peer list.
-    pub fn take_random_peer<R: Rng>(
+    pub(crate) fn take_random_peer<R: Rng>(
         &mut self,
         r: &mut R,
         block_needed: Option<usize>,

@@ -127,7 +126,7 @@ impl<Z: NetworkZone> PeerList<Z> {
         None
     }

-    pub fn get_random_peers<R: Rng>(
+    pub(crate) fn get_random_peers<R: Rng>(
         &self,
         r: &mut R,
         len: usize,

@@ -142,7 +141,7 @@ impl<Z: NetworkZone> PeerList<Z> {
     }

     /// Returns a mutable reference to a peer.
-    pub fn get_peer_mut(
+    pub(crate) fn get_peer_mut(
         &mut self,
         peer: &Z::Addr,
     ) -> Option<&mut ZoneSpecificPeerListEntryBase<Z::Addr>> {

@@ -150,7 +149,7 @@ impl<Z: NetworkZone> PeerList<Z> {
     }

     /// Returns true if the list contains this peer.
-    pub fn contains_peer(&self, peer: &Z::Addr) -> bool {
+    pub(crate) fn contains_peer(&self, peer: &Z::Addr) -> bool {
         self.peers.contains_key(peer)
     }

@@ -189,11 +188,11 @@ impl<Z: NetworkZone> PeerList<Z> {
     /// MUST NOT BE USED ALONE
     fn remove_peer_from_all_idxs(&mut self, peer: &ZoneSpecificPeerListEntryBase<Z::Addr>) {
         self.remove_peer_pruning_idx(peer);
-        self.remove_peer_ban_idx(peer)
+        self.remove_peer_ban_idx(peer);
     }

     /// Removes a peer from the peer list
-    pub fn remove_peer(
+    pub(crate) fn remove_peer(
         &mut self,
         peer: &Z::Addr,
     ) -> Option<ZoneSpecificPeerListEntryBase<Z::Addr>> {

@@ -203,7 +202,7 @@ impl<Z: NetworkZone> PeerList<Z> {
     }

     /// Removes all peers with a specific ban id.
-    pub fn remove_peers_with_ban_id(&mut self, ban_id: &<Z::Addr as NetZoneAddress>::BanID) {
+    pub(crate) fn remove_peers_with_ban_id(&mut self, ban_id: &<Z::Addr as NetZoneAddress>::BanID) {
         let Some(addresses) = self.ban_ids.get(ban_id) else {
             // No peers to ban
             return;

@@ -217,8 +216,8 @@ impl<Z: NetworkZone> PeerList<Z> {
     /// Tries to reduce the peer list to `new_len`.
     ///
     /// This function could keep the list bigger than `new_len` if `must_keep_peers`s length
-    /// is larger than new_len, in that case we will remove as much as we can.
-    pub fn reduce_list(&mut self, must_keep_peers: &HashSet<Z::Addr>, new_len: usize) {
+    /// is larger than `new_len`, in that case we will remove as much as we can.
+    pub(crate) fn reduce_list(&mut self, must_keep_peers: &HashSet<Z::Addr>, new_len: usize) {
         if new_len >= self.len() {
             return;
         }

@@ -14,7 +14,7 @@ fn make_fake_peer(
 ) -> ZoneSpecificPeerListEntryBase<TestNetZoneAddr> {
     ZoneSpecificPeerListEntryBase {
         adr: TestNetZoneAddr(id),
-        id: id as u64,
+        id: u64::from(id),
         last_seen: 0,
         pruning_seed: PruningSeed::decompress(pruning_seed.unwrap_or(0)).unwrap(),
         rpc_port: 0,
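`u64::from(id)` and `id as u64` compile to the same widening here, but `From` only exists for lossless conversions, so the call fails to compile if the source type ever changes to something that cannot widen safely, whereas `as` would silently truncate. In isolation:

fn widen(id: u32) -> u64 {
    // Lossless by construction; if `id` later became a `u128`, this would be
    // a compile error instead of a silent truncation.
    u64::from(id)
}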
@@ -22,14 +22,14 @@ fn make_fake_peer(
     }
 }

-pub fn make_fake_peer_list(
+pub(crate) fn make_fake_peer_list(
     start_idx: u32,
     numb_o_peers: u32,
 ) -> PeerList<TestNetZone<true, true, true>> {
     let mut peer_list = Vec::with_capacity(numb_o_peers as usize);

     for idx in start_idx..(start_idx + numb_o_peers) {
-        peer_list.push(make_fake_peer(idx, None))
+        peer_list.push(make_fake_peer(idx, None));
     }

     PeerList::new(peer_list)

@@ -50,7 +50,7 @@ fn make_fake_peer_list_with_random_pruning_seeds(
             } else {
                 r.gen_range(384..=391)
             }),
-        ))
+        ));
     }
     PeerList::new(peer_list)
 }

@@ -70,7 +70,7 @@ fn peer_list_reduce_length() {
 #[test]
 fn peer_list_reduce_length_with_peers_we_need() {
     let mut peer_list = make_fake_peer_list(0, 500);
-    let must_keep_peers = HashSet::from_iter(peer_list.peers.keys().copied());
+    let must_keep_peers = peer_list.peers.keys().copied().collect::<HashSet<_>>();

     let target_len = 49;

@@ -92,7 +92,7 @@ fn peer_list_remove_specific_peer() {
     let peers = peer_list.peers;

     for (_, addrs) in pruning_idxs {
-        addrs.iter().for_each(|adr| assert_ne!(adr, &peer.adr))
+        addrs.iter().for_each(|adr| assert_ne!(adr, &peer.adr));
     }

     assert!(!peers.contains_key(&peer.adr));

@@ -104,13 +104,13 @@ fn peer_list_pruning_idxs_are_correct() {
     let mut total_len = 0;

     for (seed, list) in peer_list.pruning_seeds {
-        for peer in list.iter() {
+        for peer in &list {
             assert_eq!(peer_list.peers.get(peer).unwrap().pruning_seed, seed);
             total_len += 1;
         }
     }

-    assert_eq!(total_len, peer_list.peers.len())
+    assert_eq!(total_len, peer_list.peers.len());
 }

 #[test]

@@ -122,11 +122,7 @@ fn peer_list_add_new_peer() {

     assert_eq!(peer_list.len(), 11);
     assert_eq!(peer_list.peers.get(&new_peer.adr), Some(&new_peer));
-    assert!(peer_list
-        .pruning_seeds
-        .get(&new_peer.pruning_seed)
-        .unwrap()
-        .contains(&new_peer.adr));
+    assert!(peer_list.pruning_seeds[&new_peer.pruning_seed].contains(&new_peer.adr));
 }

 #[test]

@@ -164,7 +160,7 @@ fn peer_list_get_peer_with_block() {
     assert!(peer
         .pruning_seed
         .get_next_unpruned_block(1, 1_000_000)
-        .is_ok())
+        .is_ok());
 }

 #[test]

@@ -1,3 +1,8 @@
+#![expect(
+    single_use_lifetimes,
+    reason = "false positive on generated derive code on `SerPeerDataV1`"
+)]
+
 use std::fs;

 use borsh::{from_slice, to_vec, BorshDeserialize, BorshSerialize};
@ -21,7 +26,7 @@ struct DeserPeerDataV1<A: NetZoneAddress> {
|
||||||
gray_list: Vec<ZoneSpecificPeerListEntryBase<A>>,
|
gray_list: Vec<ZoneSpecificPeerListEntryBase<A>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn save_peers_to_disk<Z: BorshNetworkZone>(
|
pub(crate) fn save_peers_to_disk<Z: BorshNetworkZone>(
|
||||||
cfg: &AddressBookConfig,
|
cfg: &AddressBookConfig,
|
||||||
white_list: &PeerList<Z>,
|
white_list: &PeerList<Z>,
|
||||||
gray_list: &PeerList<Z>,
|
gray_list: &PeerList<Z>,
|
||||||
|
@ -38,7 +43,7 @@ pub fn save_peers_to_disk<Z: BorshNetworkZone>(
|
||||||
spawn_blocking(move || fs::write(&file, &data))
|
spawn_blocking(move || fs::write(&file, &data))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn read_peers_from_disk<Z: BorshNetworkZone>(
|
pub(crate) async fn read_peers_from_disk<Z: BorshNetworkZone>(
|
||||||
cfg: &AddressBookConfig,
|
cfg: &AddressBookConfig,
|
||||||
) -> Result<
|
) -> Result<
|
||||||
(
|
(
|
||||||
|
|
|
@ -25,3 +25,6 @@ thiserror = { workspace = true }
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
tokio = { workspace = true, features = ["rt-multi-thread", "macros", "sync"] }
|
tokio = { workspace = true, features = ["rt-multi-thread", "macros", "sync"] }
|
||||||
proptest = { workspace = true, features = ["default"] }
|
proptest = { workspace = true, features = ["default"] }
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
|
@ -8,7 +8,7 @@ use std::{
|
||||||
/// (1 - ep) is the probability that a transaction travels for `k` hops before a nodes embargo timeout fires, this constant is (1 - ep).
|
/// (1 - ep) is the probability that a transaction travels for `k` hops before a nodes embargo timeout fires, this constant is (1 - ep).
|
||||||
const EMBARGO_FULL_TRAVEL_PROBABILITY: f64 = 0.90;
|
const EMBARGO_FULL_TRAVEL_PROBABILITY: f64 = 0.90;
|
||||||
|
|
||||||
/// The graph type to use for dandelion routing, the dandelion paper recommends [Graph::FourRegular].
|
/// The graph type to use for dandelion routing, the dandelion paper recommends [`Graph::FourRegular`].
|
||||||
///
|
///
|
||||||
/// The decision between line graphs and 4-regular graphs depend on the priorities of the system, if
|
/// The decision between line graphs and 4-regular graphs depend on the priorities of the system, if
|
||||||
/// linkability of transactions is a first order concern then line graphs may be better, however 4-regular graphs
|
/// linkability of transactions is a first order concern then line graphs may be better, however 4-regular graphs
|
||||||
|
@ -66,7 +66,7 @@ impl DandelionConfig {
|
||||||
/// Returns the number of outbound peers to use to stem transactions.
|
/// Returns the number of outbound peers to use to stem transactions.
|
||||||
///
|
///
|
||||||
/// This value depends on the [`Graph`] chosen.
|
/// This value depends on the [`Graph`] chosen.
|
||||||
pub fn number_of_stems(&self) -> usize {
|
pub const fn number_of_stems(&self) -> usize {
|
||||||
match self.graph {
|
match self.graph {
|
||||||
Graph::Line => 1,
|
Graph::Line => 1,
|
||||||
Graph::FourRegular => 2,
|
Graph::FourRegular => 2,
|
||||||
@@ -26,7 +26,7 @@
 //! The diffuse service should have a request of [`DiffuseRequest`](traits::DiffuseRequest) and it's error
 //! should be [`tower::BoxError`].
 //!
-//! ## Outbound Peer TryStream
+//! ## Outbound Peer `TryStream`
 //!
 //! The outbound peer [`TryStream`](futures::TryStream) should provide a stream of randomly selected outbound
 //! peers, these peers will then be used to route stem txs to.
@@ -37,7 +37,7 @@
 //! ## Peer Service
 //!
 //! This service represents a connection to an individual peer, this should be returned from the Outbound Peer
-//! TryStream. This should immediately send the transaction to the peer when requested, it should _not_ set
+//! `TryStream`. This should immediately send the transaction to the peer when requested, it should _not_ set
 //! a timer.
 //!
 //! The peer service should have a request of [`StemRequest`](traits::StemRequest) and its error
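
Note: these module docs describe the two service shapes the router consumes. A minimal sketch of both using `tower::service_fn` (the Cargo.toml in this diff already enables tower's "util" feature); `DiffuseRequest`/`StemRequest` here are local stand-ins for the wrappers in the crate's `traits` module, and the bodies are placeholders:

```rust
use tower::{service_fn, BoxError, Service, ServiceExt};

struct DiffuseRequest<Tx>(Tx);
struct StemRequest<Tx>(Tx);

#[tokio::main] // assumes tokio with the "macros" and "rt" features
async fn main() -> Result<(), BoxError> {
    // Diffuse service: broadcast the tx to all connected peers at once.
    let mut diffuse = service_fn(|DiffuseRequest(tx): DiffuseRequest<Vec<u8>>| async move {
        println!("diffusing {} bytes to all peers", tx.len());
        Ok::<_, BoxError>(())
    });

    // Peer service: forward the tx to one peer immediately; the embargo
    // timer is the pool's job, not this service's.
    let mut peer = service_fn(|StemRequest(tx): StemRequest<Vec<u8>>| async move {
        println!("stemming {} bytes to one peer", tx.len());
        Ok::<_, BoxError>(())
    });

    diffuse.ready().await?.call(DiffuseRequest(vec![0; 32])).await?;
    peer.ready().await?.call(StemRequest(vec![0; 32])).await?;
    Ok(())
}
```
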
@@ -30,7 +30,7 @@ pub struct IncomingTxBuilder<const RS: bool, const DBS: bool, Tx, TxId, PeerId>
 
 impl<Tx, TxId, PeerId> IncomingTxBuilder<false, false, Tx, TxId, PeerId> {
     /// Creates a new [`IncomingTxBuilder`].
-    pub fn new(tx: Tx, tx_id: TxId) -> Self {
+    pub const fn new(tx: Tx, tx_id: TxId) -> Self {
         Self {
             tx,
             tx_id,
@@ -88,9 +88,7 @@ where
                 .insert(peer.clone());
         }
 
-        let state = from
-            .map(|from| TxState::Stem { from })
-            .unwrap_or(TxState::Local);
+        let state = from.map_or(TxState::Local, |from| TxState::Stem { from });
 
         let fut = self
             .dandelion_router
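
Note: the `.map(..).unwrap_or(..)` chain collapses to a single `Option::map_or`, which is what clippy's `map_unwrap_or` lint asks for. An equivalence check with stand-in values:

```rust
// Both forms build the same value; `map_or` takes the default first, then
// the mapping closure applied when the `Option` is `Some`.
fn main() {
    let from: Option<&str> = None;

    let a = from
        .map(|f| format!("Stem {{ from: {f} }}"))
        .unwrap_or(String::from("Local"));
    let b = from.map_or(String::from("Local"), |f| format!("Stem {{ from: {f} }}"));

    assert_eq!(a, b);
    assert_eq!(b, "Local");
}
```
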
@@ -280,13 +278,15 @@ where
             };
 
             if let Err(e) = self.handle_incoming_tx(tx, routing_state, tx_id).await {
+                #[expect(clippy::let_underscore_must_use, reason = "dropped receivers can be ignored")]
                 let _ = res_tx.send(());
 
                 tracing::error!("Error handling transaction in dandelion pool: {e}");
                 return;
             }
-            let _ = res_tx.send(());
 
+            #[expect(clippy::let_underscore_must_use)]
+            let _ = res_tx.send(());
         }
     }
 }
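
Note: `#[expect]` (stabilized in Rust 1.81) replaces `#[allow]` here. Unlike `#[allow]`, it warns when the suppressed lint stops firing, so the suppression cannot go stale. A small sketch, assuming the lint is enabled crate-wide:

```rust
#![warn(clippy::let_underscore_must_use)]

// `#[expect]` suppresses the lint exactly like `#[allow]`, but if the lint
// ever stops firing the compiler flags the attribute as unfulfilled.
fn send(_msg: ()) -> Result<(), ()> {
    Ok(())
}

fn main() {
    #[expect(clippy::let_underscore_must_use, reason = "a dropped receiver is fine")]
    let _ = send(());
}
```
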
@@ -140,7 +140,7 @@ where
             State::Stem
         };
 
-        DandelionRouter {
+        Self {
             outbound_peer_discover: Box::pin(outbound_peer_discover),
             broadcast_svc,
             current_state,
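
Note: returning `Self` instead of repeating the type name (clippy's `use_self`) keeps constructors correct under renames. Illustrative stand-in type:

```rust
// Stand-in struct: `Self { .. }` in a constructor stays valid even if the
// type is later renamed, unlike spelling out `DandelionRouter { .. }`.
struct DandelionRouter {
    current_state: u8,
}

impl DandelionRouter {
    fn new(current_state: u8) -> Self {
        Self { current_state } // was `DandelionRouter { current_state }`
    }
}

fn main() {
    let router = DandelionRouter::new(0);
    println!("state: {}", router.current_state);
}
```
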
@@ -198,7 +198,7 @@ where
     fn stem_tx(
         &mut self,
         tx: Tx,
-        from: Id,
+        from: &Id,
     ) -> BoxFuture<'static, Result<State, DandelionRouterError>> {
         if self.stem_peers.is_empty() {
             tracing::debug!("Stem peers are empty, fluffing stem transaction.");
@@ -216,7 +216,7 @@ where
         });
 
         let Some(peer) = self.stem_peers.get_mut(stem_route) else {
-            self.stem_routes.remove(&from);
+            self.stem_routes.remove(from);
             continue;
         };
 
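
Note: `stem_tx` now borrows the ID, which is enough because `HashMap::remove` only needs a reference to the key. A sketch with a stand-in `Id` type showing the caller keeping ownership:

```rust
use std::collections::HashMap;

// Stand-in `Id`: borrowing `from` suffices because `HashMap::remove` takes
// `&Q` where the key type implements `Borrow<Q>`.
type Id = String;

fn remove_route(routes: &mut HashMap<Id, usize>, from: &Id) {
    routes.remove(from);
}

fn main() {
    let mut routes: HashMap<Id, usize> = HashMap::new();
    let id: Id = "peer-1".into();
    routes.insert(id.clone(), 0);

    remove_route(&mut routes, &id);

    assert!(routes.is_empty());
    println!("route for {id} removed"); // `id` was never moved
}
```
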
@@ -302,7 +302,7 @@ where
                     tracing::debug!(
                         parent: span,
                         "Peer returned an error on `poll_ready`: {e}, removing from router.",
-                    )
+                    );
                 })
                 .is_ok(),
             Poll::Pending => {
@@ -341,7 +341,7 @@ where
             State::Stem => {
                 tracing::trace!(parent: &self.span, "Steming transaction");
 
-                self.stem_tx(req.tx, from)
+                self.stem_tx(req.tx, &from)
             }
         },
         TxState::Local => {
@@ -12,7 +12,7 @@ use crate::{
     OutboundPeer, State,
 };
 
-pub fn mock_discover_svc<Req: Send + 'static>() -> (
+pub(crate) fn mock_discover_svc<Req: Send + 'static>() -> (
     impl Stream<
         Item = Result<
             OutboundPeer<
@@ -49,7 +49,7 @@ pub fn mock_discover_svc<Req: Send + 'static>() -> (
     (discover, rx)
 }
 
-pub fn mock_broadcast_svc<Req: Send + 'static>() -> (
+pub(crate) fn mock_broadcast_svc<Req: Send + 'static>() -> (
     impl Service<
         Req,
         Future = impl Future<Output = Result<(), tower::BoxError>> + Send + 'static,
@@ -70,8 +70,8 @@ pub fn mock_broadcast_svc<Req: Send + 'static>() -> (
     )
 }
 
-#[allow(clippy::type_complexity)] // just test code.
-pub fn mock_in_memory_backing_pool<
+#[expect(clippy::type_complexity, reason = "just test code.")]
+pub(crate) fn mock_in_memory_backing_pool<
     Tx: Clone + Send + 'static,
     TxID: Clone + Hash + Eq + Send + 'static,
 >() -> (
@@ -85,11 +85,11 @@ pub fn mock_in_memory_backing_pool<
     Arc<std::sync::Mutex<HashMap<TxID, (Tx, State)>>>,
 ) {
     let txs = Arc::new(std::sync::Mutex::new(HashMap::new()));
-    let txs_2 = txs.clone();
+    let txs_2 = Arc::clone(&txs);
 
     (
         service_fn(move |req: TxStoreRequest<TxID>| {
-            let txs = txs.clone();
+            let txs = Arc::clone(&txs);
             async move {
                 match req {
                     TxStoreRequest::Get(tx_id) => {
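
Note: `Arc::clone(&x)` and `x.clone()` are behaviorally identical; the explicit form (what clippy's `clone_on_ref_ptr` enforces) signals that only the reference count is bumped, not the data behind the pointer. A quick check:

```rust
use std::sync::Arc;

fn main() {
    let txs = Arc::new(vec![1u8, 2, 3]);
    let txs_2 = Arc::clone(&txs); // cheap pointer copy + refcount increment

    assert_eq!(Arc::strong_count(&txs), 2);
    assert!(Arc::ptr_eq(&txs, &txs_2)); // both point at the same allocation
}
```
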
@@ -39,5 +39,5 @@ async fn basic_functionality() {
     // TODO: the DandelionPoolManager doesn't handle adding txs to the pool, add more tests here to test
     // all functionality.
     //assert!(pool.lock().unwrap().contains_key(&1));
-    assert!(broadcast_rx.try_recv().is_ok())
+    assert!(broadcast_rx.try_recv().is_ok());
 }
@@ -14,13 +14,14 @@ cuprate-helper = { path = "../../helper", features = ["asynch"], default-feature
 cuprate-wire = { path = "../../net/wire", features = ["tracing"] }
 cuprate-pruning = { path = "../../pruning" }
 
-tokio = { workspace = true, features = ["net", "sync", "macros", "time"]}
+tokio = { workspace = true, features = ["net", "sync", "macros", "time", "rt", "rt-multi-thread"]}
 tokio-util = { workspace = true, features = ["codec"] }
 tokio-stream = { workspace = true, features = ["sync"]}
 futures = { workspace = true, features = ["std"] }
 async-trait = { workspace = true }
 tower = { workspace = true, features = ["util", "tracing"] }
 
+cfg-if = { workspace = true }
 thiserror = { workspace = true }
 tracing = { workspace = true, features = ["std", "attributes"] }
 hex-literal = { workspace = true }

@@ -31,6 +32,7 @@ borsh = { workspace = true, features = ["derive", "std"], optional = true }
 cuprate-test-utils = { path = "../../test-utils" }
 
 hex = { workspace = true, features = ["std"] }
-tokio = { workspace = true, features = ["net", "rt-multi-thread", "rt", "macros"]}
 tokio-test = { workspace = true }
-tracing-subscriber = { workspace = true }
+
+[lints]
+workspace = true
Some files were not shown because too many files have changed in this diff.