Merge branch 'main' into rpc-handler

This commit is contained in:
hinto.janai 2024-09-02 16:50:49 -04:00
commit 29587d9bb4
No known key found for this signature in database
GPG key ID: D47CE05FA175A499
68 changed files with 443 additions and 837 deletions

47
Cargo.lock generated
View file

@ -17,6 +17,12 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "adler2"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
[[package]] [[package]]
name = "ahash" name = "ahash"
version = "0.8.11" version = "0.8.11"
@ -160,7 +166,7 @@ dependencies = [
"cc", "cc",
"cfg-if", "cfg-if",
"libc", "libc",
"miniz_oxide", "miniz_oxide 0.7.3",
"object", "object",
"rustc-demangle", "rustc-demangle",
] ]
@ -646,6 +652,7 @@ version = "0.5.0"
dependencies = [ dependencies = [
"bytes", "bytes",
"cuprate-fixed-bytes", "cuprate-fixed-bytes",
"cuprate-helper",
"hex", "hex",
"paste", "paste",
"ref-cast", "ref-cast",
@ -713,6 +720,7 @@ version = "0.1.0"
dependencies = [ dependencies = [
"bitflags 2.5.0", "bitflags 2.5.0",
"bytes", "bytes",
"cuprate-helper",
"futures", "futures",
"proptest", "proptest",
"rand", "rand",
@ -796,6 +804,7 @@ dependencies = [
"cuprate-helper", "cuprate-helper",
"cuprate-json-rpc", "cuprate-json-rpc",
"cuprate-rpc-types", "cuprate-rpc-types",
"cuprate-test-utils",
"futures", "futures",
"paste", "paste",
"serde", "serde",
@ -811,12 +820,9 @@ version = "0.0.0"
dependencies = [ dependencies = [
"cuprate-epee-encoding", "cuprate-epee-encoding",
"cuprate-fixed-bytes", "cuprate-fixed-bytes",
"cuprate-json-rpc",
"cuprate-test-utils", "cuprate-test-utils",
"cuprate-types", "cuprate-types",
"monero-serai",
"paste", "paste",
"pretty_assertions",
"serde", "serde",
"serde_json", "serde_json",
] ]
@ -873,7 +879,6 @@ dependencies = [
name = "cuprate-types" name = "cuprate-types"
version = "0.0.0" version = "0.0.0"
dependencies = [ dependencies = [
"borsh",
"bytes", "bytes",
"cuprate-epee-encoding", "cuprate-epee-encoding",
"cuprate-fixed-bytes", "cuprate-fixed-bytes",
@ -893,6 +898,7 @@ dependencies = [
"bytes", "bytes",
"cuprate-epee-encoding", "cuprate-epee-encoding",
"cuprate-fixed-bytes", "cuprate-fixed-bytes",
"cuprate-helper",
"cuprate-levin", "cuprate-levin",
"cuprate-types", "cuprate-types",
"hex", "hex",
@ -1140,12 +1146,12 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d"
[[package]] [[package]]
name = "flate2" name = "flate2"
version = "1.0.30" version = "1.0.33"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253"
dependencies = [ dependencies = [
"crc32fast", "crc32fast",
"miniz_oxide", "miniz_oxide 0.8.0",
] ]
[[package]] [[package]]
@ -1299,9 +1305,9 @@ dependencies = [
[[package]] [[package]]
name = "h2" name = "h2"
version = "0.4.5" version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205"
dependencies = [ dependencies = [
"atomic-waker", "atomic-waker",
"bytes", "bytes",
@ -1795,6 +1801,15 @@ dependencies = [
"adler", "adler",
] ]
[[package]]
name = "miniz_oxide"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1"
dependencies = [
"adler2",
]
[[package]] [[package]]
name = "mio" name = "mio"
version = "0.8.11" version = "0.8.11"
@ -2457,9 +2472,9 @@ dependencies = [
[[package]] [[package]]
name = "rustls-pki-types" name = "rustls-pki-types"
version = "1.7.0" version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0"
[[package]] [[package]]
name = "rustls-webpki" name = "rustls-webpki"
@ -3027,9 +3042,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]] [[package]]
name = "ureq" name = "ureq"
version = "2.10.0" version = "2.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72139d247e5f97a3eff96229a7ae85ead5328a39efe76f8bf5a06313d505b6ea" checksum = "b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a"
dependencies = [ dependencies = [
"base64", "base64",
"flate2", "flate2",
@ -3152,9 +3167,9 @@ checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96"
[[package]] [[package]]
name = "webpki-roots" name = "webpki-roots"
version = "0.26.3" version = "0.26.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a"
dependencies = [ dependencies = [
"rustls-pki-types", "rustls-pki-types",
] ]

View file

@ -262,6 +262,7 @@ empty_structs_with_brackets = "deny"
empty_enum_variants_with_brackets = "deny" empty_enum_variants_with_brackets = "deny"
empty_drop = "deny" empty_drop = "deny"
clone_on_ref_ptr = "deny" clone_on_ref_ptr = "deny"
upper_case_acronyms = "deny"
# Hot # Hot
# inline_always = "deny" # inline_always = "deny"
@ -278,13 +279,15 @@ clone_on_ref_ptr = "deny"
# allow_attributes_without_reason = "deny" # allow_attributes_without_reason = "deny"
# missing_assert_message = "deny" # missing_assert_message = "deny"
# missing_docs_in_private_items = "deny" # missing_docs_in_private_items = "deny"
# undocumented_unsafe_blocks = "deny" undocumented_unsafe_blocks = "deny"
# multiple_unsafe_ops_per_block = "deny" # multiple_unsafe_ops_per_block = "deny"
# single_char_lifetime_names = "deny" # single_char_lifetime_names = "deny"
# wildcard_enum_match_arm = "deny" # wildcard_enum_match_arm = "deny"
[workspace.lints.rust] [workspace.lints.rust]
# Cold # Cold
future_incompatible = { level = "deny", priority = -1 }
nonstandard_style = { level = "deny", priority = -1 }
absolute_paths_not_starting_with_crate = "deny" absolute_paths_not_starting_with_crate = "deny"
explicit_outlives_requirements = "deny" explicit_outlives_requirements = "deny"
keyword_idents_2018 = "deny" keyword_idents_2018 = "deny"
@ -305,10 +308,11 @@ ambiguous_glob_imports = "deny"
unused_unsafe = "deny" unused_unsafe = "deny"
# Warm # Warm
let_underscore_drop = "deny" let_underscore = { level = "deny", priority = -1 }
unreachable_pub = "deny" unreachable_pub = "deny"
unused_qualifications = "deny" unused_qualifications = "deny"
variant_size_differences = "deny" variant_size_differences = "deny"
non_camel_case_types = "deny"
# Hot # Hot
# unused_results = "deny" # unused_results = "deny"

1
clippy.toml Normal file
View file

@ -0,0 +1 @@
upper-case-acronyms-aggressive = true

View file

@ -11,7 +11,7 @@ proptest = ["dep:proptest", "dep:proptest-derive", "cuprate-types/proptest"]
rayon = ["dep:rayon"] rayon = ["dep:rayon"]
[dependencies] [dependencies]
cuprate-helper = { path = "../../helper", default-features = false, features = ["std"] } cuprate-helper = { path = "../../helper", default-features = false, features = ["std", "cast"] }
cuprate-types = { path = "../../types", default-features = false } cuprate-types = { path = "../../types", default-features = false }
cuprate-cryptonight = {path = "../../cryptonight"} cuprate-cryptonight = {path = "../../cryptonight"}

View file

@ -9,6 +9,8 @@ use proptest::{collection::vec, prelude::*};
use monero_serai::transaction::Output; use monero_serai::transaction::Output;
use cuprate_helper::cast::u64_to_usize;
use super::*; use super::*;
use crate::decomposed_amount::DECOMPOSED_AMOUNTS; use crate::decomposed_amount::DECOMPOSED_AMOUNTS;
@ -164,7 +166,7 @@ prop_compose! {
if timebased || lock_height > 500_000_000 { if timebased || lock_height > 500_000_000 {
Timelock::Time(time_for_time_lock) Timelock::Time(time_for_time_lock)
} else { } else {
Timelock::Block(usize::try_from(lock_height).unwrap()) Timelock::Block(u64_to_usize(lock_height))
} }
} }
} }
@ -179,7 +181,7 @@ prop_compose! {
match ty { match ty {
0 => Timelock::None, 0 => Timelock::None,
1 => Timelock::Time(time_for_time_lock), 1 => Timelock::Time(time_for_time_lock),
_ => Timelock::Block(usize::try_from(lock_height).unwrap()) _ => Timelock::Block(u64_to_usize(lock_height))
} }
} }
} }

View file

@ -14,7 +14,7 @@ use cuprate_consensus_rules::{
miner_tx::MinerTxError, miner_tx::MinerTxError,
ConsensusError, ConsensusError,
}; };
use cuprate_helper::asynch::rayon_spawn_async; use cuprate_helper::{asynch::rayon_spawn_async, cast::u64_to_usize};
use cuprate_types::{ use cuprate_types::{
AltBlockInformation, Chain, ChainId, TransactionVerificationData, AltBlockInformation, Chain, ChainId, TransactionVerificationData,
VerifiedTransactionInformation, VerifiedTransactionInformation,
@ -24,7 +24,7 @@ use crate::{
block::{free::pull_ordered_transactions, PreparedBlock}, block::{free::pull_ordered_transactions, PreparedBlock},
context::{ context::{
difficulty::DifficultyCache, difficulty::DifficultyCache,
rx_vms::RandomXVM, rx_vms::RandomXVm,
weight::{self, BlockWeightsCache}, weight::{self, BlockWeightsCache},
AltChainContextCache, AltChainRequestToken, BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW, AltChainContextCache, AltChainRequestToken, BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW,
}, },
@ -101,7 +101,7 @@ where
// Check the alt block timestamp is in the correct range. // Check the alt block timestamp is in the correct range.
if let Some(median_timestamp) = if let Some(median_timestamp) =
difficulty_cache.median_timestamp(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW.try_into().unwrap()) difficulty_cache.median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW))
{ {
check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)? check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)?
}; };
@ -195,7 +195,7 @@ async fn alt_rx_vm<C>(
parent_chain: Chain, parent_chain: Chain,
alt_chain_context: &mut AltChainContextCache, alt_chain_context: &mut AltChainContextCache,
context_svc: C, context_svc: C,
) -> Result<Option<Arc<RandomXVM>>, ExtendedConsensusError> ) -> Result<Option<Arc<RandomXVm>>, ExtendedConsensusError>
where where
C: Service< C: Service<
BlockChainContextRequest, BlockChainContextRequest,

View file

@ -15,7 +15,7 @@ use cuprate_helper::asynch::rayon_spawn_async;
use crate::{ use crate::{
block::{free::pull_ordered_transactions, PreparedBlock, PreparedBlockExPow}, block::{free::pull_ordered_transactions, PreparedBlock, PreparedBlockExPow},
context::rx_vms::RandomXVM, context::rx_vms::RandomXVm,
transactions::new_tx_verification_data, transactions::new_tx_verification_data,
BlockChainContextRequest, BlockChainContextResponse, ExtendedConsensusError, BlockChainContextRequest, BlockChainContextResponse, ExtendedConsensusError,
VerifyBlockResponse, VerifyBlockResponse,
@ -148,7 +148,7 @@ where
tracing::debug!("New randomX seed in batch, initialising VM"); tracing::debug!("New randomX seed in batch, initialising VM");
let new_vm = rayon_spawn_async(move || { let new_vm = rayon_spawn_async(move || {
Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!")) Arc::new(RandomXVm::new(&new_vm_seed).expect("RandomX VM gave an error on set up!"))
}) })
.await; .await;

View file

@ -33,7 +33,7 @@ mod tokens;
use cuprate_types::Chain; use cuprate_types::Chain;
use difficulty::DifficultyCache; use difficulty::DifficultyCache;
use rx_vms::RandomXVM; use rx_vms::RandomXVm;
use weight::BlockWeightsCache; use weight::BlockWeightsCache;
pub(crate) use alt_chains::{sealed::AltChainRequestToken, AltChainContextCache}; pub(crate) use alt_chains::{sealed::AltChainRequestToken, AltChainContextCache};
@ -236,7 +236,7 @@ pub enum BlockChainContextRequest {
/// seed. /// seed.
/// ///
/// This should include the seed used to init this VM and the VM. /// This should include the seed used to init this VM and the VM.
NewRXVM(([u8; 32], Arc<RandomXVM>)), NewRXVM(([u8; 32], Arc<RandomXVm>)),
/// A request to add a new block to the cache. /// A request to add a new block to the cache.
Update(NewBlockData), Update(NewBlockData),
/// Pop blocks from the cache to the specified height. /// Pop blocks from the cache to the specified height.
@ -313,7 +313,7 @@ pub enum BlockChainContextResponse {
/// Blockchain context response. /// Blockchain context response.
Context(BlockChainContext), Context(BlockChainContext),
/// A map of seed height to RandomX VMs. /// A map of seed height to RandomX VMs.
RxVms(HashMap<usize, Arc<RandomXVM>>), RxVms(HashMap<usize, Arc<RandomXVm>>),
/// A list of difficulties. /// A list of difficulties.
BatchDifficulties(Vec<u128>), BatchDifficulties(Vec<u128>),
/// An alt chain context cache. /// An alt chain context cache.
@ -321,7 +321,7 @@ pub enum BlockChainContextResponse {
/// A difficulty cache for an alt chain. /// A difficulty cache for an alt chain.
AltChainDifficultyCache(DifficultyCache), AltChainDifficultyCache(DifficultyCache),
/// A randomX VM for an alt chain. /// A randomX VM for an alt chain.
AltChainRxVM(Arc<RandomXVM>), AltChainRxVM(Arc<RandomXVm>),
/// A weight cache for an alt chain /// A weight cache for an alt chain
AltChainWeightCache(BlockWeightsCache), AltChainWeightCache(BlockWeightsCache),
/// A generic Ok response. /// A generic Ok response.

View file

@ -11,7 +11,7 @@ use cuprate_types::{
use crate::{ use crate::{
ExtendedConsensusError, ExtendedConsensusError,
__private::Database, __private::Database,
context::{difficulty::DifficultyCache, rx_vms::RandomXVM, weight::BlockWeightsCache}, context::{difficulty::DifficultyCache, rx_vms::RandomXVm, weight::BlockWeightsCache},
}; };
pub(crate) mod sealed { pub(crate) mod sealed {
@ -32,7 +32,7 @@ pub struct AltChainContextCache {
pub difficulty_cache: Option<DifficultyCache>, pub difficulty_cache: Option<DifficultyCache>,
/// A cached RX VM. /// A cached RX VM.
pub cached_rx_vm: Option<(usize, Arc<RandomXVM>)>, pub cached_rx_vm: Option<(usize, Arc<RandomXVm>)>,
/// The chain height of the alt chain. /// The chain height of the alt chain.
pub chain_height: usize, pub chain_height: usize,

View file

@ -9,7 +9,7 @@ use std::{
}; };
use futures::{stream::FuturesOrdered, StreamExt}; use futures::{stream::FuturesOrdered, StreamExt};
use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VMInner}; use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VmInner};
use rayon::prelude::*; use rayon::prelude::*;
use thread_local::ThreadLocal; use thread_local::ThreadLocal;
use tower::ServiceExt; use tower::ServiceExt;
@ -33,16 +33,16 @@ const RX_SEEDS_CACHED: usize = 2;
/// A multithreaded randomX VM. /// A multithreaded randomX VM.
#[derive(Debug)] #[derive(Debug)]
pub struct RandomXVM { pub struct RandomXVm {
/// These RandomX VMs all share the same cache. /// These RandomX VMs all share the same cache.
vms: ThreadLocal<VMInner>, vms: ThreadLocal<VmInner>,
/// The RandomX cache. /// The RandomX cache.
cache: RandomXCache, cache: RandomXCache,
/// The flags used to start the RandomX VMs. /// The flags used to start the RandomX VMs.
flags: RandomXFlag, flags: RandomXFlag,
} }
impl RandomXVM { impl RandomXVm {
/// Create a new multithreaded randomX VM with the provided seed. /// Create a new multithreaded randomX VM with the provided seed.
pub fn new(seed: &[u8; 32]) -> Result<Self, RandomXError> { pub fn new(seed: &[u8; 32]) -> Result<Self, RandomXError> {
// TODO: allow passing in flags. // TODO: allow passing in flags.
@ -50,7 +50,7 @@ impl RandomXVM {
let cache = RandomXCache::new(flags, seed.as_slice())?; let cache = RandomXCache::new(flags, seed.as_slice())?;
Ok(RandomXVM { Ok(RandomXVm {
vms: ThreadLocal::new(), vms: ThreadLocal::new(),
cache, cache,
flags, flags,
@ -58,12 +58,12 @@ impl RandomXVM {
} }
} }
impl RandomX for RandomXVM { impl RandomX for RandomXVm {
type Error = RandomXError; type Error = RandomXError;
fn calculate_hash(&self, buf: &[u8]) -> Result<[u8; 32], Self::Error> { fn calculate_hash(&self, buf: &[u8]) -> Result<[u8; 32], Self::Error> {
self.vms self.vms
.get_or_try(|| VMInner::new(self.flags, Some(self.cache.clone()), None))? .get_or_try(|| VmInner::new(self.flags, Some(self.cache.clone()), None))?
.calculate_hash(buf) .calculate_hash(buf)
.map(|out| out.try_into().unwrap()) .map(|out| out.try_into().unwrap())
} }
@ -72,17 +72,17 @@ impl RandomX for RandomXVM {
/// The randomX VMs cache, keeps the VM needed to calculate the current block's PoW hash (if a VM is needed) and a /// The randomX VMs cache, keeps the VM needed to calculate the current block's PoW hash (if a VM is needed) and a
/// couple more around this VM. /// couple more around this VM.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct RandomXVMCache { pub struct RandomXVmCache {
/// The top [`RX_SEEDS_CACHED`] RX seeds. /// The top [`RX_SEEDS_CACHED`] RX seeds.
pub(crate) seeds: VecDeque<(usize, [u8; 32])>, pub(crate) seeds: VecDeque<(usize, [u8; 32])>,
/// The VMs for `seeds` (if after hf 12, otherwise this will be empty). /// The VMs for `seeds` (if after hf 12, otherwise this will be empty).
pub(crate) vms: HashMap<usize, Arc<RandomXVM>>, pub(crate) vms: HashMap<usize, Arc<RandomXVm>>,
/// A single cached VM that was given to us from a part of Cuprate. /// A single cached VM that was given to us from a part of Cuprate.
pub(crate) cached_vm: Option<([u8; 32], Arc<RandomXVM>)>, pub(crate) cached_vm: Option<([u8; 32], Arc<RandomXVm>)>,
} }
impl RandomXVMCache { impl RandomXVmCache {
#[instrument(name = "init_rx_vm_cache", level = "info", skip(database))] #[instrument(name = "init_rx_vm_cache", level = "info", skip(database))]
pub async fn init_from_chain_height<D: Database + Clone>( pub async fn init_from_chain_height<D: Database + Clone>(
chain_height: usize, chain_height: usize,
@ -106,7 +106,7 @@ impl RandomXVMCache {
.map(|(height, seed)| { .map(|(height, seed)| {
( (
*height, *height,
Arc::new(RandomXVM::new(seed).expect("Failed to create RandomX VM!")), Arc::new(RandomXVm::new(seed).expect("Failed to create RandomX VM!")),
) )
}) })
.collect() .collect()
@ -117,7 +117,7 @@ impl RandomXVMCache {
HashMap::new() HashMap::new()
}; };
Ok(RandomXVMCache { Ok(RandomXVmCache {
seeds, seeds,
vms, vms,
cached_vm: None, cached_vm: None,
@ -125,7 +125,7 @@ impl RandomXVMCache {
} }
/// Add a randomX VM to the cache, with the seed it was created with. /// Add a randomX VM to the cache, with the seed it was created with.
pub fn add_vm(&mut self, vm: ([u8; 32], Arc<RandomXVM>)) { pub fn add_vm(&mut self, vm: ([u8; 32], Arc<RandomXVm>)) {
self.cached_vm.replace(vm); self.cached_vm.replace(vm);
} }
@ -136,7 +136,7 @@ impl RandomXVMCache {
height: usize, height: usize,
chain: Chain, chain: Chain,
database: D, database: D,
) -> Result<Arc<RandomXVM>, ExtendedConsensusError> { ) -> Result<Arc<RandomXVm>, ExtendedConsensusError> {
let seed_height = randomx_seed_height(height); let seed_height = randomx_seed_height(height);
let BlockchainResponse::BlockHash(seed_hash) = database let BlockchainResponse::BlockHash(seed_hash) = database
@ -156,13 +156,13 @@ impl RandomXVMCache {
} }
} }
let alt_vm = rayon_spawn_async(move || Arc::new(RandomXVM::new(&seed_hash).unwrap())).await; let alt_vm = rayon_spawn_async(move || Arc::new(RandomXVm::new(&seed_hash).unwrap())).await;
Ok(alt_vm) Ok(alt_vm)
} }
/// Get the main-chain RandomX VMs. /// Get the main-chain RandomX VMs.
pub async fn get_vms(&mut self) -> HashMap<usize, Arc<RandomXVM>> { pub async fn get_vms(&mut self) -> HashMap<usize, Arc<RandomXVm>> {
match self.seeds.len().checked_sub(self.vms.len()) { match self.seeds.len().checked_sub(self.vms.len()) {
// No difference in the amount of seeds to VMs. // No difference in the amount of seeds to VMs.
Some(0) => (), Some(0) => (),
@ -184,7 +184,7 @@ impl RandomXVMCache {
} }
}; };
rayon_spawn_async(move || Arc::new(RandomXVM::new(&next_seed_hash).unwrap())) rayon_spawn_async(move || Arc::new(RandomXVm::new(&next_seed_hash).unwrap()))
.await .await
}; };
@ -200,7 +200,7 @@ impl RandomXVMCache {
seeds_clone seeds_clone
.par_iter() .par_iter()
.map(|(height, seed)| { .map(|(height, seed)| {
let vm = RandomXVM::new(seed).expect("Failed to create RandomX VM!"); let vm = RandomXVm::new(seed).expect("Failed to create RandomX VM!");
let vm = Arc::new(vm); let vm = Arc::new(vm);
(*height, vm) (*height, vm)
}) })

View file

@ -9,6 +9,7 @@ use tower::ServiceExt;
use tracing::Instrument; use tracing::Instrument;
use cuprate_consensus_rules::blocks::ContextToVerifyBlock; use cuprate_consensus_rules::blocks::ContextToVerifyBlock;
use cuprate_helper::cast::u64_to_usize;
use cuprate_types::{ use cuprate_types::{
blockchain::{BlockchainReadRequest, BlockchainResponse}, blockchain::{BlockchainReadRequest, BlockchainResponse},
Chain, Chain,
@ -45,7 +46,7 @@ pub struct ContextTask<D: Database> {
/// The weight cache. /// The weight cache.
weight_cache: weight::BlockWeightsCache, weight_cache: weight::BlockWeightsCache,
/// The RX VM cache. /// The RX VM cache.
rx_vm_cache: rx_vms::RandomXVMCache, rx_vm_cache: rx_vms::RandomXVmCache,
/// The hard-fork state cache. /// The hard-fork state cache.
hardfork_state: hardforks::HardForkState, hardfork_state: hardforks::HardForkState,
@ -127,7 +128,7 @@ impl<D: Database + Clone + Send + 'static> ContextTask<D> {
let db = database.clone(); let db = database.clone();
let rx_seed_handle = tokio::spawn(async move { let rx_seed_handle = tokio::spawn(async move {
rx_vms::RandomXVMCache::init_from_chain_height(chain_height, &current_hf, db).await rx_vms::RandomXVmCache::init_from_chain_height(chain_height, &current_hf, db).await
}); });
let context_svc = ContextTask { let context_svc = ContextTask {
@ -168,9 +169,9 @@ impl<D: Database + Clone + Send + 'static> ContextTask<D> {
.weight_cache .weight_cache
.effective_median_block_weight(&current_hf), .effective_median_block_weight(&current_hf),
top_hash: self.top_block_hash, top_hash: self.top_block_hash,
median_block_timestamp: self.difficulty_cache.median_timestamp( median_block_timestamp: self
usize::try_from(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW).unwrap(), .difficulty_cache
), .median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)),
chain_height: self.chain_height, chain_height: self.chain_height,
current_hf, current_hf,
next_difficulty: self.difficulty_cache.next_difficulty(&current_hf), next_difficulty: self.difficulty_cache.next_difficulty(&current_hf),

View file

@ -9,7 +9,7 @@ use cuprate_consensus_rules::{
}; };
use crate::{ use crate::{
context::rx_vms::{get_last_rx_seed_heights, RandomXVMCache}, context::rx_vms::{get_last_rx_seed_heights, RandomXVmCache},
tests::mock_db::*, tests::mock_db::*,
}; };
@ -42,7 +42,7 @@ fn rx_heights_consistent() {
async fn rx_vm_created_on_hf_12() { async fn rx_vm_created_on_hf_12() {
let db = DummyDatabaseBuilder::default().finish(Some(10)); let db = DummyDatabaseBuilder::default().finish(Some(10));
let mut cache = RandomXVMCache::init_from_chain_height(10, &HardFork::V11, db) let mut cache = RandomXVmCache::init_from_chain_height(10, &HardFork::V11, db)
.await .await
.unwrap(); .unwrap();
@ -67,7 +67,7 @@ proptest! {
let rt = Builder::new_multi_thread().enable_all().build().unwrap(); let rt = Builder::new_multi_thread().enable_all().build().unwrap();
rt.block_on(async move { rt.block_on(async move {
let cache = RandomXVMCache::init_from_chain_height(10, &hf, db).await.unwrap(); let cache = RandomXVmCache::init_from_chain_height(10, &hf, db).await.unwrap();
assert!(cache.seeds.len() == cache.vms.len() || hf < HardFork::V12); assert!(cache.seeds.len() == cache.vms.len() || hf < HardFork::V12);
}); });
} }

View file

@ -10,14 +10,15 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/consensus"
[features] [features]
# All features on by default. # All features on by default.
default = ["std", "atomic", "asynch", "fs", "num", "map", "time", "thread", "constants"] default = ["std", "atomic", "asynch", "cast", "fs", "num", "map", "time", "thread", "constants"]
std = [] std = []
atomic = ["dep:crossbeam"] atomic = ["dep:crossbeam"]
asynch = ["dep:futures", "dep:rayon"] asynch = ["dep:futures", "dep:rayon"]
cast = []
constants = [] constants = []
fs = ["dep:dirs"] fs = ["dep:dirs"]
num = [] num = []
map = ["dep:monero-serai"] map = ["cast", "dep:monero-serai"]
time = ["dep:chrono", "std"] time = ["dep:chrono", "std"]
thread = ["std", "dep:target_os_lib"] thread = ["std", "dep:target_os_lib"]
@ -39,3 +40,6 @@ target_os_lib = { package = "libc", version = "0.2.151", optional = true }
[dev-dependencies] [dev-dependencies]
tokio = { workspace = true, features = ["full"] } tokio = { workspace = true, features = ["full"] }
[lints]
workspace = true

View file

@ -19,7 +19,7 @@ pub struct InfallibleOneshotReceiver<T>(oneshot::Receiver<T>);
impl<T> From<oneshot::Receiver<T>> for InfallibleOneshotReceiver<T> { impl<T> From<oneshot::Receiver<T>> for InfallibleOneshotReceiver<T> {
fn from(value: oneshot::Receiver<T>) -> Self { fn from(value: oneshot::Receiver<T>) -> Self {
InfallibleOneshotReceiver(value) Self(value)
} }
} }
@ -43,7 +43,7 @@ where
{ {
let (tx, rx) = oneshot::channel(); let (tx, rx) = oneshot::channel();
rayon::spawn(move || { rayon::spawn(move || {
let _ = tx.send(f()); drop(tx.send(f()));
}); });
rx.await.expect("The sender must not be dropped") rx.await.expect("The sender must not be dropped")
} }
@ -62,7 +62,7 @@ mod test {
#[tokio::test] #[tokio::test]
// Assert that basic channel operations work. // Assert that basic channel operations work.
async fn infallible_oneshot_receiver() { async fn infallible_oneshot_receiver() {
let (tx, rx) = futures::channel::oneshot::channel::<String>(); let (tx, rx) = oneshot::channel::<String>();
let msg = "hello world!".to_string(); let msg = "hello world!".to_string();
tx.send(msg.clone()).unwrap(); tx.send(msg.clone()).unwrap();
@ -84,7 +84,7 @@ mod test {
let barrier = Arc::new(Barrier::new(2)); let barrier = Arc::new(Barrier::new(2));
let task = |barrier: &Barrier| barrier.wait(); let task = |barrier: &Barrier| barrier.wait();
let b_2 = barrier.clone(); let b_2 = Arc::clone(&barrier);
let (tx, rx) = std::sync::mpsc::channel(); let (tx, rx) = std::sync::mpsc::channel();

View file

@ -49,6 +49,8 @@ pub type AtomicF64 = AtomicCell<f64>;
//---------------------------------------------------------------------------------------------------- TESTS //---------------------------------------------------------------------------------------------------- TESTS
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
#![allow(clippy::float_cmp)]
use super::*; use super::*;
#[test] #[test]

84
helper/src/cast.rs Normal file
View file

@ -0,0 +1,84 @@
//! Casting.
//!
//! This modules provides utilities for casting between types.
//!
//! `#[no_std]` compatible.
#[rustfmt::skip]
//============================ SAFETY: DO NOT REMOVE ===========================//
//                                                                              //
//  Only allow building for 64-bit targets.                                     //
//                                                                              //
//  Every cast in this module assumes `usize`/`isize` are 64 bits wide, i.e.    //
//  that `u64 -> usize` and `i64 -> isize` are lossless. This guard turns that  //
//  assumption into a compile-time invariant instead of a silent truncation.    //
#[cfg(not(target_pointer_width = "64"))]
compile_error!("Cuprate is only compatible with 64-bit CPUs");
//                                                                              //
//                                                                              //
//============================ SAFETY: DO NOT REMOVE ===========================//
//---------------------------------------------------------------------------------------------------- Free functions
/// Cast [`u64`] to [`usize`].
///
/// This is lossless: the `compile_error!` guard above restricts this
/// crate to 64-bit pointer widths, so `usize` has the same width as
/// `u64` and the `as` cast cannot truncate.
#[inline(always)]
#[must_use]
pub const fn u64_to_usize(u: u64) -> usize {
    u as usize
}
/// Cast [`u32`] to [`usize`].
///
/// Always lossless: `usize` is at least 32 bits on every supported
/// target (and exactly 64 bits here, see the guard above).
#[inline(always)]
#[must_use]
pub const fn u32_to_usize(u: u32) -> usize {
    u as usize
}
/// Cast [`usize`] to [`u64`].
///
/// This is lossless: the `compile_error!` guard above restricts this
/// crate to 64-bit pointer widths, so `usize` fits exactly in `u64`.
#[inline(always)]
#[must_use]
pub const fn usize_to_u64(u: usize) -> u64 {
    u as u64
}
/// Cast [`i64`] to [`isize`].
///
/// This is lossless (sign and magnitude preserved): the `compile_error!`
/// guard above restricts this crate to 64-bit pointer widths, so `isize`
/// has the same width as `i64`.
#[inline(always)]
#[must_use]
pub const fn i64_to_isize(i: i64) -> isize {
    i as isize
}
/// Cast [`i32`] to [`isize`].
///
/// Always lossless (sign-extended): `isize` is at least 32 bits on every
/// supported target (and exactly 64 bits here, see the guard above).
#[inline(always)]
#[must_use]
pub const fn i32_to_isize(i: i32) -> isize {
    i as isize
}
/// Cast [`isize`] to [`i64`].
///
/// This is lossless (sign and magnitude preserved): the `compile_error!`
/// guard above restricts this crate to 64-bit pointer widths, so `isize`
/// fits exactly in `i64`.
#[inline(always)]
#[must_use]
pub const fn isize_to_i64(i: isize) -> i64 {
    i as i64
}
//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod test {
    use super::*;

    /// Maximum unsigned values must round-trip through every cast unchanged.
    #[test]
    fn max_unsigned() {
        assert_eq!(u32_to_usize(u32::MAX), u32::MAX as usize);
        assert_eq!(usize_to_u64(u32_to_usize(u32::MAX)), u32::MAX as u64);

        assert_eq!(u64_to_usize(u64::MAX), usize::MAX);
        assert_eq!(usize_to_u64(u64_to_usize(u64::MAX)), u64::MAX);

        assert_eq!(usize_to_u64(usize::MAX), u64::MAX);
        assert_eq!(u64_to_usize(usize_to_u64(usize::MAX)), usize::MAX);
    }

    /// Maximum signed values must round-trip through every cast unchanged.
    #[test]
    fn max_signed() {
        assert_eq!(i32_to_isize(i32::MAX), i32::MAX as isize);
        assert_eq!(isize_to_i64(i32_to_isize(i32::MAX)), i32::MAX as i64);

        assert_eq!(i64_to_isize(i64::MAX), isize::MAX);
        assert_eq!(isize_to_i64(i64_to_isize(i64::MAX)), i64::MAX);

        assert_eq!(isize_to_i64(isize::MAX), i64::MAX);
        assert_eq!(i64_to_isize(isize_to_i64(isize::MAX)), isize::MAX);
    }

    /// Zero and minimum (most negative) values must also survive the casts;
    /// this exercises the sign-extension path the `MAX` tests don't cover.
    #[test]
    fn min_and_zero() {
        assert_eq!(u64_to_usize(0), 0);
        assert_eq!(u32_to_usize(0), 0);
        assert_eq!(usize_to_u64(0), 0);

        assert_eq!(i64_to_isize(i64::MIN), isize::MIN);
        assert_eq!(isize_to_i64(i64_to_isize(i64::MIN)), i64::MIN);

        assert_eq!(i32_to_isize(i32::MIN), i32::MIN as isize);
        assert_eq!(isize_to_i64(isize::MIN), i64::MIN);
        assert_eq!(i64_to_isize(isize_to_i64(isize::MIN)), isize::MIN);
    }
}

View file

@ -190,72 +190,41 @@ mod test {
// - It must `ends_with()` the expected end PATH for the OS // - It must `ends_with()` the expected end PATH for the OS
#[test] #[test]
fn path_sanity_check() { fn path_sanity_check() {
assert!(CUPRATE_CACHE_DIR.is_absolute()); // Array of (PATH, expected_path_as_string).
assert!(CUPRATE_CONFIG_DIR.is_absolute()); //
assert!(CUPRATE_DATA_DIR.is_absolute()); // The different OS's will set the expected path below.
assert!(CUPRATE_BLOCKCHAIN_DIR.is_absolute()); let mut array = [
(&*CUPRATE_CACHE_DIR, ""),
(&*CUPRATE_CONFIG_DIR, ""),
(&*CUPRATE_DATA_DIR, ""),
(&*CUPRATE_BLOCKCHAIN_DIR, ""),
(&*CUPRATE_TXPOOL_DIR, ""),
];
if cfg!(target_os = "windows") { if cfg!(target_os = "windows") {
let dir = &*CUPRATE_CACHE_DIR; array[0].1 = r"AppData\Local\Cuprate";
println!("cuprate_cache_dir: {dir:?}"); array[1].1 = r"AppData\Roaming\Cuprate";
assert!(dir.ends_with(r"AppData\Local\Cuprate")); array[2].1 = r"AppData\Roaming\Cuprate";
array[3].1 = r"AppData\Roaming\Cuprate\blockchain";
let dir = &*CUPRATE_CONFIG_DIR; array[4].1 = r"AppData\Roaming\Cuprate\txpool";
println!("cuprate_config_dir: {dir:?}");
assert!(dir.ends_with(r"AppData\Roaming\Cuprate"));
let dir = &*CUPRATE_DATA_DIR;
println!("cuprate_data_dir: {dir:?}");
assert!(dir.ends_with(r"AppData\Roaming\Cuprate"));
let dir = &*CUPRATE_BLOCKCHAIN_DIR;
println!("cuprate_blockchain_dir: {dir:?}");
assert!(dir.ends_with(r"AppData\Roaming\Cuprate\blockchain"));
let dir = &*CUPRATE_TXPOOL_DIR;
println!("cuprate_txpool_dir: {dir:?}");
assert!(dir.ends_with(r"AppData\Roaming\Cuprate\txpool"));
} else if cfg!(target_os = "macos") { } else if cfg!(target_os = "macos") {
let dir = &*CUPRATE_CACHE_DIR; array[0].1 = "Library/Caches/Cuprate";
println!("cuprate_cache_dir: {dir:?}"); array[1].1 = "Library/Application Support/Cuprate";
assert!(dir.ends_with("Library/Caches/Cuprate")); array[2].1 = "Library/Application Support/Cuprate";
array[3].1 = "Library/Application Support/Cuprate/blockchain";
let dir = &*CUPRATE_CONFIG_DIR; array[4].1 = "Library/Application Support/Cuprate/txpool";
println!("cuprate_config_dir: {dir:?}");
assert!(dir.ends_with("Library/Application Support/Cuprate"));
let dir = &*CUPRATE_DATA_DIR;
println!("cuprate_data_dir: {dir:?}");
assert!(dir.ends_with("Library/Application Support/Cuprate"));
let dir = &*CUPRATE_BLOCKCHAIN_DIR;
println!("cuprate_blockchain_dir: {dir:?}");
assert!(dir.ends_with("Library/Application Support/Cuprate/blockchain"));
let dir = &*CUPRATE_TXPOOL_DIR;
println!("cuprate_txpool_dir: {dir:?}");
assert!(dir.ends_with("Library/Application Support/Cuprate/txpool"));
} else { } else {
// Assumes Linux. // Assumes Linux.
let dir = &*CUPRATE_CACHE_DIR; array[0].1 = ".cache/cuprate";
println!("cuprate_cache_dir: {dir:?}"); array[1].1 = ".config/cuprate";
assert!(dir.ends_with(".cache/cuprate")); array[2].1 = ".local/share/cuprate";
array[3].1 = ".local/share/cuprate/blockchain";
array[4].1 = ".local/share/cuprate/txpool";
};
let dir = &*CUPRATE_CONFIG_DIR; for (path, expected) in array {
println!("cuprate_config_dir: {dir:?}"); assert!(path.is_absolute());
assert!(dir.ends_with(".config/cuprate")); assert!(path.ends_with(expected));
let dir = &*CUPRATE_DATA_DIR;
println!("cuprate_data_dir: {dir:?}");
assert!(dir.ends_with(".local/share/cuprate"));
let dir = &*CUPRATE_BLOCKCHAIN_DIR;
println!("cuprate_blockchain_dir: {dir:?}");
assert!(dir.ends_with(".local/share/cuprate/blockchain"));
let dir = &*CUPRATE_TXPOOL_DIR;
println!("cuprate_txpool_dir: {dir:?}");
assert!(dir.ends_with(".local/share/cuprate/txpool"));
} }
} }
} }

View file

@ -1,36 +1,4 @@
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
//---------------------------------------------------------------------------------------------------- Lints
#![allow(clippy::len_zero, clippy::type_complexity, clippy::module_inception)]
#![deny(nonstandard_style, deprecated, missing_docs, unused_mut)]
#![forbid(
unused_unsafe,
future_incompatible,
break_with_label_and_loop,
coherence_leak_check,
duplicate_macro_attributes,
exported_private_dependencies,
for_loops_over_fallibles,
large_assignments,
overlapping_range_endpoints,
// private_in_public,
semicolon_in_expressions_from_macros,
redundant_semicolons,
unconditional_recursion,
unreachable_patterns,
unused_allocation,
unused_braces,
unused_comparisons,
unused_doc_comments,
unused_parens,
unused_labels,
while_true,
keyword_idents,
non_ascii_idents,
noop_method_call,
unreachable_pub,
single_use_lifetimes,
// variant_size_differences,
)]
#![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), no_std)]
//---------------------------------------------------------------------------------------------------- Public API //---------------------------------------------------------------------------------------------------- Public API
@ -40,6 +8,9 @@ pub mod asynch; // async collides
#[cfg(feature = "atomic")] #[cfg(feature = "atomic")]
pub mod atomic; pub mod atomic;
#[cfg(feature = "cast")]
pub mod cast;
#[cfg(feature = "constants")] #[cfg(feature = "constants")]
pub mod constants; pub mod constants;

View file

@ -7,6 +7,8 @@
//---------------------------------------------------------------------------------------------------- Use //---------------------------------------------------------------------------------------------------- Use
use monero_serai::transaction::Timelock; use monero_serai::transaction::Timelock;
use crate::cast::{u64_to_usize, usize_to_u64};
//---------------------------------------------------------------------------------------------------- `(u64, u64) <-> u128` //---------------------------------------------------------------------------------------------------- `(u64, u64) <-> u128`
/// Split a [`u128`] value into 2 64-bit values. /// Split a [`u128`] value into 2 64-bit values.
/// ///
@ -27,6 +29,7 @@ use monero_serai::transaction::Timelock;
/// ``` /// ```
#[inline] #[inline]
pub const fn split_u128_into_low_high_bits(value: u128) -> (u64, u64) { pub const fn split_u128_into_low_high_bits(value: u128) -> (u64, u64) {
#[allow(clippy::cast_possible_truncation)]
(value as u64, (value >> 64) as u64) (value as u64, (value >> 64) as u64)
} }
@ -58,7 +61,7 @@ pub const fn combine_low_high_bits_to_u128(low_bits: u64, high_bits: u64) -> u12
/// Map a [`u64`] to a [`Timelock`]. /// Map a [`u64`] to a [`Timelock`].
/// ///
/// Height/time is not differentiated via type, but rather: /// Height/time is not differentiated via type, but rather:
/// "height is any value less than 500_000_000 and timestamp is any value above" /// "height is any value less than `500_000_000` and timestamp is any value above"
/// so the `u64/usize` is stored without any tag. /// so the `u64/usize` is stored without any tag.
/// ///
/// See [`timelock_to_u64`] for the inverse function. /// See [`timelock_to_u64`] for the inverse function.
@ -77,7 +80,7 @@ pub fn u64_to_timelock(u: u64) -> Timelock {
if u == 0 { if u == 0 {
Timelock::None Timelock::None
} else if u < 500_000_000 { } else if u < 500_000_000 {
Timelock::Block(usize::try_from(u).unwrap()) Timelock::Block(u64_to_usize(u))
} else { } else {
Timelock::Time(u) Timelock::Time(u)
} }
@ -97,7 +100,7 @@ pub fn u64_to_timelock(u: u64) -> Timelock {
pub fn timelock_to_u64(timelock: Timelock) -> u64 { pub fn timelock_to_u64(timelock: Timelock) -> u64 {
match timelock { match timelock {
Timelock::None => 0, Timelock::None => 0,
Timelock::Block(u) => u64::try_from(u).unwrap(), Timelock::Block(u) => usize_to_u64(u),
Timelock::Time(u) => u, Timelock::Time(u) => u,
} }
} }

View file

@ -30,11 +30,11 @@ pub enum Network {
impl Network { impl Network {
/// Returns the network ID for the current network. /// Returns the network ID for the current network.
pub fn network_id(&self) -> [u8; 16] { pub const fn network_id(&self) -> [u8; 16] {
match self { match self {
Network::Mainnet => MAINNET_NETWORK_ID, Self::Mainnet => MAINNET_NETWORK_ID,
Network::Testnet => TESTNET_NETWORK_ID, Self::Testnet => TESTNET_NETWORK_ID,
Network::Stagenet => STAGENET_NETWORK_ID, Self::Stagenet => STAGENET_NETWORK_ID,
} }
} }
} }

View file

@ -89,8 +89,9 @@ where
/// assert_eq!(median(vec), 5); /// assert_eq!(median(vec), 5);
/// ``` /// ```
/// ///
/// # Safety /// # Invariant
/// If not sorted the output will be invalid. /// If not sorted the output will be invalid.
#[allow(clippy::debug_assert_with_mut_call)]
pub fn median<T>(array: impl AsRef<[T]>) -> T pub fn median<T>(array: impl AsRef<[T]>) -> T
where where
T: Add<Output = T> T: Add<Output = T>

View file

@ -28,10 +28,10 @@ macro_rules! impl_thread_percent {
$( $(
$(#[$doc])* $(#[$doc])*
pub fn $fn_name() -> NonZeroUsize { pub fn $fn_name() -> NonZeroUsize {
// SAFETY:
// unwrap here is okay because: // unwrap here is okay because:
// - THREADS().get() is always non-zero // - THREADS().get() is always non-zero
// - max() guards against 0 // - max() guards against 0
#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss, clippy::cast_precision_loss)]
NonZeroUsize::new(max(1, (threads().get() as f64 * $percent).floor() as usize)).unwrap() NonZeroUsize::new(max(1, (threads().get() as f64 * $percent).floor() as usize)).unwrap()
} }
)* )*
@ -58,10 +58,10 @@ impl_thread_percent! {
/// Originally from <https://docs.rs/lpt>. /// Originally from <https://docs.rs/lpt>.
/// ///
/// # Windows /// # Windows
/// Uses SetThreadPriority() with THREAD_PRIORITY_IDLE (-15). /// Uses `SetThreadPriority()` with `THREAD_PRIORITY_IDLE` (-15).
/// ///
/// # Unix /// # Unix
/// Uses libc::nice() with the max nice level. /// Uses `libc::nice()` with the max nice level.
/// ///
/// On macOS and *BSD: +20 /// On macOS and *BSD: +20
/// On Linux: +19 /// On Linux: +19
@ -74,7 +74,7 @@ pub fn low_priority_thread() {
// SAFETY: calling C. // SAFETY: calling C.
// We are _lowering_ our priority, not increasing, so this function should never fail. // We are _lowering_ our priority, not increasing, so this function should never fail.
unsafe { unsafe {
let _ = SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_IDLE); drop(SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_IDLE));
} }
} }
@ -87,7 +87,7 @@ pub fn low_priority_thread() {
// SAFETY: calling C. // SAFETY: calling C.
// We are _lowering_ our priority, not increasing, so this function should never fail. // We are _lowering_ our priority, not increasing, so this function should never fail.
unsafe { unsafe {
let _ = libc::nice(NICE_MAX); libc::nice(NICE_MAX);
} }
} }
} }

View file

@ -129,6 +129,7 @@ pub const fn secs_to_clock(seconds: u32) -> (u8, u8, u8) {
debug_assert!(m < 60); debug_assert!(m < 60);
debug_assert!(s < 60); debug_assert!(s < 60);
#[allow(clippy::cast_possible_truncation)] // checked above
(h as u8, m, s) (h as u8, m, s)
} }
@ -153,6 +154,7 @@ pub fn time() -> u32 {
/// ///
/// This is guaranteed to return a value between `0..=86399` /// This is guaranteed to return a value between `0..=86399`
pub fn time_utc() -> u32 { pub fn time_utc() -> u32 {
#[allow(clippy::cast_sign_loss)] // checked in function calls
unix_clock(chrono::offset::Local::now().timestamp() as u64) unix_clock(chrono::offset::Local::now().timestamp() as u64)
} }

View file

@ -15,6 +15,7 @@ default = ["std"]
std = ["dep:thiserror", "bytes/std", "cuprate-fixed-bytes/std"] std = ["dep:thiserror", "bytes/std", "cuprate-fixed-bytes/std"]
[dependencies] [dependencies]
cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] }
cuprate-fixed-bytes = { path = "../fixed-bytes", default-features = false } cuprate-fixed-bytes = { path = "../fixed-bytes", default-features = false }
paste = "1.0.14" paste = "1.0.14"

View file

@ -65,6 +65,8 @@ use core::{ops::Deref, str::from_utf8 as str_from_utf8};
use bytes::{Buf, BufMut, Bytes, BytesMut}; use bytes::{Buf, BufMut, Bytes, BytesMut};
use cuprate_helper::cast::{u64_to_usize, usize_to_u64};
pub mod container_as_blob; pub mod container_as_blob;
pub mod error; pub mod error;
mod io; mod io;
@ -242,7 +244,7 @@ pub fn write_bytes<T: AsRef<[u8]>, B: BufMut>(t: T, w: &mut B) -> Result<()> {
let bytes = t.as_ref(); let bytes = t.as_ref();
let len = bytes.len(); let len = bytes.len();
write_varint(len.try_into()?, w)?; write_varint(usize_to_u64(len), w)?;
if w.remaining_mut() < len { if w.remaining_mut() < len {
return Err(Error::IO("Not enough capacity to write bytes")); return Err(Error::IO("Not enough capacity to write bytes"));
@ -286,7 +288,7 @@ where
I: Iterator<Item = T> + ExactSizeIterator, I: Iterator<Item = T> + ExactSizeIterator,
B: BufMut, B: BufMut,
{ {
write_varint(iterator.len().try_into()?, w)?; write_varint(usize_to_u64(iterator.len()), w)?;
for item in iterator.into_iter() { for item in iterator.into_iter() {
item.write(w)?; item.write(w)?;
} }
@ -334,7 +336,7 @@ fn skip_epee_value<B: Buf>(r: &mut B, skipped_objects: &mut u8) -> Result<()> {
if let Some(size) = marker.inner_marker.size() { if let Some(size) = marker.inner_marker.size() {
let bytes_to_skip = size let bytes_to_skip = size
.checked_mul(len.try_into()?) .checked_mul(u64_to_usize(len))
.ok_or(Error::Value("List is too big".to_string()))?; .ok_or(Error::Value("List is too big".to_string()))?;
return advance(bytes_to_skip, r); return advance(bytes_to_skip, r);
}; };
@ -352,8 +354,8 @@ fn skip_epee_value<B: Buf>(r: &mut B, skipped_objects: &mut u8) -> Result<()> {
| InnerMarker::U8 | InnerMarker::U8
| InnerMarker::Bool => unreachable!("These types are constant size."), | InnerMarker::Bool => unreachable!("These types are constant size."),
InnerMarker::String => { InnerMarker::String => {
let len = read_varint(r)?; let len = u64_to_usize(read_varint(r)?);
advance(len.try_into()?, r)?; advance(len, r)?;
} }
InnerMarker::Object => { InnerMarker::Object => {
*skipped_objects += 1; *skipped_objects += 1;

View file

@ -7,6 +7,7 @@ use core::fmt::Debug;
use bytes::{Buf, BufMut, Bytes, BytesMut}; use bytes::{Buf, BufMut, Bytes, BytesMut};
use cuprate_fixed_bytes::{ByteArray, ByteArrayVec}; use cuprate_fixed_bytes::{ByteArray, ByteArrayVec};
use cuprate_helper::cast::u64_to_usize;
use crate::{ use crate::{
io::{checked_read_primitive, checked_write_primitive}, io::{checked_read_primitive, checked_write_primitive},
@ -66,11 +67,11 @@ impl<T: EpeeObject> EpeeValue for Vec<T> {
"Marker is not sequence when a sequence was expected", "Marker is not sequence when a sequence was expected",
)); ));
} }
let len = read_varint(r)?; let len = u64_to_usize(read_varint(r)?);
let individual_marker = Marker::new(marker.inner_marker); let individual_marker = Marker::new(marker.inner_marker);
let mut res = Vec::with_capacity(len.try_into()?); let mut res = Vec::with_capacity(len);
for _ in 0..len { for _ in 0..len {
res.push(T::read(r, &individual_marker)?); res.push(T::read(r, &individual_marker)?);
} }
@ -167,11 +168,13 @@ impl EpeeValue for Vec<u8> {
return Err(Error::Format("Byte array exceeded max length")); return Err(Error::Format("Byte array exceeded max length"));
} }
if r.remaining() < len.try_into()? { let len = u64_to_usize(len);
if r.remaining() < len {
return Err(Error::IO("Not enough bytes to fill object")); return Err(Error::IO("Not enough bytes to fill object"));
} }
let mut res = vec![0; len.try_into()?]; let mut res = vec![0; len];
r.copy_to_slice(&mut res); r.copy_to_slice(&mut res);
Ok(res) Ok(res)
@ -203,11 +206,13 @@ impl EpeeValue for Bytes {
return Err(Error::Format("Byte array exceeded max length")); return Err(Error::Format("Byte array exceeded max length"));
} }
if r.remaining() < len.try_into()? { let len = u64_to_usize(len);
if r.remaining() < len {
return Err(Error::IO("Not enough bytes to fill object")); return Err(Error::IO("Not enough bytes to fill object"));
} }
Ok(r.copy_to_bytes(len.try_into()?)) Ok(r.copy_to_bytes(len))
} }
fn epee_default_value() -> Option<Self> { fn epee_default_value() -> Option<Self> {
@ -236,11 +241,13 @@ impl EpeeValue for BytesMut {
return Err(Error::Format("Byte array exceeded max length")); return Err(Error::Format("Byte array exceeded max length"));
} }
if r.remaining() < len.try_into()? { let len = u64_to_usize(len);
if r.remaining() < len {
return Err(Error::IO("Not enough bytes to fill object")); return Err(Error::IO("Not enough bytes to fill object"));
} }
let mut bytes = BytesMut::zeroed(len.try_into()?); let mut bytes = BytesMut::zeroed(len);
r.copy_to_slice(&mut bytes); r.copy_to_slice(&mut bytes);
Ok(bytes) Ok(bytes)
@ -272,11 +279,13 @@ impl<const N: usize> EpeeValue for ByteArrayVec<N> {
return Err(Error::Format("Byte array exceeded max length")); return Err(Error::Format("Byte array exceeded max length"));
} }
if r.remaining() < usize::try_from(len)? { let len = u64_to_usize(len);
if r.remaining() < len {
return Err(Error::IO("Not enough bytes to fill object")); return Err(Error::IO("Not enough bytes to fill object"));
} }
ByteArrayVec::try_from(r.copy_to_bytes(usize::try_from(len)?)) ByteArrayVec::try_from(r.copy_to_bytes(len))
.map_err(|_| Error::Format("Field has invalid length")) .map_err(|_| Error::Format("Field has invalid length"))
} }
@ -302,7 +311,7 @@ impl<const N: usize> EpeeValue for ByteArray<N> {
return Err(Error::Format("Marker does not match expected Marker")); return Err(Error::Format("Marker does not match expected Marker"));
} }
let len: usize = read_varint(r)?.try_into()?; let len = u64_to_usize(read_varint(r)?);
if len != N { if len != N {
return Err(Error::Format("Byte array has incorrect length")); return Err(Error::Format("Byte array has incorrect length"));
} }
@ -370,11 +379,11 @@ impl<const N: usize> EpeeValue for Vec<[u8; N]> {
)); ));
} }
let len = read_varint(r)?; let len = u64_to_usize(read_varint(r)?);
let individual_marker = Marker::new(marker.inner_marker); let individual_marker = Marker::new(marker.inner_marker);
let mut res = Vec::with_capacity(len.try_into()?); let mut res = Vec::with_capacity(len);
for _ in 0..len { for _ in 0..len {
res.push(<[u8; N]>::read(r, &individual_marker)?); res.push(<[u8; N]>::read(r, &individual_marker)?);
} }
@ -406,11 +415,11 @@ macro_rules! epee_seq {
)); ));
} }
let len = read_varint(r)?; let len = u64_to_usize(read_varint(r)?);
let individual_marker = Marker::new(marker.inner_marker.clone()); let individual_marker = Marker::new(marker.inner_marker.clone());
let mut res = Vec::with_capacity(len.try_into()?); let mut res = Vec::with_capacity(len);
for _ in 0..len { for _ in 0..len {
res.push(<$val>::read(r, &individual_marker)?); res.push(<$val>::read(r, &individual_marker)?);
} }

View file

@ -9,12 +9,12 @@ epee_object!(
a: u8, a: u8,
); );
struct TT { struct T2 {
a: u8, a: u8,
} }
epee_object!( epee_object!(
TT, T2,
a: u8 = 0, a: u8 = 0,
); );
@ -35,5 +35,5 @@ fn duplicate_key_with_default() {
b'a', 0x0B, 0x00, b'a', 0x0B, 0x00,
]; ];
assert!(from_bytes::<TT, _>(&mut &data[..]).is_err()); assert!(from_bytes::<T2, _>(&mut &data[..]).is_err());
} }

View file

@ -12,6 +12,8 @@ default = []
tracing = ["dep:tracing", "tokio-util/tracing"] tracing = ["dep:tracing", "tokio-util/tracing"]
[dependencies] [dependencies]
cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] }
thiserror = { workspace = true } thiserror = { workspace = true }
bytes = { workspace = true, features = ["std"] } bytes = { workspace = true, features = ["std"] }
bitflags = { workspace = true } bitflags = { workspace = true }

View file

@ -20,6 +20,8 @@ use std::{fmt::Debug, marker::PhantomData};
use bytes::{Buf, BufMut, BytesMut}; use bytes::{Buf, BufMut, BytesMut};
use tokio_util::codec::{Decoder, Encoder}; use tokio_util::codec::{Decoder, Encoder};
use cuprate_helper::cast::u64_to_usize;
use crate::{ use crate::{
header::{Flags, HEADER_SIZE}, header::{Flags, HEADER_SIZE},
message::{make_dummy_message, LevinMessage}, message::{make_dummy_message, LevinMessage},
@ -114,10 +116,7 @@ impl<C: LevinCommand + Debug> Decoder for LevinBucketCodec<C> {
std::mem::replace(&mut self.state, LevinBucketState::WaitingForBody(head)); std::mem::replace(&mut self.state, LevinBucketState::WaitingForBody(head));
} }
LevinBucketState::WaitingForBody(head) => { LevinBucketState::WaitingForBody(head) => {
let body_len = head let body_len = u64_to_usize(head.size);
.size
.try_into()
.map_err(|_| BucketError::BucketExceededMaxSize)?;
if src.len() < body_len { if src.len() < body_len {
src.reserve(body_len - src.len()); src.reserve(body_len - src.len());
return Ok(None); return Ok(None);
@ -255,13 +254,11 @@ impl<T: LevinBody> Decoder for LevinMessageCodec<T> {
continue; continue;
}; };
let max_size = if self.bucket_codec.handshake_message_seen { let max_size = u64_to_usize(if self.bucket_codec.handshake_message_seen {
self.bucket_codec.protocol.max_packet_size self.bucket_codec.protocol.max_packet_size
} else { } else {
self.bucket_codec.protocol.max_packet_size_before_handshake self.bucket_codec.protocol.max_packet_size_before_handshake
} });
.try_into()
.expect("Levin max message size is too large, does not fit into a usize.");
if bytes.len().saturating_add(bucket.body.len()) > max_size { if bytes.len().saturating_add(bucket.body.len()) > max_size {
return Err(BucketError::InvalidFragmentedMessage( return Err(BucketError::InvalidFragmentedMessage(
@ -300,12 +297,7 @@ impl<T: LevinBody> Decoder for LevinMessageCodec<T> {
} }
// Check the fragmented message contains enough bytes to build the message. // Check the fragmented message contains enough bytes to build the message.
if bytes.len().saturating_sub(HEADER_SIZE) if bytes.len().saturating_sub(HEADER_SIZE) < u64_to_usize(header.size) {
< header
.size
.try_into()
.map_err(|_| BucketError::BucketExceededMaxSize)?
{
return Err(BucketError::InvalidFragmentedMessage( return Err(BucketError::InvalidFragmentedMessage(
"Fragmented message does not have enough bytes to fill bucket body", "Fragmented message does not have enough bytes to fill bucket body",
)); ));

View file

@ -38,6 +38,8 @@ use std::fmt::Debug;
use bytes::{Buf, Bytes}; use bytes::{Buf, Bytes};
use thiserror::Error; use thiserror::Error;
use cuprate_helper::cast::usize_to_u64;
pub mod codec; pub mod codec;
pub mod header; pub mod header;
pub mod message; pub mod message;
@ -212,7 +214,7 @@ impl<C: LevinCommand> BucketBuilder<C> {
Bucket { Bucket {
header: BucketHead { header: BucketHead {
signature: self.signature.unwrap(), signature: self.signature.unwrap(),
size: body.len().try_into().unwrap(), size: usize_to_u64(body.len()),
have_to_return_data: ty.have_to_return_data(), have_to_return_data: ty.have_to_return_data(),
command: self.command.unwrap(), command: self.command.unwrap(),
return_code: self.return_code.unwrap(), return_code: self.return_code.unwrap(),

View file

@ -5,6 +5,8 @@
//! for more control over what is actually sent over the wire at certain times. //! for more control over what is actually sent over the wire at certain times.
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use cuprate_helper::cast::usize_to_u64;
use crate::{ use crate::{
header::{Flags, HEADER_SIZE}, header::{Flags, HEADER_SIZE},
Bucket, BucketBuilder, BucketError, BucketHead, LevinBody, LevinCommand, Protocol, Bucket, BucketBuilder, BucketError, BucketHead, LevinBody, LevinCommand, Protocol,
@ -106,9 +108,7 @@ pub fn make_fragmented_messages<T: LevinBody>(
new_body.resize(fragment_size - HEADER_SIZE, 0); new_body.resize(fragment_size - HEADER_SIZE, 0);
bucket.body = new_body.freeze(); bucket.body = new_body.freeze();
bucket.header.size = (fragment_size - HEADER_SIZE) bucket.header.size = usize_to_u64(fragment_size - HEADER_SIZE);
.try_into()
.expect("Bucket size does not fit into u64");
} }
return Ok(vec![bucket]); return Ok(vec![bucket]);
@ -118,9 +118,7 @@ pub fn make_fragmented_messages<T: LevinBody>(
// The first fragment will set the START flag, the last will set the END flag. // The first fragment will set the START flag, the last will set the END flag.
let fragment_head = BucketHead { let fragment_head = BucketHead {
signature: protocol.signature, signature: protocol.signature,
size: (fragment_size - HEADER_SIZE) size: usize_to_u64(fragment_size - HEADER_SIZE),
.try_into()
.expect("Bucket size does not fit into u64"),
have_to_return_data: false, have_to_return_data: false,
// Just use a default command. // Just use a default command.
command: T::Command::from(0), command: T::Command::from(0),
@ -191,7 +189,7 @@ pub(crate) fn make_dummy_message<T: LevinCommand>(protocol: &Protocol, size: usi
// A header to put on the dummy message. // A header to put on the dummy message.
let header = BucketHead { let header = BucketHead {
signature: protocol.signature, signature: protocol.signature,
size: size.try_into().expect("Bucket size does not fit into u64"), size: usize_to_u64(size),
have_to_return_data: false, have_to_return_data: false,
// Just use a default command. // Just use a default command.
command: T::from(0), command: T::from(0),

View file

@ -8,6 +8,8 @@ use tokio::{
}; };
use tokio_util::codec::{FramedRead, FramedWrite}; use tokio_util::codec::{FramedRead, FramedWrite};
use cuprate_helper::cast::u64_to_usize;
use cuprate_levin::{ use cuprate_levin::{
message::make_fragmented_messages, BucketBuilder, BucketError, LevinBody, LevinCommand, message::make_fragmented_messages, BucketBuilder, BucketError, LevinBody, LevinCommand,
LevinMessageCodec, MessageType, Protocol, LevinMessageCodec, MessageType, Protocol,
@ -54,7 +56,7 @@ impl LevinBody for TestBody {
_: MessageType, _: MessageType,
_: Self::Command, _: Self::Command,
) -> Result<Self, BucketError> { ) -> Result<Self, BucketError> {
let size = body.get_u64_le().try_into().unwrap(); let size = u64_to_usize(body.get_u64_le());
// bucket // bucket
Ok(TestBody::Bytes(size, body.copy_to_bytes(size))) Ok(TestBody::Bytes(size, body.copy_to_bytes(size)))
} }

View file

@ -15,6 +15,7 @@ cuprate-levin = { path = "../levin" }
cuprate-epee-encoding = { path = "../epee-encoding" } cuprate-epee-encoding = { path = "../epee-encoding" }
cuprate-fixed-bytes = { path = "../fixed-bytes" } cuprate-fixed-bytes = { path = "../fixed-bytes" }
cuprate-types = { path = "../../types", default-features = false, features = ["epee"] } cuprate-types = { path = "../../types", default-features = false, features = ["epee"] }
cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] }
bitflags = { workspace = true, features = ["std"] } bitflags = { workspace = true, features = ["std"] }
bytes = { workspace = true, features = ["std"] } bytes = { workspace = true, features = ["std"] }

View file

@ -99,7 +99,7 @@ impl LevinCommandTrait for LevinCommand {
LevinCommand::FluffyMissingTxsRequest => 1024 * 1024, // 1 MB LevinCommand::FluffyMissingTxsRequest => 1024 * 1024, // 1 MB
LevinCommand::GetTxPoolCompliment => 1024 * 1024 * 4, // 4 MB LevinCommand::GetTxPoolCompliment => 1024 * 1024 * 4, // 4 MB
LevinCommand::Unknown(_) => usize::MAX.try_into().unwrap_or(u64::MAX), LevinCommand::Unknown(_) => u64::MAX,
} }
} }

View file

@ -20,13 +20,17 @@ cuprate-helper = { path = "../../helper", features = ["asynch"], default-
axum = { version = "0.7.5", features = ["json"], default-features = false } axum = { version = "0.7.5", features = ["json"], default-features = false }
serde = { workspace = true, optional = true } serde = { workspace = true, optional = true }
serde_json = { workspace = true, features = ["std"] }
tower = { workspace = true } tower = { workspace = true }
paste = { workspace = true } paste = { workspace = true }
futures = { workspace = true } futures = { workspace = true }
[dev-dependencies] [dev-dependencies]
cuprate-test-utils = { path = "../../test-utils" }
axum = { version = "0.7.5", features = ["json", "tokio", "http2"] } axum = { version = "0.7.5", features = ["json", "tokio", "http2"] }
serde_json = { workspace = true, features = ["std"] } serde_json = { workspace = true, features = ["std"] }
tokio = { workspace = true, features = ["full"] } tokio = { workspace = true, features = ["full"] }
ureq = { version = "2.10.0", features = ["json"] } ureq = { version = "2.10.0", features = ["json"] }
[lints]
workspace = true

View file

@ -1,99 +1,6 @@
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))]
//---------------------------------------------------------------------------------------------------- Lints
// Forbid lints.
// Our code, and code generated (e.g macros) cannot overrule these.
#![forbid(
// `unsafe` is allowed but it _must_ be
// commented with `SAFETY: reason`.
clippy::undocumented_unsafe_blocks,
// Never.
unused_unsafe,
redundant_semicolons,
unused_allocation,
coherence_leak_check,
while_true,
// Maybe can be put into `#[deny]`.
unconditional_recursion,
for_loops_over_fallibles,
unused_braces,
unused_labels,
keyword_idents,
non_ascii_idents,
variant_size_differences,
single_use_lifetimes,
// Probably can be put into `#[deny]`.
future_incompatible,
let_underscore,
break_with_label_and_loop,
duplicate_macro_attributes,
exported_private_dependencies,
large_assignments,
overlapping_range_endpoints,
semicolon_in_expressions_from_macros,
noop_method_call,
)]
// Deny lints.
// Some of these are `#[allow]`'ed on a per-case basis.
#![deny(
clippy::all,
clippy::correctness,
clippy::suspicious,
clippy::style,
clippy::complexity,
clippy::perf,
clippy::pedantic,
clippy::nursery,
clippy::cargo,
unused_doc_comments,
unused_mut,
missing_docs,
deprecated,
unused_comparisons,
nonstandard_style,
unreachable_pub
)]
#![allow(
// FIXME: this lint affects crates outside of
// `database/` for some reason, allow for now.
clippy::cargo_common_metadata,
// FIXME: adding `#[must_use]` onto everything
// might just be more annoying than useful...
// although it is sometimes nice.
clippy::must_use_candidate,
// FIXME: good lint but too many false positives
// with our `Env` + `RwLock` setup.
clippy::significant_drop_tightening,
// FIXME: good lint but is less clear in most cases.
clippy::items_after_statements,
// TODO
rustdoc::bare_urls,
clippy::multiple_crate_versions,
clippy::module_name_repetitions,
clippy::module_inception,
clippy::redundant_pub_crate,
clippy::option_if_let_else,
)]
// Allow some lints in tests.
#![cfg_attr(
test,
allow(
clippy::cognitive_complexity,
clippy::needless_pass_by_value,
clippy::cast_possible_truncation,
clippy::too_many_lines
)
)]
//---------------------------------------------------------------------------------------------------- Mod
mod route; mod route;
mod router_builder; mod router_builder;
mod rpc_error; mod rpc_error;
@ -110,3 +17,13 @@ pub use rpc_handler::RpcHandler;
pub use rpc_handler_dummy::RpcHandlerDummy; pub use rpc_handler_dummy::RpcHandlerDummy;
pub use rpc_request::RpcRequest; pub use rpc_request::RpcRequest;
pub use rpc_response::RpcResponse; pub use rpc_response::RpcResponse;
// false-positive: used in `README.md`'s doc-test.
#[cfg(test)]
mod test {
extern crate axum;
extern crate cuprate_test_utils;
extern crate serde_json;
extern crate tokio;
extern crate ureq;
}

View file

@ -18,3 +18,6 @@ thiserror = { workspace = true }
[dev-dependencies] [dev-dependencies]
pretty_assertions = { workspace = true } pretty_assertions = { workspace = true }
[lints]
workspace = true

View file

@ -1,94 +1,5 @@
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
//---------------------------------------------------------------------------------------------------- Lints
// Forbid lints.
// Our code, and code generated (e.g macros) cannot overrule these.
#![forbid(
// `unsafe` is allowed but it _must_ be
// commented with `SAFETY: reason`.
clippy::undocumented_unsafe_blocks,
// Never.
unused_unsafe,
redundant_semicolons,
unused_allocation,
coherence_leak_check,
while_true,
// Maybe can be put into `#[deny]`.
unconditional_recursion,
for_loops_over_fallibles,
unused_braces,
unused_labels,
keyword_idents,
non_ascii_idents,
variant_size_differences,
single_use_lifetimes,
// Probably can be put into `#[deny]`.
future_incompatible,
let_underscore,
break_with_label_and_loop,
duplicate_macro_attributes,
exported_private_dependencies,
large_assignments,
overlapping_range_endpoints,
semicolon_in_expressions_from_macros,
noop_method_call,
unreachable_pub,
)]
// Deny lints.
// Some of these are `#[allow]`'ed on a per-case basis.
#![deny(
clippy::all,
clippy::correctness,
clippy::suspicious,
clippy::style,
clippy::complexity,
clippy::perf,
clippy::pedantic,
clippy::nursery,
clippy::cargo,
clippy::missing_docs_in_private_items,
unused_mut,
missing_docs,
deprecated,
unused_comparisons,
nonstandard_style
)]
#![allow(
// FIXME: this lint affects crates outside of
// `database/` for some reason, allow for now.
clippy::cargo_common_metadata,
// FIXME: adding `#[must_use]` onto everything
// might just be more annoying than useful...
// although it is sometimes nice.
clippy::must_use_candidate,
// FIXME: good lint but too many false positives
// with our `Env` + `RwLock` setup.
clippy::significant_drop_tightening,
// FIXME: good lint but is less clear in most cases.
clippy::items_after_statements,
clippy::module_name_repetitions,
clippy::module_inception,
clippy::redundant_pub_crate,
clippy::option_if_let_else,
)]
// Allow some lints in tests.
#![cfg_attr(
test,
allow(
clippy::cognitive_complexity,
clippy::needless_pass_by_value,
clippy::cast_possible_truncation,
clippy::too_many_lines
)
)]
//---------------------------------------------------------------------------------------------------- Mod/Use
pub mod error; pub mod error;
mod id; mod id;
@ -103,6 +14,5 @@ pub use request::Request;
mod response; mod response;
pub use response::Response; pub use response::Response;
//---------------------------------------------------------------------------------------------------- TESTS
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;

View file

@ -304,14 +304,14 @@ where
if payload.is_none() { if payload.is_none() {
payload = Some(Ok(map.next_value::<T>()?)); payload = Some(Ok(map.next_value::<T>()?));
} else { } else {
return Err(serde::de::Error::duplicate_field("result/error")); return Err(Error::duplicate_field("result/error"));
} }
} }
Key::Error => { Key::Error => {
if payload.is_none() { if payload.is_none() {
payload = Some(Err(map.next_value::<ErrorObject>()?)); payload = Some(Err(map.next_value::<ErrorObject>()?));
} else { } else {
return Err(serde::de::Error::duplicate_field("result/error")); return Err(Error::duplicate_field("result/error"));
} }
} }
Key::Unknown => { Key::Unknown => {

View file

@ -52,6 +52,7 @@ where
} }
/// Tests an input JSON string matches an expected type `T`. /// Tests an input JSON string matches an expected type `T`.
#[allow(clippy::needless_pass_by_value)] // serde signature
fn assert_de<T>(json: &'static str, expected: T) fn assert_de<T>(json: &'static str, expected: T)
where where
T: DeserializeOwned + std::fmt::Debug + Clone + PartialEq, T: DeserializeOwned + std::fmt::Debug + Clone + PartialEq,

View file

@ -18,13 +18,14 @@ cuprate-epee-encoding = { path = "../../net/epee-encoding", optional = true }
cuprate-fixed-bytes = { path = "../../net/fixed-bytes" } cuprate-fixed-bytes = { path = "../../net/fixed-bytes" }
cuprate-types = { path = "../../types" } cuprate-types = { path = "../../types" }
monero-serai = { workspace = true }
paste = { workspace = true } paste = { workspace = true }
serde = { workspace = true, optional = true } serde = { workspace = true, optional = true }
[dev-dependencies] [dev-dependencies]
cuprate-test-utils = { path = "../../test-utils" } cuprate-test-utils = { path = "../../test-utils" }
cuprate-json-rpc = { path = "../json-rpc" }
serde_json = { workspace = true } serde = { workspace = true }
pretty_assertions = { workspace = true } serde_json = { workspace = true }
[lints]
workspace = true

View file

@ -1,96 +1,6 @@
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))]
//---------------------------------------------------------------------------------------------------- Lints
// Forbid lints.
// Our code, and code generated (e.g macros) cannot overrule these.
#![forbid(
// `unsafe` is allowed but it _must_ be
// commented with `SAFETY: reason`.
clippy::undocumented_unsafe_blocks,
// Never.
unused_unsafe,
redundant_semicolons,
unused_allocation,
coherence_leak_check,
while_true,
// Maybe can be put into `#[deny]`.
unconditional_recursion,
for_loops_over_fallibles,
unused_braces,
unused_labels,
keyword_idents,
non_ascii_idents,
variant_size_differences,
single_use_lifetimes,
// Probably can be put into `#[deny]`.
future_incompatible,
let_underscore,
break_with_label_and_loop,
duplicate_macro_attributes,
exported_private_dependencies,
large_assignments,
overlapping_range_endpoints,
semicolon_in_expressions_from_macros,
noop_method_call,
)]
// Deny lints.
// Some of these are `#[allow]`'ed on a per-case basis.
#![deny(
clippy::all,
clippy::correctness,
clippy::suspicious,
clippy::style,
clippy::complexity,
clippy::perf,
clippy::pedantic,
clippy::nursery,
clippy::cargo,
unused_doc_comments,
unused_mut,
missing_docs,
deprecated,
unused_comparisons,
nonstandard_style,
unreachable_pub
)]
#![allow(
// FIXME: this lint affects crates outside of
// `database/` for some reason, allow for now.
clippy::cargo_common_metadata,
// FIXME: adding `#[must_use]` onto everything
// might just be more annoying than useful...
// although it is sometimes nice.
clippy::must_use_candidate,
// FIXME: good lint but too many false positives
// with our `Env` + `RwLock` setup.
clippy::significant_drop_tightening,
// FIXME: good lint but is less clear in most cases.
clippy::items_after_statements,
clippy::multiple_crate_versions,
clippy::module_name_repetitions,
clippy::module_inception,
clippy::redundant_pub_crate,
clippy::option_if_let_else,
)]
// Allow some lints in tests.
#![cfg_attr(
test,
allow(
clippy::cognitive_complexity,
clippy::needless_pass_by_value,
clippy::cast_possible_truncation,
clippy::too_many_lines
)
)]
//---------------------------------------------------------------------------------------------------- Mod
mod constants; mod constants;
mod defaults; mod defaults;
mod free; mod free;
@ -112,3 +22,10 @@ pub use constants::{
CORE_RPC_VERSION_MINOR, CORE_RPC_VERSION_MINOR,
}; };
pub use rpc_call::{RpcCall, RpcCallValue}; pub use rpc_call::{RpcCall, RpcCallValue};
// false-positive: used in tests
#[cfg(test)]
mod test {
extern crate cuprate_test_utils;
extern crate serde_json;
}

View file

@ -15,6 +15,7 @@
mod binary_string; mod binary_string;
mod distribution; mod distribution;
mod key_image_spent_status; mod key_image_spent_status;
#[allow(clippy::module_inception)]
mod misc; mod misc;
mod pool_info_extent; mod pool_info_extent;
mod status; mod status;

View file

@ -39,7 +39,7 @@ thread_local = { workspace = true, optional = true }
rayon = { workspace = true, optional = true } rayon = { workspace = true, optional = true }
[dev-dependencies] [dev-dependencies]
cuprate-helper = { path = "../../helper", features = ["thread"] } cuprate-helper = { path = "../../helper", features = ["thread", "cast"] }
cuprate-test-utils = { path = "../../test-utils" } cuprate-test-utils = { path = "../../test-utils" }
tokio = { workspace = true, features = ["full"] } tokio = { workspace = true, features = ["full"] }
@ -48,3 +48,6 @@ pretty_assertions = { workspace = true }
proptest = { workspace = true } proptest = { workspace = true }
hex = { workspace = true } hex = { workspace = true }
hex-literal = { workspace = true } hex-literal = { workspace = true }
[lints]
workspace = true

View file

@ -1,4 +1,44 @@
//! The main [`Config`] struct, holding all configurable values. //! Database configuration.
//!
//! This module contains the main [`Config`]uration struct
//! for the database [`Env`](cuprate_database::Env)ironment,
//! and blockchain-specific configuration.
//!
//! It also contains types related to configuration settings.
//!
//! The main constructor is the [`ConfigBuilder`].
//!
//! These configurations are processed at runtime, meaning
//! the `Env` can/will dynamically adjust its behavior based
//! on these values.
//!
//! # Example
//! ```rust
//! use cuprate_blockchain::{
//! cuprate_database::{Env, config::SyncMode},
//! config::{ConfigBuilder, ReaderThreads},
//! };
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let tmp_dir = tempfile::tempdir()?;
//! let db_dir = tmp_dir.path().to_owned();
//!
//! let config = ConfigBuilder::new()
//! // Use a custom database directory.
//! .db_directory(db_dir.into())
//! // Use as many reader threads as possible (when using `service`).
//! .reader_threads(ReaderThreads::OnePerThread)
//! // Use the fastest sync mode.
//! .sync_mode(SyncMode::Fast)
//! // Build into `Config`
//! .build();
//!
//! // Start a database `service` using this configuration.
//! let (_, _, env) = cuprate_blockchain::service::init(config.clone())?;
//! // It's using the config we provided.
//! assert_eq!(env.config(), &config.db_config);
//! # Ok(()) }
//! ```
//---------------------------------------------------------------------------------------------------- Import //---------------------------------------------------------------------------------------------------- Import
use std::{borrow::Cow, path::Path}; use std::{borrow::Cow, path::Path};

View file

@ -1,44 +0,0 @@
//! Database configuration.
//!
//! This module contains the main [`Config`]uration struct
//! for the database [`Env`](cuprate_database::Env)ironment,
//! and blockchain-specific configuration.
//!
//! It also contains types related to configuration settings.
//!
//! The main constructor is the [`ConfigBuilder`].
//!
//! These configurations are processed at runtime, meaning
//! the `Env` can/will dynamically adjust its behavior based
//! on these values.
//!
//! # Example
//! ```rust
//! use cuprate_blockchain::{
//! cuprate_database::{Env, config::SyncMode},
//! config::{ConfigBuilder, ReaderThreads},
//! };
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let tmp_dir = tempfile::tempdir()?;
//! let db_dir = tmp_dir.path().to_owned();
//!
//! let config = ConfigBuilder::new()
//! // Use a custom database directory.
//! .db_directory(db_dir.into())
//! // Use as many reader threads as possible (when using `service`).
//! .reader_threads(ReaderThreads::OnePerThread)
//! // Use the fastest sync mode.
//! .sync_mode(SyncMode::Fast)
//! // Build into `Config`
//! .build();
//!
//! // Start a database `service` using this configuration.
//! let (_, _, env) = cuprate_blockchain::service::init(config.clone())?;
//! // It's using the config we provided.
//! assert_eq!(env.config(), &config.db_config);
//! # Ok(()) }
//! ```
mod config;
pub use config::{Config, ConfigBuilder, ReaderThreads};

View file

@ -1,103 +1,9 @@
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
//---------------------------------------------------------------------------------------------------- Lints
// Forbid lints.
// Our code, and code generated (e.g macros) cannot overrule these.
#![forbid(
// `unsafe` is allowed but it _must_ be
// commented with `SAFETY: reason`.
clippy::undocumented_unsafe_blocks,
// Never.
unused_unsafe,
redundant_semicolons,
unused_allocation,
coherence_leak_check,
while_true,
clippy::missing_docs_in_private_items,
// Maybe can be put into `#[deny]`.
unconditional_recursion,
for_loops_over_fallibles,
unused_braces,
unused_labels,
keyword_idents,
non_ascii_idents,
variant_size_differences,
single_use_lifetimes,
// Probably can be put into `#[deny]`.
future_incompatible,
let_underscore,
break_with_label_and_loop,
duplicate_macro_attributes,
exported_private_dependencies,
large_assignments,
overlapping_range_endpoints,
semicolon_in_expressions_from_macros,
noop_method_call,
unreachable_pub,
)]
// Deny lints.
// Some of these are `#[allow]`'ed on a per-case basis.
#![deny(
clippy::all,
clippy::correctness,
clippy::suspicious,
clippy::style,
clippy::complexity,
clippy::perf,
clippy::pedantic,
clippy::nursery,
clippy::cargo,
unused_crate_dependencies,
unused_doc_comments,
unused_mut,
missing_docs,
deprecated,
unused_comparisons,
nonstandard_style
)]
#![allow( #![allow(
// FIXME: this lint affects crates outside of // See `cuprate-database` for reasoning.
// `database/` for some reason, allow for now. clippy::significant_drop_tightening
clippy::cargo_common_metadata,
// FIXME: adding `#[must_use]` onto everything
// might just be more annoying than useful...
// although it is sometimes nice.
clippy::must_use_candidate,
// FIXME: good lint but too many false positives
// with our `Env` + `RwLock` setup.
clippy::significant_drop_tightening,
// FIXME: good lint but is less clear in most cases.
clippy::items_after_statements,
clippy::module_name_repetitions,
clippy::module_inception,
clippy::redundant_pub_crate,
clippy::option_if_let_else,
)]
// Allow some lints when running in debug mode.
#![cfg_attr(
debug_assertions,
allow(
clippy::todo,
clippy::multiple_crate_versions,
// unused_crate_dependencies,
)
)]
// Allow some lints in tests.
#![cfg_attr(
test,
allow(
clippy::cognitive_complexity,
clippy::needless_pass_by_value,
clippy::cast_possible_truncation,
clippy::too_many_lines
)
)] )]
// Only allow building 64-bit targets. // Only allow building 64-bit targets.
// //
// This allows us to assume 64-bit // This allows us to assume 64-bit

View file

@ -442,7 +442,7 @@ mod test {
let mut block = BLOCK_V9_TX3.clone(); let mut block = BLOCK_V9_TX3.clone();
block.height = usize::try_from(u32::MAX).unwrap() + 1; block.height = cuprate_helper::cast::u32_to_usize(u32::MAX) + 1;
add_block(&block, &mut tables).unwrap(); add_block(&block, &mut tables).unwrap();
} }

View file

@ -37,8 +37,8 @@ pub fn init(
let db = Arc::new(crate::open(config)?); let db = Arc::new(crate::open(config)?);
// Spawn the Reader thread pool and Writer. // Spawn the Reader thread pool and Writer.
let readers = init_read_service(db.clone(), reader_threads); let readers = init_read_service(Arc::clone(&db), reader_threads);
let writer = init_write_service(db.clone()); let writer = init_write_service(Arc::clone(&db));
Ok((readers, writer, db)) Ok((readers, writer, db))
} }

View file

@ -304,8 +304,9 @@ async fn test_template(
// Assert we get back the same map of // Assert we get back the same map of
// `Amount`'s and `AmountIndex`'s. // `Amount`'s and `AmountIndex`'s.
let mut response_output_count = 0; let mut response_output_count = 0;
#[allow(clippy::iter_over_hash_type)] // order doesn't matter in this test
for (amount, output_map) in response { for (amount, output_map) in response {
let amount_index_set = map.get(&amount).unwrap(); let amount_index_set = &map[&amount];
for (amount_index, output) in output_map { for (amount_index, output) in output_map {
response_output_count += 1; response_output_count += 1;

View file

@ -33,3 +33,6 @@ serde = { workspace = true, optional = true }
bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] }
page_size = { version = "0.6.0" } page_size = { version = "0.6.0" }
tempfile = { version = "3.10.0" } tempfile = { version = "3.10.0" }
[lints]
workspace = true

View file

@ -70,7 +70,7 @@ impl Drop for ConcreteEnv {
// We need to do `mdb_env_set_flags(&env, MDB_NOSYNC|MDB_ASYNCMAP, 0)` // We need to do `mdb_env_set_flags(&env, MDB_NOSYNC|MDB_ASYNCMAP, 0)`
// to clear the no sync and async flags such that the below `self.sync()` // to clear the no sync and async flags such that the below `self.sync()`
// _actually_ synchronously syncs. // _actually_ synchronously syncs.
if let Err(_e) = crate::Env::sync(self) { if let Err(_e) = Env::sync(self) {
// TODO: log error? // TODO: log error?
} }

View file

@ -78,8 +78,8 @@ mod test {
println!("left: {left:?}, right: {right:?}, expected: {expected:?}"); println!("left: {left:?}, right: {right:?}, expected: {expected:?}");
assert_eq!( assert_eq!(
<StorableHeed::<T> as heed::Comparator>::compare( <StorableHeed::<T> as heed::Comparator>::compare(
&<StorableHeed::<T> as heed::BytesEncode>::bytes_encode(&left).unwrap(), &<StorableHeed::<T> as BytesEncode>::bytes_encode(&left).unwrap(),
&<StorableHeed::<T> as heed::BytesEncode>::bytes_encode(&right).unwrap() &<StorableHeed::<T> as BytesEncode>::bytes_encode(&right).unwrap()
), ),
expected expected
); );

View file

@ -23,7 +23,7 @@ use crate::{
/// Shared [`DatabaseRo::get()`]. /// Shared [`DatabaseRo::get()`].
#[inline] #[inline]
fn get<T: Table + 'static>( fn get<T: Table + 'static>(
db: &impl redb::ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>, db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
key: &T::Key, key: &T::Key,
) -> Result<T::Value, RuntimeError> { ) -> Result<T::Value, RuntimeError> {
Ok(db.get(key)?.ok_or(RuntimeError::KeyNotFound)?.value()) Ok(db.get(key)?.ok_or(RuntimeError::KeyNotFound)?.value())
@ -32,7 +32,7 @@ fn get<T: Table + 'static>(
/// Shared [`DatabaseRo::len()`]. /// Shared [`DatabaseRo::len()`].
#[inline] #[inline]
fn len<T: Table>( fn len<T: Table>(
db: &impl redb::ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>, db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
) -> Result<u64, RuntimeError> { ) -> Result<u64, RuntimeError> {
Ok(db.len()?) Ok(db.len()?)
} }
@ -40,7 +40,7 @@ fn len<T: Table>(
/// Shared [`DatabaseRo::first()`]. /// Shared [`DatabaseRo::first()`].
#[inline] #[inline]
fn first<T: Table>( fn first<T: Table>(
db: &impl redb::ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>, db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
) -> Result<(T::Key, T::Value), RuntimeError> { ) -> Result<(T::Key, T::Value), RuntimeError> {
let (key, value) = db.first()?.ok_or(RuntimeError::KeyNotFound)?; let (key, value) = db.first()?.ok_or(RuntimeError::KeyNotFound)?;
Ok((key.value(), value.value())) Ok((key.value(), value.value()))
@ -49,7 +49,7 @@ fn first<T: Table>(
/// Shared [`DatabaseRo::last()`]. /// Shared [`DatabaseRo::last()`].
#[inline] #[inline]
fn last<T: Table>( fn last<T: Table>(
db: &impl redb::ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>, db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
) -> Result<(T::Key, T::Value), RuntimeError> { ) -> Result<(T::Key, T::Value), RuntimeError> {
let (key, value) = db.last()?.ok_or(RuntimeError::KeyNotFound)?; let (key, value) = db.last()?.ok_or(RuntimeError::KeyNotFound)?;
Ok((key.value(), value.value())) Ok((key.value(), value.value()))
@ -58,7 +58,7 @@ fn last<T: Table>(
/// Shared [`DatabaseRo::is_empty()`]. /// Shared [`DatabaseRo::is_empty()`].
#[inline] #[inline]
fn is_empty<T: Table>( fn is_empty<T: Table>(
db: &impl redb::ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>, db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
) -> Result<bool, RuntimeError> { ) -> Result<bool, RuntimeError> {
Ok(db.is_empty()?) Ok(db.is_empty()?)
} }

View file

@ -33,6 +33,7 @@
//! # Ok(()) } //! # Ok(()) }
//! ``` //! ```
#[allow(clippy::module_inception)]
mod config; mod config;
pub use config::{Config, ConfigBuilder, READER_THREADS_DEFAULT}; pub use config::{Config, ConfigBuilder, READER_THREADS_DEFAULT};

View file

@ -163,7 +163,7 @@ pub trait Env: Sized {
// We have the direct PATH to the file, // We have the direct PATH to the file,
// no need to use backend-specific functions. // no need to use backend-specific functions.
// //
// SAFETY: as we are only accessing the metadata of // INVARIANT: as we are only accessing the metadata of
// the file and not reading the bytes, it should be // the file and not reading the bytes, it should be
// fine even with a memory mapped file being actively // fine even with a memory mapped file being actively
// written to. // written to.

View file

@ -163,11 +163,11 @@ impl KeyCompare {
#[inline] #[inline]
pub const fn as_compare_fn<K: Key>(self) -> fn(&[u8], &[u8]) -> Ordering { pub const fn as_compare_fn<K: Key>(self) -> fn(&[u8], &[u8]) -> Ordering {
match self { match self {
Self::Default => std::cmp::Ord::cmp, Self::Default => Ord::cmp,
Self::Number => |left, right| { Self::Number => |left, right| {
let left = <K as Storable>::from_bytes(left); let left = <K as Storable>::from_bytes(left);
let right = <K as Storable>::from_bytes(right); let right = <K as Storable>::from_bytes(right);
std::cmp::Ord::cmp(&left, &right) Ord::cmp(&left, &right)
}, },
Self::Custom(f) => f, Self::Custom(f) => f,
} }

View file

@ -1,94 +1,18 @@
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
//---------------------------------------------------------------------------------------------------- Lints
// Forbid lints.
// Our code, and code generated (e.g macros) cannot overrule these.
#![forbid(
// `unsafe` is allowed but it _must_ be
// commented with `SAFETY: reason`.
clippy::undocumented_unsafe_blocks,
// Never.
unused_unsafe,
redundant_semicolons,
unused_allocation,
coherence_leak_check,
while_true,
clippy::missing_docs_in_private_items,
// Maybe can be put into `#[deny]`.
unconditional_recursion,
for_loops_over_fallibles,
unused_braces,
unused_labels,
keyword_idents,
non_ascii_idents,
variant_size_differences,
single_use_lifetimes,
// Probably can be put into `#[deny]`.
future_incompatible,
let_underscore,
break_with_label_and_loop,
duplicate_macro_attributes,
exported_private_dependencies,
large_assignments,
overlapping_range_endpoints,
semicolon_in_expressions_from_macros,
noop_method_call,
unreachable_pub,
)]
// Deny lints.
// Some of these are `#[allow]`'ed on a per-case basis.
#![deny(
clippy::all,
clippy::correctness,
clippy::suspicious,
clippy::style,
clippy::complexity,
clippy::perf,
clippy::pedantic,
clippy::nursery,
clippy::cargo,
unused_crate_dependencies,
unused_doc_comments,
unused_mut,
missing_docs,
deprecated,
unused_comparisons,
nonstandard_style
)]
#![allow( #![allow(
// FIXME: this lint affects crates outside of // This lint is allowed because the following
// `database/` for some reason, allow for now. // code exists a lot in this crate:
clippy::cargo_common_metadata, //
// ```rust
// FIXME: adding `#[must_use]` onto everything // let env_inner = env.env_inner();
// might just be more annoying than useful... // let tx_rw = env_inner.tx_rw()?;
// although it is sometimes nice. // OpenTables::create_tables(&env_inner, &tx_rw)?;
clippy::must_use_candidate, // ```
//
// FIXME: good lint but too many false positives // Rust thinks `env_inner` can be dropped earlier
// with our `Env` + `RwLock` setup. // but it cannot, we need it for the lifetime of
clippy::significant_drop_tightening, // the database transaction + tables.
clippy::significant_drop_tightening
// FIXME: good lint but is less clear in most cases.
clippy::items_after_statements,
clippy::module_name_repetitions,
clippy::module_inception,
clippy::redundant_pub_crate,
clippy::option_if_let_else,
// unused_crate_dependencies, // false-positive with `paste`
)]
// Allow some lints when running in debug mode.
#![cfg_attr(
debug_assertions,
allow(
clippy::todo,
clippy::multiple_crate_versions,
// unused_crate_dependencies,
)
)] )]
// Allow some lints in tests. // Allow some lints in tests.
#![cfg_attr( #![cfg_attr(

View file

@ -41,3 +41,6 @@ cuprate-test-utils = { path = "../../test-utils" }
tokio = { workspace = true } tokio = { workspace = true }
tempfile = { workspace = true } tempfile = { workspace = true }
hex-literal = { workspace = true } hex-literal = { workspace = true }
[lints]
workspace = true

View file

@ -211,7 +211,7 @@ impl Config {
/// assert_eq!(config.reader_threads, ReaderThreads::default()); /// assert_eq!(config.reader_threads, ReaderThreads::default());
/// ``` /// ```
pub fn new() -> Self { pub fn new() -> Self {
Config { Self {
db_config: DbConfig::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)), db_config: DbConfig::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)),
reader_threads: ReaderThreads::default(), reader_threads: ReaderThreads::default(),
max_txpool_weight: 0, max_txpool_weight: 0,

View file

@ -1,4 +1,8 @@
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
#![allow(
// See `cuprate-database` for reasoning.
clippy::significant_drop_tightening
)]
pub mod config; pub mod config;
mod free; mod free;
@ -13,3 +17,13 @@ pub use free::open;
//re-exports //re-exports
pub use cuprate_database; pub use cuprate_database;
// TODO: remove when used.
use tower as _;
#[cfg(test)]
mod test {
use cuprate_test_utils as _;
use hex_literal as _;
use tempfile as _;
use tokio as _;
}

View file

@ -11,7 +11,7 @@ use crate::{ops::TxPoolWriteError, tables::SpentKeyImages, types::TransactionHas
/// ///
/// # Panics /// # Panics
/// This function will panic if any of the [`Input`]s are not [`Input::ToKey`] /// This function will panic if any of the [`Input`]s are not [`Input::ToKey`]
pub fn add_tx_key_images( pub(super) fn add_tx_key_images(
inputs: &[Input], inputs: &[Input],
tx_hash: &TransactionHash, tx_hash: &TransactionHash,
kis_table: &mut impl DatabaseRw<SpentKeyImages>, kis_table: &mut impl DatabaseRw<SpentKeyImages>,
@ -31,7 +31,7 @@ pub fn add_tx_key_images(
/// ///
/// # Panics /// # Panics
/// This function will panic if any of the [`Input`]s are not [`Input::ToKey`] /// This function will panic if any of the [`Input`]s are not [`Input::ToKey`]
pub fn remove_tx_key_images( pub(super) fn remove_tx_key_images(
inputs: &[Input], inputs: &[Input],
kis_table: &mut impl DatabaseRw<SpentKeyImages>, kis_table: &mut impl DatabaseRw<SpentKeyImages>,
) -> Result<(), RuntimeError> { ) -> Result<(), RuntimeError> {

View file

@ -30,8 +30,8 @@ pub fn init(
let db = Arc::new(crate::open(config)?); let db = Arc::new(crate::open(config)?);
// Spawn the Reader thread pool and Writer. // Spawn the Reader thread pool and Writer.
let readers = init_read_service(db.clone(), reader_threads); let readers = init_read_service(Arc::clone(&db), reader_threads);
let writer = init_write_service(db.clone()); let writer = init_write_service(Arc::clone(&db));
Ok((readers, writer, db)) Ok((readers, writer, db))
} }

View file

@ -25,7 +25,7 @@ use crate::{
/// Should be called _once_ per actual database. /// Should be called _once_ per actual database.
#[cold] #[cold]
#[inline(never)] // Only called once. #[inline(never)] // Only called once.
pub fn init_read_service(env: Arc<ConcreteEnv>, threads: ReaderThreads) -> TxpoolReadHandle { pub(super) fn init_read_service(env: Arc<ConcreteEnv>, threads: ReaderThreads) -> TxpoolReadHandle {
init_read_service_with_pool(env, init_thread_pool(threads)) init_read_service_with_pool(env, init_thread_pool(threads))
} }
@ -35,10 +35,7 @@ pub fn init_read_service(env: Arc<ConcreteEnv>, threads: ReaderThreads) -> Txpoo
/// Should be called _once_ per actual database. /// Should be called _once_ per actual database.
#[cold] #[cold]
#[inline(never)] // Only called once. #[inline(never)] // Only called once.
pub fn init_read_service_with_pool( fn init_read_service_with_pool(env: Arc<ConcreteEnv>, pool: Arc<ThreadPool>) -> TxpoolReadHandle {
env: Arc<ConcreteEnv>,
pool: Arc<ThreadPool>,
) -> TxpoolReadHandle {
DatabaseReadService::new(env, pool, map_request) DatabaseReadService::new(env, pool, map_request)
} }
@ -53,6 +50,7 @@ pub fn init_read_service_with_pool(
/// 1. `Request` is mapped to a handler function /// 1. `Request` is mapped to a handler function
/// 2. Handler function is called /// 2. Handler function is called
/// 3. [`TxpoolReadResponse`] is returned /// 3. [`TxpoolReadResponse`] is returned
#[allow(clippy::needless_pass_by_value)]
fn map_request( fn map_request(
env: &ConcreteEnv, // Access to the database env: &ConcreteEnv, // Access to the database
request: TxpoolReadRequest, // The request we must fulfill request: TxpoolReadRequest, // The request we must fulfill

View file

@ -16,7 +16,7 @@ use crate::{
//---------------------------------------------------------------------------------------------------- init_write_service //---------------------------------------------------------------------------------------------------- init_write_service
/// Initialize the txpool write service from a [`ConcreteEnv`]. /// Initialize the txpool write service from a [`ConcreteEnv`].
pub fn init_write_service(env: Arc<ConcreteEnv>) -> TxpoolWriteHandle { pub(super) fn init_write_service(env: Arc<ConcreteEnv>) -> TxpoolWriteHandle {
DatabaseWriteHandle::init(env, handle_txpool_request) DatabaseWriteHandle::init(env, handle_txpool_request)
} }

View file

@ -35,10 +35,11 @@ bitflags::bitflags! {
pub struct TransactionInfo { pub struct TransactionInfo {
/// The transaction's fee. /// The transaction's fee.
pub fee: u64, pub fee: u64,
/// The transaction`s weight. /// The transaction's weight.
pub weight: usize, pub weight: usize,
/// [`TxStateFlags`] of this transaction. /// [`TxStateFlags`] of this transaction.
pub flags: TxStateFlags, pub flags: TxStateFlags,
#[allow(clippy::pub_underscore_fields)]
/// Explicit padding so that we have no implicit padding bytes in `repr(C)`. /// Explicit padding so that we have no implicit padding bytes in `repr(C)`.
/// ///
/// Allows potential future expansion of this type. /// Allows potential future expansion of this type.
@ -68,21 +69,21 @@ impl From<RawCachedVerificationState> for CachedVerificationState {
fn from(value: RawCachedVerificationState) -> Self { fn from(value: RawCachedVerificationState) -> Self {
// if the hash is all `0`s then there is no hash this is valid at. // if the hash is all `0`s then there is no hash this is valid at.
if value.raw_valid_at_hash == [0; 32] { if value.raw_valid_at_hash == [0; 32] {
return CachedVerificationState::NotVerified; return Self::NotVerified;
} }
let raw_valid_past_timestamp = u64::from_le_bytes(value.raw_valid_past_timestamp); let raw_valid_past_timestamp = u64::from_le_bytes(value.raw_valid_past_timestamp);
// if the timestamp is 0, there is no timestamp that needs to be passed. // if the timestamp is 0, there is no timestamp that needs to be passed.
if raw_valid_past_timestamp == 0 { if raw_valid_past_timestamp == 0 {
return CachedVerificationState::ValidAtHashAndHF { return Self::ValidAtHashAndHF {
block_hash: value.raw_valid_at_hash, block_hash: value.raw_valid_at_hash,
hf: HardFork::from_version(value.raw_hf) hf: HardFork::from_version(value.raw_hf)
.expect("hard-fork values stored in the DB should always be valid"), .expect("hard-fork values stored in the DB should always be valid"),
}; };
} }
CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock { Self::ValidAtHashAndHFWithTimeBasedLock {
block_hash: value.raw_valid_at_hash, block_hash: value.raw_valid_at_hash,
hf: HardFork::from_version(value.raw_hf) hf: HardFork::from_version(value.raw_hf)
.expect("hard-fork values stored in the DB should always be valid"), .expect("hard-fork values stored in the DB should always be valid"),
@ -91,6 +92,7 @@ impl From<RawCachedVerificationState> for CachedVerificationState {
} }
} }
#[allow(clippy::fallible_impl_from)] // only panics in invalid states
impl From<CachedVerificationState> for RawCachedVerificationState { impl From<CachedVerificationState> for RawCachedVerificationState {
fn from(value: CachedVerificationState) -> Self { fn from(value: CachedVerificationState) -> Self {
match value { match value {

View file

@ -23,10 +23,12 @@ bytes = { workspace = true }
curve25519-dalek = { workspace = true } curve25519-dalek = { workspace = true }
monero-serai = { workspace = true } monero-serai = { workspace = true }
serde = { workspace = true, features = ["derive"], optional = true } serde = { workspace = true, features = ["derive"], optional = true }
borsh = { workspace = true, optional = true }
thiserror = { workspace = true } thiserror = { workspace = true }
proptest = { workspace = true, optional = true } proptest = { workspace = true, optional = true }
proptest-derive = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true }
[dev-dependencies] [dev-dependencies]
[lints]
workspace = true

View file

@ -1,76 +1,6 @@
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
//---------------------------------------------------------------------------------------------------- Lints // `proptest` needs this internally.
// Forbid lints. #![cfg_attr(any(feature = "proptest"), allow(non_local_definitions))]
// Our code, and code generated (e.g macros) cannot overrule these.
#![forbid(
// `unsafe` is allowed but it _must_ be
// commented with `SAFETY: reason`.
clippy::undocumented_unsafe_blocks,
// Never.
unused_unsafe,
redundant_semicolons,
unused_allocation,
coherence_leak_check,
single_use_lifetimes,
while_true,
clippy::missing_docs_in_private_items,
// Maybe can be put into `#[deny]`.
unconditional_recursion,
for_loops_over_fallibles,
unused_braces,
unused_doc_comments,
unused_labels,
keyword_idents,
non_ascii_idents,
variant_size_differences,
// Probably can be put into `#[deny]`.
future_incompatible,
let_underscore,
break_with_label_and_loop,
duplicate_macro_attributes,
exported_private_dependencies,
large_assignments,
overlapping_range_endpoints,
semicolon_in_expressions_from_macros,
noop_method_call,
unreachable_pub,
)]
// Deny lints.
// Some of these are `#[allow]`'ed on a per-case basis.
#![deny(
clippy::all,
clippy::correctness,
clippy::suspicious,
clippy::style,
clippy::complexity,
clippy::perf,
clippy::pedantic,
clippy::nursery,
clippy::cargo,
unused_mut,
missing_docs,
deprecated,
unused_comparisons,
nonstandard_style
)]
#![allow(
// FIXME: this lint affects crates outside of
// `database/` for some reason, allow for now.
clippy::cargo_common_metadata,
// FIXME: adding `#[must_use]` onto everything
// might just be more annoying than useful...
// although it is sometimes nice.
clippy::must_use_candidate,
clippy::module_name_repetitions,
clippy::module_inception,
clippy::redundant_pub_crate,
clippy::option_if_let_else,
)]
// Allow some lints when running in debug mode. // Allow some lints when running in debug mode.
#![cfg_attr(debug_assertions, allow(clippy::todo, clippy::multiple_crate_versions))] #![cfg_attr(debug_assertions, allow(clippy::todo, clippy::multiple_crate_versions))]
@ -97,4 +27,5 @@ pub use types::{
//---------------------------------------------------------------------------------------------------- Feature-gated //---------------------------------------------------------------------------------------------------- Feature-gated
#[cfg(feature = "blockchain")] #[cfg(feature = "blockchain")]
pub mod blockchain; pub mod blockchain;
//---------------------------------------------------------------------------------------------------- Private //---------------------------------------------------------------------------------------------------- Private