Merge branch 'main' into rpc-handler

hinto.janai 2024-09-02 16:50:49 -04:00
commit 29587d9bb4
No known key found for this signature in database
GPG key ID: D47CE05FA175A499
68 changed files with 443 additions and 837 deletions

Cargo.lock (generated, 47 lines changed)

@ -17,6 +17,12 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "adler2"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
[[package]]
name = "ahash"
version = "0.8.11"
@ -160,7 +166,7 @@ dependencies = [
"cc",
"cfg-if",
"libc",
"miniz_oxide",
"miniz_oxide 0.7.3",
"object",
"rustc-demangle",
]
@ -646,6 +652,7 @@ version = "0.5.0"
dependencies = [
"bytes",
"cuprate-fixed-bytes",
"cuprate-helper",
"hex",
"paste",
"ref-cast",
@ -713,6 +720,7 @@ version = "0.1.0"
dependencies = [
"bitflags 2.5.0",
"bytes",
"cuprate-helper",
"futures",
"proptest",
"rand",
@ -796,6 +804,7 @@ dependencies = [
"cuprate-helper",
"cuprate-json-rpc",
"cuprate-rpc-types",
"cuprate-test-utils",
"futures",
"paste",
"serde",
@ -811,12 +820,9 @@ version = "0.0.0"
dependencies = [
"cuprate-epee-encoding",
"cuprate-fixed-bytes",
"cuprate-json-rpc",
"cuprate-test-utils",
"cuprate-types",
"monero-serai",
"paste",
"pretty_assertions",
"serde",
"serde_json",
]
@ -873,7 +879,6 @@ dependencies = [
name = "cuprate-types"
version = "0.0.0"
dependencies = [
"borsh",
"bytes",
"cuprate-epee-encoding",
"cuprate-fixed-bytes",
@ -893,6 +898,7 @@ dependencies = [
"bytes",
"cuprate-epee-encoding",
"cuprate-fixed-bytes",
"cuprate-helper",
"cuprate-levin",
"cuprate-types",
"hex",
@ -1140,12 +1146,12 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d"
[[package]]
name = "flate2"
version = "1.0.30"
version = "1.0.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae"
checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253"
dependencies = [
"crc32fast",
"miniz_oxide",
"miniz_oxide 0.8.0",
]
[[package]]
@ -1299,9 +1305,9 @@ dependencies = [
[[package]]
name = "h2"
version = "0.4.5"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab"
checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205"
dependencies = [
"atomic-waker",
"bytes",
@ -1795,6 +1801,15 @@ dependencies = [
"adler",
]
[[package]]
name = "miniz_oxide"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1"
dependencies = [
"adler2",
]
[[package]]
name = "mio"
version = "0.8.11"
@ -2457,9 +2472,9 @@ dependencies = [
[[package]]
name = "rustls-pki-types"
version = "1.7.0"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d"
checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0"
[[package]]
name = "rustls-webpki"
@ -3027,9 +3042,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "ureq"
version = "2.10.0"
version = "2.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72139d247e5f97a3eff96229a7ae85ead5328a39efe76f8bf5a06313d505b6ea"
checksum = "b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a"
dependencies = [
"base64",
"flate2",
@ -3152,9 +3167,9 @@ checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96"
[[package]]
name = "webpki-roots"
version = "0.26.3"
version = "0.26.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd"
checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a"
dependencies = [
"rustls-pki-types",
]


@ -262,6 +262,7 @@ empty_structs_with_brackets = "deny"
empty_enum_variants_with_brackets = "deny"
empty_drop = "deny"
clone_on_ref_ptr = "deny"
upper_case_acronyms = "deny"
# Hot
# inline_always = "deny"
@ -278,13 +279,15 @@ clone_on_ref_ptr = "deny"
# allow_attributes_without_reason = "deny"
# missing_assert_message = "deny"
# missing_docs_in_private_items = "deny"
# undocumented_unsafe_blocks = "deny"
undocumented_unsafe_blocks = "deny"
# multiple_unsafe_ops_per_block = "deny"
# single_char_lifetime_names = "deny"
# wildcard_enum_match_arm = "deny"
[workspace.lints.rust]
# Cold
future_incompatible = { level = "deny", priority = -1 }
nonstandard_style = { level = "deny", priority = -1 }
absolute_paths_not_starting_with_crate = "deny"
explicit_outlives_requirements = "deny"
keyword_idents_2018 = "deny"
@ -305,10 +308,11 @@ ambiguous_glob_imports = "deny"
unused_unsafe = "deny"
# Warm
let_underscore_drop = "deny"
let_underscore = { level = "deny", priority = -1 }
unreachable_pub = "deny"
unused_qualifications = "deny"
variant_size_differences = "deny"
non_camel_case_types = "deny"
# Hot
# unused_results = "deny"
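Un-commenting `undocumented_unsafe_blocks` means every `unsafe` block in the workspace must now carry a `// SAFETY:` comment, the same pattern the `thread` hunks further down already use. A minimal sketch of the shape the lint expects; the function is illustrative and not from the codebase:

    fn first_byte(bytes: &[u8]) -> u8 {
        assert!(!bytes.is_empty());
        // SAFETY: the assert above guarantees the slice has at least one
        // element, so index 0 is in bounds.
        unsafe { *bytes.get_unchecked(0) }
    }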

clippy.toml (new file, 1 line)

@ -0,0 +1 @@
upper-case-acronyms-aggressive = true
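Together with `upper_case_acronyms = "deny"` in the workspace lints above, the aggressive setting makes Clippy flag acronyms written in all caps inside type names, which is what the `RandomXVM` to `RandomXVm` renames in the hunks below satisfy. A small illustrative sketch (type names invented for the example):

    struct RandomXVM; // flagged: contains the all-caps acronym `VM`
    struct RandomXVm; // accepted: acronym written with a single leading capital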


@ -11,7 +11,7 @@ proptest = ["dep:proptest", "dep:proptest-derive", "cuprate-types/proptest"]
rayon = ["dep:rayon"]
[dependencies]
cuprate-helper = { path = "../../helper", default-features = false, features = ["std"] }
cuprate-helper = { path = "../../helper", default-features = false, features = ["std", "cast"] }
cuprate-types = { path = "../../types", default-features = false }
cuprate-cryptonight = {path = "../../cryptonight"}


@ -9,6 +9,8 @@ use proptest::{collection::vec, prelude::*};
use monero_serai::transaction::Output;
use cuprate_helper::cast::u64_to_usize;
use super::*;
use crate::decomposed_amount::DECOMPOSED_AMOUNTS;
@ -164,7 +166,7 @@ prop_compose! {
if timebased || lock_height > 500_000_000 {
Timelock::Time(time_for_time_lock)
} else {
Timelock::Block(usize::try_from(lock_height).unwrap())
Timelock::Block(u64_to_usize(lock_height))
}
}
}
@ -179,7 +181,7 @@ prop_compose! {
match ty {
0 => Timelock::None,
1 => Timelock::Time(time_for_time_lock),
_ => Timelock::Block(usize::try_from(lock_height).unwrap())
_ => Timelock::Block(u64_to_usize(lock_height))
}
}
}


@ -14,7 +14,7 @@ use cuprate_consensus_rules::{
miner_tx::MinerTxError,
ConsensusError,
};
use cuprate_helper::asynch::rayon_spawn_async;
use cuprate_helper::{asynch::rayon_spawn_async, cast::u64_to_usize};
use cuprate_types::{
AltBlockInformation, Chain, ChainId, TransactionVerificationData,
VerifiedTransactionInformation,
@ -24,7 +24,7 @@ use crate::{
block::{free::pull_ordered_transactions, PreparedBlock},
context::{
difficulty::DifficultyCache,
rx_vms::RandomXVM,
rx_vms::RandomXVm,
weight::{self, BlockWeightsCache},
AltChainContextCache, AltChainRequestToken, BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW,
},
@ -101,7 +101,7 @@ where
// Check the alt block timestamp is in the correct range.
if let Some(median_timestamp) =
difficulty_cache.median_timestamp(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW.try_into().unwrap())
difficulty_cache.median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW))
{
check_timestamp(&prepped_block.block, median_timestamp).map_err(ConsensusError::Block)?
};
@ -195,7 +195,7 @@ async fn alt_rx_vm<C>(
parent_chain: Chain,
alt_chain_context: &mut AltChainContextCache,
context_svc: C,
) -> Result<Option<Arc<RandomXVM>>, ExtendedConsensusError>
) -> Result<Option<Arc<RandomXVm>>, ExtendedConsensusError>
where
C: Service<
BlockChainContextRequest,


@ -15,7 +15,7 @@ use cuprate_helper::asynch::rayon_spawn_async;
use crate::{
block::{free::pull_ordered_transactions, PreparedBlock, PreparedBlockExPow},
context::rx_vms::RandomXVM,
context::rx_vms::RandomXVm,
transactions::new_tx_verification_data,
BlockChainContextRequest, BlockChainContextResponse, ExtendedConsensusError,
VerifyBlockResponse,
@ -148,7 +148,7 @@ where
tracing::debug!("New randomX seed in batch, initialising VM");
let new_vm = rayon_spawn_async(move || {
Arc::new(RandomXVM::new(&new_vm_seed).expect("RandomX VM gave an error on set up!"))
Arc::new(RandomXVm::new(&new_vm_seed).expect("RandomX VM gave an error on set up!"))
})
.await;


@ -33,7 +33,7 @@ mod tokens;
use cuprate_types::Chain;
use difficulty::DifficultyCache;
use rx_vms::RandomXVM;
use rx_vms::RandomXVm;
use weight::BlockWeightsCache;
pub(crate) use alt_chains::{sealed::AltChainRequestToken, AltChainContextCache};
@ -236,7 +236,7 @@ pub enum BlockChainContextRequest {
/// seed.
///
/// This should include the seed used to init this VM and the VM.
NewRXVM(([u8; 32], Arc<RandomXVM>)),
NewRXVM(([u8; 32], Arc<RandomXVm>)),
/// A request to add a new block to the cache.
Update(NewBlockData),
/// Pop blocks from the cache to the specified height.
@ -313,7 +313,7 @@ pub enum BlockChainContextResponse {
/// Blockchain context response.
Context(BlockChainContext),
/// A map of seed height to RandomX VMs.
RxVms(HashMap<usize, Arc<RandomXVM>>),
RxVms(HashMap<usize, Arc<RandomXVm>>),
/// A list of difficulties.
BatchDifficulties(Vec<u128>),
/// An alt chain context cache.
@ -321,7 +321,7 @@ pub enum BlockChainContextResponse {
/// A difficulty cache for an alt chain.
AltChainDifficultyCache(DifficultyCache),
/// A randomX VM for an alt chain.
AltChainRxVM(Arc<RandomXVM>),
AltChainRxVM(Arc<RandomXVm>),
/// A weight cache for an alt chain
AltChainWeightCache(BlockWeightsCache),
/// A generic Ok response.


@ -11,7 +11,7 @@ use cuprate_types::{
use crate::{
ExtendedConsensusError,
__private::Database,
context::{difficulty::DifficultyCache, rx_vms::RandomXVM, weight::BlockWeightsCache},
context::{difficulty::DifficultyCache, rx_vms::RandomXVm, weight::BlockWeightsCache},
};
pub(crate) mod sealed {
@ -32,7 +32,7 @@ pub struct AltChainContextCache {
pub difficulty_cache: Option<DifficultyCache>,
/// A cached RX VM.
pub cached_rx_vm: Option<(usize, Arc<RandomXVM>)>,
pub cached_rx_vm: Option<(usize, Arc<RandomXVm>)>,
/// The chain height of the alt chain.
pub chain_height: usize,


@ -9,7 +9,7 @@ use std::{
};
use futures::{stream::FuturesOrdered, StreamExt};
use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VMInner};
use randomx_rs::{RandomXCache, RandomXError, RandomXFlag, RandomXVM as VmInner};
use rayon::prelude::*;
use thread_local::ThreadLocal;
use tower::ServiceExt;
@ -33,16 +33,16 @@ const RX_SEEDS_CACHED: usize = 2;
/// A multithreaded randomX VM.
#[derive(Debug)]
pub struct RandomXVM {
pub struct RandomXVm {
/// These RandomX VMs all share the same cache.
vms: ThreadLocal<VMInner>,
vms: ThreadLocal<VmInner>,
/// The RandomX cache.
cache: RandomXCache,
/// The flags used to start the RandomX VMs.
flags: RandomXFlag,
}
impl RandomXVM {
impl RandomXVm {
/// Create a new multithreaded randomX VM with the provided seed.
pub fn new(seed: &[u8; 32]) -> Result<Self, RandomXError> {
// TODO: allow passing in flags.
@ -50,7 +50,7 @@ impl RandomXVM {
let cache = RandomXCache::new(flags, seed.as_slice())?;
Ok(RandomXVM {
Ok(RandomXVm {
vms: ThreadLocal::new(),
cache,
flags,
@ -58,12 +58,12 @@ impl RandomXVM {
}
}
impl RandomX for RandomXVM {
impl RandomX for RandomXVm {
type Error = RandomXError;
fn calculate_hash(&self, buf: &[u8]) -> Result<[u8; 32], Self::Error> {
self.vms
.get_or_try(|| VMInner::new(self.flags, Some(self.cache.clone()), None))?
.get_or_try(|| VmInner::new(self.flags, Some(self.cache.clone()), None))?
.calculate_hash(buf)
.map(|out| out.try_into().unwrap())
}
@ -72,17 +72,17 @@ impl RandomX for RandomXVM {
/// The randomX VMs cache, keeps the VM needed to calculate the current block's PoW hash (if a VM is needed) and a
/// couple more around this VM.
#[derive(Clone, Debug)]
pub struct RandomXVMCache {
pub struct RandomXVmCache {
/// The top [`RX_SEEDS_CACHED`] RX seeds.
pub(crate) seeds: VecDeque<(usize, [u8; 32])>,
/// The VMs for `seeds` (if after hf 12, otherwise this will be empty).
pub(crate) vms: HashMap<usize, Arc<RandomXVM>>,
pub(crate) vms: HashMap<usize, Arc<RandomXVm>>,
/// A single cached VM that was given to us from a part of Cuprate.
pub(crate) cached_vm: Option<([u8; 32], Arc<RandomXVM>)>,
pub(crate) cached_vm: Option<([u8; 32], Arc<RandomXVm>)>,
}
impl RandomXVMCache {
impl RandomXVmCache {
#[instrument(name = "init_rx_vm_cache", level = "info", skip(database))]
pub async fn init_from_chain_height<D: Database + Clone>(
chain_height: usize,
@ -106,7 +106,7 @@ impl RandomXVMCache {
.map(|(height, seed)| {
(
*height,
Arc::new(RandomXVM::new(seed).expect("Failed to create RandomX VM!")),
Arc::new(RandomXVm::new(seed).expect("Failed to create RandomX VM!")),
)
})
.collect()
@ -117,7 +117,7 @@ impl RandomXVMCache {
HashMap::new()
};
Ok(RandomXVMCache {
Ok(RandomXVmCache {
seeds,
vms,
cached_vm: None,
@ -125,7 +125,7 @@ impl RandomXVMCache {
}
/// Add a randomX VM to the cache, with the seed it was created with.
pub fn add_vm(&mut self, vm: ([u8; 32], Arc<RandomXVM>)) {
pub fn add_vm(&mut self, vm: ([u8; 32], Arc<RandomXVm>)) {
self.cached_vm.replace(vm);
}
@ -136,7 +136,7 @@ impl RandomXVMCache {
height: usize,
chain: Chain,
database: D,
) -> Result<Arc<RandomXVM>, ExtendedConsensusError> {
) -> Result<Arc<RandomXVm>, ExtendedConsensusError> {
let seed_height = randomx_seed_height(height);
let BlockchainResponse::BlockHash(seed_hash) = database
@ -156,13 +156,13 @@ impl RandomXVMCache {
}
}
let alt_vm = rayon_spawn_async(move || Arc::new(RandomXVM::new(&seed_hash).unwrap())).await;
let alt_vm = rayon_spawn_async(move || Arc::new(RandomXVm::new(&seed_hash).unwrap())).await;
Ok(alt_vm)
}
/// Get the main-chain RandomX VMs.
pub async fn get_vms(&mut self) -> HashMap<usize, Arc<RandomXVM>> {
pub async fn get_vms(&mut self) -> HashMap<usize, Arc<RandomXVm>> {
match self.seeds.len().checked_sub(self.vms.len()) {
// No difference in the amount of seeds to VMs.
Some(0) => (),
@ -184,7 +184,7 @@ impl RandomXVMCache {
}
};
rayon_spawn_async(move || Arc::new(RandomXVM::new(&next_seed_hash).unwrap()))
rayon_spawn_async(move || Arc::new(RandomXVm::new(&next_seed_hash).unwrap()))
.await
};
@ -200,7 +200,7 @@ impl RandomXVMCache {
seeds_clone
.par_iter()
.map(|(height, seed)| {
let vm = RandomXVM::new(seed).expect("Failed to create RandomX VM!");
let vm = RandomXVm::new(seed).expect("Failed to create RandomX VM!");
let vm = Arc::new(vm);
(*height, vm)
})


@ -9,6 +9,7 @@ use tower::ServiceExt;
use tracing::Instrument;
use cuprate_consensus_rules::blocks::ContextToVerifyBlock;
use cuprate_helper::cast::u64_to_usize;
use cuprate_types::{
blockchain::{BlockchainReadRequest, BlockchainResponse},
Chain,
@ -45,7 +46,7 @@ pub struct ContextTask<D: Database> {
/// The weight cache.
weight_cache: weight::BlockWeightsCache,
/// The RX VM cache.
rx_vm_cache: rx_vms::RandomXVMCache,
rx_vm_cache: rx_vms::RandomXVmCache,
/// The hard-fork state cache.
hardfork_state: hardforks::HardForkState,
@ -127,7 +128,7 @@ impl<D: Database + Clone + Send + 'static> ContextTask<D> {
let db = database.clone();
let rx_seed_handle = tokio::spawn(async move {
rx_vms::RandomXVMCache::init_from_chain_height(chain_height, &current_hf, db).await
rx_vms::RandomXVmCache::init_from_chain_height(chain_height, &current_hf, db).await
});
let context_svc = ContextTask {
@ -168,9 +169,9 @@ impl<D: Database + Clone + Send + 'static> ContextTask<D> {
.weight_cache
.effective_median_block_weight(&current_hf),
top_hash: self.top_block_hash,
median_block_timestamp: self.difficulty_cache.median_timestamp(
usize::try_from(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW).unwrap(),
),
median_block_timestamp: self
.difficulty_cache
.median_timestamp(u64_to_usize(BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW)),
chain_height: self.chain_height,
current_hf,
next_difficulty: self.difficulty_cache.next_difficulty(&current_hf),


@ -9,7 +9,7 @@ use cuprate_consensus_rules::{
};
use crate::{
context::rx_vms::{get_last_rx_seed_heights, RandomXVMCache},
context::rx_vms::{get_last_rx_seed_heights, RandomXVmCache},
tests::mock_db::*,
};
@ -42,7 +42,7 @@ fn rx_heights_consistent() {
async fn rx_vm_created_on_hf_12() {
let db = DummyDatabaseBuilder::default().finish(Some(10));
let mut cache = RandomXVMCache::init_from_chain_height(10, &HardFork::V11, db)
let mut cache = RandomXVmCache::init_from_chain_height(10, &HardFork::V11, db)
.await
.unwrap();
@ -67,7 +67,7 @@ proptest! {
let rt = Builder::new_multi_thread().enable_all().build().unwrap();
rt.block_on(async move {
let cache = RandomXVMCache::init_from_chain_height(10, &hf, db).await.unwrap();
let cache = RandomXVmCache::init_from_chain_height(10, &hf, db).await.unwrap();
assert!(cache.seeds.len() == cache.vms.len() || hf < HardFork::V12);
});
}


@ -10,14 +10,15 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/consensus"
[features]
# All features on by default.
default = ["std", "atomic", "asynch", "fs", "num", "map", "time", "thread", "constants"]
default = ["std", "atomic", "asynch", "cast", "fs", "num", "map", "time", "thread", "constants"]
std = []
atomic = ["dep:crossbeam"]
asynch = ["dep:futures", "dep:rayon"]
cast = []
constants = []
fs = ["dep:dirs"]
num = []
map = ["dep:monero-serai"]
map = ["cast", "dep:monero-serai"]
time = ["dep:chrono", "std"]
thread = ["std", "dep:target_os_lib"]
@ -39,3 +40,6 @@ target_os_lib = { package = "libc", version = "0.2.151", optional = true }
[dev-dependencies]
tokio = { workspace = true, features = ["full"] }
[lints]
workspace = true


@ -19,7 +19,7 @@ pub struct InfallibleOneshotReceiver<T>(oneshot::Receiver<T>);
impl<T> From<oneshot::Receiver<T>> for InfallibleOneshotReceiver<T> {
fn from(value: oneshot::Receiver<T>) -> Self {
InfallibleOneshotReceiver(value)
Self(value)
}
}
@ -43,7 +43,7 @@ where
{
let (tx, rx) = oneshot::channel();
rayon::spawn(move || {
let _ = tx.send(f());
drop(tx.send(f()));
});
rx.await.expect("The sender must not be dropped")
}
@ -62,7 +62,7 @@ mod test {
#[tokio::test]
// Assert that basic channel operations work.
async fn infallible_oneshot_receiver() {
let (tx, rx) = futures::channel::oneshot::channel::<String>();
let (tx, rx) = oneshot::channel::<String>();
let msg = "hello world!".to_string();
tx.send(msg.clone()).unwrap();
@ -84,7 +84,7 @@ mod test {
let barrier = Arc::new(Barrier::new(2));
let task = |barrier: &Barrier| barrier.wait();
let b_2 = barrier.clone();
let b_2 = Arc::clone(&barrier);
let (tx, rx) = std::sync::mpsc::channel();


@ -49,6 +49,8 @@ pub type AtomicF64 = AtomicCell<f64>;
//---------------------------------------------------------------------------------------------------- TESTS
#[cfg(test)]
mod tests {
#![allow(clippy::float_cmp)]
use super::*;
#[test]

helper/src/cast.rs (new file, 84 lines)

@ -0,0 +1,84 @@
//! Casting.
//!
//! This module provides utilities for casting between types.
//!
//! `#[no_std]` compatible.
#[rustfmt::skip]
//============================ SAFETY: DO NOT REMOVE ===========================//
// //
// //
// Only allow building 64-bit targets. //
// This allows us to assume 64-bit invariants in this file. //
#[cfg(not(target_pointer_width = "64"))]
compile_error!("Cuprate is only compatible with 64-bit CPUs");
// //
// //
//============================ SAFETY: DO NOT REMOVE ===========================//
//---------------------------------------------------------------------------------------------------- Free functions
/// Cast [`u64`] to [`usize`].
#[inline(always)]
pub const fn u64_to_usize(u: u64) -> usize {
u as usize
}
/// Cast [`u32`] to [`usize`].
#[inline(always)]
pub const fn u32_to_usize(u: u32) -> usize {
u as usize
}
/// Cast [`usize`] to [`u64`].
#[inline(always)]
pub const fn usize_to_u64(u: usize) -> u64 {
u as u64
}
/// Cast [`i64`] to [`isize`].
#[inline(always)]
pub const fn i64_to_isize(i: i64) -> isize {
i as isize
}
/// Cast [`i32`] to [`isize`].
#[inline(always)]
pub const fn i32_to_isize(i: i32) -> isize {
i as isize
}
/// Cast [`isize`] to [`i64`].
#[inline(always)]
pub const fn isize_to_i64(i: isize) -> i64 {
i as i64
}
//---------------------------------------------------------------------------------------------------- Tests
#[cfg(test)]
mod test {
use super::*;
#[test]
fn max_unsigned() {
assert_eq!(u32_to_usize(u32::MAX), u32::MAX as usize);
assert_eq!(usize_to_u64(u32_to_usize(u32::MAX)), u32::MAX as u64);
assert_eq!(u64_to_usize(u64::MAX), usize::MAX);
assert_eq!(usize_to_u64(u64_to_usize(u64::MAX)), u64::MAX);
assert_eq!(usize_to_u64(usize::MAX), u64::MAX);
assert_eq!(u64_to_usize(usize_to_u64(usize::MAX)), usize::MAX);
}
#[test]
fn max_signed() {
assert_eq!(i32_to_isize(i32::MAX), i32::MAX as isize);
assert_eq!(isize_to_i64(i32_to_isize(i32::MAX)), i32::MAX as i64);
assert_eq!(i64_to_isize(i64::MAX), isize::MAX);
assert_eq!(isize_to_i64(i64_to_isize(i64::MAX)), i64::MAX);
assert_eq!(isize_to_i64(isize::MAX), i64::MAX);
assert_eq!(i64_to_isize(isize_to_i64(isize::MAX)), isize::MAX);
}
}
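Because the `compile_error!` guard above limits builds to 64-bit targets, `usize` and `u64` (and `isize`/`i64`) have the same width, so these casts are lossless; that is what lets the call sites in this diff replace `usize::try_from(x).unwrap()` and `.try_into()?` with plain helper calls. A minimal usage sketch, assuming the `cast` feature of `cuprate-helper` is enabled; the values are illustrative:

    use cuprate_helper::cast::{u64_to_usize, usize_to_u64};

    fn main() {
        let height: u64 = 3_000_000;
        // Previously written as `usize::try_from(height).unwrap()`.
        let index: usize = u64_to_usize(height);

        let len: usize = 1024;
        // Previously written as `len.try_into()?` before writing a length on the wire.
        let wire_len: u64 = usize_to_u64(len);

        assert_eq!(usize_to_u64(index), height);
        assert_eq!(u64_to_usize(wire_len), len);
    }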


@ -190,72 +190,41 @@ mod test {
// - It must `ends_with()` the expected end PATH for the OS
#[test]
fn path_sanity_check() {
assert!(CUPRATE_CACHE_DIR.is_absolute());
assert!(CUPRATE_CONFIG_DIR.is_absolute());
assert!(CUPRATE_DATA_DIR.is_absolute());
assert!(CUPRATE_BLOCKCHAIN_DIR.is_absolute());
// Array of (PATH, expected_path_as_string).
//
// The different OS's will set the expected path below.
let mut array = [
(&*CUPRATE_CACHE_DIR, ""),
(&*CUPRATE_CONFIG_DIR, ""),
(&*CUPRATE_DATA_DIR, ""),
(&*CUPRATE_BLOCKCHAIN_DIR, ""),
(&*CUPRATE_TXPOOL_DIR, ""),
];
if cfg!(target_os = "windows") {
let dir = &*CUPRATE_CACHE_DIR;
println!("cuprate_cache_dir: {dir:?}");
assert!(dir.ends_with(r"AppData\Local\Cuprate"));
let dir = &*CUPRATE_CONFIG_DIR;
println!("cuprate_config_dir: {dir:?}");
assert!(dir.ends_with(r"AppData\Roaming\Cuprate"));
let dir = &*CUPRATE_DATA_DIR;
println!("cuprate_data_dir: {dir:?}");
assert!(dir.ends_with(r"AppData\Roaming\Cuprate"));
let dir = &*CUPRATE_BLOCKCHAIN_DIR;
println!("cuprate_blockchain_dir: {dir:?}");
assert!(dir.ends_with(r"AppData\Roaming\Cuprate\blockchain"));
let dir = &*CUPRATE_TXPOOL_DIR;
println!("cuprate_txpool_dir: {dir:?}");
assert!(dir.ends_with(r"AppData\Roaming\Cuprate\txpool"));
array[0].1 = r"AppData\Local\Cuprate";
array[1].1 = r"AppData\Roaming\Cuprate";
array[2].1 = r"AppData\Roaming\Cuprate";
array[3].1 = r"AppData\Roaming\Cuprate\blockchain";
array[4].1 = r"AppData\Roaming\Cuprate\txpool";
} else if cfg!(target_os = "macos") {
let dir = &*CUPRATE_CACHE_DIR;
println!("cuprate_cache_dir: {dir:?}");
assert!(dir.ends_with("Library/Caches/Cuprate"));
let dir = &*CUPRATE_CONFIG_DIR;
println!("cuprate_config_dir: {dir:?}");
assert!(dir.ends_with("Library/Application Support/Cuprate"));
let dir = &*CUPRATE_DATA_DIR;
println!("cuprate_data_dir: {dir:?}");
assert!(dir.ends_with("Library/Application Support/Cuprate"));
let dir = &*CUPRATE_BLOCKCHAIN_DIR;
println!("cuprate_blockchain_dir: {dir:?}");
assert!(dir.ends_with("Library/Application Support/Cuprate/blockchain"));
let dir = &*CUPRATE_TXPOOL_DIR;
println!("cuprate_txpool_dir: {dir:?}");
assert!(dir.ends_with("Library/Application Support/Cuprate/txpool"));
array[0].1 = "Library/Caches/Cuprate";
array[1].1 = "Library/Application Support/Cuprate";
array[2].1 = "Library/Application Support/Cuprate";
array[3].1 = "Library/Application Support/Cuprate/blockchain";
array[4].1 = "Library/Application Support/Cuprate/txpool";
} else {
// Assumes Linux.
let dir = &*CUPRATE_CACHE_DIR;
println!("cuprate_cache_dir: {dir:?}");
assert!(dir.ends_with(".cache/cuprate"));
array[0].1 = ".cache/cuprate";
array[1].1 = ".config/cuprate";
array[2].1 = ".local/share/cuprate";
array[3].1 = ".local/share/cuprate/blockchain";
array[4].1 = ".local/share/cuprate/txpool";
};
let dir = &*CUPRATE_CONFIG_DIR;
println!("cuprate_config_dir: {dir:?}");
assert!(dir.ends_with(".config/cuprate"));
let dir = &*CUPRATE_DATA_DIR;
println!("cuprate_data_dir: {dir:?}");
assert!(dir.ends_with(".local/share/cuprate"));
let dir = &*CUPRATE_BLOCKCHAIN_DIR;
println!("cuprate_blockchain_dir: {dir:?}");
assert!(dir.ends_with(".local/share/cuprate/blockchain"));
let dir = &*CUPRATE_TXPOOL_DIR;
println!("cuprate_txpool_dir: {dir:?}");
assert!(dir.ends_with(".local/share/cuprate/txpool"));
for (path, expected) in array {
assert!(path.is_absolute());
assert!(path.ends_with(expected));
}
}
}


@ -1,36 +1,4 @@
#![doc = include_str!("../README.md")]
//---------------------------------------------------------------------------------------------------- Lints
#![allow(clippy::len_zero, clippy::type_complexity, clippy::module_inception)]
#![deny(nonstandard_style, deprecated, missing_docs, unused_mut)]
#![forbid(
unused_unsafe,
future_incompatible,
break_with_label_and_loop,
coherence_leak_check,
duplicate_macro_attributes,
exported_private_dependencies,
for_loops_over_fallibles,
large_assignments,
overlapping_range_endpoints,
// private_in_public,
semicolon_in_expressions_from_macros,
redundant_semicolons,
unconditional_recursion,
unreachable_patterns,
unused_allocation,
unused_braces,
unused_comparisons,
unused_doc_comments,
unused_parens,
unused_labels,
while_true,
keyword_idents,
non_ascii_idents,
noop_method_call,
unreachable_pub,
single_use_lifetimes,
// variant_size_differences,
)]
#![cfg_attr(not(feature = "std"), no_std)]
//---------------------------------------------------------------------------------------------------- Public API
@ -40,6 +8,9 @@ pub mod asynch; // async collides
#[cfg(feature = "atomic")]
pub mod atomic;
#[cfg(feature = "cast")]
pub mod cast;
#[cfg(feature = "constants")]
pub mod constants;


@ -7,6 +7,8 @@
//---------------------------------------------------------------------------------------------------- Use
use monero_serai::transaction::Timelock;
use crate::cast::{u64_to_usize, usize_to_u64};
//---------------------------------------------------------------------------------------------------- `(u64, u64) <-> u128`
/// Split a [`u128`] value into 2 64-bit values.
///
@ -27,6 +29,7 @@ use monero_serai::transaction::Timelock;
/// ```
#[inline]
pub const fn split_u128_into_low_high_bits(value: u128) -> (u64, u64) {
#[allow(clippy::cast_possible_truncation)]
(value as u64, (value >> 64) as u64)
}
@ -58,7 +61,7 @@ pub const fn combine_low_high_bits_to_u128(low_bits: u64, high_bits: u64) -> u12
/// Map a [`u64`] to a [`Timelock`].
///
/// Height/time is not differentiated via type, but rather:
/// "height is any value less than 500_000_000 and timestamp is any value above"
/// "height is any value less than `500_000_000` and timestamp is any value above"
/// so the `u64/usize` is stored without any tag.
///
/// See [`timelock_to_u64`] for the inverse function.
@ -77,7 +80,7 @@ pub fn u64_to_timelock(u: u64) -> Timelock {
if u == 0 {
Timelock::None
} else if u < 500_000_000 {
Timelock::Block(usize::try_from(u).unwrap())
Timelock::Block(u64_to_usize(u))
} else {
Timelock::Time(u)
}
@ -97,7 +100,7 @@ pub fn u64_to_timelock(u: u64) -> Timelock {
pub fn timelock_to_u64(timelock: Timelock) -> u64 {
match timelock {
Timelock::None => 0,
Timelock::Block(u) => u64::try_from(u).unwrap(),
Timelock::Block(u) => usize_to_u64(u),
Timelock::Time(u) => u,
}
}
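Since the mapping distinguishes height from time only by the `500_000_000` threshold described above, here is a short round-trip sketch; it assumes the `map` feature of `cuprate-helper` and uses illustrative values:

    use cuprate_helper::map::{timelock_to_u64, u64_to_timelock};
    use monero_serai::transaction::Timelock;

    fn main() {
        assert!(matches!(u64_to_timelock(0), Timelock::None));
        // Below the threshold: interpreted as a block height.
        assert!(matches!(u64_to_timelock(3_000_000), Timelock::Block(3_000_000)));
        // At or above the threshold: interpreted as a UNIX timestamp.
        assert!(matches!(u64_to_timelock(1_700_000_000), Timelock::Time(1_700_000_000)));
        // The round trip is the identity for values in either range.
        assert_eq!(timelock_to_u64(u64_to_timelock(42)), 42);
    }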


@ -30,11 +30,11 @@ pub enum Network {
impl Network {
/// Returns the network ID for the current network.
pub fn network_id(&self) -> [u8; 16] {
pub const fn network_id(&self) -> [u8; 16] {
match self {
Network::Mainnet => MAINNET_NETWORK_ID,
Network::Testnet => TESTNET_NETWORK_ID,
Network::Stagenet => STAGENET_NETWORK_ID,
Self::Mainnet => MAINNET_NETWORK_ID,
Self::Testnet => TESTNET_NETWORK_ID,
Self::Stagenet => STAGENET_NETWORK_ID,
}
}
}


@ -89,8 +89,9 @@ where
/// assert_eq!(median(vec), 5);
/// ```
///
/// # Safety
/// # Invariant
/// If not sorted the output will be invalid.
#[allow(clippy::debug_assert_with_mut_call)]
pub fn median<T>(array: impl AsRef<[T]>) -> T
where
T: Add<Output = T>


@ -28,10 +28,10 @@ macro_rules! impl_thread_percent {
$(
$(#[$doc])*
pub fn $fn_name() -> NonZeroUsize {
// SAFETY:
// unwrap here is okay because:
// - THREADS().get() is always non-zero
// - max() guards against 0
#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss, clippy::cast_precision_loss)]
NonZeroUsize::new(max(1, (threads().get() as f64 * $percent).floor() as usize)).unwrap()
}
)*
@ -58,10 +58,10 @@ impl_thread_percent! {
/// Originally from <https://docs.rs/lpt>.
///
/// # Windows
/// Uses SetThreadPriority() with THREAD_PRIORITY_IDLE (-15).
/// Uses `SetThreadPriority()` with `THREAD_PRIORITY_IDLE` (-15).
///
/// # Unix
/// Uses libc::nice() with the max nice level.
/// Uses `libc::nice()` with the max nice level.
///
/// On macOS and *BSD: +20
/// On Linux: +19
@ -74,7 +74,7 @@ pub fn low_priority_thread() {
// SAFETY: calling C.
// We are _lowering_ our priority, not increasing, so this function should never fail.
unsafe {
let _ = SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_IDLE);
drop(SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_IDLE));
}
}
@ -87,7 +87,7 @@ pub fn low_priority_thread() {
// SAFETY: calling C.
// We are _lowering_ our priority, not increasing, so this function should never fail.
unsafe {
let _ = libc::nice(NICE_MAX);
libc::nice(NICE_MAX);
}
}
}


@ -129,6 +129,7 @@ pub const fn secs_to_clock(seconds: u32) -> (u8, u8, u8) {
debug_assert!(m < 60);
debug_assert!(s < 60);
#[allow(clippy::cast_possible_truncation)] // checked above
(h as u8, m, s)
}
@ -153,6 +154,7 @@ pub fn time() -> u32 {
///
/// This is guaranteed to return a value between `0..=86399`
pub fn time_utc() -> u32 {
#[allow(clippy::cast_sign_loss)] // checked in function calls
unix_clock(chrono::offset::Local::now().timestamp() as u64)
}


@ -15,6 +15,7 @@ default = ["std"]
std = ["dep:thiserror", "bytes/std", "cuprate-fixed-bytes/std"]
[dependencies]
cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] }
cuprate-fixed-bytes = { path = "../fixed-bytes", default-features = false }
paste = "1.0.14"


@ -65,6 +65,8 @@ use core::{ops::Deref, str::from_utf8 as str_from_utf8};
use bytes::{Buf, BufMut, Bytes, BytesMut};
use cuprate_helper::cast::{u64_to_usize, usize_to_u64};
pub mod container_as_blob;
pub mod error;
mod io;
@ -242,7 +244,7 @@ pub fn write_bytes<T: AsRef<[u8]>, B: BufMut>(t: T, w: &mut B) -> Result<()> {
let bytes = t.as_ref();
let len = bytes.len();
write_varint(len.try_into()?, w)?;
write_varint(usize_to_u64(len), w)?;
if w.remaining_mut() < len {
return Err(Error::IO("Not enough capacity to write bytes"));
@ -286,7 +288,7 @@ where
I: Iterator<Item = T> + ExactSizeIterator,
B: BufMut,
{
write_varint(iterator.len().try_into()?, w)?;
write_varint(usize_to_u64(iterator.len()), w)?;
for item in iterator.into_iter() {
item.write(w)?;
}
@ -334,7 +336,7 @@ fn skip_epee_value<B: Buf>(r: &mut B, skipped_objects: &mut u8) -> Result<()> {
if let Some(size) = marker.inner_marker.size() {
let bytes_to_skip = size
.checked_mul(len.try_into()?)
.checked_mul(u64_to_usize(len))
.ok_or(Error::Value("List is too big".to_string()))?;
return advance(bytes_to_skip, r);
};
@ -352,8 +354,8 @@ fn skip_epee_value<B: Buf>(r: &mut B, skipped_objects: &mut u8) -> Result<()> {
| InnerMarker::U8
| InnerMarker::Bool => unreachable!("These types are constant size."),
InnerMarker::String => {
let len = read_varint(r)?;
advance(len.try_into()?, r)?;
let len = u64_to_usize(read_varint(r)?);
advance(len, r)?;
}
InnerMarker::Object => {
*skipped_objects += 1;


@ -7,6 +7,7 @@ use core::fmt::Debug;
use bytes::{Buf, BufMut, Bytes, BytesMut};
use cuprate_fixed_bytes::{ByteArray, ByteArrayVec};
use cuprate_helper::cast::u64_to_usize;
use crate::{
io::{checked_read_primitive, checked_write_primitive},
@ -66,11 +67,11 @@ impl<T: EpeeObject> EpeeValue for Vec<T> {
"Marker is not sequence when a sequence was expected",
));
}
let len = read_varint(r)?;
let len = u64_to_usize(read_varint(r)?);
let individual_marker = Marker::new(marker.inner_marker);
let mut res = Vec::with_capacity(len.try_into()?);
let mut res = Vec::with_capacity(len);
for _ in 0..len {
res.push(T::read(r, &individual_marker)?);
}
@ -167,11 +168,13 @@ impl EpeeValue for Vec<u8> {
return Err(Error::Format("Byte array exceeded max length"));
}
if r.remaining() < len.try_into()? {
let len = u64_to_usize(len);
if r.remaining() < len {
return Err(Error::IO("Not enough bytes to fill object"));
}
let mut res = vec![0; len.try_into()?];
let mut res = vec![0; len];
r.copy_to_slice(&mut res);
Ok(res)
@ -203,11 +206,13 @@ impl EpeeValue for Bytes {
return Err(Error::Format("Byte array exceeded max length"));
}
if r.remaining() < len.try_into()? {
let len = u64_to_usize(len);
if r.remaining() < len {
return Err(Error::IO("Not enough bytes to fill object"));
}
Ok(r.copy_to_bytes(len.try_into()?))
Ok(r.copy_to_bytes(len))
}
fn epee_default_value() -> Option<Self> {
@ -236,11 +241,13 @@ impl EpeeValue for BytesMut {
return Err(Error::Format("Byte array exceeded max length"));
}
if r.remaining() < len.try_into()? {
let len = u64_to_usize(len);
if r.remaining() < len {
return Err(Error::IO("Not enough bytes to fill object"));
}
let mut bytes = BytesMut::zeroed(len.try_into()?);
let mut bytes = BytesMut::zeroed(len);
r.copy_to_slice(&mut bytes);
Ok(bytes)
@ -272,11 +279,13 @@ impl<const N: usize> EpeeValue for ByteArrayVec<N> {
return Err(Error::Format("Byte array exceeded max length"));
}
if r.remaining() < usize::try_from(len)? {
let len = u64_to_usize(len);
if r.remaining() < len {
return Err(Error::IO("Not enough bytes to fill object"));
}
ByteArrayVec::try_from(r.copy_to_bytes(usize::try_from(len)?))
ByteArrayVec::try_from(r.copy_to_bytes(len))
.map_err(|_| Error::Format("Field has invalid length"))
}
@ -302,7 +311,7 @@ impl<const N: usize> EpeeValue for ByteArray<N> {
return Err(Error::Format("Marker does not match expected Marker"));
}
let len: usize = read_varint(r)?.try_into()?;
let len = u64_to_usize(read_varint(r)?);
if len != N {
return Err(Error::Format("Byte array has incorrect length"));
}
@ -370,11 +379,11 @@ impl<const N: usize> EpeeValue for Vec<[u8; N]> {
));
}
let len = read_varint(r)?;
let len = u64_to_usize(read_varint(r)?);
let individual_marker = Marker::new(marker.inner_marker);
let mut res = Vec::with_capacity(len.try_into()?);
let mut res = Vec::with_capacity(len);
for _ in 0..len {
res.push(<[u8; N]>::read(r, &individual_marker)?);
}
@ -406,11 +415,11 @@ macro_rules! epee_seq {
));
}
let len = read_varint(r)?;
let len = u64_to_usize(read_varint(r)?);
let individual_marker = Marker::new(marker.inner_marker.clone());
let mut res = Vec::with_capacity(len.try_into()?);
let mut res = Vec::with_capacity(len);
for _ in 0..len {
res.push(<$val>::read(r, &individual_marker)?);
}


@ -9,12 +9,12 @@ epee_object!(
a: u8,
);
struct TT {
struct T2 {
a: u8,
}
epee_object!(
TT,
T2,
a: u8 = 0,
);
@ -35,5 +35,5 @@ fn duplicate_key_with_default() {
b'a', 0x0B, 0x00,
];
assert!(from_bytes::<TT, _>(&mut &data[..]).is_err());
assert!(from_bytes::<T2, _>(&mut &data[..]).is_err());
}


@ -12,6 +12,8 @@ default = []
tracing = ["dep:tracing", "tokio-util/tracing"]
[dependencies]
cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] }
thiserror = { workspace = true }
bytes = { workspace = true, features = ["std"] }
bitflags = { workspace = true }


@ -20,6 +20,8 @@ use std::{fmt::Debug, marker::PhantomData};
use bytes::{Buf, BufMut, BytesMut};
use tokio_util::codec::{Decoder, Encoder};
use cuprate_helper::cast::u64_to_usize;
use crate::{
header::{Flags, HEADER_SIZE},
message::{make_dummy_message, LevinMessage},
@ -114,10 +116,7 @@ impl<C: LevinCommand + Debug> Decoder for LevinBucketCodec<C> {
std::mem::replace(&mut self.state, LevinBucketState::WaitingForBody(head));
}
LevinBucketState::WaitingForBody(head) => {
let body_len = head
.size
.try_into()
.map_err(|_| BucketError::BucketExceededMaxSize)?;
let body_len = u64_to_usize(head.size);
if src.len() < body_len {
src.reserve(body_len - src.len());
return Ok(None);
@ -255,13 +254,11 @@ impl<T: LevinBody> Decoder for LevinMessageCodec<T> {
continue;
};
let max_size = if self.bucket_codec.handshake_message_seen {
let max_size = u64_to_usize(if self.bucket_codec.handshake_message_seen {
self.bucket_codec.protocol.max_packet_size
} else {
self.bucket_codec.protocol.max_packet_size_before_handshake
}
.try_into()
.expect("Levin max message size is too large, does not fit into a usize.");
});
if bytes.len().saturating_add(bucket.body.len()) > max_size {
return Err(BucketError::InvalidFragmentedMessage(
@ -300,12 +297,7 @@ impl<T: LevinBody> Decoder for LevinMessageCodec<T> {
}
// Check the fragmented message contains enough bytes to build the message.
if bytes.len().saturating_sub(HEADER_SIZE)
< header
.size
.try_into()
.map_err(|_| BucketError::BucketExceededMaxSize)?
{
if bytes.len().saturating_sub(HEADER_SIZE) < u64_to_usize(header.size) {
return Err(BucketError::InvalidFragmentedMessage(
"Fragmented message does not have enough bytes to fill bucket body",
));


@ -38,6 +38,8 @@ use std::fmt::Debug;
use bytes::{Buf, Bytes};
use thiserror::Error;
use cuprate_helper::cast::usize_to_u64;
pub mod codec;
pub mod header;
pub mod message;
@ -212,7 +214,7 @@ impl<C: LevinCommand> BucketBuilder<C> {
Bucket {
header: BucketHead {
signature: self.signature.unwrap(),
size: body.len().try_into().unwrap(),
size: usize_to_u64(body.len()),
have_to_return_data: ty.have_to_return_data(),
command: self.command.unwrap(),
return_code: self.return_code.unwrap(),


@ -5,6 +5,8 @@
//! for more control over what is actually sent over the wire at certain times.
use bytes::{Bytes, BytesMut};
use cuprate_helper::cast::usize_to_u64;
use crate::{
header::{Flags, HEADER_SIZE},
Bucket, BucketBuilder, BucketError, BucketHead, LevinBody, LevinCommand, Protocol,
@ -106,9 +108,7 @@ pub fn make_fragmented_messages<T: LevinBody>(
new_body.resize(fragment_size - HEADER_SIZE, 0);
bucket.body = new_body.freeze();
bucket.header.size = (fragment_size - HEADER_SIZE)
.try_into()
.expect("Bucket size does not fit into u64");
bucket.header.size = usize_to_u64(fragment_size - HEADER_SIZE);
}
return Ok(vec![bucket]);
@ -118,9 +118,7 @@ pub fn make_fragmented_messages<T: LevinBody>(
// The first fragment will set the START flag, the last will set the END flag.
let fragment_head = BucketHead {
signature: protocol.signature,
size: (fragment_size - HEADER_SIZE)
.try_into()
.expect("Bucket size does not fit into u64"),
size: usize_to_u64(fragment_size - HEADER_SIZE),
have_to_return_data: false,
// Just use a default command.
command: T::Command::from(0),
@ -191,7 +189,7 @@ pub(crate) fn make_dummy_message<T: LevinCommand>(protocol: &Protocol, size: usi
// A header to put on the dummy message.
let header = BucketHead {
signature: protocol.signature,
size: size.try_into().expect("Bucket size does not fit into u64"),
size: usize_to_u64(size),
have_to_return_data: false,
// Just use a default command.
command: T::from(0),


@ -8,6 +8,8 @@ use tokio::{
};
use tokio_util::codec::{FramedRead, FramedWrite};
use cuprate_helper::cast::u64_to_usize;
use cuprate_levin::{
message::make_fragmented_messages, BucketBuilder, BucketError, LevinBody, LevinCommand,
LevinMessageCodec, MessageType, Protocol,
@ -54,7 +56,7 @@ impl LevinBody for TestBody {
_: MessageType,
_: Self::Command,
) -> Result<Self, BucketError> {
let size = body.get_u64_le().try_into().unwrap();
let size = u64_to_usize(body.get_u64_le());
// bucket
Ok(TestBody::Bytes(size, body.copy_to_bytes(size)))
}


@ -15,6 +15,7 @@ cuprate-levin = { path = "../levin" }
cuprate-epee-encoding = { path = "../epee-encoding" }
cuprate-fixed-bytes = { path = "../fixed-bytes" }
cuprate-types = { path = "../../types", default-features = false, features = ["epee"] }
cuprate-helper = { path = "../../helper", default-features = false, features = ["cast"] }
bitflags = { workspace = true, features = ["std"] }
bytes = { workspace = true, features = ["std"] }


@ -99,7 +99,7 @@ impl LevinCommandTrait for LevinCommand {
LevinCommand::FluffyMissingTxsRequest => 1024 * 1024, // 1 MB
LevinCommand::GetTxPoolCompliment => 1024 * 1024 * 4, // 4 MB
LevinCommand::Unknown(_) => usize::MAX.try_into().unwrap_or(u64::MAX),
LevinCommand::Unknown(_) => u64::MAX,
}
}


@ -20,13 +20,17 @@ cuprate-helper = { path = "../../helper", features = ["asynch"], default-
axum = { version = "0.7.5", features = ["json"], default-features = false }
serde = { workspace = true, optional = true }
serde_json = { workspace = true, features = ["std"] }
tower = { workspace = true }
paste = { workspace = true }
futures = { workspace = true }
[dev-dependencies]
cuprate-test-utils = { path = "../../test-utils" }
axum = { version = "0.7.5", features = ["json", "tokio", "http2"] }
serde_json = { workspace = true, features = ["std"] }
tokio = { workspace = true, features = ["full"] }
ureq = { version = "2.10.0", features = ["json"] }
[lints]
workspace = true


@ -1,99 +1,6 @@
#![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))]
//---------------------------------------------------------------------------------------------------- Lints
// Forbid lints.
// Our code, and code generated (e.g macros) cannot overrule these.
#![forbid(
// `unsafe` is allowed but it _must_ be
// commented with `SAFETY: reason`.
clippy::undocumented_unsafe_blocks,
// Never.
unused_unsafe,
redundant_semicolons,
unused_allocation,
coherence_leak_check,
while_true,
// Maybe can be put into `#[deny]`.
unconditional_recursion,
for_loops_over_fallibles,
unused_braces,
unused_labels,
keyword_idents,
non_ascii_idents,
variant_size_differences,
single_use_lifetimes,
// Probably can be put into `#[deny]`.
future_incompatible,
let_underscore,
break_with_label_and_loop,
duplicate_macro_attributes,
exported_private_dependencies,
large_assignments,
overlapping_range_endpoints,
semicolon_in_expressions_from_macros,
noop_method_call,
)]
// Deny lints.
// Some of these are `#[allow]`'ed on a per-case basis.
#![deny(
clippy::all,
clippy::correctness,
clippy::suspicious,
clippy::style,
clippy::complexity,
clippy::perf,
clippy::pedantic,
clippy::nursery,
clippy::cargo,
unused_doc_comments,
unused_mut,
missing_docs,
deprecated,
unused_comparisons,
nonstandard_style,
unreachable_pub
)]
#![allow(
// FIXME: this lint affects crates outside of
// `database/` for some reason, allow for now.
clippy::cargo_common_metadata,
// FIXME: adding `#[must_use]` onto everything
// might just be more annoying than useful...
// although it is sometimes nice.
clippy::must_use_candidate,
// FIXME: good lint but too many false positives
// with our `Env` + `RwLock` setup.
clippy::significant_drop_tightening,
// FIXME: good lint but is less clear in most cases.
clippy::items_after_statements,
// TODO
rustdoc::bare_urls,
clippy::multiple_crate_versions,
clippy::module_name_repetitions,
clippy::module_inception,
clippy::redundant_pub_crate,
clippy::option_if_let_else,
)]
// Allow some lints in tests.
#![cfg_attr(
test,
allow(
clippy::cognitive_complexity,
clippy::needless_pass_by_value,
clippy::cast_possible_truncation,
clippy::too_many_lines
)
)]
//---------------------------------------------------------------------------------------------------- Mod
mod route;
mod router_builder;
mod rpc_error;
@ -110,3 +17,13 @@ pub use rpc_handler::RpcHandler;
pub use rpc_handler_dummy::RpcHandlerDummy;
pub use rpc_request::RpcRequest;
pub use rpc_response::RpcResponse;
// false-positive: used in `README.md`'s doc-test.
#[cfg(test)]
mod test {
extern crate axum;
extern crate cuprate_test_utils;
extern crate serde_json;
extern crate tokio;
extern crate ureq;
}


@ -18,3 +18,6 @@ thiserror = { workspace = true }
[dev-dependencies]
pretty_assertions = { workspace = true }
[lints]
workspace = true


@ -1,94 +1,5 @@
#![doc = include_str!("../README.md")]
//---------------------------------------------------------------------------------------------------- Lints
// Forbid lints.
// Our code, and code generated (e.g macros) cannot overrule these.
#![forbid(
// `unsafe` is allowed but it _must_ be
// commented with `SAFETY: reason`.
clippy::undocumented_unsafe_blocks,
// Never.
unused_unsafe,
redundant_semicolons,
unused_allocation,
coherence_leak_check,
while_true,
// Maybe can be put into `#[deny]`.
unconditional_recursion,
for_loops_over_fallibles,
unused_braces,
unused_labels,
keyword_idents,
non_ascii_idents,
variant_size_differences,
single_use_lifetimes,
// Probably can be put into `#[deny]`.
future_incompatible,
let_underscore,
break_with_label_and_loop,
duplicate_macro_attributes,
exported_private_dependencies,
large_assignments,
overlapping_range_endpoints,
semicolon_in_expressions_from_macros,
noop_method_call,
unreachable_pub,
)]
// Deny lints.
// Some of these are `#[allow]`'ed on a per-case basis.
#![deny(
clippy::all,
clippy::correctness,
clippy::suspicious,
clippy::style,
clippy::complexity,
clippy::perf,
clippy::pedantic,
clippy::nursery,
clippy::cargo,
clippy::missing_docs_in_private_items,
unused_mut,
missing_docs,
deprecated,
unused_comparisons,
nonstandard_style
)]
#![allow(
// FIXME: this lint affects crates outside of
// `database/` for some reason, allow for now.
clippy::cargo_common_metadata,
// FIXME: adding `#[must_use]` onto everything
// might just be more annoying than useful...
// although it is sometimes nice.
clippy::must_use_candidate,
// FIXME: good lint but too many false positives
// with our `Env` + `RwLock` setup.
clippy::significant_drop_tightening,
// FIXME: good lint but is less clear in most cases.
clippy::items_after_statements,
clippy::module_name_repetitions,
clippy::module_inception,
clippy::redundant_pub_crate,
clippy::option_if_let_else,
)]
// Allow some lints in tests.
#![cfg_attr(
test,
allow(
clippy::cognitive_complexity,
clippy::needless_pass_by_value,
clippy::cast_possible_truncation,
clippy::too_many_lines
)
)]
//---------------------------------------------------------------------------------------------------- Mod/Use
pub mod error;
mod id;
@ -103,6 +14,5 @@ pub use request::Request;
mod response;
pub use response::Response;
//---------------------------------------------------------------------------------------------------- TESTS
#[cfg(test)]
mod tests;


@ -304,14 +304,14 @@ where
if payload.is_none() {
payload = Some(Ok(map.next_value::<T>()?));
} else {
return Err(serde::de::Error::duplicate_field("result/error"));
return Err(Error::duplicate_field("result/error"));
}
}
Key::Error => {
if payload.is_none() {
payload = Some(Err(map.next_value::<ErrorObject>()?));
} else {
return Err(serde::de::Error::duplicate_field("result/error"));
return Err(Error::duplicate_field("result/error"));
}
}
Key::Unknown => {


@ -52,6 +52,7 @@ where
}
/// Tests an input JSON string matches an expected type `T`.
#[allow(clippy::needless_pass_by_value)] // serde signature
fn assert_de<T>(json: &'static str, expected: T)
where
T: DeserializeOwned + std::fmt::Debug + Clone + PartialEq,


@ -18,13 +18,14 @@ cuprate-epee-encoding = { path = "../../net/epee-encoding", optional = true }
cuprate-fixed-bytes = { path = "../../net/fixed-bytes" }
cuprate-types = { path = "../../types" }
monero-serai = { workspace = true }
paste = { workspace = true }
serde = { workspace = true, optional = true }
[dev-dependencies]
cuprate-test-utils = { path = "../../test-utils" }
cuprate-json-rpc = { path = "../json-rpc" }
serde_json = { workspace = true }
pretty_assertions = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
[lints]
workspace = true


@ -1,96 +1,6 @@
#![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))]
//---------------------------------------------------------------------------------------------------- Lints
// Forbid lints.
// Our code, and code generated (e.g macros) cannot overrule these.
#![forbid(
// `unsafe` is allowed but it _must_ be
// commented with `SAFETY: reason`.
clippy::undocumented_unsafe_blocks,
// Never.
unused_unsafe,
redundant_semicolons,
unused_allocation,
coherence_leak_check,
while_true,
// Maybe can be put into `#[deny]`.
unconditional_recursion,
for_loops_over_fallibles,
unused_braces,
unused_labels,
keyword_idents,
non_ascii_idents,
variant_size_differences,
single_use_lifetimes,
// Probably can be put into `#[deny]`.
future_incompatible,
let_underscore,
break_with_label_and_loop,
duplicate_macro_attributes,
exported_private_dependencies,
large_assignments,
overlapping_range_endpoints,
semicolon_in_expressions_from_macros,
noop_method_call,
)]
// Deny lints.
// Some of these are `#[allow]`'ed on a per-case basis.
#![deny(
clippy::all,
clippy::correctness,
clippy::suspicious,
clippy::style,
clippy::complexity,
clippy::perf,
clippy::pedantic,
clippy::nursery,
clippy::cargo,
unused_doc_comments,
unused_mut,
missing_docs,
deprecated,
unused_comparisons,
nonstandard_style,
unreachable_pub
)]
#![allow(
// FIXME: this lint affects crates outside of
// `database/` for some reason, allow for now.
clippy::cargo_common_metadata,
// FIXME: adding `#[must_use]` onto everything
// might just be more annoying than useful...
// although it is sometimes nice.
clippy::must_use_candidate,
// FIXME: good lint but too many false positives
// with our `Env` + `RwLock` setup.
clippy::significant_drop_tightening,
// FIXME: good lint but is less clear in most cases.
clippy::items_after_statements,
clippy::multiple_crate_versions,
clippy::module_name_repetitions,
clippy::module_inception,
clippy::redundant_pub_crate,
clippy::option_if_let_else,
)]
// Allow some lints in tests.
#![cfg_attr(
test,
allow(
clippy::cognitive_complexity,
clippy::needless_pass_by_value,
clippy::cast_possible_truncation,
clippy::too_many_lines
)
)]
//---------------------------------------------------------------------------------------------------- Mod
mod constants;
mod defaults;
mod free;
@ -112,3 +22,10 @@ pub use constants::{
CORE_RPC_VERSION_MINOR,
};
pub use rpc_call::{RpcCall, RpcCallValue};
// false-positive: used in tests
#[cfg(test)]
mod test {
extern crate cuprate_test_utils;
extern crate serde_json;
}


@ -15,6 +15,7 @@
mod binary_string;
mod distribution;
mod key_image_spent_status;
#[allow(clippy::module_inception)]
mod misc;
mod pool_info_extent;
mod status;


@ -39,7 +39,7 @@ thread_local = { workspace = true, optional = true }
rayon = { workspace = true, optional = true }
[dev-dependencies]
cuprate-helper = { path = "../../helper", features = ["thread"] }
cuprate-helper = { path = "../../helper", features = ["thread", "cast"] }
cuprate-test-utils = { path = "../../test-utils" }
tokio = { workspace = true, features = ["full"] }
@ -48,3 +48,6 @@ pretty_assertions = { workspace = true }
proptest = { workspace = true }
hex = { workspace = true }
hex-literal = { workspace = true }
[lints]
workspace = true


@ -1,4 +1,44 @@
//! The main [`Config`] struct, holding all configurable values.
//! Database configuration.
//!
//! This module contains the main [`Config`]uration struct
//! for the database [`Env`](cuprate_database::Env)ironment,
//! and blockchain-specific configuration.
//!
//! It also contains types related to configuration settings.
//!
//! The main constructor is the [`ConfigBuilder`].
//!
//! These configurations are processed at runtime, meaning
//! the `Env` can/will dynamically adjust its behavior based
//! on these values.
//!
//! # Example
//! ```rust
//! use cuprate_blockchain::{
//! cuprate_database::{Env, config::SyncMode},
//! config::{ConfigBuilder, ReaderThreads},
//! };
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let tmp_dir = tempfile::tempdir()?;
//! let db_dir = tmp_dir.path().to_owned();
//!
//! let config = ConfigBuilder::new()
//! // Use a custom database directory.
//! .db_directory(db_dir.into())
//! // Use as many reader threads as possible (when using `service`).
//! .reader_threads(ReaderThreads::OnePerThread)
//! // Use the fastest sync mode.
//! .sync_mode(SyncMode::Fast)
//! // Build into `Config`
//! .build();
//!
//! // Start a database `service` using this configuration.
//! let (_, _, env) = cuprate_blockchain::service::init(config.clone())?;
//! // It's using the config we provided.
//! assert_eq!(env.config(), &config.db_config);
//! # Ok(()) }
//! ```
//---------------------------------------------------------------------------------------------------- Import
use std::{borrow::Cow, path::Path};


@ -1,44 +0,0 @@
//! Database configuration.
//!
//! This module contains the main [`Config`]uration struct
//! for the database [`Env`](cuprate_database::Env)ironment,
//! and blockchain-specific configuration.
//!
//! It also contains types related to configuration settings.
//!
//! The main constructor is the [`ConfigBuilder`].
//!
//! These configurations are processed at runtime, meaning
//! the `Env` can/will dynamically adjust its behavior based
//! on these values.
//!
//! # Example
//! ```rust
//! use cuprate_blockchain::{
//! cuprate_database::{Env, config::SyncMode},
//! config::{ConfigBuilder, ReaderThreads},
//! };
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let tmp_dir = tempfile::tempdir()?;
//! let db_dir = tmp_dir.path().to_owned();
//!
//! let config = ConfigBuilder::new()
//! // Use a custom database directory.
//! .db_directory(db_dir.into())
//! // Use as many reader threads as possible (when using `service`).
//! .reader_threads(ReaderThreads::OnePerThread)
//! // Use the fastest sync mode.
//! .sync_mode(SyncMode::Fast)
//! // Build into `Config`
//! .build();
//!
//! // Start a database `service` using this configuration.
//! let (_, _, env) = cuprate_blockchain::service::init(config.clone())?;
//! // It's using the config we provided.
//! assert_eq!(env.config(), &config.db_config);
//! # Ok(()) }
//! ```
mod config;
pub use config::{Config, ConfigBuilder, ReaderThreads};


@ -1,103 +1,9 @@
#![doc = include_str!("../README.md")]
//---------------------------------------------------------------------------------------------------- Lints
// Forbid lints.
// Our code, and code generated (e.g macros) cannot overrule these.
#![forbid(
// `unsafe` is allowed but it _must_ be
// commented with `SAFETY: reason`.
clippy::undocumented_unsafe_blocks,
// Never.
unused_unsafe,
redundant_semicolons,
unused_allocation,
coherence_leak_check,
while_true,
clippy::missing_docs_in_private_items,
// Maybe can be put into `#[deny]`.
unconditional_recursion,
for_loops_over_fallibles,
unused_braces,
unused_labels,
keyword_idents,
non_ascii_idents,
variant_size_differences,
single_use_lifetimes,
// Probably can be put into `#[deny]`.
future_incompatible,
let_underscore,
break_with_label_and_loop,
duplicate_macro_attributes,
exported_private_dependencies,
large_assignments,
overlapping_range_endpoints,
semicolon_in_expressions_from_macros,
noop_method_call,
unreachable_pub,
)]
// Deny lints.
// Some of these are `#[allow]`'ed on a per-case basis.
#![deny(
clippy::all,
clippy::correctness,
clippy::suspicious,
clippy::style,
clippy::complexity,
clippy::perf,
clippy::pedantic,
clippy::nursery,
clippy::cargo,
unused_crate_dependencies,
unused_doc_comments,
unused_mut,
missing_docs,
deprecated,
unused_comparisons,
nonstandard_style
)]
#![allow(
// FIXME: this lint affects crates outside of
// `database/` for some reason, allow for now.
clippy::cargo_common_metadata,
// FIXME: adding `#[must_use]` onto everything
// might just be more annoying than useful...
// although it is sometimes nice.
clippy::must_use_candidate,
// FIXME: good lint but too many false positives
// with our `Env` + `RwLock` setup.
clippy::significant_drop_tightening,
// FIXME: good lint but is less clear in most cases.
clippy::items_after_statements,
clippy::module_name_repetitions,
clippy::module_inception,
clippy::redundant_pub_crate,
clippy::option_if_let_else,
)]
// Allow some lints when running in debug mode.
#![cfg_attr(
debug_assertions,
allow(
clippy::todo,
clippy::multiple_crate_versions,
// unused_crate_dependencies,
)
)]
// Allow some lints in tests.
#![cfg_attr(
test,
allow(
clippy::cognitive_complexity,
clippy::needless_pass_by_value,
clippy::cast_possible_truncation,
clippy::too_many_lines
)
// See `cuprate-database` for reasoning.
clippy::significant_drop_tightening
)]
// Only allow building 64-bit targets.
//
// This allows us to assume 64-bit


@ -442,7 +442,7 @@ mod test {
let mut block = BLOCK_V9_TX3.clone();
block.height = usize::try_from(u32::MAX).unwrap() + 1;
block.height = cuprate_helper::cast::u32_to_usize(u32::MAX) + 1;
add_block(&block, &mut tables).unwrap();
}

View file

@@ -37,8 +37,8 @@ pub fn init(
let db = Arc::new(crate::open(config)?);
// Spawn the Reader thread pool and Writer.
let readers = init_read_service(db.clone(), reader_threads);
let writer = init_write_service(db.clone());
let readers = init_read_service(Arc::clone(&db), reader_threads);
let writer = init_write_service(Arc::clone(&db));
Ok((readers, writer, db))
}

View file

@@ -304,8 +304,9 @@ async fn test_template(
// Assert we get back the same map of
// `Amount`'s and `AmountIndex`'s.
let mut response_output_count = 0;
#[allow(clippy::iter_over_hash_type)] // order doesn't matter in this test
for (amount, output_map) in response {
let amount_index_set = map.get(&amount).unwrap();
let amount_index_set = &map[&amount];
for (amount_index, output) in output_map {
response_output_count += 1;

View file

@@ -33,3 +33,6 @@ serde = { workspace = true, optional = true }
bytemuck = { version = "1.14.3", features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] }
page_size = { version = "0.6.0" }
tempfile = { version = "3.10.0" }
[lints]
workspace = true

View file

@@ -70,7 +70,7 @@ impl Drop for ConcreteEnv {
// We need to do `mdb_env_set_flags(&env, MDB_NOSYNC|MDB_ASYNCMAP, 0)`
// to clear the no sync and async flags such that the below `self.sync()`
// _actually_ synchronously syncs.
if let Err(_e) = crate::Env::sync(self) {
if let Err(_e) = Env::sync(self) {
// TODO: log error?
}

View file

@@ -78,8 +78,8 @@ mod test {
println!("left: {left:?}, right: {right:?}, expected: {expected:?}");
assert_eq!(
<StorableHeed::<T> as heed::Comparator>::compare(
&<StorableHeed::<T> as heed::BytesEncode>::bytes_encode(&left).unwrap(),
&<StorableHeed::<T> as heed::BytesEncode>::bytes_encode(&right).unwrap()
&<StorableHeed::<T> as BytesEncode>::bytes_encode(&left).unwrap(),
&<StorableHeed::<T> as BytesEncode>::bytes_encode(&right).unwrap()
),
expected
);

View file

@@ -23,7 +23,7 @@ use crate::{
/// Shared [`DatabaseRo::get()`].
#[inline]
fn get<T: Table + 'static>(
db: &impl redb::ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
key: &T::Key,
) -> Result<T::Value, RuntimeError> {
Ok(db.get(key)?.ok_or(RuntimeError::KeyNotFound)?.value())
@@ -32,7 +32,7 @@ fn get<T: Table + 'static>(
/// Shared [`DatabaseRo::len()`].
#[inline]
fn len<T: Table>(
db: &impl redb::ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
) -> Result<u64, RuntimeError> {
Ok(db.len()?)
}
@@ -40,7 +40,7 @@ fn len<T: Table>(
/// Shared [`DatabaseRo::first()`].
#[inline]
fn first<T: Table>(
db: &impl redb::ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
) -> Result<(T::Key, T::Value), RuntimeError> {
let (key, value) = db.first()?.ok_or(RuntimeError::KeyNotFound)?;
Ok((key.value(), value.value()))
@@ -49,7 +49,7 @@ fn first<T: Table>(
/// Shared [`DatabaseRo::last()`].
#[inline]
fn last<T: Table>(
db: &impl redb::ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
) -> Result<(T::Key, T::Value), RuntimeError> {
let (key, value) = db.last()?.ok_or(RuntimeError::KeyNotFound)?;
Ok((key.value(), value.value()))
@@ -58,7 +58,7 @@ fn last<T: Table>(
/// Shared [`DatabaseRo::is_empty()`].
#[inline]
fn is_empty<T: Table>(
db: &impl redb::ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
db: &impl ReadableTable<StorableRedb<T::Key>, StorableRedb<T::Value>>,
) -> Result<bool, RuntimeError> {
Ok(db.is_empty()?)
}

View file

@@ -33,6 +33,7 @@
//! # Ok(()) }
//! ```
#[allow(clippy::module_inception)]
mod config;
pub use config::{Config, ConfigBuilder, READER_THREADS_DEFAULT};

View file

@@ -163,7 +163,7 @@ pub trait Env: Sized {
// We have the direct PATH to the file,
// no need to use backend-specific functions.
//
// SAFETY: as we are only accessing the metadata of
// INVARIANT: as we are only accessing the metadata of
// the file and not reading the bytes, it should be
// fine even with a memory mapped file being actively
// written to.
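
A hedged sketch of the path-based size query the comment above describes: only `std::fs::metadata` is used, so the memory-mapped bytes themselves are never read. The `path` parameter and function name are illustrative, not the trait's actual signature.

```rust
use std::{fs, io, path::Path};

// Query the on-disk size of a database file by its PATH alone.
// Only file metadata is touched, so this holds even while the file is
// memory-mapped and actively written to (the invariant noted above).
fn disk_size_bytes(path: &Path) -> io::Result<u64> {
    Ok(fs::metadata(path)?.len())
}
```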

View file

@@ -163,11 +163,11 @@ impl KeyCompare {
#[inline]
pub const fn as_compare_fn<K: Key>(self) -> fn(&[u8], &[u8]) -> Ordering {
match self {
Self::Default => std::cmp::Ord::cmp,
Self::Default => Ord::cmp,
Self::Number => |left, right| {
let left = <K as Storable>::from_bytes(left);
let right = <K as Storable>::from_bytes(right);
std::cmp::Ord::cmp(&left, &right)
Ord::cmp(&left, &right)
},
Self::Custom(f) => f,
}
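
The `Number` arm decodes both keys before comparing because raw little-endian bytes do not sort numerically. A small self-contained sketch of the difference (plain `std`, not the crate's comparator):

```rust
use std::cmp::Ordering;

// Lexicographic comparison of little-endian bytes disagrees with numeric
// order, which is why `KeyCompare::Number` decodes before calling `Ord::cmp`.
fn main() {
    let (a, b) = (1_u64, 256_u64);
    let (a_le, b_le) = (a.to_le_bytes(), b.to_le_bytes());

    // Byte-wise: [1, 0, ..] > [0, 1, ..], i.e. 1 "sorts after" 256.
    assert_eq!(Ord::cmp(&a_le[..], &b_le[..]), Ordering::Greater);

    // Decoded: 1 < 256, the order the database actually wants.
    assert_eq!(Ord::cmp(&a, &b), Ordering::Less);
}
```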

View file

@@ -1,94 +1,18 @@
#![doc = include_str!("../README.md")]
//---------------------------------------------------------------------------------------------------- Lints
// Forbid lints.
// Our code, and code generated (e.g macros) cannot overrule these.
#![forbid(
// `unsafe` is allowed but it _must_ be
// commented with `SAFETY: reason`.
clippy::undocumented_unsafe_blocks,
// Never.
unused_unsafe,
redundant_semicolons,
unused_allocation,
coherence_leak_check,
while_true,
clippy::missing_docs_in_private_items,
// Maybe can be put into `#[deny]`.
unconditional_recursion,
for_loops_over_fallibles,
unused_braces,
unused_labels,
keyword_idents,
non_ascii_idents,
variant_size_differences,
single_use_lifetimes,
// Probably can be put into `#[deny]`.
future_incompatible,
let_underscore,
break_with_label_and_loop,
duplicate_macro_attributes,
exported_private_dependencies,
large_assignments,
overlapping_range_endpoints,
semicolon_in_expressions_from_macros,
noop_method_call,
unreachable_pub,
)]
// Deny lints.
// Some of these are `#[allow]`'ed on a per-case basis.
#![deny(
clippy::all,
clippy::correctness,
clippy::suspicious,
clippy::style,
clippy::complexity,
clippy::perf,
clippy::pedantic,
clippy::nursery,
clippy::cargo,
unused_crate_dependencies,
unused_doc_comments,
unused_mut,
missing_docs,
deprecated,
unused_comparisons,
nonstandard_style
)]
#![allow(
// FIXME: this lint affects crates outside of
// `database/` for some reason, allow for now.
clippy::cargo_common_metadata,
// FIXME: adding `#[must_use]` onto everything
// might just be more annoying than useful...
// although it is sometimes nice.
clippy::must_use_candidate,
// FIXME: good lint but too many false positives
// with our `Env` + `RwLock` setup.
clippy::significant_drop_tightening,
// FIXME: good lint but is less clear in most cases.
clippy::items_after_statements,
clippy::module_name_repetitions,
clippy::module_inception,
clippy::redundant_pub_crate,
clippy::option_if_let_else,
// unused_crate_dependencies, // false-positive with `paste`
)]
// Allow some lints when running in debug mode.
#![cfg_attr(
debug_assertions,
allow(
clippy::todo,
clippy::multiple_crate_versions,
// unused_crate_dependencies,
)
// This lint is allowed because the following
// code exists a lot in this crate:
//
// ```rust
// let env_inner = env.env_inner();
// let tx_rw = env_inner.tx_rw()?;
// OpenTables::create_tables(&env_inner, &tx_rw)?;
// ```
//
// Rust thinks `env_inner` can be dropped earlier
// but it cannot, we need it for the lifetime of
// the database transaction + tables.
clippy::significant_drop_tightening
)]
// Allow some lints in tests.
#![cfg_attr(

View file

@ -41,3 +41,6 @@ cuprate-test-utils = { path = "../../test-utils" }
tokio = { workspace = true }
tempfile = { workspace = true }
hex-literal = { workspace = true }
[lints]
workspace = true

View file

@@ -211,7 +211,7 @@ impl Config {
/// assert_eq!(config.reader_threads, ReaderThreads::default());
/// ```
pub fn new() -> Self {
Config {
Self {
db_config: DbConfig::new(Cow::Borrowed(&*CUPRATE_TXPOOL_DIR)),
reader_threads: ReaderThreads::default(),
max_txpool_weight: 0,

View file

@@ -1,4 +1,8 @@
#![doc = include_str!("../README.md")]
#![allow(
// See `cuprate-database` for reasoning.
clippy::significant_drop_tightening
)]
pub mod config;
mod free;
@@ -13,3 +17,13 @@ pub use free::open;
//re-exports
pub use cuprate_database;
// TODO: remove when used.
use tower as _;
#[cfg(test)]
mod test {
use cuprate_test_utils as _;
use hex_literal as _;
use tempfile as _;
use tokio as _;
}

View file

@@ -11,7 +11,7 @@ use crate::{ops::TxPoolWriteError, tables::SpentKeyImages, types::TransactionHas
///
/// # Panics
/// This function will panic if any of the [`Input`]s are not [`Input::ToKey`]
pub fn add_tx_key_images(
pub(super) fn add_tx_key_images(
inputs: &[Input],
tx_hash: &TransactionHash,
kis_table: &mut impl DatabaseRw<SpentKeyImages>,
@@ -31,7 +31,7 @@ pub fn add_tx_key_images(
///
/// # Panics
/// This function will panic if any of the [`Input`]s are not [`Input::ToKey`]
pub fn remove_tx_key_images(
pub(super) fn remove_tx_key_images(
inputs: &[Input],
kis_table: &mut impl DatabaseRw<SpentKeyImages>,
) -> Result<(), RuntimeError> {
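
A hedged sketch of the panic contract documented above, assuming monero-serai's `Input::ToKey { key_image, .. }` shape; the helper name and return type are illustrative, not the crate's internals.

```rust
use monero_serai::transaction::Input;

// Illustrative only: collect the compressed key-image bytes of every input,
// panicking on any non-`ToKey` input exactly as the doc comments describe.
fn key_image_bytes(inputs: &[Input]) -> Vec<[u8; 32]> {
    inputs
        .iter()
        .map(|input| match input {
            Input::ToKey { key_image, .. } => key_image.compress().to_bytes(),
            _ => panic!("txpool transactions must only contain `ToKey` inputs"),
        })
        .collect()
}
```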

View file

@@ -30,8 +30,8 @@ pub fn init(
let db = Arc::new(crate::open(config)?);
// Spawn the Reader thread pool and Writer.
let readers = init_read_service(db.clone(), reader_threads);
let writer = init_write_service(db.clone());
let readers = init_read_service(Arc::clone(&db), reader_threads);
let writer = init_write_service(Arc::clone(&db));
Ok((readers, writer, db))
}

View file

@@ -25,7 +25,7 @@ use crate::{
/// Should be called _once_ per actual database.
#[cold]
#[inline(never)] // Only called once.
pub fn init_read_service(env: Arc<ConcreteEnv>, threads: ReaderThreads) -> TxpoolReadHandle {
pub(super) fn init_read_service(env: Arc<ConcreteEnv>, threads: ReaderThreads) -> TxpoolReadHandle {
init_read_service_with_pool(env, init_thread_pool(threads))
}
@@ -35,10 +35,7 @@ pub fn init_read_service(env: Arc<ConcreteEnv>, threads: ReaderThreads) -> Txpoo
/// Should be called _once_ per actual database.
#[cold]
#[inline(never)] // Only called once.
pub fn init_read_service_with_pool(
env: Arc<ConcreteEnv>,
pool: Arc<ThreadPool>,
) -> TxpoolReadHandle {
fn init_read_service_with_pool(env: Arc<ConcreteEnv>, pool: Arc<ThreadPool>) -> TxpoolReadHandle {
DatabaseReadService::new(env, pool, map_request)
}
@@ -53,6 +50,7 @@ pub fn init_read_service_with_pool(
/// 1. `Request` is mapped to a handler function
/// 2. Handler function is called
/// 3. [`TxpoolReadResponse`] is returned
#[allow(clippy::needless_pass_by_value)]
fn map_request(
env: &ConcreteEnv, // Access to the database
request: TxpoolReadRequest, // The request we must fulfill
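
A hedged sketch of the three-step dispatch the doc comment above describes (request is mapped to a handler, the handler runs, a response is returned). The enums and handler names here are placeholders, not the crate's actual API.

```rust
// Illustrative only: the shape of the `map_request` dispatch described above.
enum ExampleReadRequest {
    TxBlob([u8; 32]),
    TxCount,
}

enum ExampleReadResponse {
    TxBlob(Vec<u8>),
    TxCount(usize),
}

fn map_request_sketch(request: ExampleReadRequest) -> ExampleReadResponse {
    // 1. map the request variant to a handler,
    // 2. call the handler,
    // 3. return the matching response variant.
    match request {
        ExampleReadRequest::TxBlob(hash) => ExampleReadResponse::TxBlob(fetch_blob(hash)),
        ExampleReadRequest::TxCount => ExampleReadResponse::TxCount(count_txs()),
    }
}

fn fetch_blob(_hash: [u8; 32]) -> Vec<u8> { Vec::new() } // stub handler
fn count_txs() -> usize { 0 }                            // stub handler
```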

View file

@@ -16,7 +16,7 @@ use crate::{
//---------------------------------------------------------------------------------------------------- init_write_service
/// Initialize the txpool write service from a [`ConcreteEnv`].
pub fn init_write_service(env: Arc<ConcreteEnv>) -> TxpoolWriteHandle {
pub(super) fn init_write_service(env: Arc<ConcreteEnv>) -> TxpoolWriteHandle {
DatabaseWriteHandle::init(env, handle_txpool_request)
}

View file

@@ -35,10 +35,11 @@ bitflags::bitflags! {
pub struct TransactionInfo {
/// The transaction's fee.
pub fee: u64,
/// The transaction`s weight.
/// The transaction's weight.
pub weight: usize,
/// [`TxStateFlags`] of this transaction.
pub flags: TxStateFlags,
#[allow(clippy::pub_underscore_fields)]
/// Explicit padding so that we have no implicit padding bytes in `repr(C)`.
///
/// Allows potential future expansion of this type.
@@ -68,21 +69,21 @@ impl From<RawCachedVerificationState> for CachedVerificationState {
fn from(value: RawCachedVerificationState) -> Self {
// if the hash is all `0`s then there is no hash this is valid at.
if value.raw_valid_at_hash == [0; 32] {
return CachedVerificationState::NotVerified;
return Self::NotVerified;
}
let raw_valid_past_timestamp = u64::from_le_bytes(value.raw_valid_past_timestamp);
// if the timestamp is 0, there is no timestamp that needs to be passed.
if raw_valid_past_timestamp == 0 {
return CachedVerificationState::ValidAtHashAndHF {
return Self::ValidAtHashAndHF {
block_hash: value.raw_valid_at_hash,
hf: HardFork::from_version(value.raw_hf)
.expect("hard-fork values stored in the DB should always be valid"),
};
}
CachedVerificationState::ValidAtHashAndHFWithTimeBasedLock {
Self::ValidAtHashAndHFWithTimeBasedLock {
block_hash: value.raw_valid_at_hash,
hf: HardFork::from_version(value.raw_hf)
.expect("hard-fork values stored in the DB should always be valid"),
@@ -91,6 +92,7 @@ impl From<RawCachedVerificationState> for CachedVerificationState {
}
}
#[allow(clippy::fallible_impl_from)] // only panics in invalid states
impl From<CachedVerificationState> for RawCachedVerificationState {
fn from(value: CachedVerificationState) -> Self {
match value {
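
The conversion above relies on two sentinels: an all-zero hash means "never verified", and a zero timestamp means "no time-based lock". A hedged sketch of those rules in isolation, with the hard-fork field omitted; the types here are stand-ins, not the crate's.

```rust
// Illustrative re-statement of the sentinel rules above; not the crate's types.
struct RawState {
    valid_at_hash: [u8; 32],
    valid_past_timestamp: [u8; 8],
}

enum State {
    NotVerified,
    ValidAtHash,
    ValidAtHashWithTimeLock(u64),
}

fn decode(raw: RawState) -> State {
    if raw.valid_at_hash == [0; 32] {
        return State::NotVerified; // all-zero hash: never verified
    }
    match u64::from_le_bytes(raw.valid_past_timestamp) {
        0 => State::ValidAtHash,                  // zero timestamp: no lock
        ts => State::ValidAtHashWithTimeLock(ts), // otherwise: time-based lock
    }
}
```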

View file

@@ -23,10 +23,12 @@ bytes = { workspace = true }
curve25519-dalek = { workspace = true }
monero-serai = { workspace = true }
serde = { workspace = true, features = ["derive"], optional = true }
borsh = { workspace = true, optional = true }
thiserror = { workspace = true }
proptest = { workspace = true, optional = true }
proptest-derive = { workspace = true, optional = true }
[dev-dependencies]
[lints]
workspace = true

View file

@@ -1,76 +1,6 @@
#![doc = include_str!("../README.md")]
//---------------------------------------------------------------------------------------------------- Lints
// Forbid lints.
// Our code, and code generated (e.g macros) cannot overrule these.
#![forbid(
// `unsafe` is allowed but it _must_ be
// commented with `SAFETY: reason`.
clippy::undocumented_unsafe_blocks,
// Never.
unused_unsafe,
redundant_semicolons,
unused_allocation,
coherence_leak_check,
single_use_lifetimes,
while_true,
clippy::missing_docs_in_private_items,
// Maybe can be put into `#[deny]`.
unconditional_recursion,
for_loops_over_fallibles,
unused_braces,
unused_doc_comments,
unused_labels,
keyword_idents,
non_ascii_idents,
variant_size_differences,
// Probably can be put into `#[deny]`.
future_incompatible,
let_underscore,
break_with_label_and_loop,
duplicate_macro_attributes,
exported_private_dependencies,
large_assignments,
overlapping_range_endpoints,
semicolon_in_expressions_from_macros,
noop_method_call,
unreachable_pub,
)]
// Deny lints.
// Some of these are `#[allow]`'ed on a per-case basis.
#![deny(
clippy::all,
clippy::correctness,
clippy::suspicious,
clippy::style,
clippy::complexity,
clippy::perf,
clippy::pedantic,
clippy::nursery,
clippy::cargo,
unused_mut,
missing_docs,
deprecated,
unused_comparisons,
nonstandard_style
)]
#![allow(
// FIXME: this lint affects crates outside of
// `database/` for some reason, allow for now.
clippy::cargo_common_metadata,
// FIXME: adding `#[must_use]` onto everything
// might just be more annoying than useful...
// although it is sometimes nice.
clippy::must_use_candidate,
clippy::module_name_repetitions,
clippy::module_inception,
clippy::redundant_pub_crate,
clippy::option_if_let_else,
)]
// `proptest` needs this internally.
#![cfg_attr(any(feature = "proptest"), allow(non_local_definitions))]
// Allow some lints when running in debug mode.
#![cfg_attr(debug_assertions, allow(clippy::todo, clippy::multiple_crate_versions))]
@@ -97,4 +27,5 @@ pub use types::{
//---------------------------------------------------------------------------------------------------- Feature-gated
#[cfg(feature = "blockchain")]
pub mod blockchain;
//---------------------------------------------------------------------------------------------------- Private