From b57ee2f4cf83dcb5fc63626a0cefe426d2b7a418 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Tue, 29 Oct 2024 15:30:51 +0000 Subject: [PATCH 01/14] cuprated: txpool (#312) * init dandelion integration * add dandelion start function * finish incoming tx handler * Add tx blob hash table * Add missing txpool requests * handle duplicate stem txs * check txpool on incoming block * add request to remove tx in new blocks from the pool * tell the txpool about incoming blocks * fix merge * typos * remove blockchain height from txpool * add function to start the pool * add cross network address * pre-review changes * fix CI * review fixes * review fixes * abort on DB error * fix clippy --- Cargo.lock | 32 ++ Cargo.toml | 1 + binaries/cuprated/Cargo.toml | 2 +- binaries/cuprated/src/blockchain.rs | 2 +- binaries/cuprated/src/blockchain/interface.rs | 93 +++-- binaries/cuprated/src/blockchain/manager.rs | 5 + .../src/blockchain/manager/handler.rs | 31 +- binaries/cuprated/src/blockchain/syncer.rs | 5 +- binaries/cuprated/src/blockchain/types.rs | 8 +- binaries/cuprated/src/p2p.rs | 3 + binaries/cuprated/src/p2p/network_address.rs | 16 + binaries/cuprated/src/statics.rs | 2 +- binaries/cuprated/src/txpool.rs | 14 +- binaries/cuprated/src/txpool/dandelion.rs | 65 +++ .../src/txpool/dandelion/diffuse_service.rs | 44 ++ .../src/txpool/dandelion/stem_service.rs | 68 ++++ .../cuprated/src/txpool/dandelion/tx_store.rs | 74 ++++ binaries/cuprated/src/txpool/incoming_tx.rs | 379 ++++++++++++++++++ .../cuprated/src/txpool/txs_being_handled.rs | 53 +++ p2p/dandelion-tower/src/router.rs | 9 + p2p/p2p/src/client_pool.rs | 15 +- p2p/p2p/src/lib.rs | 9 +- storage/service/src/service/write.rs | 8 + storage/txpool/Cargo.toml | 1 + storage/txpool/src/free.rs | 12 +- storage/txpool/src/lib.rs | 2 +- storage/txpool/src/ops.rs | 2 +- storage/txpool/src/ops/tx_read.rs | 19 +- storage/txpool/src/ops/tx_write.rs | 11 + storage/txpool/src/service/interface.rs | 62 ++- storage/txpool/src/service/read.rs | 102 ++++- storage/txpool/src/service/write.rs | 73 +++- storage/txpool/src/tables.rs | 10 +- storage/txpool/src/types.rs | 4 +- 34 files changed, 1146 insertions(+), 90 deletions(-) create mode 100644 binaries/cuprated/src/p2p/network_address.rs create mode 100644 binaries/cuprated/src/txpool/dandelion.rs create mode 100644 binaries/cuprated/src/txpool/dandelion/diffuse_service.rs create mode 100644 binaries/cuprated/src/txpool/dandelion/stem_service.rs create mode 100644 binaries/cuprated/src/txpool/dandelion/tx_store.rs create mode 100644 binaries/cuprated/src/txpool/incoming_tx.rs create mode 100644 binaries/cuprated/src/txpool/txs_being_handled.rs diff --git a/Cargo.lock b/Cargo.lock index ca0174b..0f851dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -56,6 +56,18 @@ version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + [[package]] name = "async-stream" version = "0.3.5" @@ -238,6 +250,19 @@ dependencies = [ "digest", ] +[[package]] +name = "blake3" +version = "1.5.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d82033247fd8e890df8f740e407ad4d038debb9eb1f40533fffb32e7d17dc6f7" +dependencies = [ + "arrayref", + "arrayvec", + "cc", + "cfg-if", + "constant_time_eq", +] + [[package]] name = "block-buffer" version = "0.10.4" @@ -403,6 +428,12 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + [[package]] name = "core-foundation" version = "0.9.4" @@ -919,6 +950,7 @@ name = "cuprate-txpool" version = "0.0.0" dependencies = [ "bitflags 2.6.0", + "blake3", "bytemuck", "cuprate-database", "cuprate-database-service", diff --git a/Cargo.toml b/Cargo.toml index 3865863..d5aca71 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,6 +82,7 @@ cuprate-rpc-interface = { path = "rpc/interface" ,default-feature anyhow = { version = "1.0.89", default-features = false } async-trait = { version = "0.1.82", default-features = false } bitflags = { version = "2.6.0", default-features = false } +blake3 = { version = "1", default-features = false } borsh = { version = "1.5.1", default-features = false } bytemuck = { version = "1.18.0", default-features = false } bytes = { version = "1.7.2", default-features = false } diff --git a/binaries/cuprated/Cargo.toml b/binaries/cuprated/Cargo.toml index 2f22be0..880c205 100644 --- a/binaries/cuprated/Cargo.toml +++ b/binaries/cuprated/Cargo.toml @@ -21,7 +21,7 @@ cuprate-levin = { workspace = true } cuprate-wire = { workspace = true } cuprate-p2p = { workspace = true } cuprate-p2p-core = { workspace = true } -cuprate-dandelion-tower = { workspace = true } +cuprate-dandelion-tower = { workspace = true, features = ["txpool"] } cuprate-async-buffer = { workspace = true } cuprate-address-book = { workspace = true } cuprate-blockchain = { workspace = true, features = ["service"] } diff --git a/binaries/cuprated/src/blockchain.rs b/binaries/cuprated/src/blockchain.rs index a06f3fa..c4b75e4 100644 --- a/binaries/cuprated/src/blockchain.rs +++ b/binaries/cuprated/src/blockchain.rs @@ -25,7 +25,7 @@ mod manager; mod syncer; mod types; -use types::{ +pub use types::{ ConcreteBlockVerifierService, ConcreteTxVerifierService, ConsensusBlockchainReadHandle, }; diff --git a/binaries/cuprated/src/blockchain/interface.rs b/binaries/cuprated/src/blockchain/interface.rs index 985e60d..2482784 100644 --- a/binaries/cuprated/src/blockchain/interface.rs +++ b/binaries/cuprated/src/blockchain/interface.rs @@ -8,17 +8,16 @@ use std::{ }; use monero_serai::{block::Block, transaction::Transaction}; -use rayon::prelude::*; use tokio::sync::{mpsc, oneshot}; use tower::{Service, ServiceExt}; use cuprate_blockchain::service::BlockchainReadHandle; use cuprate_consensus::transactions::new_tx_verification_data; -use cuprate_helper::cast::usize_to_u64; -use cuprate_types::{ - blockchain::{BlockchainReadRequest, BlockchainResponse}, - Chain, +use cuprate_txpool::service::{ + interface::{TxpoolReadRequest, TxpoolReadResponse}, + TxpoolReadHandle, }; +use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse}; use crate::{ blockchain::manager::{BlockchainManagerCommand, IncomingBlockOk}, @@ -38,7 +37,7 @@ pub enum IncomingBlockError { /// /// The inner values are the block hash and the indexes of the missing txs in the block. 
#[error("Unknown transactions in block.")] - UnknownTransactions([u8; 32], Vec), + UnknownTransactions([u8; 32], Vec), /// We are missing the block's parent. #[error("The block has an unknown parent.")] Orphan, @@ -59,8 +58,9 @@ pub enum IncomingBlockError { /// - the block's parent is unknown pub async fn handle_incoming_block( block: Block, - given_txs: Vec, + mut given_txs: HashMap<[u8; 32], Transaction>, blockchain_read_handle: &mut BlockchainReadHandle, + txpool_read_handle: &mut TxpoolReadHandle, ) -> Result { /// A [`HashSet`] of block hashes that the blockchain manager is currently handling. /// @@ -72,7 +72,12 @@ pub async fn handle_incoming_block( /// which are also more expensive than `Mutex`s. static BLOCKS_BEING_HANDLED: LazyLock>> = LazyLock::new(|| Mutex::new(HashSet::new())); - // FIXME: we should look in the tx-pool for txs when that is ready. + + if given_txs.len() > block.transactions.len() { + return Err(IncomingBlockError::InvalidBlock(anyhow::anyhow!( + "Too many transactions given for block" + ))); + } if !block_exists(block.header.previous, blockchain_read_handle) .await @@ -90,23 +95,36 @@ pub async fn handle_incoming_block( return Ok(IncomingBlockOk::AlreadyHave); } - // TODO: remove this when we have a working tx-pool. - if given_txs.len() != block.transactions.len() { - return Err(IncomingBlockError::UnknownTransactions( - block_hash, - (0..usize_to_u64(block.transactions.len())).collect(), - )); - } + let TxpoolReadResponse::TxsForBlock { mut txs, missing } = txpool_read_handle + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(TxpoolReadRequest::TxsForBlock(block.transactions.clone())) + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + else { + unreachable!() + }; - // TODO: check we actually got given the right txs. - let prepped_txs = given_txs - .into_par_iter() - .map(|tx| { - let tx = new_tx_verification_data(tx)?; - Ok((tx.tx_hash, tx)) - }) - .collect::>() - .map_err(IncomingBlockError::InvalidBlock)?; + if !missing.is_empty() { + let needed_hashes = missing.iter().map(|index| block.transactions[*index]); + + for needed_hash in needed_hashes { + let Some(tx) = given_txs.remove(&needed_hash) else { + // We return back the indexes of all txs missing from our pool, not taking into account the txs + // that were given with the block, as these txs will be dropped. It is not worth it to try to add + // these txs to the pool as this will only happen with a misbehaving peer or if the txpool reaches + // the size limit. + return Err(IncomingBlockError::UnknownTransactions(block_hash, missing)); + }; + + txs.insert( + needed_hash, + new_tx_verification_data(tx) + .map_err(|e| IncomingBlockError::InvalidBlock(e.into()))?, + ); + } + } let Some(incoming_block_tx) = COMMAND_TX.get() else { // We could still be starting up the blockchain manager. @@ -119,28 +137,37 @@ pub async fn handle_incoming_block( return Ok(IncomingBlockOk::AlreadyHave); } - // From this point on we MUST not early return without removing the block hash from `BLOCKS_BEING_HANDLED`. + // We must remove the block hash from `BLOCKS_BEING_HANDLED`. 
+ let _guard = { + struct RemoveFromBlocksBeingHandled { + block_hash: [u8; 32], + } + impl Drop for RemoveFromBlocksBeingHandled { + fn drop(&mut self) { + BLOCKS_BEING_HANDLED + .lock() + .unwrap() + .remove(&self.block_hash); + } + } + RemoveFromBlocksBeingHandled { block_hash } + }; let (response_tx, response_rx) = oneshot::channel(); incoming_block_tx .send(BlockchainManagerCommand::AddBlock { block, - prepped_txs, + prepped_txs: txs, response_tx, }) .await .expect("TODO: don't actually panic here, an err means we are shutting down"); - let res = response_rx + response_rx .await .expect("The blockchain manager will always respond") - .map_err(IncomingBlockError::InvalidBlock); - - // Remove the block hash from the blocks being handled. - BLOCKS_BEING_HANDLED.lock().unwrap().remove(&block_hash); - - res + .map_err(IncomingBlockError::InvalidBlock) } /// Check if we have a block with the given hash. diff --git a/binaries/cuprated/src/blockchain/manager.rs b/binaries/cuprated/src/blockchain/manager.rs index 8e613bc..2166795 100644 --- a/binaries/cuprated/src/blockchain/manager.rs +++ b/binaries/cuprated/src/blockchain/manager.rs @@ -18,6 +18,7 @@ use cuprate_p2p::{ BroadcastSvc, NetworkInterface, }; use cuprate_p2p_core::ClearNet; +use cuprate_txpool::service::TxpoolWriteHandle; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse}, Chain, TransactionVerificationData, @@ -46,6 +47,7 @@ pub async fn init_blockchain_manager( clearnet_interface: NetworkInterface, blockchain_write_handle: BlockchainWriteHandle, blockchain_read_handle: BlockchainReadHandle, + txpool_write_handle: TxpoolWriteHandle, mut blockchain_context_service: BlockChainContextService, block_verifier_service: ConcreteBlockVerifierService, block_downloader_config: BlockDownloaderConfig, @@ -80,6 +82,7 @@ pub async fn init_blockchain_manager( let manager = BlockchainManager { blockchain_write_handle, blockchain_read_handle, + txpool_write_handle, blockchain_context_service, cached_blockchain_context: blockchain_context.unchecked_blockchain_context().clone(), block_verifier_service, @@ -102,6 +105,8 @@ pub struct BlockchainManager { blockchain_write_handle: BlockchainWriteHandle, /// A [`BlockchainReadHandle`]. blockchain_read_handle: BlockchainReadHandle, + /// A [`TxpoolWriteHandle`]. + txpool_write_handle: TxpoolWriteHandle, // TODO: Improve the API of the cache service. // TODO: rename the cache service -> `BlockchainContextService`. /// The blockchain context cache, this caches the current state of the blockchain to quickly calculate/retrieve diff --git a/binaries/cuprated/src/blockchain/manager/handler.rs b/binaries/cuprated/src/blockchain/manager/handler.rs index e9805cd..5d1cd2d 100644 --- a/binaries/cuprated/src/blockchain/manager/handler.rs +++ b/binaries/cuprated/src/blockchain/manager/handler.rs @@ -1,7 +1,10 @@ //! The blockchain manager handler functions. 
use bytes::Bytes; use futures::{TryFutureExt, TryStreamExt}; -use monero_serai::{block::Block, transaction::Transaction}; +use monero_serai::{ + block::Block, + transaction::{Input, Transaction}, +}; use rayon::prelude::*; use std::ops::ControlFlow; use std::{collections::HashMap, sync::Arc}; @@ -17,16 +20,14 @@ use cuprate_consensus::{ use cuprate_consensus_context::NewBlockData; use cuprate_helper::cast::usize_to_u64; use cuprate_p2p::{block_downloader::BlockBatch, constants::LONG_BAN, BroadcastRequest}; +use cuprate_txpool::service::interface::TxpoolWriteRequest; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse, BlockchainWriteRequest}, AltBlockInformation, HardFork, TransactionVerificationData, VerifiedBlockInformation, }; -use crate::blockchain::manager::commands::IncomingBlockOk; use crate::{ - blockchain::{ - manager::commands::BlockchainManagerCommand, types::ConsensusBlockchainReadHandle, - }, + blockchain::manager::commands::{BlockchainManagerCommand, IncomingBlockOk}, constants::PANIC_CRITICAL_SERVICE_ERROR, signals::REORG_LOCK, }; @@ -434,6 +435,18 @@ impl super::BlockchainManager { &mut self, verified_block: VerifiedBlockInformation, ) { + // FIXME: this is pretty inefficient, we should probably return the KI map created in the consensus crate. + let spent_key_images = verified_block + .txs + .iter() + .flat_map(|tx| { + tx.tx.prefix().inputs.iter().map(|input| match input { + Input::ToKey { key_image, .. } => key_image.compress().0, + Input::Gen(_) => unreachable!(), + }) + }) + .collect::>(); + self.blockchain_context_service .ready() .await @@ -472,6 +485,14 @@ impl super::BlockchainManager { }; self.cached_blockchain_context = blockchain_context.unchecked_blockchain_context().clone(); + + self.txpool_write_handle + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(TxpoolWriteRequest::NewBlock { spent_key_images }) + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR); } } diff --git a/binaries/cuprated/src/blockchain/syncer.rs b/binaries/cuprated/src/blockchain/syncer.rs index 7d6874e..913c983 100644 --- a/binaries/cuprated/src/blockchain/syncer.rs +++ b/binaries/cuprated/src/blockchain/syncer.rs @@ -1,11 +1,10 @@ // FIXME: This whole module is not great and should be rewritten when the PeerSet is made. -use std::{pin::pin, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use futures::StreamExt; -use tokio::time::interval; use tokio::{ sync::{mpsc, Notify}, - time::sleep, + time::interval, }; use tower::{Service, ServiceExt}; use tracing::instrument; diff --git a/binaries/cuprated/src/blockchain/types.rs b/binaries/cuprated/src/blockchain/types.rs index e3ee62b..54e4662 100644 --- a/binaries/cuprated/src/blockchain/types.rs +++ b/binaries/cuprated/src/blockchain/types.rs @@ -1,13 +1,7 @@ -use std::task::{Context, Poll}; - -use futures::future::BoxFuture; -use futures::{FutureExt, TryFutureExt}; -use tower::{util::MapErr, Service}; +use tower::util::MapErr; use cuprate_blockchain::{cuprate_database::RuntimeError, service::BlockchainReadHandle}; use cuprate_consensus::{BlockChainContextService, BlockVerifierService, TxVerifierService}; -use cuprate_p2p::block_downloader::{ChainSvcRequest, ChainSvcResponse}; -use cuprate_types::blockchain::{BlockchainReadRequest, BlockchainResponse}; /// The [`BlockVerifierService`] with all generic types defined. 
pub type ConcreteBlockVerifierService = BlockVerifierService< diff --git a/binaries/cuprated/src/p2p.rs b/binaries/cuprated/src/p2p.rs index f55d41d..cdf1cef 100644 --- a/binaries/cuprated/src/p2p.rs +++ b/binaries/cuprated/src/p2p.rs @@ -2,4 +2,7 @@ //! //! Will handle initiating the P2P and contains a protocol request handler. +mod network_address; pub mod request_handler; + +pub use network_address::CrossNetworkInternalPeerId; diff --git a/binaries/cuprated/src/p2p/network_address.rs b/binaries/cuprated/src/p2p/network_address.rs new file mode 100644 index 0000000..7fa8e86 --- /dev/null +++ b/binaries/cuprated/src/p2p/network_address.rs @@ -0,0 +1,16 @@ +use std::net::SocketAddr; + +use cuprate_p2p_core::{client::InternalPeerID, ClearNet, NetworkZone}; + +/// An identifier for a P2P peer on any network. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum CrossNetworkInternalPeerId { + /// A clear-net peer. + ClearNet(InternalPeerID<::Addr>), +} + +impl From::Addr>> for CrossNetworkInternalPeerId { + fn from(addr: InternalPeerID<::Addr>) -> Self { + Self::ClearNet(addr) + } +} diff --git a/binaries/cuprated/src/statics.rs b/binaries/cuprated/src/statics.rs index 8aab1c9..9839608 100644 --- a/binaries/cuprated/src/statics.rs +++ b/binaries/cuprated/src/statics.rs @@ -1,7 +1,7 @@ //! Global `static`s used throughout `cuprated`. use std::{ - sync::{atomic::AtomicU64, LazyLock}, + sync::LazyLock, time::{SystemTime, UNIX_EPOCH}, }; diff --git a/binaries/cuprated/src/txpool.rs b/binaries/cuprated/src/txpool.rs index a6f05e7..9592c2b 100644 --- a/binaries/cuprated/src/txpool.rs +++ b/binaries/cuprated/src/txpool.rs @@ -1,3 +1,15 @@ //! Transaction Pool //! -//! Will handle initiating the tx-pool, providing the preprocessor required for the dandelion pool. +//! Handles initiating the tx-pool, providing the preprocessor required for the dandelion pool. +use cuprate_consensus::BlockChainContextService; +use cuprate_p2p::NetworkInterface; +use cuprate_p2p_core::ClearNet; +use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle}; + +use crate::blockchain::ConcreteTxVerifierService; + +mod dandelion; +mod incoming_tx; +mod txs_being_handled; + +pub use incoming_tx::IncomingTxHandler; diff --git a/binaries/cuprated/src/txpool/dandelion.rs b/binaries/cuprated/src/txpool/dandelion.rs new file mode 100644 index 0000000..d791b62 --- /dev/null +++ b/binaries/cuprated/src/txpool/dandelion.rs @@ -0,0 +1,65 @@ +use std::time::Duration; + +use cuprate_dandelion_tower::{ + pool::DandelionPoolService, DandelionConfig, DandelionRouter, Graph, +}; +use cuprate_p2p::NetworkInterface; +use cuprate_p2p_core::ClearNet; +use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle}; + +use crate::{ + p2p::CrossNetworkInternalPeerId, + txpool::incoming_tx::{DandelionTx, TxId}, +}; + +mod diffuse_service; +mod stem_service; +mod tx_store; + +/// The configuration used for [`cuprate_dandelion_tower`]. +/// +/// TODO: should we expose this to users of cuprated? probably not. +const DANDELION_CONFIG: DandelionConfig = DandelionConfig { + time_between_hop: Duration::from_millis(175), + epoch_duration: Duration::from_secs(10 * 60), + fluff_probability: 0.12, + graph: Graph::FourRegular, +}; + +/// A [`DandelionRouter`] with all generic types defined. 
+type ConcreteDandelionRouter = DandelionRouter< + stem_service::OutboundPeerStream, + diffuse_service::DiffuseService, + CrossNetworkInternalPeerId, + stem_service::StemPeerService, + DandelionTx, +>; + +/// Starts the dandelion pool manager task and returns a handle to send txs to broadcast. +pub fn start_dandelion_pool_manager( + router: ConcreteDandelionRouter, + txpool_read_handle: TxpoolReadHandle, + txpool_write_handle: TxpoolWriteHandle, +) -> DandelionPoolService { + cuprate_dandelion_tower::pool::start_dandelion_pool_manager( + // TODO: make this constant configurable? + 32, + router, + tx_store::TxStoreService { + txpool_read_handle, + txpool_write_handle, + }, + DANDELION_CONFIG, + ) +} + +/// Creates a [`DandelionRouter`] from a [`NetworkInterface`]. +pub fn dandelion_router(clear_net: NetworkInterface) -> ConcreteDandelionRouter { + DandelionRouter::new( + diffuse_service::DiffuseService { + clear_net_broadcast_service: clear_net.broadcast_svc(), + }, + stem_service::OutboundPeerStream { clear_net }, + DANDELION_CONFIG, + ) +} diff --git a/binaries/cuprated/src/txpool/dandelion/diffuse_service.rs b/binaries/cuprated/src/txpool/dandelion/diffuse_service.rs new file mode 100644 index 0000000..621503f --- /dev/null +++ b/binaries/cuprated/src/txpool/dandelion/diffuse_service.rs @@ -0,0 +1,44 @@ +use std::{ + future::{ready, Ready}, + task::{Context, Poll}, +}; + +use futures::FutureExt; +use tower::Service; + +use cuprate_dandelion_tower::traits::DiffuseRequest; +use cuprate_p2p::{BroadcastRequest, BroadcastSvc}; +use cuprate_p2p_core::ClearNet; + +use crate::txpool::dandelion::DandelionTx; + +/// The dandelion diffusion service. +pub struct DiffuseService { + pub clear_net_broadcast_service: BroadcastSvc, +} + +impl Service> for DiffuseService { + type Response = (); + type Error = tower::BoxError; + type Future = Ready>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.clear_net_broadcast_service + .poll_ready(cx) + .map_err(Into::into) + } + + fn call(&mut self, req: DiffuseRequest) -> Self::Future { + // TODO: the dandelion crate should pass along where we got the tx from. + let Ok(()) = self + .clear_net_broadcast_service + .call(BroadcastRequest::Transaction { + tx_bytes: req.0 .0, + direction: None, + received_from: None, + }) + .into_inner(); + + ready(Ok(())) + } +} diff --git a/binaries/cuprated/src/txpool/dandelion/stem_service.rs b/binaries/cuprated/src/txpool/dandelion/stem_service.rs new file mode 100644 index 0000000..5c0ba65 --- /dev/null +++ b/binaries/cuprated/src/txpool/dandelion/stem_service.rs @@ -0,0 +1,68 @@ +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + +use bytes::Bytes; +use futures::Stream; +use tower::Service; + +use cuprate_dandelion_tower::{traits::StemRequest, OutboundPeer}; +use cuprate_p2p::{ClientPoolDropGuard, NetworkInterface}; +use cuprate_p2p_core::{ + client::{Client, InternalPeerID}, + ClearNet, NetworkZone, PeerRequest, ProtocolRequest, +}; +use cuprate_wire::protocol::NewTransactions; + +use crate::{p2p::CrossNetworkInternalPeerId, txpool::dandelion::DandelionTx}; + +/// The dandelion outbound peer stream. +pub struct OutboundPeerStream { + pub clear_net: NetworkInterface, +} + +impl Stream for OutboundPeerStream { + type Item = Result< + OutboundPeer>, + tower::BoxError, + >; + + fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + // TODO: make the outbound peer choice random. 
+ Poll::Ready(Some(Ok(self + .clear_net + .client_pool() + .outbound_client() + .map_or(OutboundPeer::Exhausted, |client| { + OutboundPeer::Peer( + CrossNetworkInternalPeerId::ClearNet(client.info.id), + StemPeerService(client), + ) + })))) + } +} + +/// The stem service, used to send stem txs. +pub struct StemPeerService(ClientPoolDropGuard); + +impl Service> for StemPeerService { + type Response = as Service>::Response; + type Error = tower::BoxError; + type Future = as Service>::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.0.poll_ready(cx) + } + + fn call(&mut self, req: StemRequest) -> Self::Future { + self.0 + .call(PeerRequest::Protocol(ProtocolRequest::NewTransactions( + NewTransactions { + txs: vec![req.0 .0], + dandelionpp_fluff: false, + padding: Bytes::new(), + }, + ))) + } +} diff --git a/binaries/cuprated/src/txpool/dandelion/tx_store.rs b/binaries/cuprated/src/txpool/dandelion/tx_store.rs new file mode 100644 index 0000000..b890ffd --- /dev/null +++ b/binaries/cuprated/src/txpool/dandelion/tx_store.rs @@ -0,0 +1,74 @@ +use std::task::{Context, Poll}; + +use bytes::Bytes; +use futures::{future::BoxFuture, FutureExt}; +use tower::{Service, ServiceExt}; + +use cuprate_dandelion_tower::{ + traits::{TxStoreRequest, TxStoreResponse}, + State, +}; +use cuprate_database::RuntimeError; +use cuprate_txpool::service::{ + interface::{TxpoolReadRequest, TxpoolReadResponse, TxpoolWriteRequest}, + TxpoolReadHandle, TxpoolWriteHandle, +}; + +use super::{DandelionTx, TxId}; + +/// The dandelion tx-store service. +/// +/// This is just mapping the interface [`cuprate_dandelion_tower`] wants to what [`cuprate_txpool`] provides. +pub struct TxStoreService { + pub txpool_read_handle: TxpoolReadHandle, + pub txpool_write_handle: TxpoolWriteHandle, +} + +impl Service> for TxStoreService { + type Response = TxStoreResponse; + type Error = tower::BoxError; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: TxStoreRequest) -> Self::Future { + match req { + TxStoreRequest::Get(tx_id) => self + .txpool_read_handle + .clone() + .oneshot(TxpoolReadRequest::TxBlob(tx_id)) + .map(|res| match res { + Ok(TxpoolReadResponse::TxBlob { + tx_blob, + state_stem, + }) => { + let state = if state_stem { + State::Stem + } else { + State::Fluff + }; + + Ok(TxStoreResponse::Transaction(Some(( + DandelionTx(Bytes::from(tx_blob)), + state, + )))) + } + Err(RuntimeError::KeyNotFound) => Ok(TxStoreResponse::Transaction(None)), + Err(e) => Err(e.into()), + Ok(_) => unreachable!(), + }) + .boxed(), + TxStoreRequest::Promote(tx_id) => self + .txpool_write_handle + .clone() + .oneshot(TxpoolWriteRequest::Promote(tx_id)) + .map(|res| match res { + Ok(_) | Err(RuntimeError::KeyNotFound) => Ok(TxStoreResponse::Ok), + Err(e) => Err(e.into()), + }) + .boxed(), + } + } +} diff --git a/binaries/cuprated/src/txpool/incoming_tx.rs b/binaries/cuprated/src/txpool/incoming_tx.rs new file mode 100644 index 0000000..e204159 --- /dev/null +++ b/binaries/cuprated/src/txpool/incoming_tx.rs @@ -0,0 +1,379 @@ +use std::{ + collections::HashSet, + sync::Arc, + task::{Context, Poll}, +}; + +use bytes::Bytes; +use futures::{future::BoxFuture, FutureExt}; +use monero_serai::transaction::Transaction; +use tower::{Service, ServiceExt}; + +use cuprate_consensus::{ + transactions::new_tx_verification_data, BlockChainContextRequest, BlockChainContextResponse, + BlockChainContextService, ExtendedConsensusError, 
VerifyTxRequest, +}; +use cuprate_dandelion_tower::{ + pool::{DandelionPoolService, IncomingTxBuilder}, + State, TxState, +}; +use cuprate_helper::asynch::rayon_spawn_async; +use cuprate_p2p::NetworkInterface; +use cuprate_p2p_core::ClearNet; +use cuprate_txpool::{ + service::{ + interface::{ + TxpoolReadRequest, TxpoolReadResponse, TxpoolWriteRequest, TxpoolWriteResponse, + }, + TxpoolReadHandle, TxpoolWriteHandle, + }, + transaction_blob_hash, +}; +use cuprate_types::TransactionVerificationData; + +use crate::{ + blockchain::ConcreteTxVerifierService, + constants::PANIC_CRITICAL_SERVICE_ERROR, + p2p::CrossNetworkInternalPeerId, + signals::REORG_LOCK, + txpool::{ + dandelion, + txs_being_handled::{TxsBeingHandled, TxsBeingHandledLocally}, + }, +}; + +/// An error that can happen handling an incoming tx. +pub enum IncomingTxError { + Parse(std::io::Error), + Consensus(ExtendedConsensusError), + DuplicateTransaction, +} + +/// Incoming transactions. +pub struct IncomingTxs { + /// The raw bytes of the transactions. + pub txs: Vec, + /// The routing state of the transactions. + pub state: TxState, +} + +/// The transaction type used for dandelion++. +#[derive(Clone)] +pub struct DandelionTx(pub Bytes); + +/// A transaction ID/hash. +pub(super) type TxId = [u8; 32]; + +/// The service that handles incoming transaction pool transactions. +/// +/// This service handles everything including verifying the tx, adding it to the pool and routing it to other nodes. +pub struct IncomingTxHandler { + /// A store of txs currently being handled in incoming tx requests. + pub(super) txs_being_handled: TxsBeingHandled, + /// The blockchain context cache. + pub(super) blockchain_context_cache: BlockChainContextService, + /// The dandelion txpool manager. + pub(super) dandelion_pool_manager: + DandelionPoolService, + /// The transaction verifier service. + pub(super) tx_verifier_service: ConcreteTxVerifierService, + /// The txpool write handle. + pub(super) txpool_write_handle: TxpoolWriteHandle, + /// The txpool read handle. + pub(super) txpool_read_handle: TxpoolReadHandle, +} + +impl IncomingTxHandler { + /// Initialize the [`IncomingTxHandler`]. + #[expect(clippy::significant_drop_tightening)] + pub fn init( + clear_net: NetworkInterface, + txpool_write_handle: TxpoolWriteHandle, + txpool_read_handle: TxpoolReadHandle, + blockchain_context_cache: BlockChainContextService, + tx_verifier_service: ConcreteTxVerifierService, + ) -> Self { + let dandelion_router = dandelion::dandelion_router(clear_net); + + let dandelion_pool_manager = dandelion::start_dandelion_pool_manager( + dandelion_router, + txpool_read_handle.clone(), + txpool_write_handle.clone(), + ); + + Self { + txs_being_handled: TxsBeingHandled::new(), + blockchain_context_cache, + dandelion_pool_manager, + tx_verifier_service, + txpool_write_handle, + txpool_read_handle, + } + } +} + +impl Service for IncomingTxHandler { + type Response = (); + type Error = IncomingTxError; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: IncomingTxs) -> Self::Future { + handle_incoming_txs( + req, + self.txs_being_handled.clone(), + self.blockchain_context_cache.clone(), + self.tx_verifier_service.clone(), + self.txpool_write_handle.clone(), + self.txpool_read_handle.clone(), + self.dandelion_pool_manager.clone(), + ) + .boxed() + } +} + +/// Handles the incoming txs. 
+async fn handle_incoming_txs( + IncomingTxs { txs, state }: IncomingTxs, + txs_being_handled: TxsBeingHandled, + mut blockchain_context_cache: BlockChainContextService, + mut tx_verifier_service: ConcreteTxVerifierService, + mut txpool_write_handle: TxpoolWriteHandle, + mut txpool_read_handle: TxpoolReadHandle, + mut dandelion_pool_manager: DandelionPoolService, +) -> Result<(), IncomingTxError> { + let _reorg_guard = REORG_LOCK.read().await; + + let (txs, stem_pool_txs, txs_being_handled_guard) = + prepare_incoming_txs(txs, txs_being_handled, &mut txpool_read_handle).await?; + + let BlockChainContextResponse::Context(context) = blockchain_context_cache + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(BlockChainContextRequest::Context) + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + else { + unreachable!() + }; + + let context = context.unchecked_blockchain_context(); + + tx_verifier_service + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(VerifyTxRequest::Prepped { + txs: txs.clone(), + current_chain_height: context.chain_height, + top_hash: context.top_hash, + time_for_time_lock: context.current_adjusted_timestamp_for_time_lock(), + hf: context.current_hf, + }) + .await + .map_err(IncomingTxError::Consensus)?; + + for tx in txs { + handle_valid_tx( + tx, + state.clone(), + &mut txpool_write_handle, + &mut dandelion_pool_manager, + ) + .await; + } + + // Re-relay any txs we got in the block that were already in our stem pool. + for stem_tx in stem_pool_txs { + rerelay_stem_tx( + &stem_tx, + state.clone(), + &mut txpool_read_handle, + &mut dandelion_pool_manager, + ) + .await; + } + + Ok(()) +} + +/// Prepares the incoming transactions for verification. +/// +/// This will filter out all transactions already in the pool or txs already being handled in another request. +/// +/// Returns in order: +/// - The [`TransactionVerificationData`] for all the txs we did not already have +/// - The Ids of the transactions in the incoming message that are in our stem-pool +/// - A [`TxsBeingHandledLocally`] guard that prevents verifying the same tx at the same time across 2 tasks. +async fn prepare_incoming_txs( + tx_blobs: Vec, + txs_being_handled: TxsBeingHandled, + txpool_read_handle: &mut TxpoolReadHandle, +) -> Result< + ( + Vec>, + Vec, + TxsBeingHandledLocally, + ), + IncomingTxError, +> { + let mut tx_blob_hashes = HashSet::new(); + let mut txs_being_handled_locally = txs_being_handled.local_tracker(); + + // Compute the blob hash for each tx and filter out the txs currently being handled by another incoming tx batch. + let txs = tx_blobs + .into_iter() + .filter_map(|tx_blob| { + let tx_blob_hash = transaction_blob_hash(&tx_blob); + + // If a duplicate is in here the incoming tx batch contained the same tx twice. + if !tx_blob_hashes.insert(tx_blob_hash) { + return Some(Err(IncomingTxError::DuplicateTransaction)); + } + + // If a duplicate is here it is being handled in another batch. + if !txs_being_handled_locally.try_add_tx(tx_blob_hash) { + return None; + } + + Some(Ok((tx_blob_hash, tx_blob))) + }) + .collect::, _>>()?; + + // Filter the txs already in the txpool out. + // This will leave the txs already in the pool in [`TxBeingHandledLocally`] but that shouldn't be an issue. 
+ let TxpoolReadResponse::FilterKnownTxBlobHashes { + unknown_blob_hashes, + stem_pool_hashes, + } = txpool_read_handle + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(TxpoolReadRequest::FilterKnownTxBlobHashes(tx_blob_hashes)) + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + else { + unreachable!() + }; + + // Now prepare the txs for verification. + rayon_spawn_async(move || { + let txs = txs + .into_iter() + .filter_map(|(tx_blob_hash, tx_blob)| { + if unknown_blob_hashes.contains(&tx_blob_hash) { + Some(tx_blob) + } else { + None + } + }) + .map(|bytes| { + let tx = Transaction::read(&mut bytes.as_ref()).map_err(IncomingTxError::Parse)?; + + let tx = new_tx_verification_data(tx) + .map_err(|e| IncomingTxError::Consensus(e.into()))?; + + Ok(Arc::new(tx)) + }) + .collect::, IncomingTxError>>()?; + + Ok((txs, stem_pool_hashes, txs_being_handled_locally)) + }) + .await +} + +/// Handle a verified tx. +/// +/// This will add the tx to the txpool and route it to the network. +async fn handle_valid_tx( + tx: Arc, + state: TxState, + txpool_write_handle: &mut TxpoolWriteHandle, + dandelion_pool_manager: &mut DandelionPoolService< + DandelionTx, + TxId, + CrossNetworkInternalPeerId, + >, +) { + let incoming_tx = + IncomingTxBuilder::new(DandelionTx(Bytes::copy_from_slice(&tx.tx_blob)), tx.tx_hash); + + let TxpoolWriteResponse::AddTransaction(double_spend) = txpool_write_handle + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(TxpoolWriteRequest::AddTransaction { + tx, + state_stem: state.is_stem_stage(), + }) + .await + .expect("TODO") + else { + unreachable!() + }; + + // TODO: track double spends to quickly ignore them from their blob hash. + if let Some(tx_hash) = double_spend { + return; + }; + + // TODO: There is a race condition possible if a tx and block come in at the same time: . + + let incoming_tx = incoming_tx + .with_routing_state(state) + .with_state_in_db(None) + .build() + .unwrap(); + + dandelion_pool_manager + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(incoming_tx) + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR); +} + +/// Re-relay a tx that was already in our stem pool. +async fn rerelay_stem_tx( + tx_hash: &TxId, + state: TxState, + txpool_read_handle: &mut TxpoolReadHandle, + dandelion_pool_manager: &mut DandelionPoolService< + DandelionTx, + TxId, + CrossNetworkInternalPeerId, + >, +) { + let Ok(TxpoolReadResponse::TxBlob { tx_blob, .. }) = txpool_read_handle + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(TxpoolReadRequest::TxBlob(*tx_hash)) + .await + else { + // The tx could have been dropped from the pool. + return; + }; + + let incoming_tx = + IncomingTxBuilder::new(DandelionTx(Bytes::copy_from_slice(&tx_blob)), *tx_hash); + + let incoming_tx = incoming_tx + .with_routing_state(state) + .with_state_in_db(Some(State::Stem)) + .build() + .unwrap(); + + dandelion_pool_manager + .ready() + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR) + .call(incoming_tx) + .await + .expect(PANIC_CRITICAL_SERVICE_ERROR); +} diff --git a/binaries/cuprated/src/txpool/txs_being_handled.rs b/binaries/cuprated/src/txpool/txs_being_handled.rs new file mode 100644 index 0000000..122b8ac --- /dev/null +++ b/binaries/cuprated/src/txpool/txs_being_handled.rs @@ -0,0 +1,53 @@ +use std::sync::Arc; + +use dashmap::DashSet; + +/// A set of txs currently being handled, shared between instances of the incoming tx handler. 
+#[derive(Clone)] +pub struct TxsBeingHandled(Arc>); + +impl TxsBeingHandled { + /// Create a new [`TxsBeingHandled`]. + pub fn new() -> Self { + Self(Arc::new(DashSet::new())) + } + + /// Create a new [`TxsBeingHandledLocally`] that will keep track of txs being handled in a request. + pub fn local_tracker(&self) -> TxsBeingHandledLocally { + TxsBeingHandledLocally { + txs_being_handled: self.clone(), + txs: vec![], + } + } +} + +/// A tracker of txs being handled in a single request. This will add the txs to the global [`TxsBeingHandled`] +/// tracker as well. +/// +/// When this is dropped the txs will be removed from [`TxsBeingHandled`]. +pub struct TxsBeingHandledLocally { + txs_being_handled: TxsBeingHandled, + txs: Vec<[u8; 32]>, +} + +impl TxsBeingHandledLocally { + /// Try to add a tx to the map from its [`transaction_blob_hash`](cuprate_txpool::transaction_blob_hash). + /// + /// Returns `true` if the tx was added and `false` if another task is already handling this tx. + pub fn try_add_tx(&mut self, tx_blob_hash: [u8; 32]) -> bool { + if !self.txs_being_handled.0.insert(tx_blob_hash) { + return false; + } + + self.txs.push(tx_blob_hash); + true + } +} + +impl Drop for TxsBeingHandledLocally { + fn drop(&mut self) { + for hash in &self.txs { + self.txs_being_handled.0.remove(hash); + } + } +} diff --git a/p2p/dandelion-tower/src/router.rs b/p2p/dandelion-tower/src/router.rs index 88702be..7ca0598 100644 --- a/p2p/dandelion-tower/src/router.rs +++ b/p2p/dandelion-tower/src/router.rs @@ -73,6 +73,15 @@ pub enum TxState { Local, } +impl TxState { + /// Returns `true` if the tx is in the stem stage. + /// + /// [`TxState::Local`] & [`TxState::Stem`] are the two stem stage states. + pub const fn is_stem_stage(&self) -> bool { + matches!(self, Self::Local | Self::Stem { .. }) + } +} + /// A request to route a transaction. pub struct DandelionRouteReq { /// The transaction. diff --git a/p2p/p2p/src/client_pool.rs b/p2p/p2p/src/client_pool.rs index fc97fc1..67c8f11 100644 --- a/p2p/p2p/src/client_pool.rs +++ b/p2p/p2p/src/client_pool.rs @@ -18,13 +18,13 @@ use tracing::{Instrument, Span}; use cuprate_p2p_core::{ client::{Client, InternalPeerID}, handles::ConnectionHandle, - NetworkZone, + ConnectionDirection, NetworkZone, }; pub(crate) mod disconnect_monitor; mod drop_guard_client; -pub(crate) use drop_guard_client::ClientPoolDropGuard; +pub use drop_guard_client::ClientPoolDropGuard; /// The client pool, which holds currently connected free peers. /// @@ -165,6 +165,17 @@ impl ClientPool { sync_data.cumulative_difficulty() > cumulative_difficulty }) } + + /// Returns the first outbound peer when iterating over the peers. 
+ pub fn outbound_client(self: &Arc) -> Option> { + let client = self + .clients + .iter() + .find(|element| element.value().info.direction == ConnectionDirection::Outbound)?; + let id = *client.key(); + + Some(self.borrow_client(&id).unwrap()) + } } mod sealed { diff --git a/p2p/p2p/src/lib.rs b/p2p/p2p/src/lib.rs index b3577a7..541784c 100644 --- a/p2p/p2p/src/lib.rs +++ b/p2p/p2p/src/lib.rs @@ -18,7 +18,7 @@ use cuprate_p2p_core::{ pub mod block_downloader; mod broadcast; -mod client_pool; +pub mod client_pool; pub mod config; pub mod connection_maintainer; pub mod constants; @@ -26,6 +26,7 @@ mod inbound_server; use block_downloader::{BlockBatch, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse}; pub use broadcast::{BroadcastRequest, BroadcastSvc}; +pub use client_pool::{ClientPool, ClientPoolDropGuard}; pub use config::{AddressBookConfig, P2PConfig}; use connection_maintainer::MakeConnectionRequest; @@ -82,7 +83,7 @@ where let outbound_handshaker = outbound_handshaker_builder.build(); - let client_pool = client_pool::ClientPool::new(); + let client_pool = ClientPool::new(); let (make_connection_tx, make_connection_rx) = mpsc::channel(3); @@ -132,7 +133,7 @@ where #[derive(Clone)] pub struct NetworkInterface { /// A pool of free connected peers. - pool: Arc>, + pool: Arc>, /// A [`Service`] that allows broadcasting to all connected peers. broadcast_svc: BroadcastSvc, /// A channel to request extra connections. @@ -173,7 +174,7 @@ impl NetworkInterface { } /// Borrows the `ClientPool`, for access to connected peers. - pub const fn client_pool(&self) -> &Arc> { + pub const fn client_pool(&self) -> &Arc> { &self.pool } } diff --git a/storage/service/src/service/write.rs b/storage/service/src/service/write.rs index f75d615..607c4aa 100644 --- a/storage/service/src/service/write.rs +++ b/storage/service/src/service/write.rs @@ -30,6 +30,14 @@ pub struct DatabaseWriteHandle { crossbeam::channel::Sender<(Req, oneshot::Sender>)>, } +impl Clone for DatabaseWriteHandle { + fn clone(&self) -> Self { + Self { + sender: self.sender.clone(), + } + } +} + impl DatabaseWriteHandle where Req: Send + 'static, diff --git a/storage/txpool/Cargo.toml b/storage/txpool/Cargo.toml index b9d4218..c301166 100644 --- a/storage/txpool/Cargo.toml +++ b/storage/txpool/Cargo.toml @@ -29,6 +29,7 @@ bytemuck = { workspace = true, features = ["must_cast", "derive" bitflags = { workspace = true, features = ["std", "serde", "bytemuck"] } thiserror = { workspace = true } hex = { workspace = true } +blake3 = { workspace = true, features = ["std"] } tower = { workspace = true, optional = true } rayon = { workspace = true, optional = true } diff --git a/storage/txpool/src/free.rs b/storage/txpool/src/free.rs index d394002..d0f9a31 100644 --- a/storage/txpool/src/free.rs +++ b/storage/txpool/src/free.rs @@ -3,7 +3,7 @@ //---------------------------------------------------------------------------------------------------- Import use cuprate_database::{ConcreteEnv, Env, EnvInner, InitError, RuntimeError, TxRw}; -use crate::{config::Config, tables::OpenTables}; +use crate::{config::Config, tables::OpenTables, types::TransactionBlobHash}; //---------------------------------------------------------------------------------------------------- Free functions /// Open the txpool database using the passed [`Config`]. @@ -60,3 +60,13 @@ pub fn open(config: Config) -> Result { Ok(env) } + +/// Calculate the transaction blob hash. 
+/// +/// This value is supposed to be quick to compute just based on the tx-blob without needing to parse the tx. +/// +/// The exact way the hash is calculated is not stable and is subject to change; as such, it should not be exposed +/// as a way to interact with Cuprate externally. +pub fn transaction_blob_hash(tx_blob: &[u8]) -> TransactionBlobHash { + blake3::hash(tx_blob).into() +} diff --git a/storage/txpool/src/lib.rs b/storage/txpool/src/lib.rs index 5fb3b14..8a57c72 100644 --- a/storage/txpool/src/lib.rs +++ b/storage/txpool/src/lib.rs @@ -14,7 +14,7 @@ mod tx; pub mod types; pub use config::Config; -pub use free::open; +pub use free::{open, transaction_blob_hash}; pub use tx::TxEntry; //re-exports diff --git a/storage/txpool/src/ops.rs b/storage/txpool/src/ops.rs index 50d9ea4..289a8bb 100644 --- a/storage/txpool/src/ops.rs +++ b/storage/txpool/src/ops.rs @@ -85,7 +85,7 @@ mod key_images; mod tx_read; mod tx_write; -pub use tx_read::get_transaction_verification_data; +pub use tx_read::{get_transaction_verification_data, in_stem_pool}; pub use tx_write::{add_transaction, remove_transaction}; /// An error that can occur on some tx-write ops. diff --git a/storage/txpool/src/ops/tx_read.rs b/storage/txpool/src/ops/tx_read.rs index db89415..5569075 100644 --- a/storage/txpool/src/ops/tx_read.rs +++ b/storage/txpool/src/ops/tx_read.rs @@ -8,7 +8,10 @@ use monero_serai::transaction::Transaction; use cuprate_database::{DatabaseRo, RuntimeError}; use cuprate_types::{TransactionVerificationData, TxVersion}; -use crate::{tables::Tables, types::TransactionHash}; +use crate::{ + tables::{Tables, TransactionInfos}, + types::{TransactionHash, TxStateFlags}, +}; /// Gets the [`TransactionVerificationData`] of a transaction in the tx-pool, leaving the tx in the pool. pub fn get_transaction_verification_data( @@ -34,3 +37,17 @@ pub fn get_transaction_verification_data( cached_verification_state: Mutex::new(cached_verification_state), }) } + +/// Returns `true` if the transaction with the given hash is in the stem pool. +/// +/// # Errors +/// This will return an [`Err`] if the transaction is not in the pool. +pub fn in_stem_pool( + tx_hash: &TransactionHash, + tx_infos: &impl DatabaseRo, +) -> Result { + Ok(tx_infos + .get(tx_hash)? + .flags + .contains(TxStateFlags::STATE_STEM)) +} diff --git a/storage/txpool/src/ops/tx_write.rs b/storage/txpool/src/ops/tx_write.rs index 9885b9c..dc5ab46 100644 --- a/storage/txpool/src/ops/tx_write.rs +++ b/storage/txpool/src/ops/tx_write.rs @@ -8,6 +8,7 @@ use cuprate_database::{DatabaseRw, RuntimeError, StorableVec}; use cuprate_types::TransactionVerificationData; use crate::{ + free::transaction_blob_hash, ops::{ key_images::{add_tx_key_images, remove_tx_key_images}, TxPoolWriteError, @@ -56,6 +57,12 @@ pub fn add_transaction( let kis_table = tables.spent_key_images_mut(); add_tx_key_images(&tx.tx.prefix().inputs, &tx.tx_hash, kis_table)?; + // Add the blob hash to table 4. + let blob_hash = transaction_blob_hash(&tx.tx_blob); + tables + .known_blob_hashes_mut() + .put(&blob_hash, &tx.tx_hash)?; + Ok(()) } @@ -79,5 +86,9 @@ pub fn remove_transaction( let kis_table = tables.spent_key_images_mut(); remove_tx_key_images(&tx.prefix().inputs, kis_table)?; + // Remove the blob hash from table 4. 
+ let blob_hash = transaction_blob_hash(&tx_blob); + tables.known_blob_hashes_mut().delete(&blob_hash)?; + Ok(()) } diff --git a/storage/txpool/src/service/interface.rs b/storage/txpool/src/service/interface.rs index 88dd02e..5cd518f 100644 --- a/storage/txpool/src/service/interface.rs +++ b/storage/txpool/src/service/interface.rs @@ -1,21 +1,36 @@ //! Tx-pool [`service`](super) interface. //! //! This module contains `cuprate_txpool`'s [`tower::Service`] request and response enums. -use std::sync::Arc; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use cuprate_types::TransactionVerificationData; -use crate::{tx::TxEntry, types::TransactionHash}; +use crate::{ + tx::TxEntry, + types::{KeyImage, TransactionBlobHash, TransactionHash}, +}; //---------------------------------------------------------------------------------------------------- TxpoolReadRequest /// The transaction pool [`tower::Service`] read request type. +#[derive(Clone)] pub enum TxpoolReadRequest { - /// A request for the blob (raw bytes) of a transaction with the given hash. + /// Get the blob (raw bytes) of a transaction with the given hash. TxBlob(TransactionHash), - /// A request for the [`TransactionVerificationData`] of a transaction in the tx pool. + /// Get the [`TransactionVerificationData`] of a transaction in the tx pool. TxVerificationData(TransactionHash), + /// Filter (remove) all **known** transactions from the set. + /// + /// The hash is **not** the transaction hash, it is the hash of the serialized tx-blob. + FilterKnownTxBlobHashes(HashSet), + + /// Get some transactions for an incoming block. + TxsForBlock(Vec), + /// Get information on all transactions in the pool. Backlog, @@ -27,15 +42,28 @@ pub enum TxpoolReadRequest { /// The transaction pool [`tower::Service`] read response type. #[expect(clippy::large_enum_variant)] pub enum TxpoolReadResponse { - /// Response to [`TxpoolReadRequest::TxBlob`]. - /// - /// The inner value is the raw bytes of a transaction. - // TODO: use bytes::Bytes. - TxBlob(Vec), + /// The response for [`TxpoolReadRequest::TxBlob`]. + TxBlob { tx_blob: Vec, state_stem: bool }, - /// Response to [`TxpoolReadRequest::TxVerificationData`]. + /// The response for [`TxpoolReadRequest::TxVerificationData`]. TxVerificationData(TransactionVerificationData), + /// The response for [`TxpoolReadRequest::FilterKnownTxBlobHashes`]. + FilterKnownTxBlobHashes { + /// The blob hashes that are unknown. + unknown_blob_hashes: HashSet, + /// The tx hashes of the blob hashes that were known but were in the stem pool. + stem_pool_hashes: Vec, + }, + + /// The response for [`TxpoolReadRequest::TxsForBlock`]. + TxsForBlock { + /// The txs we had in the txpool. + txs: HashMap<[u8; 32], TransactionVerificationData>, + /// The indexes of the missing txs. + missing: Vec, + }, + /// Response to [`TxpoolReadRequest::Backlog`]. /// /// The inner `Vec` contains information on all @@ -66,9 +94,17 @@ pub enum TxpoolWriteRequest { }, /// Remove a transaction with the given hash from the pool. - /// - /// Returns [`TxpoolWriteResponse::Ok`]. RemoveTransaction(TransactionHash), + + /// Promote a transaction from the stem pool to the fluff pool. + /// If the tx is already in the fluff pool this does nothing. + Promote(TransactionHash), + + /// Tell the tx-pool about a new block. + NewBlock { + /// The spent key images in the new block. 
+ spent_key_images: Vec, + }, } //---------------------------------------------------------------------------------------------------- TxpoolWriteResponse @@ -77,6 +113,8 @@ pub enum TxpoolWriteRequest { pub enum TxpoolWriteResponse { /// Response to: /// - [`TxpoolWriteRequest::RemoveTransaction`] + /// - [`TxpoolWriteRequest::Promote`] + /// - [`TxpoolWriteRequest::NewBlock`] Ok, /// Response to [`TxpoolWriteRequest::AddTransaction`]. diff --git a/storage/txpool/src/service/read.rs b/storage/txpool/src/service/read.rs index 3135322..257fe8e 100644 --- a/storage/txpool/src/service/read.rs +++ b/storage/txpool/src/service/read.rs @@ -4,22 +4,24 @@ clippy::unnecessary_wraps, reason = "TODO: finish implementing the signatures from " )] - -use std::sync::Arc; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use rayon::ThreadPool; -use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner}; +use cuprate_database::{ConcreteEnv, DatabaseRo, Env, EnvInner, RuntimeError}; use cuprate_database_service::{init_thread_pool, DatabaseReadService, ReaderThreads}; use crate::{ - ops::get_transaction_verification_data, + ops::{get_transaction_verification_data, in_stem_pool}, service::{ interface::{TxpoolReadRequest, TxpoolReadResponse}, types::{ReadResponseResult, TxpoolReadHandle}, }, - tables::{OpenTables, TransactionBlobs}, - types::TransactionHash, + tables::{KnownBlobHashes, OpenTables, TransactionBlobs, TransactionInfos}, + types::{TransactionBlobHash, TransactionHash}, }; // TODO: update the docs here @@ -57,7 +59,6 @@ fn init_read_service_with_pool(env: Arc, pool: Arc) -> /// 1. `Request` is mapped to a handler function /// 2. Handler function is called /// 3. [`TxpoolReadResponse`] is returned -#[expect(clippy::needless_pass_by_value)] fn map_request( env: &ConcreteEnv, // Access to the database request: TxpoolReadRequest, // The request we must fulfill @@ -65,6 +66,10 @@ fn map_request( match request { TxpoolReadRequest::TxBlob(tx_hash) => tx_blob(env, &tx_hash), TxpoolReadRequest::TxVerificationData(tx_hash) => tx_verification_data(env, &tx_hash), + TxpoolReadRequest::FilterKnownTxBlobHashes(blob_hashes) => { + filter_known_tx_blob_hashes(env, blob_hashes) + } + TxpoolReadRequest::TxsForBlock(txs_needed) => txs_for_block(env, txs_needed), TxpoolReadRequest::Backlog => backlog(env), TxpoolReadRequest::Size => size(env), } @@ -94,10 +99,14 @@ fn tx_blob(env: &ConcreteEnv, tx_hash: &TransactionHash) -> ReadResponseResult { let tx_ro = inner_env.tx_ro()?; let tx_blobs_table = inner_env.open_db_ro::(&tx_ro)?; + let tx_infos_table = inner_env.open_db_ro::(&tx_ro)?; - tx_blobs_table - .get(tx_hash) - .map(|blob| TxpoolReadResponse::TxBlob(blob.0)) + let tx_blob = tx_blobs_table.get(tx_hash)?.0; + + Ok(TxpoolReadResponse::TxBlob { + tx_blob, + state_stem: in_stem_pool(tx_hash, &tx_infos_table)?, + }) } /// [`TxpoolReadRequest::TxVerificationData`]. @@ -111,6 +120,79 @@ fn tx_verification_data(env: &ConcreteEnv, tx_hash: &TransactionHash) -> ReadRes get_transaction_verification_data(tx_hash, &tables).map(TxpoolReadResponse::TxVerificationData) } +/// [`TxpoolReadRequest::FilterKnownTxBlobHashes`]. 
+fn filter_known_tx_blob_hashes( + env: &ConcreteEnv, + mut blob_hashes: HashSet, +) -> ReadResponseResult { + let inner_env = env.env_inner(); + let tx_ro = inner_env.tx_ro()?; + + let tx_blob_hashes = inner_env.open_db_ro::(&tx_ro)?; + let tx_infos = inner_env.open_db_ro::(&tx_ro)?; + + let mut stem_pool_hashes = Vec::new(); + + // A closure that returns `true` if a tx with a certain blob hash is unknown. + // This also fills in `stem_tx_hashes`. + let mut tx_unknown = |blob_hash| -> Result { + match tx_blob_hashes.get(&blob_hash) { + Ok(tx_hash) => { + if in_stem_pool(&tx_hash, &tx_infos)? { + stem_pool_hashes.push(tx_hash); + } + Ok(false) + } + Err(RuntimeError::KeyNotFound) => Ok(true), + Err(e) => Err(e), + } + }; + + let mut err = None; + blob_hashes.retain(|blob_hash| match tx_unknown(*blob_hash) { + Ok(res) => res, + Err(e) => { + err = Some(e); + false + } + }); + + if let Some(e) = err { + return Err(e); + } + + Ok(TxpoolReadResponse::FilterKnownTxBlobHashes { + unknown_blob_hashes: blob_hashes, + stem_pool_hashes, + }) +} + +/// [`TxpoolReadRequest::TxsForBlock`]. +fn txs_for_block(env: &ConcreteEnv, txs: Vec) -> ReadResponseResult { + let inner_env = env.env_inner(); + let tx_ro = inner_env.tx_ro()?; + + let tables = inner_env.open_tables(&tx_ro)?; + + let mut missing_tx_indexes = Vec::with_capacity(txs.len()); + let mut txs_verification_data = HashMap::with_capacity(txs.len()); + + for (i, tx_hash) in txs.into_iter().enumerate() { + match get_transaction_verification_data(&tx_hash, &tables) { + Ok(tx) => { + txs_verification_data.insert(tx_hash, tx); + } + Err(RuntimeError::KeyNotFound) => missing_tx_indexes.push(i), + Err(e) => return Err(e), + } + } + + Ok(TxpoolReadResponse::TxsForBlock { + txs: txs_verification_data, + missing: missing_tx_indexes, + }) +} + /// [`TxpoolReadRequest::Backlog`]. 
#[inline] fn backlog(env: &ConcreteEnv) -> ReadResponseResult { diff --git a/storage/txpool/src/service/write.rs b/storage/txpool/src/service/write.rs index 8a3b1bf..13ab81f 100644 --- a/storage/txpool/src/service/write.rs +++ b/storage/txpool/src/service/write.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use cuprate_database::{ConcreteEnv, Env, EnvInner, RuntimeError, TxRw}; +use cuprate_database::{ConcreteEnv, DatabaseRo, DatabaseRw, Env, EnvInner, RuntimeError, TxRw}; use cuprate_database_service::DatabaseWriteHandle; use cuprate_types::TransactionVerificationData; @@ -10,8 +10,8 @@ use crate::{ interface::{TxpoolWriteRequest, TxpoolWriteResponse}, types::TxpoolWriteHandle, }, - tables::OpenTables, - types::TransactionHash, + tables::{OpenTables, Tables, TransactionInfos}, + types::{KeyImage, TransactionHash, TxStateFlags}, }; //---------------------------------------------------------------------------------------------------- init_write_service @@ -31,6 +31,8 @@ fn handle_txpool_request( add_transaction(env, tx, *state_stem) } TxpoolWriteRequest::RemoveTransaction(tx_hash) => remove_transaction(env, tx_hash), + TxpoolWriteRequest::Promote(tx_hash) => promote(env, tx_hash), + TxpoolWriteRequest::NewBlock { spent_key_images } => new_block(env, spent_key_images), } } @@ -101,3 +103,68 @@ fn remove_transaction( TxRw::commit(tx_rw)?; Ok(TxpoolWriteResponse::Ok) } + +/// [`TxpoolWriteRequest::Promote`] +fn promote( + env: &ConcreteEnv, + tx_hash: &TransactionHash, +) -> Result { + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw()?; + + let res = || { + let mut tx_infos = env_inner.open_db_rw::(&tx_rw)?; + + tx_infos.update(tx_hash, |mut info| { + info.flags.remove(TxStateFlags::STATE_STEM); + Some(info) + }) + }; + + if let Err(e) = res() { + // error promoting the tx, abort the DB transaction. + TxRw::abort(tx_rw) + .expect("could not maintain database atomicity by aborting write transaction"); + + return Err(e); + } + + TxRw::commit(tx_rw)?; + Ok(TxpoolWriteResponse::Ok) +} + +/// [`TxpoolWriteRequest::NewBlock`] +fn new_block( + env: &ConcreteEnv, + spent_key_images: &[KeyImage], +) -> Result { + let env_inner = env.env_inner(); + let tx_rw = env_inner.tx_rw()?; + + // FIXME: use try blocks once stable. + let result = || { + let mut tables_mut = env_inner.open_tables_mut(&tx_rw)?; + + // Remove all txs which spend key images that were spent in the new block. + for key_image in spent_key_images { + match tables_mut + .spent_key_images() + .get(key_image) + .and_then(|tx_hash| ops::remove_transaction(&tx_hash, &mut tables_mut)) + { + Ok(()) | Err(RuntimeError::KeyNotFound) => (), + Err(e) => return Err(e), + } + } + + Ok(()) + }; + + if let Err(e) = result() { + TxRw::abort(tx_rw)?; + return Err(e); + } + + TxRw::commit(tx_rw)?; + Ok(TxpoolWriteResponse::Ok) +} diff --git a/storage/txpool/src/tables.rs b/storage/txpool/src/tables.rs index dbb686a..1f2d449 100644 --- a/storage/txpool/src/tables.rs +++ b/storage/txpool/src/tables.rs @@ -16,7 +16,9 @@ //! accessing _all_ tables defined here at once. use cuprate_database::{define_tables, StorableVec}; -use crate::types::{KeyImage, RawCachedVerificationState, TransactionHash, TransactionInfo}; +use crate::types::{ + KeyImage, RawCachedVerificationState, TransactionBlobHash, TransactionHash, TransactionInfo, +}; define_tables! { /// Serialized transaction blobs. @@ -41,5 +43,9 @@ define_tables! { /// /// This table contains the spent key images from all transactions in the pool. 
3 => SpentKeyImages, - KeyImage => TransactionHash + KeyImage => TransactionHash, + + /// Transaction blob hashes that are in the pool. + 4 => KnownBlobHashes, + TransactionBlobHash => TransactionHash, } diff --git a/storage/txpool/src/types.rs b/storage/txpool/src/types.rs index 4da2d0f..2acb819 100644 --- a/storage/txpool/src/types.rs +++ b/storage/txpool/src/types.rs @@ -6,7 +6,6 @@ //! //! use bytemuck::{Pod, Zeroable}; - use monero_serai::transaction::Timelock; use cuprate_types::{CachedVerificationState, HardFork}; @@ -17,6 +16,9 @@ pub type KeyImage = [u8; 32]; /// A transaction hash. pub type TransactionHash = [u8; 32]; +/// A transaction blob hash. +pub type TransactionBlobHash = [u8; 32]; + bitflags::bitflags! { /// Flags representing the state of the transaction in the pool. #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Pod, Zeroable)] From 44981f2b2490daf7f4d2467d4ebed7cdff1bd707 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Fri, 1 Nov 2024 20:22:14 +0000 Subject: [PATCH 02/14] CI: add cargo hack (#170) * add workflow * fix errors * fix workflow * install dependencies * fix more errors * Update CONTRIBUTING.md * Update CONTRIBUTING.md Co-authored-by: hinto-janai * fix hack + enable it for cuprate-database * move hack to main CI * fix docs * fix ci formatting * fix txpool tests * fix CONTRIBUTING.md formatting * service -> tower::Service * review fixes * review fixes * fix CI --------- Co-authored-by: hinto-janai --- .github/workflows/ci.yml | 7 ++++++- CONTRIBUTING.md | 15 +++++++++------ binaries/cuprated/Cargo.toml | 2 +- helper/Cargo.toml | 2 +- helper/src/lib.rs | 2 +- net/epee-encoding/src/container_as_blob.rs | 2 ++ net/epee-encoding/src/error.rs | 1 + net/epee-encoding/src/lib.rs | 1 + net/epee-encoding/src/value.rs | 2 +- p2p/address-book/Cargo.toml | 2 +- pruning/Cargo.toml | 2 +- rpc/interface/Cargo.toml | 8 ++++---- rpc/types/src/bin.rs | 8 ++++++-- rpc/types/src/json.rs | 10 ++++++---- rpc/types/src/lib.rs | 1 + rpc/types/src/misc/distribution.rs | 14 +++++--------- rpc/types/src/misc/misc.rs | 10 +++++----- rpc/types/src/other.rs | 4 +++- storage/blockchain/Cargo.toml | 13 ++++++------- storage/blockchain/README.md | 5 +---- storage/blockchain/src/lib.rs | 6 +----- storage/blockchain/src/service/mod.rs | 2 -- storage/database/Cargo.toml | 6 +++--- storage/database/src/backend/mod.rs | 2 ++ storage/service/Cargo.toml | 10 ++++++++-- storage/txpool/Cargo.toml | 11 +++++------ storage/txpool/README.md | 4 ---- storage/txpool/src/lib.rs | 6 +++--- storage/txpool/src/service.rs | 4 +--- types/Cargo.toml | 10 ++++++---- types/src/hex.rs | 1 + types/src/json/block.rs | 6 +++--- types/src/json/output.rs | 2 +- types/src/json/tx.rs | 6 +++--- 34 files changed, 99 insertions(+), 88 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8c2271d..367e8e1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -133,7 +133,12 @@ jobs: - name: Test run: | cargo test --all-features --workspace - cargo test --package cuprate-blockchain --no-default-features --features redb --features service + cargo test --package cuprate-blockchain --no-default-features --features redb + + - name: Hack Check + run: | + cargo install cargo-hack --locked + cargo hack --workspace check --feature-powerset --no-dev-deps # TODO: upload binaries with `actions/upload-artifact@v3` - name: Build diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1b66a58..2d99060 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -120,12 +120,15 @@ Before 
pushing your code, please run the following at the root of the repository After that, ensure all other CI passes by running: -| Command | Does what | -|------------------------------------------------------------------------|-----------| -| `RUSTDOCFLAGS='-D warnings' cargo doc --workspace --all-features` | Checks documentation is OK -| `cargo clippy --workspace --all-features --all-targets -- -D warnings` | Checks clippy lints are satisfied -| `cargo test --all-features --workspace` | Runs all tests -| `cargo build --all-features --all-targets --workspace` | Builds all code +| Command | Does what | +|------------------------------------------------------------------------|-------------------------------------------------------------------------| +| `RUSTDOCFLAGS='-D warnings' cargo doc --workspace --all-features` | Checks documentation is OK | +| `cargo clippy --workspace --all-features --all-targets -- -D warnings` | Checks clippy lints are satisfied | +| `cargo test --all-features --workspace` | Runs all tests | +| `cargo build --all-features --all-targets --workspace` | Builds all code | +| `cargo hack --workspace check --feature-powerset --no-dev-deps` | Uses `cargo hack` to check our crates build with different features set | + +`cargo hack` can be installed with `cargo` from: https://github.com/taiki-e/cargo-hack. **Note: in order for some tests to work, you will need to place a [`monerod`](https://www.getmonero.org/downloads/) binary at the root of the repository.** diff --git a/binaries/cuprated/Cargo.toml b/binaries/cuprated/Cargo.toml index 880c205..d59b4c3 100644 --- a/binaries/cuprated/Cargo.toml +++ b/binaries/cuprated/Cargo.toml @@ -24,7 +24,7 @@ cuprate-p2p-core = { workspace = true } cuprate-dandelion-tower = { workspace = true, features = ["txpool"] } cuprate-async-buffer = { workspace = true } cuprate-address-book = { workspace = true } -cuprate-blockchain = { workspace = true, features = ["service"] } +cuprate-blockchain = { workspace = true } cuprate-database-service = { workspace = true } cuprate-txpool = { workspace = true } cuprate-database = { workspace = true } diff --git a/helper/Cargo.toml b/helper/Cargo.toml index ad78a44..1b3158f 100644 --- a/helper/Cargo.toml +++ b/helper/Cargo.toml @@ -17,7 +17,7 @@ asynch = ["dep:futures", "dep:rayon"] cast = [] constants = [] crypto = ["dep:curve25519-dalek", "dep:monero-serai", "std"] -fs = ["dep:dirs"] +fs = ["dep:dirs", "std"] num = [] map = ["cast", "dep:monero-serai", "dep:cuprate-constants"] time = ["dep:chrono", "std"] diff --git a/helper/src/lib.rs b/helper/src/lib.rs index 47d47a2..9bd64fa 100644 --- a/helper/src/lib.rs +++ b/helper/src/lib.rs @@ -11,7 +11,7 @@ pub mod atomic; #[cfg(feature = "cast")] pub mod cast; -#[cfg(feature = "fs")] +#[cfg(all(feature = "fs", feature = "std"))] pub mod fs; pub mod network; diff --git a/net/epee-encoding/src/container_as_blob.rs b/net/epee-encoding/src/container_as_blob.rs index 83078c2..363e157 100644 --- a/net/epee-encoding/src/container_as_blob.rs +++ b/net/epee-encoding/src/container_as_blob.rs @@ -1,3 +1,5 @@ +use alloc::{string::ToString, vec, vec::Vec}; + use bytes::{Buf, BufMut, Bytes, BytesMut}; use ref_cast::RefCast; diff --git a/net/epee-encoding/src/error.rs b/net/epee-encoding/src/error.rs index 756cd13..7206189 100644 --- a/net/epee-encoding/src/error.rs +++ b/net/epee-encoding/src/error.rs @@ -1,3 +1,4 @@ +use alloc::string::{String, ToString}; use core::{ fmt::{Debug, Formatter}, num::TryFromIntError, diff --git a/net/epee-encoding/src/lib.rs 
b/net/epee-encoding/src/lib.rs index d55a546..a6ff1b0 100644 --- a/net/epee-encoding/src/lib.rs +++ b/net/epee-encoding/src/lib.rs @@ -64,6 +64,7 @@ use hex as _; extern crate alloc; +use alloc::string::ToString; use core::str::from_utf8 as str_from_utf8; use bytes::{Buf, BufMut, Bytes, BytesMut}; diff --git a/net/epee-encoding/src/value.rs b/net/epee-encoding/src/value.rs index 816203e..4762c96 100644 --- a/net/epee-encoding/src/value.rs +++ b/net/epee-encoding/src/value.rs @@ -1,7 +1,7 @@ //! This module contains a [`EpeeValue`] trait and //! impls for some possible base epee values. -use alloc::{string::String, vec::Vec}; +use alloc::{string::String, vec, vec::Vec}; use core::fmt::Debug; use bytes::{Buf, BufMut, Bytes, BytesMut}; diff --git a/p2p/address-book/Cargo.toml b/p2p/address-book/Cargo.toml index 9cbba71..a88819f 100644 --- a/p2p/address-book/Cargo.toml +++ b/p2p/address-book/Cargo.toml @@ -9,7 +9,7 @@ authors = ["Boog900"] [dependencies] cuprate-constants = { workspace = true } cuprate-pruning = { workspace = true } -cuprate-p2p-core = { workspace = true } +cuprate-p2p-core = { workspace = true, features = ["borsh"] } tower = { workspace = true, features = ["util"] } tokio = { workspace = true, features = ["time", "fs", "rt"]} diff --git a/pruning/Cargo.toml b/pruning/Cargo.toml index 6fcc74e..4b03551 100644 --- a/pruning/Cargo.toml +++ b/pruning/Cargo.toml @@ -10,7 +10,7 @@ default = [] borsh = ["dep:borsh"] [dependencies] -cuprate-constants = { workspace = true } +cuprate-constants = { workspace = true, features = ["block"] } thiserror = { workspace = true } diff --git a/rpc/interface/Cargo.toml b/rpc/interface/Cargo.toml index ef62d34..c5d4db7 100644 --- a/rpc/interface/Cargo.toml +++ b/rpc/interface/Cargo.toml @@ -10,20 +10,20 @@ keywords = ["cuprate", "rpc", "interface"] [features] default = ["dummy", "serde"] -dummy = [] +dummy = ["dep:cuprate-helper", "dep:futures"] [dependencies] cuprate-epee-encoding = { workspace = true, default-features = false } cuprate-json-rpc = { workspace = true, default-features = false } cuprate-rpc-types = { workspace = true, features = ["serde", "epee"], default-features = false } -cuprate-helper = { workspace = true, features = ["asynch"], default-features = false } +cuprate-helper = { workspace = true, features = ["asynch"], default-features = false, optional = true } anyhow = { workspace = true } axum = { version = "0.7.5", features = ["json"], default-features = false } serde = { workspace = true, optional = true } -tower = { workspace = true } +tower = { workspace = true, features = ["util"] } paste = { workspace = true } -futures = { workspace = true } +futures = { workspace = true, optional = true } [dev-dependencies] cuprate-test-utils = { workspace = true } diff --git a/rpc/types/src/bin.rs b/rpc/types/src/bin.rs index a68d3e1..7b94191 100644 --- a/rpc/types/src/bin.rs +++ b/rpc/types/src/bin.rs @@ -20,12 +20,16 @@ use cuprate_types::BlockCompleteEntry; use crate::{ base::AccessResponseBase, - defaults::{default_false, default_zero}, macros::{define_request, define_request_and_response, define_request_and_response_doc}, - misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolInfoExtent, PoolTxInfo, Status}, + misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolTxInfo, Status}, rpc_call::RpcCallValue, }; +#[cfg(any(feature = "epee", feature = "serde"))] +use crate::defaults::{default_false, default_zero}; +#[cfg(feature = "epee")] +use crate::misc::PoolInfoExtent; + 
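(Aside, not part of this patch: the import gating above is the pattern this commit applies throughout, so that `cargo hack check --feature-powerset` can build every feature combination without unused-import or unused-item warnings. A tiny self-contained sketch of the idea, with made-up feature names and helpers:)

    // Hypothetical crate with optional `serde` and `epee` features.

    /// A helper only referenced by feature-gated code, so it is itself gated
    /// on the union of the features that can use it.
    #[cfg(any(feature = "serde", feature = "epee"))]
    fn default_list() -> Vec<u8> {
        Vec::new()
    }

    #[cfg(feature = "serde")]
    pub fn used_under_serde() -> Vec<u8> {
        default_list()
    }

    #[cfg(feature = "epee")]
    pub fn used_under_epee() -> Vec<u8> {
        default_list()
    }
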
//---------------------------------------------------------------------------------------------------- Definitions define_request_and_response! { get_blocks_by_heightbin, diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index fd9ffa3..6fb538c 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -8,10 +8,6 @@ use serde::{Deserialize, Serialize}; use crate::{ base::{AccessResponseBase, ResponseBase}, - defaults::{ - default_false, default_height, default_one, default_string, default_true, default_vec, - default_zero, - }, macros::define_request_and_response, misc::{ AuxPow, BlockHeader, ChainInfo, ConnectionInfo, Distribution, GetBan, @@ -21,6 +17,12 @@ use crate::{ rpc_call::RpcCallValue, }; +#[cfg(any(feature = "epee", feature = "serde"))] +use crate::defaults::{ + default_false, default_height, default_one, default_string, default_true, default_vec, + default_zero, +}; + //---------------------------------------------------------------------------------------------------- Macro /// Adds a (de)serialization doc-test to a type in `json.rs`. /// diff --git a/rpc/types/src/lib.rs b/rpc/types/src/lib.rs index be1069e..403a3ea 100644 --- a/rpc/types/src/lib.rs +++ b/rpc/types/src/lib.rs @@ -6,6 +6,7 @@ )] mod constants; +#[cfg(any(feature = "serde", feature = "epee"))] mod defaults; mod free; mod macros; diff --git a/rpc/types/src/misc/distribution.rs b/rpc/types/src/misc/distribution.rs index faac7ad..e920d12 100644 --- a/rpc/types/src/misc/distribution.rs +++ b/rpc/types/src/misc/distribution.rs @@ -20,8 +20,8 @@ use cuprate_epee_encoding::{ "rpc/core_rpc_server_commands_defs.h", 45..=55 )] -#[cfg(feature = "epee")] -fn compress_integer_array(_: &[u64]) -> error::Result> { +#[cfg(any(feature = "epee", feature = "serde"))] +fn compress_integer_array(_: &[u64]) -> Vec { todo!() } @@ -33,6 +33,7 @@ fn compress_integer_array(_: &[u64]) -> error::Result> { "rpc/core_rpc_server_commands_defs.h", 57..=72 )] +#[cfg(any(feature = "epee", feature = "serde"))] fn decompress_integer_array(_: &[u8]) -> Vec { todo!() } @@ -135,12 +136,7 @@ fn serialize_distribution_as_compressed_data(v: &Vec, s: S) -> Result compressed_data.serialize(s), - Err(_) => Err(serde::ser::Error::custom( - "error compressing distribution array", - )), - } + compress_integer_array(v).serialize(s) } /// Deserializer function for [`DistributionCompressedBinary::distribution`]. @@ -256,7 +252,7 @@ impl EpeeObject for Distribution { distribution, amount, }) => { - let compressed_data = compress_integer_array(&distribution)?; + let compressed_data = compress_integer_array(&distribution); start_height.write(w)?; base.write(w)?; diff --git a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs index 842997b..4430dbe 100644 --- a/rpc/types/src/misc/misc.rs +++ b/rpc/types/src/misc/misc.rs @@ -11,10 +11,10 @@ use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] use cuprate_epee_encoding::epee_object; -use crate::{ - defaults::{default_string, default_zero}, - macros::monero_definition_link, -}; +use crate::macros::monero_definition_link; + +#[cfg(any(feature = "epee", feature = "serde"))] +use crate::defaults::default_zero; //---------------------------------------------------------------------------------------------------- Macros /// This macro (local to this file) defines all the misc types. @@ -148,7 +148,7 @@ define_struct_and_impl_epee! { )] /// Used in [`crate::json::SetBansRequest`]. 
SetBan { - #[cfg_attr(feature = "serde", serde(default = "default_string"))] + #[cfg_attr(feature = "serde", serde(default = "crate::defaults::default_string"))] host: String, #[cfg_attr(feature = "serde", serde(default = "default_zero"))] ip: u32, diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index e7f3394..f743392 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -8,7 +8,6 @@ use serde::{Deserialize, Serialize}; use crate::{ base::{AccessResponseBase, ResponseBase}, - defaults::{default_false, default_string, default_true, default_vec, default_zero}, macros::define_request_and_response, misc::{ GetOutputsOut, OutKey, Peer, PublicNode, SpentKeyImageInfo, Status, TxEntry, TxInfo, @@ -17,6 +16,9 @@ use crate::{ RpcCallValue, }; +#[cfg(any(feature = "serde", feature = "epee"))] +use crate::defaults::{default_false, default_string, default_true, default_vec, default_zero}; + //---------------------------------------------------------------------------------------------------- Macro /// Adds a (de)serialization doc-test to a type in `other.rs`. /// diff --git a/storage/blockchain/Cargo.toml b/storage/blockchain/Cargo.toml index d0a43b3..6fd973c 100644 --- a/storage/blockchain/Cargo.toml +++ b/storage/blockchain/Cargo.toml @@ -9,32 +9,31 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/storage/cuprate-bloc keywords = ["cuprate", "blockchain", "database"] [features] -default = ["heed", "service"] +default = ["heed"] # default = ["redb", "service"] # default = ["redb-memory", "service"] heed = ["cuprate-database/heed"] redb = ["cuprate-database/redb"] redb-memory = ["cuprate-database/redb-memory"] -service = ["dep:thread_local", "dep:rayon", "cuprate-helper/thread"] +serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"] [dependencies] cuprate-database = { workspace = true } cuprate-database-service = { workspace = true } -cuprate-helper = { workspace = true, features = ["fs", "map", "crypto"] } +cuprate-helper = { workspace = true, features = ["fs", "map", "crypto", "tx", "thread"] } cuprate-types = { workspace = true, features = ["blockchain"] } cuprate-pruning = { workspace = true } bitflags = { workspace = true, features = ["std", "serde", "bytemuck"] } bytemuck = { workspace = true, features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } curve25519-dalek = { workspace = true } -rand = { workspace = true } +rand = { workspace = true, features = ["std", "std_rng"] } monero-serai = { workspace = true, features = ["std"] } serde = { workspace = true, optional = true } -# `service` feature. tower = { workspace = true } -thread_local = { workspace = true, optional = true } -rayon = { workspace = true, optional = true } +thread_local = { workspace = true } +rayon = { workspace = true } [dev-dependencies] cuprate-constants = { workspace = true } diff --git a/storage/blockchain/README.md b/storage/blockchain/README.md index 4800546..3f97a3d 100644 --- a/storage/blockchain/README.md +++ b/storage/blockchain/README.md @@ -32,9 +32,6 @@ use cuprate_blockchain::{ This ensures the types/traits used from `cuprate_database` are the same ones used by `cuprate_blockchain` internally. # Feature flags -The `service` module requires the `service` feature to be enabled. -See the module for more documentation. - Different database backends are enabled by the feature flags: - `heed` (LMDB) - `redb` @@ -45,7 +42,7 @@ The default is `heed`. 
# Invariants when not using `service` -`cuprate_blockchain` can be used without the `service` feature enabled but +`cuprate_blockchain` can be used without the `service` module but there are some things that must be kept in mind when doing so. Failing to uphold these invariants may cause panics. diff --git a/storage/blockchain/src/lib.rs b/storage/blockchain/src/lib.rs index f66cd99..7db8cc6 100644 --- a/storage/blockchain/src/lib.rs +++ b/storage/blockchain/src/lib.rs @@ -29,16 +29,12 @@ pub use free::open; pub mod config; pub mod ops; +pub mod service; pub mod tables; pub mod types; -//---------------------------------------------------------------------------------------------------- Feature-gated -#[cfg(feature = "service")] -pub mod service; - //---------------------------------------------------------------------------------------------------- Private #[cfg(test)] pub(crate) mod tests; -#[cfg(feature = "service")] // only needed in `service` for now pub(crate) mod unsafe_sendable; diff --git a/storage/blockchain/src/service/mod.rs b/storage/blockchain/src/service/mod.rs index 53bf1df..c5eb80c 100644 --- a/storage/blockchain/src/service/mod.rs +++ b/storage/blockchain/src/service/mod.rs @@ -10,8 +10,6 @@ //! //! The system is managed by this crate, and only requires [`init`] by the user. //! -//! This module must be enabled with the `service` feature. -//! //! ## Handles //! The 2 handles to the database are: //! - [`BlockchainReadHandle`] diff --git a/storage/database/Cargo.toml b/storage/database/Cargo.toml index 7a2f4ae..feeaf87 100644 --- a/storage/database/Cargo.toml +++ b/storage/database/Cargo.toml @@ -9,10 +9,10 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/storage/database" keywords = ["cuprate", "database"] [features] -# default = ["heed"] +default = ["heed"] # default = ["redb"] # default = ["redb-memory"] -heed = ["dep:heed"] +heed = [] redb = ["dep:redb"] redb-memory = ["redb"] @@ -25,7 +25,7 @@ paste = { workspace = true } thiserror = { workspace = true } # Optional features. -heed = { version = "0.20.5", features = ["read-txn-no-tls"], optional = true } +heed = { version = "0.20.5", features = ["read-txn-no-tls"] } redb = { version = "2.1.3", optional = true } serde = { workspace = true, optional = true } diff --git a/storage/database/src/backend/mod.rs b/storage/database/src/backend/mod.rs index 11ae40b..ebe12d8 100644 --- a/storage/database/src/backend/mod.rs +++ b/storage/database/src/backend/mod.rs @@ -4,6 +4,8 @@ cfg_if::cfg_if! { // If both backends are enabled, fallback to `heed`. // This is useful when using `--all-features`. 
if #[cfg(all(feature = "redb", not(feature = "heed")))] { + use heed as _; + mod redb; pub use redb::ConcreteEnv; } else { diff --git a/storage/service/Cargo.toml b/storage/service/Cargo.toml index fa6971c..ebdb13e 100644 --- a/storage/service/Cargo.toml +++ b/storage/service/Cargo.toml @@ -8,14 +8,20 @@ authors = ["Boog900"] repository = "https://github.com/Cuprate/cuprate/tree/main/storage/service" keywords = ["cuprate", "service", "database"] +[features] +default = ["heed"] +heed = ["cuprate-database/heed"] +redb = ["cuprate-database/redb"] +redb-memorey = ["cuprate-database/redb-memory"] + [dependencies] cuprate-database = { workspace = true } -cuprate-helper = { workspace = true, features = ["fs", "thread", "map"] } +cuprate-helper = { workspace = true, features = ["fs", "thread", "map", "asynch"] } serde = { workspace = true, optional = true } rayon = { workspace = true } tower = { workspace = true } -futures = { workspace = true } +futures = { workspace = true, features = ["std"] } crossbeam = { workspace = true, features = ["std"] } [lints] diff --git a/storage/txpool/Cargo.toml b/storage/txpool/Cargo.toml index c301166..c908265 100644 --- a/storage/txpool/Cargo.toml +++ b/storage/txpool/Cargo.toml @@ -9,18 +9,17 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/storage/txpool" keywords = ["cuprate", "txpool", "transaction", "pool", "database"] [features] -default = ["heed", "service"] +default = ["heed"] # default = ["redb", "service"] # default = ["redb-memory", "service"] heed = ["cuprate-database/heed"] redb = ["cuprate-database/redb"] redb-memory = ["cuprate-database/redb-memory"] -service = ["dep:tower", "dep:rayon", "dep:cuprate-database-service"] serde = ["dep:serde", "cuprate-database/serde", "cuprate-database-service/serde"] [dependencies] cuprate-database = { workspace = true, features = ["heed"] } -cuprate-database-service = { workspace = true, optional = true } +cuprate-database-service = { workspace = true } cuprate-types = { workspace = true } cuprate-helper = { workspace = true, default-features = false, features = ["constants"] } @@ -28,11 +27,11 @@ monero-serai = { workspace = true, features = ["std"] } bytemuck = { workspace = true, features = ["must_cast", "derive", "min_const_generics", "extern_crate_alloc"] } bitflags = { workspace = true, features = ["std", "serde", "bytemuck"] } thiserror = { workspace = true } -hex = { workspace = true } +hex = { workspace = true, features = ["std"] } blake3 = { workspace = true, features = ["std"] } -tower = { workspace = true, optional = true } -rayon = { workspace = true, optional = true } +tower = { workspace = true } +rayon = { workspace = true } serde = { workspace = true, optional = true } diff --git a/storage/txpool/README.md b/storage/txpool/README.md index 80d3b25..d14f445 100644 --- a/storage/txpool/README.md +++ b/storage/txpool/README.md @@ -37,10 +37,6 @@ use cuprate_txpool::{ This ensures the types/traits used from `cuprate_database` are the same ones used by `cuprate_txpool` internally. # Feature flags - -The `service` module requires the `service` feature to be enabled. -See the module for more documentation. - Different database backends are enabled by the feature flags: - `heed` (LMDB) diff --git a/storage/txpool/src/lib.rs b/storage/txpool/src/lib.rs index 8a57c72..53e53ec 100644 --- a/storage/txpool/src/lib.rs +++ b/storage/txpool/src/lib.rs @@ -4,10 +4,12 @@ clippy::significant_drop_tightening )] +// Used in docs: . 
+use tower as _; + pub mod config; mod free; pub mod ops; -#[cfg(feature = "service")] pub mod service; pub mod tables; mod tx; @@ -20,8 +22,6 @@ pub use tx::TxEntry; //re-exports pub use cuprate_database; -// TODO: remove when used. -use tower as _; #[cfg(test)] mod test { use cuprate_test_utils as _; diff --git a/storage/txpool/src/service.rs b/storage/txpool/src/service.rs index 91a7060..a82de5b 100644 --- a/storage/txpool/src/service.rs +++ b/storage/txpool/src/service.rs @@ -10,8 +10,6 @@ //! //! The system is managed by this crate, and only requires [`init`] by the user. //! -//! This module must be enabled with the `service` feature. -//! //! ## Handles //! The 2 handles to the database are: //! - [`TxpoolReadHandle`] @@ -42,7 +40,7 @@ //! To interact with the database (whether reading or writing data), //! a `Request` can be sent using one of the above handles. //! -//! Both the handles implement `tower::Service`, so they can be [`tower::Service::call`]ed. +//! Both the handles implement [`tower::Service`], so they can be [`tower::Service::call`]ed. //! //! An `async`hronous channel will be returned from the call. //! This channel can be `.await`ed upon to (eventually) receive diff --git a/types/Cargo.toml b/types/Cargo.toml index 29887bd..e1ffb19 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -12,21 +12,23 @@ keywords = ["cuprate", "types"] default = ["blockchain", "epee", "serde", "json", "hex"] blockchain = [] epee = ["dep:cuprate-epee-encoding"] -serde = ["dep:serde"] +serde = ["dep:serde", "hex"] proptest = ["dep:proptest", "dep:proptest-derive"] json = ["hex", "dep:cuprate-helper"] -hex = ["dep:hex"] +# We sadly have no choice but to enable serde here as otherwise we will get warnings from the `hex` dep being unused. +# This isn't too bad as `HexBytes` only makes sense with serde anyway. +hex = ["serde", "dep:hex"] [dependencies] cuprate-epee-encoding = { workspace = true, optional = true, features = ["std"] } cuprate-helper = { workspace = true, optional = true, features = ["cast"] } -cuprate-fixed-bytes = { workspace = true } +cuprate-fixed-bytes = { workspace = true, features = ["std", "serde"] } bytes = { workspace = true } curve25519-dalek = { workspace = true } monero-serai = { workspace = true } hex = { workspace = true, features = ["serde", "alloc"], optional = true } -serde = { workspace = true, features = ["derive"], optional = true } +serde = { workspace = true, features = ["std", "derive"], optional = true } strum = { workspace = true, features = ["derive"] } thiserror = { workspace = true } diff --git a/types/src/hex.rs b/types/src/hex.rs index 34da09d..de4fc81 100644 --- a/types/src/hex.rs +++ b/types/src/hex.rs @@ -22,6 +22,7 @@ pub struct HexBytes( #[cfg_attr(feature = "serde", serde(with = "hex::serde"))] pub [u8; N], ); +#[cfg(feature = "serde")] impl<'de, const N: usize> Deserialize<'de> for HexBytes where [u8; N]: hex::FromHex, diff --git a/types/src/json/block.rs b/types/src/json/block.rs index 1397f6f..88f134d 100644 --- a/types/src/json/block.rs +++ b/types/src/json/block.rs @@ -51,17 +51,17 @@ impl From for Block { /// [`Block::miner_tx`]. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[serde(untagged)] +#[cfg_attr(feature = "serde", serde(untagged))] pub enum MinerTransaction { V1 { /// This field is [flattened](https://serde.rs/field-attrs.html#flatten). 
- #[serde(flatten)] + #[cfg_attr(feature = "serde", serde(flatten))] prefix: MinerTransactionPrefix, signatures: [(); 0], }, V2 { /// This field is [flattened](https://serde.rs/field-attrs.html#flatten). - #[serde(flatten)] + #[cfg_attr(feature = "serde", serde(flatten))] prefix: MinerTransactionPrefix, rct_signatures: MinerTransactionRctSignatures, }, diff --git a/types/src/json/output.rs b/types/src/json/output.rs index 050132a..182618c 100644 --- a/types/src/json/output.rs +++ b/types/src/json/output.rs @@ -20,7 +20,7 @@ pub struct Output { /// [`Output::target`]. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[serde(untagged)] +#[cfg_attr(feature = "serde", serde(untagged))] pub enum Target { Key { key: HexBytes<32> }, TaggedKey { tagged_key: TaggedKey }, diff --git a/types/src/json/tx.rs b/types/src/json/tx.rs index 46ec827..a18dc89 100644 --- a/types/src/json/tx.rs +++ b/types/src/json/tx.rs @@ -24,17 +24,17 @@ use crate::{ /// - [`/get_transaction_pool` -> `tx_json`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#get_transaction_pool) #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[serde(untagged)] +#[cfg_attr(feature = "serde", serde(untagged))] pub enum Transaction { V1 { /// This field is [flattened](https://serde.rs/field-attrs.html#flatten). - #[serde(flatten)] + #[cfg_attr(feature = "serde", serde(flatten))] prefix: TransactionPrefix, signatures: Vec>, }, V2 { /// This field is [flattened](https://serde.rs/field-attrs.html#flatten). - #[serde(flatten)] + #[cfg_attr(feature = "serde", serde(flatten))] prefix: TransactionPrefix, rct_signatures: RctSignatures, /// This field is [`Some`] if [`Self::V2::rct_signatures`] From 372cab24d72bd61a51b55421e1730f57b12ea4f6 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Fri, 1 Nov 2024 18:25:55 -0400 Subject: [PATCH 03/14] cuprated: internal signatures required for RPC pt. 
2 (#320) * apply diffs * clippy * fix tests * rpc: fix tests * remove `BlockchainManagerRequest::Overview` * cuprated/p2p: fix `ConnectionInfo` * move `CalculatePow` * remove `AddAuxPow` * move `Spans` and `NextNeededPruningSeed` * factor types into `cuprate-types` * scope cargo features * fix/doc type serde * Update binaries/cuprated/src/rpc/request/address_book.rs Co-authored-by: Boog900 * Update binaries/cuprated/src/rpc/request/blockchain_context.rs Co-authored-by: Boog900 * Update binaries/cuprated/src/rpc/request/blockchain_manager.rs Co-authored-by: Boog900 * fmt * txpool: collapse `TxEntry` * `ConnectionId` * fix import * fix bin --------- Co-authored-by: Boog900 --- Cargo.lock | 2 + binaries/cuprated/Cargo.toml | 49 +++--- binaries/cuprated/src/main.rs | 4 + binaries/cuprated/src/rpc.rs | 1 + binaries/cuprated/src/rpc/constants.rs | 5 + binaries/cuprated/src/rpc/handler.rs | 50 ++++++ .../cuprated/src/rpc/request/address_book.rs | 104 +++++++++--- .../cuprated/src/rpc/request/blockchain.rs | 141 ++++++++++++----- .../src/rpc/request/blockchain_context.rs | 64 ++++++-- .../src/rpc/request/blockchain_manager.rs | 104 ++++++++++-- binaries/cuprated/src/rpc/request/txpool.rs | 37 +++-- consensus/context/src/lib.rs | 19 +++ consensus/context/src/task.rs | 3 +- helper/src/cast.rs | 1 - p2p/address-book/src/book.rs | 3 +- p2p/p2p-core/Cargo.toml | 5 +- p2p/p2p-core/src/ban.rs | 23 --- .../src/client/handshaker/builder/dummy.rs | 3 +- p2p/p2p-core/src/lib.rs | 2 +- p2p/p2p-core/src/services.rs | 8 +- p2p/p2p-core/src/types.rs | 96 ++++++++++++ rpc/types/Cargo.toml | 10 +- rpc/types/src/base.rs | 90 ++++------- rpc/types/src/json.rs | 74 ++++----- rpc/types/src/misc/misc.rs | 4 +- rpc/types/src/other.rs | 42 ++--- storage/blockchain/src/service/read.rs | 12 ++ storage/txpool/src/service/interface.rs | 8 +- storage/txpool/src/service/read.rs | 6 +- storage/txpool/src/tx.rs | 2 + types/src/address_type.rs | 147 +++++++++++++++++ types/src/blockchain.rs | 15 +- types/src/connection_state.rs | 148 ++++++++++++++++++ types/src/lib.rs | 11 +- types/src/types.rs | 20 ++- 35 files changed, 1028 insertions(+), 285 deletions(-) create mode 100644 binaries/cuprated/src/rpc/constants.rs delete mode 100644 p2p/p2p-core/src/ban.rs create mode 100644 p2p/p2p-core/src/types.rs create mode 100644 types/src/address_type.rs create mode 100644 types/src/connection_state.rs diff --git a/Cargo.lock b/Cargo.lock index 0f851dc..7ad2f2a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -865,6 +865,7 @@ dependencies = [ "cuprate-helper", "cuprate-pruning", "cuprate-test-utils", + "cuprate-types", "cuprate-wire", "futures", "hex", @@ -1026,6 +1027,7 @@ dependencies = [ "cuprate-consensus", "cuprate-consensus-context", "cuprate-consensus-rules", + "cuprate-constants", "cuprate-cryptonight", "cuprate-dandelion-tower", "cuprate-database", diff --git a/binaries/cuprated/Cargo.toml b/binaries/cuprated/Cargo.toml index d59b4c3..9ebdd78 100644 --- a/binaries/cuprated/Cargo.toml +++ b/binaries/cuprated/Cargo.toml @@ -9,31 +9,32 @@ repository = "https://github.com/Cuprate/cuprate/tree/main/binaries/cuprated" [dependencies] # TODO: after v1.0.0, remove unneeded dependencies. 
-cuprate-consensus = { workspace = true } -cuprate-fast-sync = { workspace = true } +cuprate-consensus = { workspace = true } +cuprate-fast-sync = { workspace = true } cuprate-consensus-context = { workspace = true } -cuprate-consensus-rules = { workspace = true } -cuprate-cryptonight = { workspace = true } -cuprate-helper = { workspace = true } -cuprate-epee-encoding = { workspace = true } -cuprate-fixed-bytes = { workspace = true } -cuprate-levin = { workspace = true } -cuprate-wire = { workspace = true } -cuprate-p2p = { workspace = true } -cuprate-p2p-core = { workspace = true } -cuprate-dandelion-tower = { workspace = true, features = ["txpool"] } -cuprate-async-buffer = { workspace = true } -cuprate-address-book = { workspace = true } -cuprate-blockchain = { workspace = true } -cuprate-database-service = { workspace = true } -cuprate-txpool = { workspace = true } -cuprate-database = { workspace = true } -cuprate-pruning = { workspace = true } -cuprate-test-utils = { workspace = true } -cuprate-types = { workspace = true } -cuprate-json-rpc = { workspace = true } -cuprate-rpc-interface = { workspace = true } -cuprate-rpc-types = { workspace = true } +cuprate-consensus-rules = { workspace = true } +cuprate-constants = { workspace = true } +cuprate-cryptonight = { workspace = true } +cuprate-helper = { workspace = true } +cuprate-epee-encoding = { workspace = true } +cuprate-fixed-bytes = { workspace = true } +cuprate-levin = { workspace = true } +cuprate-wire = { workspace = true } +cuprate-p2p = { workspace = true } +cuprate-p2p-core = { workspace = true } +cuprate-dandelion-tower = { workspace = true, features = ["txpool"] } +cuprate-async-buffer = { workspace = true } +cuprate-address-book = { workspace = true } +cuprate-blockchain = { workspace = true } +cuprate-database-service = { workspace = true } +cuprate-txpool = { workspace = true } +cuprate-database = { workspace = true } +cuprate-pruning = { workspace = true } +cuprate-test-utils = { workspace = true } +cuprate-types = { workspace = true } +cuprate-json-rpc = { workspace = true } +cuprate-rpc-interface = { workspace = true } +cuprate-rpc-types = { workspace = true } # TODO: after v1.0.0, remove unneeded dependencies. anyhow = { workspace = true } diff --git a/binaries/cuprated/src/main.rs b/binaries/cuprated/src/main.rs index d3fe1f5..d5c832e 100644 --- a/binaries/cuprated/src/main.rs +++ b/binaries/cuprated/src/main.rs @@ -9,6 +9,10 @@ unused_variables, clippy::needless_pass_by_value, clippy::unused_async, + clippy::diverging_sub_expression, + unused_mut, + clippy::let_unit_value, + clippy::needless_pass_by_ref_mut, reason = "TODO: remove after v1.0.0" )] diff --git a/binaries/cuprated/src/rpc.rs b/binaries/cuprated/src/rpc.rs index fe8e5f2..255d90d 100644 --- a/binaries/cuprated/src/rpc.rs +++ b/binaries/cuprated/src/rpc.rs @@ -3,6 +3,7 @@ //! Will contain the code to initiate the RPC and a request handler. mod bin; +mod constants; mod handler; mod json; mod other; diff --git a/binaries/cuprated/src/rpc/constants.rs b/binaries/cuprated/src/rpc/constants.rs new file mode 100644 index 0000000..1236269 --- /dev/null +++ b/binaries/cuprated/src/rpc/constants.rs @@ -0,0 +1,5 @@ +//! Constants used within RPC. + +/// The string message used in RPC response fields for when +/// `cuprated` does not support a field that `monerod` has. 
+pub(super) const FIELD_NOT_SUPPORTED: &str = "`cuprated` does not support this field."; diff --git a/binaries/cuprated/src/rpc/handler.rs b/binaries/cuprated/src/rpc/handler.rs index af2e3f2..1f73403 100644 --- a/binaries/cuprated/src/rpc/handler.rs +++ b/binaries/cuprated/src/rpc/handler.rs @@ -8,6 +8,8 @@ use monero_serai::block::Block; use tower::Service; use cuprate_blockchain::service::{BlockchainReadHandle, BlockchainWriteHandle}; +use cuprate_consensus::BlockChainContextService; +use cuprate_pruning::PruningSeed; use cuprate_rpc_interface::RpcHandler; use cuprate_rpc_types::{ bin::{BinRequest, BinResponse}, @@ -15,6 +17,7 @@ use cuprate_rpc_types::{ other::{OtherRequest, OtherResponse}, }; use cuprate_txpool::service::{TxpoolReadHandle, TxpoolWriteHandle}; +use cuprate_types::{AddAuxPow, AuxPow, HardFork}; use crate::rpc::{bin, json, other}; @@ -54,6 +57,32 @@ pub enum BlockchainManagerRequest { /// The height of the next block in the chain. TargetHeight, + + /// Generate new blocks. + /// + /// This request is only for regtest, see RPC's `generateblocks`. + GenerateBlocks { + /// Number of the blocks to be generated. + amount_of_blocks: u64, + /// The previous block's hash. + prev_block: [u8; 32], + /// The starting value for the nonce. + starting_nonce: u32, + /// The address that will receive the coinbase reward. + wallet_address: String, + }, + + // // TODO: the below requests actually belong to the block downloader/syncer: + // // + // /// Get [`Span`] data. + // /// + // /// This is data that describes an active downloading process, + // /// if we are fully synced, this will return an empty [`Vec`]. + // Spans, + + // + /// Get the next [`PruningSeed`] needed for a pruned sync. + NextNeededPruningSeed, } /// TODO: use real type when public. @@ -69,6 +98,9 @@ pub enum BlockchainManagerResponse { /// Response to [`BlockchainManagerRequest::PopBlocks`] PopBlocks { new_height: usize }, + /// Response to [`BlockchainManagerRequest::Prune`] + Prune(PruningSeed), + /// Response to [`BlockchainManagerRequest::Pruned`] Pruned(bool), @@ -83,6 +115,19 @@ pub enum BlockchainManagerResponse { /// Response to [`BlockchainManagerRequest::TargetHeight`] TargetHeight { height: usize }, + + /// Response to [`BlockchainManagerRequest::GenerateBlocks`] + GenerateBlocks { + /// Hashes of the blocks generated. + blocks: Vec<[u8; 32]>, + /// The new top height. (TODO: is this correct?) + height: usize, + }, + + // /// Response to [`BlockchainManagerRequest::Spans`]. + // Spans(Vec>), + /// Response to [`BlockchainManagerRequest::NextNeededPruningSeed`]. + NextNeededPruningSeed(PruningSeed), } /// TODO: use real type when public. @@ -102,6 +147,9 @@ pub struct CupratedRpcHandler { /// Read handle to the blockchain database. pub blockchain_read: BlockchainReadHandle, + /// Handle to the blockchain context service. + pub blockchain_context: BlockChainContextService, + /// Handle to the blockchain manager. 
pub blockchain_manager: BlockchainManagerHandle, @@ -117,6 +165,7 @@ impl CupratedRpcHandler { pub const fn new( restricted: bool, blockchain_read: BlockchainReadHandle, + blockchain_context: BlockChainContextService, blockchain_manager: BlockchainManagerHandle, txpool_read: TxpoolReadHandle, txpool_manager: std::convert::Infallible, @@ -124,6 +173,7 @@ impl CupratedRpcHandler { Self { restricted, blockchain_read, + blockchain_context, blockchain_manager, txpool_read, txpool_manager, diff --git a/binaries/cuprated/src/rpc/request/address_book.rs b/binaries/cuprated/src/rpc/request/address_book.rs index 2aa58e8..6760a6c 100644 --- a/binaries/cuprated/src/rpc/request/address_book.rs +++ b/binaries/cuprated/src/rpc/request/address_book.rs @@ -2,26 +2,33 @@ use std::convert::Infallible; -use anyhow::Error; +use anyhow::{anyhow, Error}; use tower::ServiceExt; use cuprate_helper::cast::usize_to_u64; use cuprate_p2p_core::{ services::{AddressBookRequest, AddressBookResponse}, + types::{BanState, ConnectionId}, AddressBook, NetworkZone, }; +use cuprate_pruning::PruningSeed; +use cuprate_rpc_types::misc::{ConnectionInfo, Span}; + +use crate::rpc::constants::FIELD_NOT_SUPPORTED; + +// FIXME: use `anyhow::Error` over `tower::BoxError` in address book. /// [`AddressBookRequest::PeerlistSize`] -pub(super) async fn peerlist_size( +pub(crate) async fn peerlist_size( address_book: &mut impl AddressBook, ) -> Result<(u64, u64), Error> { let AddressBookResponse::PeerlistSize { white, grey } = address_book .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? .call(AddressBookRequest::PeerlistSize) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; @@ -29,17 +36,74 @@ pub(super) async fn peerlist_size( Ok((usize_to_u64(white), usize_to_u64(grey))) } +/// [`AddressBookRequest::ConnectionInfo`] +pub(crate) async fn connection_info( + address_book: &mut impl AddressBook, +) -> Result, Error> { + let AddressBookResponse::ConnectionInfo(vec) = address_book + .ready() + .await + .map_err(|e| anyhow!(e))? + .call(AddressBookRequest::ConnectionInfo) + .await + .map_err(|e| anyhow!(e))? + else { + unreachable!(); + }; + + // FIXME: impl this map somewhere instead of inline. 
+ let vec = vec + .into_iter() + .map(|info| { + let (ip, port) = match info.socket_addr { + Some(socket) => (socket.ip().to_string(), socket.port().to_string()), + None => (String::new(), String::new()), + }; + + ConnectionInfo { + address: info.address.to_string(), + address_type: info.address_type, + avg_download: info.avg_download, + avg_upload: info.avg_upload, + connection_id: String::from(ConnectionId::DEFAULT_STR), + current_download: info.current_download, + current_upload: info.current_upload, + height: info.height, + host: info.host, + incoming: info.incoming, + ip, + live_time: info.live_time, + localhost: info.localhost, + local_ip: info.local_ip, + peer_id: hex::encode(info.peer_id.to_ne_bytes()), + port, + pruning_seed: info.pruning_seed.compress(), + recv_count: info.recv_count, + recv_idle_time: info.recv_idle_time, + rpc_credits_per_hash: info.rpc_credits_per_hash, + rpc_port: info.rpc_port, + send_count: info.send_count, + send_idle_time: info.send_idle_time, + state: info.state, + support_flags: info.support_flags, + } + }) + .collect(); + + Ok(vec) +} + /// [`AddressBookRequest::ConnectionCount`] -pub(super) async fn connection_count( +pub(crate) async fn connection_count( address_book: &mut impl AddressBook, ) -> Result<(u64, u64), Error> { let AddressBookResponse::ConnectionCount { incoming, outgoing } = address_book .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? .call(AddressBookRequest::ConnectionCount) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; @@ -48,17 +112,17 @@ pub(super) async fn connection_count( } /// [`AddressBookRequest::SetBan`] -pub(super) async fn set_ban( +pub(crate) async fn set_ban( address_book: &mut impl AddressBook, - peer: cuprate_p2p_core::ban::SetBan, + set_ban: cuprate_p2p_core::types::SetBan, ) -> Result<(), Error> { let AddressBookResponse::Ok = address_book .ready() .await - .expect("TODO") - .call(AddressBookRequest::SetBan(peer)) + .map_err(|e| anyhow!(e))? + .call(AddressBookRequest::SetBan(set_ban)) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; @@ -67,17 +131,17 @@ pub(super) async fn set_ban( } /// [`AddressBookRequest::GetBan`] -pub(super) async fn get_ban( +pub(crate) async fn get_ban( address_book: &mut impl AddressBook, peer: Z::Addr, ) -> Result, Error> { let AddressBookResponse::GetBan { unban_instant } = address_book .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? .call(AddressBookRequest::GetBan(peer)) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; @@ -86,19 +150,19 @@ pub(super) async fn get_ban( } /// [`AddressBookRequest::GetBans`] -pub(super) async fn get_bans( +pub(crate) async fn get_bans( address_book: &mut impl AddressBook, -) -> Result<(), Error> { +) -> Result>, Error> { let AddressBookResponse::GetBans(bans) = address_book .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? .call(AddressBookRequest::GetBans) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; - Ok(todo!()) + Ok(bans) } diff --git a/binaries/cuprated/src/rpc/request/blockchain.rs b/binaries/cuprated/src/rpc/request/blockchain.rs index 8af80e5..97c7f48 100644 --- a/binaries/cuprated/src/rpc/request/blockchain.rs +++ b/binaries/cuprated/src/rpc/request/blockchain.rs @@ -1,24 +1,61 @@ //! Functions for [`BlockchainReadRequest`]. 
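(Aside, not part of this patch: every request helper in these `rpc/request/*.rs` files follows the same shape: wait for service readiness, send the request, then destructure the single response variant that request can produce with `let ... else { unreachable!() }`. A stripped-down sketch of that shape with a made-up request/response pair and service bound; nothing below is an API from this repository:)

    use anyhow::{anyhow, Error};
    use tower::{Service, ServiceExt};

    /// Hypothetical request type.
    pub enum ExampleRequest {
        Height,
    }

    /// Hypothetical response type with more than one variant.
    pub enum ExampleResponse {
        Height(usize),
        Ok,
    }

    pub async fn height<S>(svc: &mut S) -> Result<usize, Error>
    where
        S: Service<ExampleRequest, Response = ExampleResponse, Error = tower::BoxError>,
    {
        // Readiness check, call, then match the only variant `Height` can
        // return; any other variant would be a logic bug, hence `unreachable!()`.
        let ExampleResponse::Height(height) = svc
            .ready()
            .await
            .map_err(|e| anyhow!(e))?
            .call(ExampleRequest::Height)
            .await
            .map_err(|e| anyhow!(e))?
        else {
            unreachable!();
        };

        Ok(height)
    }
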
use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, ops::Range, }; use anyhow::Error; -use cuprate_blockchain::service::BlockchainReadHandle; +use monero_serai::block::Block; use tower::{Service, ServiceExt}; +use cuprate_blockchain::{service::BlockchainReadHandle, types::AltChainInfo}; use cuprate_helper::cast::{u64_to_usize, usize_to_u64}; use cuprate_types::{ blockchain::{BlockchainReadRequest, BlockchainResponse}, - Chain, CoinbaseTxSum, ExtendedBlockHeader, MinerData, OutputHistogramEntry, - OutputHistogramInput, OutputOnChain, + Chain, ChainInfo, CoinbaseTxSum, ExtendedBlockHeader, HardFork, MinerData, + OutputHistogramEntry, OutputHistogramInput, OutputOnChain, }; +/// [`BlockchainReadRequest::Block`]. +pub(crate) async fn block( + blockchain_read: &mut BlockchainReadHandle, + height: u64, +) -> Result { + let BlockchainResponse::Block(block) = blockchain_read + .ready() + .await? + .call(BlockchainReadRequest::Block { + height: u64_to_usize(height), + }) + .await? + else { + unreachable!(); + }; + + Ok(block) +} + +/// [`BlockchainReadRequest::BlockByHash`]. +pub(crate) async fn block_by_hash( + blockchain_read: &mut BlockchainReadHandle, + hash: [u8; 32], +) -> Result { + let BlockchainResponse::Block(block) = blockchain_read + .ready() + .await? + .call(BlockchainReadRequest::BlockByHash(hash)) + .await? + else { + unreachable!(); + }; + + Ok(block) +} + /// [`BlockchainReadRequest::BlockExtendedHeader`]. -pub(super) async fn block_extended_header( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn block_extended_header( + blockchain_read: &mut BlockchainReadHandle, height: u64, ) -> Result { let BlockchainResponse::BlockExtendedHeader(header) = blockchain_read @@ -36,8 +73,8 @@ pub(super) async fn block_extended_header( } /// [`BlockchainReadRequest::BlockHash`]. -pub(super) async fn block_hash( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn block_hash( + blockchain_read: &mut BlockchainReadHandle, height: u64, chain: Chain, ) -> Result<[u8; 32], Error> { @@ -57,8 +94,8 @@ pub(super) async fn block_hash( } /// [`BlockchainReadRequest::FindBlock`]. -pub(super) async fn find_block( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn find_block( + blockchain_read: &mut BlockchainReadHandle, block_hash: [u8; 32], ) -> Result, Error> { let BlockchainResponse::FindBlock(option) = blockchain_read @@ -74,8 +111,8 @@ pub(super) async fn find_block( } /// [`BlockchainReadRequest::FilterUnknownHashes`]. -pub(super) async fn filter_unknown_hashes( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn filter_unknown_hashes( + blockchain_read: &mut BlockchainReadHandle, block_hashes: HashSet<[u8; 32]>, ) -> Result, Error> { let BlockchainResponse::FilterUnknownHashes(output) = blockchain_read @@ -91,8 +128,8 @@ pub(super) async fn filter_unknown_hashes( } /// [`BlockchainReadRequest::BlockExtendedHeaderInRange`] -pub(super) async fn block_extended_header_in_range( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn block_extended_header_in_range( + blockchain_read: &mut BlockchainReadHandle, range: Range, chain: Chain, ) -> Result, Error> { @@ -111,8 +148,8 @@ pub(super) async fn block_extended_header_in_range( } /// [`BlockchainReadRequest::ChainHeight`]. 
-pub(super) async fn chain_height( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn chain_height( + blockchain_read: &mut BlockchainReadHandle, ) -> Result<(u64, [u8; 32]), Error> { let BlockchainResponse::ChainHeight(height, hash) = blockchain_read .ready() @@ -127,8 +164,8 @@ pub(super) async fn chain_height( } /// [`BlockchainReadRequest::GeneratedCoins`]. -pub(super) async fn generated_coins( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn generated_coins( + blockchain_read: &mut BlockchainReadHandle, block_height: u64, ) -> Result { let BlockchainResponse::GeneratedCoins(generated_coins) = blockchain_read @@ -146,8 +183,8 @@ pub(super) async fn generated_coins( } /// [`BlockchainReadRequest::Outputs`] -pub(super) async fn outputs( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn outputs( + blockchain_read: &mut BlockchainReadHandle, outputs: HashMap>, ) -> Result>, Error> { let BlockchainResponse::Outputs(outputs) = blockchain_read @@ -163,8 +200,8 @@ pub(super) async fn outputs( } /// [`BlockchainReadRequest::NumberOutputsWithAmount`] -pub(super) async fn number_outputs_with_amount( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn number_outputs_with_amount( + blockchain_read: &mut BlockchainReadHandle, output_amounts: Vec, ) -> Result, Error> { let BlockchainResponse::NumberOutputsWithAmount(map) = blockchain_read @@ -182,8 +219,8 @@ pub(super) async fn number_outputs_with_amount( } /// [`BlockchainReadRequest::KeyImagesSpent`] -pub(super) async fn key_images_spent( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn key_images_spent( + blockchain_read: &mut BlockchainReadHandle, key_images: HashSet<[u8; 32]>, ) -> Result { let BlockchainResponse::KeyImagesSpent(is_spent) = blockchain_read @@ -199,8 +236,8 @@ pub(super) async fn key_images_spent( } /// [`BlockchainReadRequest::CompactChainHistory`] -pub(super) async fn compact_chain_history( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn compact_chain_history( + blockchain_read: &mut BlockchainReadHandle, ) -> Result<(Vec<[u8; 32]>, u128), Error> { let BlockchainResponse::CompactChainHistory { block_ids, @@ -218,8 +255,8 @@ pub(super) async fn compact_chain_history( } /// [`BlockchainReadRequest::FindFirstUnknown`] -pub(super) async fn find_first_unknown( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn find_first_unknown( + blockchain_read: &mut BlockchainReadHandle, hashes: Vec<[u8; 32]>, ) -> Result, Error> { let BlockchainResponse::FindFirstUnknown(resp) = blockchain_read @@ -235,8 +272,8 @@ pub(super) async fn find_first_unknown( } /// [`BlockchainReadRequest::TotalTxCount`] -pub(super) async fn total_tx_count( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn total_tx_count( + blockchain_read: &mut BlockchainReadHandle, ) -> Result { let BlockchainResponse::TotalTxCount(tx_count) = blockchain_read .ready() @@ -251,8 +288,8 @@ pub(super) async fn total_tx_count( } /// [`BlockchainReadRequest::DatabaseSize`] -pub(super) async fn database_size( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn database_size( + blockchain_read: &mut BlockchainReadHandle, ) -> Result<(u64, u64), Error> { let BlockchainResponse::DatabaseSize { database_size, @@ -270,8 +307,8 @@ pub(super) async fn database_size( } /// [`BlockchainReadRequest::OutputHistogram`] -pub(super) async fn output_histogram( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn output_histogram( + blockchain_read: 
&mut BlockchainReadHandle, input: OutputHistogramInput, ) -> Result, Error> { let BlockchainResponse::OutputHistogram(histogram) = blockchain_read @@ -287,8 +324,8 @@ pub(super) async fn output_histogram( } /// [`BlockchainReadRequest::CoinbaseTxSum`] -pub(super) async fn coinbase_tx_sum( - mut blockchain_read: BlockchainReadHandle, +pub(crate) async fn coinbase_tx_sum( + blockchain_read: &mut BlockchainReadHandle, height: u64, count: u64, ) -> Result { @@ -306,3 +343,35 @@ pub(super) async fn coinbase_tx_sum( Ok(sum) } + +/// [`BlockchainReadRequest::AltChains`] +pub(crate) async fn alt_chains( + blockchain_read: &mut BlockchainReadHandle, +) -> Result, Error> { + let BlockchainResponse::AltChains(vec) = blockchain_read + .ready() + .await? + .call(BlockchainReadRequest::AltChains) + .await? + else { + unreachable!(); + }; + + Ok(vec) +} + +/// [`BlockchainReadRequest::AltChainCount`] +pub(crate) async fn alt_chain_count( + blockchain_read: &mut BlockchainReadHandle, +) -> Result { + let BlockchainResponse::AltChainCount(count) = blockchain_read + .ready() + .await? + .call(BlockchainReadRequest::AltChainCount) + .await? + else { + unreachable!(); + }; + + Ok(usize_to_u64(count)) +} diff --git a/binaries/cuprated/src/rpc/request/blockchain_context.rs b/binaries/cuprated/src/rpc/request/blockchain_context.rs index 2b14d46..c6f0f22 100644 --- a/binaries/cuprated/src/rpc/request/blockchain_context.rs +++ b/binaries/cuprated/src/rpc/request/blockchain_context.rs @@ -2,27 +2,30 @@ use std::convert::Infallible; -use anyhow::Error; +use anyhow::{anyhow, Error}; +use monero_serai::block::Block; use tower::{Service, ServiceExt}; use cuprate_consensus_context::{ BlockChainContext, BlockChainContextRequest, BlockChainContextResponse, BlockChainContextService, }; +use cuprate_helper::cast::u64_to_usize; use cuprate_types::{FeeEstimate, HardFork, HardForkInfo}; +// FIXME: use `anyhow::Error` over `tower::BoxError` in blockchain context. + /// [`BlockChainContextRequest::Context`]. -pub(super) async fn context( - service: &mut BlockChainContextService, - height: u64, +pub(crate) async fn context( + blockchain_context: &mut BlockChainContextService, ) -> Result { - let BlockChainContextResponse::Context(context) = service + let BlockChainContextResponse::Context(context) = blockchain_context .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? .call(BlockChainContextRequest::Context) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; @@ -31,17 +34,17 @@ pub(super) async fn context( } /// [`BlockChainContextRequest::HardForkInfo`]. -pub(super) async fn hard_fork_info( - service: &mut BlockChainContextService, +pub(crate) async fn hard_fork_info( + blockchain_context: &mut BlockChainContextService, hard_fork: HardFork, ) -> Result { - let BlockChainContextResponse::HardForkInfo(hf_info) = service + let BlockChainContextResponse::HardForkInfo(hf_info) = blockchain_context .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? .call(BlockChainContextRequest::HardForkInfo(hard_fork)) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; @@ -50,20 +53,47 @@ pub(super) async fn hard_fork_info( } /// [`BlockChainContextRequest::FeeEstimate`]. 
-pub(super) async fn fee_estimate( - service: &mut BlockChainContextService, +pub(crate) async fn fee_estimate( + blockchain_context: &mut BlockChainContextService, grace_blocks: u64, ) -> Result { - let BlockChainContextResponse::FeeEstimate(fee) = service + let BlockChainContextResponse::FeeEstimate(fee) = blockchain_context .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? .call(BlockChainContextRequest::FeeEstimate { grace_blocks }) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; Ok(fee) } + +/// [`BlockChainContextRequest::CalculatePow`] +pub(crate) async fn calculate_pow( + blockchain_context: &mut BlockChainContextService, + hardfork: HardFork, + height: u64, + block: Box, + seed_hash: [u8; 32], +) -> Result<[u8; 32], Error> { + let BlockChainContextResponse::CalculatePow(hash) = blockchain_context + .ready() + .await + .map_err(|e| anyhow!(e))? + .call(BlockChainContextRequest::CalculatePow { + hardfork, + height: u64_to_usize(height), + block, + seed_hash, + }) + .await + .map_err(|e| anyhow!(e))? + else { + unreachable!(); + }; + + Ok(hash) +} diff --git a/binaries/cuprated/src/rpc/request/blockchain_manager.rs b/binaries/cuprated/src/rpc/request/blockchain_manager.rs index 4dc91c8..18b75de 100644 --- a/binaries/cuprated/src/rpc/request/blockchain_manager.rs +++ b/binaries/cuprated/src/rpc/request/blockchain_manager.rs @@ -5,13 +5,18 @@ use monero_serai::block::Block; use tower::{Service, ServiceExt}; use cuprate_helper::cast::{u64_to_usize, usize_to_u64}; +use cuprate_p2p_core::{types::ConnectionId, NetworkZone}; +use cuprate_pruning::PruningSeed; +use cuprate_rpc_types::misc::Span; +use cuprate_types::{AddAuxPow, AuxPow, HardFork}; -use crate::rpc::handler::{ - BlockchainManagerHandle, BlockchainManagerRequest, BlockchainManagerResponse, +use crate::rpc::{ + constants::FIELD_NOT_SUPPORTED, + handler::{BlockchainManagerHandle, BlockchainManagerRequest, BlockchainManagerResponse}, }; /// [`BlockchainManagerRequest::PopBlocks`] -pub(super) async fn pop_blocks( +pub(crate) async fn pop_blocks( blockchain_manager: &mut BlockchainManagerHandle, amount: u64, ) -> Result { @@ -30,8 +35,10 @@ pub(super) async fn pop_blocks( } /// [`BlockchainManagerRequest::Prune`] -pub(super) async fn prune(blockchain_manager: &mut BlockchainManagerHandle) -> Result<(), Error> { - let BlockchainManagerResponse::Ok = blockchain_manager +pub(crate) async fn prune( + blockchain_manager: &mut BlockchainManagerHandle, +) -> Result { + let BlockchainManagerResponse::Prune(seed) = blockchain_manager .ready() .await? 
.call(BlockchainManagerRequest::Prune) @@ -40,11 +47,11 @@ pub(super) async fn prune(blockchain_manager: &mut BlockchainManagerHandle) -> R unreachable!(); }; - Ok(()) + Ok(seed) } /// [`BlockchainManagerRequest::Pruned`] -pub(super) async fn pruned( +pub(crate) async fn pruned( blockchain_manager: &mut BlockchainManagerHandle, ) -> Result { let BlockchainManagerResponse::Pruned(pruned) = blockchain_manager @@ -60,7 +67,7 @@ pub(super) async fn pruned( } /// [`BlockchainManagerRequest::RelayBlock`] -pub(super) async fn relay_block( +pub(crate) async fn relay_block( blockchain_manager: &mut BlockchainManagerHandle, block: Block, ) -> Result<(), Error> { @@ -77,7 +84,7 @@ pub(super) async fn relay_block( } /// [`BlockchainManagerRequest::Syncing`] -pub(super) async fn syncing( +pub(crate) async fn syncing( blockchain_manager: &mut BlockchainManagerHandle, ) -> Result { let BlockchainManagerResponse::Syncing(syncing) = blockchain_manager @@ -93,7 +100,7 @@ pub(super) async fn syncing( } /// [`BlockchainManagerRequest::Synced`] -pub(super) async fn synced( +pub(crate) async fn synced( blockchain_manager: &mut BlockchainManagerHandle, ) -> Result { let BlockchainManagerResponse::Synced(syncing) = blockchain_manager @@ -109,7 +116,7 @@ pub(super) async fn synced( } /// [`BlockchainManagerRequest::Target`] -pub(super) async fn target( +pub(crate) async fn target( blockchain_manager: &mut BlockchainManagerHandle, ) -> Result { let BlockchainManagerResponse::Target(target) = blockchain_manager @@ -125,7 +132,7 @@ pub(super) async fn target( } /// [`BlockchainManagerRequest::TargetHeight`] -pub(super) async fn target_height( +pub(crate) async fn target_height( blockchain_manager: &mut BlockchainManagerHandle, ) -> Result { let BlockchainManagerResponse::TargetHeight { height } = blockchain_manager @@ -139,3 +146,76 @@ pub(super) async fn target_height( Ok(usize_to_u64(height)) } + +/// [`BlockchainManagerRequest::GenerateBlocks`] +pub(crate) async fn generate_blocks( + blockchain_manager: &mut BlockchainManagerHandle, + amount_of_blocks: u64, + prev_block: [u8; 32], + starting_nonce: u32, + wallet_address: String, +) -> Result<(Vec<[u8; 32]>, u64), Error> { + let BlockchainManagerResponse::GenerateBlocks { blocks, height } = blockchain_manager + .ready() + .await? + .call(BlockchainManagerRequest::GenerateBlocks { + amount_of_blocks, + prev_block, + starting_nonce, + wallet_address, + }) + .await? + else { + unreachable!(); + }; + + Ok((blocks, usize_to_u64(height))) +} + +// [`BlockchainManagerRequest::Spans`] +pub(crate) async fn spans( + blockchain_manager: &mut BlockchainManagerHandle, +) -> Result, Error> { + // let BlockchainManagerResponse::Spans(vec) = blockchain_manager + // .ready() + // .await? + // .call(BlockchainManagerRequest::Spans) + // .await? + // else { + // unreachable!(); + // }; + + let vec: Vec> = todo!(); + + // FIXME: impl this map somewhere instead of inline. + let vec = vec + .into_iter() + .map(|span| Span { + connection_id: String::from(ConnectionId::DEFAULT_STR), + nblocks: span.nblocks, + rate: span.rate, + remote_address: span.remote_address.to_string(), + size: span.size, + speed: span.speed, + start_block_height: span.start_block_height, + }) + .collect(); + + Ok(vec) +} + +/// [`BlockchainManagerRequest::NextNeededPruningSeed`] +pub(crate) async fn next_needed_pruning_seed( + blockchain_manager: &mut BlockchainManagerHandle, +) -> Result { + let BlockchainManagerResponse::NextNeededPruningSeed(seed) = blockchain_manager + .ready() + .await? 
+ .call(BlockchainManagerRequest::NextNeededPruningSeed) + .await? + else { + unreachable!(); + }; + + Ok(seed) +} diff --git a/binaries/cuprated/src/rpc/request/txpool.rs b/binaries/cuprated/src/rpc/request/txpool.rs index a36778e..eadbb23 100644 --- a/binaries/cuprated/src/rpc/request/txpool.rs +++ b/binaries/cuprated/src/rpc/request/txpool.rs @@ -2,7 +2,7 @@ use std::convert::Infallible; -use anyhow::Error; +use anyhow::{anyhow, Error}; use tower::{Service, ServiceExt}; use cuprate_helper::cast::usize_to_u64; @@ -14,15 +14,17 @@ use cuprate_txpool::{ TxEntry, }; +// FIXME: use `anyhow::Error` over `tower::BoxError` in txpool. + /// [`TxpoolReadRequest::Backlog`] -pub(super) async fn backlog(txpool_read: &mut TxpoolReadHandle) -> Result, Error> { +pub(crate) async fn backlog(txpool_read: &mut TxpoolReadHandle) -> Result, Error> { let TxpoolReadResponse::Backlog(tx_entries) = txpool_read .ready() .await - .expect("TODO") + .map_err(|e| anyhow!(e))? .call(TxpoolReadRequest::Backlog) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; @@ -31,14 +33,19 @@ pub(super) async fn backlog(txpool_read: &mut TxpoolReadHandle) -> Result Result { +pub(crate) async fn size( + txpool_read: &mut TxpoolReadHandle, + include_sensitive_txs: bool, +) -> Result { let TxpoolReadResponse::Size(size) = txpool_read .ready() .await - .expect("TODO") - .call(TxpoolReadRequest::Size) + .map_err(|e| anyhow!(e))? + .call(TxpoolReadRequest::Size { + include_sensitive_txs, + }) .await - .expect("TODO") + .map_err(|e| anyhow!(e))? else { unreachable!(); }; @@ -47,9 +54,17 @@ pub(super) async fn size(txpool_read: &mut TxpoolReadHandle) -> Result, +) -> Result<(), Error> { + todo!(); + Ok(()) +} + +/// TODO +pub(crate) async fn relay( + txpool_manager: &mut Infallible, tx_hashes: Vec<[u8; 32]>, ) -> Result<(), Error> { todo!(); diff --git a/consensus/context/src/lib.rs b/consensus/context/src/lib.rs index 198d5a1..acc4d23 100644 --- a/consensus/context/src/lib.rs +++ b/consensus/context/src/lib.rs @@ -18,6 +18,7 @@ use std::{ }; use futures::{channel::oneshot, FutureExt}; +use monero_serai::block::Block; use tokio::sync::mpsc; use tokio_util::sync::PollSender; use tower::Service; @@ -267,6 +268,21 @@ pub enum BlockChainContextRequest { grace_blocks: u64, }, + /// Calculate proof-of-work for this block. + CalculatePow { + /// The hardfork of the protocol at this block height. + hardfork: HardFork, + /// The height of the block. + height: usize, + /// The block data. + /// + /// This is boxed because [`Block`] causes this enum to be 1200 bytes, + /// where the 2nd variant is only 96 bytes. + block: Box, + /// The seed hash for the proof-of-work. + seed_hash: [u8; 32], + }, + /// Clear the alt chain context caches. ClearAltCache, @@ -364,6 +380,9 @@ pub enum BlockChainContextResponse { /// Response to [`BlockChainContextRequest::FeeEstimate`] FeeEstimate(FeeEstimate), + /// Response to [`BlockChainContextRequest::CalculatePow`] + CalculatePow([u8; 32]), + /// Response to [`BlockChainContextRequest::AltChains`] /// /// If the inner [`Vec::is_empty`], there were no alternate chains. diff --git a/consensus/context/src/task.rs b/consensus/context/src/task.rs index 65cfea9..b075995 100644 --- a/consensus/context/src/task.rs +++ b/consensus/context/src/task.rs @@ -324,7 +324,8 @@ impl ContextTask { } BlockChainContextRequest::HardForkInfo(_) | BlockChainContextRequest::FeeEstimate { .. 
} - | BlockChainContextRequest::AltChains => { + | BlockChainContextRequest::AltChains + | BlockChainContextRequest::CalculatePow { .. } => { todo!("finish https://github.com/Cuprate/cuprate/pull/297") } }) diff --git a/helper/src/cast.rs b/helper/src/cast.rs index 99b7f53..5628d7d 100644 --- a/helper/src/cast.rs +++ b/helper/src/cast.rs @@ -18,7 +18,6 @@ // // //============================ SAFETY: DO NOT REMOVE ===========================// -//---------------------------------------------------------------------------------------------------- Free functions /// Cast [`u64`] to [`usize`]. #[inline(always)] pub const fn u64_to_usize(u: u64) -> usize { diff --git a/p2p/address-book/src/book.rs b/p2p/address-book/src/book.rs index 907d691..3e5269f 100644 --- a/p2p/address-book/src/book.rs +++ b/p2p/address-book/src/book.rs @@ -423,7 +423,8 @@ impl Service> for AddressBook { AddressBookRequest::PeerlistSize | AddressBookRequest::ConnectionCount | AddressBookRequest::SetBan(_) - | AddressBookRequest::GetBans => { + | AddressBookRequest::GetBans + | AddressBookRequest::ConnectionInfo => { todo!("finish https://github.com/Cuprate/cuprate/pull/297") } }; diff --git a/p2p/p2p-core/Cargo.toml b/p2p/p2p-core/Cargo.toml index 0a6aaf3..bc6c833 100644 --- a/p2p/p2p-core/Cargo.toml +++ b/p2p/p2p-core/Cargo.toml @@ -10,9 +10,10 @@ default = ["borsh"] borsh = ["dep:borsh", "cuprate-pruning/borsh"] [dependencies] -cuprate-helper = { workspace = true, features = ["asynch"], default-features = false } -cuprate-wire = { workspace = true, features = ["tracing"] } +cuprate-helper = { workspace = true, features = ["asynch"], default-features = false } +cuprate-wire = { workspace = true, features = ["tracing"] } cuprate-pruning = { workspace = true } +cuprate-types = { workspace = true } tokio = { workspace = true, features = ["net", "sync", "macros", "time", "rt", "rt-multi-thread"]} tokio-util = { workspace = true, features = ["codec"] } diff --git a/p2p/p2p-core/src/ban.rs b/p2p/p2p-core/src/ban.rs deleted file mode 100644 index 76fd3eb..0000000 --- a/p2p/p2p-core/src/ban.rs +++ /dev/null @@ -1,23 +0,0 @@ -//! Data structures related to bans. - -use std::time::{Duration, Instant}; - -use crate::NetZoneAddress; - -/// Data within [`crate::services::AddressBookRequest::SetBan`]. -pub struct SetBan { - /// Address of the peer. - pub address: A, - /// - If [`Some`], how long this peer should be banned for - /// - If [`None`], the peer will be unbanned - pub ban: Option, -} - -/// Data within [`crate::services::AddressBookResponse::GetBans`]. -pub struct BanState { - /// Address of the peer. 
- pub address: A, - /// - If [`Some`], the peer is banned until this [`Instant`] - /// - If [`None`], the peer is not currently banned - pub unban_instant: Option, -} diff --git a/p2p/p2p-core/src/client/handshaker/builder/dummy.rs b/p2p/p2p-core/src/client/handshaker/builder/dummy.rs index 8bb966d..48b3daf 100644 --- a/p2p/p2p-core/src/client/handshaker/builder/dummy.rs +++ b/p2p/p2p-core/src/client/handshaker/builder/dummy.rs @@ -111,7 +111,8 @@ impl Service> for DummyAddressBook { AddressBookRequest::PeerlistSize | AddressBookRequest::ConnectionCount | AddressBookRequest::SetBan(_) - | AddressBookRequest::GetBans => { + | AddressBookRequest::GetBans + | AddressBookRequest::ConnectionInfo => { todo!("finish https://github.com/Cuprate/cuprate/pull/297") } })) diff --git a/p2p/p2p-core/src/lib.rs b/p2p/p2p-core/src/lib.rs index 5b93b59..26e1068 100644 --- a/p2p/p2p-core/src/lib.rs +++ b/p2p/p2p-core/src/lib.rs @@ -75,7 +75,6 @@ use cuprate_wire::{ NetworkAddress, }; -pub mod ban; pub mod client; mod constants; pub mod error; @@ -83,6 +82,7 @@ pub mod handles; mod network_zones; pub mod protocol; pub mod services; +pub mod types; pub use error::*; pub use network_zones::{ClearNet, ClearNetServerCfg}; diff --git a/p2p/p2p-core/src/services.rs b/p2p/p2p-core/src/services.rs index 495b719..6d1089c 100644 --- a/p2p/p2p-core/src/services.rs +++ b/p2p/p2p-core/src/services.rs @@ -4,9 +4,9 @@ use cuprate_pruning::{PruningError, PruningSeed}; use cuprate_wire::{CoreSyncData, PeerListEntryBase}; use crate::{ - ban::{BanState, SetBan}, client::InternalPeerID, handles::ConnectionHandle, + types::{BanState, ConnectionInfo, SetBan}, NetZoneAddress, NetworkAddressIncorrectZone, NetworkZone, }; @@ -118,6 +118,9 @@ pub enum AddressBookRequest { /// Get the amount of white & grey peers. PeerlistSize, + /// Get information on all connections. + ConnectionInfo, + /// Get the amount of incoming & outgoing connections. ConnectionCount, @@ -152,6 +155,9 @@ pub enum AddressBookResponse { /// Response to [`AddressBookRequest::PeerlistSize`]. PeerlistSize { white: usize, grey: usize }, + /// Response to [`AddressBookRequest::ConnectionInfo`]. + ConnectionInfo(Vec>), + /// Response to [`AddressBookRequest::ConnectionCount`]. ConnectionCount { incoming: usize, outgoing: usize }, diff --git a/p2p/p2p-core/src/types.rs b/p2p/p2p-core/src/types.rs new file mode 100644 index 0000000..ca56055 --- /dev/null +++ b/p2p/p2p-core/src/types.rs @@ -0,0 +1,96 @@ +//! General data structures. + +use std::time::{Duration, Instant}; + +use cuprate_pruning::PruningSeed; +use cuprate_types::{AddressType, ConnectionState}; + +use crate::NetZoneAddress; + +/// Data within [`crate::services::AddressBookRequest::SetBan`]. +pub struct SetBan { + /// Address of the peer. + pub address: A, + /// - If [`Some`], how long this peer should be banned for + /// - If [`None`], the peer will be unbanned + pub ban: Option, +} + +/// Data within [`crate::services::AddressBookResponse::GetBans`]. +pub struct BanState { + /// Address of the peer. + pub address: A, + /// - If [`Some`], the peer is banned until this [`Instant`] + /// - If [`None`], the peer is not currently banned + pub unban_instant: Option, +} + +/// Data within [`crate::services::AddressBookResponse::ConnectionInfo`]. +pub struct ConnectionInfo { + // The following fields are mostly the same as `monerod`. 
+ pub address: A, + pub address_type: AddressType, + pub avg_download: u64, + pub avg_upload: u64, + pub current_download: u64, + pub current_upload: u64, + pub height: u64, + /// Either a domain or an IP without the port. + pub host: String, + pub incoming: bool, + pub live_time: u64, + pub localhost: bool, + pub local_ip: bool, + pub peer_id: u64, + pub pruning_seed: PruningSeed, + pub recv_count: u64, + pub recv_idle_time: u64, + pub rpc_credits_per_hash: u32, + pub rpc_port: u16, + pub send_count: u64, + pub send_idle_time: u64, + pub state: ConnectionState, + pub support_flags: u32, + + // The following fields are slightly different than `monerod`. + + // + /// [`None`] if Tor/i2p or unknown. + pub socket_addr: Option, + + /// This field does not exist for `cuprated`'s RPC, this is just a marker type: + /// - + /// - + /// + /// [`ConnectionId::DEFAULT_STR`] is used when mapping to the RPC type. + pub connection_id: ConnectionId, +} + +/// Marker type for `monerod`'s connection ID. +/// +/// `connection_id` is a 128-bit `uuid` in `monerod`. +/// `cuprated` does not support this field so it returns +/// the default value in the RPC interface, an all 0-bit UUID. +/// +/// This default value in string form is [`ConnectionId::DEFAULT_STR`]. +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ConnectionId; + +impl ConnectionId { + /// [`str`] representation of a default connection ID. + pub const DEFAULT_STR: &str = "00000000000000000000000000000000"; +} + +/// Used in RPC's `sync_info`. +/// +// TODO: fix docs after +// Data within [`crate::services::AddressBookResponse::Spans`]. +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Span { + pub nblocks: u64, + pub rate: u32, + pub remote_address: A, + pub size: u64, + pub speed: u32, + pub start_block_height: u64, +} diff --git a/rpc/types/Cargo.toml b/rpc/types/Cargo.toml index e9ca529..6d8797b 100644 --- a/rpc/types/Cargo.toml +++ b/rpc/types/Cargo.toml @@ -10,16 +10,16 @@ keywords = ["cuprate", "rpc", "types", "monero"] [features] default = ["serde", "epee"] -serde = ["dep:serde", "cuprate-fixed-bytes/serde"] -epee = ["dep:cuprate-epee-encoding"] +serde = ["dep:serde", "cuprate-fixed-bytes/serde", "cuprate-types/serde"] +epee = ["dep:cuprate-epee-encoding", "cuprate-types/epee"] [dependencies] cuprate-epee-encoding = { workspace = true, optional = true } cuprate-fixed-bytes = { workspace = true } -cuprate-types = { workspace = true, default-features = false, features = ["epee", "serde"] } +cuprate-types = { workspace = true, default-features = false } -paste = { workspace = true } -serde = { workspace = true, optional = true } +paste = { workspace = true } +serde = { workspace = true, optional = true } [dev-dependencies] cuprate-test-utils = { workspace = true } diff --git a/rpc/types/src/base.rs b/rpc/types/src/base.rs index c131e41..89eafc5 100644 --- a/rpc/types/src/base.rs +++ b/rpc/types/src/base.rs @@ -58,61 +58,37 @@ pub struct ResponseBase { } impl ResponseBase { - /// `const` version of [`Default::default`]. - /// - /// ```rust - /// use cuprate_rpc_types::{misc::*, base::*}; - /// - /// let new = ResponseBase::new(); - /// assert_eq!(new, ResponseBase { - /// status: Status::Ok, - /// untrusted: false, - /// }); - /// ``` - pub const fn new() -> Self { - Self { - status: Status::Ok, - untrusted: false, - } - } - - /// Returns OK and trusted [`Self`]. + /// [`Status::Ok`] and trusted [`Self`]. /// /// This is the most common version of [`Self`]. 
/// /// ```rust /// use cuprate_rpc_types::{misc::*, base::*}; /// - /// let ok = ResponseBase::ok(); - /// assert_eq!(ok, ResponseBase { + /// assert_eq!(ResponseBase::OK, ResponseBase { /// status: Status::Ok, /// untrusted: false, /// }); /// ``` - pub const fn ok() -> Self { - Self { - status: Status::Ok, - untrusted: false, - } - } + pub const OK: Self = Self { + status: Status::Ok, + untrusted: false, + }; - /// Same as [`Self::ok`] but with [`Self::untrusted`] set to `true`. + /// Same as [`Self::OK`] but with [`Self::untrusted`] set to `true`. /// /// ```rust /// use cuprate_rpc_types::{misc::*, base::*}; /// - /// let ok_untrusted = ResponseBase::ok_untrusted(); - /// assert_eq!(ok_untrusted, ResponseBase { + /// assert_eq!(ResponseBase::OK_UNTRUSTED, ResponseBase { /// status: Status::Ok, /// untrusted: true, /// }); /// ``` - pub const fn ok_untrusted() -> Self { - Self { - status: Status::Ok, - untrusted: true, - } - } + pub const OK_UNTRUSTED: Self = Self { + status: Status::Ok, + untrusted: true, + }; } #[cfg(feature = "epee")] @@ -148,9 +124,9 @@ impl AccessResponseBase { /// ```rust /// use cuprate_rpc_types::{misc::*, base::*}; /// - /// let new = AccessResponseBase::new(ResponseBase::ok()); + /// let new = AccessResponseBase::new(ResponseBase::OK); /// assert_eq!(new, AccessResponseBase { - /// response_base: ResponseBase::ok(), + /// response_base: ResponseBase::OK, /// credits: 0, /// top_hash: "".into(), /// }); @@ -163,47 +139,41 @@ impl AccessResponseBase { } } - /// Returns OK and trusted [`Self`]. + /// [`Status::Ok`] and trusted [`Self`]. /// /// This is the most common version of [`Self`]. /// /// ```rust /// use cuprate_rpc_types::{misc::*, base::*}; /// - /// let ok = AccessResponseBase::ok(); - /// assert_eq!(ok, AccessResponseBase { - /// response_base: ResponseBase::ok(), + /// assert_eq!(AccessResponseBase::OK, AccessResponseBase { + /// response_base: ResponseBase::OK, /// credits: 0, /// top_hash: "".into(), /// }); /// ``` - pub const fn ok() -> Self { - Self { - response_base: ResponseBase::ok(), - credits: 0, - top_hash: String::new(), - } - } + pub const OK: Self = Self { + response_base: ResponseBase::OK, + credits: 0, + top_hash: String::new(), + }; - /// Same as [`Self::ok`] but with `untrusted` set to `true`. + /// Same as [`Self::OK`] but with `untrusted` set to `true`. /// /// ```rust /// use cuprate_rpc_types::{misc::*, base::*}; /// - /// let ok_untrusted = AccessResponseBase::ok_untrusted(); - /// assert_eq!(ok_untrusted, AccessResponseBase { - /// response_base: ResponseBase::ok_untrusted(), + /// assert_eq!(AccessResponseBase::OK_UNTRUSTED, AccessResponseBase { + /// response_base: ResponseBase::OK_UNTRUSTED, /// credits: 0, /// top_hash: "".into(), /// }); /// ``` - pub const fn ok_untrusted() -> Self { - Self { - response_base: ResponseBase::ok_untrusted(), - credits: 0, - top_hash: String::new(), - } - } + pub const OK_UNTRUSTED: Self = Self { + response_base: ResponseBase::OK_UNTRUSTED, + credits: 0, + top_hash: String::new(), + }; } #[cfg(feature = "epee")] diff --git a/rpc/types/src/json.rs b/rpc/types/src/json.rs index 6fb538c..cb55e64 100644 --- a/rpc/types/src/json.rs +++ b/rpc/types/src/json.rs @@ -186,7 +186,7 @@ define_request_and_response! { // . 
#[doc = serde_doc_test!( GET_BLOCK_TEMPLATE_RESPONSE => GetBlockTemplateResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, blockhashing_blob: "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a00000000e0c20372be23d356347091025c5b5e8f2abf83ab618378565cce2b703491523401".into(), blocktemplate_blob: "1010f4bae0b4069d648e741d85ca0e7acb4501f051b27e9b107d3cd7a3f03aa7f776089117c81a0000000002c681c30101ff8a81c3010180e0a596bb11033b7eedf47baf878f3490cb20b696079c34bd017fe59b0d070e74d73ffabc4bb0e05f011decb630f3148d0163b3bd39690dde4078e4cfb69fecf020d6278a27bad10c58023c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".into(), difficulty_top64: 0, @@ -242,7 +242,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_BLOCK_COUNT_RESPONSE => GetBlockCountResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, count: 3195019, } )] @@ -334,7 +334,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GENERATE_BLOCKS_RESPONSE => GenerateBlocksResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, blocks: vec!["49b712db7760e3728586f8434ee8bc8d7b3d410dac6bb6e98bf5845c83b917e4".into()], height: 9783, } @@ -359,7 +359,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_LAST_BLOCK_HEADER_RESPONSE => GetLastBlockHeaderResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, block_header: BlockHeader { block_size: 200419, block_weight: 200419, @@ -411,7 +411,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_BLOCK_HEADER_BY_HASH_RESPONSE => GetBlockHeaderByHashResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, block_headers: vec![], block_header: BlockHeader { block_size: 210, @@ -466,7 +466,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_BLOCK_HEADER_BY_HEIGHT_RESPONSE => GetBlockHeaderByHeightResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, block_header: BlockHeader { block_size: 210, block_weight: 210, @@ -521,7 +521,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_BLOCK_HEADERS_RANGE_RESPONSE => GetBlockHeadersRangeResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, headers: vec![ BlockHeader { block_size: 301413, @@ -603,7 +603,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_BLOCK_RESPONSE => GetBlockResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, blob: "1010c58bab9b06b27bdecfc6cd0a46172d136c08831cf67660377ba992332363228b1b722781e7807e07f502cef8a70101ff92f8a7010180e0a596bb1103d7cbf826b665d7a532c316982dc8dbc24f285cbc18bbcc27c7164cd9b3277a85d034019f629d8b36bd16a2bfce3ea80c31dc4d8762c67165aec21845494e32b7582fe00211000000297a787a000000000000000000000000".into(), block_header: BlockHeader { block_size: 106, @@ -656,11 +656,11 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_CONNECTIONS_RESPONSE => GetConnectionsResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, connections: vec![ ConnectionInfo { address: "3evk3kezfjg44ma6tvesy7rbxwwpgpympj45xar5fo4qajrsmkoaqdqd.onion:18083".into(), - address_type: 4, + address_type: cuprate_types::AddressType::Tor, avg_download: 0, avg_upload: 0, connection_id: "22ef856d0f1d44cc95e84fecfd065fe2".into(), @@ -682,12 +682,12 @@ define_request_and_response! 
{ rpc_port: 0, send_count: 3406572, send_idle_time: 30, - state: "normal".into(), + state: cuprate_types::ConnectionState::Normal, support_flags: 0 }, ConnectionInfo { address: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion:18083".into(), - address_type: 4, + address_type: cuprate_types::AddressType::Tor, avg_download: 0, avg_upload: 0, connection_id: "c7734e15936f485a86d2b0534f87e499".into(), @@ -709,7 +709,7 @@ define_request_and_response! { rpc_port: 0, send_count: 3370566, send_idle_time: 120, - state: "normal".into(), + state: cuprate_types::ConnectionState::Normal, support_flags: 0 } ], @@ -730,7 +730,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_INFO_RESPONSE => GetInfoResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, adjusted_time: 1721245289, alt_blocks_count: 16, block_size_limit: 600000, @@ -833,7 +833,7 @@ define_request_and_response! { #[doc = serde_doc_test!( HARD_FORK_INFO_RESPONSE => HardForkInfoResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, earliest_height: 2689608, enabled: true, state: 0, @@ -879,7 +879,7 @@ define_request_and_response! { #[doc = serde_doc_test!( SET_BANS_RESPONSE => SetBansResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, } )] ResponseBase {} @@ -894,7 +894,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_BANS_RESPONSE => GetBansResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, bans: vec![ GetBan { host: "104.248.206.131".into(), @@ -996,7 +996,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_OUTPUT_HISTOGRAM_RESPONSE => GetOutputHistogramResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, histogram: vec![HistogramEntry { amount: 20000000000, recent_instances: 0, @@ -1030,7 +1030,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_COINBASE_TX_SUM_RESPONSE => GetCoinbaseTxSumResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, emission_amount: 9387854817320, emission_amount_top64: 0, fee_amount: 83981380000, @@ -1059,7 +1059,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_VERSION_RESPONSE => GetVersionResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, current_height: 3195051, hard_forks: vec![ HardforkEntry { @@ -1145,12 +1145,16 @@ define_request_and_response! { get_fee_estimate, cc73fe71162d564ffda8e549b79a350bca53c454 => core_rpc_server_commands_defs.h => 2250..=2277, - GetFeeEstimate (empty), - Request {}, + + GetFeeEstimate, + + Request { + grace_blocks: u64 = default_zero::(), "default_zero", + }, #[doc = serde_doc_test!( GET_FEE_ESTIMATE_RESPONSE => GetFeeEstimateResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, fee: 20000, fees: vec![20000,80000,320000,4000000], quantization_mask: 10000, @@ -1172,7 +1176,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_ALTERNATE_CHAINS_RESPONSE => GetAlternateChainsResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, chains: vec![ ChainInfo { block_hash: "4826c7d45d7cf4f02985b5c405b0e5d7f92c8d25e015492ce19aa3b209295dce".into(), @@ -1240,7 +1244,7 @@ define_request_and_response! { #[doc = serde_doc_test!( SYNC_INFO_RESPONSE => SyncInfoResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, height: 3195157, next_needed_pruning_seed: 0, overview: "[]".into(), @@ -1249,7 +1253,7 @@ define_request_and_response! 
{ SyncInfoPeer { info: ConnectionInfo { address: "142.93.128.65:44986".into(), - address_type: 1, + address_type: cuprate_types::AddressType::Ipv4, avg_download: 1, avg_upload: 1, connection_id: "a5803c4c2dac49e7b201dccdef54c862".into(), @@ -1271,14 +1275,14 @@ define_request_and_response! { rpc_port: 18089, send_count: 32235, send_idle_time: 6, - state: "normal".into(), + state: cuprate_types::ConnectionState::Normal, support_flags: 1 } }, SyncInfoPeer { info: ConnectionInfo { address: "4iykytmumafy5kjahdqc7uzgcs34s2vwsadfjpk4znvsa5vmcxeup2qd.onion:18083".into(), - address_type: 4, + address_type: cuprate_types::AddressType::Tor, avg_download: 0, avg_upload: 0, connection_id: "277f7c821bc546878c8bd29977e780f5".into(), @@ -1300,7 +1304,7 @@ define_request_and_response! { rpc_port: 0, send_count: 99120, send_idle_time: 15, - state: "normal".into(), + state: cuprate_types::ConnectionState::Normal, support_flags: 0 } } @@ -1330,7 +1334,7 @@ define_request_and_response! { // TODO: enable test after binary string impl. // #[doc = serde_doc_test!( // GET_TRANSACTION_POOL_BACKLOG_RESPONSE => GetTransactionPoolBacklogResponse { - // base: ResponseBase::ok(), + // base: ResponseBase::OK, // backlog: "...Binary...".into(), // } // )] @@ -1372,7 +1376,7 @@ define_request_and_response! { // TODO: enable test after binary string impl. // #[doc = serde_doc_test!( // GET_OUTPUT_DISTRIBUTION_RESPONSE => GetOutputDistributionResponse { - // base: AccessResponseBase::ok(), + // base: AccessResponseBase::OK, // distributions: vec![Distribution::Uncompressed(DistributionUncompressed { // start_height: 1462078, // base: 0, @@ -1396,7 +1400,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_MINER_DATA_RESPONSE => GetMinerDataResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, already_generated_coins: 18186022843595960691, difficulty: "0x48afae42de".into(), height: 2731375, @@ -1449,7 +1453,7 @@ define_request_and_response! { #[doc = serde_doc_test!( PRUNE_BLOCKCHAIN_RESPONSE => PruneBlockchainResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, pruned: true, pruning_seed: 387, } @@ -1515,7 +1519,7 @@ define_request_and_response! { #[doc = serde_doc_test!( FLUSH_CACHE_RESPONSE => FlushCacheResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, } )] ResponseBase {} @@ -1544,7 +1548,7 @@ define_request_and_response! { #[doc = serde_doc_test!( ADD_AUX_POW_RESPONSE => AddAuxPowResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, aux_pow: vec![AuxPow { hash: "7b35762de164b20885e15dbe656b1138db06bb402fa1796f5765a23933d8859a".into(), id: "3200b4ea97c3b2081cd4190b58e49572b2319fed00d030ad51809dff06b5d8c8".into(), diff --git a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs index 4430dbe..8f7467b 100644 --- a/rpc/types/src/misc/misc.rs +++ b/rpc/types/src/misc/misc.rs @@ -110,7 +110,7 @@ define_struct_and_impl_epee! { /// Used in [`crate::json::GetConnectionsResponse`]. ConnectionInfo { address: String, - address_type: u8, + address_type: cuprate_types::AddressType, avg_download: u64, avg_upload: u64, connection_id: String, @@ -135,7 +135,7 @@ define_struct_and_impl_epee! { // Exists in the original definition, but isn't // used or (de)serialized for RPC purposes. // ssl: bool, - state: String, + state: cuprate_types::ConnectionState, support_flags: u32, } } diff --git a/rpc/types/src/other.rs b/rpc/types/src/other.rs index f743392..3694041 100644 --- a/rpc/types/src/other.rs +++ b/rpc/types/src/other.rs @@ -104,7 +104,7 @@ define_request_and_response! 
{ #[doc = serde_doc_test!( GET_HEIGHT_RESPONSE => GetHeightResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, hash: "68bb1a1cff8e2a44c3221e8e1aff80bc6ca45d06fa8eff4d2a3a7ac31d4efe3f".into(), height: 3195160, } @@ -159,7 +159,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_ALT_BLOCKS_HASHES_RESPONSE => GetAltBlocksHashesResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, blks_hashes: vec!["8ee10db35b1baf943f201b303890a29e7d45437bd76c2bd4df0d2f2ee34be109".into()], } )] @@ -189,7 +189,7 @@ define_request_and_response! { #[doc = serde_doc_test!( IS_KEY_IMAGE_SPENT_RESPONSE => IsKeyImageSpentResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, spent_status: vec![1, 1], } )] @@ -285,7 +285,7 @@ define_request_and_response! { #[doc = serde_doc_test!( START_MINING_RESPONSE => StartMiningResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, } )] ResponseBase {} @@ -300,7 +300,7 @@ define_request_and_response! { #[doc = serde_doc_test!( STOP_MINING_RESPONSE => StopMiningResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, } )] ResponseBase {} @@ -315,7 +315,7 @@ define_request_and_response! { #[doc = serde_doc_test!( MINING_STATUS_RESPONSE => MiningStatusResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, active: false, address: "".into(), bg_idle_threshold: 0, @@ -361,7 +361,7 @@ define_request_and_response! { #[doc = serde_doc_test!( SAVE_BC_RESPONSE => SaveBcResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, } )] ResponseBase {} @@ -387,7 +387,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_PEER_LIST_RESPONSE => GetPeerListResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, gray_list: vec![ Peer { host: "161.97.193.0".into(), @@ -469,7 +469,7 @@ define_request_and_response! { #[doc = serde_doc_test!( SET_LOG_HASH_RATE_RESPONSE => SetLogHashRateResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, } )] ResponseBase {} @@ -494,7 +494,7 @@ define_request_and_response! { #[doc = serde_doc_test!( SET_LOG_LEVEL_RESPONSE => SetLogLevelResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, } )] ResponseBase {} @@ -518,7 +518,7 @@ define_request_and_response! { #[doc = serde_doc_test!( SET_LOG_CATEGORIES_RESPONSE => SetLogCategoriesResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, categories: "*:INFO".into(), } )] @@ -584,7 +584,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_TRANSACTION_POOL_STATS_RESPONSE => GetTransactionPoolStatsResponse { - base: AccessResponseBase::ok(), + base: AccessResponseBase::OK, pool_stats: TxpoolStats { bytes_max: 11843, bytes_med: 2219, @@ -646,7 +646,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_LIMIT_RESPONSE => GetLimitResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, limit_down: 1280000, limit_up: 1280000, } @@ -678,7 +678,7 @@ define_request_and_response! { #[doc = serde_doc_test!( SET_LIMIT_RESPONSE => SetLimitResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, limit_down: 1024, limit_up: 128, } @@ -709,7 +709,7 @@ define_request_and_response! { #[doc = serde_doc_test!( OUT_PEERS_RESPONSE => OutPeersResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, out_peers: 3232235535, } )] @@ -742,7 +742,7 @@ define_request_and_response! 
{ #[doc = serde_doc_test!( GET_NET_STATS_RESPONSE => GetNetStatsResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, start_time: 1721251858, total_bytes_in: 16283817214, total_bytes_out: 34225244079, @@ -781,7 +781,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_OUTS_RESPONSE => GetOutsResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, outs: vec![ OutKey { height: 51941, @@ -825,7 +825,7 @@ define_request_and_response! { #[doc = serde_doc_test!( UPDATE_RESPONSE => UpdateResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, auto_uri: "".into(), hash: "".into(), path: "".into(), @@ -862,7 +862,7 @@ define_request_and_response! { #[doc = serde_doc_test!( POP_BLOCKS_RESPONSE => PopBlocksResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, height: 76482, } )] @@ -881,7 +881,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_TRANSACTION_POOL_HASHES_RESPONSE => GetTransactionPoolHashesResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, tx_hashes: vec![ "aa928aed888acd6152c60194d50a4df29b0b851be6169acf11b6a8e304dd6c03".into(), "794345f321a98f3135151f3056c0fdf8188646a8dab27de971428acf3551dd11".into(), @@ -931,7 +931,7 @@ define_request_and_response! { #[doc = serde_doc_test!( GET_PUBLIC_NODES_RESPONSE => GetPublicNodesResponse { - base: ResponseBase::ok(), + base: ResponseBase::OK, gray: vec![], white: vec![ PublicNode { diff --git a/storage/blockchain/src/service/read.rs b/storage/blockchain/src/service/read.rs index a3b82bd..e3c0180 100644 --- a/storage/blockchain/src/service/read.rs +++ b/storage/blockchain/src/service/read.rs @@ -121,6 +121,8 @@ fn map_request( R::DatabaseSize => database_size(env), R::OutputHistogram(input) => output_histogram(env, input), R::CoinbaseTxSum { height, count } => coinbase_tx_sum(env, height, count), + R::AltChains => alt_chains(env), + R::AltChainCount => alt_chain_count(env), } /* SOMEDAY: post-request handling, run some code for each request? */ @@ -648,3 +650,13 @@ fn output_histogram(env: &ConcreteEnv, input: OutputHistogramInput) -> ResponseR fn coinbase_tx_sum(env: &ConcreteEnv, height: usize, count: u64) -> ResponseResult { Ok(BlockchainResponse::CoinbaseTxSum(todo!())) } + +/// [`BlockchainReadRequest::AltChains`] +fn alt_chains(env: &ConcreteEnv) -> ResponseResult { + Ok(BlockchainResponse::AltChains(todo!())) +} + +/// [`BlockchainReadRequest::AltChainCount`] +fn alt_chain_count(env: &ConcreteEnv) -> ResponseResult { + Ok(BlockchainResponse::AltChainCount(todo!())) +} diff --git a/storage/txpool/src/service/interface.rs b/storage/txpool/src/service/interface.rs index 5cd518f..a27c630 100644 --- a/storage/txpool/src/service/interface.rs +++ b/storage/txpool/src/service/interface.rs @@ -35,7 +35,11 @@ pub enum TxpoolReadRequest { Backlog, /// Get the number of transactions in the pool. - Size, + Size { + /// If this is [`true`], the size returned will + /// include private transactions in the pool. + include_sensitive_txs: bool, + }, } //---------------------------------------------------------------------------------------------------- TxpoolReadResponse @@ -66,7 +70,7 @@ pub enum TxpoolReadResponse { /// Response to [`TxpoolReadRequest::Backlog`]. /// - /// The inner `Vec` contains information on all + /// The inner [`Vec`] contains information on all /// the transactions currently in the pool. 
Backlog(Vec), diff --git a/storage/txpool/src/service/read.rs b/storage/txpool/src/service/read.rs index 257fe8e..0de1e7d 100644 --- a/storage/txpool/src/service/read.rs +++ b/storage/txpool/src/service/read.rs @@ -71,7 +71,9 @@ fn map_request( } TxpoolReadRequest::TxsForBlock(txs_needed) => txs_for_block(env, txs_needed), TxpoolReadRequest::Backlog => backlog(env), - TxpoolReadRequest::Size => size(env), + TxpoolReadRequest::Size { + include_sensitive_txs, + } => size(env, include_sensitive_txs), } } @@ -201,6 +203,6 @@ fn backlog(env: &ConcreteEnv) -> ReadResponseResult { /// [`TxpoolReadRequest::Size`]. #[inline] -fn size(env: &ConcreteEnv) -> ReadResponseResult { +fn size(env: &ConcreteEnv, include_sensitive_txs: bool) -> ReadResponseResult { Ok(TxpoolReadResponse::Size(todo!())) } diff --git a/storage/txpool/src/tx.rs b/storage/txpool/src/tx.rs index 6425326..29afae8 100644 --- a/storage/txpool/src/tx.rs +++ b/storage/txpool/src/tx.rs @@ -5,6 +5,8 @@ /// Used in [`TxpoolReadResponse::Backlog`](crate::service::interface::TxpoolReadResponse::Backlog). #[derive(Copy, Clone, Debug, Ord, PartialOrd, Eq, PartialEq, Hash)] pub struct TxEntry { + /// The transaction's ID (hash). + pub id: [u8; 32], /// The transaction's weight. pub weight: u64, /// The transaction's fee. diff --git a/types/src/address_type.rs b/types/src/address_type.rs new file mode 100644 index 0000000..743902d --- /dev/null +++ b/types/src/address_type.rs @@ -0,0 +1,147 @@ +//! Types of network addresses; used in P2P. + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "epee")] +use cuprate_epee_encoding::{ + error, + macros::bytes::{Buf, BufMut}, + EpeeValue, Marker, +}; + +use strum::{ + AsRefStr, Display, EnumCount, EnumIs, EnumString, FromRepr, IntoStaticStr, VariantArray, +}; + +/// An enumeration of address types. +/// +/// Used in `cuprate_p2p` and `cuprate_types` +/// +/// Original definition: +/// - +/// +/// # Serde +/// This type's `serde` implementation (de)serializes from a [`u8`]. +/// +/// ```rust +/// use cuprate_types::AddressType as A; +/// use serde_json::{to_string, from_str}; +/// +/// assert_eq!(from_str::(&"0").unwrap(), A::Invalid); +/// assert_eq!(from_str::(&"1").unwrap(), A::Ipv4); +/// assert_eq!(from_str::(&"2").unwrap(), A::Ipv6); +/// assert_eq!(from_str::(&"3").unwrap(), A::I2p); +/// assert_eq!(from_str::(&"4").unwrap(), A::Tor); +/// +/// assert_eq!(to_string(&A::Invalid).unwrap(), "0"); +/// assert_eq!(to_string(&A::Ipv4).unwrap(), "1"); +/// assert_eq!(to_string(&A::Ipv6).unwrap(), "2"); +/// assert_eq!(to_string(&A::I2p).unwrap(), "3"); +/// assert_eq!(to_string(&A::Tor).unwrap(), "4"); +/// ``` +#[derive( + Copy, + Clone, + Default, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + AsRefStr, + Display, + EnumCount, + EnumIs, + EnumString, + FromRepr, + IntoStaticStr, + VariantArray, +)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", serde(untagged, try_from = "u8", into = "u8"))] +#[repr(u8)] +pub enum AddressType { + #[default] + Invalid, + Ipv4, + Ipv6, + I2p, + Tor, +} + +impl AddressType { + /// Convert [`Self`] to a [`u8`]. + /// + /// ```rust + /// use cuprate_types::AddressType as A; + /// + /// assert_eq!(A::Invalid.to_u8(), 0); + /// assert_eq!(A::Ipv4.to_u8(), 1); + /// assert_eq!(A::Ipv6.to_u8(), 2); + /// assert_eq!(A::I2p.to_u8(), 3); + /// assert_eq!(A::Tor.to_u8(), 4); + /// ``` + pub const fn to_u8(self) -> u8 { + self as u8 + } + + /// Convert a [`u8`] to a [`Self`]. 
+ /// + /// # Errors + /// This returns [`None`] if `u > 4`. + /// + /// ```rust + /// use cuprate_types::AddressType as A; + /// + /// assert_eq!(A::from_u8(0), Some(A::Invalid)); + /// assert_eq!(A::from_u8(1), Some(A::Ipv4)); + /// assert_eq!(A::from_u8(2), Some(A::Ipv6)); + /// assert_eq!(A::from_u8(3), Some(A::I2p)); + /// assert_eq!(A::from_u8(4), Some(A::Tor)); + /// assert_eq!(A::from_u8(5), None); + /// ``` + pub const fn from_u8(u: u8) -> Option { + Some(match u { + 0 => Self::Invalid, + 1 => Self::Ipv4, + 2 => Self::Ipv6, + 3 => Self::I2p, + 4 => Self::Tor, + _ => return None, + }) + } +} + +impl From for u8 { + fn from(value: AddressType) -> Self { + value.to_u8() + } +} + +impl TryFrom for AddressType { + type Error = u8; + fn try_from(value: u8) -> Result { + match Self::from_u8(value) { + Some(s) => Ok(s), + None => Err(value), + } + } +} + +#[cfg(feature = "epee")] +impl EpeeValue for AddressType { + const MARKER: Marker = u8::MARKER; + + fn read(r: &mut B, marker: &Marker) -> error::Result { + let u = u8::read(r, marker)?; + Self::from_u8(u).ok_or(error::Error::Format("u8 was greater than 4")) + } + + fn write(self, w: &mut B) -> error::Result<()> { + let u = self.to_u8(); + u8::write(u, w)?; + Ok(()) + } +} diff --git a/types/src/blockchain.rs b/types/src/blockchain.rs index b7436f0..c39c0bd 100644 --- a/types/src/blockchain.rs +++ b/types/src/blockchain.rs @@ -12,7 +12,8 @@ use monero_serai::block::Block; use crate::{ types::{Chain, ExtendedBlockHeader, OutputOnChain, VerifiedBlockInformation}, - AltBlockInformation, ChainId, CoinbaseTxSum, OutputHistogramEntry, OutputHistogramInput, + AltBlockInformation, ChainId, ChainInfo, CoinbaseTxSum, OutputHistogramEntry, + OutputHistogramInput, }; //---------------------------------------------------------------------------------------------------- ReadRequest @@ -128,6 +129,12 @@ pub enum BlockchainReadRequest { /// /// TODO: document fields after impl. CoinbaseTxSum { height: usize, count: u64 }, + + /// Get information on all alternative chains. + AltChains, + + /// Get the amount of alternative chains that exist. + AltChainCount, } //---------------------------------------------------------------------------------------------------- WriteRequest @@ -276,6 +283,12 @@ pub enum BlockchainResponse { /// Response to [`BlockchainReadRequest::CoinbaseTxSum`]. CoinbaseTxSum(CoinbaseTxSum), + /// Response to [`BlockchainReadRequest::AltChains`]. + AltChains(Vec), + + /// Response to [`BlockchainReadRequest::AltChainCount`]. + AltChainCount(usize), + //------------------------------------------------------ Writes /// A generic Ok response to indicate a request was successfully handled. /// diff --git a/types/src/connection_state.rs b/types/src/connection_state.rs new file mode 100644 index 0000000..69b8ed6 --- /dev/null +++ b/types/src/connection_state.rs @@ -0,0 +1,148 @@ +//! [`ConnectionState`]. + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "epee")] +use cuprate_epee_encoding::{ + error, + macros::bytes::{Buf, BufMut}, + EpeeValue, Marker, +}; + +use strum::{ + AsRefStr, Display, EnumCount, EnumIs, EnumString, FromRepr, IntoStaticStr, VariantArray, +}; + +/// An enumeration of P2P connection states. +/// +/// Used in `cuprate_p2p` and `cuprate_rpc_types`. +/// +/// Original definition: +/// - +/// +/// # Serde +/// This type's `serde` implementation depends on `snake_case`. 
+/// +/// ```rust +/// use cuprate_types::ConnectionState as C; +/// use serde_json::to_string; +/// +/// assert_eq!(to_string(&C::BeforeHandshake).unwrap(), r#""before_handshake""#); +/// assert_eq!(to_string(&C::Synchronizing).unwrap(), r#""synchronizing""#); +/// assert_eq!(to_string(&C::Standby).unwrap(), r#""standby""#); +/// assert_eq!(to_string(&C::Idle).unwrap(), r#""idle""#); +/// assert_eq!(to_string(&C::Normal).unwrap(), r#""normal""#); +/// +/// assert_eq!(C::BeforeHandshake.to_string(), "before_handshake"); +/// assert_eq!(C::Synchronizing.to_string(), "synchronizing"); +/// assert_eq!(C::Standby.to_string(), "standby"); +/// assert_eq!(C::Idle.to_string(), "idle"); +/// assert_eq!(C::Normal.to_string(), "normal"); +/// ``` +#[derive( + Copy, + Clone, + Default, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + AsRefStr, + Display, + EnumCount, + EnumIs, + EnumString, + FromRepr, + IntoStaticStr, + VariantArray, +)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "snake_case"))] // cuprate-rpc-types depends on snake_case +#[strum(serialize_all = "snake_case")] +#[repr(u8)] +pub enum ConnectionState { + BeforeHandshake, + Synchronizing, + Standby, + Idle, + #[default] + Normal, +} + +impl ConnectionState { + /// Convert [`Self`] to a [`u8`]. + /// + /// ```rust + /// use cuprate_types::ConnectionState as C; + /// + /// assert_eq!(C::BeforeHandshake.to_u8(), 0); + /// assert_eq!(C::Synchronizing.to_u8(), 1); + /// assert_eq!(C::Standby.to_u8(), 2); + /// assert_eq!(C::Idle.to_u8(), 3); + /// assert_eq!(C::Normal.to_u8(), 4); + /// ``` + pub const fn to_u8(self) -> u8 { + self as u8 + } + + /// Convert a [`u8`] to a [`Self`]. + /// + /// # Errors + /// This returns [`None`] if `u > 4`. + /// + /// ```rust + /// use cuprate_types::ConnectionState as C; + /// + /// assert_eq!(C::from_u8(0), Some(C::BeforeHandshake)); + /// assert_eq!(C::from_u8(1), Some(C::Synchronizing)); + /// assert_eq!(C::from_u8(2), Some(C::Standby)); + /// assert_eq!(C::from_u8(3), Some(C::Idle)); + /// assert_eq!(C::from_u8(4), Some(C::Normal)); + /// assert_eq!(C::from_u8(5), None); + /// ``` + pub const fn from_u8(u: u8) -> Option { + Some(match u { + 0 => Self::BeforeHandshake, + 1 => Self::Synchronizing, + 2 => Self::Standby, + 3 => Self::Idle, + 4 => Self::Normal, + _ => return None, + }) + } +} + +impl From for u8 { + fn from(value: ConnectionState) -> Self { + value.to_u8() + } +} + +impl TryFrom for ConnectionState { + type Error = u8; + fn try_from(value: u8) -> Result { + match Self::from_u8(value) { + Some(s) => Ok(s), + None => Err(value), + } + } +} + +#[cfg(feature = "epee")] +impl EpeeValue for ConnectionState { + const MARKER: Marker = u8::MARKER; + + fn read(r: &mut B, marker: &Marker) -> error::Result { + let u = u8::read(r, marker)?; + Self::from_u8(u).ok_or(error::Error::Format("u8 was greater than 4")) + } + + fn write(self, w: &mut B) -> error::Result<()> { + let u = self.to_u8(); + u8::write(u, w)?; + Ok(()) + } +} diff --git a/types/src/lib.rs b/types/src/lib.rs index 0fd1ec7..a5a04f9 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -9,20 +9,25 @@ // // Documentation for each module is located in the respective file. 
+mod address_type; mod block_complete_entry; +mod connection_state; mod hard_fork; mod transaction_verification_data; mod types; +pub use address_type::AddressType; pub use block_complete_entry::{BlockCompleteEntry, PrunedTxBlobEntry, TransactionBlobs}; +pub use connection_state::ConnectionState; pub use hard_fork::{HardFork, HardForkError}; pub use transaction_verification_data::{ CachedVerificationState, TransactionVerificationData, TxVersion, }; pub use types::{ - AltBlockInformation, Chain, ChainId, ChainInfo, CoinbaseTxSum, ExtendedBlockHeader, - FeeEstimate, HardForkInfo, MinerData, MinerDataTxBacklogEntry, OutputHistogramEntry, - OutputHistogramInput, OutputOnChain, VerifiedBlockInformation, VerifiedTransactionInformation, + AddAuxPow, AltBlockInformation, AuxPow, Chain, ChainId, ChainInfo, CoinbaseTxSum, + ExtendedBlockHeader, FeeEstimate, HardForkInfo, MinerData, MinerDataTxBacklogEntry, + OutputHistogramEntry, OutputHistogramInput, OutputOnChain, VerifiedBlockInformation, + VerifiedTransactionInformation, }; //---------------------------------------------------------------------------------------------------- Feature-gated diff --git a/types/src/types.rs b/types/src/types.rs index 7d5c377..720ad0a 100644 --- a/types/src/types.rs +++ b/types/src/types.rs @@ -177,8 +177,6 @@ pub struct OutputHistogramEntry { pub struct CoinbaseTxSum { pub emission_amount: u128, pub fee_amount: u128, - pub wide_emission_amount: u128, - pub wide_fee_amount: u128, } /// Data to create a custom block template. @@ -242,7 +240,23 @@ pub struct ChainInfo { pub height: u64, pub length: u64, pub main_chain_parent_block: [u8; 32], - pub wide_difficulty: u128, +} + +/// Used in RPC's `add_aux_pow`. +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct AuxPow { + pub id: [u8; 32], + pub hash: [u8; 32], +} + +/// Used in RPC's `add_aux_pow`. +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct AddAuxPow { + pub blocktemplate_blob: Vec, + pub blockhashing_blob: Vec, + pub merkle_root: [u8; 32], + pub merkle_tree_depth: u64, + pub aux_pow: Vec, } //---------------------------------------------------------------------------------------------------- Tests From b6c4adc83a199886d6f932c1321857fb8a535af5 Mon Sep 17 00:00:00 2001 From: SyntheticBird <118022351+SyntheticBird45@users.noreply.github.com> Date: Sat, 2 Nov 2024 00:45:56 +0000 Subject: [PATCH 04/14] p2p: Implement P2P Bucket data structure (#329) Implements P2P Bucket data structure This commit implements a "Bucket" data structure that is a collection of data that discriminates its items into "buckets" (vector of size N) following a defined function. 
- Implements Bucket data structure and Bucketable trait - Implements Bucketable for Ipv4Addr - Added the crate to the workspace dependencies - Added arrayvec as a dependency --- Cargo.lock | 8 + Cargo.toml | 3 + books/architecture/src/appendix/crates.md | 1 + p2p/bucket/Cargo.toml | 13 ++ p2p/bucket/src/lib.rs | 172 ++++++++++++++++++++++ 5 files changed, 197 insertions(+) create mode 100644 p2p/bucket/Cargo.toml create mode 100644 p2p/bucket/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 7ad2f2a..9a0ebd5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -855,6 +855,14 @@ dependencies = [ "tracing", ] +[[package]] +name = "cuprate-p2p-bucket" +version = "0.1.0" +dependencies = [ + "arrayvec", + "rand", +] + [[package]] name = "cuprate-p2p-core" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index d5aca71..614788d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,7 @@ members = [ "net/wire", "p2p/p2p", "p2p/p2p-core", + "p2p/bucket", "p2p/dandelion-tower", "p2p/async-buffer", "p2p/address-book", @@ -64,6 +65,7 @@ cuprate-levin = { path = "net/levin" ,default-feature cuprate-wire = { path = "net/wire" ,default-features = false} cuprate-p2p = { path = "p2p/p2p" ,default-features = false} cuprate-p2p-core = { path = "p2p/p2p-core" ,default-features = false} +cuprate-p2p-bucket = { path = "p2p/p2p-bucket" ,default-features = false} cuprate-dandelion-tower = { path = "p2p/dandelion-tower" ,default-features = false} cuprate-async-buffer = { path = "p2p/async-buffer" ,default-features = false} cuprate-address-book = { path = "p2p/address-book" ,default-features = false} @@ -80,6 +82,7 @@ cuprate-rpc-interface = { path = "rpc/interface" ,default-feature # External dependencies anyhow = { version = "1.0.89", default-features = false } +arrayvec = { version = "0.7", default-features = false } async-trait = { version = "0.1.82", default-features = false } bitflags = { version = "2.6.0", default-features = false } blake3 = { version = "1", default-features = false } diff --git a/books/architecture/src/appendix/crates.md b/books/architecture/src/appendix/crates.md index fe8f1f0..ac2780e 100644 --- a/books/architecture/src/appendix/crates.md +++ b/books/architecture/src/appendix/crates.md @@ -35,6 +35,7 @@ cargo doc --open --package cuprate-blockchain | [`cuprate-async-buffer`](https://doc.cuprate.org/cuprate_async_buffer) | [`p2p/async-buffer/`](https://github.com/Cuprate/cuprate/tree/main/p2p/async-buffer) | A bounded SPSC, FIFO, asynchronous buffer that supports arbitrary weights for values | [`cuprate-dandelion-tower`](https://doc.cuprate.org/cuprate_dandelion_tower) | [`p2p/dandelion-tower/`](https://github.com/Cuprate/cuprate/tree/main/p2p/dandelion-tower) | TODO | [`cuprate-p2p`](https://doc.cuprate.org/cuprate_p2p) | [`p2p/p2p/`](https://github.com/Cuprate/cuprate/tree/main/p2p/p2p) | TODO +| [`cuprate-p2p-bucket`](https://doc.cuprate.org/cuprate_p2p_bucket) | [`p2p/bucket/`](https://github.com/Cuprate/cuprate/tree/main/p2p/bucket) | A collection data structure discriminating its items into "buckets" of limited size. 
| [`cuprate-p2p-core`](https://doc.cuprate.org/cuprate_p2p_core) | [`p2p/p2p-core/`](https://github.com/Cuprate/cuprate/tree/main/p2p/p2p-core) | TODO ## Storage diff --git a/p2p/bucket/Cargo.toml b/p2p/bucket/Cargo.toml new file mode 100644 index 0000000..1a53e85 --- /dev/null +++ b/p2p/bucket/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "cuprate-p2p-bucket" +version = "0.1.0" +edition = "2021" +license = "MIT" +authors = ["SyntheticBird"] + +[dependencies] +arrayvec = { workspace = true } +rand = { workspace = true, features = ["std", "std_rng"]} + +[lints] +workspace = true diff --git a/p2p/bucket/src/lib.rs b/p2p/bucket/src/lib.rs new file mode 100644 index 0000000..0f73eea --- /dev/null +++ b/p2p/bucket/src/lib.rs @@ -0,0 +1,172 @@ +//! Bucket data structure +//! +//! A collection data structure that discriminates its unique items and place them into "buckets". +//! +//! The item must implement the [`Bucketable`] trait that defines how to create the discriminant +//! from the item type. The data structure will internally contain any item into "buckets" or vectors +//! of sized capacity `N` that regroup all the stored items with this specific discriminant. +//! +//! A practical example of this data structure is for storing `N` amount of IP discriminated by their subnets. +//! You can store in each "buckets" corresponding to a `/16` subnet up to `N` IPs of that subnet. +//! +//! # Example +//! +//! ``` +//! use cuprate_p2p_bucket::Bucket; +//! use std::net::Ipv4Addr; +//! +//! // Create a new bucket that can store at most 2 IPs in a particular `/16` subnet. +//! let mut bucket = Bucket::<2,Ipv4Addr>::new(); +//! +//! // Fulfill the `96.96.0.0/16` bucket. +//! bucket.push("96.96.0.1".parse().unwrap()); +//! bucket.push("96.96.0.2".parse().unwrap()); +//! assert_eq!(2, bucket.len()); +//! assert_eq!(2, bucket.len_bucket(&[96_u8,96_u8]).unwrap()); +//! +//! // Push a new IP from another subnet +//! bucket.push("127.0.0.1".parse().unwrap()); +//! assert_eq!(3, bucket.len()); +//! assert_eq!(2, bucket.len_bucket(&[96_u8,96_u8]).unwrap()); +//! assert_eq!(1, bucket.len_bucket(&[127_u8,0_u8]).unwrap()); +//! +//! // Attempting to push a new IP within `96.96.0.0/16` bucket will return the IP back +//! // as this subnet is already full. +//! let pushed = bucket.push("96.96.0.3".parse().unwrap()); +//! assert!(pushed.is_some()); +//! assert_eq!(2, bucket.len_bucket(&[96_u8,96_u8]).unwrap()); +//! +//! ``` + +use arrayvec::{ArrayVec, CapacityError}; +use rand::random; + +use std::{collections::BTreeMap, net::Ipv4Addr}; + +/// A discriminant that can be computed from the type. +pub trait Bucketable: Sized + Eq + Clone { + /// The type of the discriminant being used in the Binary tree. + type Discriminant: Ord + AsRef<[u8]>; + + /// Method that can compute the discriminant from the item. + fn discriminant(&self) -> Self::Discriminant; +} + +/// A collection data structure discriminating its unique items +/// with a specified method. Limiting the amount of items stored +/// with that discriminant to the const `N`. +pub struct Bucket { + /// The storage of the bucket + storage: BTreeMap>, +} + +impl Bucket { + /// Create a new Bucket + pub const fn new() -> Self { + Self { + storage: BTreeMap::new(), + } + } + + /// Push a new element into the Bucket + /// + /// Will internally create a new vector for each new discriminant being + /// generated from an item. + /// + /// This function WILL NOT push the element if it already exists. + /// + /// Return `None` if the item has been pushed or ignored. 
`Some(I)` if + /// the vector is full. + /// + /// # Example + /// + /// ``` + /// use cuprate_p2p_bucket::Bucket; + /// use std::net::Ipv4Addr; + /// + /// let mut bucket = Bucket::<8,Ipv4Addr>::new(); + /// + /// // Push a first IP address. + /// bucket.push("127.0.0.1".parse().unwrap()); + /// assert_eq!(1, bucket.len()); + /// + /// // Push the same IP address a second time. + /// bucket.push("127.0.0.1".parse().unwrap()); + /// assert_eq!(1, bucket.len()); + /// ``` + pub fn push(&mut self, item: I) -> Option { + let discriminant = item.discriminant(); + + if let Some(vec) = self.storage.get_mut(&discriminant) { + // Push the item if it doesn't exist. + if !vec.contains(&item) { + return vec.try_push(item).err().map(CapacityError::element); + } + } else { + // Initialize the vector if not found. + let mut vec = ArrayVec::::new(); + vec.push(item); + self.storage.insert(discriminant, vec); + } + + None + } + + /// Will attempt to remove an item from the bucket. + pub fn remove(&mut self, item: &I) -> Option { + self.storage.get_mut(&item.discriminant()).and_then(|vec| { + vec.iter() + .enumerate() + .find_map(|(i, v)| (item == v).then_some(i)) + .map(|index| vec.swap_remove(index)) + }) + } + + /// Return the number of item stored within the storage + pub fn len(&self) -> usize { + self.storage.values().map(ArrayVec::len).sum() + } + + /// Return the number of item stored with a specific discriminant. + /// + /// This method returns None if the bucket with this discriminant + /// doesn't exist. + pub fn len_bucket(&self, discriminant: &I::Discriminant) -> Option { + self.storage.get(discriminant).map(ArrayVec::len) + } + + /// Return `true` if the storage contains no items + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Return a reference to an item chosen at random. + /// + /// Repeated use of this function will provide a normal distribution of + /// items based on their discriminants. + pub fn get_random(&mut self) -> Option<&I> { + // Get the total amount of discriminants to explore. + let len = self.storage.len(); + + // Get a random bucket. + let (_, vec) = self.storage.iter().nth(random::() / len).unwrap(); + + // Return a reference chose at random. + vec.get(random::() / vec.len()) + } +} + +impl Default for Bucket { + fn default() -> Self { + Self::new() + } +} + +impl Bucketable for Ipv4Addr { + /// We are discriminating by `/16` subnets. 
+ type Discriminant = [u8; 2]; + + fn discriminant(&self) -> Self::Discriminant { + [self.octets()[0], self.octets()[1]] + } +} From 525e20e841cd6db0422d30d5bde85277c26a947f Mon Sep 17 00:00:00 2001 From: Boog900 Date: Mon, 4 Nov 2024 15:22:43 +0000 Subject: [PATCH 05/14] Fix ci and loosen version requirements (#335) * add deny exception + loosen version requirements * add a comment * remove `expect` --- .github/workflows/audit.yml | 34 --- Cargo.lock | 378 ++++++++++++++++----------------- Cargo.toml | 74 +++---- consensus/src/tests/mock_db.rs | 2 - deny.toml | 4 + 5 files changed, 227 insertions(+), 265 deletions(-) delete mode 100644 .github/workflows/audit.yml diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml deleted file mode 100644 index 84b1995..0000000 --- a/.github/workflows/audit.yml +++ /dev/null @@ -1,34 +0,0 @@ -# This runs `cargo audit` on all dependencies (only if Cargo deps changed) - -name: Audit - -on: - push: - paths: - - '**/Cargo.toml' - - '**/Cargo.lock' - workflow_dispatch: - -env: - CARGO_TERM_COLOR: always - -jobs: - audit: - - runs-on: ubuntu-latest - - steps: - - name: Cache - uses: actions/cache@v4 - with: - path: | - ~/.cargo - target - key: audit - - uses: actions/checkout@v4 - with: - submodules: recursive - - name: Install dependencies - run: cargo install cargo-audit --locked - - name: Audit - run: cargo audit diff --git a/Cargo.lock b/Cargo.lock index 9a0ebd5..b446bf6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] @@ -46,15 +46,15 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anyhow" -version = "1.0.89" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +checksum = "74f37166d7d48a0284b99dd824694c26119c700b53bf0d1540cdb147dbdaaf13" [[package]] name = "arrayref" @@ -70,9 +70,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -81,24 +81,24 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] name = "async-trait" -version = "0.1.82" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" 
+checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -109,15 +109,15 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.7.5" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" dependencies = [ "async-trait", "axum-core", @@ -141,7 +141,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower 0.4.13", + "tower 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "tower-layer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tower-service 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tracing", @@ -149,9 +149,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", @@ -162,7 +162,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.1", "tower-layer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tower-service 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tracing", @@ -292,7 +292,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.77", + "syn", "syn_derive", ] @@ -304,22 +304,22 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytemuck" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" +checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cc8b54b395f2fcfbb3d90c47b01c7f444d94d05bdeb775811dec868ac3bbc26" +checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -330,18 +330,18 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" dependencies = [ "serde", ] [[package]] name = "cc" -version = "1.1.21" +version = "1.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" +checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" dependencies = [ "shlex", ] 
@@ -372,9 +372,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.17" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -382,9 +382,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.17" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstyle", "clap_lex", @@ -392,14 +392,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -559,7 +559,7 @@ dependencies = [ "thiserror", "tokio", "tokio-util", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", ] @@ -598,7 +598,7 @@ dependencies = [ "tempfile", "thread_local", "tokio", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", ] [[package]] @@ -624,7 +624,7 @@ dependencies = [ "thread_local", "tokio", "tokio-test", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", ] @@ -644,7 +644,7 @@ dependencies = [ "thread_local", "tokio", "tokio-util", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", ] @@ -701,7 +701,7 @@ dependencies = [ "thiserror", "tokio", "tokio-util", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", ] @@ -731,7 +731,7 @@ dependencies = [ "futures", "rayon", "serde", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", ] [[package]] @@ -764,7 +764,7 @@ dependencies = [ "sha3", "thiserror", "tokio", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", ] [[package]] @@ -851,7 +851,7 @@ dependencies = [ "tokio-stream", "tokio-test", "tokio-util", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", ] @@ -883,7 +883,7 @@ dependencies = [ "tokio-stream", "tokio-test", "tokio-util", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", ] @@ -912,7 +912,7 @@ dependencies = [ "serde", "serde_json", "tokio", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "ureq", ] @@ -974,7 +974,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", ] [[package]] @@ -1077,7 +1077,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "tower 0.5.1", + "tower 0.5.1 (git+https://github.com/Cuprate/tower.git?rev=6c7faf0)", "tracing", "tracing-subscriber", ] @@ -1108,7 +1108,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -1129,12 +1129,13 @@ dependencies = [ [[package]] name = "dashmap" 
-version = "5.5.3" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" dependencies = [ "cfg-if", - "hashbrown", + "crossbeam-utils", + "hashbrown 0.14.5", "lock_api", "once_cell", "parking_lot_core", @@ -1247,9 +1248,9 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "flate2" -version = "1.0.33" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" +checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", "miniz_oxide", @@ -1291,9 +1292,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1305,9 +1306,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1315,44 +1316,44 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" 
dependencies = [ "futures-channel", "futures-core", @@ -1389,9 +1390,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "groestl" @@ -1441,6 +1442,12 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" + [[package]] name = "heck" version = "0.5.0" @@ -1542,9 +1549,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -1554,9 +1561,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", "futures-channel", @@ -1593,9 +1600,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", @@ -1606,7 +1613,6 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower 0.4.13", "tower-service 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tracing", ] @@ -1646,12 +1652,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.15.0", ] [[package]] @@ -1673,9 +1679,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] @@ -1697,15 +1703,15 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.158" +version = "0.2.161" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" [[package]] name = "libm" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libredox" 
@@ -1992,18 +1998,18 @@ dependencies = [ [[package]] name = "object" -version = "0.36.4" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "openssl-probe" @@ -2098,7 +2104,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -2112,29 +2118,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -2195,9 +2201,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" dependencies = [ "unicode-ident", ] @@ -2224,13 +2230,13 @@ dependencies = [ [[package]] name = "proptest-derive" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf16337405ca084e9c78985114633b6827711d22b9e6ef6c6c0d665eb3f0b6e" +checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn", ] [[package]] @@ -2335,18 +2341,18 @@ dependencies = [ [[package]] name = "redb" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4760ad04a88ef77075ba86ba9ea79b919e6bab29c1764c5747237cd6eaedcaa" +checksum = "84b1de48a7cf7ba193e81e078d17ee2b786236eed1d3f7c60f8a09545efc4925" dependencies = [ "libc", ] [[package]] name = "redox_syscall" -version = "0.5.4" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] @@ -2379,14 +2385,14 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] name = 
"regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "ring" @@ -2420,9 +2426,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" dependencies = [ "bitflags 2.6.0", "errno", @@ -2433,9 +2439,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.13" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "log", "once_cell", @@ -2461,19 +2467,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" [[package]] name = "rustls-webpki" @@ -2488,9 +2493,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "rusty-fork" @@ -2512,9 +2517,9 @@ checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "schannel" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" +checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ "windows-sys 0.59.0", ] @@ -2540,9 +2545,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -2562,9 +2567,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.210" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" dependencies = [ "serde_derive", ] @@ -2580,20 +2585,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.214" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "itoa", "memchr", @@ -2733,7 +2738,7 @@ name = "std-shims" version = "0.1.1" source = "git+https://github.com/Cuprate/serai.git?rev=d5205ce#d5205ce2319e09414eb91d12cf38e83a08165f79" dependencies = [ - "hashbrown", + "hashbrown 0.14.5", "spin", ] @@ -2756,7 +2761,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.77", + "syn", ] [[package]] @@ -2767,20 +2772,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "1.0.109" +version = "2.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.77" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" +checksum = "e89275301d38033efb81a6e60e3497e734dfcc62571f2854bf4b16690398824c" dependencies = [ "proc-macro2", "quote", @@ -2796,7 +2790,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -2828,9 +2822,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", "fastrand", @@ -2841,22 +2835,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "5d171f59dbaa811dbbb1aee1e73db92ec2b122911a48e1390dfe327a821ddede" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "b08be0f17bd307950653ce45db00cd31200d82b624b36e181337d9c7d92765b5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -2892,9 +2886,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.40.0" +version = "1.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" +checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" dependencies = [ "backtrace", "bytes", @@ -2916,7 +2910,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] 
[[package]] @@ -2978,9 +2972,9 @@ checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" [[package]] name = "toml_edit" -version = "0.22.21" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap", "toml_datetime", @@ -2989,14 +2983,14 @@ dependencies = [ [[package]] name = "tower" -version = "0.4.13" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" dependencies = [ "futures-core", "futures-util", - "pin-project", "pin-project-lite", + "sync_wrapper 0.1.2", "tokio", "tower-layer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "tower-service 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3061,7 +3055,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -3119,9 +3113,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" @@ -3217,9 +3211,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if", "once_cell", @@ -3228,24 +3222,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.77", + "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3253,28 +3247,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "webpki-roots" -version = "0.26.5" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ "rustls-pki-types", ] @@ -3341,7 +3335,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -3352,7 +3346,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -3524,9 +3518,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -3564,7 +3558,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] [[package]] @@ -3584,5 +3578,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn", ] diff --git a/Cargo.toml b/Cargo.toml index 614788d..ccc5513 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,55 +81,55 @@ cuprate-rpc-types = { path = "rpc/types" ,default-feature cuprate-rpc-interface = { path = "rpc/interface" ,default-features = false} # External dependencies -anyhow = { version = "1.0.89", default-features = false } +anyhow = { version = "1", default-features = false } arrayvec = { version = "0.7", default-features = false } -async-trait = { version = "0.1.82", default-features = false } -bitflags = { version = "2.6.0", default-features = false } +async-trait = { version = "0.1", default-features = false } +bitflags = { version = "2", default-features = false } blake3 = { version = "1", default-features = false } -borsh = { version = "1.5.1", default-features = false } -bytemuck = { version = "1.18.0", default-features = false } -bytes = { version = "1.7.2", default-features = false } -cfg-if = { version = "1.0.0", default-features = false } -clap = { version = "4.5.17", default-features = false } -chrono = { version = "0.4.38", default-features = false } -crypto-bigint = { version = "0.5.5", default-features = false } -crossbeam = { version = "0.8.4", default-features = false } -const_format = { version = "0.2.33", default-features = false } -curve25519-dalek = { version = "4.1.3", default-features = false } -dashmap = { version = "5.5.3", default-features = false } -dirs = { version = "5.0.1", default-features = false } -futures = { version = "0.3.30", default-features = false } -hex = { version = "0.4.3", default-features = false } +borsh = { version = "1", default-features = false } +bytemuck = { version = "1", default-features = false } +bytes = { version = "1", default-features = false } +cfg-if = { version = "1", default-features = false } +clap = { version = "4", default-features = false } +chrono = { version = "0.4", default-features = false } +crypto-bigint = { version 
= "0.5", default-features = false } +crossbeam = { version = "0.8", default-features = false } +const_format = { version = "0.2", default-features = false } +curve25519-dalek = { version = "4", default-features = false } +dashmap = { version = "6", default-features = false } +dirs = { version = "5", default-features = false } +futures = { version = "0.3", default-features = false } +hex = { version = "0.4", default-features = false } hex-literal = { version = "0.4", default-features = false } -indexmap = { version = "2.5.0", default-features = false } +indexmap = { version = "2", default-features = false } monero-serai = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce", default-features = false } -paste = { version = "1.0.15", default-features = false } -pin-project = { version = "1.1.5", default-features = false } +paste = { version = "1", default-features = false } +pin-project = { version = "1", default-features = false } randomx-rs = { git = "https://github.com/Cuprate/randomx-rs.git", rev = "0028464", default-features = false } -rand = { version = "0.8.5", default-features = false } -rand_distr = { version = "0.4.3", default-features = false } -rayon = { version = "1.10.0", default-features = false } -serde_bytes = { version = "0.11.15", default-features = false } -serde_json = { version = "1.0.128", default-features = false } -serde = { version = "1.0.210", default-features = false } -strum = { version = "0.26.3", default-features = false } -thiserror = { version = "1.0.63", default-features = false } -thread_local = { version = "1.1.8", default-features = false } -tokio-util = { version = "0.7.12", default-features = false } -tokio-stream = { version = "0.1.16", default-features = false } -tokio = { version = "1.40.0", default-features = false } +rand = { version = "0.8", default-features = false } +rand_distr = { version = "0.4", default-features = false } +rayon = { version = "1", default-features = false } +serde_bytes = { version = "0.11", default-features = false } +serde_json = { version = "1", default-features = false } +serde = { version = "1", default-features = false } +strum = { version = "0.26", default-features = false } +thiserror = { version = "1", default-features = false } +thread_local = { version = "1", default-features = false } +tokio-util = { version = "0.7", default-features = false } +tokio-stream = { version = "0.1", default-features = false } +tokio = { version = "1", default-features = false } tower = { git = "https://github.com/Cuprate/tower.git", rev = "6c7faf0", default-features = false } # -tracing-subscriber = { version = "0.3.18", default-features = false } -tracing = { version = "0.1.40", default-features = false } +tracing-subscriber = { version = "0.3", default-features = false } +tracing = { version = "0.1", default-features = false } ## workspace.dev-dependencies monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" } monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" } tempfile = { version = "3" } -pretty_assertions = { version = "1.4.1" } +pretty_assertions = { version = "1" } proptest = { version = "1" } -proptest-derive = { version = "0.4.0" } -tokio-test = { version = "0.4.4" } +proptest-derive = { version = "0.5" } +tokio-test = { version = "0.4" } ## TODO: ## Potential dependencies. 
diff --git a/consensus/src/tests/mock_db.rs b/consensus/src/tests/mock_db.rs index 5ca53d8..bf005d2 100644 --- a/consensus/src/tests/mock_db.rs +++ b/consensus/src/tests/mock_db.rs @@ -1,5 +1,3 @@ -#![expect(non_local_definitions, reason = "proptest macro")] - use std::{ future::Future, pin::Pin, diff --git a/deny.toml b/deny.toml index f469d06..e54d116 100644 --- a/deny.toml +++ b/deny.toml @@ -81,6 +81,9 @@ ignore = [ #{ id = "RUSTSEC-0000-0000", reason = "you can specify a reason the advisory is ignored" }, #"a-crate-that-is-yanked@0.1.1", # you can also ignore yanked crate versions if you wish #{ crate = "a-crate-that-is-yanked@0.1.1", reason = "you can specify why you are ignoring the yanked crate" }, + + # TODO: check this is sorted before a beta release. + { id = "RUSTSEC-2024-0370", reason = "unmaintained crate, not necessarily vulnerable yet." } ] # If this is true, then cargo deny will use the git executable to fetch advisory database. # If this is false, then it uses a built-in git library. @@ -110,6 +113,7 @@ allow = [ "Apache-2.0", # https://tldrlegal.com/license/apache-license-2.0-(apache-2.0) "MPL-2.0", # https://www.mozilla.org/en-US/MPL/2.0/FAQ/ "BSL-1.0", # https://tldrlegal.com/license/boost-software-license-1.0-explained + "Zlib", # https://spdx.org/licenses/Zlib.html # OpenSSL 3.0+ uses Apache-2.0 # OpenSSL 1.x.x uses https://www.openssl.org/source/license-openssl-ssleay.txt From 5a5f88cb139c9e64b486060e34d59e94c0d8a433 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Mon, 11 Nov 2024 23:16:08 +0000 Subject: [PATCH 06/14] types: fix pruned `BlockCompleteEntry` (#338) fix pruned `BlockCompleteEntry` --- types/src/block_complete_entry.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/types/src/block_complete_entry.rs b/types/src/block_complete_entry.rs index 77ed82d..af5fa88 100644 --- a/types/src/block_complete_entry.rs +++ b/types/src/block_complete_entry.rs @@ -136,7 +136,7 @@ impl TransactionBlobs { #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct PrunedTxBlobEntry { /// The transaction. - pub tx: Bytes, + pub blob: Bytes, /// The prunable transaction hash. 
pub prunable_hash: ByteArray<32>, } @@ -144,7 +144,7 @@ pub struct PrunedTxBlobEntry { #[cfg(feature = "epee")] epee_object!( PrunedTxBlobEntry, - tx: Bytes, + blob: Bytes, prunable_hash: ByteArray<32>, ); From 0f1ad6db1b1a3c5b3c66086ca1592a28d2012f3e Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 13 Nov 2024 06:01:15 -0500 Subject: [PATCH 07/14] Cargo.toml: move commas (#340) cargo.toml: move comma --- Cargo.toml | 54 +++++++++++++++++++++++++++--------------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ccc5513..0f460e8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,33 +52,33 @@ opt-level = 3 [workspace.dependencies] # Cuprate members -cuprate-fast-sync = { path = "consensus/fast-sync" ,default-features = false} -cuprate-consensus-rules = { path = "consensus/rules" ,default-features = false} -cuprate-constants = { path = "constants" ,default-features = false} -cuprate-consensus = { path = "consensus" ,default-features = false} -cuprate-consensus-context = { path = "consensus/context" ,default-features = false} -cuprate-cryptonight = { path = "cryptonight" ,default-features = false} -cuprate-helper = { path = "helper" ,default-features = false} -cuprate-epee-encoding = { path = "net/epee-encoding" ,default-features = false} -cuprate-fixed-bytes = { path = "net/fixed-bytes" ,default-features = false} -cuprate-levin = { path = "net/levin" ,default-features = false} -cuprate-wire = { path = "net/wire" ,default-features = false} -cuprate-p2p = { path = "p2p/p2p" ,default-features = false} -cuprate-p2p-core = { path = "p2p/p2p-core" ,default-features = false} -cuprate-p2p-bucket = { path = "p2p/p2p-bucket" ,default-features = false} -cuprate-dandelion-tower = { path = "p2p/dandelion-tower" ,default-features = false} -cuprate-async-buffer = { path = "p2p/async-buffer" ,default-features = false} -cuprate-address-book = { path = "p2p/address-book" ,default-features = false} -cuprate-blockchain = { path = "storage/blockchain" ,default-features = false} -cuprate-database = { path = "storage/database" ,default-features = false} -cuprate-database-service = { path = "storage/service" ,default-features = false} -cuprate-txpool = { path = "storage/txpool" ,default-features = false} -cuprate-pruning = { path = "pruning" ,default-features = false} -cuprate-test-utils = { path = "test-utils" ,default-features = false} -cuprate-types = { path = "types" ,default-features = false} -cuprate-json-rpc = { path = "rpc/json-rpc" ,default-features = false} -cuprate-rpc-types = { path = "rpc/types" ,default-features = false} -cuprate-rpc-interface = { path = "rpc/interface" ,default-features = false} +cuprate-fast-sync = { path = "consensus/fast-sync", default-features = false } +cuprate-consensus-rules = { path = "consensus/rules", default-features = false } +cuprate-constants = { path = "constants", default-features = false } +cuprate-consensus = { path = "consensus", default-features = false } +cuprate-consensus-context = { path = "consensus/context", default-features = false } +cuprate-cryptonight = { path = "cryptonight", default-features = false } +cuprate-helper = { path = "helper", default-features = false } +cuprate-epee-encoding = { path = "net/epee-encoding", default-features = false } +cuprate-fixed-bytes = { path = "net/fixed-bytes", default-features = false } +cuprate-levin = { path = "net/levin", default-features = false } +cuprate-wire = { path = "net/wire", default-features = false } +cuprate-p2p = { path = "p2p/p2p", 
default-features = false } +cuprate-p2p-core = { path = "p2p/p2p-core", default-features = false } +cuprate-p2p-bucket = { path = "p2p/p2p-bucket", default-features = false } +cuprate-dandelion-tower = { path = "p2p/dandelion-tower", default-features = false } +cuprate-async-buffer = { path = "p2p/async-buffer", default-features = false } +cuprate-address-book = { path = "p2p/address-book", default-features = false } +cuprate-blockchain = { path = "storage/blockchain", default-features = false } +cuprate-database = { path = "storage/database", default-features = false } +cuprate-database-service = { path = "storage/service", default-features = false } +cuprate-txpool = { path = "storage/txpool", default-features = false } +cuprate-pruning = { path = "pruning", default-features = false } +cuprate-test-utils = { path = "test-utils", default-features = false } +cuprate-types = { path = "types", default-features = false } +cuprate-json-rpc = { path = "rpc/json-rpc", default-features = false } +cuprate-rpc-types = { path = "rpc/types", default-features = false } +cuprate-rpc-interface = { path = "rpc/interface", default-features = false } # External dependencies anyhow = { version = "1", default-features = false } From 241088e2736e8b5f29d2703a6f1e846f6f437867 Mon Sep 17 00:00:00 2001 From: Boog900 Date: Sun, 17 Nov 2024 20:32:41 +0000 Subject: [PATCH 08/14] Wire: fix IPv4 Endianness (#342) * fix IPv4 Endianness * fix import order --- net/wire/src/network_address.rs | 6 ++++-- net/wire/src/network_address/epee_builder.rs | 12 ++++++++---- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/net/wire/src/network_address.rs b/net/wire/src/network_address.rs index ad599b7..3e15c46 100644 --- a/net/wire/src/network_address.rs +++ b/net/wire/src/network_address.rs @@ -17,10 +17,12 @@ //! Monero network. Core Monero has 4 main addresses: IPv4, IPv6, Tor, //! I2p. Currently this module only has IPv(4/6). //! 
-use bytes::BufMut; -use cuprate_epee_encoding::EpeeObject; use std::{hash::Hash, net, net::SocketAddr}; +use bytes::BufMut; + +use cuprate_epee_encoding::EpeeObject; + mod epee_builder; use epee_builder::*; diff --git a/net/wire/src/network_address/epee_builder.rs b/net/wire/src/network_address/epee_builder.rs index c1d1742..bd481a5 100644 --- a/net/wire/src/network_address/epee_builder.rs +++ b/net/wire/src/network_address/epee_builder.rs @@ -1,9 +1,10 @@ -use bytes::Buf; use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; -use cuprate_epee_encoding::{epee_object, EpeeObjectBuilder}; +use bytes::Buf; use thiserror::Error; +use cuprate_epee_encoding::{epee_object, EpeeObjectBuilder}; + use crate::NetworkAddress; #[derive(Default)] @@ -77,7 +78,7 @@ impl From for TaggedNetworkAddress { SocketAddr::V4(addr) => Self { ty: Some(1), addr: Some(AllFieldsNetworkAddress { - m_ip: Some(u32::from_be_bytes(addr.ip().octets())), + m_ip: Some(u32::from_le_bytes(addr.ip().octets())), m_port: Some(addr.port()), addr: None, }), @@ -112,7 +113,10 @@ epee_object!( impl AllFieldsNetworkAddress { fn try_into_network_address(self, ty: u8) -> Option { Some(match ty { - 1 => NetworkAddress::from(SocketAddrV4::new(Ipv4Addr::from(self.m_ip?), self.m_port?)), + 1 => NetworkAddress::from(SocketAddrV4::new( + Ipv4Addr::from(self.m_ip?.to_le_bytes()), + self.m_port?, + )), 2 => NetworkAddress::from(SocketAddrV6::new( Ipv6Addr::from(self.addr?), self.m_port?, From e8598a082d5df0660c5f52a2375c68777408b9f4 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Mon, 18 Nov 2024 10:21:52 -0500 Subject: [PATCH 09/14] books/architecture: add `Monero oddities` (#343) * add `oddities/` * swap `Expected`, `Why` --- books/architecture/src/SUMMARY.md | 5 +++ books/architecture/src/oddities/intro.md | 37 ++++++++++++++++++++++ books/architecture/src/oddities/le-ipv4.md | 24 ++++++++++++++ 3 files changed, 66 insertions(+) create mode 100644 books/architecture/src/oddities/intro.md create mode 100644 books/architecture/src/oddities/le-ipv4.md diff --git a/books/architecture/src/SUMMARY.md b/books/architecture/src/SUMMARY.md index bf66860..0961d8f 100644 --- a/books/architecture/src/SUMMARY.md +++ b/books/architecture/src/SUMMARY.md @@ -157,6 +157,11 @@ --- +- [🟢 Monero oddities](oddities/intro.md) + - [🟡 Little-endian IPv4 addresses](oddities/le-ipv4.md) + +--- + - [⚪️ Appendix](appendix/intro.md) - [🟢 Crates](appendix/crates.md) - [🔴 Contributing](appendix/contributing.md) diff --git a/books/architecture/src/oddities/intro.md b/books/architecture/src/oddities/intro.md new file mode 100644 index 0000000..c0275b3 --- /dev/null +++ b/books/architecture/src/oddities/intro.md @@ -0,0 +1,37 @@ +# Monero oddities +This section is a list of any peculiar, interesting, +or non-standard behavior that Monero has that is not +planned on being changed or deprecated. + +This section exists to hold all the small yet noteworthy knowledge in one place, +instead of in any single contributor's mind. + +These are usually behaviors stemming from implementation rather than protocol/cryptography. + +## Formatting +This is the markdown formatting for each entry in this section. + +If applicable, consider using this formatting when adding to this section. + +```md +# + +## What +A detailed description of the behavior. + +## Expected +The norm or standard behavior that is usually expected. + +## Why +The reasoning behind why this behavior exists and/or +any links to more detailed discussion on the behavior. 
+ +## Affects +A (potentially non-exhaustive) list of places that this behavior can/does affect. + +## Example +An example link or section of code where the behavior occurs. + +## Source +A link to original `monerod` code that defines the behavior. +``` \ No newline at end of file diff --git a/books/architecture/src/oddities/le-ipv4.md b/books/architecture/src/oddities/le-ipv4.md new file mode 100644 index 0000000..f64c1d7 --- /dev/null +++ b/books/architecture/src/oddities/le-ipv4.md @@ -0,0 +1,24 @@ +# Little-endian IPv4 addresses + +## What +Monero encodes IPv4 addresses in [little-endian](https://en.wikipedia.org/wiki/Endianness) byte order. + +## Expected +In general, [networking-related protocols/code use _networking order_ (big-endian)](https://en.wikipedia.org/wiki/Endianness#Networking). + +## Why +TODO + +- +- + +## Affects +Any representation and (de)serialization of IPv4 addresses must keep little +endian in-mind, e.g. the P2P wire format or `int` encoded IPv4 addresses in RPC. + +For example, [the `ip` field in `set_bans`](https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#set_bans). + +For Cuprate, this means Rust's [`Ipv4Addr::from_bits/from`](https://doc.rust-lang.org/1.82.0/src/core/net/ip_addr.rs.html#1182) cannot be used in these cases as [it assumes big-endian encoding](https://doc.rust-lang.org/1.82.0/src/core/net/ip_addr.rs.html#540). + +## Source +- From c54bb0c8b20b209c8e841ab1187c0bf772ecc33c Mon Sep 17 00:00:00 2001 From: Boog900 Date: Wed, 20 Nov 2024 01:37:52 +0000 Subject: [PATCH 10/14] P2P: Change `ClientPool` to `PeerSet` (#337) * add WeakClient * todo * client pool -> peer set * more peer set changes * fix cuprated builds * add docs * more docs + better disconnect handling * more docs * fix imports * review fixes --- Cargo.lock | 1 - binaries/cuprated/src/blockchain/syncer.rs | 27 ++- binaries/cuprated/src/txpool/dandelion.rs | 2 +- .../src/txpool/dandelion/stem_service.rs | 71 ++++-- p2p/p2p-core/src/client.rs | 13 ++ p2p/p2p-core/src/client/weak.rs | 114 +++++++++ p2p/p2p/Cargo.toml | 4 +- p2p/p2p/src/block_downloader.rs | 68 +++--- .../src/block_downloader/download_batch.rs | 8 +- p2p/p2p/src/block_downloader/request_chain.rs | 24 +- p2p/p2p/src/block_downloader/tests.rs | 15 +- p2p/p2p/src/client_pool.rs | 188 --------------- p2p/p2p/src/client_pool/disconnect_monitor.rs | 83 ------- p2p/p2p/src/client_pool/drop_guard_client.rs | 41 ---- p2p/p2p/src/connection_maintainer.rs | 11 +- p2p/p2p/src/inbound_server.rs | 9 +- p2p/p2p/src/lib.rs | 38 +-- p2p/p2p/src/peer_set.rs | 217 ++++++++++++++++++ p2p/p2p/src/peer_set/client_wrappers.rs | 86 +++++++ 19 files changed, 602 insertions(+), 418 deletions(-) create mode 100644 p2p/p2p-core/src/client/weak.rs delete mode 100644 p2p/p2p/src/client_pool.rs delete mode 100644 p2p/p2p/src/client_pool/disconnect_monitor.rs delete mode 100644 p2p/p2p/src/client_pool/drop_guard_client.rs create mode 100644 p2p/p2p/src/peer_set.rs create mode 100644 p2p/p2p/src/peer_set/client_wrappers.rs diff --git a/Cargo.lock b/Cargo.lock index b446bf6..a947a15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -837,7 +837,6 @@ dependencies = [ "cuprate-test-utils", "cuprate-types", "cuprate-wire", - "dashmap", "futures", "indexmap", "monero-serai", diff --git a/binaries/cuprated/src/blockchain/syncer.rs b/binaries/cuprated/src/blockchain/syncer.rs index 913c983..69ad330 100644 --- a/binaries/cuprated/src/blockchain/syncer.rs +++ b/binaries/cuprated/src/blockchain/syncer.rs @@ -12,7 +12,7 @@ use tracing::instrument; use 
cuprate_consensus::{BlockChainContext, BlockChainContextRequest, BlockChainContextResponse};
 use cuprate_p2p::{
     block_downloader::{BlockBatch, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse},
-    NetworkInterface,
+    NetworkInterface, PeerSetRequest, PeerSetResponse,
 };
 use cuprate_p2p_core::ClearNet;
@@ -28,15 +28,11 @@ pub enum SyncerError {
 }
 
 /// The syncer tasks that makes sure we are fully synchronised with our connected peers.
-#[expect(
-    clippy::significant_drop_tightening,
-    reason = "Client pool which will be removed"
-)]
 #[instrument(level = "debug", skip_all)]
 pub async fn syncer<C, CN>(
     mut context_svc: C,
     our_chain: CN,
-    clearnet_interface: NetworkInterface<ClearNet>,
+    mut clearnet_interface: NetworkInterface<ClearNet>,
     incoming_block_batch_tx: mpsc::Sender<BlockBatch>,
     stop_current_block_downloader: Arc<Notify>,
     block_downloader_config: BlockDownloaderConfig,
@@ -67,8 +63,6 @@ where
         unreachable!();
     };
 
-    let client_pool = clearnet_interface.client_pool();
-
     tracing::debug!("Waiting for new sync info in top sync channel");
 
     loop {
@@ -79,9 +73,20 @@ where
         check_update_blockchain_context(&mut context_svc, &mut blockchain_ctx).await?;
         let raw_blockchain_context = blockchain_ctx.unchecked_blockchain_context();
 
-        if !client_pool.contains_client_with_more_cumulative_difficulty(
-            raw_blockchain_context.cumulative_difficulty,
-        ) {
+        let PeerSetResponse::MostPoWSeen {
+            cumulative_difficulty,
+            ..
+        } = clearnet_interface
+            .peer_set()
+            .ready()
+            .await?
+            .call(PeerSetRequest::MostPoWSeen)
+            .await?
+        else {
+            unreachable!();
+        };
+
+        if cumulative_difficulty <= raw_blockchain_context.cumulative_difficulty {
             continue;
         }
 
diff --git a/binaries/cuprated/src/txpool/dandelion.rs b/binaries/cuprated/src/txpool/dandelion.rs
index d791b62..00d9f5a 100644
--- a/binaries/cuprated/src/txpool/dandelion.rs
+++ b/binaries/cuprated/src/txpool/dandelion.rs
@@ -59,7 +59,7 @@ pub fn dandelion_router(clear_net: NetworkInterface<ClearNet>) -> ConcreteDandel
         diffuse_service::DiffuseService {
             clear_net_broadcast_service: clear_net.broadcast_svc(),
         },
-        stem_service::OutboundPeerStream { clear_net },
+        stem_service::OutboundPeerStream::new(clear_net),
         DANDELION_CONFIG,
     )
 }
diff --git a/binaries/cuprated/src/txpool/dandelion/stem_service.rs b/binaries/cuprated/src/txpool/dandelion/stem_service.rs
index 5c0ba65..2debfd4 100644
--- a/binaries/cuprated/src/txpool/dandelion/stem_service.rs
+++ b/binaries/cuprated/src/txpool/dandelion/stem_service.rs
@@ -1,14 +1,15 @@
 use std::{
+    future::Future,
     pin::Pin,
-    task::{Context, Poll},
+    task::{ready, Context, Poll},
 };
 
 use bytes::Bytes;
-use futures::Stream;
+use futures::{future::BoxFuture, FutureExt, Stream};
 use tower::Service;
 
 use cuprate_dandelion_tower::{traits::StemRequest, OutboundPeer};
-use cuprate_p2p::{ClientPoolDropGuard, NetworkInterface};
+use cuprate_p2p::{ClientDropGuard, NetworkInterface, PeerSetRequest, PeerSetResponse};
 use cuprate_p2p_core::{
     client::{Client, InternalPeerID},
     ClearNet, NetworkZone, PeerRequest, ProtocolRequest,
 };
@@ -19,7 +20,17 @@ use crate::{p2p::CrossNetworkInternalPeerId, txpool::dandelion::DandelionTx};
 
 /// The dandelion outbound peer stream.
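+/// Rather than holding a peer itself, this asks the clear-net peer set for a
+/// stem peer whenever the router polls for a new outbound peer (see
+/// `OutboundPeerStreamState` below).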
 pub struct OutboundPeerStream {
-    pub clear_net: NetworkInterface<ClearNet>,
+    clear_net: NetworkInterface<ClearNet>,
+    state: OutboundPeerStreamState,
 }
+
+impl OutboundPeerStream {
+    pub const fn new(clear_net: NetworkInterface<ClearNet>) -> Self {
+        Self {
+            clear_net,
+            state: OutboundPeerStreamState::Standby,
+        }
+    }
+}
 
 impl Stream for OutboundPeerStream {
@@ -28,23 +39,49 @@ impl Stream for OutboundPeerStream {
         tower::BoxError,
     >;
 
-    fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
-        // TODO: make the outbound peer choice random.
-        Poll::Ready(Some(Ok(self
-            .clear_net
-            .client_pool()
-            .outbound_client()
-            .map_or(OutboundPeer::Exhausted, |client| {
-                OutboundPeer::Peer(
-                    CrossNetworkInternalPeerId::ClearNet(client.info.id),
-                    StemPeerService(client),
-                )
-            }))))
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        loop {
+            match &mut self.state {
+                OutboundPeerStreamState::Standby => {
+                    let peer_set = self.clear_net.peer_set();
+                    let res = ready!(peer_set.poll_ready(cx));
+
+                    self.state = OutboundPeerStreamState::AwaitingPeer(
+                        peer_set.call(PeerSetRequest::StemPeer).boxed(),
+                    );
+                }
+                OutboundPeerStreamState::AwaitingPeer(fut) => {
+                    let res = ready!(fut.poll_unpin(cx));
+
+                    return Poll::Ready(Some(res.map(|res| {
+                        let PeerSetResponse::StemPeer(stem_peer) = res else {
+                            unreachable!()
+                        };
+
+                        match stem_peer {
+                            Some(peer) => OutboundPeer::Peer(
+                                CrossNetworkInternalPeerId::ClearNet(peer.info.id),
+                                StemPeerService(peer),
+                            ),
+                            None => OutboundPeer::Exhausted,
+                        }
+                    })));
+                }
+            }
+        }
     }
 }
 
+/// The state of the [`OutboundPeerStream`].
+enum OutboundPeerStreamState {
+    /// Standby state.
+    Standby,
+    /// Awaiting a response from the peer-set.
+    AwaitingPeer(BoxFuture<'static, Result<PeerSetResponse<ClearNet>, tower::BoxError>>),
+}
+
 /// The stem service, used to send stem txs.
-pub struct StemPeerService<N>(ClientPoolDropGuard<N>);
+pub struct StemPeerService<N>(ClientDropGuard<N>);
 
 impl<N: NetworkZone> Service<StemRequest<DandelionTx>> for StemPeerService<N> {
     type Response = <Client<N> as Service<PeerRequest>>::Response;
diff --git a/p2p/p2p-core/src/client.rs b/p2p/p2p-core/src/client.rs
index 73b33ba..f2fde67 100644
--- a/p2p/p2p-core/src/client.rs
+++ b/p2p/p2p-core/src/client.rs
@@ -27,9 +27,11 @@ mod connector;
 pub mod handshaker;
 mod request_handler;
 mod timeout_monitor;
+mod weak;
 
 pub use connector::{ConnectRequest, Connector};
 pub use handshaker::{DoHandshakeRequest, HandshakeError, HandshakerBuilder};
+pub use weak::WeakClient;
 
 /// An internal identifier for a given peer, will be their address if known
 /// or a random u128 if not.
@@ -128,6 +130,17 @@ impl<Z: NetworkZone> Client<Z> {
         }
         .into()
     }
+
+    /// Create a [`WeakClient`] for this [`Client`].
+    pub fn downgrade(&self) -> WeakClient<Z> {
+        WeakClient {
+            info: self.info.clone(),
+            connection_tx: self.connection_tx.downgrade(),
+            semaphore: self.semaphore.clone(),
+            permit: None,
+            error: self.error.clone(),
+        }
+    }
 }
 
 impl<Z: NetworkZone> Service<PeerRequest> for Client<Z> {
diff --git a/p2p/p2p-core/src/client/weak.rs b/p2p/p2p-core/src/client/weak.rs
new file mode 100644
index 0000000..90f25dd
--- /dev/null
+++ b/p2p/p2p-core/src/client/weak.rs
@@ -0,0 +1,114 @@
+use std::task::{ready, Context, Poll};
+
+use futures::channel::oneshot;
+use tokio::sync::{mpsc, OwnedSemaphorePermit};
+use tokio_util::sync::PollSemaphore;
+use tower::Service;
+
+use cuprate_helper::asynch::InfallibleOneshotReceiver;
+
+use crate::{
+    client::{connection, PeerInformation},
+    NetworkZone, PeerError, PeerRequest, PeerResponse, SharedError,
+};
+
+/// A weak handle to a [`Client`](super::Client).
+///
+/// When this is dropped the peer will not be disconnected.
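+///
+/// A minimal usage sketch (assuming an already-connected [`Client`](super::Client)
+/// named `client`; the variable names are illustrative only):
+///
+/// ```rust,ignore
+/// // Obtain a weak handle; it does not keep the connection alive on its own.
+/// let weak = client.downgrade();
+///
+/// // Dropping the weak handle leaves the peer connected.
+/// drop(weak);
+/// ```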
+pub struct WeakClient { + /// Information on the connected peer. + pub info: PeerInformation, + + /// The channel to the [`Connection`](connection::Connection) task. + pub(super) connection_tx: mpsc::WeakSender, + + /// The semaphore that limits the requests sent to the peer. + pub(super) semaphore: PollSemaphore, + /// A permit for the semaphore, will be [`Some`] after `poll_ready` returns ready. + pub(super) permit: Option, + + /// The error slot shared between the [`Client`] and [`Connection`](connection::Connection). + pub(super) error: SharedError, +} + +impl WeakClient { + /// Internal function to set an error on the [`SharedError`]. + fn set_err(&self, err: PeerError) -> tower::BoxError { + let err_str = err.to_string(); + match self.error.try_insert_err(err) { + Ok(()) => err_str, + Err(e) => e.to_string(), + } + .into() + } +} + +impl Service for WeakClient { + type Response = PeerResponse; + type Error = tower::BoxError; + type Future = InfallibleOneshotReceiver>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + if let Some(err) = self.error.try_get_err() { + return Poll::Ready(Err(err.to_string().into())); + } + + if self.connection_tx.strong_count() == 0 { + let err = self.set_err(PeerError::ClientChannelClosed); + return Poll::Ready(Err(err)); + } + + if self.permit.is_some() { + return Poll::Ready(Ok(())); + } + + let permit = ready!(self.semaphore.poll_acquire(cx)) + .expect("Client semaphore should not be closed!"); + + self.permit = Some(permit); + + Poll::Ready(Ok(())) + } + + #[expect(clippy::significant_drop_tightening)] + fn call(&mut self, request: PeerRequest) -> Self::Future { + let permit = self + .permit + .take() + .expect("poll_ready did not return ready before call to call"); + + let (tx, rx) = oneshot::channel(); + let req = connection::ConnectionTaskRequest { + response_channel: tx, + request, + permit: Some(permit), + }; + + match self.connection_tx.upgrade() { + None => { + self.set_err(PeerError::ClientChannelClosed); + + let resp = Err(PeerError::ClientChannelClosed.into()); + drop(req.response_channel.send(resp)); + } + Some(sender) => { + if let Err(e) = sender.try_send(req) { + // The connection task could have closed between a call to `poll_ready` and the call to + // `call`, which means if we don't handle the error here the receiver would panic. 
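+ // Both error variants hand the request back, so its response channel can still
+ // be used to report the error to the caller.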
+ use mpsc::error::TrySendError; + + match e { + TrySendError::Closed(req) | TrySendError::Full(req) => { + self.set_err(PeerError::ClientChannelClosed); + + let resp = Err(PeerError::ClientChannelClosed.into()); + drop(req.response_channel.send(resp)); + } + } + } + } + } + + rx.into() + } +} diff --git a/p2p/p2p/Cargo.toml b/p2p/p2p/Cargo.toml index 866fb91..e6ebccb 100644 --- a/p2p/p2p/Cargo.toml +++ b/p2p/p2p/Cargo.toml @@ -20,12 +20,12 @@ monero-serai = { workspace = true, features = ["std"] } tower = { workspace = true, features = ["buffer"] } tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } -rayon = { workspace = true } tokio-util = { workspace = true } +rayon = { workspace = true } tokio-stream = { workspace = true, features = ["sync", "time"] } futures = { workspace = true, features = ["std"] } pin-project = { workspace = true } -dashmap = { workspace = true } +indexmap = { workspace = true, features = ["std"] } thiserror = { workspace = true } bytes = { workspace = true, features = ["std"] } diff --git a/p2p/p2p/src/block_downloader.rs b/p2p/p2p/src/block_downloader.rs index fcc9eb6..faac4d5 100644 --- a/p2p/p2p/src/block_downloader.rs +++ b/p2p/p2p/src/block_downloader.rs @@ -8,7 +8,6 @@ use std::{ cmp::{max, min, Reverse}, collections::{BTreeMap, BinaryHeap}, - sync::Arc, time::Duration, }; @@ -18,7 +17,7 @@ use tokio::{ task::JoinSet, time::{interval, timeout, MissedTickBehavior}, }; -use tower::{Service, ServiceExt}; +use tower::{util::BoxCloneService, Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; use cuprate_async_buffer::{BufferAppender, BufferStream}; @@ -27,11 +26,11 @@ use cuprate_p2p_core::{handles::ConnectionHandle, NetworkZone}; use cuprate_pruning::PruningSeed; use crate::{ - client_pool::{ClientPool, ClientPoolDropGuard}, constants::{ BLOCK_DOWNLOADER_REQUEST_TIMEOUT, EMPTY_CHAIN_ENTRIES_BEFORE_TOP_ASSUMED, LONG_BAN, MAX_BLOCK_BATCH_LEN, MAX_DOWNLOAD_FAILURES, }, + peer_set::ClientDropGuard, }; mod block_queue; @@ -41,6 +40,7 @@ mod request_chain; #[cfg(test)] mod tests; +use crate::peer_set::{PeerSetRequest, PeerSetResponse}; use block_queue::{BlockQueue, ReadyQueueBatch}; use chain_tracker::{BlocksToRetrieve, ChainEntry, ChainTracker}; use download_batch::download_batch_task; @@ -135,7 +135,7 @@ pub enum ChainSvcResponse { /// call this function again, so it can start the search again. #[instrument(level = "error", skip_all, name = "block_downloader")] pub fn download_blocks( - client_pool: Arc>, + peer_set: BoxCloneService, tower::BoxError>, our_chain_svc: C, config: BlockDownloaderConfig, ) -> BufferStream @@ -147,8 +147,7 @@ where { let (buffer_appender, buffer_stream) = cuprate_async_buffer::new_buffer(config.buffer_size); - let block_downloader = - BlockDownloader::new(client_pool, our_chain_svc, buffer_appender, config); + let block_downloader = BlockDownloader::new(peer_set, our_chain_svc, buffer_appender, config); tokio::spawn( block_downloader @@ -186,8 +185,8 @@ where /// - download an already requested batch of blocks (this might happen due to an error in the previous request /// or because the queue of ready blocks is too large, so we need the oldest block to clear it). struct BlockDownloader { - /// The client pool. - client_pool: Arc>, + /// The peer set. + peer_set: BoxCloneService, tower::BoxError>, /// The service that holds our current chain state. our_chain_svc: C, @@ -208,7 +207,7 @@ struct BlockDownloader { /// /// Returns a result of the chain entry or an error. 
#[expect(clippy::type_complexity)] - chain_entry_task: JoinSet, ChainEntry), BlockDownloadError>>, + chain_entry_task: JoinSet, ChainEntry), BlockDownloadError>>, /// The current inflight requests. /// @@ -235,13 +234,13 @@ where { /// Creates a new [`BlockDownloader`] fn new( - client_pool: Arc>, + peer_set: BoxCloneService, tower::BoxError>, our_chain_svc: C, buffer_appender: BufferAppender, config: BlockDownloaderConfig, ) -> Self { Self { - client_pool, + peer_set, our_chain_svc, amount_of_blocks_to_request: config.initial_batch_size, amount_of_blocks_to_request_updated_at: 0, @@ -259,7 +258,7 @@ where fn check_pending_peers( &mut self, chain_tracker: &mut ChainTracker, - pending_peers: &mut BTreeMap>>, + pending_peers: &mut BTreeMap>>, ) { tracing::debug!("Checking if we can give any work to pending peers."); @@ -286,11 +285,11 @@ where /// This function will find the batch(es) that we are waiting on to clear our ready queue and sends another request /// for them. /// - /// Returns the [`ClientPoolDropGuard`] back if it doesn't have the batch according to its pruning seed. + /// Returns the [`ClientDropGuard`] back if it doesn't have the batch according to its pruning seed. fn request_inflight_batch_again( &mut self, - client: ClientPoolDropGuard, - ) -> Option> { + client: ClientDropGuard, + ) -> Option> { tracing::debug!( "Requesting an inflight batch, current ready queue size: {}", self.block_queue.size() @@ -336,13 +335,13 @@ where /// /// The batch requested will depend on our current state, failed batches will be prioritised. /// - /// Returns the [`ClientPoolDropGuard`] back if it doesn't have the data we currently need according + /// Returns the [`ClientDropGuard`] back if it doesn't have the data we currently need according /// to its pruning seed. fn request_block_batch( &mut self, chain_tracker: &mut ChainTracker, - client: ClientPoolDropGuard, - ) -> Option> { + client: ClientDropGuard, + ) -> Option> { tracing::trace!("Using peer to request a batch of blocks."); // First look to see if we have any failed requests. while let Some(failed_request) = self.failed_batches.peek() { @@ -416,13 +415,13 @@ where /// This function will use our current state to decide if we should send a request for a chain entry /// or if we should request a batch of blocks. /// - /// Returns the [`ClientPoolDropGuard`] back if it doesn't have the data we currently need according + /// Returns the [`ClientDropGuard`] back if it doesn't have the data we currently need according /// to its pruning seed. fn try_handle_free_client( &mut self, chain_tracker: &mut ChainTracker, - client: ClientPoolDropGuard, - ) -> Option> { + client: ClientDropGuard, + ) -> Option> { // We send 2 requests, so if one of them is slow or doesn't have the next chain, we still have a backup. if self.chain_entry_task.len() < 2 // If we have had too many failures then assume the tip has been found so no more chain entries. @@ -463,7 +462,7 @@ where async fn check_for_free_clients( &mut self, chain_tracker: &mut ChainTracker, - pending_peers: &mut BTreeMap>>, + pending_peers: &mut BTreeMap>>, ) -> Result<(), BlockDownloadError> { tracing::debug!("Checking for free peers"); @@ -478,10 +477,19 @@ where panic!("Chain service returned wrong response."); }; - for client in self - .client_pool - .clients_with_more_cumulative_difficulty(current_cumulative_difficulty) - { + let PeerSetResponse::PeersWithMorePoW(clients) = self + .peer_set + .ready() + .await? 
+ .call(PeerSetRequest::PeersWithMorePoW( + current_cumulative_difficulty, + )) + .await? + else { + unreachable!(); + }; + + for client in clients { pending_peers .entry(client.info.pruning_seed) .or_default() @@ -497,9 +505,9 @@ where async fn handle_download_batch_res( &mut self, start_height: usize, - res: Result<(ClientPoolDropGuard, BlockBatch), BlockDownloadError>, + res: Result<(ClientDropGuard, BlockBatch), BlockDownloadError>, chain_tracker: &mut ChainTracker, - pending_peers: &mut BTreeMap>>, + pending_peers: &mut BTreeMap>>, ) -> Result<(), BlockDownloadError> { tracing::debug!("Handling block download response"); @@ -593,7 +601,7 @@ where /// Starts the main loop of the block downloader. async fn run(mut self) -> Result<(), BlockDownloadError> { let mut chain_tracker = - initial_chain_search(&self.client_pool, &mut self.our_chain_svc).await?; + initial_chain_search(&mut self.peer_set, &mut self.our_chain_svc).await?; let mut pending_peers = BTreeMap::new(); @@ -662,7 +670,7 @@ struct BlockDownloadTaskResponse { /// The start height of the batch. start_height: usize, /// A result containing the batch or an error. - result: Result<(ClientPoolDropGuard, BlockBatch), BlockDownloadError>, + result: Result<(ClientDropGuard, BlockBatch), BlockDownloadError>, } /// Returns if a peer has all the blocks in a range, according to its [`PruningSeed`]. diff --git a/p2p/p2p/src/block_downloader/download_batch.rs b/p2p/p2p/src/block_downloader/download_batch.rs index bbb14b3..ef621ce 100644 --- a/p2p/p2p/src/block_downloader/download_batch.rs +++ b/p2p/p2p/src/block_downloader/download_batch.rs @@ -16,8 +16,8 @@ use cuprate_wire::protocol::{GetObjectsRequest, GetObjectsResponse}; use crate::{ block_downloader::{BlockBatch, BlockDownloadError, BlockDownloadTaskResponse}, - client_pool::ClientPoolDropGuard, constants::{BLOCK_DOWNLOADER_REQUEST_TIMEOUT, MAX_TRANSACTION_BLOB_SIZE, MEDIUM_BAN}, + peer_set::ClientDropGuard, }; /// Attempts to request a batch of blocks from a peer, returning [`BlockDownloadTaskResponse`]. @@ -32,7 +32,7 @@ use crate::{ )] #[expect(clippy::used_underscore_binding)] pub async fn download_batch_task( - client: ClientPoolDropGuard, + client: ClientDropGuard, ids: ByteArrayVec<32>, previous_id: [u8; 32], expected_start_height: usize, @@ -49,11 +49,11 @@ pub async fn download_batch_task( /// This function will validate the blocks that were downloaded were the ones asked for and that they match /// the expected height. 
async fn request_batch_from_peer( - mut client: ClientPoolDropGuard, + mut client: ClientDropGuard, ids: ByteArrayVec<32>, previous_id: [u8; 32], expected_start_height: usize, -) -> Result<(ClientPoolDropGuard, BlockBatch), BlockDownloadError> { +) -> Result<(ClientDropGuard, BlockBatch), BlockDownloadError> { let request = PeerRequest::Protocol(ProtocolRequest::GetObjects(GetObjectsRequest { blocks: ids.clone(), pruned: false, diff --git a/p2p/p2p/src/block_downloader/request_chain.rs b/p2p/p2p/src/block_downloader/request_chain.rs index d6a2a0a..4e0f855 100644 --- a/p2p/p2p/src/block_downloader/request_chain.rs +++ b/p2p/p2p/src/block_downloader/request_chain.rs @@ -1,7 +1,7 @@ -use std::{mem, sync::Arc}; +use std::mem; use tokio::{task::JoinSet, time::timeout}; -use tower::{Service, ServiceExt}; +use tower::{util::BoxCloneService, Service, ServiceExt}; use tracing::{instrument, Instrument, Span}; use cuprate_p2p_core::{ @@ -15,11 +15,11 @@ use crate::{ chain_tracker::{ChainEntry, ChainTracker}, BlockDownloadError, ChainSvcRequest, ChainSvcResponse, }, - client_pool::{ClientPool, ClientPoolDropGuard}, constants::{ BLOCK_DOWNLOADER_REQUEST_TIMEOUT, INITIAL_CHAIN_REQUESTS_TO_SEND, MAX_BLOCKS_IDS_IN_CHAIN_ENTRY, MEDIUM_BAN, }, + peer_set::{ClientDropGuard, PeerSetRequest, PeerSetResponse}, }; /// Request a chain entry from a peer. @@ -27,9 +27,9 @@ use crate::{ /// Because the block downloader only follows and downloads one chain we only have to send the block hash of /// top block we have found and the genesis block, this is then called `short_history`. pub(crate) async fn request_chain_entry_from_peer( - mut client: ClientPoolDropGuard, + mut client: ClientDropGuard, short_history: [[u8; 32]; 2], -) -> Result<(ClientPoolDropGuard, ChainEntry), BlockDownloadError> { +) -> Result<(ClientDropGuard, ChainEntry), BlockDownloadError> { let PeerResponse::Protocol(ProtocolResponse::GetChain(chain_res)) = client .ready() .await? @@ -80,7 +80,7 @@ pub(crate) async fn request_chain_entry_from_peer( /// We then wait for their response and choose the peer who claims the highest cumulative difficulty. #[instrument(level = "error", skip_all)] pub async fn initial_chain_search( - client_pool: &Arc>, + peer_set: &mut BoxCloneService, tower::BoxError>, mut our_chain_svc: C, ) -> Result, BlockDownloadError> where @@ -102,9 +102,15 @@ where let our_genesis = *block_ids.last().expect("Blockchain had no genesis block."); - let mut peers = client_pool - .clients_with_more_cumulative_difficulty(cumulative_difficulty) - .into_iter(); + let PeerSetResponse::PeersWithMorePoW(clients) = peer_set + .ready() + .await? + .call(PeerSetRequest::PeersWithMorePoW(cumulative_difficulty)) + .await? 
+ else { + unreachable!(); + }; + let mut peers = clients.into_iter(); let mut futs = JoinSet::new(); diff --git a/p2p/p2p/src/block_downloader/tests.rs b/p2p/p2p/src/block_downloader/tests.rs index 83dd417..6799482 100644 --- a/p2p/p2p/src/block_downloader/tests.rs +++ b/p2p/p2p/src/block_downloader/tests.rs @@ -14,8 +14,8 @@ use monero_serai::{ transaction::{Input, Timelock, Transaction, TransactionPrefix}, }; use proptest::{collection::vec, prelude::*}; -use tokio::time::timeout; -use tower::{service_fn, Service}; +use tokio::{sync::mpsc, time::timeout}; +use tower::{buffer::Buffer, service_fn, Service, ServiceExt}; use cuprate_fixed_bytes::ByteArrayVec; use cuprate_p2p_core::{ @@ -31,7 +31,7 @@ use cuprate_wire::{ use crate::{ block_downloader::{download_blocks, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse}, - client_pool::ClientPool, + peer_set::PeerSet, }; proptest! { @@ -48,19 +48,20 @@ proptest! { let tokio_pool = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap(); - #[expect(clippy::significant_drop_tightening)] tokio_pool.block_on(async move { timeout(Duration::from_secs(600), async move { - let client_pool = ClientPool::new(); + let (new_connection_tx, new_connection_rx) = mpsc::channel(peers); + + let peer_set = PeerSet::new(new_connection_rx); for _ in 0..peers { let client = mock_block_downloader_client(Arc::clone(&blockchain)); - client_pool.add_new_client(client); + new_connection_tx.try_send(client).unwrap(); } let stream = download_blocks( - client_pool, + Buffer::new(peer_set, 10).boxed_clone(), OurChainSvc { genesis: *blockchain.blocks.first().unwrap().0 }, diff --git a/p2p/p2p/src/client_pool.rs b/p2p/p2p/src/client_pool.rs deleted file mode 100644 index 67c8f11..0000000 --- a/p2p/p2p/src/client_pool.rs +++ /dev/null @@ -1,188 +0,0 @@ -//! # Client Pool. -//! -//! The [`ClientPool`], is a pool of currently connected peers that can be pulled from. -//! It does _not_ necessarily contain every connected peer as another place could have -//! taken a peer from the pool. -//! -//! When taking peers from the pool they are wrapped in [`ClientPoolDropGuard`], which -//! returns the peer to the pool when it is dropped. -//! -//! Internally the pool is a [`DashMap`] which means care should be taken in `async` code -//! as internally this uses blocking `RwLock`s. -use std::sync::Arc; - -use dashmap::DashMap; -use tokio::sync::mpsc; -use tracing::{Instrument, Span}; - -use cuprate_p2p_core::{ - client::{Client, InternalPeerID}, - handles::ConnectionHandle, - ConnectionDirection, NetworkZone, -}; - -pub(crate) mod disconnect_monitor; -mod drop_guard_client; - -pub use drop_guard_client::ClientPoolDropGuard; - -/// The client pool, which holds currently connected free peers. -/// -/// See the [module docs](self) for more. -pub struct ClientPool { - /// The connected [`Client`]s. - clients: DashMap, Client>, - /// A channel to send new peer ids down to monitor for disconnect. - new_connection_tx: mpsc::UnboundedSender<(ConnectionHandle, InternalPeerID)>, -} - -impl ClientPool { - /// Returns a new [`ClientPool`] wrapped in an [`Arc`]. - pub fn new() -> Arc { - let (tx, rx) = mpsc::unbounded_channel(); - - let pool = Arc::new(Self { - clients: DashMap::new(), - new_connection_tx: tx, - }); - - tokio::spawn( - disconnect_monitor::disconnect_monitor(rx, Arc::clone(&pool)) - .instrument(Span::current()), - ); - - pool - } - - /// Adds a [`Client`] to the pool, the client must have previously been taken from the - /// pool. 
- /// - /// See [`ClientPool::add_new_client`] to add a [`Client`] which was not taken from the pool before. - /// - /// # Panics - /// This function panics if `client` already exists in the pool. - fn add_client(&self, client: Client) { - let handle = client.info.handle.clone(); - let id = client.info.id; - - // Fast path: if the client is disconnected don't add it to the peer set. - if handle.is_closed() { - return; - } - - assert!(self.clients.insert(id, client).is_none()); - - // We have to check this again otherwise we could have a race condition where a - // peer is disconnected after the first check, the disconnect monitor tries to remove it, - // and then it is added to the pool. - if handle.is_closed() { - self.remove_client(&id); - } - } - - /// Adds a _new_ [`Client`] to the pool, this client should be a new connection, and not already - /// from the pool. - /// - /// # Panics - /// This function panics if `client` already exists in the pool. - pub fn add_new_client(&self, client: Client) { - self.new_connection_tx - .send((client.info.handle.clone(), client.info.id)) - .unwrap(); - - self.add_client(client); - } - - /// Remove a [`Client`] from the pool. - /// - /// [`None`] is returned if the client did not exist in the pool. - fn remove_client(&self, peer: &InternalPeerID) -> Option> { - self.clients.remove(peer).map(|(_, client)| client) - } - - /// Borrows a [`Client`] from the pool. - /// - /// The [`Client`] is wrapped in [`ClientPoolDropGuard`] which - /// will return the client to the pool when it's dropped. - /// - /// See [`Self::borrow_clients`] for borrowing multiple clients. - pub fn borrow_client( - self: &Arc, - peer: &InternalPeerID, - ) -> Option> { - self.remove_client(peer).map(|client| ClientPoolDropGuard { - pool: Arc::clone(self), - client: Some(client), - }) - } - - /// Borrows multiple [`Client`]s from the pool. - /// - /// Note that the returned iterator is not guaranteed to contain every peer asked for. - /// - /// See [`Self::borrow_client`] for borrowing a single client. - pub fn borrow_clients<'a, 'b>( - self: &'a Arc, - peers: &'b [InternalPeerID], - ) -> impl Iterator> + sealed::Captures<(&'a (), &'b ())> { - peers.iter().filter_map(|peer| self.borrow_client(peer)) - } - - /// Borrows all [`Client`]s from the pool that have claimed a higher cumulative difficulty than - /// the amount passed in. - /// - /// The [`Client`]s are wrapped in [`ClientPoolDropGuard`] which - /// will return the clients to the pool when they are dropped. - pub fn clients_with_more_cumulative_difficulty( - self: &Arc, - cumulative_difficulty: u128, - ) -> Vec> { - let peers = self - .clients - .iter() - .filter_map(|element| { - let peer_sync_info = element.value().info.core_sync_data.lock().unwrap(); - - if peer_sync_info.cumulative_difficulty() > cumulative_difficulty { - Some(*element.key()) - } else { - None - } - }) - .collect::>(); - - self.borrow_clients(&peers).collect() - } - - /// Checks all clients in the pool checking if any claim a higher cumulative difficulty than the - /// amount specified. - pub fn contains_client_with_more_cumulative_difficulty( - &self, - cumulative_difficulty: u128, - ) -> bool { - self.clients.iter().any(|element| { - let sync_data = element.value().info.core_sync_data.lock().unwrap(); - sync_data.cumulative_difficulty() > cumulative_difficulty - }) - } - - /// Returns the first outbound peer when iterating over the peers. 
- pub fn outbound_client(self: &Arc) -> Option> { - let client = self - .clients - .iter() - .find(|element| element.value().info.direction == ConnectionDirection::Outbound)?; - let id = *client.key(); - - Some(self.borrow_client(&id).unwrap()) - } -} - -mod sealed { - /// TODO: Remove me when 2024 Rust - /// - /// - pub trait Captures {} - - impl Captures for T {} -} diff --git a/p2p/p2p/src/client_pool/disconnect_monitor.rs b/p2p/p2p/src/client_pool/disconnect_monitor.rs deleted file mode 100644 index f54b560..0000000 --- a/p2p/p2p/src/client_pool/disconnect_monitor.rs +++ /dev/null @@ -1,83 +0,0 @@ -//! # Disconnect Monitor -//! -//! This module contains the [`disconnect_monitor`] task, which monitors connected peers for disconnection -//! and then removes them from the [`ClientPool`] if they do. -use std::{ - future::Future, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; - -use futures::{stream::FuturesUnordered, StreamExt}; -use tokio::sync::mpsc; -use tokio_util::sync::WaitForCancellationFutureOwned; -use tracing::instrument; - -use cuprate_p2p_core::{client::InternalPeerID, handles::ConnectionHandle, NetworkZone}; - -use super::ClientPool; - -/// The disconnect monitor task. -#[instrument(level = "info", skip_all)] -pub async fn disconnect_monitor( - mut new_connection_rx: mpsc::UnboundedReceiver<(ConnectionHandle, InternalPeerID)>, - client_pool: Arc>, -) { - // We need to hold a weak reference otherwise the client pool and this would hold a reference to - // each other causing the pool to be leaked. - let weak_client_pool = Arc::downgrade(&client_pool); - drop(client_pool); - - tracing::info!("Starting peer disconnect monitor."); - - let mut futs: FuturesUnordered> = FuturesUnordered::new(); - - loop { - tokio::select! { - Some((con_handle, peer_id)) = new_connection_rx.recv() => { - tracing::debug!("Monitoring {peer_id} for disconnect"); - futs.push(PeerDisconnectFut { - closed_fut: con_handle.closed(), - peer_id: Some(peer_id), - }); - } - Some(peer_id) = futs.next() => { - tracing::debug!("{peer_id} has disconnected, removing from client pool."); - let Some(pool) = weak_client_pool.upgrade() else { - tracing::info!("Peer disconnect monitor shutting down."); - return; - }; - - pool.remove_client(&peer_id); - drop(pool); - } - else => { - tracing::info!("Peer disconnect monitor shutting down."); - return; - } - } - } -} - -/// A [`Future`] that resolves when a peer disconnects. -#[pin_project::pin_project] -pub(crate) struct PeerDisconnectFut { - /// The inner [`Future`] that resolves when a peer disconnects. - #[pin] - pub(crate) closed_fut: WaitForCancellationFutureOwned, - /// The peers ID. - pub(crate) peer_id: Option>, -} - -impl Future for PeerDisconnectFut { - type Output = InternalPeerID; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.project(); - - this.closed_fut - .poll(cx) - .map(|()| this.peer_id.take().unwrap()) - } -} diff --git a/p2p/p2p/src/client_pool/drop_guard_client.rs b/p2p/p2p/src/client_pool/drop_guard_client.rs deleted file mode 100644 index b10c4e9..0000000 --- a/p2p/p2p/src/client_pool/drop_guard_client.rs +++ /dev/null @@ -1,41 +0,0 @@ -use std::{ - ops::{Deref, DerefMut}, - sync::Arc, -}; - -use cuprate_p2p_core::{client::Client, NetworkZone}; - -use crate::client_pool::ClientPool; - -/// A wrapper around [`Client`] which returns the client to the [`ClientPool`] when dropped. -pub struct ClientPoolDropGuard { - /// The [`ClientPool`] to return the peer to. - pub(super) pool: Arc>, - /// The [`Client`]. 
- /// - /// This is set to [`Some`] when this guard is created, then - /// [`take`](Option::take)n and returned to the pool when dropped. - pub(super) client: Option>, -} - -impl Deref for ClientPoolDropGuard { - type Target = Client; - - fn deref(&self) -> &Self::Target { - self.client.as_ref().unwrap() - } -} - -impl DerefMut for ClientPoolDropGuard { - fn deref_mut(&mut self) -> &mut Self::Target { - self.client.as_mut().unwrap() - } -} - -impl Drop for ClientPoolDropGuard { - fn drop(&mut self) { - let client = self.client.take().unwrap(); - - self.pool.add_client(client); - } -} diff --git a/p2p/p2p/src/connection_maintainer.rs b/p2p/p2p/src/connection_maintainer.rs index cd9d931..245fbf1 100644 --- a/p2p/p2p/src/connection_maintainer.rs +++ b/p2p/p2p/src/connection_maintainer.rs @@ -21,7 +21,6 @@ use cuprate_p2p_core::{ }; use crate::{ - client_pool::ClientPool, config::P2PConfig, constants::{HANDSHAKE_TIMEOUT, MAX_SEED_CONNECTIONS, OUTBOUND_CONNECTION_ATTEMPT_TIMEOUT}, }; @@ -46,7 +45,7 @@ pub struct MakeConnectionRequest { /// This handles maintaining a minimum number of connections and making extra connections when needed, upto a maximum. pub struct OutboundConnectionKeeper { /// The pool of currently connected peers. - pub client_pool: Arc>, + pub new_peers_tx: mpsc::Sender>, /// The channel that tells us to make new _extra_ outbound connections. pub make_connection_rx: mpsc::Receiver, /// The address book service @@ -77,7 +76,7 @@ where { pub fn new( config: P2PConfig, - client_pool: Arc>, + new_peers_tx: mpsc::Sender>, make_connection_rx: mpsc::Receiver, address_book_svc: A, connector_svc: C, @@ -86,7 +85,7 @@ where .expect("Gray peer percent is incorrect should be 0..=1"); Self { - client_pool, + new_peers_tx, make_connection_rx, address_book_svc, connector_svc, @@ -149,7 +148,7 @@ where /// Connects to a given outbound peer. #[instrument(level = "info", skip_all)] async fn connect_to_outbound_peer(&mut self, permit: OwnedSemaphorePermit, addr: N::Addr) { - let client_pool = Arc::clone(&self.client_pool); + let new_peers_tx = self.new_peers_tx.clone(); let connection_fut = self .connector_svc .ready() @@ -164,7 +163,7 @@ where async move { #[expect(clippy::significant_drop_in_scrutinee)] if let Ok(Ok(peer)) = timeout(HANDSHAKE_TIMEOUT, connection_fut).await { - client_pool.add_new_client(peer); + drop(new_peers_tx.send(peer).await); } } .instrument(Span::current()), diff --git a/p2p/p2p/src/inbound_server.rs b/p2p/p2p/src/inbound_server.rs index 6e793bd..0479560 100644 --- a/p2p/p2p/src/inbound_server.rs +++ b/p2p/p2p/src/inbound_server.rs @@ -6,7 +6,7 @@ use std::{pin::pin, sync::Arc}; use futures::{SinkExt, StreamExt}; use tokio::{ - sync::Semaphore, + sync::{mpsc, Semaphore}, task::JoinSet, time::{sleep, timeout}, }; @@ -24,7 +24,6 @@ use cuprate_wire::{ }; use crate::{ - client_pool::ClientPool, constants::{ HANDSHAKE_TIMEOUT, INBOUND_CONNECTION_COOL_DOWN, PING_REQUEST_CONCURRENCY, PING_REQUEST_TIMEOUT, @@ -36,7 +35,7 @@ use crate::{ /// and initiate handshake if needed, after verifying the address isn't banned. 
#[instrument(level = "warn", skip_all)] pub async fn inbound_server( - client_pool: Arc>, + new_connection_tx: mpsc::Sender>, mut handshaker: HS, mut address_book: A, config: P2PConfig, @@ -111,13 +110,13 @@ where permit: Some(permit), }); - let cloned_pool = Arc::clone(&client_pool); + let new_connection_tx = new_connection_tx.clone(); tokio::spawn( async move { let client = timeout(HANDSHAKE_TIMEOUT, fut).await; if let Ok(Ok(peer)) = client { - cloned_pool.add_new_client(peer); + drop(new_connection_tx.send(peer).await); } } .instrument(Span::current()), diff --git a/p2p/p2p/src/lib.rs b/p2p/p2p/src/lib.rs index 541784c..fb50658 100644 --- a/p2p/p2p/src/lib.rs +++ b/p2p/p2p/src/lib.rs @@ -18,17 +18,18 @@ use cuprate_p2p_core::{ pub mod block_downloader; mod broadcast; -pub mod client_pool; pub mod config; pub mod connection_maintainer; pub mod constants; mod inbound_server; +mod peer_set; use block_downloader::{BlockBatch, BlockDownloaderConfig, ChainSvcRequest, ChainSvcResponse}; pub use broadcast::{BroadcastRequest, BroadcastSvc}; -pub use client_pool::{ClientPool, ClientPoolDropGuard}; pub use config::{AddressBookConfig, P2PConfig}; use connection_maintainer::MakeConnectionRequest; +use peer_set::PeerSet; +pub use peer_set::{ClientDropGuard, PeerSetRequest, PeerSetResponse}; /// Initializes the P2P [`NetworkInterface`] for a specific [`NetworkZone`]. /// @@ -54,7 +55,10 @@ where cuprate_address_book::init_address_book(config.address_book_config.clone()).await?; let address_book = Buffer::new( address_book, - config.max_inbound_connections + config.outbound_connections, + config + .max_inbound_connections + .checked_add(config.outbound_connections) + .unwrap(), ); // Use the default config. Changing the defaults affects tx fluff times, which could affect D++ so for now don't allow changing @@ -83,19 +87,25 @@ where let outbound_handshaker = outbound_handshaker_builder.build(); - let client_pool = ClientPool::new(); - + let (new_connection_tx, new_connection_rx) = mpsc::channel( + config + .outbound_connections + .checked_add(config.max_inbound_connections) + .unwrap(), + ); let (make_connection_tx, make_connection_rx) = mpsc::channel(3); let outbound_connector = Connector::new(outbound_handshaker); let outbound_connection_maintainer = connection_maintainer::OutboundConnectionKeeper::new( config.clone(), - Arc::clone(&client_pool), + new_connection_tx.clone(), make_connection_rx, address_book.clone(), outbound_connector, ); + let peer_set = PeerSet::new(new_connection_rx); + let mut background_tasks = JoinSet::new(); background_tasks.spawn( @@ -105,7 +115,7 @@ where ); background_tasks.spawn( inbound_server::inbound_server( - Arc::clone(&client_pool), + new_connection_tx, inbound_handshaker, address_book.clone(), config, @@ -121,7 +131,7 @@ where ); Ok(NetworkInterface { - pool: client_pool, + peer_set: Buffer::new(peer_set, 10).boxed_clone(), broadcast_svc, make_connection_tx, address_book: address_book.boxed_clone(), @@ -133,7 +143,7 @@ where #[derive(Clone)] pub struct NetworkInterface { /// A pool of free connected peers. - pool: Arc>, + peer_set: BoxCloneService, tower::BoxError>, /// A [`Service`] that allows broadcasting to all connected peers. broadcast_svc: BroadcastSvc, /// A channel to request extra connections. 
@@ -163,7 +173,7 @@ impl NetworkInterface { + 'static, C::Future: Send + 'static, { - block_downloader::download_blocks(Arc::clone(&self.pool), our_chain_service, config) + block_downloader::download_blocks(self.peer_set.clone(), our_chain_service, config) } /// Returns the address book service. @@ -173,8 +183,10 @@ impl NetworkInterface { self.address_book.clone() } - /// Borrows the `ClientPool`, for access to connected peers. - pub const fn client_pool(&self) -> &Arc> { - &self.pool + /// Borrows the `PeerSet`, for access to connected peers. + pub fn peer_set( + &mut self, + ) -> &mut BoxCloneService, tower::BoxError> { + &mut self.peer_set } } diff --git a/p2p/p2p/src/peer_set.rs b/p2p/p2p/src/peer_set.rs new file mode 100644 index 0000000..498eaaf --- /dev/null +++ b/p2p/p2p/src/peer_set.rs @@ -0,0 +1,217 @@ +use std::{ + future::{ready, Future, Ready}, + pin::{pin, Pin}, + task::{Context, Poll}, +}; + +use futures::{stream::FuturesUnordered, StreamExt}; +use indexmap::{IndexMap, IndexSet}; +use rand::{seq::index::sample, thread_rng}; +use tokio::sync::mpsc::Receiver; +use tokio_util::sync::WaitForCancellationFutureOwned; +use tower::Service; + +use cuprate_helper::cast::u64_to_usize; +use cuprate_p2p_core::{ + client::{Client, InternalPeerID}, + ConnectionDirection, NetworkZone, +}; + +mod client_wrappers; + +pub use client_wrappers::ClientDropGuard; +use client_wrappers::StoredClient; + +/// A request to the peer-set. +pub enum PeerSetRequest { + /// The most claimed proof-of-work from a peer in the peer-set. + MostPoWSeen, + /// Peers with more cumulative difficulty than the given cumulative difficulty. + /// + /// Returned peers will be remembered and won't be returned from subsequent calls until the guard is dropped. + PeersWithMorePoW(u128), + /// A random outbound peer. + /// + /// The returned peer will be remembered and won't be returned from subsequent calls until the guard is dropped. + StemPeer, +} + +/// A response from the peer-set. +pub enum PeerSetResponse { + /// [`PeerSetRequest::MostPoWSeen`] + MostPoWSeen { + /// The cumulative difficulty claimed. + cumulative_difficulty: u128, + /// The height claimed. + height: usize, + /// The claimed hash of the top block. + top_hash: [u8; 32], + }, + /// [`PeerSetRequest::PeersWithMorePoW`] + /// + /// Returned peers will be remembered and won't be returned from subsequent calls until the guard is dropped. + PeersWithMorePoW(Vec>), + /// [`PeerSetRequest::StemPeer`] + /// + /// The returned peer will be remembered and won't be returned from subsequent calls until the guard is dropped. + StemPeer(Option>), +} + +/// A [`Future`] that completes when a peer disconnects. +#[pin_project::pin_project] +struct ClosedConnectionFuture { + #[pin] + fut: WaitForCancellationFutureOwned, + id: Option>, +} + +impl Future for ClosedConnectionFuture { + type Output = InternalPeerID; + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + + this.fut.poll(cx).map(|()| this.id.take().unwrap()) + } +} + +/// A collection of all connected peers on a [`NetworkZone`]. +pub(crate) struct PeerSet { + /// The connected peers. + peers: IndexMap, StoredClient>, + /// A [`FuturesUnordered`] that resolves when a peer disconnects. + closed_connections: FuturesUnordered>, + /// The [`InternalPeerID`]s of all outbound peers. + outbound_peers: IndexSet>, + /// A channel of new peers from the inbound server or outbound connector. 
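+ ///
+ /// Peers received here are drained and added to the set each time the service is polled.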
+ new_peers: Receiver>, +} + +impl PeerSet { + pub(crate) fn new(new_peers: Receiver>) -> Self { + Self { + peers: IndexMap::new(), + closed_connections: FuturesUnordered::new(), + outbound_peers: IndexSet::new(), + new_peers, + } + } + + /// Polls the new peers channel for newly connected peers. + fn poll_new_peers(&mut self, cx: &mut Context<'_>) { + while let Poll::Ready(Some(new_peer)) = self.new_peers.poll_recv(cx) { + if new_peer.info.direction == ConnectionDirection::Outbound { + self.outbound_peers.insert(new_peer.info.id); + } + + self.closed_connections.push(ClosedConnectionFuture { + fut: new_peer.info.handle.closed(), + id: Some(new_peer.info.id), + }); + + self.peers + .insert(new_peer.info.id, StoredClient::new(new_peer)); + } + } + + /// Remove disconnected peers from the peer set. + fn remove_dead_peers(&mut self, cx: &mut Context<'_>) { + while let Poll::Ready(Some(dead_peer)) = self.closed_connections.poll_next_unpin(cx) { + let Some(peer) = self.peers.swap_remove(&dead_peer) else { + continue; + }; + + if peer.client.info.direction == ConnectionDirection::Outbound { + self.outbound_peers.swap_remove(&peer.client.info.id); + } + + self.peers.swap_remove(&dead_peer); + } + } + + /// [`PeerSetRequest::MostPoWSeen`] + fn most_pow_seen(&self) -> PeerSetResponse { + let most_pow_chain = self + .peers + .values() + .map(|peer| { + let core_sync_data = peer.client.info.core_sync_data.lock().unwrap(); + + ( + core_sync_data.cumulative_difficulty(), + u64_to_usize(core_sync_data.current_height), + core_sync_data.top_id, + ) + }) + .max_by_key(|(cumulative_difficulty, ..)| *cumulative_difficulty) + .unwrap_or_default(); + + PeerSetResponse::MostPoWSeen { + cumulative_difficulty: most_pow_chain.0, + height: most_pow_chain.1, + top_hash: most_pow_chain.2, + } + } + + /// [`PeerSetRequest::PeersWithMorePoW`] + fn peers_with_more_pow(&self, cumulative_difficulty: u128) -> PeerSetResponse { + PeerSetResponse::PeersWithMorePoW( + self.peers + .values() + .filter(|&client| { + !client.is_downloading_blocks() + && client + .client + .info + .core_sync_data + .lock() + .unwrap() + .cumulative_difficulty() + > cumulative_difficulty + }) + .map(StoredClient::downloading_blocks_guard) + .collect(), + ) + } + + /// [`PeerSetRequest::StemPeer`] + fn random_peer_for_stem(&self) -> PeerSetResponse { + PeerSetResponse::StemPeer( + sample( + &mut thread_rng(), + self.outbound_peers.len(), + self.outbound_peers.len(), + ) + .into_iter() + .find_map(|i| { + let peer = self.outbound_peers.get_index(i).unwrap(); + let client = self.peers.get(peer).unwrap(); + (!client.is_a_stem_peer()).then(|| client.stem_peer_guard()) + }), + ) + } +} + +impl Service for PeerSet { + type Response = PeerSetResponse; + type Error = tower::BoxError; + type Future = Ready>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.poll_new_peers(cx); + self.remove_dead_peers(cx); + + // TODO: should we return `Pending` if we don't have any peers? 
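+ // As written, an empty peer-set still returns `Ready`; requests then resolve
+ // immediately with default, empty, or `None` responses rather than waiting for a peer.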
+ + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: PeerSetRequest) -> Self::Future { + ready(match req { + PeerSetRequest::MostPoWSeen => Ok(self.most_pow_seen()), + PeerSetRequest::PeersWithMorePoW(cumulative_difficulty) => { + Ok(self.peers_with_more_pow(cumulative_difficulty)) + } + PeerSetRequest::StemPeer => Ok(self.random_peer_for_stem()), + }) + } +} diff --git a/p2p/p2p/src/peer_set/client_wrappers.rs b/p2p/p2p/src/peer_set/client_wrappers.rs new file mode 100644 index 0000000..97d7493 --- /dev/null +++ b/p2p/p2p/src/peer_set/client_wrappers.rs @@ -0,0 +1,86 @@ +use std::{ + ops::{Deref, DerefMut}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; + +use cuprate_p2p_core::{ + client::{Client, WeakClient}, + NetworkZone, +}; + +/// A client stored in the peer-set. +pub(super) struct StoredClient { + pub client: Client, + /// An [`AtomicBool`] for if the peer is currently downloading blocks. + downloading_blocks: Arc, + /// An [`AtomicBool`] for if the peer is currently being used to stem txs. + stem_peer: Arc, +} + +impl StoredClient { + pub(super) fn new(client: Client) -> Self { + Self { + client, + downloading_blocks: Arc::new(AtomicBool::new(false)), + stem_peer: Arc::new(AtomicBool::new(false)), + } + } + + /// Returns [`true`] if the [`StoredClient`] is currently downloading blocks. + pub(super) fn is_downloading_blocks(&self) -> bool { + self.downloading_blocks.load(Ordering::Relaxed) + } + + /// Returns [`true`] if the [`StoredClient`] is currently being used to stem txs. + pub(super) fn is_a_stem_peer(&self) -> bool { + self.stem_peer.load(Ordering::Relaxed) + } + + /// Returns a [`ClientDropGuard`] that while it is alive keeps the [`StoredClient`] in the downloading blocks state. + pub(super) fn downloading_blocks_guard(&self) -> ClientDropGuard { + self.downloading_blocks.store(true, Ordering::Relaxed); + + ClientDropGuard { + client: self.client.downgrade(), + bool: Arc::clone(&self.downloading_blocks), + } + } + + /// Returns a [`ClientDropGuard`] that while it is alive keeps the [`StoredClient`] in the stemming peers state. + pub(super) fn stem_peer_guard(&self) -> ClientDropGuard { + self.stem_peer.store(true, Ordering::Relaxed); + + ClientDropGuard { + client: self.client.downgrade(), + bool: Arc::clone(&self.stem_peer), + } + } +} + +/// A [`Drop`] guard for a client returned from the peer-set. 
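+///
+/// While the guard is alive the peer stays flagged as busy (downloading blocks or stemming
+/// txs) and won't be handed out again; dropping the guard clears the flag.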
+pub struct ClientDropGuard { + client: WeakClient, + bool: Arc, +} + +impl Deref for ClientDropGuard { + type Target = WeakClient; + fn deref(&self) -> &Self::Target { + &self.client + } +} + +impl DerefMut for ClientDropGuard { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.client + } +} + +impl Drop for ClientDropGuard { + fn drop(&mut self) { + self.bool.store(false, Ordering::Relaxed); + } +} From 4b925b8c78bfce9089a74f12f07ace94cb8b57f7 Mon Sep 17 00:00:00 2001 From: Dmitry Holodov Date: Wed, 20 Nov 2024 19:08:24 -0600 Subject: [PATCH 11/14] ZMQ PUB/SUB JSON Types (#330) --- Cargo.lock | 21 + Cargo.toml | 2 + books/architecture/src/appendix/crates.md | 5 + zmq/types/Cargo.toml | 20 + zmq/types/src/json_message_types.rs | 646 ++++++++++++++++++++++ zmq/types/src/lib.rs | 1 + 6 files changed, 695 insertions(+) create mode 100644 zmq/types/Cargo.toml create mode 100644 zmq/types/src/json_message_types.rs create mode 100644 zmq/types/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index a947a15..08c017c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -68,6 +68,16 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +[[package]] +name = "assert-json-diff" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "async-stream" version = "0.3.6" @@ -1012,6 +1022,17 @@ dependencies = [ "thiserror", ] +[[package]] +name = "cuprate-zmq-types" +version = "0.1.0" +dependencies = [ + "assert-json-diff", + "cuprate-types", + "hex", + "serde", + "serde_json", +] + [[package]] name = "cuprated" version = "0.0.1" diff --git a/Cargo.toml b/Cargo.toml index 0f460e8..1bfd680 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,6 +30,7 @@ members = [ "rpc/json-rpc", "rpc/types", "rpc/interface", + "zmq/types", ] [profile.release] @@ -79,6 +80,7 @@ cuprate-types = { path = "types", default-features = cuprate-json-rpc = { path = "rpc/json-rpc", default-features = false } cuprate-rpc-types = { path = "rpc/types", default-features = false } cuprate-rpc-interface = { path = "rpc/interface", default-features = false } +cuprate-zmq-types = { path = "zmq/types", default-features = false } # External dependencies anyhow = { version = "1", default-features = false } diff --git a/books/architecture/src/appendix/crates.md b/books/architecture/src/appendix/crates.md index ac2780e..a0dff48 100644 --- a/books/architecture/src/appendix/crates.md +++ b/books/architecture/src/appendix/crates.md @@ -54,6 +54,11 @@ cargo doc --open --package cuprate-blockchain | [`cuprate-rpc-interface`](https://doc.cuprate.org/cuprate_rpc_interface) | [`rpc/interface/`](https://github.com/Cuprate/cuprate/tree/main/rpc/interface) | RPC interface & routing | [`cuprate-rpc-handler`](https://doc.cuprate.org/cuprate_rpc_handler) | [`rpc/handler/`](https://github.com/Cuprate/cuprate/tree/main/rpc/handler) | RPC inner handlers +## ZMQ +| Crate | In-tree path | Purpose | +|-------|--------------|---------| +| [`cuprate-zmq-types`](https://doc.cuprate.org/cuprate_zmq_types) | [`zmq/types/`](https://github.com/Cuprate/cuprate/tree/main/zmq/types) | Message types for ZMQ Pub/Sub interface + ## 1-off crates | Crate | In-tree path | Purpose | |-------|--------------|---------| diff --git a/zmq/types/Cargo.toml b/zmq/types/Cargo.toml new file mode 100644 index 
0000000..78e7d00 --- /dev/null +++ b/zmq/types/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "cuprate-zmq-types" +version = "0.1.0" +edition = "2021" +description = "Types for the ZMQ Pub/Sub API" +license = "MIT" +authors = ["dimalinux"] +repository = "https://github.com/Cuprate/cuprate/tree/main/zmq/types" + +[dependencies] +serde = { workspace = true, features = ["derive"] } +hex = { workspace = true, features = ["std", "serde"] } +cuprate-types = { workspace = true, features = ["hex"] } + +[dev-dependencies] +serde_json = { workspace = true, features = ["std"] } +assert-json-diff = "2.0.2" + +[lints] +workspace = true diff --git a/zmq/types/src/json_message_types.rs b/zmq/types/src/json_message_types.rs new file mode 100644 index 0000000..2699600 --- /dev/null +++ b/zmq/types/src/json_message_types.rs @@ -0,0 +1,646 @@ +//! Objects for JSON serialization and deserialization in message bodies of +//! the ZMQ pub/sub interface. Handles JSON for the following subscriptions: +//! * `json-full-txpool_add` (`Vec`) +//! * `json-minimal-txpool_add` (`Vec`) +//! * `json-full-chain_main` (`Vec`) +//! * `json-minimal-chain_main` (`ChainMainMin`) +//! * `json-full-miner_data` (`MinerData`) +use cuprate_types::hex::HexBytes; +use serde::{Deserialize, Serialize}; + +/// ZMQ `json-full-txpool_add` packets contain an array of `TxPoolAdd`. +/// +/// Each `TxPoolAdd` object represents a new transaction in the mempool that was +/// not previously seen in a block. Miner coinbase transactions *are not* +/// included. `do-not-relay` transactions *are* included. Values are not +/// republished during a re-org. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct TxPoolAdd { + /// transaction version number. `2` indicates Ring CT (all sub-variants). + pub version: u8, + /// if not `0` and less than `500_000_000`, this is the block height when + /// transaction output(s) are spendable; if >= `500_000_000` this is roughly + /// the unix epoch block timestamp when the output(s) are spendable. + pub unlock_time: u64, + /// transaction inputs (key images) with separate rings for each input + pub inputs: Vec, + /// transaction outputs + pub outputs: Vec, + /// extra data for the transaction with variable size, but limited to `1060` + /// bytes (`2120` hex nibbles). + #[serde(with = "hex::serde")] + pub extra: Vec, + /// obsolete, empty array in JSON + signatures: [Obsolete; 0], + /// ring confidential transaction data + pub ringct: PoolRingCt, +} + +/// ZMQ `json-minimal-txpool_add` subscriber messages contain an array of +/// `TxPoolAddMin` JSON objects. See `TxPoolAdd` for information on which +/// transactions are published to subscribers. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct TxPoolAddMin { + /// transaction ID + pub id: HexBytes<32>, + /// size of the full transaction blob + pub blob_size: u64, + /// metric used to calculate transaction fee + pub weight: u64, + /// mining fee included in the transaction in piconeros + pub fee: u64, +} + +/// ZMQ `json-full-chain_main` subscriber messages contain an array of +/// `ChainMain` JSON objects. Each `ChainMain` object represents a new block. +/// Push messages only contain more than one block if a re-org occurred. 
+#[derive(Debug, Serialize, Deserialize)] +pub struct ChainMain { + /// major version of the monero protocol at this block's height + pub major_version: u8, + /// minor version of the monero protocol at this block's height + pub minor_version: u8, + /// epoch time, decided by the miner, at which the block was mined + pub timestamp: u64, + /// block id of the previous block + pub prev_id: HexBytes<32>, + /// cryptographic random one-time number used in mining a Monero block + pub nonce: u32, + /// coinbase transaction information + pub miner_tx: MinerTx, + /// non-coinbase transaction IDs in the block (can be empty) + pub tx_hashes: Vec>, +} + +/// ZMQ `json-minimal-chain_main` subscriber messages contain a single +/// `ChainMainMin` JSON object. Unlike the full version, only the topmost +/// block is sent in the case of a re-org. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct ChainMainMin { + /// height of the block + pub first_height: u64, + /// block id of the previous block + pub first_prev_id: HexBytes<32>, + /// block ID of the current block is the 0th entry; additional block IDs + /// will only be included if this is the topmost block of a re-org. + pub ids: Vec>, +} + +/// ZMQ `json-full-miner_data` subscriber messages contain a single +/// `MinerData` object that provides the necessary data to create a +/// custom block template. There is no min version of this object. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct MinerData { + /// major version of the monero protocol for the next mined block + pub major_version: u8, + /// height on which to mine + pub height: u64, + /// block id of the most recent block on which to mine the next block + pub prev_id: HexBytes<32>, + /// hash of block to use as seed for Random-X proof-of-work + pub seed_hash: HexBytes<32>, + /// least-significant 64 bits of the 128-bit network difficulty + #[serde(with = "hex_difficulty")] + pub difficulty: u64, + /// median adjusted block size of the latest 100000 blocks + pub median_weight: u64, + /// fixed at `u64::MAX` in perpetuity as Monero has already reached tail emission + pub already_generated_coins: u64, + /// mineable mempool transactions + pub tx_backlog: Vec, +} + +/// Holds a single input for the `TxPoolAdd` `inputs` array. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct PoolInput { + pub to_key: ToKey, +} + +/// Same as `PoolInput` (adds an extra JSON name layer) +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct ToKey { + /// obsolete field (always 0), non-coinbase TX amounts are now encrypted + amount: u64, + /// integer offsets for ring members + pub key_offsets: Vec, + /// key image for the given input + pub key_image: HexBytes<32>, +} + +/// Holds the block height of the coinbase transaction. 
+#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct MinerInput { + /// namespace layer around the block height + pub r#gen: Gen, +} + +/// Additional namespace layer around the block height in `ChainMain`; gen is +/// another name for a coinbase transaction +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Gen { + /// block height when the coinbase transaction was created + pub height: u64, +} + +/// Transaction output data used by both `TxPoolAdd` and `MinerTx` +#[derive(Debug, Default, Clone, Copy, Serialize, Deserialize)] +pub struct Output { + /// zero for non-coinbase transactions which use encrypted amounts or + /// an amount in piconeros for coinbase transactions + pub amount: u64, + /// public key of the output destination + pub to_tagged_key: ToTaggedKey, +} + +/// Holds the public key of an output destination with its view tag. +#[derive(Debug, Default, Clone, Copy, Serialize, Deserialize)] +pub struct ToTaggedKey { + /// public key used to indicate the destination of a transaction output + pub key: HexBytes<32>, + /// 1st byte of a shared secret used to reduce wallet synchronization time + pub view_tag: HexBytes<1>, +} + +/// Ring CT information used inside `TxPoolAdd` +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct PoolRingCt { + /// ring CT type; `6` is CLSAG Bulletproof Plus + pub r#type: u8, + /// encrypted amount values of the transaction outputs + pub encrypted: Vec, + /// Ring CT commitments, 1 per transaction input + pub commitments: Vec>, + /// mining fee in piconeros + pub fee: u64, + /// data to validate the transaction that can be pruned from older blocks + pub prunable: Prunable, +} + +/// Ring CT information used inside `MinerTx`. Miner coinbase transactions don't +/// use Ring CT, so this only holds a block height. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +struct MinerRingCt { + /// always zero to indicate that Ring CT is not used + r#type: u8, +} + +/// Holds the encrypted amount of a non-coinbase transaction output. +#[derive(Debug, Default, Clone, Copy, Serialize, Deserialize)] +pub struct Encrypted { + /// obsolete field, but present as zeros in JSON; this does not represent + /// the newer deterministically derived mask + mask: HexBytes<32>, + /// encrypted amount of the transaction output + pub amount: HexBytes<32>, +} + +/// Data needed to validate a transaction that can optionally be pruned from +/// older blocks. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Prunable { + /// obsolete, empty array in JSON + range_proofs: [Obsolete; 0], + /// obsolete, empty array in JSON + bulletproofs: [Obsolete; 0], + /// Bulletproofs+ data used to validate a Ring CT transaction + pub bulletproofs_plus: [BulletproofPlus; 1], + /// obsolete, empty array in JSON + mlsags: [Obsolete; 0], + /// CLSAG signatures; 1 per transaction input + pub clsags: Vec, + /// Ring CT pseudo output commitments; 1 per transaction input (*not* + /// output) + pub pseudo_outs: Vec>, +} + +/// Bulletproofs+ data used to validate the legitimacy of a Ring CT transaction. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[expect(non_snake_case)] +pub struct BulletproofPlus { + pub V: Vec>, + pub A: HexBytes<32>, + pub A1: HexBytes<32>, + pub B: HexBytes<32>, + pub r1: HexBytes<32>, + pub s1: HexBytes<32>, + pub d1: HexBytes<32>, + pub L: Vec>, + pub R: Vec>, +} + +/// Placeholder element type so obsolete fields can be deserialized +/// to the empty vector for backwards compatibility. 
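+///
+/// Zero-length arrays of this type serialize back to an empty JSON array, matching the
+/// `[]` that monerod emits for these obsolete fields.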
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +struct Obsolete; + +/// CLSAG signature fields +#[expect(non_snake_case)] +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Clsag { + pub s: Vec>, + pub c1: HexBytes<32>, + pub D: HexBytes<32>, +} + +/// Part of the new block information in `ChainMain` +#[derive(Debug, Serialize, Deserialize)] +pub struct MinerTx { + /// transaction version number + pub version: u8, + /// block height when the coinbase transaction becomes spendable (currently + /// 60 blocks above the coinbase transaction height) + pub unlock_time: u64, + /// contains the block height in `inputs[0].gen.height` and nothing else as + /// coinbase transactions have no inputs + pub inputs: [MinerInput; 1], + /// transaction outputs + pub outputs: Vec, + /// extra data for the transaction with variable size; not limited to `1060` + /// bytes like the extra field of non-coinbase transactions + #[serde(with = "hex::serde")] + pub extra: Vec, + /// obsolete, empty array in JSON + signatures: [Obsolete; 0], + /// only for JSON compatibility; miners' don't use Ring CT + ringct: MinerRingCt, +} + +/// Holds a transaction entry in the `MinerData` `tx_backlog` field. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct TxBacklog { + /// transaction ID + pub id: HexBytes<32>, + /// metric used to calculate transaction fee + pub weight: u64, + /// mining fee in piconeros + pub fee: u64, +} + +mod hex_difficulty { + //! Serializes the u64 difficulty field of `MinerData` in the same ways as + //! monerod. The difficulty value is inside a string, in big-endian hex, and + //! has a 0x prefix with no leading zeros. + use serde::{Deserialize, Deserializer, Serializer}; + + #[expect(clippy::trivially_copy_pass_by_ref)] + pub(super) fn serialize(difficulty: &u64, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&format!("0x{difficulty:x}")) + } + + pub(super) fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + let s = s.strip_prefix("0x").unwrap_or(&s); + u64::from_str_radix(s, 16).map_err(serde::de::Error::custom) + } +} + +#[cfg(test)] +mod tests { + use assert_json_diff::assert_json_eq; + use serde_json::{self, json}; + + use super::*; + + #[test] + fn test_txpooladd_json() { + let json1 = json!([ + { + "version": 2, + "unlock_time": 0, + "inputs": [ + { + "to_key": { + "amount": 0, + "key_offsets": [ + 82773133, + 30793552, + 578803, + 620532, + 114291, + 291870, + 111275, + 86455, + 19769, + 1238, + 15164, + 11374, + 5240, + 3547, + 7423, + 4198 + ], + "key_image": "89c060b57bba20c0b795bda4b618749e04eba5b40b30062b071dff6e8dd9071d" + } + } + ], + "outputs": [ + { + "amount": 0, + "to_tagged_key": { + "key": "05b4ff4c3ced6ba078a078af8fee5916512a1893f2b6d9373fb90e0eb4040095", + "view_tag": "7a" + } + }, + { + "amount": 0, + "to_tagged_key": { + "key": "60250376bca49bf24cef45c12738b86347df10954cd35630e81b90bf01e922af", + "view_tag": "b8" + } + } + ], + "extra": "01154b87b3334ce9f99d04635eae4e31252a20ba22acb96ff0764a03dc91d203ed020901be80cbce0723d0b4", + "signatures": [], + "ringct": { + "type": 6, + "encrypted": [ + { + "mask": "0000000000000000000000000000000000000000000000000000000000000000", + "amount": "a956be1858615454000000000000000000000000000000000000000000000000" + }, + { + "mask": "0000000000000000000000000000000000000000000000000000000000000000", + "amount": "72972be61af1210b000000000000000000000000000000000000000000000000" + } + 
], + "commitments": [ + "cc2a17e43f0b183235a06e8582fcaaa7c21a07732077e66d4dcfaa0db691ea20", + "04e3cd1d3430bb7a1d9ede5ce9ec0ef2f6f9dd9fd31fb95c9e0b3148f1a660c8" + ], + "fee": 30660000, + "prunable": { + "range_proofs": [], + "bulletproofs": [], + "bulletproofs_plus": [ + { + "V": [ + "0196c1e9ba57ae053ae19c1bfd49e13146bd4b6e49401582f8a5a6f65ae560d0", + "aecd14b0e2d788315023601947c12d7e9227d8a1a0aee41f0b34fe196d96119f" + ], + "A": "8011fb75ba56d16b1ef1193e1fdfdb81e6b83afd726087427163857e8fcdf08e", + "A1": "ab91ab6863fbdee1fb71791e5297d007269f1b2cc050df40628ee7d0a1a5f3cb", + "B": "df1d082111b51d479b7fa72f6363bb731207c9343a528dc05b5798af56702521", + "r1": "2e212ae9ad704611a39b9b242453d2408045b303738b51d6f88f9dba06233401", + "s1": "36be53973fd971edff1f43cc5d04dda78d2b01f4caeaf38bbe195b04e309b30d", + "d1": "592116ca54b2d3ca0e9f222ffcc5fd63d3c992470473911fc70822f37672350a", + "L": [ + "98f1e11d62b90c665a8a96fb1b10332e37a790ea1e01a9e8ec8de74b7b27b0df", + "3a14689f3d743a3be719df9af28ca2f0f398e3a2731d5d6f342d0485bf81a525", + "bcb9e389fd494db66e4c796ff03795daa131426c0776ded6d37bfae51f29623d", + "5aa7e1f2bfcfcd74ac8305ce59a7baf5a901f84f8fbdd3a2d639e4058f35e98b", + "5939aa7ea012f88a26bab20270ce5b164c1880f793dc249ec215a0783b4d4ca7", + "08286f78d1bb0d7fc2efc7a3ac314707a4a1ac9656656d496180e131c1748496", + "7fc1de780305601aab95fda4b005927a4643f222e28407c31ad46cc935b7a27c" + ], + "R": [ + "69b4f329c0a5f8ae05891ac5ac35b947a7442b66e5b5693c99435deac3a62662", + "a193038cb8dc9d22abe6577fe44271c1693176cb636f9d101723670fb5ca5cda", + "90670e7083e503c2989b6548500234740dabf3451b0bd376979e03ca0cb5e50c", + "6ab149089f73799811f631eab272bd6c8f190f38efff4d49577364956d0148bf", + "62f2178cbdc760a0d3787b5fd42161c3c98394c2ff2b88efc039df59d2116e5d", + "536f91da278f730f2524260d2778dc5959d40a5c724dd789d35bbd309eabd933", + "e47c5c8181e692f3ad91733e7d9a52f8b7e3f5016c5e65f789eea367a13f16cd" + ] + } + ], + "mlsags": [], + "clsags": [ + { + "s": [ + "f70840a8d65da85e962d2ce5ed1293ae3de83318b464363db85505d99e317b01", + "b7c1125be139b4ed201ce85b8453920306cac7c5da11e0f8c0fd7702f15c6a06", + "5a04335699f5a816eed1cab79085814dbcf3be5cef51b078b1c3e0210bbba606", + "e4743e114fd6352ea29e0b48ac96688edaba1d5d0634c34301756902eeb1fb0e", + "34aae87ab091082356d2815a7c8e973124245ebc6d163b9f01fbfeb360edcf04", + "d2d0b6ddb44ed42096affec08ea9cd77d2c7cdc5b2e1e964f836d3717640ec00", + "79b34258c8be04ddd955389f7ee3b912286c23492c519a5687b81d770619620e", + "3c889c19693463160d6c7e642c46f5d41db052ee3358c7dcb4826f48bca26607", + "da04927a438fd0d9674e64f0c016f30fde27f251d3466f29dcd5b3d757fec90c", + "f3e08d83b11ca6529bc18748d3f732c325fca8ff79f69f0ed754bcd529898102", + "f00d7125909a9a8cc5283ffc7727fce945e85828459eecb836c7aedca414350e", + "0a635a193af37be1c9519309f25eaf9f37b7bc5892864646d8d2a2187fcec601", + "0c4154d575dff3699bd41f0c354601de6535161755bd2164526076f37e2c6908", + "f7b21e2698333285ea10a95edbe80fe0bb8740c30b35c25bd2002e3693867e02", + "a637f338ff2ed65fa96e5529abc575fc2a35ed1a3f62a9e7be495069d8438800", + "f7c355f1c3a663978c5fe1c9337aabd4085ee537a61eec2c5c1e837cb3728c09" + ], + "c1": "c5dd25e0e32dbefa6ac1d0dc9072620eb97a99224462cdd163287f2b60b9810b", + "D": "c4fa3f939ccf02e4c8842cbd417cf3690421986e558734a0a029f8a86d2791a8" + } + ], + "pseudo_outs": [ + "bcb08920f5476d74294aeb89c8001123bffd2f2ab84e105d553b807674c595ce" + ] + } + } + } + ]); + + let tx_pool_adds: Vec = serde_json::from_value(json1.clone()).unwrap(); + let json2 = serde_json::to_value(&tx_pool_adds).unwrap(); + assert_json_eq!(json1, json2); + } + + #[test] + fn test_txpooladd_min_json() { + let 
json1 = json!([ + { + "id": "b5086746e805d875cbbbbb49e19aac29d9b75019f656fab8516cdf64ac5cd346", + "blob_size": 1533, + "weight": 1533, + "fee": 30660000 + } + ]); + + let tx_pool_adds: Vec = serde_json::from_value(json1.clone()).unwrap(); + let json2 = serde_json::to_value(&tx_pool_adds).unwrap(); + assert_json_eq!(json1, json2); + } + + #[test] + fn test_chain_main_json() { + let json1 = json!([ + { + "major_version": 16, + "minor_version": 16, + "timestamp": 1726973843, + "prev_id": "ce3731311b7e4c1e58a2fe902dbb5c60bb2c0decc163d5397fa52a260d7f09c1", + "nonce": 537273946, + "miner_tx": { + "version": 2, + "unlock_time": 3242818, + "inputs": [ + { + "gen": { + "height": 3242758 + } + } + ], + "outputs": [ + { + "amount": 618188180000_u64, + "to_tagged_key": { + "key": "83faf44df7e9fb4cf54a8dd6a63868507d1a1896bdb35ea9110d739d5da6cf21", + "view_tag": "38" + } + } + ], + "extra": "010e3356a86dbb339354afbc693408dfe8648bffd0b276e6a431861eb73643d88d02115162e362c98e2d00000000000000000000", + "signatures": [], + "ringct": { + "type": 0 + } + }, + "tx_hashes": [ + "2c1b67d3f10b21270cac116e6d5278dc4024ee2d727e4ad56d6dedb1abc0270c", + "c2cfec0de23229a2ab80ca464cef66fc1cad53647a444f048834ec236c38c867", + "03c7649af2373c0f739d3c2eff9ee1580986b460d2abdd5e2aa332281e52da7e", + "1e0834cc658599e786040bdcd9b589a5e8d975233b72279d04ece1a3dd5572b0", + "ba65c30150e906a8799ee99bb2e6481873e42ed8b025cf967c5798528ddc81b4", + "6fc7b1da1cf433edafb142173e9ac13fe05142a36d8a72e9efdf7a3b94da11d6", + "847c06dcda4540d45cae868d4d031781bd87d9bfa4b2186a611428f52e68ccee", + "79f87a1b2fc17295d2cf25b6a65dd17fd8630829ee50f9c48f15e4a24e72d872", + "32b4f7ce6d864006b274dbd73fc8058151d0fd2dd0bb4b423120e32451fd59eb", + "430fe7fa00b63b68b301a4e4810bef2b5be1f651dba8c360e86eac61227382e7", + "9f8d2bf5e39071abccb336404ea72ab85cb731500a1d386a3bf537b9046df29d", + "f63893b8226ca28c290cb65541d60c1675dda1e2a77a629b6b2d7c3886240b23", + "ee8608b6e80cce14beaf95f747f4da8e40e40a49ad1adc20038843a6da3df3c6", + "05783765c150ed1e46d6380726e7ca1f788305754e553f5f2d49b9f09aaaf88d", + "20b4b95e62f45b72014d6ab14edb0b31e273cdc8c8d106068dd32ef6e92fc0a2", + "9230fb0a9dce8e2ca7e109ebf3480838251691de8ed73ea91f74723c5cf19bac", + "d59cf84a25f56ec0f1352bb05645efe9b9326598c4f7c5bc39a87eb7a20c48fc", + "465deb73c48a460df71861d61666dabb906648035a1fecfd0e988ee37616c655", + "5767bc633729ba4555561510f3db739431b16744234dcd549a0d346eaa6685b1", + "2c8d9af5d5774de96e67835ac5adbc6ca5579125b08bc907b395645eea6410ec", + "d385c884a0687c3360725dd3a3f6acf6f64bf38d8eeea1644d80bc23b13ee870", + "b2bc7e9fa9c1da08a8b6ee58505611c05bc388fd30aece00e9a0041470f7e950", + "69a4a79b50d42d372e91c6608c2652d1d5ddd343526c387ef6cf1e3c158b1765", + "ef508dfa79bbedd226835c42a9d000a64cc4abe0250c9aa55fd968224e2b45c3", + "0413c3b3fc621c472e10a102d77456db506f0df10a909833aed0c6738fb31eeb", + "e0c52d6d649c2f1abce4c6ffce4dd75a23308afbb6abe33af53da232c40caf5f", + "cd1fd68d2a15002ca6236083ff6ae165c8fd922f410da79640a4342fd8ebd1c8", + "ba746f80ca4ee496f4188ba278f1ed69a913238450d52bd2e2f3d3bf6fdd43d3", + "13c964bc13a55621b7bbbfe9a6d703536d951bfa19eedee93dd1286020959021", + "41a6f8d0df227a401a9bd6f5c0fbc21ed89f515ea5c8434a087e8b880080ee1f", + "41c2b5994284790b1ee158f7b87aa1231c14975d6456a91ff6f93c6f81277965", + "7e6b7f169cc6cab88e652771157cf8c2eb6f69dffb6939a79b34c6554fe6c00b", + "619517d9d138bf95c6b77eb801526b8419616de2b8618ccfd3b6d1c10364bc64", + "52cca64fb20fc2f6d06034a1a2d9b5665972ebc2569ec69f8d473caada309add", + "219c106d09da5a27b339ea0f070da090779b31ef9ccfa90d6d25e7388341eff9", + 
"e07ce6e96e73cff80c9cc4c1b349ad1ef53cff210b876d4e7afd89fcc8b2e5dd", + "e98f2a00b2892cd65c0252d956d88a4bb8024c7db98ca003c127b097f097f276", + "ed81aa398071fe495e37095e51ff50053e132bd11f27ba9c06ac4bf4063b756f", + "667d29a0cefa311e06fcfc22c98ef75edf81deb6c8a812492eb255a049c826db", + "8b16e8cbc1765247456bd67a3106498f686401b7529dc0f6b03360caf8671135", + "013e443e63259748f6d1a5653374826618ba066b7febcf55c829333f0dd9a6c3", + "517a05d82de59a973eb4d343c45558841c9165ccd75ca7c9d2e1a35f80c26c15", + "af74d5dd44cfed8f40f853a6fc405dae23d547482296f8dbbc13c1aed2c3d8c5", + "b5086746e805d875cbbbbb49e19aac29d9b75019f656fab8516cdf64ac5cd346", + "cfcda18d058656797a1272b384774dcfc26a504a24298aa49ba060eb6b4a19e0", + "1f380660a99030cc45f85ba8ee0e0541035c0fde719c84aa692796328974c9dd", + "53127181a0301a27b3a2749dc997556b211d949a99aa34d1c52d5c54220f49d2", + "5d50a66df97f4decc4ecc3f5030589ef966d5af84a995f7fb14f1c02ae9704db", + "cdab9628acdb57c460e292660e7a07caf2ddbcffdfff92f3e5e4fb12119a11ca", + "e740a098a74d7a66a821c4ac3c5f913a82fc7445b5593cc5fa3e48ad1b4589b1", + "760549176fec210cfe0ff58eabbf2670cf33b4cd3942a3b60a98bf8f328a6d01", + "961b0956aa6303ed8ca1687d93ed46b9aa8a0203ec4ce0cbc2e86b364fbfb613", + "b9db041b2c3bfc6b5b0facb638b0b4643eec76b060039a6b11fb43682ed77a97", + "1011c321eb386b9975e8124bdb130790dcf4ac0021da3103cabbf7dfa18ccea7", + "6a9d3d15be4b25bd544d96bb1d7685e53f9484735bb22994feffb9037009aeeb", + "bf20d6193890cf7fdead9e3b60197564c663b5a62eda782a49d4aa7819bb9665", + "472d28f9d25a95e625eb808ff3827e7f6792009e1ba0b3b21951f3058b65a75d", + "e3931b2b66da07f983d2235d9d0b3a3098008458bdc0c1ad4370fae73e1eaa9e", + "e18a0dea6382c95aa4089a971190683b171e9405c06fd4111924144600f3bcf3", + "1a336bcf24026307821b76b9ca18b178c285c591c5df9906e3ffbd2050ccd356", + "8ca2d0e5ae9b9981bb8b76ba0da383c585664b2a2f4e861d58aab00c9b0cc808", + "e1866c27023ccea276034c4d572eab42713132e4fdb2aafa9488f6d74cd49303", + "3674cfafba4cdea5775a72a82e5d553bf180beab456b3cbaa7b41a1574fe1948", + "9bb400dd317425f40176c3094a5573037b0217e0b60761cb66a8fa15b63b36c3", + "c078048028aca3e9bc40f68f4d42ef25c6af2cef4da20bf3be70dd6a23b82d52", + "c28cc85f945085e70259ed02131ae3f8c5992e789c9c75c2c6e257306beaf26e", + "4c2b121795fe2b90fda84813543952382daa29c7b96edd9f96040df13e48e347", + "63c6fba30b5471fd60e715cbaf4448badafde68dbc42c54d96b56dd2c4bf2d15", + "a4240138ecfe736113581f318f261a01992eaa8fa5b7bd6938d9dbeb65aa85d7", + "b9d088a7b21f655d0cf50f8404e874f4d1655fb5565a354d2c0dd6d113619c66", + "9133e7e98a83f6e10a7fd44c104d9124d93e0d3e920f5c160873b394dd3a2fcb", + "953985dbd0ea6f86746e83be144ec2ff2897ef1f3506eede083b893e98dd63ea", + "83af840c4cad46de96c86fcf700ade32e73260d4a16cefa330cb5a722ef59fdf", + "eea3c0c2b016ea0c269f954fd8172c3d118f08103c9842b81b05290c9faf3780", + "ac43a363fdb81fa4f6df1cb06ba49a5f4eeef411957cf2afad55cbc1e79bc4d1", + "ca72cf7bda22aed15c16ca67e7b6cc57109cdc86d4ffe38fd71210a5380fcada", + "477dc1cd62106d9df6b37f8515579a48d01b310387087c08ce7062a8eb5df98d", + "d47b6dcd3b13288825c954df6c6e30eb683d1f79434beaee7172082f8ae74280", + "9c64ef20c69589c56fcc5f3a0d10f6957ecea248e44acb432aaf16a88eeef946", + "d2aa256bfd61bdb64ac38da6cbc3e77fb315bb9fbaf422087c10345377df44f6", + "8b9623e4513594a6eaeb3475ea7d0eb585dd8f6e20e21c316db0b942fada2336", + "860725ed0bd18c744e6b8b02888ad88be1cf23d7153131b220a0f9fbb76976bf", + "387cc6e807efc263a0ad6a30e6313a27d16abef038264d0afa0e6ad943be55da" + ] + } + ]); + + let chain_main: Vec = serde_json::from_value(json1.clone()).unwrap(); + let json2 = serde_json::to_value(&chain_main).unwrap(); + assert_json_eq!(json1, json2); + } + + #[test] + 
fn test_chain_main_min_json() { + let json1 = json!({ + "first_height": 3242758, + "first_prev_id": "ce3731311b7e4c1e58a2fe902dbb5c60bb2c0decc163d5397fa52a260d7f09c1", + "ids": [ + "ee1238b884e64f7e438223aa8d42d0efc15e7640f1a432448fbad116dc72f1b2" + ] + }); + + let chain_main_min: ChainMainMin = serde_json::from_value(json1.clone()).unwrap(); + let json2 = serde_json::to_value(&chain_main_min).unwrap(); + assert_json_eq!(json1, json2); + } + + #[test] + fn test_miner_data_json() { + let json1 = json!({ + "major_version": 16, + "height": 3242764, + "prev_id": "dc53c24683dca14586fb2909b9aa4a44adb524e010d438e2491e7d8cc1c80831", + "seed_hash": "526577d6e6689ba8736c16ccc76e6ce4ada3b0ceeaa3a2260b96ba188a17d705", + "difficulty": "0x526f2623ce", + "median_weight": 300000, + "already_generated_coins": 18446744073709551615_u64, + "tx_backlog": [ + { + "id": "dbec64651bb4e83d0e9a05c2826bde605a940f12179fab0ab5dc8bc4392c776b", + "weight": 2905, + "fee": 929600000 + }, + { + "id": "ec5728dd1fbd98db1f93d612826e73b95f52cca49f247a6dbc35390f45766a7d", + "weight": 2222, + "fee": 44440000 + }, + { + "id": "41f613b1a470af494e0a705993e305dfaad3e365fcc0b0db0118256fc54559aa", + "weight": 2221, + "fee": 44420000 + }, + { + "id": "34fa33bf96dc2f825fe870e8f5402be6225c1623b345224e0dbc38b6407873de", + "weight": 2217, + "fee": 709440000 + } + ] + }); + + let miner_data: MinerData = serde_json::from_value(json1.clone()).unwrap(); + let json2 = serde_json::to_value(&miner_data).unwrap(); + assert_json_eq!(json1, json2); + } +} diff --git a/zmq/types/src/lib.rs b/zmq/types/src/lib.rs new file mode 100644 index 0000000..3f9562b --- /dev/null +++ b/zmq/types/src/lib.rs @@ -0,0 +1 @@ +pub mod json_message_types; From f3c1a5c2aa4629bf69b75268de21fc9112f09405 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Thu, 21 Nov 2024 17:32:48 -0500 Subject: [PATCH 12/14] Fix #346 (#347) fixes --- cryptonight/src/util.rs | 2 +- storage/blockchain/src/ops/block.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cryptonight/src/util.rs b/cryptonight/src/util.rs index 7fbf5cb..de8b70b 100644 --- a/cryptonight/src/util.rs +++ b/cryptonight/src/util.rs @@ -49,7 +49,7 @@ pub(crate) fn subarray_copy + ?Sized, U: Copy, const LEN: usize>( /// A mutable reference to a fixed-size subarray of type `[U; LEN]`. /// /// # Panics -/// Panics if `start + LEN > array.as_ref().len()`. +/// Panics if `start + LEN > array.as_mut().len()`. #[inline] pub(crate) fn subarray_mut + ?Sized, U, const LEN: usize>( array: &mut T, diff --git a/storage/blockchain/src/ops/block.rs b/storage/blockchain/src/ops/block.rs index 6d32fd8..cc5cb80 100644 --- a/storage/blockchain/src/ops/block.rs +++ b/storage/blockchain/src/ops/block.rs @@ -42,7 +42,7 @@ use crate::{ /// # Panics /// This function will panic if: /// - `block.height > u32::MAX` (not normally possible) -/// - `block.height` is not != [`chain_height`] +/// - `block.height` is != [`chain_height`] // no inline, too big. 
pub fn add_block( block: &VerifiedBlockInformation, From caa08d5eaa1063919797a830eb0d9358d2a72b80 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Mon, 25 Nov 2024 15:10:42 -0500 Subject: [PATCH 13/14] benches: initial implementation (#196) * add readme * readme, basic examples * name changes, bin impl * example, docs * book * add `cuprate-criterion-example` * docs, tracing * fix clippy * docs * lib readme * json-rpc benchmarks * add to crates.md * add `fixme` * fix `cargo b` failing this `cfg()` existing makes a regular workspace `cargo b` fail * fix cargo.toml --- Cargo.lock | 313 +++++++++++++++++- Cargo.toml | 95 ++++-- benches/README.md | 6 +- benches/benchmark/bin/Cargo.toml | 43 +++ benches/benchmark/bin/README.md | 27 ++ benches/benchmark/bin/src/log.rs | 29 ++ benches/benchmark/bin/src/main.rs | 49 +++ benches/benchmark/bin/src/print.rs | 38 +++ benches/benchmark/bin/src/run.rs | 36 ++ benches/benchmark/bin/src/timings.rs | 5 + benches/benchmark/example/Cargo.toml | 17 + benches/benchmark/example/README.md | 3 + benches/benchmark/example/src/lib.rs | 42 +++ benches/benchmark/lib/Cargo.toml | 18 + benches/benchmark/lib/README.md | 15 + benches/benchmark/lib/src/benchmark.rs | 45 +++ benches/benchmark/lib/src/lib.rs | 5 + benches/criterion/cuprate-json-rpc/Cargo.toml | 23 ++ .../cuprate-json-rpc/benches/main.rs | 8 + .../cuprate-json-rpc/benches/response.rs | 110 ++++++ benches/criterion/cuprate-json-rpc/src/lib.rs | 2 + benches/criterion/example/Cargo.toml | 21 ++ benches/criterion/example/README.md | 14 + benches/criterion/example/benches/example.rs | 48 +++ benches/criterion/example/benches/main.rs | 10 + benches/criterion/example/src/lib.rs | 13 + books/architecture/src/SUMMARY.md | 13 +- books/architecture/src/appendix/crates.md | 8 + .../src/benchmarking/criterion.md | 1 - .../src/benchmarking/criterion/creating.md | 21 ++ .../src/benchmarking/criterion/intro.md | 4 + .../src/benchmarking/criterion/running.md | 15 + .../src/benchmarking/cuprate/creating.md | 57 ++++ .../src/benchmarking/cuprate/intro.md | 37 +++ .../src/benchmarking/cuprate/running.md | 16 + .../architecture/src/benchmarking/harness.md | 1 - books/architecture/src/benchmarking/intro.md | 23 +- 37 files changed, 1188 insertions(+), 43 deletions(-) create mode 100644 benches/benchmark/bin/Cargo.toml create mode 100644 benches/benchmark/bin/README.md create mode 100644 benches/benchmark/bin/src/log.rs create mode 100644 benches/benchmark/bin/src/main.rs create mode 100644 benches/benchmark/bin/src/print.rs create mode 100644 benches/benchmark/bin/src/run.rs create mode 100644 benches/benchmark/bin/src/timings.rs create mode 100644 benches/benchmark/example/Cargo.toml create mode 100644 benches/benchmark/example/README.md create mode 100644 benches/benchmark/example/src/lib.rs create mode 100644 benches/benchmark/lib/Cargo.toml create mode 100644 benches/benchmark/lib/README.md create mode 100644 benches/benchmark/lib/src/benchmark.rs create mode 100644 benches/benchmark/lib/src/lib.rs create mode 100644 benches/criterion/cuprate-json-rpc/Cargo.toml create mode 100644 benches/criterion/cuprate-json-rpc/benches/main.rs create mode 100644 benches/criterion/cuprate-json-rpc/benches/response.rs create mode 100644 benches/criterion/cuprate-json-rpc/src/lib.rs create mode 100644 benches/criterion/example/Cargo.toml create mode 100644 benches/criterion/example/README.md create mode 100644 benches/criterion/example/benches/example.rs create mode 100644 benches/criterion/example/benches/main.rs create mode 100644 
benches/criterion/example/src/lib.rs delete mode 100644 books/architecture/src/benchmarking/criterion.md create mode 100644 books/architecture/src/benchmarking/criterion/creating.md create mode 100644 books/architecture/src/benchmarking/criterion/intro.md create mode 100644 books/architecture/src/benchmarking/criterion/running.md create mode 100644 books/architecture/src/benchmarking/cuprate/creating.md create mode 100644 books/architecture/src/benchmarking/cuprate/intro.md create mode 100644 books/architecture/src/benchmarking/cuprate/running.md delete mode 100644 books/architecture/src/benchmarking/harness.md diff --git a/Cargo.lock b/Cargo.lock index 08c017c..ac36c56 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -29,6 +29,15 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -44,6 +53,12 @@ dependencies = [ "libc", ] +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "anstyle" version = "1.0.10" @@ -347,6 +362,12 @@ dependencies = [ "serde", ] +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "cc" version = "1.1.31" @@ -380,6 +401,33 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "clap" version = "4.5.20" @@ -478,6 +526,42 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "is-terminal", + "itertools", + "num-traits", + "once_cell", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools", +] + [[package]] name = "crossbeam" version = "0.8.4" @@ -534,6 +618,12 @@ version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +[[package]] +name = "crunchy" +version = "0.2.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + [[package]] name = "crypto-bigint" version = "0.5.5" @@ -583,6 +673,30 @@ dependencies = [ "tokio", ] +[[package]] +name = "cuprate-benchmark" +version = "0.0.0" +dependencies = [ + "cfg-if", + "cuprate-benchmark-example", + "cuprate-benchmark-lib", + "serde", + "serde_json", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "cuprate-benchmark-example" +version = "0.0.0" +dependencies = [ + "cuprate-benchmark-lib", +] + +[[package]] +name = "cuprate-benchmark-lib" +version = "0.0.0" + [[package]] name = "cuprate-blockchain" version = "0.0.0" @@ -685,6 +799,25 @@ dependencies = [ name = "cuprate-constants" version = "0.1.0" +[[package]] +name = "cuprate-criterion-example" +version = "0.0.0" +dependencies = [ + "criterion", + "function_name", + "serde_json", +] + +[[package]] +name = "cuprate-criterion-json-rpc" +version = "0.0.0" +dependencies = [ + "criterion", + "cuprate-json-rpc", + "function_name", + "serde_json", +] + [[package]] name = "cuprate-cryptonight" version = "0.1.0" @@ -1304,6 +1437,21 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "function_name" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1ab577a896d09940b5fe12ec5ae71f9d8211fff62c919c03a3750a9901e98a7" +dependencies = [ + "function_name-proc-macro", +] + +[[package]] +name = "function_name-proc-macro" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673464e1e314dd67a0fd9544abc99e8eb28d0c7e3b69b033bcff9b2d00b87333" + [[package]] name = "funty" version = "2.0.0" @@ -1453,6 +1601,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] + [[package]] name = "hashbrown" version = "0.14.5" @@ -1680,6 +1838,26 @@ dependencies = [ "hashbrown 0.15.0", ] +[[package]] +name = "is-terminal" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.11" @@ -1776,6 +1954,15 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + [[package]] name = "matchit" version = "0.7.3" @@ -2031,6 +2218,12 @@ version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + [[package]] name 
= "openssl-probe" version = "0.1.5" @@ -2168,6 +2361,34 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "plotters" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" + +[[package]] +name = "plotters-svg" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" +dependencies = [ + "plotters-backend", +] + [[package]] name = "ppv-lite86" version = "0.2.20" @@ -2242,7 +2463,7 @@ dependencies = [ "rand", "rand_chacha", "rand_xorshift", - "regex-syntax", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -2408,6 +2629,44 @@ dependencies = [ "syn", ] +[[package]] +name = "regex" +version = "1.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.7", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + [[package]] name = "regex-syntax" version = "0.8.5" @@ -2535,6 +2794,15 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + [[package]] name = "schannel" version = "0.1.26" @@ -2889,6 +3157,16 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a693d0c8cf16973fac5a93fbe47b8c6452e7097d4fcac49f3d7a18e39c76e62e" +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version = "1.8.0" @@ -3105,10 +3383,14 @@ version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ + "matchers", "nu-ansi-term", + "once_cell", + "regex", "sharded-slab", 
"smallvec", "thread_local", + "tracing", "tracing-core", "tracing-log", ] @@ -3214,6 +3496,16 @@ dependencies = [ "libc", ] +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "want" version = "0.3.1" @@ -3284,6 +3576,16 @@ version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +[[package]] +name = "web-sys" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webpki-roots" version = "0.26.6" @@ -3309,6 +3611,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +dependencies = [ + "windows-sys 0.52.0", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index 1bfd680..a507631 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,36 +1,57 @@ [workspace] resolver = "2" - members = [ + # Binaries "binaries/cuprated", - "constants", + + # Benchmarks + "benches/benchmark/bin", + "benches/benchmark/lib", + "benches/benchmark/example", + "benches/criterion/example", + "benches/criterion/cuprate-json-rpc", + + # Consensus "consensus", "consensus/context", "consensus/fast-sync", "consensus/rules", - "cryptonight", - "helper", + + # Net "net/epee-encoding", "net/fixed-bytes", "net/levin", "net/wire", + + # P2P "p2p/p2p", "p2p/p2p-core", "p2p/bucket", "p2p/dandelion-tower", "p2p/async-buffer", "p2p/address-book", + + # Storage "storage/blockchain", "storage/service", "storage/txpool", "storage/database", - "pruning", - "test-utils", - "types", + + # RPC "rpc/json-rpc", "rpc/types", "rpc/interface", + + # ZMQ "zmq/types", + + # Misc + "constants", + "cryptonight", + "helper", + "pruning", + "test-utils", + "types", ] [profile.release] @@ -53,34 +74,36 @@ opt-level = 3 [workspace.dependencies] # Cuprate members -cuprate-fast-sync = { path = "consensus/fast-sync", default-features = false } -cuprate-consensus-rules = { path = "consensus/rules", default-features = false } -cuprate-constants = { path = "constants", default-features = false } -cuprate-consensus = { path = "consensus", default-features = false } -cuprate-consensus-context = { path = "consensus/context", default-features = false } -cuprate-cryptonight = { path = "cryptonight", default-features = false } -cuprate-helper = { path = "helper", default-features = false } -cuprate-epee-encoding = { path = "net/epee-encoding", default-features = false } -cuprate-fixed-bytes = { path = "net/fixed-bytes", default-features = false } -cuprate-levin = { path = "net/levin", default-features = false } -cuprate-wire = { path = "net/wire", default-features = false } -cuprate-p2p = { path = "p2p/p2p", default-features = false } -cuprate-p2p-core = { path = "p2p/p2p-core", default-features = false } -cuprate-p2p-bucket = { path = "p2p/p2p-bucket", default-features = false } -cuprate-dandelion-tower = { 
path = "p2p/dandelion-tower", default-features = false } -cuprate-async-buffer = { path = "p2p/async-buffer", default-features = false } -cuprate-address-book = { path = "p2p/address-book", default-features = false } -cuprate-blockchain = { path = "storage/blockchain", default-features = false } -cuprate-database = { path = "storage/database", default-features = false } -cuprate-database-service = { path = "storage/service", default-features = false } -cuprate-txpool = { path = "storage/txpool", default-features = false } -cuprate-pruning = { path = "pruning", default-features = false } -cuprate-test-utils = { path = "test-utils", default-features = false } -cuprate-types = { path = "types", default-features = false } -cuprate-json-rpc = { path = "rpc/json-rpc", default-features = false } -cuprate-rpc-types = { path = "rpc/types", default-features = false } -cuprate-rpc-interface = { path = "rpc/interface", default-features = false } -cuprate-zmq-types = { path = "zmq/types", default-features = false } +cuprate-benchmark-lib = { path = "benches/benchmark/lib", default-features = false } +cuprate-benchmark-example = { path = "benches/benchmark/example", default-features = false } +cuprate-fast-sync = { path = "consensus/fast-sync", default-features = false } +cuprate-consensus-rules = { path = "consensus/rules", default-features = false } +cuprate-constants = { path = "constants", default-features = false } +cuprate-consensus = { path = "consensus", default-features = false } +cuprate-consensus-context = { path = "consensus/context", default-features = false } +cuprate-cryptonight = { path = "cryptonight", default-features = false } +cuprate-helper = { path = "helper", default-features = false } +cuprate-epee-encoding = { path = "net/epee-encoding", default-features = false } +cuprate-fixed-bytes = { path = "net/fixed-bytes", default-features = false } +cuprate-levin = { path = "net/levin", default-features = false } +cuprate-wire = { path = "net/wire", default-features = false } +cuprate-p2p = { path = "p2p/p2p", default-features = false } +cuprate-p2p-core = { path = "p2p/p2p-core", default-features = false } +cuprate-p2p-bucket = { path = "p2p/p2p-bucket", default-features = false } +cuprate-dandelion-tower = { path = "p2p/dandelion-tower", default-features = false } +cuprate-async-buffer = { path = "p2p/async-buffer", default-features = false } +cuprate-address-book = { path = "p2p/address-book", default-features = false } +cuprate-blockchain = { path = "storage/blockchain", default-features = false } +cuprate-database = { path = "storage/database", default-features = false } +cuprate-database-service = { path = "storage/service", default-features = false } +cuprate-txpool = { path = "storage/txpool", default-features = false } +cuprate-pruning = { path = "pruning", default-features = false } +cuprate-test-utils = { path = "test-utils", default-features = false } +cuprate-types = { path = "types", default-features = false } +cuprate-json-rpc = { path = "rpc/json-rpc", default-features = false } +cuprate-rpc-types = { path = "rpc/types", default-features = false } +cuprate-rpc-interface = { path = "rpc/interface", default-features = false } +cuprate-zmq-types = { path = "zmq/types", default-features = false } # External dependencies anyhow = { version = "1", default-features = false } @@ -125,6 +148,8 @@ tracing-subscriber = { version = "0.3", default-features = false } tracing = { version = "0.1", default-features = false } ## workspace.dev-dependencies +criterion = { version = "0.5" } 
+function_name = { version = "0.3" } monero-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" } monero-simple-request-rpc = { git = "https://github.com/Cuprate/serai.git", rev = "d5205ce" } tempfile = { version = "3" } diff --git a/benches/README.md b/benches/README.md index 4640904..af6bb93 100644 --- a/benches/README.md +++ b/benches/README.md @@ -1 +1,5 @@ -# TODO +# Benches +This directory contains Cuprate's benchmarks and benchmarking utilities. + +See the [`Benchmarking` section in the Architecture book](https://architecture.cuprate.org/benchmarking/intro.html) +to see how to create and run these benchmarks. \ No newline at end of file diff --git a/benches/benchmark/bin/Cargo.toml b/benches/benchmark/bin/Cargo.toml new file mode 100644 index 0000000..36d0b2c --- /dev/null +++ b/benches/benchmark/bin/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "cuprate-benchmark" +version = "0.0.0" +edition = "2021" +description = "Cuprate's benchmarking binary" +license = "MIT" +authors = ["hinto-janai"] +repository = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin" +keywords = ["cuprate", "benchmarking", "binary"] + +[features] +# All new benchmarks should be added here! +all = ["example"] + +# Non-benchmark features. +default = [] +json = [] +trace = [] +debug = [] +warn = [] +info = [] +error = [] + +# Benchmark features. +# New benchmarks should be added here! +example = [ + "dep:cuprate-benchmark-example" +] + +[dependencies] +cuprate-benchmark-lib = { workspace = true } +cuprate-benchmark-example = { workspace = true, optional = true } + +cfg-if = { workspace = true } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true, features = ["std"] } +tracing = { workspace = true, features = ["std", "attributes"] } +tracing-subscriber = { workspace = true, features = ["fmt", "std", "env-filter"] } + +[dev-dependencies] + +[lints] +workspace = true \ No newline at end of file diff --git a/benches/benchmark/bin/README.md b/benches/benchmark/bin/README.md new file mode 100644 index 0000000..ad0700f --- /dev/null +++ b/benches/benchmark/bin/README.md @@ -0,0 +1,27 @@ +## `cuprate-benchmark` +This crate links all benchmarks together into a single binary that can be run as: `cuprate-benchmark`. + +`cuprate-benchmark` will run all enabled benchmarks sequentially and print data at the end. + +## Benchmarks +Benchmarks are opt-in and enabled via features. + +| Feature | Enables which benchmark crate? | +|----------|--------------------------------| +| example | cuprate-benchmark-example | +| database | cuprate-benchmark-database | + +## Features +These are features that aren't for enabling benchmarks, but rather for other things. + +Since `cuprate-benchmark` is built right before it is ran, +these features almost act like command line arguments. + +| Features | Does what | +|----------|-----------| +| json | Prints JSON timings instead of a markdown table +| trace | Use the `trace` log-level +| debug | Use the `debug` log-level +| warn | Use the `warn` log-level +| info | Use the `info` log-level (default) +| error | Use the `error` log-level \ No newline at end of file diff --git a/benches/benchmark/bin/src/log.rs b/benches/benchmark/bin/src/log.rs new file mode 100644 index 0000000..455f130 --- /dev/null +++ b/benches/benchmark/bin/src/log.rs @@ -0,0 +1,29 @@ +use cfg_if::cfg_if; +use tracing::{info, instrument, Level}; +use tracing_subscriber::FmtSubscriber; + +/// Initializes the `tracing` logger. 
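+///
+/// The maximum log level is chosen at compile time via the `trace`, `debug`,
+/// `warn`, `info`, and `error` feature flags listed in this crate's README,
+/// falling back to `INFO` when none of them are enabled.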
+#[instrument]
+pub(crate) fn init_logger() {
+    const LOG_LEVEL: Level = {
+        cfg_if! {
+            if #[cfg(feature = "trace")] {
+                Level::TRACE
+            } else if #[cfg(feature = "debug")] {
+                Level::DEBUG
+            } else if #[cfg(feature = "warn")] {
+                Level::WARN
+            } else if #[cfg(feature = "info")] {
+                Level::INFO
+            } else if #[cfg(feature = "error")] {
+                Level::ERROR
+            } else {
+                Level::INFO
+            }
+        }
+    };
+
+    FmtSubscriber::builder().with_max_level(LOG_LEVEL).init();
+
+    info!("Log level: {LOG_LEVEL}");
+}
diff --git a/benches/benchmark/bin/src/main.rs b/benches/benchmark/bin/src/main.rs
new file mode 100644
index 0000000..02c480a
--- /dev/null
+++ b/benches/benchmark/bin/src/main.rs
@@ -0,0 +1,49 @@
+#![doc = include_str!("../README.md")]
+#![allow(
+    unused_crate_dependencies,
+    reason = "this crate imports many potentially unused dependencies"
+)]
+
+mod log;
+mod print;
+mod run;
+mod timings;
+
+use cfg_if::cfg_if;
+
+/// What `main()` does:
+/// 1. Run all enabled benchmarks
+/// 2. Record benchmark timings
+/// 3. Print timing data
+///
+/// To add a new benchmark to be run here:
+/// 1. Copy + paste a `cfg_if` block
+/// 2. Change it to your benchmark's feature flag
+/// 3. Change it to your benchmark's type
+#[allow(
+    clippy::allow_attributes,
+    unused_variables,
+    unused_mut,
+    unreachable_code,
+    reason = "clippy does not account for all cfg()s"
+)]
+fn main() {
+    log::init_logger();
+
+    let mut timings = timings::Timings::new();
+
+    cfg_if! {
+        if #[cfg(not(any(feature = "example")))] {
+            println!("No feature specified. Use `--features $BENCHMARK_FEATURE` when building.");
+            return;
+        }
+    }
+
+    cfg_if! {
+        if #[cfg(feature = "example")] {
+            run::run_benchmark::<cuprate_benchmark_example::Example>(&mut timings);
+        }
+    }
+
+    print::print_timings(&timings);
+}
diff --git a/benches/benchmark/bin/src/print.rs b/benches/benchmark/bin/src/print.rs
new file mode 100644
index 0000000..36a5f05
--- /dev/null
+++ b/benches/benchmark/bin/src/print.rs
@@ -0,0 +1,38 @@
+#![expect(dead_code, reason = "code hidden behind feature flags")]
+
+use cfg_if::cfg_if;
+
+use crate::timings::Timings;
+
+/// Print the final markdown table of benchmark timings.
+pub(crate) fn print_timings(timings: &Timings) {
+    println!("\nFinished all benchmarks, printing results:");
+
+    cfg_if! {
+        if #[cfg(feature = "json")] {
+            print_timings_json(timings);
+        } else {
+            print_timings_markdown(timings);
+        }
+    }
+}
+
+/// Default timing formatting.
+pub(crate) fn print_timings_markdown(timings: &Timings) {
+    let mut s = String::new();
+    s.push_str("| Benchmark | Time (seconds) |\n");
+    s.push_str("|------------------------------------|----------------|");
+
+    #[expect(clippy::iter_over_hash_type)]
+    for (k, v) in timings {
+        s += &format!("\n| {k:<34} | {v:<14} |");
+    }
+
+    println!("\n{s}");
+}
+
+/// Enabled via `json` feature.
+pub(crate) fn print_timings_json(timings: &Timings) {
+    let json = serde_json::to_string_pretty(timings).unwrap();
+    println!("\n{json}");
+}
diff --git a/benches/benchmark/bin/src/run.rs b/benches/benchmark/bin/src/run.rs
new file mode 100644
index 0000000..05a220f
--- /dev/null
+++ b/benches/benchmark/bin/src/run.rs
@@ -0,0 +1,36 @@
+use tracing::{info, instrument, trace};
+
+use cuprate_benchmark_lib::Benchmark;
+
+use crate::timings::Timings;
+
+/// Run a [`Benchmark`] and record its timing.
+#[instrument(skip_all)]
+pub(crate) fn run_benchmark<B: Benchmark>(timings: &mut Timings) {
+    // Get the benchmark name.
+    let name = B::name();
+    trace!("Running benchmark: {name}");
+
+    // Setup the benchmark input.
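+    // (`SETUP` runs outside the timed section below; only `MAIN` is measured.)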
+    let input = B::SETUP();
+
+    // Sleep before running the benchmark.
+    trace!("Pre-benchmark, sleeping for: {:?}", B::PRE_SLEEP_DURATION);
+    std::thread::sleep(B::PRE_SLEEP_DURATION);
+
+    // Run/time the benchmark.
+    let now = std::time::Instant::now();
+    B::MAIN(input);
+    let time = now.elapsed().as_secs_f32();
+
+    // Print the benchmark timings.
+    info!("{name:>34} ... {time}");
+    assert!(
+        timings.insert(name, time).is_none(),
+        "There were 2 benchmarks with the same name - this collides the final output: {name}",
+    );
+
+    // Sleep for a cooldown period after the benchmark run.
+    trace!("Post-benchmark, sleeping for: {:?}", B::POST_SLEEP_DURATION);
+    std::thread::sleep(B::POST_SLEEP_DURATION);
+}
diff --git a/benches/benchmark/bin/src/timings.rs b/benches/benchmark/bin/src/timings.rs
new file mode 100644
index 0000000..34a0795
--- /dev/null
+++ b/benches/benchmark/bin/src/timings.rs
@@ -0,0 +1,5 @@
+/// Benchmark timing data.
+///
+/// - Key = benchmark name
+/// - Value = benchmark time in seconds
+pub(crate) type Timings = std::collections::HashMap<&'static str, f32>;
diff --git a/benches/benchmark/example/Cargo.toml b/benches/benchmark/example/Cargo.toml
new file mode 100644
index 0000000..5728bcd
--- /dev/null
+++ b/benches/benchmark/example/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "cuprate-benchmark-example"
+version = "0.0.0"
+edition = "2021"
+description = "Example showcasing Cuprate's benchmarking harness"
+license = "MIT"
+authors = ["hinto-janai"]
+repository = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/example"
+keywords = ["cuprate", "benchmarking", "example"]
+
+[dependencies]
+cuprate-benchmark-lib = { path = "../lib" }
+
+[dev-dependencies]
+
+[lints]
+workspace = true
\ No newline at end of file
diff --git a/benches/benchmark/example/README.md b/benches/benchmark/example/README.md
new file mode 100644
index 0000000..be6b716
--- /dev/null
+++ b/benches/benchmark/example/README.md
@@ -0,0 +1,3 @@
+## `cuprate-benchmark-example`
+This crate contains a short example benchmark that shows how to implement and use
+`cuprate-benchmark-lib` so that it can be run by `cuprate-benchmark`.
\ No newline at end of file
diff --git a/benches/benchmark/example/src/lib.rs b/benches/benchmark/example/src/lib.rs
new file mode 100644
index 0000000..cc704a7
--- /dev/null
+++ b/benches/benchmark/example/src/lib.rs
@@ -0,0 +1,42 @@
+#![doc = include_str!("../README.md")]
+
+use std::hint::black_box;
+
+use cuprate_benchmark_lib::Benchmark;
+
+/// Marker struct that implements [`Benchmark`]
+pub struct Example;
+
+/// The input to our benchmark function.
+pub type ExampleBenchmarkInput = u64;
+
+/// The setup function that creates the input.
+pub const fn example_benchmark_setup() -> ExampleBenchmarkInput {
+    1
+}
+
+/// The main benchmarking function.
+#[expect(clippy::unit_arg)]
+pub fn example_benchmark_main(input: ExampleBenchmarkInput) {
+    // In this case, we're simply benchmarking the
+    // performance of simple arithmetic on the input data.
+
+    fn math(input: ExampleBenchmarkInput, number: u64) {
+        let x = input;
+        let x = black_box(x * number);
+        let x = black_box(x / number);
+        let x = black_box(x + number);
+        let _ = black_box(x - number);
+    }
+
+    for number in 1..100_000_000 {
+        black_box(math(input, number));
+    }
+}
+
+// This implementation will be run by `cuprate-benchmark`.
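+// It is only compiled in when the binary is built with this crate's `example`
+// feature flag, which enables the corresponding `cfg_if` branch in `main()`,
+// e.g. (an illustrative invocation, assuming the workspace layout above):
+//
+//     cargo run --release --package cuprate-benchmark --features example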
+impl Benchmark for Example { + type Input = ExampleBenchmarkInput; + const SETUP: fn() -> Self::Input = example_benchmark_setup; + const MAIN: fn(Self::Input) = example_benchmark_main; +} diff --git a/benches/benchmark/lib/Cargo.toml b/benches/benchmark/lib/Cargo.toml new file mode 100644 index 0000000..b0771f0 --- /dev/null +++ b/benches/benchmark/lib/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "cuprate-benchmark-lib" +version = "0.0.0" +edition = "2021" +description = "Cuprate's benchmarking library" +license = "MIT" +authors = ["hinto-janai"] +repository = "https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib" +keywords = ["cuprate", "benchmarking", "library"] + +[features] + +[dependencies] + +[dev-dependencies] + +[lints] +workspace = true \ No newline at end of file diff --git a/benches/benchmark/lib/README.md b/benches/benchmark/lib/README.md new file mode 100644 index 0000000..9ea79ae --- /dev/null +++ b/benches/benchmark/lib/README.md @@ -0,0 +1,15 @@ +## `cuprate-benchmark-lib` +This crate is the glue between +[`cuprate-benchmark`](https://github.com/Cuprate/cuprate/tree/benches/benches/benchmark/bin) +and all the benchmark crates. + +It defines the [`crate::Benchmark`] trait, which is the behavior of all benchmarks. + +See the [`cuprate-benchmark-example`](https://github.com/Cuprate/cuprate/tree/benches/benches/benchmark/example) +crate to see an example implementation of this trait. + +After implementing this trait, a few steps must +be done such that the `cuprate-benchmark` binary +can actually run your benchmark crate; see the +[`Benchmarking` section in the Architecture book](https://architecture.cuprate.org/benchmarking/intro.html) +to see how to do this. \ No newline at end of file diff --git a/benches/benchmark/lib/src/benchmark.rs b/benches/benchmark/lib/src/benchmark.rs new file mode 100644 index 0000000..4dca550 --- /dev/null +++ b/benches/benchmark/lib/src/benchmark.rs @@ -0,0 +1,45 @@ +//! Benchmarking trait. + +use std::time::Duration; + +/// A benchmarking function and its inputs. +pub trait Benchmark { + /// The benchmark's name. + /// + /// This is automatically implemented + /// as the name of the [`Self`] type. + // + // FIXME: use `const` instead of `fn` when stable + // + fn name() -> &'static str { + std::any::type_name::() + } + + /// Input to the main benchmarking function. + /// + /// This is passed to [`Self::MAIN`]. + type Input; + + /// Setup function to generate the input. + /// + /// This function is not timed. + const SETUP: fn() -> Self::Input; + + /// The main function to benchmark. + /// + /// The start of the timer begins right before + /// this function is called and ends after the + /// function returns. + const MAIN: fn(Self::Input); + + /// `cuprate-benchmark` will sleep for this [`Duration`] after + /// creating the [`Self::Input`], but before starting [`Self::MAIN`]. + /// + /// 1 second by default. + const PRE_SLEEP_DURATION: Duration = Duration::from_secs(1); + + /// `cuprate-benchmark` will sleep for this [`Duration`] after [`Self::MAIN`]. + /// + /// 1 second by default. 
+ const POST_SLEEP_DURATION: Duration = Duration::from_secs(1); +} diff --git a/benches/benchmark/lib/src/lib.rs b/benches/benchmark/lib/src/lib.rs new file mode 100644 index 0000000..a6bec82 --- /dev/null +++ b/benches/benchmark/lib/src/lib.rs @@ -0,0 +1,5 @@ +#![doc = include_str!("../README.md")] + +mod benchmark; + +pub use benchmark::Benchmark; diff --git a/benches/criterion/cuprate-json-rpc/Cargo.toml b/benches/criterion/cuprate-json-rpc/Cargo.toml new file mode 100644 index 0000000..a0cae64 --- /dev/null +++ b/benches/criterion/cuprate-json-rpc/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "cuprate-criterion-json-rpc" +version = "0.0.0" +edition = "2021" +description = "Criterion benchmarking for cuprate-json-rpc" +license = "MIT" +authors = ["hinto-janai"] +repository = "https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cuprate-json-rpc" +keywords = ["cuprate", "json-rpc", "criterion", "benchmark"] + +[dependencies] +cuprate-json-rpc = { workspace = true } + +criterion = { workspace = true } +function_name = { workspace = true } +serde_json = { workspace = true, features = ["default"] } + +[[bench]] +name = "main" +harness = false + +[lints] +workspace = true \ No newline at end of file diff --git a/benches/criterion/cuprate-json-rpc/benches/main.rs b/benches/criterion/cuprate-json-rpc/benches/main.rs new file mode 100644 index 0000000..a724943 --- /dev/null +++ b/benches/criterion/cuprate-json-rpc/benches/main.rs @@ -0,0 +1,8 @@ +//! Benchmarks for `cuprate-json-rpc`. +#![allow(unused_crate_dependencies)] + +mod response; + +criterion::criterion_main! { + response::serde, +} diff --git a/benches/criterion/cuprate-json-rpc/benches/response.rs b/benches/criterion/cuprate-json-rpc/benches/response.rs new file mode 100644 index 0000000..908a9f4 --- /dev/null +++ b/benches/criterion/cuprate-json-rpc/benches/response.rs @@ -0,0 +1,110 @@ +//! Benchmarks for [`Response`]. +#![allow(unused_attributes, unused_crate_dependencies)] + +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use function_name::named; +use serde_json::{from_str, to_string_pretty}; + +use cuprate_json_rpc::{Id, Response}; + +// `serde` benchmarks on `Response`. +// +// These are benchmarked as `Response` has a custom serde implementation. +criterion_group! { + name = serde; + config = Criterion::default(); + targets = + response_from_str_u8, + response_from_str_u64, + response_from_str_string_5_len, + response_from_str_string_10_len, + response_from_str_string_100_len, + response_from_str_string_500_len, + response_to_string_pretty_u8, + response_to_string_pretty_u64, + response_to_string_pretty_string_5_len, + response_to_string_pretty_string_10_len, + response_to_string_pretty_string_100_len, + response_to_string_pretty_string_500_len, + response_from_str_bad_field_1, + response_from_str_bad_field_5, + response_from_str_bad_field_10, + response_from_str_bad_field_100, + response_from_str_missing_field, +} +criterion_main!(serde); + +/// Generate `from_str` deserialization benchmark functions for [`Response`]. +macro_rules! impl_from_str_benchmark { + ( + $( + $fn_name:ident => $request_type:ty => $request_string:literal, + )* + ) => { + $( + #[named] + fn $fn_name(c: &mut Criterion) { + let request_string = $request_string; + + c.bench_function(function_name!(), |b| { + b.iter(|| { + let _r = from_str::>( + black_box(request_string) + ); + }); + }); + } + )* + }; +} + +impl_from_str_benchmark! 
{ + response_from_str_u8 => u8 => r#"{"jsonrpc":"2.0","id":123,"result":0}"#, + response_from_str_u64 => u64 => r#"{"jsonrpc":"2.0","id":123,"result":0}"#, + response_from_str_string_5_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"hello"}"#, + response_from_str_string_10_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"hellohello"}"#, + response_from_str_string_100_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld"}"#, + response_from_str_string_500_len => String => r#"{"jsonrpc":"2.0","id":123,"result":"helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld"}"#, + + // The custom serde currently looks at all fields. + // These are for testing the performance if the serde + // has to parse through a bunch of unrelated fields. + response_from_str_bad_field_1 => u8 => r#"{"bad_field":0,"jsonrpc":"2.0","id":123,"result":0}"#, + response_from_str_bad_field_5 => u8 => r#"{"bad_field_1":0,"bad_field_2":0,"bad_field_3":0,"bad_field_4":0,"bad_field_5":0,"jsonrpc":"2.0","id":123,"result":0}"#, + response_from_str_bad_field_10 => u8 => r#"{"bad_field_1":0,"bad_field_2":0,"bad_field_3":0,"bad_field_4":0,"bad_field_5":0,"bad_field_6":0,"bad_field_7":0,"bad_field_8":0,"bad_field_9":0,"bad_field_10":0,"jsonrpc":"2.0","id":123,"result":0}"#, + response_from_str_bad_field_100 => u8 => r#"{"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":0,"31":0,"32":0,"33":0,"34":0,"35":0,"36":0,"37":0,"38":0,"39":0,"40":0,"41":0,"42":0,"43":0,"44":0,"45":0,"46":0,"47":0,"48":0,"49":0,"50":0,"51":0,"52":0,"53":0,"54":0,"55":0,"56":0,"57":0,"58":0,"59":0,"60":0,"61":0,"62":0,"63":0,"64":0,"65":0,"66":0,"67":0,"68":0,"69":0,"70":0,"71":0,"72":0,"73":0,"74":0,"75":0,"76":0,"77":0,"78":0,"79":0,"80":0,"81":0,"82":0,"83":0,"84":0,"85":0,"86":0,"87":0,"88":0,"89":0,"90":0,"91":0,"92":0,"93":0,"94":0,"95":0,"96":0,"97":0,"98":0,"99":0,"100":0,"jsonrpc":"2.0","id":123,"result":0}"#, + + // These are missing the `jsonrpc` field. + response_from_str_missing_field => u8 => r#"{"id":123,"result":0}"#, +} + +/// Generate `to_string_pretty` serialization benchmark functions for [`Response`]. +macro_rules! impl_to_string_pretty_benchmark { + ( + $( + $fn_name:ident => $request_constructor:expr, + )* + ) => { + $( + #[named] + fn $fn_name(c: &mut Criterion) { + let request = $request_constructor; + + c.bench_function(function_name!(), |b| { + b.iter(|| { + let _s = to_string_pretty(black_box(&request)).unwrap(); + }); + }); + } + )* + }; +} + +impl_to_string_pretty_benchmark! 
{ + response_to_string_pretty_u8 => Response::::ok(Id::Null, 0), + response_to_string_pretty_u64 => Response::::ok(Id::Null, 0), + response_to_string_pretty_string_5_len => Response::ok(Id::Null, String::from("hello")), + response_to_string_pretty_string_10_len => Response::ok(Id::Null, String::from("hellohello")), + response_to_string_pretty_string_100_len => Response::ok(Id::Null, String::from("helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld")), + response_to_string_pretty_string_500_len => Response::ok(Id::Null, String::from("helloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworldhelloworld")), +} diff --git a/benches/criterion/cuprate-json-rpc/src/lib.rs b/benches/criterion/cuprate-json-rpc/src/lib.rs new file mode 100644 index 0000000..b29887a --- /dev/null +++ b/benches/criterion/cuprate-json-rpc/src/lib.rs @@ -0,0 +1,2 @@ +//! Benchmark lib for `cuprate-json-rpc`. +#![allow(unused_crate_dependencies, reason = "used in benchmarks")] diff --git a/benches/criterion/example/Cargo.toml b/benches/criterion/example/Cargo.toml new file mode 100644 index 0000000..43e6011 --- /dev/null +++ b/benches/criterion/example/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "cuprate-criterion-example" +version = "0.0.0" +edition = "2021" +description = "Criterion benchmarking example for Cuprate" +license = "MIT" +authors = ["hinto-janai"] +repository = "https://github.com/Cuprate/cuprate/tree/main/benches/criterion/example" +keywords = ["cuprate", "criterion", "benchmark", "example"] + +[dependencies] +criterion = { workspace = true } +function_name = { workspace = true } +serde_json = { workspace = true, features = ["default"] } + +[[bench]] +name = "main" +harness = false + +[lints] +workspace = true \ No newline at end of file diff --git a/benches/criterion/example/README.md b/benches/criterion/example/README.md new file mode 100644 index 0000000..cf1983f --- /dev/null +++ b/benches/criterion/example/README.md @@ -0,0 +1,14 @@ +## `cuprate-criterion-example` +An example of using Criterion for benchmarking Cuprate crates. + +Consider copy+pasting this crate to use as a base when creating new Criterion benchmark crates. + +## `src/` +Benchmark crates have a `benches/` ran by `cargo bench`, but they are also crates themselves, +as in, they have a `src` folder that `benches/` can pull code from. + +The `src` directories in these benchmarking crates are usually filled with +helper functions, types, etc, that are used repeatedly in the benchmarks. + +## `benches/` +These are the actual benchmarks ran by `cargo bench`. diff --git a/benches/criterion/example/benches/example.rs b/benches/criterion/example/benches/example.rs new file mode 100644 index 0000000..7ea8e9a --- /dev/null +++ b/benches/criterion/example/benches/example.rs @@ -0,0 +1,48 @@ +//! Benchmarks. 
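+//!
+//! These can be run on their own with e.g.
+//! `cargo bench --package cuprate-criterion-example`.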
+#![allow(unused_attributes, unused_crate_dependencies)]
+
+use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
+use function_name::named;
+
+use cuprate_criterion_example::SomeHardToCreateObject;
+
+// This is how you register criterion benchmarks.
+criterion_group! {
+    name = benches;
+    config = Criterion::default();
+    targets = benchmark_1, benchmark_range,
+}
+criterion_main!(benches);
+
+/// Benchmark a single input.
+///
+///
+#[named]
+fn benchmark_1(c: &mut Criterion) {
+    // It is recommended to use `function_name!()` as a benchmark
+    // identifier instead of manually re-typing the function name.
+    c.bench_function(function_name!(), |b| {
+        b.iter(|| {
+            black_box(SomeHardToCreateObject::from(1));
+        });
+    });
+}
+
+/// Benchmark a range of inputs.
+///
+///
+#[named]
+fn benchmark_range(c: &mut Criterion) {
+    let mut group = c.benchmark_group(function_name!());
+
+    for i in 0..4 {
+        group.throughput(Throughput::Elements(i));
+        group.bench_with_input(BenchmarkId::from_parameter(i), &i, |b, &i| {
+            b.iter(|| {
+                black_box(SomeHardToCreateObject::from(i));
+            });
+        });
+    }
+
+    group.finish();
+}
diff --git a/benches/criterion/example/benches/main.rs b/benches/criterion/example/benches/main.rs
new file mode 100644
index 0000000..d4f0bf8
--- /dev/null
+++ b/benches/criterion/example/benches/main.rs
@@ -0,0 +1,10 @@
+//! Benchmarks examples.
+#![allow(unused_crate_dependencies)]
+
+// All modules within `benches/` are `mod`ed here.
+mod example;
+
+// And all the Criterion benchmarks are registered like so:
+criterion::criterion_main! {
+    example::benches,
+}
diff --git a/benches/criterion/example/src/lib.rs b/benches/criterion/example/src/lib.rs
new file mode 100644
index 0000000..0f732a4
--- /dev/null
+++ b/benches/criterion/example/src/lib.rs
@@ -0,0 +1,13 @@
+#![doc = include_str!("../README.md")] // See the README for crate documentation.
+#![allow(unused_crate_dependencies, reason = "used in benchmarks")]
+
+/// Shared type that all benchmarks can use.
+#[expect(dead_code)]
+pub struct SomeHardToCreateObject(u64);
+
+impl From<u64> for SomeHardToCreateObject {
+    /// Shared function that all benchmarks can use.
+ fn from(value: u64) -> Self { + Self(value) + } +} diff --git a/books/architecture/src/SUMMARY.md b/books/architecture/src/SUMMARY.md index 0961d8f..a99d099 100644 --- a/books/architecture/src/SUMMARY.md +++ b/books/architecture/src/SUMMARY.md @@ -143,9 +143,16 @@ --- -- [⚪️ Benchmarking](benchmarking/intro.md) - - [⚪️ Criterion](benchmarking/criterion.md) - - [⚪️ Harness](benchmarking/harness.md) +- [🟢 Benchmarking](benchmarking/intro.md) + - [🟢 Criterion](benchmarking/criterion/intro.md) + - [🟢 Creating](benchmarking/criterion/creating.md) + - [🟢 Running](benchmarking/criterion/running.md) + - [🟢 `cuprate-benchmark`](benchmarking/cuprate/intro.md) + - [🟢 Creating](benchmarking/cuprate/creating.md) + - [🟢 Running](benchmarking/cuprate/running.md) + +--- + - [⚪️ Testing](testing/intro.md) - [⚪️ Monero data](testing/monero-data.md) - [⚪️ RPC client](testing/rpc-client.md) diff --git a/books/architecture/src/appendix/crates.md b/books/architecture/src/appendix/crates.md index a0dff48..5124180 100644 --- a/books/architecture/src/appendix/crates.md +++ b/books/architecture/src/appendix/crates.md @@ -68,3 +68,11 @@ cargo doc --open --package cuprate-blockchain | [`cuprate-helper`](https://doc.cuprate.org/cuprate_helper) | [`helper/`](https://github.com/Cuprate/cuprate/tree/main/helper) | Kitchen-sink helper crate for Cuprate | [`cuprate-test-utils`](https://doc.cuprate.org/cuprate_test_utils) | [`test-utils/`](https://github.com/Cuprate/cuprate/tree/main/test-utils) | Testing utilities for Cuprate | [`cuprate-types`](https://doc.cuprate.org/cuprate_types) | [`types/`](https://github.com/Cuprate/cuprate/tree/main/types) | Shared types across Cuprate + +## Benchmarks +| Crate | In-tree path | Purpose | +|-------|--------------|---------| +| [`cuprate-benchmark`](https://doc.cuprate.org/cuprate_benchmark) | [`benches/benchmark/bin/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin) | Cuprate benchmarking binary +| [`cuprate-benchmark-lib`](https://doc.cuprate.org/cuprate_benchmark_lib) | [`benches/benchmark/lib/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib) | Cuprate benchmarking library +| `cuprate-benchmark-*` | [`benches/benchmark/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/) | Benchmark for a Cuprate crate that uses `cuprate-benchmark` +| `cuprate-criterion-*` | [`benches/criterion/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) | Benchmark for a Cuprate crate that uses [Criterion](https://bheisler.github.io/criterion.rs/book) \ No newline at end of file diff --git a/books/architecture/src/benchmarking/criterion.md b/books/architecture/src/benchmarking/criterion.md deleted file mode 100644 index e9d61e6..0000000 --- a/books/architecture/src/benchmarking/criterion.md +++ /dev/null @@ -1 +0,0 @@ -# ⚪️ Criterion diff --git a/books/architecture/src/benchmarking/criterion/creating.md b/books/architecture/src/benchmarking/criterion/creating.md new file mode 100644 index 0000000..0100904 --- /dev/null +++ b/books/architecture/src/benchmarking/criterion/creating.md @@ -0,0 +1,21 @@ +# Creating +Creating a new Criterion-based benchmarking crate for one of Cuprate's crates is relatively simple, +although, it requires knowledge of how to use Criterion first: + +1. Read the `Getting Started` section of +2. Copy [`benches/criterion/example`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/example) as base +3. 
Get started
+
+## Naming
+New benchmark crates using Criterion should:
+- Be in [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/)
+- Be in the `cuprate-criterion-$CRATE_NAME` format
+
+For a real example, see:
+[`cuprate-criterion-json-rpc`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion/cuprate-json-rpc).
+
+## Workspace
+Finally, make sure to add the benchmark crate to the workspace
+[`Cargo.toml`](https://github.com/Cuprate/cuprate/blob/main/Cargo.toml) file.
+
+Your benchmark is now ready to be run.
\ No newline at end of file
diff --git a/books/architecture/src/benchmarking/criterion/intro.md b/books/architecture/src/benchmarking/criterion/intro.md
new file mode 100644
index 0000000..b7a79b2
--- /dev/null
+++ b/books/architecture/src/benchmarking/criterion/intro.md
@@ -0,0 +1,4 @@
+# Criterion
+Each sub-directory in [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) is a crate that uses [Criterion](https://bheisler.github.io/criterion.rs/book) for timing single functions and/or groups of functions.
+
+They are generally small in scope.
\ No newline at end of file
diff --git a/books/architecture/src/benchmarking/criterion/running.md b/books/architecture/src/benchmarking/criterion/running.md
new file mode 100644
index 0000000..14067f6
--- /dev/null
+++ b/books/architecture/src/benchmarking/criterion/running.md
@@ -0,0 +1,15 @@
+# Running
+To run all Criterion benchmarks, run this from the repository root:
+```bash
+cargo bench
+```
+
+To run specific package(s), use:
+```bash
+cargo bench --package $CRITERION_BENCHMARK_CRATE_NAME
+```
+
+For example:
+```bash
+cargo bench --package cuprate-criterion-json-rpc
+```
\ No newline at end of file
diff --git a/books/architecture/src/benchmarking/cuprate/creating.md b/books/architecture/src/benchmarking/cuprate/creating.md
new file mode 100644
index 0000000..76eab78
--- /dev/null
+++ b/books/architecture/src/benchmarking/cuprate/creating.md
@@ -0,0 +1,57 @@
+# Creating
+New benchmarks are plugged into `cuprate-benchmark` by:
+1. Implementing `cuprate_benchmark_lib::Benchmark`
+1. Registering the benchmark in the `cuprate_benchmark` binary
+
+See [`benches/benchmark/example`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/example)
+for an example.
+
+## Creating the benchmark crate
+Before plugging into `cuprate-benchmark`, your actual benchmark crate must be created:
+
+1. Create a new crate inside `benches/benchmark` (consider copying `benches/benchmark/example` as a base)
+1. Pull in `cuprate_benchmark_lib` as a dependency
+1. Create a benchmark
+1. Implement `cuprate_benchmark_lib::Benchmark`
+
+New benchmark crates using `cuprate-benchmark` should:
+- Be in [`benches/benchmark/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/)
+- Be in the `cuprate-benchmark-$CRATE_NAME` format
+
+For a real example, see:
+[`cuprate-benchmark-database`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/cuprate-database).
+
+## `cuprate_benchmark_lib::Benchmark`
+This is the trait that standardizes all benchmarks run under `cuprate-benchmark`.
+
+It must be implemented by your benchmarking crate.
+
+See the `cuprate-benchmark-lib` crate documentation for a user guide: <https://doc.cuprate.org/cuprate_benchmark_lib>.
+
+## Adding a feature to `cuprate-benchmark`
+After your benchmark's behavior is defined, it must be registered
+in the binary that is actually run: `cuprate-benchmark`.
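For illustration only, the `Cargo.toml` side of this registration for a hypothetical benchmark crate named `egg` might look roughly like the sketch below; the dependency name and path are assumptions, not part of this patch. The paragraphs that follow describe the actual steps.

```toml
# Hypothetical additions to `benches/benchmark/bin/Cargo.toml`.
# The `egg` crate/feature name and the relative path are illustrative only.
[dependencies]
cuprate-benchmark-egg = { path = "../cuprate-egg", optional = true }

[features]
egg = ["dep:cuprate-benchmark-egg"]
```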
+
+If your benchmark is new, add a new crate feature to [`cuprate-benchmark`'s Cargo.toml file](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin/Cargo.toml) with an optional dependency to your benchmarking crate.
+
+Please remember to edit the feature table in the
+[`README.md`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin/README.md) as well!
+
+## Adding to `cuprate-benchmark`'s `main()`
+After adding your crate's feature, add a conditional line that runs the benchmark
+if the feature is enabled to the `main()` function:
+
+For example, if your crate's name is `egg`:
+```rust
+cfg_if! {
+    if #[cfg(feature = "egg")] {
+        run::run_benchmark::<egg::Benchmark>(&mut timings);
+    }
+}
+```
+
+## Workspace
+Finally, make sure to add the benchmark crate to the workspace
+[`Cargo.toml`](https://github.com/Cuprate/cuprate/blob/main/Cargo.toml) file.
+
+Your benchmark is now ready to be run.
\ No newline at end of file
diff --git a/books/architecture/src/benchmarking/cuprate/intro.md b/books/architecture/src/benchmarking/cuprate/intro.md
new file mode 100644
index 0000000..25efb46
--- /dev/null
+++ b/books/architecture/src/benchmarking/cuprate/intro.md
@@ -0,0 +1,37 @@
+# cuprate-benchmark
+Cuprate has 2 custom crates for general benchmarking:
+- `cuprate-benchmark`; the actual binary crate that is run
+- `cuprate-benchmark-lib`; the library that other crates hook into
+
+The abstract purpose of `cuprate-benchmark` is very simple:
+1. Set-up the benchmark
+1. Start timer
+1. Run benchmark
+1. Output data
+
+`cuprate-benchmark` runs the benchmarks found in [`benches/benchmark/cuprate-*`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark).
+
+`cuprate-benchmark-lib` defines the `Benchmark` trait that all
+benchmark crates implement to "plug-in" to the benchmarking harness.
+
+## Diagram
+A diagram displaying the relation between `cuprate-benchmark` and related crates.
+ +``` + ┌─────────────────────┐ + │ cuprate_benchmark │ + │ (actual binary ran) │ + └──────────┬──────────┘ + ┌──────────────────┴───────────────────┐ + │ cuprate_benchmark_lib │ + │ ┌───────────────────────────────────┐│ + │ │ trait Benchmark ││ + │ └───────────────────────────────────┘│ + └──────────────────┬───────────────────┘ +┌───────────────────────────┐ │ ┌───────────────────────────┐ +│ cuprate_benchmark_example ├──┼───┤ cuprate_benchmark_* │ +└───────────────────────────┘ │ └───────────────────────────┘ +┌───────────────────────────┐ │ ┌───────────────────────────┐ +│ cuprate_benchmark_* ├──┴───┤ cuprate_benchmark_* │ +└───────────────────────────┘ └───────────────────────────┘ +``` \ No newline at end of file diff --git a/books/architecture/src/benchmarking/cuprate/running.md b/books/architecture/src/benchmarking/cuprate/running.md new file mode 100644 index 0000000..b776163 --- /dev/null +++ b/books/architecture/src/benchmarking/cuprate/running.md @@ -0,0 +1,16 @@ +# Running +`cuprate-benchmark` benchmarks are ran with this command: +```bash +cargo run --release --package cuprate-benchmark --features $BENCHMARK_CRATE_FEATURE +``` + +For example, to run the example benchmark: +```bash +cargo run --release --package cuprate-benchmark --features example +``` + +Use the `all` feature to run all benchmarks: +```bash +# Run all benchmarks +cargo run --release --package cuprate-benchmark --features all +``` diff --git a/books/architecture/src/benchmarking/harness.md b/books/architecture/src/benchmarking/harness.md deleted file mode 100644 index 6f82b52..0000000 --- a/books/architecture/src/benchmarking/harness.md +++ /dev/null @@ -1 +0,0 @@ -# ⚪️ Harness diff --git a/books/architecture/src/benchmarking/intro.md b/books/architecture/src/benchmarking/intro.md index f043a0b..e6ab6b1 100644 --- a/books/architecture/src/benchmarking/intro.md +++ b/books/architecture/src/benchmarking/intro.md @@ -1 +1,22 @@ -# ⚪️ Benchmarking +# Benchmarking +Cuprate has 2 types of benchmarks: +- [Criterion](https://bheisler.github.io/criterion.rs/book/user_guide/advanced_configuration.html) benchmarks +- `cuprate-benchmark` benchmarks + +Criterion is used for micro benchmarks; they time single functions, groups of functions, and generally are small in scope. + +`cuprate-benchmark` and [`cuprate-benchmark-lib`](https://doc.cuprate.org/cuprate_benchmark_lib) are custom in-house crates Cuprate uses for macro benchmarks; these test sub-systems, sections of a sub-system, or otherwise larger or more complicated code that isn't well-suited for micro benchmarks. + +## File layout and purpose +All benchmarking related files are in the [`benches/`](https://github.com/Cuprate/cuprate/tree/main/benches) folder. 
+ +This directory is organized like such: + +| Directory | Purpose | +|-------------------------------|---------| +| [`benches/criterion/`](https://github.com/Cuprate/cuprate/tree/main/benches/criterion) | Criterion (micro) benchmarks +| `benches/criterion/cuprate-*` | Criterion benchmarks for the crate with the same name +| [`benches/benchmark/`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark) | Cuprate's custom benchmarking files +| [`benches/benchmark/bin`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/bin) | The `cuprate-benchmark` crate; the actual binary run that links all benchmarks +| [`benches/benchmark/lib`](https://github.com/Cuprate/cuprate/tree/main/benches/benchmark/lib) | The `cuprate-benchmark-lib` crate; the benchmarking framework all benchmarks plug into +| `benches/benchmark/cuprate-*` | `cuprate-benchmark` benchmarks for the crate with the same name From 01150ab84c1d24147bb45c61dffc45a70956aba3 Mon Sep 17 00:00:00 2001 From: hinto-janai Date: Wed, 27 Nov 2024 18:04:58 -0500 Subject: [PATCH 14/14] rpc/types: fix epee deserialization for `GetBlocksResponse` (#345) * header + flatten * fix optional values * `fn error() -> String` -> `error!() -> &'static str` * extract out `PoolInfo` * fix cargo hack --- rpc/types/src/bin.rs | 292 +++----------------------------- rpc/types/src/misc/misc.rs | 4 +- rpc/types/src/misc/mod.rs | 2 + rpc/types/src/misc/pool_info.rs | 171 +++++++++++++++++++ rpc/types/src/misc/tx_entry.rs | 5 +- 5 files changed, 199 insertions(+), 275 deletions(-) create mode 100644 rpc/types/src/misc/pool_info.rs diff --git a/rpc/types/src/bin.rs b/rpc/types/src/bin.rs index 7b94191..414214c 100644 --- a/rpc/types/src/bin.rs +++ b/rpc/types/src/bin.rs @@ -9,26 +9,19 @@ use cuprate_fixed_bytes::ByteArrayVec; use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] -use cuprate_epee_encoding::{ - container_as_blob::ContainerAsBlob, - epee_object, error, - macros::bytes::{Buf, BufMut}, - read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, -}; +use cuprate_epee_encoding::container_as_blob::ContainerAsBlob; use cuprate_types::BlockCompleteEntry; use crate::{ base::AccessResponseBase, - macros::{define_request, define_request_and_response, define_request_and_response_doc}, - misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolTxInfo, Status}, + macros::define_request_and_response, + misc::{BlockOutputIndices, GetOutputsOut, OutKeyBin, PoolInfo}, rpc_call::RpcCallValue, }; #[cfg(any(feature = "epee", feature = "serde"))] use crate::defaults::{default_false, default_zero}; -#[cfg(feature = "epee")] -use crate::misc::PoolInfoExtent; //---------------------------------------------------------------------------------------------------- Definitions define_request_and_response! { @@ -115,15 +108,14 @@ define_request_and_response! { } } -//---------------------------------------------------------------------------------------------------- GetBlocks -define_request! { - #[doc = define_request_and_response_doc!( - "response" => GetBlocksResponse, - get_blocksbin, - cc73fe71162d564ffda8e549b79a350bca53c454, - core_rpc_server_commands_defs, h, 162, 262, - )] - GetBlocksRequest { +define_request_and_response! { + get_blocksbin, + cc73fe71162d564ffda8e549b79a350bca53c454 => + core_rpc_server_commands_defs.h => 162..=262, + + GetBlocks, + + Request { requested_info: u8 = default_zero::(), "default_zero", // FIXME: This is a `std::list` in `monerod` because...? block_ids: ByteArrayVec<32>, @@ -131,259 +123,17 @@ define_request! 
{ prune: bool, no_miner_tx: bool = default_false(), "default_false", pool_info_since: u64 = default_zero::(), "default_zero", - } -} + }, -#[doc = define_request_and_response_doc!( - "request" => GetBlocksRequest, - get_blocksbin, - cc73fe71162d564ffda8e549b79a350bca53c454, - core_rpc_server_commands_defs, h, 162, 262, -)] -/// -/// This response's variant depends upon [`PoolInfoExtent`]. -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum GetBlocksResponse { - /// Will always serialize a [`PoolInfoExtent::None`] field. - PoolInfoNone(GetBlocksResponsePoolInfoNone), - /// Will always serialize a [`PoolInfoExtent::Incremental`] field. - PoolInfoIncremental(GetBlocksResponsePoolInfoIncremental), - /// Will always serialize a [`PoolInfoExtent::Full`] field. - PoolInfoFull(GetBlocksResponsePoolInfoFull), -} - -impl Default for GetBlocksResponse { - fn default() -> Self { - Self::PoolInfoNone(GetBlocksResponsePoolInfoNone::default()) - } -} - -/// Data within [`GetBlocksResponse::PoolInfoNone`]. -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct GetBlocksResponsePoolInfoNone { - pub status: Status, - pub untrusted: bool, - pub blocks: Vec, - pub start_height: u64, - pub current_height: u64, - pub output_indices: Vec, - pub daemon_time: u64, -} - -#[cfg(feature = "epee")] -epee_object! { - GetBlocksResponsePoolInfoNone, - status: Status, - untrusted: bool, - blocks: Vec, - start_height: u64, - current_height: u64, - output_indices: Vec, - daemon_time: u64, -} - -/// Data within [`GetBlocksResponse::PoolInfoIncremental`]. -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct GetBlocksResponsePoolInfoIncremental { - pub status: Status, - pub untrusted: bool, - pub blocks: Vec, - pub start_height: u64, - pub current_height: u64, - pub output_indices: Vec, - pub daemon_time: u64, - pub added_pool_txs: Vec, - pub remaining_added_pool_txids: ByteArrayVec<32>, - pub removed_pool_txids: ByteArrayVec<32>, -} - -#[cfg(feature = "epee")] -epee_object! { - GetBlocksResponsePoolInfoIncremental, - status: Status, - untrusted: bool, - blocks: Vec, - start_height: u64, - current_height: u64, - output_indices: Vec, - daemon_time: u64, - added_pool_txs: Vec, - remaining_added_pool_txids: ByteArrayVec<32>, - removed_pool_txids: ByteArrayVec<32>, -} - -/// Data within [`GetBlocksResponse::PoolInfoFull`]. -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct GetBlocksResponsePoolInfoFull { - pub status: Status, - pub untrusted: bool, - pub blocks: Vec, - pub start_height: u64, - pub current_height: u64, - pub output_indices: Vec, - pub daemon_time: u64, - pub added_pool_txs: Vec, - pub remaining_added_pool_txids: ByteArrayVec<32>, -} - -#[cfg(feature = "epee")] -epee_object! { - GetBlocksResponsePoolInfoFull, - status: Status, - untrusted: bool, - blocks: Vec, - start_height: u64, - current_height: u64, - output_indices: Vec, - daemon_time: u64, - added_pool_txs: Vec, - remaining_added_pool_txids: ByteArrayVec<32>, -} - -#[cfg(feature = "epee")] -/// [`EpeeObjectBuilder`] for [`GetBlocksResponse`]. -/// -/// Not for public usage. 
-#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct __GetBlocksResponseEpeeBuilder { - pub status: Option, - pub untrusted: Option, - pub blocks: Option>, - pub start_height: Option, - pub current_height: Option, - pub output_indices: Option>, - pub daemon_time: Option, - pub pool_info_extent: Option, - pub added_pool_txs: Option>, - pub remaining_added_pool_txids: Option>, - pub removed_pool_txids: Option>, -} - -#[cfg(feature = "epee")] -impl EpeeObjectBuilder for __GetBlocksResponseEpeeBuilder { - fn add_field(&mut self, name: &str, r: &mut B) -> error::Result { - macro_rules! read_epee_field { - ($($field:ident),*) => { - match name { - $( - stringify!($field) => { self.$field = Some(read_epee_value(r)?); }, - )* - _ => return Ok(false), - } - }; - } - - read_epee_field! { - status, - untrusted, - blocks, - start_height, - current_height, - output_indices, - daemon_time, - pool_info_extent, - added_pool_txs, - remaining_added_pool_txids, - removed_pool_txids - } - - Ok(true) - } - - fn finish(self) -> error::Result { - const ELSE: error::Error = error::Error::Format("Required field was not found!"); - - let status = self.status.ok_or(ELSE)?; - let untrusted = self.untrusted.ok_or(ELSE)?; - let blocks = self.blocks.ok_or(ELSE)?; - let start_height = self.start_height.ok_or(ELSE)?; - let current_height = self.current_height.ok_or(ELSE)?; - let output_indices = self.output_indices.ok_or(ELSE)?; - let daemon_time = self.daemon_time.ok_or(ELSE)?; - let pool_info_extent = self.pool_info_extent.ok_or(ELSE)?; - - let this = match pool_info_extent { - PoolInfoExtent::None => { - GetBlocksResponse::PoolInfoNone(GetBlocksResponsePoolInfoNone { - status, - untrusted, - blocks, - start_height, - current_height, - output_indices, - daemon_time, - }) - } - PoolInfoExtent::Incremental => { - GetBlocksResponse::PoolInfoIncremental(GetBlocksResponsePoolInfoIncremental { - status, - untrusted, - blocks, - start_height, - current_height, - output_indices, - daemon_time, - added_pool_txs: self.added_pool_txs.ok_or(ELSE)?, - remaining_added_pool_txids: self.remaining_added_pool_txids.ok_or(ELSE)?, - removed_pool_txids: self.removed_pool_txids.ok_or(ELSE)?, - }) - } - PoolInfoExtent::Full => { - GetBlocksResponse::PoolInfoFull(GetBlocksResponsePoolInfoFull { - status, - untrusted, - blocks, - start_height, - current_height, - output_indices, - daemon_time, - added_pool_txs: self.added_pool_txs.ok_or(ELSE)?, - remaining_added_pool_txids: self.remaining_added_pool_txids.ok_or(ELSE)?, - }) - } - }; - - Ok(this) - } -} - -#[cfg(feature = "epee")] -impl EpeeObject for GetBlocksResponse { - type Builder = __GetBlocksResponseEpeeBuilder; - - fn number_of_fields(&self) -> u64 { - // [`PoolInfoExtent`] + inner struct fields. 
- let inner_fields = match self { - Self::PoolInfoNone(s) => s.number_of_fields(), - Self::PoolInfoIncremental(s) => s.number_of_fields(), - Self::PoolInfoFull(s) => s.number_of_fields(), - }; - - 1 + inner_fields - } - - fn write_fields(self, w: &mut B) -> error::Result<()> { - match self { - Self::PoolInfoNone(s) => { - s.write_fields(w)?; - write_field(PoolInfoExtent::None.to_u8(), "pool_info_extent", w)?; - } - Self::PoolInfoIncremental(s) => { - s.write_fields(w)?; - write_field(PoolInfoExtent::Incremental.to_u8(), "pool_info_extent", w)?; - } - Self::PoolInfoFull(s) => { - s.write_fields(w)?; - write_field(PoolInfoExtent::Full.to_u8(), "pool_info_extent", w)?; - } - } - - Ok(()) + // TODO: add `top_block_hash` field + // + AccessResponseBase { + blocks: Vec, + start_height: u64, + current_height: u64, + output_indices: Vec, + daemon_time: u64, + pool_info: PoolInfo, } } diff --git a/rpc/types/src/misc/misc.rs b/rpc/types/src/misc/misc.rs index 8f7467b..67ec756 100644 --- a/rpc/types/src/misc/misc.rs +++ b/rpc/types/src/misc/misc.rs @@ -11,11 +11,11 @@ use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] use cuprate_epee_encoding::epee_object; -use crate::macros::monero_definition_link; - #[cfg(any(feature = "epee", feature = "serde"))] use crate::defaults::default_zero; +use crate::macros::monero_definition_link; + //---------------------------------------------------------------------------------------------------- Macros /// This macro (local to this file) defines all the misc types. /// diff --git a/rpc/types/src/misc/mod.rs b/rpc/types/src/misc/mod.rs index e09f847..4976756 100644 --- a/rpc/types/src/misc/mod.rs +++ b/rpc/types/src/misc/mod.rs @@ -17,6 +17,7 @@ mod distribution; mod key_image_spent_status; #[expect(clippy::module_inception)] mod misc; +mod pool_info; mod pool_info_extent; mod status; mod tx_entry; @@ -30,6 +31,7 @@ pub use misc::{ OutputDistributionData, Peer, PoolTxInfo, PublicNode, SetBan, Span, SpentKeyImageInfo, SyncInfoPeer, TxBacklogEntry, TxInfo, TxOutputIndices, TxpoolHisto, TxpoolStats, }; +pub use pool_info::PoolInfo; pub use pool_info_extent::PoolInfoExtent; pub use status::Status; pub use tx_entry::TxEntry; diff --git a/rpc/types/src/misc/pool_info.rs b/rpc/types/src/misc/pool_info.rs new file mode 100644 index 0000000..e9ba875 --- /dev/null +++ b/rpc/types/src/misc/pool_info.rs @@ -0,0 +1,171 @@ +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "epee")] +use crate::misc::PoolInfoExtent; +#[cfg(feature = "epee")] +use cuprate_epee_encoding::{ + epee_object, error, + macros::bytes::{Buf, BufMut}, + read_epee_value, write_field, EpeeObject, EpeeObjectBuilder, +}; + +use cuprate_fixed_bytes::ByteArrayVec; + +use crate::misc::PoolTxInfo; + +//---------------------------------------------------------------------------------------------------- PoolInfo +#[doc = crate::macros::monero_definition_link!( + cc73fe71162d564ffda8e549b79a350bca53c454, + "rpc/core_rpc_server_commands_defs.h", + 223..=228 +)] +/// Used in [`crate::bin::GetBlocksResponse`]. +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[repr(u8)] +pub enum PoolInfo { + #[default] + None, + Incremental(PoolInfoIncremental), + Full(PoolInfoFull), +} + +//---------------------------------------------------------------------------------------------------- Internal data +/// Data within [`PoolInfo::Incremental`]. 
+#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct PoolInfoIncremental { + pub added_pool_txs: Vec, + pub remaining_added_pool_txids: ByteArrayVec<32>, + pub removed_pool_txids: ByteArrayVec<32>, +} + +#[cfg(feature = "epee")] +epee_object! { + PoolInfoIncremental, + added_pool_txs: Vec, + remaining_added_pool_txids: ByteArrayVec<32>, + removed_pool_txids: ByteArrayVec<32>, +} + +/// Data within [`PoolInfo::Full`]. +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, Default, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct PoolInfoFull { + pub added_pool_txs: Vec, + pub remaining_added_pool_txids: ByteArrayVec<32>, +} + +#[cfg(feature = "epee")] +epee_object! { + PoolInfoFull, + added_pool_txs: Vec, + remaining_added_pool_txids: ByteArrayVec<32>, +} + +//---------------------------------------------------------------------------------------------------- PoolInfo epee impl +#[cfg(feature = "epee")] +/// [`EpeeObjectBuilder`] for [`GetBlocksResponse`]. +/// +/// Not for public usage. +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct __PoolInfoEpeeBuilder { + /// This is a distinct field in `monerod`, + /// which as represented in this library with [`PoolInfo`]'s `u8` tag. + pub pool_info_extent: Option, + + pub added_pool_txs: Option>, + pub remaining_added_pool_txids: Option>, + pub removed_pool_txids: Option>, +} + +// Custom epee implementation. +// +// HACK/INVARIANT: +// If any data within [`PoolInfo`] changes, the below code should be changed as well. +#[cfg(feature = "epee")] +impl EpeeObjectBuilder for __PoolInfoEpeeBuilder { + fn add_field(&mut self, name: &str, r: &mut B) -> error::Result { + macro_rules! read_epee_field { + ($($field:ident),*) => { + match name { + $( + stringify!($field) => { self.$field = Some(read_epee_value(r)?); }, + )* + _ => return Ok(false), + } + }; + } + + read_epee_field! { + pool_info_extent, + added_pool_txs, + remaining_added_pool_txids, + removed_pool_txids + } + + Ok(true) + } + + fn finish(self) -> error::Result { + // INVARIANT: + // `monerod` omits serializing the field itself when a container is empty, + // `unwrap_or_default()` is used over `error()` in these cases. + // Some of the uses are when values have default fallbacks: `pool_info_extent`. + + let pool_info_extent = self.pool_info_extent.unwrap_or_default(); + let this = match pool_info_extent { + PoolInfoExtent::None => PoolInfo::None, + PoolInfoExtent::Incremental => PoolInfo::Incremental(PoolInfoIncremental { + added_pool_txs: self.added_pool_txs.unwrap_or_default(), + remaining_added_pool_txids: self.remaining_added_pool_txids.unwrap_or_default(), + removed_pool_txids: self.removed_pool_txids.unwrap_or_default(), + }), + PoolInfoExtent::Full => PoolInfo::Full(PoolInfoFull { + added_pool_txs: self.added_pool_txs.unwrap_or_default(), + remaining_added_pool_txids: self.remaining_added_pool_txids.unwrap_or_default(), + }), + }; + + Ok(this) + } +} + +#[cfg(feature = "epee")] +impl EpeeObject for PoolInfo { + type Builder = __PoolInfoEpeeBuilder; + + fn number_of_fields(&self) -> u64 { + // Inner struct fields. 
+ let inner_fields = match self { + Self::None => 0, + Self::Incremental(s) => s.number_of_fields(), + Self::Full(s) => s.number_of_fields(), + }; + + // [`PoolInfoExtent`] + inner struct fields + 1 + inner_fields + } + + fn write_fields(self, w: &mut B) -> error::Result<()> { + const FIELD: &str = "pool_info_extent"; + + match self { + Self::None => { + write_field(PoolInfoExtent::None.to_u8(), FIELD, w)?; + } + Self::Incremental(s) => { + s.write_fields(w)?; + write_field(PoolInfoExtent::Incremental.to_u8(), FIELD, w)?; + } + Self::Full(s) => { + s.write_fields(w)?; + write_field(PoolInfoExtent::Full.to_u8(), FIELD, w)?; + } + } + + Ok(()) + } +} diff --git a/rpc/types/src/misc/tx_entry.rs b/rpc/types/src/misc/tx_entry.rs index 86d0207..59dd460 100644 --- a/rpc/types/src/misc/tx_entry.rs +++ b/rpc/types/src/misc/tx_entry.rs @@ -2,8 +2,6 @@ //---------------------------------------------------------------------------------------------------- Use #[cfg(feature = "serde")] -use crate::serde::{serde_false, serde_true}; -#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; #[cfg(feature = "epee")] @@ -13,6 +11,9 @@ use cuprate_epee_encoding::{ EpeeObject, EpeeObjectBuilder, }; +#[cfg(feature = "serde")] +use crate::serde::{serde_false, serde_true}; + //---------------------------------------------------------------------------------------------------- TxEntry #[doc = crate::macros::monero_definition_link!( cc73fe71162d564ffda8e549b79a350bca53c454,